def sendMail(self, subject, text):
    if self.mail_server and self.mail_sender and self.mail_receiver:
        retry = 0
        ready = False
        while not ready:
            try:
                msg = MIMEText('%s\n\n%s' % (text, mail_footer), 'plain')
                msg['Subject'] = subject
                msg['From'] = self.mail_sender
                msg['To'] = self.mail_receiver
                s = smtplib.SMTP(self.mail_server)
                s.sendmail(self.mail_sender, self.mail_receiver.split(','),
                           msg.as_string())
                s.quit()
                self.logger.debug('Sent email notification.')
                ready = True
            except socket.gaierror as e:
                if e.errno == socket.EAI_AGAIN:
                    # Temporary DNS failure: back off and retry up to 10 times.
                    time.sleep(100)
                    retry = retry + 1
                    ready = retry > 10
                else:
                    ready = True
                if ready:
                    et, ev, tb = sys.exc_info()
                    self.logger.error('got exception "%s"' % str(ev))
                    self.logger.error('%s' % str(traceback.format_exception(et, ev, tb)))
            except:
                et, ev, tb = sys.exc_info()
                self.logger.error('got exception "%s"' % str(ev))
                self.logger.error('%s' % str(traceback.format_exception(et, ev, tb)))
                ready = True
def xmlrpc_handle_exception_int(e):
    if isinstance(e, odoo.exceptions.UserError):
        fault = xmlrpclib.Fault(RPC_FAULT_CODE_WARNING, odoo.tools.ustr(e.value))
    elif isinstance(e, odoo.exceptions.RedirectWarning):
        fault = xmlrpclib.Fault(RPC_FAULT_CODE_WARNING, str(e))
    elif isinstance(e, odoo.exceptions.MissingError):
        fault = xmlrpclib.Fault(RPC_FAULT_CODE_WARNING, str(e))
    elif isinstance(e, odoo.exceptions.AccessError):
        fault = xmlrpclib.Fault(RPC_FAULT_CODE_ACCESS_ERROR, str(e))
    elif isinstance(e, odoo.exceptions.AccessDenied):
        fault = xmlrpclib.Fault(RPC_FAULT_CODE_ACCESS_DENIED, str(e))
    elif isinstance(e, odoo.exceptions.DeferredException):
        info = e.traceback
        # Which one is the best ?
        formatted_info = "".join(traceback.format_exception(*info))
        #formatted_info = odoo.tools.exception_to_unicode(e) + '\n' + info
        fault = xmlrpclib.Fault(RPC_FAULT_CODE_APPLICATION_ERROR, formatted_info)
    else:
        info = sys.exc_info()
        # Which one is the best ?
        formatted_info = "".join(traceback.format_exception(*info))
        #formatted_info = odoo.tools.exception_to_unicode(e) + '\n' + info
        fault = xmlrpclib.Fault(RPC_FAULT_CODE_APPLICATION_ERROR, formatted_info)
    return xmlrpclib.dumps(fault, allow_none=None)
def xmlrpc_handle_exception_string(e):
    if isinstance(e, ecore.exceptions.UserError):
        fault = xmlrpclib.Fault('warning -- %s\n\n%s' % (e.name, e.value), '')
        response = xmlrpclib.dumps(fault, allow_none=False, encoding=None)
    elif isinstance(e, ecore.exceptions.RedirectWarning):
        fault = xmlrpclib.Fault('warning -- Warning\n\n' + str(e), '')
        # This branch was missing its dumps() call, which would leave
        # `response` unbound and raise a NameError at the return below.
        response = xmlrpclib.dumps(fault, allow_none=False, encoding=None)
    elif isinstance(e, ecore.exceptions.MissingError):
        fault = xmlrpclib.Fault('warning -- MissingError\n\n' + str(e), '')
        response = xmlrpclib.dumps(fault, allow_none=False, encoding=None)
    elif isinstance(e, ecore.exceptions.AccessError):
        fault = xmlrpclib.Fault('warning -- AccessError\n\n' + str(e), '')
        response = xmlrpclib.dumps(fault, allow_none=False, encoding=None)
    elif isinstance(e, ecore.exceptions.AccessDenied):
        fault = xmlrpclib.Fault('AccessDenied', str(e))
        response = xmlrpclib.dumps(fault, allow_none=False, encoding=None)
    elif isinstance(e, ecore.exceptions.DeferredException):
        info = e.traceback
        formatted_info = "".join(traceback.format_exception(*info))
        fault = xmlrpclib.Fault(ecore.tools.ustr(e.message), formatted_info)
        response = xmlrpclib.dumps(fault, allow_none=False, encoding=None)
    #InternalError
    else:
        info = sys.exc_info()
        formatted_info = "".join(traceback.format_exception(*info))
        fault = xmlrpclib.Fault(ecore.tools.exception_to_unicode(e), formatted_info)
        response = xmlrpclib.dumps(fault, allow_none=None, encoding=None)
    return response
def apply(catalog, goal):
    """ Apply the goal configuration to live catalog """
    print 'applying...'
    counter = 0
    ready = False
    while not ready:
        try:
            catalog.applyCatalogConfig(goal)
            ready = True
        except HTTPError as err:
            print err
            print err.errno
            if err.errno == CONFLICT:
                et, ev, tb = sys.exc_info()
                print 'Conflict Exception "%s"' % str(ev)
                counter = counter + 1
                if counter >= 5:
                    print '%s' % str(traceback.format_exception(et, ev, tb))
                    ready = True
                else:
                    print 'Retrying...'
        except:
            et, ev, tb = sys.exc_info()
            print str(et)
            print 'Exception "%s"' % str(ev)
            print '%s' % str(traceback.format_exception(et, ev, tb))
            ready = True
def _exc_info_to_string(self, err, test):
    """Converts a sys.exc_info()-style tuple of values into a string."""
    exctype, value, tb = err
    # Skip test runner traceback levels
    while tb and self._is_relevant_tb_level(tb):
        tb = tb.tb_next

    if exctype is test.failureException:
        # Skip assert*() traceback levels
        length = self._count_relevant_tb_levels(tb)
        msgLines = traceback.format_exception(exctype, value, tb, length)
    else:
        msgLines = traceback.format_exception(exctype, value, tb)

    if self.buffer:
        output = sys.stdout.getvalue()
        error = sys.stderr.getvalue()
        if output:
            if not output.endswith('\n'):
                output += '\n'
            msgLines.append(STDOUT_LINE % output)
        if error:
            if not error.endswith('\n'):
                error += '\n'
            msgLines.append(STDERR_LINE % error)
    return ''.join(msgLines)
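# A hedged usage sketch (not from the original test runner): the `err` argument
# above is the (exctype, value, tb) triple that unittest passes to
# addError()/addFailure(); the same shape comes straight from sys.exc_info()
# inside an except block.
import sys

def capture_err():
    try:
        raise ValueError('boom')
    except ValueError:
        return sys.exc_info()  # (exctype, value, tb), as _exc_info_to_string() expects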
def xmlrpc_handle_exception(e):
    if isinstance(e, openerp.osv.osv.except_osv):  # legacy
        fault = xmlrpclib.Fault(RPC_FAULT_CODE_WARNING, openerp.tools.ustr(e.value))
        response = xmlrpclib.dumps(fault, allow_none=False, encoding=None)
    elif isinstance(e, openerp.exceptions.Warning):
        fault = xmlrpclib.Fault(RPC_FAULT_CODE_WARNING, str(e))
        response = xmlrpclib.dumps(fault, allow_none=False, encoding=None)
    elif isinstance(e, openerp.exceptions.AccessError):
        fault = xmlrpclib.Fault(RPC_FAULT_CODE_ACCESS_ERROR, str(e))
        response = xmlrpclib.dumps(fault, allow_none=False, encoding=None)
    elif isinstance(e, openerp.exceptions.AccessDenied):
        fault = xmlrpclib.Fault(RPC_FAULT_CODE_ACCESS_DENIED, str(e))
        response = xmlrpclib.dumps(fault, allow_none=False, encoding=None)
    elif isinstance(e, openerp.exceptions.DeferredException):
        info = e.traceback
        # Which one is the best ?
        formatted_info = "".join(traceback.format_exception(*info))
        #formatted_info = openerp.tools.exception_to_unicode(e) + '\n' + info
        fault = xmlrpclib.Fault(RPC_FAULT_CODE_APPLICATION_ERROR, formatted_info)
        response = xmlrpclib.dumps(fault, allow_none=False, encoding=None)
    else:
        if hasattr(e, 'message') and e.message == 'AccessDenied':  # legacy
            fault = xmlrpclib.Fault(RPC_FAULT_CODE_ACCESS_DENIED, str(e))
            response = xmlrpclib.dumps(fault, allow_none=False, encoding=None)
        else:
            info = sys.exc_info()
            # Which one is the best ?
            formatted_info = "".join(traceback.format_exception(*info))
            #formatted_info = openerp.tools.exception_to_unicode(e) + '\n' + info
            fault = xmlrpclib.Fault(RPC_FAULT_CODE_APPLICATION_ERROR, formatted_info)
            response = xmlrpclib.dumps(fault, allow_none=None, encoding=None)
    return response
def process_feed_wrapper(self, feed):
    """ wrapper for ProcessFeed """
    start_time = datetime.datetime.now()
    try:
        pfeed = ProcessFeed(feed, self.options)
        ret_feed, ret_entries = pfeed.process()
        del pfeed
    except:
        (etype, eobj, etb) = sys.exc_info()
        print '[%d] ! -------------------------' % (feed.id,)
        print traceback.format_exception(etype, eobj, etb)
        traceback.print_exception(etype, eobj, etb)
        print '[%d] ! -------------------------' % (feed.id,)
        ret_feed = FEED_ERREXC
        ret_entries = {}

    delta = datetime.datetime.now() - start_time
    if delta.seconds > SLOWFEED_WARNING:
        comment = u' (SLOW FEED!)'
    else:
        comment = u''
    prints(u'[%d] Processed %s in %s [%s] [%s]%s' % (
        feed.id, feed.feed_url, unicode(delta),
        self.feed_trans[ret_feed],
        u' '.join(u'%s=%d' % (self.entry_trans[key], ret_entries[key])
                  for key in self.entry_keys),
        comment))

    self.feed_stats[ret_feed] += 1
    for key, val in ret_entries.items():
        self.entry_stats[key] += val

    return ret_feed, ret_entries
def StopTracing(self):
    assert self.is_tracing_running, 'Can only stop tracing when tracing is on.'
    self._IssueClockSyncMarker()
    builder = self._current_state.builder

    raised_exception_messages = []
    for agent in self._active_agents_instances + [self]:
        try:
            agent.StopAgentTracing()
        except Exception:  # pylint: disable=broad-except
            raised_exception_messages.append(
                ''.join(traceback.format_exception(*sys.exc_info())))

    for agent in self._active_agents_instances + [self]:
        try:
            agent.CollectAgentTraceData(builder)
        except Exception:  # pylint: disable=broad-except
            raised_exception_messages.append(
                ''.join(traceback.format_exception(*sys.exc_info())))

    self._iteration_info = None
    self._active_agents_instances = []
    self._current_state = None

    if raised_exception_messages:
        raise TracingControllerStoppedError(
            'Exceptions raised when trying to stop tracing:\n' +
            '\n'.join(raised_exception_messages))

    return builder.AsData()
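# Minimal standalone sketch of the same aggregation pattern (the names here are
# illustrative, not part of the original tracing controller): run every step,
# collect formatted tracebacks instead of failing fast, and raise one combined
# error at the end.
import sys
import traceback

def stop_all(steps):
    failures = []
    for step in steps:
        try:
            step()
        except Exception:  # broad on purpose, mirroring the original
            failures.append(''.join(traceback.format_exception(*sys.exc_info())))
    if failures:
        raise RuntimeError('Exceptions raised while stopping:\n' + '\n'.join(failures))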
def build_error_response(e):
    if hasattr(e, 'get_stacks'):
        # Process potentially multiple stacks.
        full_error = ''
        for i in range(len(e.get_stacks())):
            full_error += e.get_stacks()[i][0] + "\n"
            if i == 0:
                full_error += string.join(traceback.format_exception(*sys.exc_info()), '')
            else:
                for ln in e.get_stacks()[i][1]:
                    full_error += str(ln) + "\n"
        exec_name = e.__class__.__name__
    else:
        exc_type, exc_obj, exc_tb = sys.exc_info()
        exec_name = exc_type.__name__
        # format_exception() returns a list of lines; join it so the trace
        # field has the same (string) type on both branches.
        full_error = ''.join(traceback.format_exception(*sys.exc_info()))

    if service_gateway_instance.log_errors:
        log.error(full_error)

    result = {
        GATEWAY_ERROR_EXCEPTION: exec_name,
        GATEWAY_ERROR_MESSAGE: str(e.message),
        GATEWAY_ERROR_TRACE: full_error
    }

    if request.args.has_key(RETURN_FORMAT_PARAM):
        return_format = convert_unicode(request.args[RETURN_FORMAT_PARAM])
        if return_format == RETURN_FORMAT_RAW_JSON:
            return service_gateway_app.response_class(result, mimetype='application/json')

    return json_response({'data': {GATEWAY_ERROR: result}})
def notify(self, obj, event):
    try:
        return QtGui.QApplication.notify(self, obj, event)
    except Exception:
        print "fix me!"
        print traceback.format_exception(*sys.exc_info())
        return False
def __printFail(self, test, err):
    elapsed = time.time() - self.startTime
    t, val, trace = err
    if args.testpilot_json:
        print(
            json.dumps(
                {
                    "op": "test_done",
                    "status": "failed",
                    "test": test.id(),
                    "details": "".join(traceback.format_exception(t, val, trace)),
                    "start_time": self.startTime,
                    "end_time": time.time(),
                }
            )
        )
    else:
        print(
            "\033[31mFAIL\033[0m %s (%.3fs)%s\n%s"
            % (
                test.id(),
                elapsed,
                self._attempts(),
                "".join(traceback.format_exception(t, val, trace)),
            )
        )
def MachineMachine():
    LATTICE = "star"  ##, 'diamond'
    DIMENSION = (5, 11)
    PLAYERS = "a", "b", " "
    ##mc = Mangorona(PLAYERS,'cubic', DIMENSION, None)
    ##maximum=True
    ##getall=True
    mc = Mangorona(PLAYERS, "diamond", (7, 11), None)
    maximum = True
    getall = False
    t = PLAYERS[:2]
    tab = 0
    print(Board(mc).Display())
    while True:
        try:
            turn = t[tab % 2]
            movable = AllowableMovement(mc, turn).Move(maximum=maximum, getall=getall)
            machine = random.choice(movable)
            print(turn, "move:", machine[0], machine[1])
            mc.Move(turn, machine[0], machine[1])
            print(Board(mc).Display())
            print(mc.gain["a"], mc.gain["b"])  ##, t1-t0
            print()
            tab += 1
        except IllegalMove:
            exc = traceback.format_exception(*sys.exc_info())[-1]
            print(exc)
        except NoMoreMove:
            exc = traceback.format_exception(*sys.exc_info())[-1]
            print(exc)
            print("winner:", mc.Winner())
            break
def unix_mainloop(repodir, port, logfilename):
    portnum = port
    try:
        port = int(port)
    except ValueError:
        port = None
    if not os.path.isdir(repodir):
        sys.stderr.write("Specified directory, %s, is not a directory!\n" % repodir)
        usage()
    elif port is None:
        # sys.stderr.write() takes a single string; the original passed the
        # port as a second argument and referenced an undefined name.
        sys.stderr.write("Bad port number, %s, specified.\n" % portnum)
        usage()
    try:
        repo = start_angel(repodir, port, logfilename)
    except:
        sys.stderr.write("%s: exception initializing angel:\n%s" % (
            time.ctime(), ''.join(traceback.format_exception(*sys.exc_info()))))
        sys.exit(1)

    # Finally, start up the server loop!  This loop will not exit until
    # all clients and servers are closed.  You may cleanly shut the system
    # down by sending SIGINT (a.k.a. KeyboardInterrupt).
    from uplib.plibUtil import note
    while True:
        try:
            asyncore.loop()
        except (KeyboardInterrupt, SystemExit), x:
            note(4, "Exited from main loop due to exception:\n%s",
                 ''.join(traceback.format_exception(*sys.exc_info())))
            raise
        except:
def main():
    # Initialize color support
    colorama.init()
    try:
        run()
    except util.DeviceException as e:
        typ, val, tb = sys.exc_info()
        logging.debug("".join(traceback.format_exception(typ, val, tb)))
        print(colorama.Fore.RED + "There is a problem with your device"
              " configuration:")
        print(colorama.Fore.RED + e.message)
    except ConfigError as e:
        typ, val, tb = sys.exc_info()
        logging.debug("".join(traceback.format_exception(typ, val, tb)))
        print(colorama.Fore.RED +
              "There is a problem with your configuration file(s):")
        print(colorama.Fore.RED + e.message)
    except util.MissingDependencyException as e:
        typ, val, tb = sys.exc_info()
        logging.debug("".join(traceback.format_exception(typ, val, tb)))
        print(colorama.Fore.RED +
              "You are missing a dependency for one of your enabled plugins:")
        print(colorama.Fore.RED + e.message)
    except KeyboardInterrupt:
        colorama.deinit()
        sys.exit(1)
    except Exception as e:
        typ, val, tb = sys.exc_info()
        print(colorama.Fore.RED + "spreads encountered an error:")
        print(colorama.Fore.RED + "".join(traceback.format_exception(typ, val, tb)))
    # Deinitialize color support
    colorama.deinit()
def run(self):
    """ Thread main part """
    ## testlink = threading.Timer(3.0,gprs_source.writeTest)
    ## testlink.start()
    while not self.exitEvent.isSet():
        asyncore.poll(0.5)  # socket IO
        try:
            ## lower device IO
            # If the local clients connected to the gateway are not empty,
            # keep reading their data and sending commands in a loop.
            for i in self.device:
                try:
                    pkts = i.readPackets()  # returns the packets as Python dicts
                except:
                    pass
                else:
                    if pkts:
                        self.clientMessageSource.setMessage(pkts)
                        command = self.clientMessageSource.getLowerPacket()
                        print "+++++++" + str(command)
                        i.writePackets(command)
        except:
            type, value, tb = sys.exc_info()
            print traceback.format_exception(type, value, tb)
            pass
def _format_exception(err, is_failure, stdout=None, stderr=None):
    """Converts a sys.exc_info()-style tuple of values into a string."""
    exctype, value, tb = err
    # Skip test runner traceback levels
    while tb and _is_relevant_tb_level(tb):
        tb = tb.tb_next

    if is_failure:
        # Skip assert*() traceback levels
        length = _count_relevant_tb_levels(tb)
        msgLines = traceback.format_exception(exctype, value, tb, length)
    else:
        msgLines = traceback.format_exception(exctype, value, tb)

    encoding = locale.getpreferredencoding()
    msgLines = [_decode(line, encoding) for line in msgLines]

    if stdout:
        if not stdout.endswith('\n'):
            stdout += '\n'
        msgLines.append(STDOUT_LINE % stdout)
    if stderr:
        if not stderr.endswith('\n'):
            stderr += '\n'
        msgLines.append(STDERR_LINE % stderr)
    return ''.join(msgLines)
def format(self, record):
    """Formatting function

    @param object record: :logging.LogRecord:
    """
    entry = {
        'name': record.name,
        'timestamp': self.formatTime(record, datefmt="%Y-%m-%d %H:%M:%S"),
        'level': record.levelname
    }
    if hasattr(record, 'message'):
        entry['message'] = record.message
    else:
        entry['message'] = super().format(record)

    # add exception information if available
    if record.exc_info is not None:
        entry['exception'] = {
            'message': traceback.format_exception(*record.exc_info)[-1][:-1],
            'traceback': traceback.format_exception(*record.exc_info)[:-1]
        }
    return entry
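# Hedged illustration (the wiring below is assumed, not from the original
# project): traceback.format_exception(*record.exc_info) returns a list of
# lines whose last element is the "ExcType: message" summary, which is what
# the [-1][:-1] / [:-1] slicing above separates from the stack itself.
import logging
import sys
import traceback

logger = logging.getLogger('demo')
logger.addHandler(logging.StreamHandler(sys.stderr))

try:
    1 / 0
except ZeroDivisionError:
    lines = traceback.format_exception(*sys.exc_info())
    summary = lines[-1][:-1]  # e.g. "ZeroDivisionError: division by zero"
    stack = lines[:-1]        # the traceback itself, one string per frame line
    logger.error('%s (stack: %d lines)', summary, len(stack))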
def post(self):
    user = User.query().get()
    key = self.request.get('key')
    feed = Feed.retrieve(key)
    try:
        content = Feed.fetch_feed(feed.url)
        d = feedparser.parse(content)
        for entry in d.entries:
            try:
                story_date = None
                try:
                    story_date = datetime.datetime(*(entry.published_parsed[0:6]))
                except:
                    story_date = datetime.datetime.now()
                yesterday = datetime.datetime.now() - datetime.timedelta(days=1)
                try:
                    # Attribute probe: raises if the entry has no id.
                    entry.id == None
                except:
                    entry.id = entry.link
                if story_date > yesterday:
                    self.safe_insert(entry, feed, story_date)
            except:
                # str + list raises TypeError; stringify dir() first.
                logging.error("error parsing story" + str(dir(entry)))
                logging.error("".join(traceback.format_exception(*sys.exc_info())))
    except:
        logging.error("error parsing " + feed.url)
        logging.error("".join(traceback.format_exception(*sys.exc_info())))
def setupConnection(self):
    LOG.info('setupConnection:' + self.sql_url)
    if self._scoped_session:
        self._scoped_session.remove()
    try:
        engine = create_engine(self.sql_url, encoding='utf8', echo=False,
                               pool_recycle=1)
    except:
        exc_type, exc_value, exc_traceback = sys.exc_info()
        lines = traceback.format_exception(exc_type, exc_value, exc_traceback)
        LOG.error(''.join('!! ' + line for line in lines))
        return
    self._insp = reflection.Inspector.from_engine(engine)
    self.d_base = declarative_base(bind=engine)
    a_base = automap_base(bind=engine)
    try:
        a_base.metadata.reflect(views=True)
        self.name = unicode(self.sql_url)
    except:
        LOG.info('Unable to reflect the whole DB!')
        exc_type, exc_value, exc_traceback = sys.exc_info()
        lines = traceback.format_exception(exc_type, exc_value, exc_traceback)
        for line in lines:
            LOG.info(line)
        a_base.metadata.reflect(views=True, only=[self.sql_table])
        self.restricted = True
        self.name = unicode(self.sql_url + '+' + self.sql_table)
    a_base.prepare(a_base.metadata.bind,
                   name_for_collection_relationship=unique_collection)
    self.a_base = a_base
    self._scoped_session = scoped_session(sessionmaker(
        bind=self.a_base.metadata.bind,
        extension=ZopeTransactionExtension(keep_session=True),
        autocommit=True))
def OnPaint(self, event):
    namespace = {'self': self, 'event': event, 'wx': wx}
    if not self.error and self.codes.get('OnPaint', None):
        try:
            code = compile((self.codes['OnPaint'].replace('\r\n', '\n') + '\n'),
                           'canvastest', 'exec')
            self.error = False
        except:
            self.error = True
            # The formatted traceback must be captured for f() below; the
            # original discarded it, leaving `trace` undefined.
            trace = traceback.format_exception(*sys.exc_info())
            def f():
                d = wx.lib.dialogs.ScrolledMessageDialog(
                    self.parent,
                    (tr("Error compiling script.\n\nTraceback:\n\n") + ''.join(trace)),
                    tr("Error"), wx.DefaultPosition, wx.Size(400, 300))
                d.ShowModal()
                d.Destroy()
            wx.CallAfter(f)
            return
        try:
            exec code in namespace
            self.error = False
        except:
            self.error = True
            trace = traceback.format_exception(*sys.exc_info())
            def f():
                d = wx.lib.dialogs.ScrolledMessageDialog(
                    self.parent,
                    (tr("Error running script.\n\nTraceback:\n\n") + ''.join(trace)),
                    tr("Error"), wx.DefaultPosition, wx.Size(400, 300))
                d.ShowModal()
                d.Destroy()
            wx.CallAfter(f)
            return
    else:
        dc = wx.PaintDC(self)
        dc.Clear()
        event.Skip()
def send_update(log_file_path, program, email_params):
    try:
        subject = 'Daily ' + program + ' update'
        sender = email_params['sender']
        enc_pwd = email_params['enc_pwd']
        recipient = email_params['recipient']
        f = open(log_file_path, 'rb')
        data = [line for line in f]
        body = str(len(data)) + \
            ' lines of data in log, here are the last 10 lines:<br/>'
        body += '<br/>'.join([line for line in data[-10:]])
        f.close()
    except:
        exc_type, exc_value, exc_traceback = sys.exc_info()
        lines = traceback.format_exception(exc_type, exc_value, exc_traceback)
        body = 'Error getting email body... ' + \
            ''.join('!! ' + line for line in lines)
    try:
        send_mail.mail(sender, enc_pwd, recipient, subject, body)
    except:
        exc_type, exc_value, exc_traceback = sys.exc_info()
        lines = traceback.format_exception(exc_type, exc_value, exc_traceback)
        err = 'Error sending update email!: ' + \
            ''.join('!! ' + line for line in lines)
        log(log_file_path, program, subject, err, True)
def send(self, service_name, method, *args):
    import openerp
    import traceback
    import xmlrpclib
    try:
        result = openerp.netsvc.dispatch_rpc(service_name, method, args)
    except Exception, e:
        # TODO change the except to raise LibException instead of their
        # emulated xmlrpc fault
        if isinstance(e, openerp.osv.osv.except_osv):
            fault = xmlrpclib.Fault('warning -- ' + e.name + '\n\n' + e.value, '')
        elif isinstance(e, openerp.exceptions.Warning):
            fault = xmlrpclib.Fault('warning -- Warning\n\n' + str(e), '')
        elif isinstance(e, openerp.exceptions.AccessError):
            fault = xmlrpclib.Fault('warning -- AccessError\n\n' + str(e), '')
        elif isinstance(e, openerp.exceptions.AccessDenied):
            fault = xmlrpclib.Fault('AccessDenied', str(e))
        elif isinstance(e, openerp.exceptions.DeferredException):
            info = e.traceback
            formatted_info = "".join(traceback.format_exception(*info))
            fault = xmlrpclib.Fault(openerp.tools.ustr(e.message), formatted_info)
        else:
            info = sys.exc_info()
            formatted_info = "".join(traceback.format_exception(*info))
            fault = xmlrpclib.Fault(openerp.tools.exception_to_unicode(e), formatted_info)
        raise fault
def create_authnrequest(self, environ, server_env, start_response, session, acr_value):
    try:
        client = session.getClient()
        session.setAcrValue(client.authorization_endpoint, acr_value)

        request_args = {
            "response_type": self.flow_type,
            "scope": self.extra["scope"],
            "state": client.state,
        }
        if acr_value is not None:
            request_args["acr_values"] = acr_value

        if self.flow_type == "token":
            request_args["nonce"] = rndstr(16)
            session.setNonce(request_args["nonce"])
        else:
            use_nonce = getattr(self, "use_nonce", None)
            if use_nonce:
                request_args["nonce"] = rndstr(16)
                session.setNonce(request_args["nonce"])

        logger.info("client args: %s" % client.__dict__.items(),)
        logger.info("request_args: %s" % (request_args,))
        # User info claims
    except Exception:
        message = traceback.format_exception(*sys.exc_info())
        logger.error(message)
        return self.result(
            environ, start_response, server_env,
            (False, "Cannot find the OP! Please view your configuration of pyoidc RP."))

    try:
        cis = client.construct_AuthorizationRequest(request_args=request_args)
        logger.debug("request: %s" % cis)

        url, body, ht_args, cis = client.uri_and_body(
            AuthorizationRequest, cis, method="GET", request_args=request_args)
        logger.debug("body: %s" % body)
    except Exception:
        message = traceback.format_exception(*sys.exc_info())
        logger.error(message)
        return self.result(
            environ, start_response, server_env,
            (False, "Authorization request can not be performed!"))

    logger.info("URL: %s" % url)
    logger.debug("ht_args: %s" % ht_args)

    #session.setAuthn_auth(client.authorization_endpoint)
    #session.setAuthentication("VERIFY")
    #server_env["CACHE"][sid] = session
    session.setClient(client)

    resp_headers = [("Location", str(url))]
    if ht_args:
        resp_headers.extend([(a, b) for a, b in ht_args.items()])
    logger.debug("resp_headers: %s" % resp_headers)

    start_response("302 Found", resp_headers)
    return []
def log_output(session, path, args, outfile, uploadpath, cwd=None,
               logerror=0, append=0, chroot=None, env=None):
    """Run command with output redirected.  If chroot is not None, chroot to
    the directory specified before running the command."""
    pid = os.fork()
    fd = None
    if not pid:
        session._forget()
        try:
            if chroot:
                os.chroot(chroot)
            if cwd:
                os.chdir(cwd)
            flags = os.O_CREAT | os.O_WRONLY
            if append:
                flags |= os.O_APPEND
            fd = os.open(outfile, flags, 0666)
            os.dup2(fd, 1)
            if logerror:
                os.dup2(fd, 2)
            # echo the command we're running into the logfile
            os.write(fd, '$ %s\n' % ' '.join(args))
            environ = os.environ.copy()
            if env:
                environ.update(env)
            os.execvpe(path, args, environ)
        except:
            msg = ''.join(traceback.format_exception(*sys.exc_info()))
            if fd:
                try:
                    os.write(fd, msg)
                    os.close(fd)
                except:
                    pass
            print msg
            os._exit(1)
    else:
        if chroot:
            outfile = os.path.normpath(chroot + outfile)
        outfd = None
        remotename = os.path.basename(outfile)
        while True:
            status = os.waitpid(pid, os.WNOHANG)
            time.sleep(1)
            if not outfd:
                try:
                    outfd = file(outfile, 'r')
                except IOError:
                    # will happen if the forked process has not created the
                    # logfile yet
                    continue
                except:
                    print 'Error reading log file: %s' % outfile
                    print ''.join(traceback.format_exception(*sys.exc_info()))
            incremental_upload(session, remotename, outfd, uploadpath)
            if status[0] != 0:
                if outfd:
                    outfd.close()
                return status[1]
def rateAnalysis(request):
    if request.method == 'POST':
        print request.POST
        try:
            user = User.objects.get(id=request.POST['user_id'])
            teacher = TeacherProfile.objects.get(user=user)
            sa = StandardAnalysis.objects.get(id=request.POST['id'])
            sar, created = StandardAnalysisRating.objects.get_or_create(
                standard_analysis=sa, rater=teacher, rating_type='All')
        except:
            print traceback.format_exception(*sys.exc_info())
            return HttpResponseRedirect('/standard/?standard_id=' + str(sa.id))
        new_rating = int(request.POST['rating'])
        try:
            if created:
                sar.rating = new_rating
                sar.save()
                sa.cumulative_rating = (sa.cumulative_rating * sa.number_raters
                                        + sar.rating) / (sa.number_raters + 1)
                sa.number_raters += 1
                sa.save()
            elif (sar.rating != new_rating):
                sa.cumulative_rating = (sa.cumulative_rating * sa.number_raters
                                        - sar.rating + new_rating) / sa.number_raters
                sar.rating = new_rating
                sar.save()
                sa.save()
        except:
            print traceback.format_exception(*sys.exc_info())
        return HttpResponse(str(sa.cumulative_rating))
def record_results(self, resid, uutid):
    import os, commands
    import string
    import traceback

    # if environment variable IDTYPE == UUID, then use UUID for database
    # entry id, else implicit auto-increment integer is used.
    tmp = self.dbRes[uutid]
    self.info("%s" % (tmp))
    if (("IDTYPE" in os.environ.keys()) and (os.environ["IDTYPE"] == "UUID")):
        try:
            database.VssdVg1a1Sweep(
                id=(commands.getoutput('uuidgen -t')).strip(),
                TestResultsID=resid,
                **tmp)
        except:
            self.info("An error was thrown: %s" % string.join(
                traceback.format_exception(*sys.exc_info()), ''))
    else:
        try:
            database.VssdVg1a1Sweep(TestResultsID=resid, **tmp)
        except:
            self.info("An error was thrown: %s" % string.join(
                traceback.format_exception(*sys.exc_info()), ''))
def import_function(import_str):
    """
    Attempts to import the specified class method or regular function,
    and returns a callable.

    :raises ImportError if the specified import_str cannot be found.
    :raises TypeError if the specified import_str is found but is not
            a callable.
    """
    mod_str, _sep, class_str = import_str.rpartition('.')
    try:
        __import__(mod_str)
        fn = getattr(sys.modules[mod_str], class_str)
        if not callable(fn):
            # The placeholder was never filled in; format it before raising.
            msg = '{0} is not callable'.format(import_str)
            LOG.error(msg)
            raise TypeError(msg)
    except (ValueError, AttributeError):
        msg = 'Method or function {0} cannot be found.'.format(import_str)
        err_details = ''.join(traceback.format_exception(*sys.exc_info()))
        # The original mixed %-style and str.format placeholders here,
        # which logged a literal '%s' instead of the details.
        LOG.error(msg + ' Details: ({0})'.format(err_details))
        raise ImportError(msg)
    except ImportError:
        msg = 'Module {0} cannot be found.'.format(import_str)
        err_details = ''.join(traceback.format_exception(*sys.exc_info()))
        LOG.error(msg + ' Details: ({0})'.format(err_details))
        raise
    return fn
def _format_multiple_exceptions(e, debug=False):
    valid_excs = []
    excs = list(e.args)
    while excs:
        (etype, value, tb) = excs.pop(0)
        if (etype == fixtures.MultipleExceptions):
            excs.extend(value.args)
        elif (etype == fixtures.SetupError):
            continue
        else:
            valid_excs.append((etype, value, tb))

    if len(valid_excs) == 1:
        (etype, value, tb) = valid_excs[0]
        if debug:
            LOG.error("".join(traceback.format_exception(etype, value, tb)))
        else:
            raise value
    else:
        LOG.error("MultipleExceptions raised:")
        for n, (etype, value, tb) in enumerate(valid_excs):
            if debug:
                LOG.error("- exception %d:" % n)
                LOG.error("".join(
                    traceback.format_exception(etype, value, tb)))
            else:
                LOG.error(value)
def __init__(self, device=None, config_path=None, user_path=".", cmd_line=""):
    """
    Create an option object and check that parameters are valid.

    :param device: The device to use
    :type device: str
    :param config_path: The openzwave config directory. If None, try to
        configure automatically.
    :type config_path: str
    :param user_path: The user directory
    :type user_path: str
    :param cmd_line: The "command line" options of the openzwave library
    :type cmd_line: str
    """
    try:
        if os.path.exists(device):
            if os.access(device, os.R_OK) and os.access(device, os.W_OK):
                self._device = device
            else:
                import sys, traceback
                raise ZWaveException(
                    "Can't write to device %s : %s"
                    % (device, traceback.format_exception(*sys.exc_info())))
        else:
            import sys, traceback
            raise ZWaveException(
                "Can't find device %s : %s"
                % (device, traceback.format_exception(*sys.exc_info())))
    except:
        import sys, traceback
        raise ZWaveException(
            "Error when retrieving device %s : %s"
            % (device, traceback.format_exception(*sys.exc_info())))
    libopenzwave.PyOptions.__init__(
        self, config_path=config_path, user_path=user_path, cmd_line=cmd_line)
    self._user_path = user_path
    self._config_path = config_path
def makeVideoThumb(fullPath, thumbPath):
    print "\n", 80 * "-", "\ncreating video thumbnail with command: "
    # must create two frames because of mplayer bug
    createCmd = MPLAYER_DIR + " -vo jpeg -quiet -frames 2 -ss 5 " + fullPath
    print createCmd
    try:
        sp.check_call(createCmd.split())
    except:
        print "".join(tb.format_exception(*sys.exc_info()))
    # delete one of them
    try:
        os.remove("00000001.jpg")
    except:
        print "".join(tb.format_exception(*sys.exc_info()))
    # resize it to 300x300
    try:
        im = wx.Image("00000002.jpg")  # read the original image
        if im.Ok():  # the image may be corrupted...
            im.Rescale(300, 300)  # resize it
            im.SaveFile("00000002.jpg", wx.BITMAP_TYPE_JPEG)  # save it back to a file
    except:
        print "".join(tb.format_exception(*sys.exc_info()))
    # rename the right one
    try:
        shutil.move("00000002.jpg", thumbPath)
    except:
        print "".join(tb.format_exception(*sys.exc_info()))
def fmt_last_exception():
    exc_info = traceback.format_exception(*sys.exc_info())
    return ''.join(exc_info)
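# Usage sketch (illustrative): fmt_last_exception() is only meaningful inside
# an except block, where sys.exc_info() still describes the active exception.
def demo_fmt_last_exception():
    try:
        1 / 0
    except ZeroDivisionError:
        return fmt_last_exception()  # the full formatted traceback as one string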
def handleException(self, dump_info):
    """
    Our own handleException method doing some additional stuff before
    calling the original python-meh's one.

    :type dump_info: an instance of the meh.DumpInfo class
    :see: python-meh's ExceptionHandler.handleException
    """
    log.debug("running handleException")
    exception_lines = traceback.format_exception(*dump_info.exc_info)
    log.critical("\n".join(exception_lines))

    ty = dump_info.exc_info.type
    value = dump_info.exc_info.value

    try:
        gi.require_version("Gtk", "3.0")
        from gi.repository import Gtk

        # XXX: Gtk stopped raising RuntimeError if it fails to
        # initialize. Hooray! But will it stay like this? Let's be
        # cautious and raise the exception on our own to work in both
        # cases
        initialized = Gtk.init_check(None)[0]
        if not initialized:
            raise RuntimeError()

        # Attempt to grab the GUI initializing lock, do not block
        if not self._gui_lock.acquire(False):
            # the graphical interface is running, don't crash it by
            # running another one potentially from a different thread
            log.debug("Gtk running, queuing exception handler to the "
                      "main loop")
            GLib.idle_add(self._main_loop_handleException, dump_info)
        else:
            log.debug("Gtk not running, starting Gtk and running "
                      "exception handler in it")
            self._main_loop_handleException(dump_info)
    except (RuntimeError, ImportError, ValueError):
        log.debug("Gtk cannot be initialized")
        # X not running (Gtk cannot be initialized)
        if threadMgr.in_main_thread():
            log.debug("In the main thread, running exception handler")
            if issubclass(ty, CmdlineError) or not self._interactive:
                if issubclass(ty, CmdlineError):
                    cmdline_error_msg = _(
                        "\nThe installation was stopped due to "
                        "incomplete spokes detected while running "
                        "in non-interactive cmdline mode. Since there "
                        "cannot be any questions in cmdline mode, "
                        "edit your kickstart file and retry "
                        "installation.\nThe exact error message is: "
                        "\n\n%s.\n\nThe installer will now terminate.") % str(value)
                else:
                    cmdline_error_msg = _(
                        "\nRunning in cmdline mode, no interactive debugging "
                        "allowed.\nThe exact error message is: "
                        "\n\n%s.\n\nThe installer will now terminate.") % str(value)

                # since there is no UI in cmdline mode and it is completely
                # non-interactive, we can't show a message window asking the user
                # to acknowledge the error; instead, print the error out and sleep
                # for a few seconds before exiting the installer
                print(cmdline_error_msg)
                time.sleep(10)
                sys.exit(1)
            else:
                print("\nAn unknown error has occurred, look at the "
                      "/tmp/anaconda-tb* file(s) for more details")
                # in the main thread, run exception handler
                self._main_loop_handleException(dump_info)
        else:
            log.debug("In a non-main thread, sending a message with "
                      "exception data")
            # not in the main thread, just send message with exception
            # data and let message handler run the exception handler in
            # the main thread
            exc_info = dump_info.exc_info
            hubQ.send_exception((exc_info.type, exc_info.value, exc_info.stack))
            else:
                FP += 1
            FPR[key].append(FP / (FP + TN))
            TPR[key].append(TP / (TP + FN))

        fnameout = os.path.join(
            outdir,
            (key + os.path.splitext(os.path.basename(csvfile))[0] + 'roc.dat'))
        with open(fnameout, 'w') as fout:
            for n in range(len(FPR[key])):
                # One "FPR TPR" pair per line; the original had no newline,
                # which would run every point together on a single line.
                fout.write(str(FPR[key][n]) + ' ' + str(TPR[key][n]) + '\n')

    endmsg = pcl.ok(starttime, command=__script__)
    logger.info(endmsg)
    return


if __name__ == "__main__":
    import sys
    import traceback
    try:
        main()
        logger.info(pcl.ok())
        sys.exit(0)
    except Exception as e:
        if not isinstance(e, SystemExit):
            msg = "".join(traceback.format_exception(*sys.exc_info()))
            logger.critical(msg)
        sys.exit(1)
def format_exc():
    """ replace missing format_exc() """
    return ''.join(traceback.format_exception(*sys.exc_info()))
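# A note on this shim: modern Python ships the same behaviour as
# traceback.format_exc(), so the two calls below produce identical strings.
# A quick equivalence check (assuming the stdlib function is available):
import sys
import traceback

def format_exc_matches_stdlib():
    try:
        raise RuntimeError('x')
    except RuntimeError:
        mine = ''.join(traceback.format_exception(*sys.exc_info()))
        return mine == traceback.format_exc()  # True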
import traceback

db_last_modified = datetime(2000, 1, 1, 0, 0, 0)

try:
    app = Flask(__name__)
    app.config['DEBUG'] = True
    app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = True
    app.config['SQLALCHEMY_DATABASE_URI'] = os.environ.get(
        'DATABASE_URL') or "postgresql://*****:*****@localhost:5432/holiday"
    db = SQLAlchemy(app)
    CORS(app)
except Exception as e:
    t, v, tb = sys.exc_info()
    print(traceback.format_exception(t, v, tb))
    print(traceback.format_tb(e.__traceback__))


@app.route('/')
def index():
    print('index()')

    @after_this_request
    def d_header(response):
        response.headers['Last-Modified'] = format_date_time(
            mktime(db_last_modified.timetuple()))
        return response

    return jsonify(ResultSet=read_holidays())
        return -2
    except:
        if url.startswith("rtmp"):
            error = downloadfileRTMP(url, nombrefichero, silent)
            if error and not silent:
                from platformcode import platformtools
                # User-facing strings kept in Spanish; they read:
                # "You can't download that video" / "RTMP downloads are
                # not supported yet".
                platformtools.dialog_ok("No puedes descargar ese vídeo",
                                        "Las descargas en RTMP aún no",
                                        "están soportadas")
        else:
            import traceback
            from pprint import pprint
            exc_type, exc_value, exc_tb = sys.exc_info()
            lines = traceback.format_exception(exc_type, exc_value, exc_tb)
            for line in lines:
                line_splits = line.split("\n")
                for line_split in line_splits:
                    logger.error(line_split)
        try:
            f.close()
        except:
            pass
        if not silent:
            try:
                progreso.close()
            except:
                pass
def handler(self, event, context):
    """
    An AWS Lambda function which parses specific API Gateway input into a
    WSGI request, feeds it to our WSGI app, processes the response, and
    returns that back to the API Gateway.
    """
    settings = self.settings

    # If in DEBUG mode, log all raw incoming events.
    if settings.DEBUG:
        logger.debug('Zappa Event: {}'.format(event))

    # Set any API Gateway defined Stage Variables
    # as env vars
    if event.get('stageVariables'):
        for key in event['stageVariables'].keys():
            os.environ[str(key)] = event['stageVariables'][key]

    # This is the result of a keep alive, recertify
    # or scheduled event.
    if event.get('detail-type') == u'Scheduled Event':
        whole_function = event['resources'][0].split('/')[-1].split('-')[-1]

        # This is a scheduled function.
        if '.' in whole_function:
            app_function = self.import_module_and_get_function(whole_function)

            # Execute the function!
            return self.run_function(app_function, event, context)

        # Else, let this execute as it were.

    # This is a direct command invocation.
    elif event.get('command', None):
        whole_function = event['command']
        app_function = self.import_module_and_get_function(whole_function)
        result = self.run_function(app_function, event, context)
        print("Result of %s:" % whole_function)
        print(result)
        return result

    # This is a direct, raw python invocation.
    # It's _extremely_ important we don't allow this event source
    # to be overridden by unsanitized, non-admin user input.
    elif event.get('raw_command', None):
        raw_command = event['raw_command']
        exec(raw_command)
        return

    # This is a Django management command invocation.
    elif event.get('manage', None):
        from django.core import management

        try:  # Support both for tests
            from zappa.ext.django_zappa import get_django_wsgi
        except ImportError as e:  # pragma: no cover
            from django_zappa_app import get_django_wsgi

        # Get the Django WSGI app from our extension
        # We don't actually need the function,
        # but we do need to do all of the required setup for it.
        app_function = get_django_wsgi(self.settings.DJANGO_SETTINGS)

        # Couldn't figure out how to get the value into stdout with StringIO..
        # Read the log for now. :[]
        management.call_command(*event['manage'].split(' '))
        return {}

    # This is an AWS-event triggered invocation.
    elif event.get('Records', None):
        records = event.get('Records')
        result = None
        whole_function = self.get_function_for_aws_event(records[0])
        if whole_function:
            app_function = self.import_module_and_get_function(whole_function)
            result = self.run_function(app_function, event, context)
            logger.debug(result)
        else:
            logger.error("Cannot find a function to process the triggered event.")
        return result

    # This is an API Gateway authorizer event
    elif event.get('type') == u'TOKEN':
        whole_function = self.settings.AUTHORIZER_FUNCTION
        if whole_function:
            app_function = self.import_module_and_get_function(whole_function)
            policy = self.run_function(app_function, event, context)
            return policy
        else:
            logger.error("Cannot find a function to process the authorization request.")
            raise Exception('Unauthorized')

    # Normal web app flow
    try:
        # Timing
        time_start = datetime.datetime.now()

        # This is a normal HTTP request
        if event.get('httpMethod', None):
            script_name = ''
            headers = event.get('headers')
            if headers:
                host = headers.get('Host')
            else:
                host = None

            if host:
                if 'amazonaws.com' in host:
                    # The path provided in the event doesn't include the
                    # stage, so we must tell Flask to include the API
                    # stage in the url it calculates.
                    # See https://github.com/Miserlou/Zappa/issues/1014
                    script_name = '/' + settings.API_STAGE
            else:
                # This is a test request sent from the AWS console
                if settings.DOMAIN:
                    # Assume the requests received will be on the specified
                    # domain. No special handling is required
                    pass
                else:
                    # Assume the requests received will be to the
                    # amazonaws.com endpoint, so tell Flask to include the
                    # API stage
                    script_name = '/' + settings.API_STAGE

            # Create the environment for WSGI and handle the request
            environ = create_wsgi_request(
                event,
                script_name=script_name,
                trailing_slash=self.trailing_slash,
                binary_support=settings.BINARY_SUPPORT,
                context_header_mappings=settings.CONTEXT_HEADER_MAPPINGS)

            # We are always on https on Lambda, so tell our wsgi app that.
            environ['HTTPS'] = 'on'
            environ['wsgi.url_scheme'] = 'https'
            environ['lambda.context'] = context

            # Execute the application
            response = Response.from_app(self.wsgi_app, environ)

            # This is the object we're going to return.
            # Pack the WSGI response into our special dictionary.
            zappa_returndict = dict()

            if response.data:
                if settings.BINARY_SUPPORT:
                    if not response.mimetype.startswith("text/") \
                            or response.mimetype != "application/json":
                        zappa_returndict['body'] = base64.b64encode(
                            response.data).decode('utf-8')
                        zappa_returndict["isBase64Encoded"] = "true"
                    else:
                        zappa_returndict['body'] = response.data
                else:
                    zappa_returndict['body'] = response.get_data(as_text=True)

            zappa_returndict['statusCode'] = response.status_code
            zappa_returndict['headers'] = {}
            for key, value in response.headers:
                zappa_returndict['headers'][key] = value

            # Calculate the total response time,
            # and log it in the Common Log format.
            time_end = datetime.datetime.now()
            delta = time_end - time_start
            response_time_ms = delta.total_seconds() * 1000
            response.content = response.data
            common_log(environ, response, response_time=response_time_ms)

            return zappa_returndict
    except Exception as e:  # pragma: no cover
        # Print statements are visible in the logs either way
        print(e)
        exc_info = sys.exc_info()
        message = ('An uncaught exception happened while servicing this request. '
                   'You can investigate this with the `zappa tail` command.')

        # If we didn't even build an app_module, just raise.
        if not settings.DJANGO_SETTINGS:
            try:
                self.app_module
            except NameError as ne:
                message = 'Failed to import module: {}'.format(ne.message)

        # Return this unspecified exception as a 500, using template that
        # API Gateway expects.
        content = collections.OrderedDict()
        content['statusCode'] = 500
        body = {'message': message}
        if settings.DEBUG:  # only include traceback if debug is on.
            body['traceback'] = traceback.format_exception(
                *exc_info)  # traceback as a list for readability.
        content['body'] = json.dumps(str(body), sort_keys=True, indent=4)
        return content
def ephemeris_passes(opt, st0, et0):
    """
    DESCRIPTION:
        Finds passes from the start time to the end time given the options.
        Will implement a bash script or execute on the command line.
    USAGE:
        ephemeris_passes(opt, st0, et0)
    INPUTS:
        opt             command line arguments
        st0             unix time start time
        et0             unix time end time
    RETURNS:
        None
    AFFECTS:
        Prints all the passes
    EXCEPTIONS:
        None
    DEPENDENCIES:
        ephem
    """
    passes = {}

    objects = __read_config__(opt.config)
    site = __read_config__(opt.site)

    if opt.verbose:
        print("# got objects ", objects)
        print("# got radio site ", site)
        print("\n")

    ctime = st0
    etime = et0
    last_sat_rise = ctime

    while ctime < etime:
        obj = get_next_object(opt, site, objects, ctime)
        obj_id = obj
        obj_info = objects[obj]

        if opt.debug:
            print("# object ", obj_id, " @ ", ctime)
            print("# obj_info", obj_info)

        site_name = site["site"]["name"]
        site_tag = site["site"]["tag"]
        obs_lat = site["site"]["latitude"]
        obs_long = site["site"]["longitude"]
        obs_elev = float(site["site"]["elevation"])

        obj_name = obj_info["name"]
        obj_tle1 = obj_info["tle1"][1:-1]
        obj_tle2 = obj_info["tle2"][1:-1]
        obj_freqs = np.array(string.split(obj_info["frequencies"], ","), np.float32)

        c_dtime = datetime.datetime.utcfromtimestamp(ctime)
        c_ephem_time = ephem.Date(c_dtime)

        try:
            (sat_rise, sat_transit, sat_set) = satellite_rise_and_set(
                opt, obs_lat, obs_long, obs_elev,
                obj_name, obj_tle1, obj_tle2, c_ephem_time)

            if sat_set <= sat_rise or sat_transit <= sat_rise or sat_set <= sat_transit:
                continue

            if not last_sat_rise == sat_rise:
                (sub_lat, sub_long, sat_range, sat_velocity,
                 az, el, ra, dec, alt) = satellite_values_at_time(
                     opt, obs_lat, obs_long, obs_elev,
                     obj_name, obj_tle1, obj_tle2, sat_transit)

                (obj_bandwidth, obj_doppler) = satellite_bandwidth(
                    opt, obs_lat, obs_long, obs_elev,
                    obj_name, obj_tle1, obj_tle2,
                    sat_rise, sat_set, op.interval, obj_freqs)

                last_sat_rise = sat_rise

                if opt.debug:
                    print("time : ", c_ephem_time, sat_set,
                          (sat_set - c_ephem_time) * 60 * 60 * 24)

                ctime = ctime + (sat_set - c_ephem_time) * 60 * 60 * 24

                if opt.el_mask:
                    el_val = np.rad2deg(el)
                    el_mask = np.float(opt.el_mask)
                    if opt.debug:
                        print("# el_val ", el_val, " el_mask ", el_mask)
                    if el_val < el_mask:  # check mask here!
                        continue

                # This should really go out as digital metadata into the
                # recording location
                print("# Site : %s " % (site_name))
                print("# Site tag : %s " % (site_tag))
                print("# Object Name: %s" % (obj_name))
                print("# observer @ latitude : %s, longitude : %s, elevation : %s m"
                      % (obs_lat, obs_long, obs_elev))
                print("# GMT -- Rise Time: %s, Transit Time: %s, Set Time: %s"
                      % (sat_rise, sat_transit, sat_set))
                print("# Azimuth: %f deg, Elevation: %f deg, Altitude: %g km"
                      % (np.rad2deg(az), np.rad2deg(el), alt / 1000.0))
                print("# Frequencies: %s MHz, Bandwidth: %s kHz" % (
                    obj_freqs / 1.0e6,
                    obj_bandwidth[np.argmax(obj_bandwidth)] / 1.0e3))

                pass_md = {
                    "obj_id": obj_id,
                    "rise_time": sat_rise,
                    "transit_time": sat_transit,
                    "set_time": sat_set,
                    "azimuth": np.rad2deg(az),
                    "elevation": np.rad2deg(el),
                    "altitude": alt,
                    "doppler_frequency": obj_doppler,
                    "doppler_bandwidth": obj_bandwidth,
                }

                if opt.schedule:
                    d = sat_rise.tuple()
                    rise_time = "%04d%02d%02d_%02d%02d" % (d[0], d[1], d[2], d[3], d[4])

                    offset_rise = ephem.date(sat_rise - ephem.minute)
                    d = offset_rise.tuple()
                    offset_rise_time = "%04d-%02d-%02dT%02d:%02d:%02dZ" % (
                        d[0], d[1], d[2], d[3], d[4], int(d[5]))

                    offset_set = ephem.date(sat_set + ephem.minute)
                    d = offset_set.tuple()
                    offset_set_time = "%04d-%02d-%02dT%02d:%02d:%02dZ" % (
                        d[0], d[1], d[2], d[3], d[4], int(d[5]))

                    cmd_lines = []

                    radio_channel = string.split(site["radio"]["channel"][1:-1], ",")
                    radio_gain = string.split(site["radio"]["gain"][1:-1], ",")
                    radio_address = string.split(site["radio"]["address"][1:-1], ",")
                    recorder_channels = string.split(site["recorder"]["channels"][1:-1], ",")
                    radio_sample_rate = site["radio"]["sample_rate"]

                    cmd_line0 = "%s " % (site["recorder"]["command"])

                    if site["radio"]["type"] == "b210":
                        # just record a fixed frequency, needs a dual radio
                        # Thor3 script. This can be done!
                        idx = 0
                        freq = obj_freqs[1]

                        cmd_line1 = '-r %s -d "%s" -s %s -e %s -c %s -f %4.3f ' % (
                            radio_sample_rate,
                            radio_channel[idx],
                            offset_rise_time,
                            offset_set_time,
                            recorder_channels[idx],
                            freq)

                        log_file_name = "%s_%s_%s_%dMHz.log" % (
                            site_tag, obj_id, offset_rise_time, int(freq / 1.0e6))
                        cmd_fname = "%s_%s_%s_%dMHz" % (
                            site_tag, obj_id, rise_time, int(freq / 1.0e6))

                        cmd_line2 = (" -g %s -m %s --devargs num_recv_frames=1024"
                                     " --devargs master_clock_rate=24.0e6 -o %s/%s" % (
                                         radio_gain[idx],
                                         radio_address[idx],
                                         site["recorder"]["data_path"],
                                         cmd_fname))
                        cmd_line2 += " {0}".format(site["radio"].get("extra_args", "")).rstrip()

                        if not opt.foreground:
                            cmd_line0 = "nohup " + cmd_line0
                            cmd_line2 = cmd_line2 + " 2>&1 &"
                        else:
                            cmd_line2 = cmd_line2

                        if opt.debug:
                            print(cmd_line0, cmd_line1, cmd_line2, cmd_fname)

                        cmd_lines.append((cmd_line0 + cmd_line1 + cmd_line2,
                                          cmd_fname, pass_md, obj_info))
                        print("\n")
                    elif site["radio"]["type"] == "n200_tvrx2":
                        cmd_line1 = (' -r %s -d "%s %s" -s %s -e %s -c %s,%s -f %4.3f,%4.3f ' % (
                            radio_sample_rate,
                            radio_channel[0],
                            radio_channel[1],
                            offset_rise_time,
                            offset_set_time,
                            recorder_channels[0],
                            recorder_channels[1],
                            obj_freqs[0],
                            obj_freqs[1]))

                        log_file_name = "%s_%s_%s_combined.log" % (
                            site_tag, obj_id, offset_rise_time)
                        cmd_fname = "%s_%s_%s_combined" % (site_tag, obj_id, rise_time)

                        cmd_line2 = " -g %s,%s -m %s -o %s/%s" % (
                            radio_gain[0],
                            radio_gain[1],
                            radio_address[0],
                            site["recorder"]["data_path"],
                            cmd_fname)
                        cmd_line2 += " {0}".format(site["radio"].get("extra_args", "")).rstrip()

                        if not opt.foreground:
                            cmd_line0 = "nohup " + cmd_line0
                            cmd_line2 = cmd_line2 + " 2>&1 &"
                        else:
                            cmd_line2 = cmd_line2

                        if opt.debug:
                            print(cmd_line0, cmd_line1, cmd_line2, cmd_fname)

                        cmd_lines.append((cmd_line0 + cmd_line1 + cmd_line2,
                                          cmd_fname, pass_md, obj_info))
                        print("\n")

                    if opt.foreground:
                        dtstart0 = dateutil.parser.parse(offset_rise_time)
                        dtstop0 = dateutil.parser.parse(offset_set_time)
                        start0 = int((dtstart0 - datetime.datetime(
                            1970, 1, 1, tzinfo=pytz.utc)).total_seconds())
                        stop0 = int((dtstop0 - datetime.datetime(
                            1970, 1, 1, tzinfo=pytz.utc)).total_seconds())

                        if opt.verbose:
                            print("# waiting for %s @ %s " % (obj_id, offset_rise_time))

                        while time.time() < start0 - 30:
                            time.sleep(op.interval)
                            if opt.verbose:
                                print("# %d sec" % (start0 - time.time()))

                        for cmd_tuple in cmd_lines:
                            cmd, cmd_fname, pass_md, info_md = cmd_tuple
                            print("# Executing command %s " % (cmd))

                            # write the digital metadata
                            start_idx = int(start0)
                            mdata_dir = (site["recorder"]["metadata_path"] + "/"
                                         + cmd_fname + "/metadata")

                            # site metadata
                            # note we use directory structure for the dictionary here
                            # eventually we will add this feature to digital metadata
                            for k in site:
                                try:
                                    os.makedirs(mdata_dir + "/config/%s" % (k))
                                except:
                                    pass
                                md_site_obj = DigitalMetadataWriter(
                                    mdata_dir + "/config/%s" % (k), 3600, 60, 1, 1, k)
                                if opt.debug:
                                    print(site[k])
                                if opt.verbose:
                                    print("# writing metadata config / %s " % (k))
                                md_site_obj.write(start_idx, site[k])

                            # info metadata
                            try:
                                os.makedirs(mdata_dir + "/info")
                            except:
                                pass
                            md_info_obj = DigitalMetadataWriter(
                                mdata_dir + "/info", 3600, 60, 1, 1, "info")
                            if opt.verbose:
                                print("# writing metadata info")
                            if opt.debug:
                                print(info_md)
                            md_info_obj.write(start_idx, info_md)

                            # pass metadata
                            try:
                                os.makedirs(mdata_dir + "/pass")
                            except:
                                pass
                            md_pass_obj = DigitalMetadataWriter(
                                mdata_dir + "/pass", 3600, 60, 1, 1, "pass")
                            if opt.verbose:
                                print("# writing metadata pass")
                            if opt.debug:
                                print(pass_md)
                            md_pass_obj.write(start_idx, pass_md)

                            # sys.exit(1)
                            # call the command
                            try:
                                subprocess.call(cmd, shell=True)
                            except Exception as eobj:
                                exp_str = str(ExceptionString(eobj))
                                print("exception: %s." % (exp_str))
                                exc_type, exc_value, exc_traceback = sys.exc_info()
                                lines = traceback.format_exception(
                                    exc_type, exc_value, exc_traceback)
                                print(lines)

                        print("# wait...")
                        while time.time() < stop0 + 1:
                            time.sleep(op.interval)
                            if opt.verbose:
                                print("# complete in %d sec" % (stop0 - time.time()))
        except Exception as eobj:
            exp_str = str(ExceptionString(eobj))
            print("exception: %s." % (exp_str))
            exc_type, exc_value, exc_traceback = sys.exc_info()
            lines = traceback.format_exception(exc_type, exc_value, exc_traceback)
            print(lines)
            # sys.exit(1)

        # advance 10 minutes
        ctime = ctime + 60 * op.interval
        continue
def error_msg(t=None, v=None, tb=None):
    if not t:
        t, v, tb = sys.exc_info()
    text = "".join(traceback.format_exception(t, v, tb))
    master.ding(text)
def run_engine(self):
    """Run the test function many times, on database input and generated
    input, using the Conjecture engine.
    """
    # Tell pytest to omit the body of this function from tracebacks
    __tracebackhide__ = True
    try:
        database_key = self.wrapped_test._hypothesis_internal_database_key
    except AttributeError:
        if global_force_seed is None:
            database_key = function_digest(self.test)
        else:
            database_key = None

    runner = ConjectureRunner(
        self._execute_once_for_engine,
        settings=self.settings,
        random=self.random,
        database_key=database_key,
    )
    # Use the Conjecture engine to run the test function many times
    # on different inputs.
    runner.run()
    note_engine_for_statistics(runner)

    if runner.call_count == 0:
        return
    if runner.interesting_examples:
        self.falsifying_examples = sorted(
            runner.interesting_examples.values(),
            key=lambda d: sort_key(d.buffer),
            reverse=True,
        )
    else:
        if runner.valid_examples == 0:
            raise Unsatisfiable(
                "Unable to satisfy assumptions of hypothesis %s."
                % (get_pretty_function_description(self.test),))

    if not self.falsifying_examples:
        return
    elif not self.settings.report_multiple_bugs:
        # Pretend that we only found one failure, by discarding the others.
        del self.falsifying_examples[:-1]

    # The engine found one or more failures, so we need to reproduce and
    # report them.
    self.failed_normally = True

    flaky = 0

    for falsifying_example in self.falsifying_examples:
        info = falsifying_example.extra_information

        ran_example = ConjectureData.for_buffer(falsifying_example.buffer)
        self.__was_flaky = False
        assert info.__expected_exception is not None
        try:
            self.execute_once(
                ran_example,
                print_example=not self.is_find,
                is_final=True,
                expected_failure=(
                    info.__expected_exception,
                    info.__expected_traceback,
                ),
            )
        except (UnsatisfiedAssumption, StopTest):
            report(traceback.format_exc())
            self.__flaky(
                "Unreliable assumption: An example which satisfied "
                "assumptions on the first run now fails it.")
        except BaseException as e:
            if len(self.falsifying_examples) <= 1:
                # There is only one failure, so we can report it by raising
                # it directly.
                raise

            # We are reporting multiple failures, so we need to manually
            # print each exception's stack trace and information.
            tb = get_trimmed_traceback()
            report("".join(traceback.format_exception(type(e), e, tb)))
        finally:  # pragma: no cover
            # Mostly useful for ``find`` and ensuring that objects that
            # hold on to a reference to ``data`` know that it's now been
            # finished and they shouldn't attempt to draw more data from
            # it.
            ran_example.freeze()

        # This section is in fact entirely covered by the tests in
        # test_reproduce_failure, but it seems to trigger a lovely set
        # of coverage bugs: The branches show up as uncovered (despite
        # definitely being covered - you can add an assert False else
        # branch to verify this and see it fail - and additionally the
        # second branch still complains about lack of coverage even if
        # you add a pragma: no cover to it!
        # See https://bitbucket.org/ned/coveragepy/issues/623/
        if self.settings.print_blob:
            report(("\nYou can reproduce this example by temporarily "
                    "adding @reproduce_failure(%r, %r) as a decorator "
                    "on your test case")
                   % (__version__, encode_failure(falsifying_example.buffer)))
        if self.__was_flaky:
            flaky += 1

    # If we only have one example then we should have raised an error or
    # flaky prior to this point.
    assert len(self.falsifying_examples) > 1

    if flaky > 0:
        raise Flaky(
            ("Hypothesis found %d distinct failures, but %d of them "
             "exhibited some sort of flaky behaviour.")
            % (len(self.falsifying_examples), flaky))
    else:
        raise MultipleFailures(("Hypothesis found %d distinct failures.")
                               % (len(self.falsifying_examples)))
def eval_macro(name, args, story, state, module_finder=None):
    """
    Evaluates the macro with the given name in the context of the given
    story, feeding in the given arguments and using the given state. A
    return-value, updated-state pair is returned.

    The given module_finder function should accept a module name and return
    a Story object for that module, or None if the module can't be found.

    Examples:

    ```?
    eval_macro(
        "if",
        ["True", "1", "else", "0"],
        eval( # Fake object with .nodes and .title (without importing story):
            "[exec('class T: pass\\\\nt = T()\\\\nt.nodes = []"
            + "\\\\nt.title=\\\"_\\\"'), "
            + "eval('t')][1]"
        ),
        {}
    )
    ```=
    (
        1,
        {}
    )
    ```?
    eval_macro(
        "if",
        [
            "1 - 1", "`nope`",
            "2 * 0", 'nope',
            "`a` or False", "`yes`",
            "else", "0"
        ],
        eval( # Fake object with .nodes and .title (without importing story):
            "[exec('class T: pass\\\\nt = T()\\\\nt.nodes = []"
            + "\\\\nt.title=\\\"_\\\"'), "
            + "eval('t')][1]"
        ),
        {}
    )
    ```=
    (
        "yes",
        {}
    )
    ```
    """
    state = copy.deepcopy(state)
    if name in story.nodes:  # node-as-macro call
        node = story.nodes[name]
        return eval_text(node.content, story, state, args, module_finder)

    # Must be built-in or from a module:
    # Try to resolve as a module:
    if name.count('.') == 1:  # From a module
        module_name, inner_name = name.split('.')
        if module_name not in story.modules:
            return error(
                "Module '{}' for macro '{}' is not included by story '{}'.".format(
                    module_name,
                    name,
                    story.title
                ),
                state
            )
        if not module_finder:
            return error(
                "Attempt to use module '{}' without a module finder.".format(
                    module_name
                ),
                state
            )
        module = module_finder(module_name)
        if not module:
            return error("Module '{}' not found.".format(module_name), state)
        if inner_name not in module.nodes:
            return error(
                "Module '{}' doesn't define macro '{}'.".format(
                    module_name,
                    inner_name
                ),
                state
            )
        node = module.nodes[inner_name]
        # TODO: Some way to return a non-string here?
        return eval_text(node.content, story, state, args, module_finder)

    # if it's not a module reference, it might be a builtin?
    elif name in BUILTINS:  # A built-in macro
        try:
            return BUILTINS[name](module_finder, story, state, *args)
        except Exception as e:
            return error(
                "Error calling built-in '{}' with arguments:\n{}\nDetails:\n{}".format(
                    name,
                    '\n'.join(str(a) for a in args),
                    ''.join(
                        traceback.format_exception(type(e), e, e.__traceback__)
                    )
                ),
                state
            )

    # otherwise we don't know what this macro is...
    else:
        # Unrecognized macro: expands to error text
        # TODO: Better error text
        return error("Unrecognized macro '{}'.".format(name), state)
def satellite_bandwidth(opt, obsLat, obsLong, obsElev, objName, tle1, tle2,
                        satRise, satSet, interval, beaconFreq):
    """
    DESCRIPTION:
        The function finds the bandwidth of a satellite pass
    INPUTS:
        obsLat (string)     = Latitude of Observer in degrees represented as strings
        obsLong (string)    = Longitude of Observer in degrees represented as strings
        obsElev (float)     = Elevation of Observer in meters
        objName (string)    = Name of the satellite
        tle1 (string)       = First line of TLE
        tle2 (string)       = Second line of TLE
        satRise (string or ephem.date) = The time at which the satellite rises above horizon
        satSet (string or ephem.date)  = The time at which the satellite sets
        interval (float)    = The rate at which to sample during one rise/set cycle in seconds
        beaconFreq (float)  = The frequency of the beacon
    RETURNS:
        Param1 (float) = The bandwidth of the satellite during the rise/set cycle
        Param2 (list)  = All the frequencies during the rise/set cycle sampled by given interval
    AFFECTS:
        None
    EXCEPTIONS:
        None
    DEPENDENCIES:
        ephem.date(...), ephem.hour, ephem.minute, ephem.second
    """
    currTime = satRise
    dopplerFrequencies = []
    dopplerBandwidth = []

    if opt.debug:
        print("satellite_bandwidth ", currTime, satSet, interval)

    # the 2nd index of the returned tuple has the fraction of the day
    while (currTime.triple())[2] < (satSet.triple())[2]:
        try:
            (sublat, sublong, range_val, range_velocity,
             az, alt, ra, dec, elevation) = satellite_values_at_time(
                 opt, obsLat, obsLong, obsElev, objName, tle1, tle2, currTime)
            (dopplerFreq) = doppler_shift(beaconFreq, range_velocity)
            dopplerFrequencies.append(dopplerFreq)
            dopplerBandwidth.append(dopplerFreq - beaconFreq)
            currTime = currTime + ephem.second * interval
            currTime = ephem.date(currTime)
        except Exception as eobj:
            exp_str = str(ExceptionString(eobj))
            print("exception: %s." % (exp_str))
            exc_type, exc_value, exc_traceback = sys.exc_info()
            lines = traceback.format_exception(exc_type, exc_value, exc_traceback)
            print(lines)

    if opt.debug:
        print("# DF:", np.array(dopplerFrequencies) / 1e6, " MHz")
        print("# OB:", np.array(dopplerBandwidth) / 1e3, " kHz")

    return (np.array(dopplerBandwidth), np.array(dopplerFrequencies))
def run(self): first_run = True while True: try: if first_run: date_a = datetime.now() - (timedelta( seconds=int(sleep_interval) + 300 + int(preload_logs_interval) * 60 * 60)) else: date_a = datetime.now() - (timedelta( seconds=int(sleep_interval) + 300)) first_run = False date_b = datetime.now() list_files = self.get_files_in_interval_dt( pg_log_dir, date_a, date_b) logger.log("Start iteration. Files: " + str(list_files), 'Info') except Exception as e: logger.log("get_files_in_interval_dt error: " + str(e), "Error") db_pg_stat = None try: db_pg_stat = postgresql.open(sys_stat_conn_str) self.init_sys_stat(db_pg_stat) for file_name in list_files: logger.log("Processing... " + file_name, 'Info') fo = open(file_name, "r") str_val = "" log_lines = [] with self.closing_file(fo) as file: for line in file: line_len = len(line) if line_len > int(pg_log_line_max_len): log_lines.append( str(line)[:int(pg_log_line_max_len)]) else: log_lines.append(str(line)) for line in log_lines: str_val += str(line) if len(str_val) < 1: continue links = re.finditer( r"(\,(\w+\.\w+)\,\d+\,\".*?duration\:\s)", str_val) plans = re.finditer( r"duration\:\s\d+((.|,)\d+)?\sms\s\splan\:", str_val) queries = re.finditer( r"duration\:\s\d+((.|,)\d+)?\sms\s\s(?!plan\:)", str_val) dates = re.finditer( r"20\d{2}(-|\/)((0[1-9])|(1[0-2]))(-|\/)((0[1-9])|([1-2][0-9])|(3[0-1]))(\s)(([0-1][0-9])|(2[0-3])):([0-5][0-9]):([0-5][0-9])\.([0-9][0-9][0-9])", str_val) map_all = [] dates_list = [] #====================================== for m in links: link_val = str_val[m.span()[0]:m.span( )[1]] #,597f51d5.7ee4,615,"SELECT",2016-05-04 15:27:30 MSK,55/4139,0,LOG,00000,"duration link_res = re.finditer(r"(\w+\.\w+)", link_val) #597f51d5.7ee4 link_res_text = "" for v in link_res: link_res_text = link_val[v.span()[0]:v.span()[1]] link_res = re.finditer(r"(\,\d+\/\d+\,\d+\,)", link_val) #,55/4139,0, for v in link_res: link_res_text += link_val[v.span()[0]:v.span()[1]] map_all.append( ["l", m.span()[0], m.span()[1], link_res_text]) for m in plans: map_all.append([ "p", m.span()[0], m.span()[1], str_val[m.span()[0]:m.span()[1]] ]) for m in queries: map_all.append([ "q", m.span()[0], m.span()[1], str_val[m.span()[0]:m.span()[1]] ]) for m in dates: map_all.append([ "d", m.span()[0], m.span()[1], str_val[m.span()[0]:m.span()[1]] ]) dates_list.append(m.span()[0]) #====================================== map_all = sorted(map_all, key=itemgetter(1)) dates_list = sorted(dates_list) map_stuctured_l1 = [] elem = {} def find_gt(a, x): i = bisect.bisect_right(a, x) if i != len(a): return a[i] return len(str_val) for v in map_all: #['d', 15784614, 15784637, '2016-09-30 12:50:54.433'] if v[0] == 'd': elem['d'] = [v[1], v[2], v[3]] if v[0] == 'l': elem['l'] = [v[1], v[2], v[3]] if v[0] == 'p': elem['p'] = [ v[1], v[2], v[3], find_gt(dates_list, v[2]) ] if v[0] == 'q': elem['q'] = [ v[1], v[2], v[3], find_gt(dates_list, v[2]) ] if 'd' in elem and 'l' in elem and ('p' in elem or 'q' in elem): map_stuctured_l1.append(elem.copy()) elem.clear() map_stuctured_l2 = {} for v in map_stuctured_l1: #key: 59cf0e58.53d4,95/0,0, #value: [{'d_q': [77713, 77736, '2016-09-30 07:24:56.193'], 'q': [77869, 77893, 'duration: 30145.346 ms ', 78813]}, # {'d_p': [78813, 78836, '2016-09-30 07:24:56.217'], 'p': [78969, 78998, 'duration: 30145.336 ms plan:', 85258]}] if 'q' in v: map_stuctured_l2.setdefault(v['l'][2], []).append({ 'd_q': v['d'], 'q': v['q'] }) if 'p' in v: map_stuctured_l2.setdefault(v['l'][2], []).append({ 'd_p': v['d'], 'p': v['p'] }) ps_queries_and_plans = 
db_pg_stat.prepare(""" INSERT INTO psc_queries_and_plans( dt, duration, db_id, query, plan, io_read_time, cost_v, shared_hit_v, read_v, dirtied_v, total_blks_size ) VALUES ( (($1::text)::timestamp with time zone), $2::double precision, (select psc_get_db($3::text)), $4::text, $5::text, $6::double precision, $7::double precision, $8::bigint, $9::bigint, $10::bigint, (select pg_size_pretty((($8::bigint + $9::bigint + $10::bigint)*8192)::bigint)) );""") ps_queries_and_plans_check = db_pg_stat.prepare(""" select exists( select 1 from psc_queries_and_plans where dt = (($1::text)::timestamp with time zone) and duration = $2::double precision and query = $3::text )""") for key, value in map_stuctured_l2.items(): if sum('d_q' in v for v in value) == 1 and sum( 'd_p' in v for v in value) == 1: query_date = next( v['d_q'] for v in value if 'd_q' in v) #[77713, 77736, '2016-09-30 07:24:56.193'] plan_date = next( v['d_p'] for v in value if 'd_p' in v) #[78813, 78836, '2016-09-30 07:24:56.217'] query = next( v['q'] for v in value if 'q' in v ) #[77869, 77893, 'duration: 30145.346 ms ', 78813] plan = next( v['p'] for v in value if 'p' in v ) #[78969, 78998, 'duration: 30145.336 ms plan:', 85258] txt_plan = str_val[plan_date[0]:plan[3]] txt_query = str_val[query_date[0]:query[3]] #----------------------------------------------------------------------------------- dt = "2000-01-01 00:00:00" dt_str = re.search( r"20\d{2}(-|\/)((0[1-9])|(1[0-2]))(-|\/)((0[1-9])|([1-2][0-9])|(3[0-1]))(\s)(([0-1][0-9])|(2[0-3])):([0-5][0-9]):([0-5][0-9])\.([0-9][0-9][0-9])", txt_query) if (dt_str is not None): dt = dt_str.group() #----------------------------------------------------------------------------------- dbname = "unknown" pos_first = txt_query.find('","') if pos_first > 0: dbname = txt_query[ pos_first + 3:txt_query.find('",', pos_first + 3, len(txt_query))] #----------------------------------------------------------------------------------- shared_hit = 0 read = 0 dirtied = 0 io_read_time = 0 cost = 0 duration = 0 duration_str = re.finditer( r"duration\:\s\d+((.|,)\d+)?\sms", txt_plan) cost_str = re.finditer( r"cost\=\d+((.|,)\d+)?\.\.\d+((.|,)\d+)?\s", txt_plan) io_read_time_str = re.finditer( r"I\/O\sTimings\:\sread\=\d+((.|,)\d+)?", txt_plan) shared_hit_str = re.finditer( r"shared\shit\=\d+((.|,)\d+)?", txt_plan) read_str = re.finditer(r"read\=\d+((.|,)\d+)?", txt_plan) dirtied_str = re.finditer( r"dirtied\=\d+((.|,)\d+)?", txt_plan) durations = [] for m in duration_str: sub_str = txt_plan[m.span()[0]:m.span()[1]] val = re.finditer(r"\d+((.|,)\d+)?", sub_str) for vv in val: durations.append( float(sub_str[vv.span()[0]:vv.span( )[1]])) if len(durations) > 0: duration = max(durations) costs = [] for m in cost_str: sub_str = txt_plan[m.span()[0]:m.span()[1]] val = re.finditer(r"\d+((.|,)\d+)?", sub_str) for vv in val: costs.append( float(sub_str[vv.span()[0]:vv.span( )[1]])) if len(costs) > 0: cost = max(costs) shared_hits = [] for m in shared_hit_str: sub_str = txt_plan[m.span()[0]:m.span()[1]] val = re.finditer(r"\d+((.|,)\d+)?", sub_str) for vv in val: shared_hits.append( float(sub_str[vv.span()[0]:vv.span( )[1]])) if len(shared_hits) > 0: shared_hit = max(shared_hits) io_read_times = [] for m in io_read_time_str: sub_str = txt_plan[m.span()[0]:m.span()[1]] val = re.finditer(r"\d+((.|,)\d+)?", sub_str) for vv in val: io_read_times.append( float(sub_str[vv.span()[0]:vv.span( )[1]])) if len(io_read_times) > 0: io_read_time = max(io_read_times) reads = [] for m in read_str: sub_str = 
txt_plan[m.span()[0]:m.span()[1]]
                                val = re.finditer(r"\d+((.|,)\d+)?", sub_str)
                                for vv in val:
                                    reads.append(
                                        float(sub_str[vv.span()[0]:vv.span()[1]]))
                            if len(reads) > 0:
                                # "read=" also matches the I/O-timing line, so
                                # drop that value first and guard against the
                                # list becoming empty after the removal
                                if io_read_time in reads:
                                    reads.remove(io_read_time)
                                if reads:
                                    read = max(reads)

                            dirtieds = []
                            for m in dirtied_str:
                                sub_str = txt_plan[m.span()[0]:m.span()[1]]
                                val = re.finditer(r"\d+((.|,)\d+)?", sub_str)
                                for vv in val:
                                    dirtieds.append(
                                        float(sub_str[vv.span()[0]:vv.span()[1]]))
                            if len(dirtieds) > 0:
                                dirtied = max(dirtieds)

                            if float(duration) != 0:
                                if not ps_queries_and_plans_check.first(
                                        str(dt), float(duration), str(txt_query)):
                                    query_result = ps_queries_and_plans.first(
                                        str(dt), float(duration), str(dbname),
                                        str(txt_query), str(txt_plan),
                                        float(io_read_time), float(cost),
                                        int(float(shared_hit)), int(float(read)),
                                        int(float(dirtied)))
                                    logger.log("Wrote query at " + str(dt), "Info")
            except Exception as e:
                exc_type, exc_value, exc_traceback = sys.exc_info()
                exc_res = traceback.format_exception(exc_type, exc_value,
                                                     exc_traceback)
                exc_text = ""
                for vv in exc_res:
                    exc_text += '\n' + str(vv)
                logger.log("Connection db_pg_stat error: " + str(e) + exc_text,
                           "Error")
                time.sleep(int(sleep_interval))
            finally:
                if db_pg_stat is not None:
                    if not db_pg_stat.closed:
                        db_pg_stat.close()
                logger.log(
                    'Finished iteration. Sleeping for ' + str(sleep_interval) +
                    " seconds...", "Info")
                time.sleep(int(sleep_interval))
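# The duration/cost/shared-hit/read/dirtied blocks above all repeat the same
# "scan for a pattern, pull the numbers out of each match, keep the maximum"
# dance. A hypothetical helper that could replace each of those blocks (the
# name and signature are illustrative, not part of the original script):
import re

def max_number(pattern, text, default=0.0):
    """Return the largest number found inside any match of pattern in text,
    or default when nothing matches. Commas are accepted as decimal points,
    mirroring the locale-tolerant regexes above."""
    values = []
    for m in re.finditer(pattern, text):
        for num in re.finditer(r"\d+((\.|,)\d+)?", m.group()):
            values.append(float(num.group().replace(",", ".")))
    return max(values) if values else default

# e.g. duration = max_number(r"duration:\s\d+((\.|,)\d+)?\sms", txt_plan)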
def ail_import(self): time_start = time.time() self._err_log = '' invoice = self.env['account.invoice'].browse( self._context['active_id']) accounts = self.env['account.account'].search([ ('user_type_id.type', 'not in', ['view', 'consolidation', 'closed']), ('company_id', '=', invoice.company_id.id) ]) self._accounts_dict = {a.code: a.id for a in accounts} self._get_orm_fields() lines, header = self._remove_leading_lines(self.lines) header_fields = csv.reader(StringIO.StringIO(header), dialect=self.dialect).next() self._header_fields = self._process_header(header_fields) reader = csv.DictReader(StringIO.StringIO(lines), fieldnames=self._header_fields, dialect=self.dialect) inv_lines = [] for line in reader: ail_vals = {} # step 1: handle codepage for i, hf in enumerate(self._header_fields): try: line[hf] = line[hf].decode(self.codepage).strip() except: tb = ''.join(format_exception(*exc_info())) raise UserError( _("Wrong Code Page"), _("Error while processing line '%s' :\n%s") % (line, tb)) # step 2: process input fields for i, hf in enumerate(self._header_fields): if i == 0 and line[hf] and line[hf][0] == '#': # lines starting with # are considered as comment lines break if hf in self._skip_fields: continue if line[hf] == '': continue if self._field_methods[hf].get('orm_field'): self._field_methods[hf]['method']( hf, line, invoice, ail_vals, orm_field=self._field_methods[hf]['orm_field']) else: self._field_methods[hf]['method'](hf, line, invoice, ail_vals) if ail_vals: self._process_line_vals(line, invoice, ail_vals) inv_lines.append(ail_vals) vals = [(0, 0, l) for l in inv_lines] vals = self._process_vals(invoice, vals) if self._err_log: self.note = self._err_log module = __name__.split('addons.')[1].split('.')[0] result_view = self.env.ref('%s.ail_import_view_form_result' % module) return { 'name': _("Import File result"), 'res_id': self.id, 'view_type': 'form', 'view_mode': 'form', 'res_model': 'ail.import', 'view_id': result_view.id, 'target': 'new', 'type': 'ir.actions.act_window', } else: invoice.write({'invoice_line_ids': vals}) import_time = time.time() - time_start _logger.warn('account.invoice (id: %s) import time = %.3f seconds', invoice.id, import_time) return {'type': 'ir.actions.act_window_close'}
def last_exception(): """ Returns last exception as a string, for use in logging. """ exc_type, exc_value, exc_traceback = sys.exc_info() return ''.join( traceback.format_exception(exc_type, exc_value, exc_traceback))
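# Hypothetical usage of last_exception(): it only makes sense inside an
# except block, where sys.exc_info() still refers to the active exception.
import logging

try:
    1 / 0
except ZeroDivisionError:
    logging.getLogger(__name__).error("division failed:\n%s", last_exception())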
b1 = ToggleRadioButton( bpanel, label='Panel', associated_widget=w1 )
        b2 = ToggleRadioButton( bpanel, label='Button', associated_widget=w2 )
        b3 = ToggleRadioButton( bpanel, label='List', associated_widget=w3 )

        bsizer.Add( b1 )
        bsizer.Add( b2 )
        bsizer.Add( b3 )
        wsizer.Add( w1 )
        wsizer.Add( w2 )
        wsizer.Add( w3 )

        sizer.Add( bpanel, 0, wx.EXPAND | wx.ALL, 5 )
        sizer.Add( wpanel, 1, wx.EXPAND | wx.ALL, 5 )

        self.Show()

if __name__ == '__main__':
    try:
        app = wx.App()
        TestFrame(None, title="ToggleRadioButton Test")
        app.MainLoop()
    except Exception:
        # show the full traceback in a dialog, since a GUI app may have no
        # console attached to print it to
        import sys, traceback
        xc = traceback.format_exception(*sys.exc_info())
        wx.MessageBox(''.join(xc))
def log_exception(self, exception):
        exc_info = sys.exc_info()
        # format the message first, then pad it, so the banner really is
        # 79 characters wide
        logger.error((" Exception: %s " % exception).center(79, "#"))
        logger.error(''.join(traceback.format_exception(*exc_info)))
def _forward( self, data: TikTensorBatch, fut: List[Future], device: torch.device, batch_size: int, increase_batch_size: bool ) -> Tuple[int, bool]: """ :param data: input data to neural network """ self.logger.debug("this is forward") # TODO: Maybe use todevice # TODO(novikov): Move model creation to worker if device.type == "cuda": with torch.cuda.device(device.index): model = self.training_model.__class__(**self.config.get("model_init_kwargs", {})) else: model = self.training_model.__class__(**self.config.get("model_init_kwargs", {})) model.load_state_dict(self.training_model.state_dict()) model.eval() model = model.to(device=device) keys: List = [d.id for d in data] data: List[torch.Tensor] = data.as_torch() self.logger.debug("forward with batch_size %d (increasing: %s)", batch_size, increase_batch_size) start = 0 last_batch_size = batch_size def create_end_generator(start, end, batch_size): for batch_end in range(start + batch_size, end, batch_size): yield batch_end yield end end_generator = create_end_generator(start, len(keys), batch_size) while start < len(keys): end = next(end_generator) try: with torch.no_grad(): pred = model(torch.stack(data[start:end]).to(dtype=torch.float, device=device)).cpu() except Exception as e: if batch_size > last_batch_size: self.logger.info( "forward pass with batch size %d threw exception '%s'. Using previous batch size %d again.", batch_size, e, last_batch_size, ) batch_size = last_batch_size increase_batch_size = False else: last_batch_size = batch_size batch_size //= 2 if batch_size == 0: self.logger.error( "Forward pass with batch size %d threw exception '%s'\nwith traceback: %s. Processed %d/%d", last_batch_size, e, traceback.format_exception(type(e), e, e.__traceback__), start, len(keys), ) for i in range(start, len(keys)): fut[i].set_exception(e) break increase_batch_size = True self.logger.info( "forward pass with batch size %d threw exception '%s'. Trying again with smaller batch_size %d", last_batch_size, e, batch_size, ) end_generator = create_end_generator(start, len(keys), batch_size) else: for i in range(start, end): try: fut[i].set_result(TikTensor(pred[i], id_=keys[i])) except Exception as e: self.logger.error( "start: %s, end: %s, pred: %s, keys: %s, i: %s", start, end, pred.shape, len(keys), i ) self.logger.exception(e) raise e last_batch_size = batch_size if increase_batch_size and end - start >= batch_size: # do not increase batch size after successfully # computing an incomplete batch max_cpu_batch_size = self.config.get("inference_max_cpu_batch_size", 100) if batch_size >= max_cpu_batch_size: self.logger.info("Reached 'inference_max_cpu_batch_size' of %d", max_cpu_batch_size) increase_batch_size = False else: batch_size += 1 end_generator = create_end_generator(end, len(keys), batch_size) start = end return batch_size, increase_batch_size
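# The retry logic above implements a simple adaptive batch size: halve on
# failure (e.g. an out-of-memory error on the device), creep back up by one
# after each success. A stripped-down sketch of that strategy on its own
# (process_batch and items are placeholders, not part of the class above):
def run_adaptive(items, process_batch, batch_size=8):
    start = 0
    while start < len(items):
        end = min(start + batch_size, len(items))
        try:
            process_batch(items[start:end])
        except Exception:
            batch_size //= 2
            if batch_size == 0:
                raise  # give up: even a single item fails
            continue  # retry the same window with the smaller batch
        start = end
        batch_size += 1  # probe upward again after a success
    return batch_size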
def tvm_excepthook(exctype, value, trbk): print('\n'.join(traceback.format_exception(exctype, value, trbk))) if hasattr(multiprocessing, 'active_children'): # pylint: disable=not-callable for p in multiprocessing.active_children(): p.terminate()
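# Hypothetical wiring: install the hook so uncaught exceptions print a full
# traceback and any stray worker processes are terminated.
import sys
sys.excepthook = tvm_excepthook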
def okcmd(self):
        # tixDemo:Status "You have selected the directory" + self.dlist_dir
        self.quitcmd()

    def quitcmd(self):
        self.exit = 0

    def mainloop(self):
        while self.exit < 0:
            self.root.tk.dooneevent(TCL_ALL_EVENTS)

    def destroy(self):
        self.root.destroy()


# This "if" statement makes it possible to run this script file inside or
# outside of the main demo program "tixwidgets.py".
#
if __name__ == '__main__':
    import tkMessageBox, traceback

    try:
        root = Tix.Tk()
        RunSample(root)
    except:
        t, v, tb = sys.exc_info()
        text = "Error running the demo script:\n"
        for line in traceback.format_exception(t, v, tb):
            # format_exception lines already end in '\n'
            text = text + line
        tkMessageBox.showerror('Tix Demo Error', text)
def emit(self, record: logging.LogRecord) -> None: report = {} # type: Dict[str, Any] # This parameter determines whether Zulip should attempt to # send Zulip messages containing the error report. If there's # syntax that makes the markdown processor throw an exception, # we really don't want to send that syntax into a new Zulip # message in exception handler (that's the stuff of which # recursive exception loops are made). # # We initialize is_bugdown_rendering_exception to `True` to # prevent the infinite loop of zulip messages by ERROR_BOT if # the outer try block here throws an exception before we have # a chance to check the exception for whether it comes from # bugdown. is_bugdown_rendering_exception = True try: report['node'] = platform.node() report['host'] = platform.node() add_deployment_metadata(report) if record.exc_info: stack_trace = ''.join( traceback.format_exception(*record.exc_info)) message = str(record.exc_info[1]) is_bugdown_rendering_exception = record.msg.startswith( 'Exception in Markdown parser') else: stack_trace = 'No stack trace available' message = record.getMessage() if '\n' in message: # Some exception code paths in queue processors # seem to result in super-long messages stack_trace = message message = message.split('\n')[0] is_bugdown_rendering_exception = False report['stack_trace'] = stack_trace report['message'] = message report['logger_name'] = record.name report['log_module'] = find_log_caller_module(record) report['log_lineno'] = record.lineno if hasattr(record, "request"): add_request_metadata( report, record.request ) # type: ignore # record.request is added dynamically except Exception: report['message'] = "Exception in preparing exception report!" logging.warning(report['message'], exc_info=True) report['stack_trace'] = "See /var/log/zulip/errors.log" if settings.DEBUG_ERROR_REPORTING: # nocoverage logging.warning("Reporting an error to admins...") logging.warning( "Reporting an error to admins: {} {} {} {} {}".format( record.levelname, report['logger_name'], report['log_module'], report['message'], report['stack_trace'])) try: if settings.STAGING_ERROR_NOTIFICATIONS: # On staging, process the report directly so it can happen inside this # try/except to prevent looping from zerver.lib.error_notify import notify_server_error notify_server_error(report, is_bugdown_rendering_exception) else: queue_json_publish('error_reports', dict( type="server", report=report, )) except Exception: # If this breaks, complain loudly but don't pass the traceback up the stream # However, we *don't* want to use logging.exception since that could trigger a loop. logging.warning("Reporting an exception triggered an exception!", exc_info=True)
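# Hypothetical wiring for a handler like the one above (the handler class
# name is illustrative; the snippet does not show which logging.Handler
# subclass defines this emit()): attach it to the root logger so any
# logger.error(..., exc_info=True) call produces a report.
import logging

handler = AdminNotifyHandler()  # assumed subclass defining emit() above
handler.setLevel(logging.ERROR)
logging.getLogger().addHandler(handler)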
msg = " ".join(words) if len(msg) > 0: break try: generateMarkovOgg(msg, g) headers = {'User-Agent': USER_AGENT} files = {"voice": open("markov.ogg", "rb")} bot.sendVoice( _urlopen_hook=lambda u: requests.post( u, headers=headers, files=files).text, chat_id=chat_id) except KeyboardInterrupt as e: raise e except BaseException as e: exc_type, exc_value, exc_traceback = sys.exc_info() print("\n".join( traceback.format_exception( exc_type, exc_value, exc_traceback))) bot.sendMessage(chat_id=chat_id, text="Could not send voice", reply_to_message_id=replyto) else: bot.sendMessage(chat_id=chat_id, text="[Chain is empty]", reply_to_message_id=replyto) if cmd == "/markovttslang": if t in LAST_USER.keys(): if (curtime - LAST_USER[t]) < 1: continue v = " ".join(message.split(" ")[1:]).strip() if v not in LANGS: bot.sendMessage( chat_id=chat_id,
def _set_exc_info(self, exc_info): with self._cond_var: just_set = self.__set(exc_info=exc_info) if just_set: exc_info_str = ''.join(traceback.format_exception(*exc_info)) LOGGER.debug('%a got exception:\n%s', self, exc_info_str)
def handleException(self, exc_type, exc_param, exc_tb):
        "Exception handler (return False to abort)"
        self.writeline(''.join(
            traceback.format_exception(exc_type, exc_param, exc_tb)))
        return True
def format_exception(cls, exc_info): return ''.join(traceback.format_exception( *exc_info)) if exc_info else ''
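# Hypothetical usage (SomeReporter stands in for whatever class holds the
# classmethod above): exc_info is the (type, value, traceback) triple from
# sys.exc_info(), or None when there is no exception, in which case an
# empty string comes back.
import sys

try:
    int("not a number")
except ValueError:
    text = SomeReporter.format_exception(sys.exc_info())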
def log_exception():
    "Log a stack trace"
    exc_type, exc_value, exc_traceback = sys.exc_info()
    # join the lines rather than printing the list's repr(), so the trace
    # is readable
    print(''.join(traceback.format_exception(exc_type, exc_value, exc_traceback)))
fileName = os.path.basename(sys.executable)
        key = winreg.OpenKey(winreg.HKEY_CURRENT_USER, "Software\\Microsoft\\Internet Explorer\\Main\\FeatureControl\\FEATURE_BROWSER_EMULATION")
        result = winreg.QueryValueEx(key, fileName)
        print('Key already found with value: {}. Leaving key unchanged.'.format(result))
    except FileNotFoundError:
        try:
            print('Attempting to set browser emulation key in registry')
            key = winreg.CreateKey(winreg.HKEY_CURRENT_USER, "Software\\Microsoft\\Internet Explorer\\Main\\FeatureControl\\FEATURE_BROWSER_EMULATION")
            winreg.SetValueEx(key, fileName, 0, winreg.REG_DWORD, 11001)
            print('Registry key set')
        except:
            print('\nFailed to set registry key. You may need to start the shell with \'Run as Administrator\'.\n')
            stacktrace = ''.join(traceback.format_exception(sys.exc_info()[0], sys.exc_info()[1], sys.exc_info()[2]))
            print('Failure in verifyBrowserEmulation: {}'.format(stacktrace))
    except:
        raise
    ## end verifyBrowserEmulation
    return


if __name__ == '__main__':
    try:
        osType = platform.system()
        verifyAvailableModules(osType)
        verifyBrowserEmulation(osType)
        main.main()
    except:
        stacktrace = ''.join(traceback.format_exception(sys.exc_info()[0], sys.exc_info()[1], sys.exc_info()[2]))
        print('Failure in adminConsole wrapper: {}'.format(stacktrace))
def rescan(self): """Rescan all folders for changed/new/removed modules. The caller should release all references to removed modules. Returns a tuple: (removed, added) """ print_d("Rescanning..") info = {} # get what is there atm for folder in self.__folders: for name, path, deps in get_importables(folder): # take the basename as module key, later modules win info[name] = (path, deps) # python can not unload a module, so we can only add new ones # or reload if the path is the same and mtime changed, # but we can still pretend we removed something removed = [] added = [] # remove those that are gone and changed ones for name, mod in list(self.__modules.items()): # not here anymore, remove if name not in info: del self.__modules[name] removed.append(name) continue # check if any dependency has changed path, new_deps = info[name] if mod.has_changed(new_deps): del self.__modules[name] removed.append(name) self.__failures.clear() # add new ones for (name, (path, deps)) in info.items(): if name in self.__modules: continue try: # add a real module, so that pickle works # https://github.com/quodlibet/quodlibet/issues/1093 parent = "quodlibet.fake" if parent not in sys.modules: spec = importlib.machinery.ModuleSpec(parent, None, is_package=True) sys.modules[parent] = importlib.util.module_from_spec(spec) vars(sys.modules["quodlibet"])["fake"] = sys.modules[parent] mod = load_module(name, parent + ".plugins", dirname(path)) if mod is None: continue except Exception as err: text = format_exception(*sys.exc_info()) self.__failures[name] = ModuleImportError(name, err, text) else: added.append(name) self.__modules[name] = Module(name, mod, deps, path) print_d("Rescanning done: %d added, %d removed, %d error(s)" % (len(added), len(removed), len(self.__failures))) return removed, added
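# Standalone sketch of the synthetic-parent trick used above: register an
# empty package in sys.modules so that modules loaded under it can be
# pickled ("fakepkg" is an illustrative name, not from the original code).
import importlib.machinery
import importlib.util
import sys

spec = importlib.machinery.ModuleSpec("fakepkg", None, is_package=True)
sys.modules["fakepkg"] = importlib.util.module_from_spec(spec)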
def get_traceback_text(exec_info): return "".join(["-"] * 20 + ["\n\n"] + list(traceback.format_exception(*exec_info)) + ["\n"] + ["-"] * 37)
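# Hypothetical usage inside an except block:
import sys

try:
    raise RuntimeError("boom")
except RuntimeError:
    print(get_traceback_text(sys.exc_info()))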
t_i = time.time()
t_this = t_i

print "Starting at", current_time()
error_log.write("Started at: " + current_time() + "\n")

for f in pdfs_not_done:
    filename = f + ".pdf"
    print "Processing", filename, "..."
    error_log.write("Processing " + filename + " at " + current_time() + "\n")
    try:
        (l_luxParse, l_cleaner, l_tot) = do_evrytin(filename)
        move_to_load(f)
        time_passed_here = int(time.time() - t_this)
        time_total = int(time.time() - t_i)
        error_log.write(current_time() + " - " + filename + " done in " + str(time_passed_here) + "\n")
        error_log.write("    " + str(l_luxParse) + " parser | " + str(l_cleaner) + " cleaner | " + str(l_tot) + " total\n")
        print " ", filename, "done in", time_passed_here, "seconds", "| Total time elapsed:", time_total, "[s]"
        t_this = time.time()
    except Exception:
        (type_, value_, tb) = sys.exc_info()
        error_log.write(current_time() + " - " + repr(value_) + "\n")
        for line in traceback.format_exception(type_, value_, tb):
            # the formatted lines already carry a trailing newline, so use
            # the trailing comma to avoid double-spacing the output
            print line,
            error_log.write("    " + line)
        error_log.write("-" * 15 + "\n")
        print "-" * 15