def run_keyword(self, name, args, kwargs=None):
    args, kwargs = self._handle_binary_args(args, kwargs or {})
    result = {'status': 'FAIL'}
    self._intercept_std_streams()
    try:
        return_value = self._get_keyword(name)(*args, **kwargs)
    except:
        exc_type, exc_value, exc_tb = sys.exc_info()
        self._add_to_result(result, 'error',
                            self._get_error_message(exc_type, exc_value))
        self._add_to_result(result, 'traceback',
                            self._get_error_traceback(exc_tb))
        self._add_to_result(result, 'continuable',
                            self._get_error_attribute(exc_value, 'CONTINUE'),
                            default=False)
        self._add_to_result(result, 'fatal',
                            self._get_error_attribute(exc_value, 'EXIT'),
                            default=False)
    else:
        try:
            self._add_to_result(result, 'return',
                                self._handle_return_value(return_value))
        except:
            exc_type, exc_value, _ = sys.exc_info()
            self._add_to_result(result, 'error',
                                self._get_error_message(exc_type, exc_value))
        else:
            result['status'] = 'PASS'
    self._add_to_result(result, 'output', self._restore_std_streams())
    return result
def downloadFile(self, listadapter, *args):
    try:
        self.interface.log("ABOUT TO DOWNLOAD FROM " + listadapter.selection[0].text)
        s = listadapter.selection[0].text
        i = self.app.context["peers_addr"].index(s)
        print("INSIDE DOWNLOAD ")
        key = str(i) + "_" + str(s)
        if self.app.context["downloads_available"][str(key)]:
            peer = self.app.context["downloads_available"][str(key)]
            print(peer)
            # we can start the file download
            destination = (s, int(peer["porta"]))
            print(destination)
            print(peer["md5"])
            self.connection_socket = socket.socket(socket.AF_INET6, socket.SOCK_STREAM)
            self.connection_socket.connect(destination)
            message = "RETR" + str(peer["md5"])
            self.connection_socket.send(message)
            message_type = self.connection_socket.recv(4)
            num_chunks = self.connection_socket.recv(6)
            f = open('shared/' + peer["nome"].strip(" "), "wb")
            if int(num_chunks) > 0:
                print("num chunks " + str(num_chunks))
                self.interface.progress.max = int(num_chunks)
                for i in range(int(num_chunks)):
                    len_chunk = self.connection_socket.recv(5)
                    if int(len_chunk) > 0:
                        self.interface.progress.value = self.interface.progress.value + 1
                        chunk = self.connection_socket.recv(int(len_chunk))
                        while len(chunk) < int(len_chunk):
                            new_data = self.connection_socket.recv(int(len_chunk) - len(chunk))
                            chunk = chunk + new_data
                        f.write(chunk)
            f.close()
            self.connection_socket.close()
            self.interface.progress.value = 0
            # tell the directory server that the download has finished
            self.connection_socket = socket.socket(socket.AF_INET6, socket.SOCK_STREAM)
            self.connection_socket.connect(self.directory)
            message = "DREG" + self.app.context["sessionid"] + peer["md5"]
            self.connection_socket.send(message)
            ack = self.connection_socket.recv(4)
            n_down = self.connection_socket.recv(5)
            self.interface.log("RECEIVED " + str(ack))
            self.interface.log("#DOWNLOAD " + str(n_down))
            self.connection_socket.close()
        else:
            print("NOT AVAILABLE")
    except:
        print("exception!!")
        print(sys.exc_info()[0])
        print(sys.exc_info()[1])
        print(sys.exc_info()[2])
def test_logger():
    fn = setup_file('test_logger.log')
    rep = LogReporter(filename=fn, show_hidden_frames=False)
    try:
        int('a')
    except:
        exc_data = collector.collect_exception(*sys.exc_info())
    else:
        assert 0
    rep.report(exc_data)
    content = open(fn).read()
    assert len(content.splitlines()) == 4
    assert 'ValueError' in content
    assert 'int' in content
    assert 'test_reporter.py' in content
    assert 'test_logger' in content

    try:
        1 / 0
    except:
        exc_data = collector.collect_exception(*sys.exc_info())
    else:
        assert 0
    rep.report(exc_data)
    content = open(fn).read()
    print content
    assert len(content.splitlines()) == 8
    assert 'ZeroDivisionError' in content
def sources(self, url, hostDict, hostprDict):
    try:
        sources = []
        if url == None:
            return sources
        year = url['year']
        h = {'User-Agent': client.randomagent()}
        title = cleantitle.geturl(url['title']).replace('-', '+')
        url = urlparse.urljoin(self.base_link, self.search_link % title)
        r = requests.get(url, headers=h)
        r = BeautifulSoup(r.text, 'html.parser').find('div', {'class': 'item'})
        r = r.find('a')['href']
        r = requests.get(r, headers=h)
        r = BeautifulSoup(r.content, 'html.parser')
        quality = r.find('span', {'class': 'calidad2'}).text
        url = r.find('div', {'class': 'movieplay'}).find('iframe')['src']
        if not quality in ['1080p', '720p']:
            quality = 'SD'
        valid, host = source_utils.is_host_valid(url, hostDict)
        sources.append({'source': host, 'quality': quality, 'language': 'en',
                        'url': url, 'direct': False, 'debridonly': False})
        return sources
    except:
        print("Unexpected error in Furk Script: check_api", sys.exc_info()[0])
        exc_type, exc_obj, exc_tb = sys.exc_info()
        print(exc_type, exc_tb.tb_lineno)
        return sources
def __init__(self, body=None, delivery_tag=None,
             content_type=None, content_encoding=None, delivery_info=None,
             properties=None, headers=None, postencode=None,
             accept=None, channel=None, **kwargs):
    delivery_info = {} if not delivery_info else delivery_info
    self.errors = [] if self.errors is None else self.errors
    self.channel = channel
    self.delivery_tag = delivery_tag
    self.content_type = content_type
    self.content_encoding = content_encoding
    self.delivery_info = delivery_info
    self.headers = headers or {}
    self.properties = properties or {}
    self._decoded_cache = None
    self._state = 'RECEIVED'
    self.accept = accept

    compression = self.headers.get('compression')
    if not self.errors and compression:
        try:
            body = decompress(body, compression)
        except Exception:
            self.errors.append(sys.exc_info())

    if not self.errors and postencode and isinstance(body, text_t):
        try:
            body = body.encode(postencode)
        except Exception:
            self.errors.append(sys.exc_info())
    self.body = body
def _handleConnection(self, sock):
    try:
        if sock is None:
            return
        csock, caddr = sock.accept()
        if pyro4.config.COMMTIMEOUT:
            csock.settimeout(pyro4.config.COMMTIMEOUT)
    except socket.error:
        x = sys.exc_info()[1]
        err = getattr(x, "errno", x.args[0])
        if err in socketutil.ERRNO_RETRIES:
            # just ignore this error for now and continue
            log.warning("accept() failed errno=%d, shouldn't happen", err)
            return None
        if err in socketutil.ERRNO_BADF or err in socketutil.ERRNO_ENOTSOCK:
            # our server socket got destroyed
            raise errors.ConnectionClosedError("server socket closed")
        raise
    try:
        conn = socketutil.SocketConnection(csock)
        if self.daemon._handshake(conn):
            return conn
    except:
        # catch all errors, otherwise the event loop could terminate
        ex_t, ex_v, ex_tb = sys.exc_info()
        tb = util.formatTraceback(ex_t, ex_v, ex_tb)
        log.warning("error during connect/handshake: %s; %s", ex_v, "\n".join(tb))
        try:
            csock.shutdown(socket.SHUT_RDWR)
        except (OSError, socket.error):
            pass
        csock.close()
    return None
def testMarkExceptionChange(self):
    state = State(self.tempdir, 'repo')
    state._gmtime = lambda: (2012, 8, 13, 12, 15, 0, 0, 0, 0)
    state.markStarted()
    try:
        raise ValueError("the same exception")
    except:
        exType, exValue, exTraceback = exc_info()
        state.markException(exType, exValue, "9999/9999/9999/9999")
    state.close()
    self.assertEquals(
        {"changedate": "2012-08-13 12:15:00", "status": "Error",
         "message": "the same exception"},
        jsonLoad(open(join(self.tempdir, 'repo.running'))))

    state = State(self.tempdir, 'repo')
    state._gmtime = lambda: (2012, 8, 13, 12, 17, 0, 0, 0, 0)
    state.markStarted()
    try:
        raise ValueError("the same exception")
    except:
        exType, exValue, exTraceback = exc_info()
        state.markException(exType, exValue, "9999/9999/9999/9999")
    state.close()
    self.assertEquals(
        {"changedate": "2012-08-13 12:15:00", "status": "Error",
         "message": "the same exception"},
        jsonLoad(open(join(self.tempdir, 'repo.running'))))

    state = State(self.tempdir, 'repo')
    state._gmtime = lambda: (2012, 8, 13, 12, 19, 0, 0, 0, 0)
    state.markStarted()
    try:
        raise ValueError("the other exception")
    except:
        exType, exValue, exTraceback = exc_info()
        state.markException(exType, exValue, "9999/9999/9999/9999")
    state.close()
    self.assertEquals(
        {"changedate": "2012-08-13 12:19:00", "status": "Error",
         "message": "the other exception"},
        jsonLoad(open(join(self.tempdir, 'repo.running'))))
def send(self, data, flags=0, timeout=timeout_default):
    sock = self._sock
    if timeout is timeout_default:
        timeout = self.timeout
    try:
        return sock.send(data, flags)
    except error:
        ex = sys.exc_info()[1]
        if ex.args[0] != EWOULDBLOCK or timeout == 0.0:
            raise
        sys.exc_clear()
        try:
            self._wait(self._write_event)
        except error:
            ex = sys.exc_info()[1]
            if ex.args[0] == EBADF:
                return 0
            raise
        try:
            return sock.send(data, flags)
        except error:
            ex2 = sys.exc_info()[1]
            if ex2.args[0] == EWOULDBLOCK:
                return 0
            raise
def main():
    #for p in range(1,intGetMaxPage +1):
    #soup = BeautifulSoup()
    try:
        resp = urllib2.urlopen(getUrl, timeout=10)
        soup = BeautifulSoup(resp)
        soup = soup.find('div', {'id': 'prodlist'})
        #for k in soup.findAll("div", {'class': 'p-name'}):  # grab <div class='p-name'>...</div>
        for k in soup.findAll('a', href=True):
            try:
                url = k.get('href')
                print k.text
                print url
                page_url = homeUrl + url
                print page_url
                resp_text_page = urllib2.urlopen(homeUrl + url, timeout=10)
                soup_text_page = BeautifulSoup(resp_text_page)
                contextPageUrl(soup_text_page, page_url)
            except:
                print "Unexpected error:", sys.exc_info()[0]
                print "Unexpected error:", sys.exc_info()[1]
                continue
    except:
        #continue
        print "Unexpected error:", sys.exc_info()[0]
        print "Unexpected error:", sys.exc_info()[1]
        pass
def sortnums(self, model, iter1, iter2, nums):
    try:
        ret = 0
        (n, grid) = nums
        a = self.liststore[grid].get_value(iter1, n)
        b = self.liststore[grid].get_value(iter2, n)
        if 'f' in self.cols_to_show[n][4]:
            try:
                a = float(a)
            except:
                a = 0.0
            try:
                b = float(b)
            except:
                b = 0.0
        if n == 0 and grid == 1:  # make sure it only works on the starting hands
            a1, a2, a3 = ranks[a[0]], ranks[a[1]], (a + 'o')[2]
            b1, b2, b3 = ranks[b[0]], ranks[b[1]], (b + 'o')[2]
            if a1 > b1 or (a1 == b1 and (a2 > b2 or (a2 == b2 and a3 > b3))):
                ret = 1
            else:
                ret = -1
        else:
            if a < b:
                ret = -1
            elif a == b:
                ret = 0
            else:
                ret = 1
        #print "n =", n, "iter1[n] =", self.liststore[grid].get_value(iter1,n), "iter2[n] =", self.liststore[grid].get_value(iter2,n), "ret =", ret
    except:
        err = traceback.extract_tb(sys.exc_info()[2])
        print _("***sortnums error: ") + str(sys.exc_info()[1])
        print "\n".join([e[0] + ':' + str(e[1]) + " " + e[2] for e in err])
    return ret
def nested(*managers):
    exits = []
    vars = []
    exc = (None, None, None)
    try:
        for mgr in managers:
            exit = mgr.__exit__
            enter = mgr.__enter__
            vars.append(enter())
            exits.append(exit)
        yield vars
    except:
        exc = sys.exc_info()
    finally:
        while exits:
            exit = exits.pop()
            try:
                if exit(*exc):
                    exc = (None, None, None)
            except:
                exc = sys.exc_info()
        if exc != (None, None, None):
            # PEP 3109
            e = exc[0](exc[1])
            e.__traceback__ = exc[2]
            raise e
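# A minimal usage sketch for the `nested` generator above, assuming it is
# wrapped with contextlib.contextmanager (as the original contextlib.nested
# recipe was); the file names are hypothetical.
import contextlib

nested_cm = contextlib.contextmanager(nested)

with nested_cm(open('a.txt'), open('b.txt')) as (fa, fb):
    # Both managers are entered in order; both __exit__ methods run on the
    # way out, even if one of them raises.
    combined = fa.read() + fb.read()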
def run(self, result=None):
    orig_result = result
    if result is None:
        result = self.defaultTestResult()
        startTestRun = getattr(result, 'startTestRun', None)
        if startTestRun is not None:
            startTestRun()

    self._resultForDoCleanups = result
    result.startTest(self)

    if getattr(self.__class__, "__unittest_skip__", False):
        # If the whole class was skipped.
        try:
            result.addSkip(self, self.__class__.__unittest_skip_why__)
        finally:
            result.stopTest(self)
        return

    testMethod = getattr(self, self._testMethodName)
    try:
        success = False
        try:
            self.setUp()
        except SkipTest, e:
            result.addSkip(self, str(e))
        except Exception:
            result.addError(self, sys.exc_info())
        else:
            try:
                testMethod()
            except self.failureException:
                result.addFailure(self, sys.exc_info())
            except _ExpectedFailure, e:
                result.addExpectedFailure(self, e.exc_info)
            except _UnexpectedSuccess:
                result.addUnexpectedSuccess(self)
def _marshaled_dispatch(self, address, data):
    params, method = xmlrpclib.loads(data)
    if not self.instance.check_acls(address, method):
        raise XMLRPCACLCheckException
    try:
        if '.' not in method:
            params = (address, ) + params
        response = self.instance._dispatch(method, params, self.funcs)
        # py3k compatibility
        if type(response) not in [bool, str, list, dict]:
            response = (response.decode('utf-8'), )
        else:
            response = (response, )
        raw_response = xmlrpclib.dumps(response, methodresponse=True,
                                       allow_none=self.allow_none,
                                       encoding=self.encoding)
    except xmlrpclib.Fault:
        fault = sys.exc_info()[1]
        raw_response = xmlrpclib.dumps(fault, methodresponse=True,
                                       allow_none=self.allow_none,
                                       encoding=self.encoding)
    except:
        err = sys.exc_info()
        self.logger.error("Unexpected handler error", exc_info=1)
        # report exception back to server
        raw_response = xmlrpclib.dumps(
            xmlrpclib.Fault(1, "%s:%s" % (err[0].__name__, err[1])),
            methodresponse=True, allow_none=self.allow_none,
            encoding=self.encoding)
    return raw_response
def buildDicts(n):
    cleaner = Cleaner()
    cleaner.javascript = True
    cleaner.style = True
    i = 0
    tagsDict = set()
    while (i < n):
        if (os.path.isfile("spam/%d.txt" % i)):
            try:
                readInFile = open("spam/%d.txt" % i)
                content = readInFile.read()
                noSymbols = re.sub('[^A-Za-z-]+', ' ', content.lower())  # noSymbols is stripped of symbols
                tags = set(noSymbols.split())  # tags is the set of words without symbols
                tagsDict = tagsDict.union(tags)
            except Exception, err:
                print traceback.format_exc()
                print sys.exc_info()[0]
        if (os.path.isfile("notspam/%d.txt" % i)):
            try:
                readInFile = open("notspam/%d.txt" % i)
                content = readInFile.read()
                noSymbols = re.sub('[^A-Za-z-]+', ' ', content.lower())  # noSymbols is stripped of symbols
                tags = set(noSymbols.split())  # tags is the set of words without symbols
                tagsDict = tagsDict.union(tags)
            except Exception, err:
                print traceback.format_exc()
                print sys.exc_info()[0]
def tokenize(n, tagsDict):
    cleaner = Cleaner()
    cleaner.javascript = True
    cleaner.style = True
    i = 0
    df = pandas.DataFrame(columns=list(tagsDict))
    while (i < n):
        allVector = {}
        if (os.path.isfile("spam/%d.txt" % i)):
            try:
                for word in tagsDict:
                    allVector[word] = 0
                readInFile = open("spam/%d.txt" % i)
                content = readInFile.read()
                noSymbols = re.sub('[^A-Za-z-]+', ' ', content.lower())  # noSymbols is stripped of symbols
                allCopy = noSymbols.split()  # allCopy is the list of words without symbols
                for tag in allCopy:
                    df.ix[i, tag] = df.ix[i, tag] + 1
                df.ix[i, 'isSpam'] = 'spam'
            except Exception, err:
                print traceback.format_exc()
                print sys.exc_info()[0]
        i = i + 1
def exp_render_report(db, uid, object, ids, datas=None, context=None):
    if not datas:
        datas = {}
    if not context:
        context = {}

    self_id_protect.acquire()
    global self_id
    self_id += 1
    id = self_id
    self_id_protect.release()

    self_reports[id] = {'uid': uid, 'result': False, 'state': False, 'exception': None}

    cr = openerp.registry(db).cursor()
    try:
        result, format = openerp.report.render_report(cr, uid, ids, object, datas, context)
        if not result:
            tb = sys.exc_info()
            self_reports[id]['exception'] = openerp.exceptions.DeferredException(
                'RML is not available at specified location or not enough data to print!', tb)
        self_reports[id]['result'] = result
        self_reports[id]['format'] = format
        self_reports[id]['state'] = True
    except Exception, exception:
        _logger.exception('Exception: %s\n', exception)
        if hasattr(exception, 'name') and hasattr(exception, 'value'):
            self_reports[id]['exception'] = openerp.exceptions.DeferredException(
                tools.ustr(exception.name), tools.ustr(exception.value))
        else:
            tb = sys.exc_info()
            self_reports[id]['exception'] = openerp.exceptions.DeferredException(
                tools.exception_to_unicode(exception), tb)
        self_reports[id]['state'] = True
def write(self, text, last=False):
    try:
        # string exception, raised only to capture a traceback whose
        # tb_frame.f_back is the caller's frame (Python 2 only)
        raise "Dummy"
    except:
        lineText = str(sys.exc_info()[2].tb_frame.f_back.f_lineno)
        codeObject = sys.exc_info()[2].tb_frame.f_back.f_code
        fileName = codeObject.co_filename
        funcName = codeObject.co_name
    if not hasattr(self, 'text'):
        self.text = ""
    self.text += text
    lines = self.text.split("\n")
    linestoprint = lines if last else lines[:-1]
    self.text = lines[-1]
    for l in linestoprint:
        r = self.func(l.strip(), fileName, lineText, funcName)
        if r.strip() != "":
            self.get_origOut().write(r + "\n")
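# The string-exception trick above only works on Python 2. A sketch of the
# same caller lookup for Python 3 using sys._getframe (the helper name is
# hypothetical; the original relies on the traceback trick instead):
import sys

def caller_location():
    # Frame of whoever called the function that calls caller_location().
    frame = sys._getframe(2)
    code = frame.f_code
    return code.co_filename, frame.f_lineno, code.co_name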
def __call__(self, environ, start_response):
    try:
        return self.app(environ, start_response)
    except:
        traceback.print_exception(*sys.exc_info())
        pdb.post_mortem(sys.exc_info()[2])
        raise
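# A minimal sketch of wiring this post-mortem middleware into a WSGI stack.
# The class name PdbMiddleware and its constructor are assumptions; the
# snippet above only shows __call__.
import pdb
import sys
import traceback
from wsgiref.simple_server import make_server

class PdbMiddleware(object):
    """Drop into a post-mortem pdb session whenever the wrapped app raises."""
    def __init__(self, app):
        self.app = app

    def __call__(self, environ, start_response):
        try:
            return self.app(environ, start_response)
        except:
            traceback.print_exception(*sys.exc_info())
            pdb.post_mortem(sys.exc_info()[2])
            raise

def failing_app(environ, start_response):
    raise RuntimeError("boom")

if __name__ == "__main__":
    make_server("", 8000, PdbMiddleware(failing_app)).serve_forever()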
def testScript():
    OSHVResult = ObjectStateHolderVector()
    DebugMode = 'false'
    DateParsePattern = 'EEE MMM dd HH:mm:ss z yyyy'
    userExtDir = 'E:\\data\\Desktop\\Pull_From_Remedy_backup\\'
    if (DebugMode != None):
        DebugMode = DebugMode.lower()
        if DebugMode == "true":
            logger.info('[NOTE] UCMDB Integration is running in DEBUG mode. No data will be pushed to the destination server.')
            return
    filePathDir = userExtDir + 'TQLExport\\Atrium\\results\\'
    directory = File(filePathDir)
    files = directory.listFiles()
    try:
        for file in files:
            if file != None or file != '':
                builder = SAXBuilder()
                doc = builder.build(file)
                logger.info("Start processing CIs to update in the destination server...")
                allObjects = doc.getRootElement().getChild('data').getChild('objects').getChildren('Object')
                (objVector, ciDict) = processObjects(allObjects, DateParsePattern)
                OSHVResult.addAll(objVector)
                logger.info("Start processing Relationships to update in the destination server...")
                allLinks = doc.getRootElement().getChild('data').getChild('links').getChildren('link')
                linkVector = processLinks(allLinks, ciDict)
                OSHVResult.addAll(linkVector)
        print OSHVResult.toXmlString()
    except:
        stacktrace = traceback.format_exception(sys.exc_info()[0], sys.exc_info()[1], sys.exc_info()[2])
        logger.info('Failure in processing data %s' % stacktrace)
    logger.info('Ending Push to UCMDB')

#testScript()
def getAnyChecksum(self, info, username=None, password=None, session=None, is_source=0):
    """ returns checksum info of available packages
        also does an existence check on the filesystem.
    """
    log_debug(3)

    pkg_infos = info.get('packages')
    channels = info.get('channels', [])
    force = info.get('force', 0)
    orgid = info.get('org_id')

    if orgid == 'null':
        null_org = 1
    else:
        null_org = None

    if not session:
        org_id, force = rhnPackageUpload.authenticate(username, password,
                                                      channels=channels,
                                                      null_org=null_org,
                                                      force=force)
    else:
        try:
            org_id, force = rhnPackageUpload.authenticate_session(
                session, channels=channels, null_org=null_org, force=force)
        except rhnSession.InvalidSessionError:
            raise_with_tb(rhnFault(33), sys.exc_info()[2])
        except rhnSession.ExpiredSessionError:
            raise_with_tb(rhnFault(34), sys.exc_info()[2])

    if is_source:
        ret = self._getSourcePackageChecksum(org_id, pkg_infos)
    else:
        ret = self._getPackageChecksum(org_id, pkg_infos)
    return ret
def _read_raw_athena(filename):
    """try to read athena project file as plain text,
    to determine validity
    """
    # try gzip
    text = None
    fh = None
    try:
        fh = GzipFile(filename)
        text = bytes2str(fh.read())
    except Exception:
        errtype, errval, errtb = sys.exc_info()
        text = None
    finally:
        if fh is not None:
            fh.close()

    if text is None:
        # try plain text file
        try:
            fh = open(filename, 'r')
            text = bytes2str(fh.read())
        except Exception:
            errtype, errval, errtb = sys.exc_info()
            text = None
        finally:
            if fh is not None:
                fh.close()
    return text
def exception_info(current_filename=None, index=-1):
    "Analyze the traceback and build a dict with user-friendly info"
    # save the original traceback (in case another exception occurs):
    info = sys.exc_info()  # exc_type, exc_value, exc_traceback
    # important: do not unpack it, as that can cause a memory leak
    if not current_filename:
        # build a call stack to see who called us and limit the trace:
        # warning: this is needed because under py2exe __file__ is unavailable
        try:
            raise ZeroDivisionError
        except ZeroDivisionError:
            f = sys.exc_info()[2].tb_frame.f_back
        current_filename = os.path.normpath(os.path.abspath(f.f_code.co_filename))
    # extract the last trace entry for the requested file:
    # (useful to keep the trace short, without library lines)
    ret = {'filename': "", 'lineno': 0, 'function_name': "", 'code': ""}
    try:
        for (filename, lineno, fn, text) in traceback.extract_tb(info[2]):
            if os.path.normpath(os.path.abspath(filename)) == current_filename:
                ret = {'filename': filename, 'lineno': lineno,
                       'function_name': fn, 'code': text}
            else:
                print filename
    except Exception, e:
        pass
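# A minimal usage sketch for exception_info (hypothetical call site): it must
# run inside an except block so sys.exc_info() still holds the active exception.
try:
    1 / 0
except Exception:
    info = exception_info()
    print info['filename'], info['lineno'], info['function_name']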
def sendData(sock, data):
    """Send some data over a socket.
    Some systems have problems with ``sendall()`` when the socket is in
    non-blocking mode. For instance, Mac OS X seems to be happy to throw
    EAGAIN errors too often. This function falls back to using a regular
    send loop if needed.
    """
    if sock.gettimeout() is None:
        # socket is in blocking mode, we can use sendall normally.
        try:
            sock.sendall(data)
            return
        except socket.timeout:
            raise TimeoutError("sending: timeout")
        except socket.error:
            x = sys.exc_info()[1]
            raise ConnectionClosedError("sending: connection lost: " + str(x))
    else:
        # Socket is in non-blocking mode, use regular send loop.
        retrydelay = 0.0
        while data:
            try:
                sent = sock.send(data)
                data = data[sent:]
            except socket.timeout:
                raise TimeoutError("sending: timeout")
            except socket.error:
                x = sys.exc_info()[1]
                err = getattr(x, "errno", x.args[0])
                if err not in ERRNO_RETRIES:
                    raise ConnectionClosedError("sending: connection lost: " + str(x))
                time.sleep(0.00001 + retrydelay)  # a slight delay to wait before retrying
                retrydelay = __nextRetrydelay(retrydelay)
def create_pplan(self, topologyName, pplan):
    """ create physical plan """
    if not pplan or not pplan.IsInitialized():
        raise StateException("Physical Plan protobuf not init properly",
                             StateException.EX_TYPE_PROTOBUF_ERROR), None, sys.exc_info()[2]

    path = self.get_pplan_path(topologyName)
    LOG.info("Adding topology: {0} to path: {1}".format(topologyName, path))
    pplanString = pplan.SerializeToString()
    try:
        self.client.create(path, value=pplanString, makepath=True)
        return True
    except NoNodeError:
        raise StateException("NoNodeError while creating pplan",
                             StateException.EX_TYPE_NO_NODE_ERROR), None, sys.exc_info()[2]
    except NodeExistsError:
        raise StateException("NodeExistsError while creating pplan",
                             StateException.EX_TYPE_NODE_EXISTS_ERROR), None, sys.exc_info()[2]
    except ZookeeperError:
        raise StateException("Zookeeper while creating pplan",
                             StateException.EX_TYPE_ZOOKEEPER_ERROR), None, sys.exc_info()[2]
    except Exception:
        # Just re raise the exception.
        raise
def create_execution_state(self, topologyName, executionState):
    """ create execution state """
    if not executionState or not executionState.IsInitialized():
        raise StateException("Execution State protobuf not init properly",
                             StateException.EX_TYPE_PROTOBUF_ERROR), None, sys.exc_info()[2]

    path = self.get_execution_state_path(topologyName)
    LOG.info("Adding topology: {0} to path: {1}".format(topologyName, path))
    executionStateString = executionState.SerializeToString()
    try:
        self.client.create(path, value=executionStateString, makepath=True)
        return True
    except NoNodeError:
        raise StateException("NoNodeError while creating execution state",
                             StateException.EX_TYPE_NO_NODE_ERROR), None, sys.exc_info()[2]
    except NodeExistsError:
        raise StateException("NodeExistsError while creating execution state",
                             StateException.EX_TYPE_NODE_EXISTS_ERROR), None, sys.exc_info()[2]
    except ZookeeperError:
        raise StateException("Zookeeper while creating execution state",
                             StateException.EX_TYPE_ZOOKEEPER_ERROR), None, sys.exc_info()[2]
    except Exception:
        # Just re raise the exception.
        raise
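# For reference: the Python 2 three-expression raise used in the two state
# managers above ("raise exc, None, tb") re-raises with the traceback of the
# in-flight exception. A hedged Python 3 sketch of the same pattern (the
# helper name is hypothetical):
import sys

def _reraise_with_current_tb(exc):
    # Attach the traceback from the except block we are in to the new
    # exception and re-raise it -- the Python 3 equivalent of
    # "raise exc, None, sys.exc_info()[2]".
    raise exc.with_traceback(sys.exc_info()[2])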
def editar_campania(cid, nombre_empresa, ruc, text_content, fecha_publicacion,
                    img_banner, url_banner, subject_email):
    m = get_mailchimp_api()
    campania = mailchimp.Campaigns(m)
    direccion = get_lan_ip()
    img_banner = img_banner[1:]
    try:
        result = campania.update(cid, 'content', {'sections': {
            'banner_img': '<a target="_blank" href="{0}"><img src="http://{1}/{2}" /></a>'.format(
                url_banner, direccion, img_banner),
            'text_content': text_content,
        }})
    except:
        print ('Error editing campaign:', sys.exc_info()[0])
    try:
        result = campania.update(cid, 'options', {
            'list_id': '25be04f5d4',
            'subject': subject_email,
            'from_email': '*****@*****.**',
            'from_name': 'Innobee - Facturación electrónica',
            'to_name': 'Subscriptores Innobee Portal',
            'template_id': 111625,
            'title': 'campania---{0}---{1}'.format(nombre_empresa, hoy),
            'tracking': {'opens': True, 'html_clicks': True, 'text_clicks': True}
        },)
        print result
        campania.schedule(cid, fecha_publicacion)
    except:
        print ('Error editing campaign:', sys.exc_info()[0])
def run(self):
    """Starts or resumes the generator, running until it reaches a
    yield point that is not ready.
    """
    if self.running or self.finished:
        return
    try:
        self.running = True
        while True:
            future = self.future
            if not future.done():
                return
            self.future = None
            try:
                orig_stack_contexts = stack_context._state.contexts
                exc_info = None
                try:
                    value = future.result()
                except Exception:
                    self.had_exception = True
                    exc_info = sys.exc_info()
                if exc_info is not None:
                    yielded = self.gen.throw(*exc_info)
                    exc_info = None
                else:
                    yielded = self.gen.send(value)
                if stack_context._state.contexts is not orig_stack_contexts:
                    self.gen.throw(
                        stack_context.StackContextInconsistentError(
                            'stack_context inconsistency (probably caused '
                            'by yield within a "with StackContext" block)'))
            except (StopIteration, Return) as e:
                self.finished = True
                self.future = _null_future
                if self.pending_callbacks and not self.had_exception:
                    # If we ran cleanly without waiting on all callbacks
                    # raise an error (really more of a warning). If we
                    # had an exception then some callbacks may have been
                    # orphaned, so skip the check in that case.
                    raise LeakedCallbackError(
                        "finished without waiting for callbacks %r" %
                        self.pending_callbacks)
                self.result_future.set_result(_value_from_stopiteration(e))
                self.result_future = None
                self._deactivate_stack_context()
                return
            except Exception:
                self.finished = True
                self.future = _null_future
                self.result_future.set_exc_info(sys.exc_info())
                self.result_future = None
                self._deactivate_stack_context()
                return
            if not self.handle_yield(yielded):
                return
    finally:
        self.running = False
def handle(self, *args, **options):
    def get_tmdb_ids():
        def get_args():
            if args:
                movie_id = args[0]
                try:
                    batch = args[1]
                except:
                    batch = False
            else:
                movie_id = None
                batch = None
            return movie_id, batch

        movie_id, batch = get_args()
        movies = Movie.objects.all()
        if movie_id is not None:
            if batch:
                movies = movies.filter(pk__gte=movie_id)
            else:
                movies = movies.filter(pk=movie_id)
        return movies.values_list("tmdb_id", flat=True)

    for tmdb_id in get_tmdb_ids():
        try:
            print add_movie_to_db(tmdb_id, True)
        except TMDBRequestInvalid:
            print "Movie id - %d" % Movie.objects.get(tmdb_id=tmdb_id).id
            print sys.exc_info()[1]
def _handle_request_exception(self, e):
    if not isinstance(e, Interruption):
        return tornado.web.RequestHandler._handle_request_exception(self, e)
    # copy of tornado.web.RequestHandler._handle_request_exception
    # but remove exception report
    if isinstance(e, tornado.web.Finish):
        # Not an error; just finish the request without logging.
        if not self._finished:
            self.finish()
        return
    # this is not an error
    # do not report exception
    # self.log_exception(*sys.exc_info())
    if self._finished:
        # Extra errors after the request has been finished should
        # be logged, but there is no reason to continue to try and
        # send a response.
        return
    if isinstance(e, tornado.web.HTTPError):
        if e.status_code not in tornado.httputil.responses and not e.reason:
            gen_log.error("Bad HTTP status code: %d", e.status_code)
        else:
            self.send_error(e.status_code, exc_info=sys.exc_info())
            return
    self.send_error(500, exc_info=sys.exc_info())
def nested(*managers):
    # Code from `contextlib`
    exits = []
    vars = []
    exc = (None, None, None)
    try:
        for mgr in managers:
            exit = mgr.__exit__
            enter = mgr.__enter__
            vars.append(enter())
            exits.append(exit)
        yield vars
    except:
        exc = sys.exc_info()
    finally:
        while exits:
            exit = exits.pop()
            try:
                if exit(*exc):
                    exc = (None, None, None)
            except:
                exc = sys.exc_info()
        if exc != (None, None, None):
            # Don't rely on sys.exc_info() still containing
            # the right information. Another exception may
            # have been raised and caught by an exit method
            raise exc[1].with_traceback(exc[2])
def get_user(self, user: str) -> Union[UserAccessMeta, None]:
    try:
        return self._from_db(self._auth_collection.find_one({"user": user}))
    except:
        logging.error("Get user(%s) Error: %s", user, sys.exc_info()[1])
        return None
def getToken(self, username: str, password: str, terminalId: str, clientIp: str):
    try:
        with orm.db_session:
            passwordhash = hashlib.sha512(str(password).encode('utf-8')).hexdigest()
            query = Users.select(lambda u: u.Username == str(username) and
                                 u.Password == str(passwordhash))
            mylist = list(query)
            response = ""
            if len(mylist) > 0:
                if mylist[0].IsActive:
                    token = uuid.uuid4().hex
                    tokens = Tokens(Token=token, UserID=mylist[0].UserID,
                                    GenerationDate=datetime.now(), ClientIP=clientIp)
                    orm.commit()
                    resp = exceptionHandling.getErrorMessage('SEC00')
                    response = {
                        "token": token,
                        "RetCode": resp[0],
                        "RetMsg": resp[1],
                        "RetMsgFa": resp[2]
                    }
                    systemLog.InsertInfoLog(
                        resp[0], 'getToken',
                        '{"username":"******"","password":"******"}',
                        datetime.now(), token, terminalId, clientIp, resp[1])
                else:
                    resp = exceptionHandling.getErrorMessage('SEC03')
                    response = {
                        "RetCode": resp[0],
                        "RetMsg": resp[1],
                        "RetMsgFa": resp[2]
                    }
                    systemLog.InsertErrorLog(
                        resp[0], 'getToken',
                        '{"username":"******"","password":"******"}',
                        datetime.now(), None, terminalId, clientIp, resp[1], None)
            else:
                resp = exceptionHandling.getErrorMessage('SEC04')
                response = {
                    "RetCode": resp[0],
                    "RetMsg": resp[1],
                    "RetMsgFa": resp[2]
                }
                systemLog.InsertErrorLog(
                    resp[0], 'getToken',
                    '{"username":"******"","password":"******"}',
                    datetime.now(), None, terminalId, clientIp, resp[1], None)
            return response
    except Exception as e:
        exc_type, exc_obj, exc_tb = sys.exc_info()
        fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
        error = str(e) + "-(Filename:" + str(fname) + ", LineNo:" + str(exc_tb.tb_lineno) + ")"
        resp = exceptionHandling.getErrorMessage('SYS500')
        response = {
            "RetCode": resp[0],
            "RetMsg": resp[1],
            "RetMsgFa": resp[2]
        }
        systemLog.InsertErrorLog(
            resp[0], 'getToken',
            '{"username":"******"","password":"******"}',
            datetime.now(), None, terminalId, clientIp, resp[1], error)
        return response
def message():
    s9_ip = config.miner_ip
    s9_port = int(config.port_number)
    size = 4096
    twilio_id = config.twilio_id
    twilio_token = config.twilio_token
    if twilio_id == None:
        error = 'Valid twilio_id required. Messaging feature will not work.'
        log.error(error)
        print(error)
    if twilio_token == None:
        error = 'Valid twilio_token required. Messaging feature will not work.'
        log.error(error)
        print(error)
    text_source = config.text_source
    text_consumer = config.text_consumer
    if text_source == None:
        error = 'Valid text_source required. Messaging feature will not work.'
        log.error(error)
        print(error)
    if text_consumer == None:
        error = 'Valid text_consumer required. Messaging feature will not work.'
        log.error(error)
        print(error)

    try:
        sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        sock.settimeout(5)
        sock.connect((s9_ip, s9_port))
        msg = json.dumps({'command': 'stats+summary'})
        sock.sendto(msg.encode(), (s9_ip, s9_port))
        response = ''
        while 1:
            chunk = sock.recv(size).decode("ascii").rstrip(' \t\r\n\0')
            if chunk:
                response += chunk
            else:
                break
        if '}{' in response:
            # For some reason the json returned from the api call is invalid. This should fix it.
            response = response.replace('}{', '},{')
        # Send shutdown so the socket closes the connection cleanly.
        sock.shutdown(socket.SHUT_RDWR)
        # close the socket.
        sock.close()
        # Decode the response.
        response_decoded = json.loads(response)
    except:
        error = sys.exc_info()[0]
        log.error(error)
        print(error)

    try:
        client = Client(twilio_id, twilio_token)
        temp1 = response_decoded['stats'][0]['STATS'][1]['temp2_6']
        temp2 = response_decoded['stats'][0]['STATS'][1]['temp2_7']
        temp3 = response_decoded['stats'][0]['STATS'][1]['temp2_8']
        status1 = response_decoded['stats'][0]['STATS'][1]['chain_acs6']
        status2 = response_decoded['stats'][0]['STATS'][1]['chain_acs7']
        status3 = response_decoded['stats'][0]['STATS'][1]['chain_acs8']
        rate1 = response_decoded['stats'][0]['STATS'][1]['chain_rate6']
        rate2 = response_decoded['stats'][0]['STATS'][1]['chain_rate7']
        rate3 = response_decoded['stats'][0]['STATS'][1]['chain_rate8']
        found_block = response_decoded['summary'][0]['SUMMARY'][0]['Found Blocks']
        avg_rate = response_decoded['summary'][0]['SUMMARY'][0]['GHS av']

        # Log data in hopes of seeing trends.
        # Can also be used to see what temps maximize hash speed.
        data = '{} {} {} {} {} {} {}'.format(
            str(temp1).ljust(3), str(rate1).ljust(8),
            str(temp2).ljust(3), str(rate2).ljust(8),
            str(temp3).ljust(3), str(rate3).ljust(8),
            str(avg_rate))
        data_logger.info(data)

        # Check for bad chips.
        for status in [status1.lower(), status2.lower(), status3.lower()]:
            if 'x' in status:
                ct = 0
                for chip in status:
                    if chip == 'x':
                        ct += 1
                if ct <= 1:
                    log.warning('Bad chip detected')
                else:
                    msg = 'Bad chips detected!!!'
                    log.critical(msg)
                    txt_msg = "{}\n".format(msg)
                    sent_message = client.messages \
                        .create(
                            body=txt_msg,
                            from_=text_source,
                            to=text_consumer
                        )
                    print(sent_message.sid)
                    log.info(sent_message.sid)

        # Check for overheating.
        if temp1 >= 80 \
                or temp2 >= 80 \
                or temp3 >= 80:
            # Send temperature twilio message
            txt_msg = ("Miner is too hot!\n"
                       "temp2_6: {}\u00B0 C ({}\u00B0 F)\n"
                       "temp2_7: {}\u00B0 C ({}\u00B0 F)\n"
                       "temp2_8: {}\u00B0 C ({}\u00B0 F)"
                       ).format(temp1, c_to_f_conversion(temp1),
                                temp2, c_to_f_conversion(temp2),
                                temp3, c_to_f_conversion(temp3))
            sent_message = client.messages \
                .create(
                    body=txt_msg,
                    from_=text_source,
                    to=text_consumer
                )
            print(sent_message.sid)
            log.info(sent_message.sid)

        # Check if we found a block.
        if found_block > 0:
            # log that we found it.
            msg = 'WE FOUND A BLOCK!!!!'
            log.info(msg)
    except:
        # Catch all errors.
        exc_type, exc_obj, exc_tb = sys.exc_info()
        error = '{}, Line {}'.format(exc_type, exc_tb.tb_lineno)
        log.error(error)
        print(error)
def backtest(self):
    i = 0
    cash = 10000
    stocks = 0
    while i < len(self.s.df) - 1:
        try:
            # Look for buy signal
            if self.s.df[self.buy_signal][i] == 1:
                buy_date = self.s.df.index[i]
                # Buy
                buy_price = self.s.df['close'][i]
                stocks = math.floor(cash / buy_price)
                cash = cash % buy_price
                print(buy_date)
                print("Buy: " + str(buy_price))
                # Cannot buy and sell on the same day. Skip to the next day to look for an exit.
                i = i + 1
                # Look for exit signal
                while i < len(self.s.df):
                    # Set stop loss and take profit at +- 5 * atr of the previous day
                    atr = self.s.df['atr'][i - 1]
                    stop_loss = self.s.df['close'][i - 1] - atr * 5
                    take_profit = self.s.df['close'][i - 1] + atr * 5
                    # Look for stop loss
                    if self.s.df['close'][i] < stop_loss or self.s.df['low'][i] < stop_loss:
                        sell_price = stop_loss
                        comment = "Stop loss!"
                        break
                    # Look for take profit
                    elif self.s.df['close'][i] > take_profit or self.s.df['high'][i] > take_profit:
                        sell_price = take_profit
                        comment = "Take profit!"
                        break
                    # Look for sell signal
                    elif self.s.df[self.sell_signal][i] == 1:
                        comment = "Exit signal!"
                        sell_price = self.s.df['close'][i]
                        break
                    # Look for last day
                    elif i == len(self.s.df) - 1:
                        comment = "End of sequence"
                        sell_price = self.s.df['close'][i]
                        break
                    else:
                        i = i + 1
                sell_date = self.s.df.index[i]
                cash = cash + (sell_price * stocks)
                self.result.trades.append(BacktestingTrade(
                    buy_price, sell_price, buy_date, sell_date, cash, stocks, comment))
                print(self.s.df.index[i])
                print("Sell: " + str(sell_price))
                print("Profit: " + str((sell_price * stocks) - (buy_price * stocks)))
                stocks = 0
                print("Cash: " + str(cash))
                print(" --------- ")
                # Stop if there is no money left
                if (cash <= 0):
                    print("No funds!")
                    break
            i = i + 1
        except:
            error = sys.exc_info()
            pprint(error)
            i = i + 1
    # Sell remaining stocks, if any
    if stocks > 0:
        sell_price = self.s.df['close'][i - 1]
        cash = cash + (sell_price * stocks)
        print(self.s.df.index[i - 1])
        self.result.trades.append(BacktestingTrade(
            buy_price, sell_price, buy_date, sell_date, cash, stocks, "Selling remaining"))
        print("Sell: " + str(sell_price))
        print("Profit: " + str((sell_price * stocks) - (buy_price * stocks)))
        stocks = 0
        print("Cash: " + str(cash))
        print(" --------- ")
try:
    im = Image.open(filepath)
    title = description(filepath, im)
    if monochrome and im.mode not in ["1", "L"]:
        im.draft("L", im.size)
        im = im.convert("L")
    if printerArgs:
        p = subprocess.Popen(printerArgs, stdin=subprocess.PIPE)
        fp = p.stdin
    else:
        fp = sys.stdout
    ps = PSDraw.PSDraw(fp)
    ps.begin_document()
    ps.setfont("Helvetica-Narrow-Bold", 18)
    ps.text((letter[0], letter[3] + 24), title)
    ps.setfont("Helvetica-Narrow-Bold", 8)
    ps.text((letter[0], letter[1] - 30), VERSION)
    ps.image(letter, im)
    ps.end_document()
    if printerArgs:
        fp.close()
except:
    print("cannot print image", end=' ')
    print("(%s:%s)" % (sys.exc_info()[0], sys.exc_info()[1]))
def receiveMessage(self, snmpEngine, transportDomain, transportAddress, wholeMsg):
    """Message dispatcher -- de-serialize message into PDU"""
    # 4.2.1.1
    snmpInPkts, = self.mibInstrumController.mibBuilder.importSymbols(
        '__SNMPv2-MIB', 'snmpInPkts'
    )
    snmpInPkts.syntax += 1

    # 4.2.1.2
    try:
        restOfWholeMsg = null  # XXX fix decoder non-recursive return
        msgVersion = verdec.decodeMessageVersion(wholeMsg)
    except error.ProtocolError:
        snmpInASNParseErrs, = self.mibInstrumController.mibBuilder.importSymbols(
            '__SNMPv2-MIB', 'snmpInASNParseErrs')
        snmpInASNParseErrs.syntax += 1
        return null  # n.b the whole buffer gets dropped

    debug.logger & debug.flagDsp and debug.logger(
        'receiveMessage: msgVersion %s, msg decoded' % msgVersion)

    messageProcessingModel = msgVersion
    try:
        mpHandler = snmpEngine.messageProcessingSubsystems[int(messageProcessingModel)]
    except KeyError:
        snmpInBadVersions, = self.mibInstrumController.mibBuilder.importSymbols(
            '__SNMPv2-MIB', 'snmpInBadVersions')
        snmpInBadVersions.syntax += 1
        return restOfWholeMsg

    # 4.2.1.3 -- no-op

    # 4.2.1.4
    try:
        (messageProcessingModel, securityModel, securityName,
         securityLevel, contextEngineId, contextName, pduVersion,
         PDU, pduType, sendPduHandle, maxSizeResponseScopedPDU,
         statusInformation, stateReference) = mpHandler.prepareDataElements(
            snmpEngine, transportDomain, transportAddress, wholeMsg
        )
        debug.logger & debug.flagDsp and debug.logger('receiveMessage: MP succeeded')
    except error.StatusInformation:
        statusInformation = sys.exc_info()[1]
        if 'sendPduHandle' in statusInformation:
            # Dropped REPORT -- re-run pending reqs queue as some
            # of them may be waiting for this REPORT
            debug.logger & debug.flagDsp and debug.logger(
                'receiveMessage: MP failed, statusInformation %s, forcing a retry' % statusInformation)
            self.__expireRequest(
                statusInformation['sendPduHandle'],
                self.__cache.pop(statusInformation['sendPduHandle']),
                snmpEngine,
                statusInformation
            )
        return restOfWholeMsg
    except PyAsn1Error:
        debug.logger & debug.flagMP and debug.logger(
            'receiveMessage: %s' % (sys.exc_info()[1],))
        snmpInASNParseErrs, = snmpEngine.msgAndPduDsp.mibInstrumController.mibBuilder.importSymbols(
            '__SNMPv2-MIB', 'snmpInASNParseErrs')
        snmpInASNParseErrs.syntax += 1
        return restOfWholeMsg

    debug.logger & debug.flagDsp and debug.logger('receiveMessage: PDU %s' % PDU.prettyPrint())

    # 4.2.2
    if sendPduHandle is None:
        # 4.2.2.1 (request or notification)
        debug.logger & debug.flagDsp and debug.logger('receiveMessage: pduType %s' % pduType)
        # 4.2.2.1.1
        processPdu = self.getRegisteredApp(contextEngineId, pduType)

        # 4.2.2.1.2
        if processPdu is None:
            # 4.2.2.1.2.a
            snmpUnknownPDUHandlers, = self.mibInstrumController.mibBuilder.importSymbols(
                '__SNMP-MPD-MIB', 'snmpUnknownPDUHandlers')
            snmpUnknownPDUHandlers.syntax += 1

            # 4.2.2.1.2.b
            statusInformation = {
                'errorIndication': errind.unknownPDUHandler,
                'oid': snmpUnknownPDUHandlers.name,
                'val': snmpUnknownPDUHandlers.syntax
            }

            debug.logger & debug.flagDsp and debug.logger('receiveMessage: unhandled PDU type')

            # 4.2.2.1.2.c
            try:
                (destTransportDomain, destTransportAddress,
                 outgoingMessage) = mpHandler.prepareResponseMessage(
                    snmpEngine, messageProcessingModel, securityModel,
                    securityName, securityLevel, contextEngineId,
                    contextName, pduVersion, PDU, maxSizeResponseScopedPDU,
                    stateReference, statusInformation
                )
                snmpEngine.transportDispatcher.sendMessage(
                    outgoingMessage, destTransportDomain, destTransportAddress
                )
            except PySnmpError:
                debug.logger & debug.flagDsp and debug.logger(
                    'receiveMessage: report failed, statusInformation %s' % sys.exc_info()[1])
            else:
                debug.logger & debug.flagDsp and debug.logger('receiveMessage: reporting succeeded')

            # 4.2.2.1.2.d
            return restOfWholeMsg
        else:
            snmpEngine.observer.storeExecutionContext(
                snmpEngine, 'rfc3412.receiveMessage:request',
                dict(transportDomain=transportDomain,
                     transportAddress=transportAddress,
                     wholeMsg=wholeMsg,
                     messageProcessingModel=messageProcessingModel,
                     securityModel=securityModel,
                     securityName=securityName,
                     securityLevel=securityLevel,
                     contextEngineId=contextEngineId,
                     contextName=contextName,
                     pdu=PDU)
            )

            # pass transport info to app (legacy)
            if stateReference is not None:
                self.__transportInfo[stateReference] = (
                    transportDomain, transportAddress
                )

            # 4.2.2.1.3
            processPdu(snmpEngine, messageProcessingModel, securityModel,
                       securityName, securityLevel, contextEngineId,
                       contextName, pduVersion, PDU,
                       maxSizeResponseScopedPDU, stateReference)

            snmpEngine.observer.clearExecutionContext(
                snmpEngine, 'rfc3412.receiveMessage:request'
            )

            # legacy
            if stateReference is not None:
                del self.__transportInfo[stateReference]

            debug.logger & debug.flagDsp and debug.logger('receiveMessage: processPdu succeeded')
            return restOfWholeMsg
    else:
        # 4.2.2.2 (response)
        # 4.2.2.2.1
        cachedParams = self.__cache.pop(sendPduHandle)

        # 4.2.2.2.2
        if cachedParams is None:
            snmpUnknownPDUHandlers, = self.mibInstrumController.mibBuilder.importSymbols(
                '__SNMP-MPD-MIB', 'snmpUnknownPDUHandlers')
            snmpUnknownPDUHandlers.syntax += 1
            return restOfWholeMsg

        debug.logger & debug.flagDsp and debug.logger(
            'receiveMessage: cache read by sendPduHandle %s' % sendPduHandle)

        # 4.2.2.2.3
        # no-op ? XXX

        snmpEngine.observer.storeExecutionContext(
            snmpEngine, 'rfc3412.receiveMessage:response',
            dict(transportDomain=transportDomain,
                 transportAddress=transportAddress,
                 wholeMsg=wholeMsg,
                 messageProcessingModel=messageProcessingModel,
                 securityModel=securityModel,
                 securityName=securityName,
                 securityLevel=securityLevel,
                 contextEngineId=contextEngineId,
                 contextName=contextName,
                 pdu=PDU)
        )

        # 4.2.2.2.4
        processResponsePdu = cachedParams['cbFun']

        processResponsePdu(snmpEngine, messageProcessingModel,
                           securityModel, securityName, securityLevel,
                           contextEngineId, contextName, pduVersion,
                           PDU, statusInformation,
                           cachedParams['sendPduHandle'],
                           cachedParams['cbCtx'])

        snmpEngine.observer.clearExecutionContext(
            snmpEngine, 'rfc3412.receiveMessage:response'
        )

        debug.logger & debug.flagDsp and debug.logger(
            'receiveMessage: processResponsePdu succeeded')
        return restOfWholeMsg
def write_fic_to_csv(fandom, fic_id, only_first_chap,
                     storywriter, chapterwriter, errorwriter,
                     storycolumns, chaptercolumns,
                     header_info='', output_dirpath='', write_whole_fics=False):
    '''
    fandom is the grouping that determines filenames etc.
    fic_id is the AO3 ID of a fic, found in every URL /works/[id].
    writer is a csv writer object.
    the output of this program is a row in the CSV file containing all
    metadata and the fic content itself.
    header_info should be the header info to encourage ethical scraping.
    write_whole_fics: whether to write whole fic output (True) or, by
    default (False), write separate files per chapter.
    '''
    tqdm.write('Scraping {}'.format(fic_id))
    get_comments = True
    url = 'http://archiveofourown.org/works/' + str(fic_id) + '?view_adult=true'
    if not only_first_chap:
        url = url + '&view_full_work=true'
    if get_comments:
        url = url + '&show_comments=true'
    headers = {'user-agent': header_info}

    src = robust_get(url, headers)
    soup = BeautifulSoup(src, 'lxml')
    if (access_denied(soup)):
        print('Access Denied')
        open("err_" + str(fic_id) + ".err.txt", "w").write(src)
        error_row = [fic_id] + ['Access Denied']
        errorwriter.writerow(error_row)
    else:
        meta = soup.find("dl", class_="work meta group")
        (series, seriespart, seriesid) = get_series(meta)
        tags = get_tags(meta)
        stats = get_stats(meta)
        title = unidecode(soup.find("h2", class_="title heading").string).strip()
        author = unidecode(soup.find(class_="byline").text).strip()
        try:
            href = soup.find(class_="byline").find("a")["href"]
            author_key = href.split("/")[2]
            author_pseudo = href.split("/")[4]
        except Exception as e:
            print('Unexpected error getting authorship: ', sys.exc_info()[0])
            error_row = [fic_id] + [sys.exc_info()[0]]
            errorwriter.writerow(error_row)
            author_key = author
            author_pseudo = author

        # get the fic itself
        content = soup.find("div", id="chapters")
        chapnodes = content.findAll("div", id=re.compile('^chapter-'))
        if len(chapnodes) == 0:
            chapnodes = soup.findAll("div", id="chapters")
        chapter_titles = [t.h3.text.strip() for t in chapnodes]
        chapters = [ch.find("div", class_="userstuff") for ch in chapnodes]

        st_summary = ""
        st_preface_notes = ""
        st_afterword_notes = ""
        for preface in soup.find_all("div", class_="preface"):
            if "afterword" in preface.attrs['class']:
                try:
                    st_afterword_notes = into_text(preface.find("blockquote"))
                except:
                    pass
            elif "chapter" not in preface.attrs['class']:
                try:
                    st_preface_notes = into_text(
                        preface.find("div", class_="notes").find("blockquote"))
                except:
                    pass
                try:
                    st_summary = into_text(
                        preface.find("div", class_="summary").find("blockquote"))
                except:
                    pass

        strow = {"fic_id": fic_id,
                 "title": title.encode("utf-8"),
                 "summary": st_summary.encode("utf-8"),
                 "preface_notes": st_preface_notes.encode("utf-8"),
                 "afterword_notes": st_afterword_notes.encode("utf-8"),
                 "series": series,
                 "seriespart": seriespart,
                 "seriesid": seriesid,
                 "author": author_pseudo.encode("utf-8"),
                 "author_key": author_key.encode("utf-8"),
                 "additional tags": tags["freeform"],
                 "chapter_count": len(chapters)}
        strow = dict(strow, **tags)
        strow = dict(strow, **stats)
        storywriter.writerow(
            [maybe_json(strow.get(k, "null")) for k in storycolumns])

        # get div class=notes under div class=preface, and under div class=afterword; class-level notes
        # get div class=summary under div class=preface
        outlines = []
        for ch, chall in enumerate(chapters):
            chapter_title = chapter_titles[ch]
            paras = [t.text if type(t) is bs4.element.Tag else t
                     for t in into_chunks(chall)]
            paras = [t.strip() for t in paras
                     if len(t.strip()) > 0 and t.strip() != "Chapter Text"]

            ch_preface_notes = ""
            ch_summary = ""
            ch_afterword_notes = ""
            chapnode = chapnodes[ch]
            try:
                ch_summary = into_text(
                    chapnode.find("div", class_="preface").find(
                        "div", id="summary").find("blockquote"))
            except:
                pass
            try:
                ch_preface_notes = into_text(
                    chapnode.find("div", class_="preface").find(
                        "div", id="notes").find("blockquote"))
            except:
                pass
            try:
                ch_afterword_notes = into_text(
                    chapnode.find("div", class_="end").find("blockquote"))
            except:
                pass
            # div class=end notes --> id=notes

            chrow = {"fic_id": fic_id,
                     "title": title,
                     "summary": ch_summary,
                     "preface_notes": ch_preface_notes,
                     "afterword_notes": ch_afterword_notes,
                     "chapter_num": str(ch + 1),
                     "chapter_title": chapter_title,
                     "paragraph_count": len(paras)}
            chapterwriter.writerow(
                [chrow.get(k, "null") for k in chaptercolumns])

            if not write_whole_fics:
                content_out = csv.writer(
                    open(contentfile(output_dirpath, fandom, fic_id, ch + 1), "w"))
                content_out.writerow(['fic_id', 'chapter_id', 'para_id', 'text'])
                for pn, para in enumerate(paras):
                    try:
                        content_out.writerow([fic_id, ch + 1, pn + 1, para])
                    except:
                        print('Unexpected error: ', sys.exc_info()[0])
                        pdb.set_trace()
                        error_row = [fic_id] + [sys.exc_info()[0]]
                        errorwriter.writerow(error_row)
                content_out = None
            else:
                # will write out whole fic at once
                for pn, para in enumerate(paras):
                    outlines.append([fic_id, ch + 1, pn + 1, para])

        if write_whole_fics:
            content_out = csv.writer(
                open(contentfile(output_dirpath, fandom, fic_id, None), "w"))
            content_out.writerow(['fic_id', 'chapter_id', 'para_id', 'text'])
            for line in outlines:
                content_out.writerow(line)

    tqdm.write('Done.')
    tqdm.write(' ')
class Rectangle(Rectangle):
    \"\"\" Random documentation \"\"\"
    def __init__(self, width, height, x=0, y=0, id=None):
        \"\"\" Random documentation \"\"\"
        if x == 0 and y == 0 and id is None:
            width, height = height, width
        super().__init__(width, height, x, y, id)
"""
    with open(file_path_to_update, "w") as file:
        file.write(new_content)

    # run tests
    nb_tests, passing = run_unittest()
    if nb_tests <= 0:
        print("No test found")
    if passing:
        print("No test found for this case")
except:
    print("An error occurred... {}".format(sys.exc_info()[0]))

# rollback file
if os.path.exists(file_path_updated):
    if os.path.exists(file_path_to_update):
        os.remove(file_path_to_update)
    os.rename(file_path_updated, file_path_to_update)
print("OK", end="")
from testtools.matchers import (
    AfterPreprocessing,
    Equals,
    MatchesDict,
    MatchesListwise,
)
from testtools import runtest

# Importing to preserve compatibility.
safe_hasattr

# GZ 2010-08-12: Don't do this, pointlessly creates an exc_info cycle
try:
    raise Exception
except Exception:
    an_exc_info = sys.exc_info()


# Deprecated: This class's attributes are somewhat non-deterministic, which
# leads to hard-to-predict tests (because Python upstream keeps changing things).
class LoggingResult(TestResult):
    """TestResult that logs its event to a list."""

    def __init__(self, log):
        self._events = log
        super().__init__()

    def startTest(self, test):
        self._events.append(('startTest', test))
        super().startTest(test)

    def stop(self):
def get_users(self) -> List[UserAccessMeta]:
    try:
        return [self._from_db(doc) for doc in self._auth_collection.find()]
    except:
        logging.error("Get Users Error: %s", sys.exc_info()[1])
        return []
def process_recognition_failure(self):
    try:
        self.client.setRecognitionState('failure')
    except Exception as e:
        exc_type, exc_value, exc_traceback = sys.exc_info()
        log.error(''.join(traceback.format_exception(exc_type, exc_value, exc_traceback)))
for line in filetoread:
    gps = GPSString(line)
    if verbose >= 3:
        gps.debug = verbose
    try:
        gps.identify()  # populates gps.id
    except NotImplementedError:
        if verbose >= 1:
            sys.stderr.write('Unrecognized NMEA string: %s\n' % gps.msg)
        continue
    except:
        eprint("Unexpected error:", sys.exc_info()[0])
        raise
    if gps.debug:
        print('String Type: ' + gps.id)

    # Only handle the string type specified
    if gps.id != stringtype:
        continue

    # Since GPS NMEA strings have no date, we have to create one.
    # If the data is timestamped with an ISO format time, then use that.
    # If not, use the system time.
    # This will die silently if there is no pc timestamp or if it is of an unsupported type.
    PCtime = gps.stripisotime()
def _eventLoop(self):
    try:
        self.eventLoop()
    except Exception as e:
        exc_type, exc_value, exc_traceback = sys.exc_info()
        log.error(''.join(traceback.format_exception(exc_type, exc_value, exc_traceback)))
def process_recognition_other(self, words):
    try:
        self.client.setRecognitionState('success')
    except Exception as e:
        exc_type, exc_value, exc_traceback = sys.exc_info()
        log.error(''.join(traceback.format_exception(exc_type, exc_value, exc_traceback)))
    elif int(search_term) not in graph_dict:
        return 'not valid'
    input_tweet = int(search_term)
    # compute outside of compute_sim b/c it's used as input in get_tweets
    low = compute_sim(input_tweet, graph_dict, tweet_ids, hashtag_ids, scores)
    if low == 'no similar':
        return 'no similar'
    low_tweets = get_tweets(graph_dict, input_tweet, tweet_ids, low)
    return low_tweets


def compute_sim_hashtags(search_term, graph_dict, tweet_ids, hashtag_ids, scores):
    if search_term.isdigit() == False:
        return 'not valid'
    elif int(search_term) not in graph_dict:
        return 'not valid'
    input_tweet = int(search_term)
    low = compute_sim(input_tweet, graph_dict, tweet_ids, hashtag_ids, scores)
    if low == 'no similar':
        return 'no similar'
    input_tweet_hashtags = graph_dict[input_tweet]
    low_hashtags = get_hashtags(graph_dict, input_tweet_hashtags, hashtag_ids, input_tweet, low)
    return low_hashtags


if __name__ == '__main__':
    try:
        main()
    except:
        type, value, tb = sys.exc_info()
        traceback.print_exc()
        pdb.post_mortem(tb)
def _process_begin(self, executable, title, handle):
    try:
        self.client.setRecognitionState('thinking')
    except Exception as e:
        exc_type, exc_value, exc_traceback = sys.exc_info()
        log.error(''.join(traceback.format_exception(exc_type, exc_value, exc_traceback)))
"description": description, "images" : images, "videos": videos, } products_array.append(item) print("Scraped " + str(counter) + " of " + str( len( item_urls ) ) ) counter += 1 except Exception as err: skipped_urls.append(url) print("============================================") print("SKIPPED #{} of {}".format( str(counter), str( len(item_urls) ) ) ) print("URL: {}".format(url)) print("Error:") print("line: " + str(sys.exc_info()[-1].tb_lineno) ) print(err) print("============================================") counter +=1 # -------------------------------------------------------------------------------- # SERIALIZE THE PRODUCT DATA: # -------------------------------------------------------------------------------- # Make dictionary from array for subsequent steps product_dictionary = dict() [product_dictionary.update({item['sku']:item}) for item in products_array] # -------------------------------------------------------------------------------- # COMPARE MISSING / ADDED ITEMS AND LOG THEM:
def _execute(self, data=None):
    try:
        self.dclient.onMatch(self.grammarString, data)
    except Exception as e:
        exc_type, exc_value, exc_traceback = sys.exc_info()
        log.error(''.join(traceback.format_exception(exc_type, exc_value, exc_traceback)))
    'parse_uint': ["0", "1234567890987654321hehehey", "1"],
    'parse_int': [
        "0", "1234567890987654321hehehey", "-1dasda", "-eedea",
        "-123123123", "1"
    ],
    'string_equals': ['ashdb asdhabs dahb', ' ', '', "asd"],
    'string_equals not equals': ['ashdb asdhabs dahb', ' ', '', "asd"]
}

if __name__ == "__main__":
    found_error = False
    for t in tests:
        for arg in inputs[t.name]:
            if not found_error:
                try:
                    print '  testing', t.name, 'on "' + arg + '"'
                    res = t.perform(arg)
                    if res:
                        print '  [', colored(' ok ', 'green'), ']'
                    else:
                        print '* [ ', colored('fail', 'red'), ']'
                        found_error = True
                except:
                    print '* [ ', colored('fail', 'red'), '] with exception', sys.exc_info()[0]
                    found_error = True
    if found_error:
        print 'Not all tests have been passed'
    else:
        print colored("Good work, all tests are passed", 'green')
def trans_config(filename, cv=None, done_list=None, config_type=1):
    """Generate the corresponding config from the matching data sheets."""
    rs = []
    xl = openpyxl.load_workbook(filename=filename)  # xlsx only; use_iterators=True
    import game_config
    config_name_list_raw = game_config.config_name_list
    config_name_list = zip([i[0] for i in config_name_list_raw if i[3]],
                           [i[4] for i in config_name_list_raw if i[3]])
    done_list = done_list or []
    cv = cv or ConfigVersionManager.get_config_version_obj(config_type=config_type)
    need_private_city_test = False
    for sheet in xl.worksheets:
        config_name = ''
        sheet_title = sheet.title.strip()
        if 'map' in sheet_title or 'title_detail' in sheet_title:
            need_private_city_test = True
        # special-case handling
        tt = sheet_title.split('_')
        if len(tt) == 2 and tt[1].isdigit():
            if tt[0] == 'map':
                upload_map_status = upload_map(tt[1], sheet, config_type=config_type)
                if upload_map_status:
                    cv.update_single('map', hex_version=upload_map_status)
                continue
            elif tt[0] == 'middle':
                upload_middle_map_data_status = upload_middle_map_data(
                    tt[1], sheet, config_type=config_type)
                if upload_middle_map_data_status:
                    cv.update_single('middle_map_data',
                                     hex_version=upload_middle_map_data_status)
                done_list.append(sheet_title)
                continue
        # handle box_reward_ sheets
        if len(tt) == 3 and '%s_%s' % (tt[0], tt[1]) == 'box_reward':
            upload_box_reward_status = upload_box_reward(
                int(tt[2]), sheet, config_type=config_type)
            if upload_box_reward_status:
                cv.update_single('box_reward_new', hex_version=upload_box_reward_status)
            done_list.append(sheet_title)
            continue
        # end of special-case handling
        for i in config_name_list:
            if sheet_title in i:
                config_name = i[0]
        if not config_name:
            continue
        if config_name == 'guide':
            upload_guide_status = upload_guide(config_name, sheet, config_type=config_type)
            if upload_guide_status:
                cv.update_single(config_name, hex_version=upload_guide_status)
            done_list.append(sheet_title)
            continue
        data = xls_convert.to_pyobj(sheet)
        # run content logic checks against the labels declared in config_templates.
        # sheets containing 'guide', 'middle', 'map', 'box_reward' are preprocessed
        # before this function receives its data; to check those sheets, read the
        # preprocessing code first to avoid potential conflicts.
        has_bug = content_logic_check(config_name, sheet_title, data)
        if has_bug:
            return has_bug, 'bug', need_private_city_test
        try:
            str_config = xls_convert.to_config_string(config_name, data)
        except Exception, e:
            etype, value, tb = sys.exc_info()
            line = traceback.format_exception_only(etype, value)
            raise KeyError("table=", sheet_title, line)
        try:
            d = eval(str_config)
        except Exception, e:
            file_dir = os.path.join(_settings.BASE_ROOT, 'logs')
            if not os.path.exists(file_dir):
                os.makedirs(file_dir)
            error_file = os.path.join(file_dir, 'config_error.py')
            with open(error_file, 'w') as f:
                f.write('# encoding: utf-8 \n')
                f.write('# config_name: %s \n' % config_name)
                f.write(str_config.encode('utf-8'))
            traceback.print_exc()
            raise e
def StartRun(self, request, _context):
    if self._shutdown_once_executions_finish_event.is_set():
        return api_pb2.StartRunReply(
            serialized_start_run_result=serialize_dagster_namedtuple(
                StartRunResult(
                    success=False,
                    message="Tried to start a run on a server after telling it to shut down",
                    serializable_error_info=None,
                )))

    try:
        execute_run_args = check.inst(
            deserialize_json_to_dagster_namedtuple(request.serialized_execute_run_args),
            ExecuteExternalPipelineArgs,
        )
        run_id = execute_run_args.pipeline_run_id
        recon_pipeline = self._recon_pipeline_from_origin(execute_run_args.pipeline_origin)
    except:  # pylint: disable=bare-except
        return api_pb2.StartRunReply(
            serialized_start_run_result=serialize_dagster_namedtuple(
                StartRunResult(
                    success=False,
                    message=None,
                    serializable_error_info=serializable_error_info_from_exc_info(
                        sys.exc_info()),
                )))

    event_queue = multiprocessing.Queue()
    termination_event = multiprocessing.Event()
    execution_process = multiprocessing.Process(
        target=start_run_in_subprocess,
        args=[
            request.serialized_execute_run_args,
            recon_pipeline,
            event_queue,
            termination_event,
        ],
    )

    with self._execution_lock:
        execution_process.start()
        self._executions[run_id] = (
            execution_process,
            execute_run_args.instance_ref,
        )
        self._termination_events[run_id] = termination_event

    success = None
    message = None
    serializable_error_info = None
    while success is None:
        time.sleep(EVENT_QUEUE_POLL_INTERVAL)
        # We use `get_nowait()` instead of `get()` so that we can handle the case where the
        # execution process has died unexpectedly -- `get()` would hang forever in that case
        try:
            dagster_event_or_ipc_error_message_or_done = event_queue.get_nowait()
        except queue.Empty:
            if not execution_process.is_alive():
                # subprocess died unexpectedly
                success = False
                message = (
                    "GRPC server: Subprocess for {run_id} terminated unexpectedly with "
                    "exit code {exit_code}".format(
                        run_id=run_id,
                        exit_code=execution_process.exitcode,
                    ))
                serializable_error_info = serializable_error_info_from_exc_info(
                    sys.exc_info())
        else:
            if isinstance(dagster_event_or_ipc_error_message_or_done,
                          StartRunInSubprocessSuccessful):
                success = True
            elif isinstance(dagster_event_or_ipc_error_message_or_done,
                            RunInSubprocessComplete):
                continue
            if isinstance(dagster_event_or_ipc_error_message_or_done, IPCErrorMessage):
                success = False
                message = dagster_event_or_ipc_error_message_or_done.message
                serializable_error_info = (
                    dagster_event_or_ipc_error_message_or_done.serializable_error_info)

    # Ensure that if the run failed, we remove it from the executions map before
    # returning so that CanCancel will never return True
    if not success:
        with self._execution_lock:
            self._clear_run(run_id)

    return api_pb2.StartRunReply(
        serialized_start_run_result=serialize_dagster_namedtuple(
            StartRunResult(
                success=success,
                message=message,
                serializable_error_info=serializable_error_info,
            )))
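# Minimal sketch of the liveness-aware polling loop in StartRun:
# get_nowait() instead of get() lets the caller notice a worker that died
# without posting a result, rather than blocking forever. poll_events and
# the RuntimeError message are illustrative; `process` is assumed to be a
# multiprocessing.Process.
import queue
import time

def poll_events(event_queue, process, interval=0.1):
    while True:
        try:
            return event_queue.get_nowait()
        except queue.Empty:
            if not process.is_alive():
                raise RuntimeError(
                    "worker exited unexpectedly with code %s" % process.exitcode)
            time.sleep(interval)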
def urlopen(self, method, url, body=None, headers=None, retries=None,
            redirect=True, assert_same_host=True, timeout=_Default,
            pool_timeout=None, release_conn=None, chunked=False,
            body_pos=None, **response_kw):
    """
    Get a connection from the pool and perform an HTTP request. This is the
    lowest level call for making a request, so you'll need to specify all
    the raw details.

    .. note::

       More commonly, it's appropriate to use a convenience method provided
       by :class:`.RequestMethods`, such as :meth:`request`.

    .. note::

       `release_conn` will only behave as expected if
       `preload_content=False` because we want to make
       `preload_content=False` the default behaviour someday soon without
       breaking backwards compatibility.

    :param method:
        HTTP request method (such as GET, POST, PUT, etc.)

    :param body:
        Data to send in the request body (useful for creating POST
        requests, see HTTPConnectionPool.post_url for more convenience).

    :param headers:
        Dictionary of custom headers to send, such as User-Agent,
        If-None-Match, etc. If None, pool headers are used. If provided,
        these headers completely replace any pool-specific headers.

    :param retries:
        Configure the number of retries to allow before raising a
        :class:`~urllib3.exceptions.MaxRetryError` exception.

        Pass ``None`` to retry until you receive a response. Pass a
        :class:`~urllib3.util.retry.Retry` object for fine-grained control
        over different types of retries. Pass an integer number to retry
        connection errors that many times, but no other types of errors.
        Pass zero to never retry.

        If ``False``, then retries are disabled and any exception is raised
        immediately. Also, instead of raising a MaxRetryError on redirects,
        the redirect response will be returned.

    :type retries: :class:`~urllib3.util.retry.Retry`, False, or an int.

    :param redirect:
        If True, automatically handle redirects (status codes 301, 302,
        303, 307, 308). Each redirect counts as a retry. Disabling retries
        will disable redirect, too.

    :param assert_same_host:
        If ``True``, will make sure that the host of the pool requests is
        consistent else will raise HostChangedError. When False, you can
        use the pool on an HTTP proxy and request foreign hosts.

    :param timeout:
        If specified, overrides the default timeout for this one request.
        It may be a float (in seconds) or an instance of
        :class:`urllib3.util.Timeout`.

    :param pool_timeout:
        If set and the pool is set to block=True, then this method will
        block for ``pool_timeout`` seconds and raise EmptyPoolError if no
        connection is available within the time period.

    :param release_conn:
        If False, then the urlopen call will not release the connection
        back into the pool once a response is received (but will release if
        you read the entire contents of the response such as when
        `preload_content=True`). This is useful if you're not preloading
        the response's content immediately. You will need to call
        ``r.release_conn()`` on the response ``r`` to return the connection
        back into the pool. If None, it takes the value of
        ``response_kw.get('preload_content', True)``.

    :param chunked:
        If True, urllib3 will send the body using chunked transfer
        encoding. Otherwise, urllib3 will send the body using the standard
        content-length form. Defaults to False.

    :param int body_pos:
        Position to seek to in file-like body in the event of a retry or
        redirect. Typically this won't need to be set because urllib3 will
        auto-populate the value when needed.
    :param \\**response_kw:
        Additional parameters are passed to
        :meth:`urllib3.response.HTTPResponse.from_httplib`
    """
    if headers is None:
        headers = self.headers

    if not isinstance(retries, Retry):
        retries = Retry.from_int(retries, redirect=redirect, default=self.retries)

    if release_conn is None:
        release_conn = response_kw.get('preload_content', True)

    # Check host
    if assert_same_host and not self.is_same_host(url):
        raise HostChangedError(self, url, retries)

    conn = None

    # Track whether `conn` needs to be released before
    # returning/raising/recursing. Update this variable if necessary, and
    # leave `release_conn` constant throughout the function. That way, if
    # the function recurses, the original value of `release_conn` will be
    # passed down into the recursive call, and its value will be respected.
    #
    # See issue #651 [1] for details.
    #
    # [1] <https://github.com/shazow/urllib3/issues/651>
    release_this_conn = release_conn

    # Merge the proxy headers. Only do this in HTTP. We have to copy the
    # headers dict so we can safely change it without those changes being
    # reflected in anyone else's copy.
    if self.scheme == 'http':
        headers = headers.copy()
        headers.update(self.proxy_headers)

    # Must keep the exception bound to a separate variable or else Python 3
    # complains about UnboundLocalError.
    err = None

    # Keep track of whether we cleanly exited the except block. This
    # ensures we do proper cleanup in finally.
    clean_exit = False

    # Rewind body position, if needed. Record current position
    # for future rewinds in the event of a redirect/retry.
    body_pos = set_file_position(body, body_pos)

    try:
        # Request a connection from the queue.
        timeout_obj = self._get_timeout(timeout)
        conn = self._get_conn(timeout=pool_timeout)

        conn.timeout = timeout_obj.connect_timeout

        is_new_proxy_conn = self.proxy is not None and not getattr(conn, 'sock', None)
        if is_new_proxy_conn:
            self._prepare_proxy(conn)

        # Make the request on the httplib connection object.
        httplib_response = self._make_request(conn, method, url,
                                              timeout=timeout_obj,
                                              body=body, headers=headers,
                                              chunked=chunked)

        # If we're going to release the connection in ``finally:``, then
        # the response doesn't need to know about the connection. Otherwise
        # it will also try to release it and we'll have a double-release
        # mess.
        response_conn = conn if not release_conn else None

        # Pass method to Response for length checking
        response_kw['request_method'] = method

        # Import httplib's response into our own wrapper object
        response = self.ResponseCls.from_httplib(httplib_response,
                                                 pool=self,
                                                 connection=response_conn,
                                                 retries=retries,
                                                 **response_kw)

        # Everything went great!
        clean_exit = True

    except queue.Empty:
        # Timed out by queue.
        raise EmptyPoolError(self, "No pool connections are available.")

    except (TimeoutError, HTTPException, SocketError, ProtocolError,
            BaseSSLError, SSLError, CertificateError) as e:
        # Discard the connection for these exceptions. It will be
        # replaced during the next _get_conn() call.
        clean_exit = False
        if isinstance(e, (BaseSSLError, CertificateError)):
            e = SSLError(e)
        elif isinstance(e, (SocketError, NewConnectionError)) and self.proxy:
            e = ProxyError('Cannot connect to proxy.', e)
        elif isinstance(e, (SocketError, HTTPException)):
            e = ProtocolError('Connection aborted.', e)

        retries = retries.increment(method, url, error=e, _pool=self,
                                    _stacktrace=sys.exc_info()[2])
        retries.sleep()

        # Keep track of the error for the retry warning.
        err = e

    finally:
        if not clean_exit:
            # We hit some kind of exception, handled or otherwise.
            # We need to throw the connection away unless explicitly told
            # not to. Close the connection, set the variable to None, and
            # make sure we put the None back in the pool to avoid leaking
            # it.
            conn = conn and conn.close()
            release_this_conn = True

        if release_this_conn:
            # Put the connection back to be reused. If the connection is
            # expired then it will be None, which will get replaced with a
            # fresh connection during _get_conn.
            self._put_conn(conn)

    if not conn:
        # Try again
        log.warning("Retrying (%r) after connection broken by '%r': %s",
                    retries, err, url)
        return self.urlopen(method, url, body, headers, retries,
                            redirect, assert_same_host,
                            timeout=timeout, pool_timeout=pool_timeout,
                            release_conn=release_conn, body_pos=body_pos,
                            **response_kw)

    def drain_and_release_conn(response):
        try:
            # discard any remaining response body, the connection will be
            # released back to the pool once the entire response is read
            response.read()
        except (TimeoutError, HTTPException, SocketError, ProtocolError,
                BaseSSLError, SSLError):
            pass

    # Handle redirect?
    redirect_location = redirect and response.get_redirect_location()
    if redirect_location:
        if response.status == 303:
            method = 'GET'

        try:
            retries = retries.increment(method, url, response=response, _pool=self)
        except MaxRetryError:
            if retries.raise_on_redirect:
                # Drain and release the connection for this response, since
                # we're not returning it to be released manually.
                drain_and_release_conn(response)
                raise
            return response

        # drain and return the connection to the pool before recursing
        drain_and_release_conn(response)

        retries.sleep_for_retry(response)
        log.debug("Redirecting %s -> %s", url, redirect_location)
        return self.urlopen(
            method, redirect_location, body, headers,
            retries=retries, redirect=redirect,
            assert_same_host=assert_same_host,
            timeout=timeout, pool_timeout=pool_timeout,
            release_conn=release_conn, body_pos=body_pos,
            **response_kw)

    # Check if we should retry the HTTP response.
    has_retry_after = bool(response.getheader('Retry-After'))
    if retries.is_retry(method, response.status, has_retry_after):
        try:
            retries = retries.increment(method, url, response=response, _pool=self)
        except MaxRetryError:
            if retries.raise_on_status:
                # Drain and release the connection for this response, since
                # we're not returning it to be released manually.
                drain_and_release_conn(response)
                raise
            return response

        # drain and return the connection to the pool before recursing
        drain_and_release_conn(response)

        retries.sleep(response)
        log.debug("Retry: %s", url)
        return self.urlopen(
            method, url, body, headers,
            retries=retries, redirect=redirect,
            assert_same_host=assert_same_host,
            timeout=timeout, pool_timeout=pool_timeout,
            release_conn=release_conn, body_pos=body_pos,
            **response_kw)

    return response
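# Sketch of why urlopen threads _stacktrace=sys.exc_info()[2] into
# retries.increment: keeping the traceback object around lets the final
# failure, raised after several attempts, still point at the frame that
# actually raised. with_retries is a hypothetical helper (Python 3).
import sys

def with_retries(fn, attempts=3):
    last = None
    for _ in range(attempts):
        try:
            return fn()
        except Exception as e:
            # remember the exception together with its traceback object
            last = (e, sys.exc_info()[2])
    raise last[0].with_traceback(last[1])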
def award_summaries_to_html(self, awardsdf):
    soup = BeautifulSoup("", "html.parser")
    table = Tag(soup, name="table")
    table["class"] = "blueTable"
    table["id"] = "divawardsum"
    table["style"] = "text-align: left;"
    columns = ["Award", "Players", "Pep talk"]
    tr = Tag(soup, name="tr")
    table.append(tr)
    for col in columns:
        th = Tag(soup, name="th")
        tr.append(th)
        th.append(col)
    rank_cols, ranked_cols, unranked_cols, inverse_ranked_cols = \
        self.awards.ranked_column_types()
    #print(rank_cols, ranked_cols, unranked_cols, inverse_ranked_cols)
    for col in rank_cols:
        col_value = col.replace("_rank", "")
        if col in inverse_ranked_cols:
            if awardsdf[col].max() == 0:
                continue  # do nothing if no one achieved anything
            result = awardsdf.loc[
                awardsdf[awardsdf[col] == awardsdf[col].max()].index, col_value]
        elif col in ranked_cols:
            if awardsdf[col].min() == 5:
                continue  # do nothing if no one achieved anything
            result = awardsdf.loc[
                awardsdf[awardsdf[col] == awardsdf[col].min()].index, col_value]
        elif col in unranked_cols:
            result = awardsdf.loc[
                awardsdf[col_value] == awardsdf[col_value].max(), col_value]
        elif col == "RankPts_rank":
            pass  # we'll give it a pass this time
        else:
            print("[!] Warning: something left over in awards table. "
                  "Add it to HTMLReport.ranked_column_types and awards_order: " + col)
        try:
            if col_value in ["Rounds"]:
                pass  # do not award "Nothing"
            else:
                #content += self.award_info.awards[col_value].render(result.index.values, result.values.min())
                tr = Tag(soup, name="tr")
                table.append(tr)
                cells = self.award_info.awards[col_value].get_award_row(
                    result.index.values, result.values.min())
                for i in range(0, 3):
                    text = cells[i]
                    td = Tag(soup, name="td")
                    if i == 1:  # people string patch
                        if len(text) > 25:
                            td["title"] = text
                            text = text[0:25] + "..."
                    td.insert(1, text)
                    tr.append(td)
                table.append(tr)
        except:
            print("[!] Summary award failed!")
            import sys
            print(sys.exc_info())
            print("[!] Columns being processed: " + col_value + " and " + col)
            print("[!] Award dataframe:")
            print(awardsdf[[col_value, col]])
    soup.append(table)
    return soup
def RunScript(self, handler, logging_handler, main_method=''):
    """Run the specified handler in the target environment.

    The target environment will be installed prior to and removed after
    script execution. The script itself will appear to be the __main__
    module while it is executed.

    Args:
      handler: A str specifying the path to a python file in the tree or
        WSGI application
      logging_handler: A logging.Handler to be installed during script
        execution. The logging level will temporarily be set to DEBUG.
      main_method: python code to be appended to the file source and
        executed. This can be used to automatically run a function within
        the compiled source.

    Raises:
      ScriptNotFoundError: if the specified path does not refer to a known
        file (either in the external or target file systems).
    """
    self._SetUp()
    try:
        sys.modules.pop('__main__', None)  # force a reload of __main__
        # prevent mimic's main.py from masking the user's main.py in the
        # case where the user's app.yaml script handler specifies a script
        # which imports main
        sys.modules.pop('main', None)
        # force a reload of appengine_config.py, which gets automatically
        # loaded before mimic's main code
        sys.modules.pop('appengine_config', None)
        logger = logging.getLogger()
        if logging_handler:
            logger.addHandler(logging_handler)
        saved_level = logger.level
        logger.setLevel(logging.DEBUG)
        # This code relies on the fact that the os module has been patched
        # at this point and can be used to check for both external and
        # target files.
        if os.access('appengine_config.py', os.F_OK):
            loader = _Loader(self, _TARGET_ROOT, 'appengine_config.py', False)
            loader.load_module('appengine_config')
        if handler.endswith('.py'):
            # CGI handler (or WSGI done manually)
            file_path = handler
        elif '.' in handler:
            # "native" WSGI handler
            left, right = handler.rsplit('.', 1)
            file_path = '{}.py'.format(left.replace('.', '/'))
            self._wsgi_app_name = right
            if not os.access(file_path, os.F_OK):
                file_path = '{}/__init__.py'.format(left.replace('.', '/'))
        else:
            # Assume this is a package, like "foo/bar" or "foo/". Note that
            # a package that is just "foo" doesn't work and is validated in
            # target_info.
            file_path = os.path.join(handler, '__init__.py')
        self._cwd = os.path.dirname(file_path)  # see self._GetCwd()
        if not os.access(file_path, os.F_OK):
            raise ScriptNotFoundError()
        self._main_method = main_method
        # In case of an app.yaml script handler using a package script
        is_pkg = file_path.endswith('/__init__.py')
        loader = _Loader(self, _TARGET_ROOT, file_path, is_pkg)
        loader.load_module('__main__')
    except ScriptNotFoundError:
        raise
    except:
        # Materialize the traceback here, before _TearDown() is called by
        # our finally block, because here the 'open' builtin is still
        # patched, giving the formatter the ability to incorporate target
        # environment user source code into the output. Without this, the
        # offending lines of user source code would not appear in the
        # traceback or, more confusingly, would be substituted by mimic's
        # overlapping module source.
        exc_info = sys.exc_info()
        # Note format_exception relies on 'linecache', which must be reset
        # during _TearDown(), in order to prevent caching of stale user
        # source.
        formatted_exception = traceback.format_exception(
            exc_info[0], exc_info[1], exc_info[2])
        raise TargetAppError(formatted_exception)
    finally:
        self._TearDown()
        logger.setLevel(saved_level)
        logger.removeHandler(logging_handler)
def _execute_child(self, args, executable, preexec_fn, close_fds,
                   cwd, env, universal_newlines, startupinfo,
                   creationflags, shell, to_close,
                   p2cread, p2cwrite, c2pread, c2pwrite,
                   errread, errwrite):
    """Execute program (POSIX version)"""

    if isinstance(args, types.StringTypes):
        args = [args]
    else:
        args = list(args)

    if shell:
        args = ['/bin/sh', '-c'] + args
        if executable:
            args[0] = executable

    if executable is None:
        executable = args[0]

    def _close_in_parent(fd):
        os.close(fd)
        to_close.remove(fd)

    errpipe_read, errpipe_write = self.pipe_cloexec()
    try:
        try:
            gc_was_enabled = gc.isenabled()
            # Disable gc to avoid a deadlock if a garbage collection runs
            # between fork and exec.
            gc.disable()
            try:
                self.pid = os.fork()
            except:
                if gc_was_enabled:
                    gc.enable()
                raise
            self._child_created = True
            if self.pid == 0:
                # Child process
                try:
                    # Close the parent's ends of the pipes.
                    if p2cwrite is not None:
                        os.close(p2cwrite)
                    if c2pread is not None:
                        os.close(c2pread)
                    if errread is not None:
                        os.close(errread)
                    os.close(errpipe_read)

                    # Avoid clobbering fds 0/1 while duping below.
                    if c2pwrite == 0:
                        c2pwrite = os.dup(c2pwrite)
                    if errwrite == 0 or errwrite == 1:
                        errwrite = os.dup(errwrite)

                    def _dup2(a, b):
                        # dup2() removes the CLOEXEC flag, but we must clear
                        # it ourselves when dup2() would be a no-op.
                        if a == b:
                            self._set_cloexec_flag(a, False)
                        elif a is not None:
                            os.dup2(a, b)
                    _dup2(p2cread, 0)
                    _dup2(c2pwrite, 1)
                    _dup2(errwrite, 2)

                    # Close pipe fds, avoiding double-closes and the
                    # standard fds.
                    closed = {None}
                    for fd in [p2cread, c2pwrite, errwrite]:
                        if fd not in closed and fd > 2:
                            os.close(fd)
                            closed.add(fd)

                    if cwd is not None:
                        os.chdir(cwd)

                    if preexec_fn:
                        preexec_fn()

                    if close_fds:
                        self._close_fds(but=errpipe_write)

                    if env is None:
                        os.execvp(executable, args)
                    else:
                        os.execvpe(executable, args, env)

                except:
                    # Ship the exception, with its formatted traceback
                    # attached, back to the parent through the error pipe.
                    exc_type, exc_value, tb = sys.exc_info()
                    exc_lines = traceback.format_exception(exc_type,
                                                           exc_value, tb)
                    exc_value.child_traceback = ''.join(exc_lines)
                    os.write(errpipe_write, pickle.dumps(exc_value))

                os._exit(255)

            # Parent process
            if gc_was_enabled:
                gc.enable()
        finally:
            os.close(errpipe_write)

        # Wait for exec to fail or succeed; possibly raise an exception.
        data = _eintr_retry_call(os.read, errpipe_read, 1048576)
    finally:
        if p2cread is not None and p2cwrite is not None:
            _close_in_parent(p2cread)
        if c2pwrite is not None and c2pread is not None:
            _close_in_parent(c2pwrite)
        if errwrite is not None and errread is not None:
            _close_in_parent(errwrite)
        os.close(errpipe_read)

    if data != '':
        try:
            _eintr_retry_call(os.waitpid, self.pid, 0)
        except OSError as e:
            if e.errno != errno.ECHILD:
                raise
        child_exception = pickle.loads(data)
        raise child_exception
    return
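# Compact sketch of the errpipe trick in _execute_child above: the child
# pickles its exception (with the formatted traceback attached) through a
# pipe so the parent can re-raise it. POSIX-only, illustrative names,
# Python 3 syntax; os.pipe() fds are close-on-exec in Python 3, so a
# successful exec closes the write end and the parent reads b''.
import os
import pickle
import sys
import traceback

def fork_and_exec(argv):
    errpipe_read, errpipe_write = os.pipe()
    pid = os.fork()
    if pid == 0:  # child
        try:
            os.close(errpipe_read)
            os.execvp(argv[0], argv)
        except Exception:
            exc_type, exc_value, tb = sys.exc_info()
            exc_value.child_traceback = ''.join(
                traceback.format_exception(exc_type, exc_value, tb))
            os.write(errpipe_write, pickle.dumps(exc_value))
            os._exit(255)
    # parent
    os.close(errpipe_write)
    data = os.read(errpipe_read, 1048576)
    os.close(errpipe_read)
    if data:
        os.waitpid(pid, 0)
        raise pickle.loads(data)
    return pid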
def _parse(self, data):
    try:
        self.value = data.decode("utf-8")
    except UnicodeDecodeError as e:
        reraise(APEBadItemError, e, sys.exc_info()[2])
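# `reraise` above is assumed to be a six/compat-style helper; a minimal
# Python 3 sketch that raises a *new* exception type while preserving the
# traceback of the original failure:
def reraise(new_type, value, tb):
    raise new_type(value).with_traceback(tb)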
def main(self):
    import optparse

    parser = optparse.OptionParser(usage="%prog [options]")
    parser.add_option("--nocleanup", dest="nocleanup", default=False,
                      action="store_true",
                      help="Leave g0coinds and test.* datadir on exit or error")
    parser.add_option("--noshutdown", dest="noshutdown", default=False,
                      action="store_true",
                      help="Don't stop g0coinds after the test execution")
    parser.add_option("--srcdir", dest="srcdir", default="../../src",
                      help="Source directory containing g0coind/g0coin-cli (default: %default)")
    parser.add_option("--tmpdir", dest="tmpdir",
                      default=tempfile.mkdtemp(prefix="test"),
                      help="Root directory for datadirs")
    parser.add_option("--tracerpc", dest="trace_rpc", default=False,
                      action="store_true",
                      help="Print out all RPC calls as they are made")
    parser.add_option("--coveragedir", dest="coveragedir",
                      help="Write tested RPC commands into this directory")
    self.add_options(parser)
    (self.options, self.args) = parser.parse_args()

    if self.options.trace_rpc:
        import logging
        logging.basicConfig(level=logging.DEBUG)

    if self.options.coveragedir:
        enable_coverage(self.options.coveragedir)

    os.environ['PATH'] = self.options.srcdir + ":" + self.options.srcdir + "/qt:" + os.environ['PATH']

    check_json_precision()

    success = False
    try:
        if not os.path.isdir(self.options.tmpdir):
            os.makedirs(self.options.tmpdir)
        self.setup_chain()
        self.setup_network()
        self.run_test()
        success = True
    except JSONRPCException as e:
        print("JSONRPC error: " + e.error['message'])
        traceback.print_tb(sys.exc_info()[2])
    except AssertionError as e:
        print("Assertion failed: " + str(e))
        traceback.print_tb(sys.exc_info()[2])
    except Exception as e:
        print("Unexpected exception caught during testing: " + repr(e))
        traceback.print_tb(sys.exc_info()[2])

    if not self.options.noshutdown:
        print("Stopping nodes")
        stop_nodes(self.nodes)
        wait_bitcoinds()
    else:
        print("Note: g0coinds were not stopped and may still be running")

    if not self.options.nocleanup and not self.options.noshutdown:
        print("Cleaning up")
        shutil.rmtree(self.options.tmpdir)

    if success:
        print("Tests successful")
        sys.exit(0)
    else:
        print("Failed")
        sys.exit(1)
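# Tiny demo of the traceback.print_tb calls above: print only the stack
# frames, since each handler reports the message itself separately.
# Hypothetical example, Python 3 syntax.
import sys
import traceback

try:
    raise ValueError("boom")
except ValueError as e:
    print("Unexpected exception caught during testing: " + repr(e))
    traceback.print_tb(sys.exc_info()[2])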
def _error_handler(task):
    try:
        task.result()
    except Exception:
        sys.excepthook(*sys.exc_info())
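# Sketch of how a handler like _error_handler is typically wired up:
# attached as a done-callback so unhandled task exceptions are routed
# through sys.excepthook instead of only surfacing as an "exception was
# never retrieved" warning. Assumes an asyncio Task; purely illustrative.
import asyncio

async def main():
    task = asyncio.ensure_future(asyncio.sleep(0))
    task.add_done_callback(_error_handler)
    await task

# asyncio.run(main())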
def format_exception(self):
    import traceback
    exc_type, exc_value, exc_traceback = sys.exc_info()
    return traceback.format_exception(exc_type, exc_value, exc_traceback)
def runApp(self, options, cmdargs=None, timeout=None, debuggerInfo=None,
           symbolsPath=None, valgrindPath=None, valgrindArgs=None,
           valgrindSuppFiles=None, **profileArgs):
    if cmdargs is None:
        cmdargs = []
    cmdargs = cmdargs[:]

    if self.use_marionette:
        cmdargs.append('-marionette')

    binary = options.app
    profile = self.createReftestProfile(options, **profileArgs)

    # browser environment
    env = self.buildBrowserEnv(options, profile.profile)

    self.log.info("Running with e10s: {}".format(options.e10s))
    self.log.info("Running with fission: {}".format(options.fission))

    def timeoutHandler():
        self.handleTimeout(timeout, proc, options.utilityPath, debuggerInfo)

    interactive = False
    debug_args = None
    if debuggerInfo:
        interactive = debuggerInfo.interactive
        debug_args = [debuggerInfo.path] + debuggerInfo.args

    def record_last_test(message):
        """Records the last test seen by this harness for the benefit of
        crash logging."""
        def testid(test):
            if " " in test:
                return test.split(" ")[0]
            return test

        if message['action'] == 'test_start':
            self.lastTestSeen = testid(message['test'])
        elif message['action'] == 'test_end':
            if self.lastTest and message['test'] == self.lastTest:
                self.lastTestSeen = "Last test finished"
            else:
                self.lastTestSeen = '{} (finished)'.format(
                    testid(message['test']))

    self.log.add_handler(record_last_test)

    kp_kwargs = {
        'kill_on_timeout': False,
        'cwd': SCRIPT_DIRECTORY,
        'onTimeout': [timeoutHandler],
        'processOutputLine': [self.outputHandler],
    }

    if mozinfo.isWin or mozinfo.isMac:
        # Prevents log interleaving on Windows at the expense of losing
        # true log order. See bug 798300 and bug 1324961 for more details.
        kp_kwargs['processStderrLine'] = [self.outputHandler]

    if interactive:
        # If an interactive debugger is attached,
        # don't use timeouts, and don't capture ctrl-c.
        timeout = None
        signal.signal(signal.SIGINT, lambda sigid, frame: None)

    runner_cls = mozrunner.runners.get(
        mozinfo.info.get('appname', 'firefox'), mozrunner.Runner)
    runner = runner_cls(profile=profile,
                        binary=binary,
                        process_class=mozprocess.ProcessHandlerMixin,
                        cmdargs=cmdargs,
                        env=env,
                        process_args=kp_kwargs)
    runner.start(debug_args=debug_args,
                 interactive=interactive,
                 outputTimeout=timeout)
    proc = runner.process_handler
    self.outputHandler.proc_name = 'GECKO({})'.format(proc.pid)

    # Used to defer a possible IOError exception from Marionette
    marionette_exception = None

    if self.use_marionette:
        marionette_args = {
            'socket_timeout': options.marionette_socket_timeout,
            'startup_timeout': options.marionette_startup_timeout,
            'symbols_path': options.symbolsPath,
        }
        if options.marionette:
            host, port = options.marionette.split(':')
            marionette_args['host'] = host
            marionette_args['port'] = int(port)

        try:
            marionette = Marionette(**marionette_args)
            marionette.start_session()

            addons = Addons(marionette)
            if options.specialPowersExtensionPath:
                addons.install(options.specialPowersExtensionPath, temp=True)

            addons.install(options.reftestExtensionPath, temp=True)

            marionette.delete_session()
        except IOError:
            # Any IOError as thrown by Marionette means that something is
            # wrong with the process, like a crash or the socket is no
            # longer open. We defer raising this specific error so that
            # post-test checks for leaks and crashes are performed and
            # reported first.
            marionette_exception = sys.exc_info()

    status = runner.wait()
    runner.process_handler = None
    self.outputHandler.proc_name = None

    if status:
        msg = "TEST-UNEXPECTED-FAIL | %s | application terminated with exit code %s" % \
            (self.lastTestSeen, status)
        # use process_output so message is logged verbatim
        self.log.process_output(None, msg)

    crashed = mozcrash.log_crashes(self.log,
                                   os.path.join(profile.profile, 'minidumps'),
                                   options.symbolsPath,
                                   test=self.lastTestSeen)
    if not status and crashed:
        status = 1

    runner.cleanup()
    self.cleanup(profile.profile)

    if marionette_exception is not None:
        exc, value, tb = marionette_exception
        reraise(exc, value, tb)

    self.log.info(
        "Process mode: {}".format('e10s' if options.e10s else 'non-e10s'))
    return status
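# Sketch of the deferral pattern used for marionette_exception above:
# capture sys.exc_info() at the failure site, finish cleanup and
# reporting, then re-raise the original error last. Illustrative names,
# Python 3 syntax.
import sys

def run_with_deferred_error(action, cleanup):
    deferred = None
    try:
        action()
    except IOError:
        deferred = sys.exc_info()
    cleanup()  # always runs and reports before the error surfaces
    if deferred is not None:
        _, value, tb = deferred
        raise value.with_traceback(tb)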