def checkDriver(self):
    if not self.connected:
        logging.error(
            "The script has to be connected first; have you used the connect function?")
        raise Exception("Error: not connected. Please connect first")
    if not self.excel:
        logging.error(
            "The Excel file must already be open; have you used the openFile function?")
        raise Exception(
            "Error: Excel file not opened. Please open the Excel file first")
def __init__(self):
    self.zip_files = list(AWARD_DATA.glob('*.zip'))
    if not self.zip_files:
        logging.error(
            f'No awards found in {AWARD_DATA}. '
            f'Try downloading by year using the download.py script.')
def load_history(someday, caching=True):
    formatted_date = someday.strftime("%B_%d")
    if caching:
        try:
            with open(_events_list_file, 'rb') as f:
                _events_calendar = pickle.load(f)
        except IOError as e:
            logging.error('File I/O error occurred: %s', e)
            # Start with an empty cache so the lookups below still work.
            _events_calendar = {}
        if formatted_date in _events_calendar:
            logging.debug(formatted_date + ' exists in calendar')
            html = _events_calendar[formatted_date]
        else:
            html = _opener.open(_url + formatted_date).read().decode(
                'utf-8', 'ignore').encode('ascii', 'ignore')
            _events_calendar[formatted_date] = html
            with open(_events_list_file, 'wb') as f:
                pickle.dump(_events_calendar, f)
    else:
        html = _opener.open(_url + formatted_date).read().decode(
            'utf-8', 'ignore').encode('ascii', 'ignore')
    uls = BeautifulSoup(html, "html.parser").html.body.findAll('ul')
    result = {}
    for i in [1, 2, 3]:
        item_dict = {}
        for li in uls[i].findAll('li'):
            res = li.text.split(" ", 1)
            item_dict[res[0].strip()] = res[1].strip()
        result[i] = item_dict
    return result
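# load_history caches fetched pages in a pickled dict. A minimal,
# self-contained sketch of that pattern; the helper names and the
# cache_file argument are illustrative assumptions, not part of the
# original module:
import os
import pickle

def load_cache(cache_file):
    # Return the cached dict, or an empty dict when the file is absent or unreadable.
    if not os.path.exists(cache_file):
        return {}
    try:
        with open(cache_file, 'rb') as f:
            return pickle.load(f)
    except (OSError, pickle.UnpicklingError):
        return {}

def save_cache(cache_file, cache):
    # Persist the whole cache dict atomically enough for a single-writer script.
    with open(cache_file, 'wb') as f:
        pickle.dump(cache, f)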
def getarticle(self, url, folder='./', charset='utf-8'):
    '''Save the images and text of the Sina blog post at `url` into the directory `folder`.'''
    logging.debug('getarticle: {0}'.format(url))
    r = requests.get(url, headers=self.getUserAgent())
    if r.ok:
        soup = BeautifulSoup(r.content, 'lxml')
        title = self.getTitle(soup)
        title2 = self.getTitle2(soup)
        article_comment_list = self.getArticle_comment_list(soup)
        body = self.getMainBody(soup)
        if body is None:
            return title
        body = self.cleanrubbish(body)
        body.title = title
        # Handle both image-markup formats.
        body = self.getImages(body, title, self.delaySecond)
        body = self.getImages(body, title + '_', self.delaySecond, '')
        self.saveToFile(
            self.getFilePath(title, folder),
            self.alterArticalContents(body, title, title2,
                                      article_comment_list, self.copyRight),
            charset)
        return title
    else:
        logging.error('Requests Error: {0}'.format(url))
        return ""
def auto_get(self):
    try:
        logging.info("Trying API 1 (ipapi)..")
        data = json.loads(
            requests.get("https://ipapi.co/json").content.decode(
                'utf-8')).get("city")
        logging.info("Received data: {}".format(data))
        if data is None:
            logging.warning("API 1 (ipapi) returned no data..")
            logging.info("Trying API 2 (ip-api)..")
            data = json.loads(
                requests.get("http://ip-api.com/json").content.decode(
                    'utf-8')).get("city")
            logging.info("Received data: {}".format(data))
            if data is None:
                logging.warning("API 2 (ip-api) returned no data..")
                logging.info("Trying API 3 (extreme-ip-lookup)..")
                data = json.loads(
                    requests.get("http://extreme-ip-lookup.com/json/").
                    content.decode('utf-8')).get("city")
                logging.info("Received data: {}".format(data))
        self.sehir = data
        return data
    except Exception:
        logging.error("None of the location APIs worked.")
        print("Check your internet connection.")
        logging.info("Shutting down the application")
        exit()
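# The three nested fallbacks above can also be expressed as a loop over
# candidate endpoints. A hedged sketch, assuming all three APIs expose a
# "city" field in their JSON payload; the helper name is an assumption:
import requests

GEO_APIS = [
    "https://ipapi.co/json",
    "http://ip-api.com/json",
    "http://extreme-ip-lookup.com/json/",
]

def first_city(apis=GEO_APIS, timeout=5):
    # Return the first non-empty "city" value, or None if every API fails.
    for url in apis:
        try:
            city = requests.get(url, timeout=timeout).json().get("city")
        except (requests.RequestException, ValueError):
            continue
        if city:
            return city
    return None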
def check_pokeapi_request(url):
    '''Handle RequestExceptions.'''
    try:
        return requests.get(url)
    except requests.exceptions.RequestException as error:
        logging.error(error)
        abort(500)
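# check_pokeapi_request turns a network failure into an HTTP 500 via
# Flask's abort. A hypothetical route using it; the app instance and the
# endpoint path are illustrative assumptions, not part of the original:
from flask import Flask, jsonify

app = Flask(__name__)

@app.route('/pokemon/<name>')
def pokemon(name):
    response = check_pokeapi_request(
        'https://pokeapi.co/api/v2/pokemon/{}'.format(name))
    return jsonify(response.json())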
def _LoadConfigs():
    """Load default layout configuration parameters."""
    configs = {}
    for cf, filename in CONFIG_FILES.items():
        with open(filename, 'r') as fp:
            configs[cf] = json.load(fp)
    if 'plot_defs' not in configs:
        logging.error('Missing definitions for plotting javascripts.')
    return configs
def clear_consistency_flag(fname):
    try:
        # With shell=True the command must be a single string, not a list.
        child_sig = subprocess.call(f"h5clear -s {fname}", shell=True)
        if child_sig != 0:
            logging.error("Consistency flag has not been cleared: %s", child_sig)
    except OSError as e:
        logging.error("Clearing the hdf5 flag failed: %s", e)
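# clear_consistency_flag shells out through a string command. A hedged
# alternative sketch using subprocess.run with an argv list, which avoids
# shell quoting issues; assumes h5clear is on PATH and is not the original code:
import logging
import subprocess

def clear_consistency_flag_argv(fname):
    try:
        result = subprocess.run(["h5clear", "-s", fname])
        if result.returncode != 0:
            logging.error("Consistency flag has not been cleared: %s",
                          result.returncode)
    except OSError as e:
        logging.error("Clearing the hdf5 flag failed: %s", e)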
def copy(src, dest):
    """Copy source to destination."""
    try:
        shutil.copy2(src, dest)
    except IOError:
        logging.error("Could not copy %s to %s" % (src, dest))
    else:
        logging.info("Copied %s to %s" % (src, dest))
def validate_json_keys(json, keys, **kwargs):
    for key in keys:
        if key not in json:
            logging.error("Key {key} not found in {json}".format(key=key, json=json))
            return False
    for key, value in kwargs.items():
        if key not in json:
            json[key] = value
    return True
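# A hypothetical call to validate_json_keys showing both behaviours: the
# required-key check and the back-filled default. The payload contents are
# illustrative only:
payload = {"name": "pikachu"}
if validate_json_keys(payload, ["name"], language="en"):
    # payload now also carries the default: {"name": "pikachu", "language": "en"}
    print(payload)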
async def create_item(item: Hardware_interface):
    try:
        task = item.message
        params = {"param_a": item.ctrl_param_a, "param_b": item.ctrl_param_b}
        log.info(f'acquired request on: {task} with parameters: {params}')
        message_worker.sorter(task=task, params=params)
        return item
    except Exception as e:
        log.error(f"FAILED WHILE PARSING REQUEST: {item} ({e})")
        return 300
def check_funtranslation_request(url, key, description):
    '''Handle RequestExceptions.'''
    try:
        payload = {'text': description}
        headers = {'x-funtranslations-api-secret': key}
        return requests.post(url, data=json.dumps(payload), headers=headers)
    except requests.exceptions.RequestException as error:
        logging.error(error)
        abort(500)
def copy_if_needed(src, dest):
    """Copy a file if the destination does not already exist."""
    if os.path.isfile(dest):
        return
    try:
        shutil.copy2(src, dest)
    except IOError:
        logging.error("Could not copy %s to %s" % (src, dest))
    else:
        logging.info("Copied %s to %s" % (src, dest))
def writeSVGFile(self):
    svg_filename = "/tmp/%s.svg" % self.figure_name
    try:
        ET.ElementTree(self.svg_dom_root).write(svg_filename,
                                                encoding='UTF-8',
                                                xml_declaration=True,
                                                method="xml")
        logging.debug("lara: Experiment XML outputfile %s written" % svg_filename)
    except IOError:
        logging.error("Cannot write Experiment XML outputfile %s !!!" % svg_filename)
def writeVWorksXMLFile(self):
    vworks_out_file_name = self.experiment.name() + ".xpro"
    try:
        ET.ElementTree(self.v11_root).write(vworks_out_file_name,
                                            encoding='UTF-8',
                                            xml_declaration=True,
                                            method="xml")
        logging.debug("lara: Experiment XML outputfile %s written" % vworks_out_file_name)
    except IOError:
        logging.error("Cannot write Experiment XML outputfile %s !!!" % vworks_out_file_name)
def updateCarbonMonoxide(self, publisFunc):
    url = (self.QUERY_PAGE + self.CARBON_MONOXIDE_PATH +
           str(int(float(self.LAT))) + "," + str(int(float(self.LON))) +
           "/current.json?" + self.APPID)
    json = self.getJSON(url)
    for (name, items) in json.items():
        if name in ["location", "time"]:
            continue
        elif name == "data" and len(items) > 0 and "value" in items[0]:
            logging.info("carbon_monoxide_ratio_per_billion_at_1bar : " +
                         str(float(items[0]["value"]) * 1000000000))
            publisFunc("carbon_monoxide_ratio_per_billion_at_1bar",
                       float(items[0]["value"]) * 1000000000)
            continue
        else:
            logging.error("New Items: " + str(name) + " " + str(items))
def _crawl(self, pn):
    url = self.turl + '&page=' + str(pn)
    try:
        r = self.ss.get(url)
    except Exception as e:
        mylog.error('fail== %s %s' % (url, e))
        return
    if r.status_code != 200:
        mylog.info('fail== %s' % url)
        return
    self.saveFile(r.content, url, pn)
    self._parseData(r.content)
def updateUVindex(self, publisFunc):
    url = self.QUERY_PAGE + self.UVI_PATH + self.COORDENATES + self.UNITS + self.APPID
    # print(url)
    json = self.getJSON(url)
    for (name, items) in json.items():
        if name in ["lat", "date", "lon", "date_iso"]:
            continue
        elif name == "value":
            logging.info("ultraviolet_index : " + str(items))
            publisFunc("ultraviolet_index", items)
        else:
            logging.error("New Items: " + str(name) + " " + str(items))
def updateSulfurDioxide(self, publisFunc):
    url = (self.QUERY_PAGE + self.SULFUR_DIOXIDE_PATH +
           str(int(float(self.LAT))) + "," + str(int(float(self.LON))) +
           "/current.json?" + self.APPID)
    json = self.getJSON(url)
    for (name, items) in json.items():
        if name in ["location", "time"]:
            continue
        elif name == "data" and len(items) > 18 and "value" in items[18]:
            logging.info("sulfur_dioxide_ratio_per_billion_at_1hPa : " +
                         str(float(items[18]["value"]) * 1000000000))
            publisFunc("sulfur_dioxide_ratio_per_billion_at_1hPa",
                       float(items[18]["value"]) * 1000000000)
            continue
        else:
            logging.error("New Items: " + str(name) + " " + str(items))
def do_ssh_conn(ip_addr=None, username=None, password=None):
    sshclient = None
    try:
        sshclient = ssh_client.sshclient(ip_addr,
                                         username=username,
                                         password=password)
    except Exception:
        log.error(
            "SSH connection to Windows PC %s with connected Android devices failed"
            % ip_addr)
        raise Exception("ssh connection to windows pc %s failed" % ip_addr)
    return sshclient
def _crawl(self, pn):
    url = self.turl + '&pn=' + str(pn * 10)
    try:
        r = self.ss.get(url)
    except Exception as e:
        mylog.error('fail== %s %s' % (url, e))
        return
    if r.status_code != 200:
        mylog.info('fail== %s' % url)
        return
    self.saveFile(r.content, url, pn)
    b = self.saveDB(r.content)
    if not b:
        mylog.error('faildb== %s %s' % (url, pn))
def DatabaseAccess():
    DB = None
    if DB is None:
        config = configparser.ConfigParser()
        if not config.read('/var/www/html/Dash/assets/config.ini'):
            logging.critical('could not open config file')
            exit(1)
        else:
            try:
                logging.info('trying to establish a DB connection')
                DB = MySQLdb.connect(host=config['DATABASE'].get('SERVER'),
                                     user=config['DATABASE'].get('USERNAME'),
                                     passwd=config['DATABASE'].get('PASSWORD'),
                                     db=config['DATABASE'].get('DATABASE'))
                logging.info('Database connection established')
                return DB
            # MySQLdb.connect raises MySQLdb errors, not configparser ones,
            # so catch both.
            except (configparser.Error, MySQLdb.Error) as es:
                logging.error('Error occurred: %s', es)
                return 1
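# DatabaseAccess expects a [DATABASE] section in config.ini. A sketch of
# the assumed layout; the keys are taken from the lookups above, the
# values are placeholders:
#
#   [DATABASE]
#   SERVER = localhost
#   USERNAME = dash
#   PASSWORD = secret
#   DATABASE = dashboard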
def get_info(self, sehir):
    self.sehir = sehir
    try:
        data = json.loads(
            requests.get(
                "http://api.openweathermap.org/data/2.5/weather?q={id}&appid=fe76c224322cdde0049342cb45d26b04"
                .format(id=sehir)).content.decode('utf-8'))
        if data.get("cod") == 200:
            return data
        else:
            print("Invalid city name!!")
            logging.warning(
                "The API did not return a positive response. Response: {}".format(data))
            return None
    except Exception:
        logging.error("The weather API did not respond at all..")
        print("Error: Can not reach ")
def writeProcess(self):
    try:
        with open('P_' + self.experiment.name() + ".cxx", 'w') as file_output:
            file_output.write(self.momentum_profileHead)
            file_output.write(self.momentum_profileDevices)
            file_output.write(self.momentum_profileVariables)
            file_output.write(self.momentum_processHead)
            file_output.write(self.momentum_processContainers)
            file_output.write(self.momentum_process_variables)
            file_output.write(self.momentum_process)
        logging.debug("mcg: outputfile P_%s.cxx written" % self.experiment.name())
    except IOError:
        logging.error("Cannot write momentum code file %s !!!" % self.experiment.name())
def writeExperiment(self):
    try:
        with open('E_' + self.experiment.name() + ".cxx", 'w') as file_output:
            file_output.write(self.momentum_experiment_head)
            file_output.write(self.momentum_experiment_start_end)
            file_output.write(self.momentum_nest_restrictions)
            file_output.write(self.momentum_experiment_tail)
        logging.debug("mcg: Experiment outputfile E_%s.cxx written" % self.experiment.name())
    except IOError:
        logging.error("Cannot write momentum code file %s !!!" % self.experiment.name())
def apply_entity(unapplied_entity, delete_unapplied_entity=True):
    """Re-write an entity representing an unapplied write to apply it.

    Args:
      unapplied_entity: An App Engine datastore entity, typically loaded by
        datastore.Get. This will not work for a model instance, e.g. one
        loaded from db.get.
      delete_unapplied_entity: If true, the record of the unapplied write
        will be removed from the datastore.
    """
    key = unapplied_entity.key()
    path = unapplied_entity.key().to_path()
    kind = path[-2]
    if not kind.startswith(UNAPPLIED_WRITE_KIND_PREFIX):
        logging.error("Attempting to apply an already applied write: %r", key)
        return
    kind = kind[UNAPPLIED_WRITE_KIND_PREFIX_LEN:]
    id_or_name = path[-1]
    namespace = unapplied_entity.namespace()
    # You can insert code here to change id_or_name.
    if isinstance(id_or_name, basestring):
        entity_to_apply = datastore.Entity(kind, key.parent(),
                                           name=id_or_name,
                                           namespace=namespace)
    elif id_or_name:
        entity_to_apply = datastore.Entity(kind, key.parent(),
                                           id=id_or_name,
                                           namespace=namespace)
    else:
        entity_to_apply = datastore.Entity(kind, key.parent(),
                                           namespace=namespace)
    entity_to_apply.update(unapplied_entity)
    # You can insert code here to change entity_to_apply.
    datastore.Put(entity_to_apply)
    if delete_unapplied_entity:
        datastore.Delete(unapplied_entity)
def load(self, filename=None, cur_time=None):
    if filename is None:
        filename = self.save_file
    if not filename or not os.path.exists(filename):
        return
    self.lock.acquire()
    if cur_time is None:
        cur_time = time.time()
    age_cutoff = (cur_time - self.age_limit if self.age_limit is not None else 0)
    old_data = self.data
    try:
        with open(filename, "r", newline='') as fp:
            reader = csv.reader(fp, delimiter=' ')
            self.data = deque(maxlen=self.data.maxlen)
            error_count = 0
            for row in reader:
                try:
                    row = [float(v) for v in row]
                    if row[0] >= age_cutoff:
                        self.record(*row)
                except (IndexError, ValueError):
                    if error_count == 0:
                        logging.error('Bad tracker csv line < %s > in file "%s"'
                                      % (row, filename))
                        logging.error("Skipping line, suppressing further errors")
                    error_count += 1
            if error_count > 0:
                logging.error("%d bad lines found", error_count)
    except Exception as err:
        logging.error("Could not load data from %s: %s" % (filename, err))
        self.data = old_data
    self.lock.release()
def getURL(WebServer, Port, URL):
    # Build the connection string before the try block so the error
    # handler can always reference it.
    ConnectionString = "http://%s:%s%s" % (WebServer, Port, URL)
    try:
        logging.info("Trying to get stats from " + ConnectionString)
        conn = urllib.urlopen(ConnectionString)
        URLresponse = conn.read()
        logging.debug("Response \n" + URLresponse)
        # Clean up the connection
        conn.close()
        # The response of the function is the output of the URL called
        return URLresponse
    # Catch all exceptions
    except Exception:
        logging.error("Error getting URL: " + ConnectionString)
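# getURL uses the Python 2 urllib API. A hedged Python 3 equivalent using
# urllib.request; the function name is an assumption and this is not part
# of the original module:
import logging
import urllib.request

def getURL_py3(WebServer, Port, URL):
    ConnectionString = "http://%s:%s%s" % (WebServer, Port, URL)
    try:
        logging.info("Trying to get stats from " + ConnectionString)
        # The context manager closes the connection even on error paths.
        with urllib.request.urlopen(ConnectionString) as conn:
            return conn.read()
    except Exception:
        logging.error("Error getting URL: " + ConnectionString)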
def check_terminate(self):
    num_consecutive = 10
    if self.test_mode or len(self.iterations) < num_consecutive:
        return False
    text_sent_date_key = self.now.strftime('%Y-%m-%d')
    if (text_sent_date_key in self.texts_sent and
            self.texts_sent[text_sent_date_key] >= 8 * len(constants.PHONE_NUMBERS)):
        error_message = 'Reached maximum texts sent per day: {}'.format(
            self.texts_sent[text_sent_date_key])
        self.client.messages.create(body=error_message,
                                    from_=constants.TWILIO_NUMBER,
                                    to=constants.MY_PHONE_NUMBER)
        return True
    consecutive_fails = True
    for i in range(len(self.iterations) - 1,
                   len(self.iterations) - num_consecutive, -1):
        if i - 1 >= 0:
            diff = self.iterations[i] - self.iterations[i - 1]
            if diff.total_seconds() > self.min_sleep_time:
                consecutive_fails = False
                self.iterations = self.iterations[-1:]
                break
    if consecutive_fails:
        error_message = '10 consecutive sleep times of less than {}s'.format(
            self.min_sleep_time)
        self.client.messages.create(body=error_message,
                                    from_=constants.TWILIO_NUMBER,
                                    to=constants.MY_PHONE_NUMBER)
        logging.error('Message {} sent to {}'.format(
            error_message, constants.MY_PHONE_NUMBER))
    return consecutive_fails
def benchmark_indexing(n_docs_options, retriever_doc_stores, data_dir,
                       filename_gold, filename_negative, data_s3_url,
                       embeddings_filenames, embeddings_dir, update_json,
                       **kwargs):
    retriever_results = []
    for n_docs in n_docs_options:
        for retriever_name, doc_store_name in retriever_doc_stores:
            logger.info(
                f"##### Start indexing run: {retriever_name}, {doc_store_name}, {n_docs} docs #####"
            )
            try:
                doc_store = get_document_store(doc_store_name)
                retriever = get_retriever(retriever_name, doc_store)
                docs, _ = prepare_data(data_dir=data_dir,
                                       filename_gold=filename_gold,
                                       filename_negative=filename_negative,
                                       data_s3_url=data_s3_url,
                                       embeddings_filenames=embeddings_filenames,
                                       embeddings_dir=embeddings_dir,
                                       n_docs=n_docs)
                tic = perf_counter()
                index_to_doc_store(doc_store, docs, retriever)
                toc = perf_counter()
                indexing_time = toc - tic
                print(indexing_time)
                retriever_results.append({
                    "retriever": retriever_name,
                    "doc_store": doc_store_name,
                    "n_docs": n_docs,
                    "indexing_time": indexing_time,
                    "docs_per_second": n_docs / indexing_time,
                    "date_time": datetime.datetime.now(),
                    "error": None
                })
                retriever_df = pd.DataFrame.from_records(retriever_results)
                retriever_df = retriever_df.sort_values(
                    by="retriever").sort_values(by="doc_store")
                retriever_df.to_csv(index_results_file)
                doc_store.delete_all_documents(index=doc_index)
                doc_store.delete_all_documents(index=label_index)
                time.sleep(10)
                del doc_store
                del retriever
            except Exception:
                tb = traceback.format_exc()
                logging.error(
                    f"##### The following error was raised while running indexing run: {retriever_name}, {doc_store_name}, {n_docs} docs #####"
                )
                logging.error(tb)
                retriever_results.append({
                    "retriever": retriever_name,
                    "doc_store": doc_store_name,
                    "n_docs": n_docs,
                    "indexing_time": 0,
                    "docs_per_second": 0,
                    "date_time": datetime.datetime.now(),
                    "error": str(tb)
                })
                doc_store.delete_all_documents(index=doc_index)
                doc_store.delete_all_documents(index=label_index)
                time.sleep(10)
                del doc_store
                del retriever
    if update_json:
        populate_retriever_json()