def tesa(a):
    logging.info("into the function tesa")
    # logging.critical("the function got the value " + str(a))
    logging.critical("the function got the value " + str(a))
    logging.error("the function got the value " + str(a))
    logging.warning("the function got the value " + str(a))
    pdb.set_trace()
    logging.info("the function got the value " + str(a))
    logging.debug("the function got the value " + str(a))
    try:
        b = a / 2
    except Exception as ex:
        logging.debug("an exception has occurred DEBUG")
        logging.info("an exception has occurred INFO")
        logging.warning("an exception has occurred WARNING")
        logging.error("an exception has occurred ERROR")
        logging.critical("an exception has occurred CRITICAL")
        logging.critical(str(ex))
        logging.error(str(ex))
        logging.warning(str(ex))
        logging.info(str(ex))
        logging.debug(str(ex))
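# A note on the recurring fix in these snippets: logging.DEBUG, logging.INFO,
# logging.WARNING, logging.ERROR and logging.CRITICAL are integer level
# constants in the standard library, not callables, so e.g.
# logging.WARNING("...") raises TypeError. The emitting functions are the
# lowercase module-level calls. A minimal, self-contained sketch:
import logging

logging.basicConfig(level=logging.WARNING)  # WARNING is the level constant (the int 30)
logging.warning("emitted: warning() is the function that logs a record")
logging.info("suppressed: INFO (20) is below the configured WARNING level")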
def click_all_read_more_buttons(self):
    time.sleep(0.2)
    try:
        see_more_all = self.company_page.find_elements_by_css_selector(
            ".more-link")
    except NoSuchElementException:
        logging.warning(
            "For reviews, see_more elements are not found. url:{}".format(
                self.company_info["url_name"]))
        return
    except Exception as e:
        error_msg = "Unknown failure while trying to get see_more elements. exception:{}. url:{}"
        logging.error(error_msg.format(str(e), self.company_info["url_name"]))
        return
    for s in see_more_all:
        try:
            time.sleep(0.1)
            self.company_page.execute_script(
                "return arguments[0].scrollIntoView();", s)
            self.company_page.execute_script(
                "return arguments[0].click();", s)
        except Exception as e:
            logging.warning(
                "Cannot click on see_more elements for reviews. exception: {}"
                .format(str(e)))
def remove_packs_task():
    """The server deletes packs that are more than 2 minutes old."""
    # logging.debug("Current thread count: {}".format(len(threading.enumerate())))
    for i in list(response_packs.keys()):
        if time.time() - response_packs[i][0] > 120:
            # response_packs.pop(bytes(i))
            logging.info("All keys: {}".format(response_packs.keys()))
            logging.info("Removing timed-out device ID: {}".format(i))
            del response_packs[bytes(i)]
    for j in list(requests_packs.keys()):
        if time.time() - requests_packs[j][0] > 120:
            # response_packs.pop(bytes(j))
            logging.info("All keys: {}".format(requests_packs.keys()))
            logging.info("Removing timed-out device ID: {}".format(j))
            del requests_packs[bytes(j)]
    try:
        logging.warning("Removing expired logs")
        remove_log()
    except OSError as o:
        logging.warning(o)
    global remove_timer
    remove_timer = Timer(120, remove_packs_task)
    remove_timer.start()
def load_logfile_filename():
    """
    Return the configured log file or None.

    Throws an exception only if a manually specified log file is invalid.
    """
    # Note: the original was missing the comma between 'log' and
    # 'mlmonkey.log', which silently concatenated them into 'logmlmonkey.log'.
    filename = os.path.join(os.path.dirname(mlmonkey.__file__),
                            'log', 'mlmonkey.log')
    if filename is not None:
        try:
            filename = os.path.abspath(filename)
            dirname = os.path.dirname(filename)
            if not os.path.exists(dirname):
                os.makedirs(dirname)
            with open(filename, 'a'):
                pass
        except Exception:
            logging.warning('"%s" is not a valid value for logfile_filename.' % filename)
            logging.warning(
                'Set the envvar DIGITS_LOGFILE_FILENAME to fix your configuration.')
            raise
    else:
        filename = None
    return filename
def __init__(self, _msg):
    self.__illegal = True
    self.__extra_info = ''
    self.__header = ''
    if isinstance(_msg, tuple) and isinstance(_msg[0], str) and isinstance(
            _msg[1], list):
        self.__header = _msg[0]
        self.__content = _msg[1]
        if len(_msg) == 3:
            if isinstance(_msg[2], str):
                self.__extra_info = _msg[2]
            else:
                self.__illegal = False
                logging.warning(
                    "Error : Class Message Error > __init__ - Extra Information Must Be A String")
    elif isinstance(_msg, str):
        tmp_msg = _msg.split(":")
        if len(tmp_msg) < 2:
            self.__illegal = False
            return
        self.__header = tmp_msg[0]
        self.__content = tmp_msg[1].split(' ')
        if len(tmp_msg) == 3:
            if isinstance(tmp_msg[2], str):
                self.__extra_info = tmp_msg[2]
            else:
                self.__illegal = False
                logging.warning(
                    "Error : Class Message Error > __init__ - Extra Information Must Be A String")
    else:
        self.__illegal = False
        logging.warning("Error : Class Message Error > __init__")
def handle(command, channel, user):
    """
    If the parser function finds a correct bot call or !sticky command,
    firehose() will pass the link and channel ID to this function, where the
    link will be validated and then stickied if it passes the check.
    """
    success = 'Roger that! I\'ve stickied your post successfully :)'
    unknown = 'Uh oh...something went wrong. You\'ll need to alert my master!'
    not_safe = 'Whoops...Looks like there\'s already two stickies up, or you\'re less than 6 hours from a scheduled sticky going live :('
    val_failed = 'Look, dammit. I can\'t sticky something if you f**k up the URL. Get it right and then come talk to me.'
    default = 'Uhh...who tf is this guy?'
    unstickied = 'You got it! The post has been unstickied.'
    unsticky_val_failed = 'Hmm...I couldn\'t find a current sticky matching the link you gave me.'
    unsticky_failed = 'For some reason, I just...couldn\'t figure out how to unsticky this post...please forgive me? :\'('
    if user in settings['users'].values():
        if command.startswith(BOT_CMD_UNSTICKY):
            link = command.split(BOT_CMD_UNSTICKY)[1].strip().lower()
            url = link.strip('<>')
            unsticky_id = sticky_bot.validate_unsticky(url)
            if unsticky_id:
                if sticky_bot.unsticky(unsticky_id):
                    logging.info('Unstickied!')
                    post_message(channel, unstickied)
                    return
                else:
                    logging.warning('Un-sticky failed!')
                    post_message(channel, unsticky_failed)
                    return
            else:
                logging.warning('Un-sticky validation failed!')
                post_message(channel, unsticky_val_failed)
                return
        link = command.strip('<>')
        post_id = sticky_bot.validate(link)
        if post_id:
            if sticky_bot.is_sticky_safe():
                if sticky_bot.sticky(post_id):
                    logging.info('Stickied!')
                    post_message(channel, success)
                    if settings['call_home']:
                        call_home.sendAlert(link)
                    return
                else:
                    logging.warning('Sticky failed!')
                    post_message(channel, unknown)
                    return
            else:
                logging.warning('Not safe to sticky.')
                post_message(channel, not_safe)
                return
        else:
            logging.warning('Validation failed.')
            post_message(channel, val_failed)
            return
    else:
        post_message(channel, default)
        return
def handle_get():
    if 'openid' not in request.args and 'timestamp' in request.args:
        timestamp = request.args.get('timestamp')
        nonce = request.args.get('nonce')
        signature = request.args.get('signature')
        if ow.token_verify(timestamp=timestamp, nonce=nonce, signature=signature):
            logging.info("Token verification succeeded.")
            return request.args.get('echostr')
        else:
            logging.warning("Token verification failed.")
            return ""
    elif 'code' in request.args:
        time.sleep(0.5)
        code = request.args.get('code')
        state = request.args.get('state')
        result = ow.get_member_info_by_code(code=code)
        unionId = result['unionid']
        t = datetime.strftime(datetime.now(), '%Y-%m-%d %H:%M:%S')
        # The original SQL was missing the closing parenthesis of the column
        # list before 'values'.
        ret = execute(
            sql="insert into Weixin.followers(old_member, new_member, connect_time, subscribe_status, "
                "create_time, update_time) values(?, ?, ?, ?, ?, ?)",
            args=(state, unionId, t, '0', t, t))
        if ret:
            url = 'https://mp.weixin.qq.com/s?__biz=MzUzNDk2ODE3Nw==&mid=100001788&idx=1&sn=3006adb45440eb754dd69a0007447ffb&chksm=7a8de5e24dfa6cf4e452b69133cd0c3cd2195f4ed3e16c46ec291406fc5ef6f0b6605cda40d2&mpshare=1&scene=1&srcid=11147dk6bwNSgStEkFerTnMO#rd'
            return redirect(url)
    else:
        return render_template('index.html')
def table_create(self,
                 table_name='test_user',
                 column_list=[
                     'ID Integer PRIMARY KEY autoincrement', 'name text',
                     'age int'
                 ]):
    if self.teble_is_exist(table_name):
        logging.debug('Table already exists: ' + table_name)
        return True
    if len(column_list) == 0:
        logging.warning('Table creation failed, no column list was passed: ' + table_name)
        return False
    try:
        sql = 'create table ' + table_name + ' ('
        for x in column_list:
            sql = sql + x + ','
        sql = sql.rstrip(',') + ');'
        logging.debug(sql)
        self.cursor.execute(sql)
        self.connect.commit()
        logging.debug("Table creation done: " + sql)
    except Exception:
        logging.debug("Table creation error: " + sql)
        return False
    if self.teble_is_exist(table_name):
        logging.debug('Table created successfully!')
        return True
def getContentsUrls(url, crawler):
    # Initialize before the try block so the final return cannot raise
    # UnboundLocalError when an element is missing.
    urlList = []
    try:
        driver = crawler.getWebDriver(url)
        contents = driver.find_element_by_xpath(
            '//*[@id="aNews_List"]/ul').find_elements_by_tag_name('li')
        logging.info("getContentsUrls() - url :: " + url)
        for content in contents:
            # './/' keeps the search relative to this <li>; a bare '//'
            # would search the whole document.
            newsDate = str(
                list(
                    datefinder.find_dates(
                        str(
                            content.find_element_by_xpath(
                                './/a/div[@class="aNews_date"]').text)))[0]).split()[0]
            if newsDate == crawlingDate:
                urlList.append(
                    content.find_element_by_css_selector('a').get_attribute('href'))
    except NoSuchElementException:
        logging.warning("Element exception :: " + url)
    return urlList
def main():
    config = "user='******' host='comp421.cs.mcgill.ca' dbname='cs421' password='******'"
    connection = psycopg2.connect(config)
    cur = connection.cursor()
    while True:
        option = input(
            'select 1 to add a single player\n'
            'select 2 to find the players who have won the most gold medals given the country\n'
            'select 3 to find the female player with the most gold medals from the country with the most total medals\n'
            'select 4 to update the medal number of each player who participated in a \'finals\' match\n'
            'select 5 to update the total medal number for each country based on the player medal numbers\n'
            'select 6 to find all players who got a gold medal given the name of the sport\n'
            'select 7 to quit')
        # Redirect to the option the user chose. input() returns a string
        # in Python 3, so compare against string literals.
        if option == '1':
            option_1(cur)
        elif option == '2':
            option_2(cur)
        elif option == '3':
            option_3(cur)
        elif option == '4':
            option_4(cur)
        elif option == '5':
            option_5(cur)
        elif option == '6':
            option_6(cur)
        elif option == '7':
            break
        else:
            logging.warning('You did not choose a correct option.')
    connection.close()
def do_get_trigger_operating_mode(self):
    '''
    Gets the instrument trigger operating mode.

    Input:
        None
    Output:
        mode (string): 'AUTO', 'TRIG', 'GATE', 'BURS' or 'EWID'
    '''
    response = self._visainstrument.ask('INP:TRIG:MODE?')
    if response == 'AUTO':
        return 'AUTO'
    elif response == 'TRIGGER':
        return 'TRIG'
    elif response == 'GATE':
        return 'GATE'
    elif response == 'BURST':
        return 'BURS'
    elif response == 'EWIDTH':
        return 'EWID'
    else:
        logging.warning(
            'Trigger operating mode response not recognized: %s' % response)
def Server_background_scan():
    # The Chinese prompts are functional (selected by chinses_mode), so they
    # are kept and glossed in comments rather than replaced.
    dir_find = []
    if chinses_mode:
        target = input("请输入目标IP或域名:")  # "Enter the target IP or domain:"
        http_or_https = input("使用HTTP协议还是HTTPS协议(1 http,2 https)")  # "Use HTTP or HTTPS? (1 http, 2 https)"
    else:
        target = input("Enter the target IP or domain:")
        http_or_https = input("Do you want to use http or https? (1 http, 2 https)")
    if http_or_https == "1":
        http = True
    elif http_or_https == "2":
        http = False
    else:
        logging.warning("User did not choose between HTTP and HTTPS.")
        if chinses_mode:
            print("你没有选择是用http还是https")  # "You did not choose http or https."
        else:
            print("You did not choose whether to use HTTP or HTTPS.")
        return
    for i in progress(server_dir_dictionary):
        if http:
            r = requests.get("http://" + target + i, headers=headers)
        else:
            r = requests.get("https://" + target + i, headers=headers)
        if r.status_code == 200:
            dir_find.append(target + i)
    for i in dir_find:
        if chinses_mode:
            print("目录:" + i + " 响应为200")  # "dir: ... response is 200."
        else:
            print("dir:" + i + " response is 200.")
async def save(self):
    args = list(map(self.getValueOrDefault, self.__fields__))
    args.append(self.getValueOrDefault(self.__primary_key__))
    rows = await execute(self.__insert__, args)
    if rows != 1:
        logging.warning('failed to insert record: affected rows: %s' % rows)
def update_config(config_file):
    with open(config_file, 'r') as f:
        try:
            file_conf = json.load(f)
            config.update(file_conf)
        except Exception:
            logging.warning(f'JSON in {config_file} is not a valid JSON dict')
async def updateData(self):
    args = list(map(self.getValue, self.__fields__))
    args.append(self.getValue(self.__primary_key__))
    rows = await execute(self.__update__, args)
    if rows != 1:
        logging.warning(
            'failed to update by primary key: affected rows: %s' % rows)
def get_court_details(self, response):
    court_name = response.css('h4::text').re_first('In The (.+)')
    court_id = uuid.uuid4().hex[:5]
    if 'Supreme' in court_name:
        court_name_abbreviation = 'SCNG'
        court_url = 'https://api.firmtext.com/courts/scng/'
        court_slug = 'scng'
    elif 'Appeal' in court_name:
        court_name_abbreviation = 'ACNG'
        court_url = 'https://api.firmtext.com/courts/apng/'
        court_slug = 'apng'
    else:
        logging.warning(
            "Court is neither the Supreme Court nor the Appeal Court; check the get_court_details method.")
        return None
    return {
        'name_abbreviation': court_name_abbreviation,
        'url': court_url,
        'slug': court_slug,
        'id': court_id,
        'name': court_name
    }
def import_PV(path=path_PvGen):
    # Use the 'path' parameter instead of hard-coding the module default,
    # so callers can actually override the input file.
    try:
        absPath = gen_Path(path)
        totalPvGen = pd.read_csv(absPath, header=None)
        logging.info("PV Gen Successfully Imported from {}".format(path))
        return totalPvGen
    except Exception:
        logging.warning("PV Gen Input from {} Error".format(path))
def connect(device):
    try:
        sensor = serial.Serial(device)
    except Exception:
        logging.warning('Could not connect to sensor on {device}'.format(
            device=device))
        sensor = None
    return sensor
def import_Load(path=path_Load):
    # Use the 'path' parameter instead of hard-coding the module default.
    try:
        absPath = gen_Path(path)
        totalload = pd.read_csv(absPath, header=None, delimiter=';')
        logging.info("Load Successfully Imported from {}".format(path))
        return totalload
    except Exception:
        logging.warning("Load Input from {} Error".format(path))
def _order_query_status(self, order_field='-click_nums'):
    results = self._order_queryset(order_field)
    if isinstance(results, QuerySet):
        return results
    else:
        # Not a QuerySet: log whatever came back (e.g. an error message).
        logging.warning(results)
        return None
def __init__(self, _msg):
    if isinstance(_msg, tuple) and isinstance(_msg[0], str) and isinstance(
            _msg[1], list):
        Message.__init__(self, _msg)
    elif isinstance(_msg, str):
        Message.__init__(self, _msg)
    else:
        logging.warning("Error : Class Request Error > __init__")
def load_atoms_single(self, atoms):
    resids = []
    self.residues = []
    for atom in atoms:
        try:
            ires = resids.index(atom.resid)
        except ValueError:  # new residue
            self.residues.append(Residue(atom.resid))
            resids.append(atom.resid)
            ires = len(self.residues) - 1
        # load atoms into conformer 0
        self.residues[ires].conformers[0].atoms.append(atom)

    # Separate side-chain atoms from the backbone: BK atoms remain in
    # conformer 0, the rest go to conformer 1.
    for res in self.residues:
        conflist = [
            x.strip()
            for x in env.tpl[("CONFLIST", res.resname)].strip().split(",")
        ]
        if res.conformers:
            new_conf0 = []
            for atom in res.conformers[0].atoms:
                # find the first conformer type this atom fits
                for conftype in conflist:
                    if atom.name in env.atomnames[conftype]:
                        if conftype[-2:] == "BK":
                            # stays in this conformer
                            new_conf0.append(atom)
                        else:
                            if len(res.conformers) > 1:
                                res.conformers[1].atoms.append(atom)
                            else:
                                conf = Conformer()
                                conf.history = "%2s________" % conftype[-2:]  # last two characters
                                res.conformers.append(conf)
                                res.conformers[1].confname = conftype
                                res.conformers[1].resname = res.resname
                                res.conformers[1].atoms.append(atom)
                        break  # do not search other conformer types
            res.conformers[0].atoms = new_conf0

    # delete atoms that don't belong to conformer 1
    for res in self.residues:
        if len(res.conformers) > 1:
            confname = res.conformers[1].confname
            valid_atoms = env.atomnames[confname]
            conf1_atoms = []
            for atom in res.conformers[1].atoms:
                if atom.name in valid_atoms:
                    conf1_atoms.append(atom)
                else:
                    logging.warning(
                        " Deleted atom \"%s\" of %s because it doesn't fit into initial conformer."
                        % (atom.name, res.resname))
            # The filtered list was built but never stored in the original;
            # assign it back so the deletion actually takes effect.
            res.conformers[1].atoms = conf1_atoms
    return
def read_log(log_name, errors_level):
    l_lexer = lexer(RULES)
    lines, errors = 0, 0
    dict_data: Dict[str, list[float]] = collections.defaultdict(list)
    for line in lines_from_file(log_name):
        lines += 1
        try:
            tokens = l_lexer(line)
        except Exception:
            errors += 1
            logging.exception("Error in line '%s'", line)
            continue  # skip broken lines
        dict_for_data = {}
        field_idx = 0
        for re_match, token_type in tokens:
            if token_type == WSP:
                continue  # ignore whitespace
            elif token_type == NO_DATA:
                value = None  # replace NO_DATA with None
            elif token_type == RAW:
                value = re_match.group(1)  # group(i) returns the i-th parenthesized group
            elif token_type == QUOTED_STRING:
                value = re_match.group(1)  # strip the escaping from escaped quotes
            elif token_type == DATE:
                value = datetime.datetime.strptime(
                    re_match.group(1)[:-6], "%d/%b/%Y:%H:%M:%S")  # parse the date
            else:
                raise SyntaxError("Unknown token", token_type, re_match)
            field_name = LOG_STRUCTURE[field_idx]
            dict_for_data[field_name] = value
            field_idx += 1
        try:
            url = dict_for_data['request'].split()[1]
        except Exception:
            errors += 1
            continue
        dict_data[url].append(float(dict_for_data['request_time']))
    if errors_level is not None:
        # guard against division by zero on an empty file
        if lines and 100 * errors / lines > errors_level:
            logging.warning(
                f'Too many errors: {errors} errors from {lines} rows')
            return []
    return dict_data
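# A minimal usage sketch for read_log(), assuming the module-level RULES and
# LOG_STRUCTURE definitions referenced above; the file name and error
# threshold are hypothetical.
times_by_url = read_log('access.log', errors_level=10)  # returns [] if >10% of rows are broken
for url, times in dict(times_by_url).items():
    print(url, max(times))  # e.g. the worst request_time seen per URL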
def transcribe_long(self):
    self.upload_to_object_storage()
    voice_msg_temp_link = os.path.join(OBJECT_STORAGE_API_LINK, BUCKET_NAME,
                                       f'{self.file_id}.oga')
    req_params = {
        "config": {
            "specification": {
                "languageCode": "ru-RU",
                "profanityFilter": "false",
                "audioEncoding": "OGG_OPUS"
            }
        },
        "audio": {
            "uri": voice_msg_temp_link
        }
    }
    transcribation_request = requests.post(
        'https://transcribe.api.cloud.yandex.net/speech/stt/v2/longRunningRecognize',
        json=req_params,
        headers=API_AUTH_HEADER)
    trans_req_json = json.loads(transcribation_request.content)
    logging.debug(
        f'Uploaded and sent transcription request for long voice message, '
        f'status: {transcribation_request.status_code}, id: {trans_req_json["id"]}')
    retries = 0
    retries_amount = 15
    while retries < retries_amount:
        time.sleep(3)
        get_results_request = requests.get(
            f'https://operation.api.cloud.yandex.net/operations/{trans_req_json["id"]}',
            headers=API_AUTH_HEADER)
        results = json.loads(get_results_request.content)
        logging.debug(
            f'Checked results of transcription, is done? - {results["done"]}')
        if results['done'] is True:
            self.join_long_text(results['response']['chunks'])
            self.delete_from_object_storage()
            break
        else:
            retries += 1
    if retries >= retries_amount:
        logging.warning('Retries of transcribing long message exceeded')
        self.delete_from_object_storage()
def gather_and_send_data():
    counter = 0
    clickhouseaction = ClickHouseActions(simulate_drops=True)
    try:
        clickhouseaction.gather_data(limit=None)
        counter = clickhouseaction.send_data()
    except Exception as e:
        logging.warning(f'gather_and_send_data: {e}')
    return f'{counter} records sent successfully'
def getPrice(self, symbol, side):
    if side == "Buy":
        return self.Offers.get(symbol, 0)
    elif side == "Sell":
        return self.Bids.get(symbol, 0)
    # Reached only when 'side' is neither "Buy" nor "Sell".
    logging.warning("Not able to find a pricer for symbol " + symbol)
    return 0
def import_Prices(path=path_Prices):
    # Use the 'path' parameter instead of hard-coding the module default.
    try:
        absPath = gen_Path(path)
        totalPrices = pd.read_csv(absPath, sep=';')
        logging.info("Prices Successfully Imported from {}".format(path))
        return totalPrices
    except Exception:
        logging.warning("Price Input from {} Error".format(path))
def read(sensor):
    if sensor is not None:
        try:
            data = [sensor.read() for _ in range(10)]
        except Exception:
            logging.warning('Could not read data')
            data = None
    else:
        data = None
    return data
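# A minimal usage sketch tying connect() and read() together; the device
# path is hypothetical and depends on the host system.
sensor = connect('/dev/ttyUSB0')  # None if the serial port cannot be opened
data = read(sensor)               # None when sensor is None or reading fails
if data is not None:
    print(data)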
def _check(self):
    """Check the connection."""
    try:
        self.conn.ping()
    except Exception:
        # log.msg("MySQL connection dropped, reconnecting.", level=log.WARNING)
        logging.warning("MySQL connection dropped, reconnecting.")
        self.conn.close()
        self.conn = pymysql.connect(**MYSQL_LOCALHOST['mysql'])
        self.cursor = self.conn.cursor()
def add_game(self, _game):
    try:
        if isinstance(_game, Game.Game):
            self.__active_game_list.append(_game)
        else:
            logging.warning(
                'Class:GameManager:add_game - input parameter must be a Game')
    except Exception:
        logging.exception('Class:GameManager:add_game')