import os

import chardet
import toml


def load_config(path: str = DEFAULT_PATH) -> dict:
    """Read and validate the configuration.

    Args:
        path: Path to the configuration file; defaults to config.toml.

    Returns:
        dict: The validated configuration dictionary.
    """
    global CFG
    try:
        logger.debug('Reading configuration')
        with open(path, 'rb') as f:
            content = f.read()
        # chardet reports the detected charset under the 'encoding' key,
        # not 'encode'; fall back to utf-8 when detection fails.
        encoding = chardet.detect(content).get('encoding') or 'utf-8'
        raw_cfg = dict(toml.loads(content.decode(encoding)))
        CFG = verify_config(raw_cfg)
        mode = os.environ.get('mode', 'release').lower()
        level = 0 if mode == 'debug' else 20  # 0 = NOTSET (everything), 20 = INFO
        init_logger(level)
        logger.debug('Configuration validated')
        return CFG
    except FileNotFoundError:
        logger.error(f'[*] Configuration file [{path}] does not exist')
        raise
    except ValueError as e:
        logger.error(f'[*] Configuration validation failed [{e}]', exc_info=True)
        raise  # re-raise so callers never receive None
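# verify_config is referenced above but not defined in this snippet. A minimal
# sketch of what it might look like, assuming a hypothetical required 'server'
# table with 'host' and 'port' keys; load_config's ValueError handler expects
# validation failures to surface as ValueError.
def verify_config(raw_cfg: dict) -> dict:
    if 'server' not in raw_cfg:
        raise ValueError('missing required section: server')
    server = raw_cfg['server']
    if 'host' not in server or 'port' not in server:
        raise ValueError('server section must define host and port')
    return raw_cfg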
import sys


def init():
    try:
        # Initialize the logging module
        init_logger()
        logger.info('nms_starter start up!')
        # Initialize the importer module
        init_importer()
        # Initialize the configuration module
        init_config()
        logger.info('init config ok!')
        # Initialize the thread module
        init_thread()
    except Exception as e:
        logger.error('nms_starter init error! %s', e)
        sys.exit(-1)
import os
import shutil


# The "notest_" prefix keeps pytest from collecting this test by default.
def notest_split_logfile_by_level(app, client):
    from utils.log import init_logger

    log_dir = '/tmp/pytest/'
    logger = init_logger('mylogger', 'debug', True, 'mylogger', log_dir, True)

    def last_line(level: str) -> str:
        with open(os.path.join(log_dir, f'mylogger.{level}.log')) as logfile:
            return logfile.readlines()[-1]

    # Each level must land in its own file and nowhere lower.
    logger.debug('msg')
    assert '[DEBUG]' in last_line('debug')

    logger.info('msg')
    assert '[INFO]' in last_line('info')
    assert '[INFO]' not in last_line('debug')

    logger.warning('msg')
    assert '[WARNING]' in last_line('warning')
    assert '[WARNING]' not in last_line('info')
    assert '[WARNING]' not in last_line('debug')

    shutil.rmtree('/tmp/pytest')
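# The test above exercises an init_logger that writes each level to its own
# file. A minimal sketch of one way to implement that split, using a
# logging.Filter per handler; the real utils.log signature may differ.
import logging


class ExactLevelFilter(logging.Filter):
    def __init__(self, level: int):
        super().__init__()
        self.level = level

    def filter(self, record: logging.LogRecord) -> bool:
        # Pass only records at exactly this level
        return record.levelno == self.level


def make_split_logger(name: str, log_dir: str) -> logging.Logger:
    logger = logging.getLogger(name)
    logger.setLevel(logging.DEBUG)
    for level in (logging.DEBUG, logging.INFO, logging.WARNING):
        level_name = logging.getLevelName(level).lower()
        handler = logging.FileHandler(f'{log_dir}/{name}.{level_name}.log')
        handler.setFormatter(logging.Formatter('[%(levelname)s] %(message)s'))
        handler.addFilter(ExactLevelFilter(level))
        logger.addHandler(handler)
    return logger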
import boto3
import botocore
from prettytable import PrettyTable

from utils.log import init_logger

logger = init_logger(__name__, testing_mode=False)

s3 = boto3.resource('s3')
public = []
denied = []
pubTable = PrettyTable()
deniedTable = PrettyTable()

# ACL grantee URI that marks a bucket as world-readable
ALL_USERS_URI = 'http://acs.amazonaws.com/groups/global/AllUsers'


def check_if_public():
    pubTable.field_names = ['Public_Buckets']
    deniedTable.field_names = ['Access_Denied']
    for bucket in s3.buckets.all():
        try:
            for grant in s3.BucketAcl(bucket.name).grants:
                grantee = grant['Grantee']
                if grantee['Type'] == 'Group' and grantee.get('URI') == ALL_USERS_URI:
                    # bucket.name is already the bare name; the original's
                    # chained str.strip() calls could corrupt names starting
                    # or ending with the stripped characters.
                    public.append([bucket.name])
        except botocore.exceptions.ClientError:
            denied.append([bucket.name])
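# check_if_public() fills the `public` and `denied` lists, but the two tables
# are never populated in this snippet. A plausible follow-up (not part of the
# original) that renders them:
def print_report():
    for row in public:
        pubTable.add_row(row)
    for row in denied:
        deniedTable.add_row(row)
    print(pubTable)
    print(deniedTable)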
parser.add_argument("-kernel_sizes", default=config.kernel_sizes, type=list) parser.add_argument("-max_char_len", default=config.max_char_len, type=int) parser.add_argument('-save_dir', default='/epoch_{}_batch_{}_ch_in_{}_ch_out_{}') # parser.add_argument("-is_distill", type=str2bool, nargs='?',const=True,default=False) args = parser.parse_args() log_path = './result' + args.save_dir.format( args.epoch, args.batch_size, args.channel_in, args.channel_out, ) init_logger(log_path, '/log.txt') tb_writer = SummaryWriter('{}/runs'.format(log_path)) # Load Entity Dictionary, Train and Test data vocab_size = len(torch.load('./data/word_vocab.pt')) char_vocab_size = len(torch.load('./data/char_vocab.pt')) pos_vocab_size = len(torch.load('./data/pos_vocab.pt')) entitiy_to_index = torch.load('./data/processed_data/entity_to_index.pt') num_class = len(entitiy_to_index) print("Load processed data...") # Load process train and validation data train_dataset = NERDataset('tr') valid_dataset = NERDataset('valid') # Build train_and validation loaders which generate data with batch_size
# -*- coding: utf-8 -*-
import sys
import logging

from utils import log
from utils import utils
from utils.INIParser import INIParser
from weibo import crawler as weibo_crawler

log.init_logger('crawler')
logger = logging.getLogger('crawler')
# Redirect stderr so uncaught tracebacks land in the crawler log
sys.stderr = log.ErrOutPutToLogger('crawler')


def main():
    ini_filename = utils.get_path_with_base_file(__file__, 'data/input.ini')
    config = INIParser.read(ini_filename)
    if config['default']['crawler_type'] == 'weibo':
        weibo_crawler.start(config)
    # logger.info(ini_filename)


if __name__ == '__main__':
    main()
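# ErrOutPutToLogger is imported from utils.log but not shown. A minimal sketch
# of such a file-like adapter, assuming it forwards stderr writes to the named
# logger (the real class may buffer or format differently):
class ErrOutPutToLoggerSketch:
    def __init__(self, name: str):
        self.logger = logging.getLogger(name)

    def write(self, message: str) -> None:
        message = message.rstrip()
        if message:
            self.logger.error(message)

    def flush(self) -> None:
        # stderr consumers may call flush(); nothing buffered here
        pass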
import requests
import json
import csv
import os.path

from flask import Flask, request

from utils import log
from utils.characters_csv import create_csv
from utils.normalize_query import normalize

logger = log.init_logger('Flask_app', testing_mode=False)
app = Flask(__name__)


@app.route('/')
@app.route('/healthcheck')
def health_check():
    logger.info("health_check")
    res = requests.get('https://rickandmortyapi.com/api/character/')
    return res.reason, res.status_code


@app.route('/all')
def fetch_all():
    characters = []
    without_cache = request.args.get("without_cache", 'False')
    # .lower() must be called: the original compared the bound method itself,
    # so the membership test could never match.
    if not os.path.exists('the_right_characters.csv') and without_cache.lower() not in [
            'true', '1', 't', 'y', 'yes']:
        create_csv()
    with open('the_right_characters.csv') as csv_file:
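        # The route body is truncated in the original snippet. A plausible
        # continuation, purely illustrative: read the cached rows back and
        # return them as JSON.
        reader = csv.reader(csv_file)
        for row in reader:
            characters.append(row)
    return json.dumps(characters)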
import requests
import json
import csv

from utils import log

logger = log.init_logger('APP', testing_mode=False)


def create_csv():
    the_characters = get_all_characters()
    with open('the_right_characters.csv', 'w', newline='') as csv_file:
        writer = csv.writer(csv_file)
        for character in the_characters:
            writer.writerow(character)
    logger.info("File created")


def get_all_characters():
    the_characters = []
    # The API paginates; is_next holds the next page URL until exhausted.
    # The original '?page1' is missing the '='; the query parameter is 'page'.
    is_next = 'https://rickandmortyapi.com/api/character/?page=1'
    while is_next:
        logger.info(f"characters in page {is_next}:")
        characters_in_page, is_next = get_page(is_next)
        for character in characters_in_page:
            logger.info(character)
            the_characters.append(character)
    return the_characters


def get_page(url: str) -> tuple:  # returns (characters, next_page_url)
    try:
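        # The original snippet is truncated after `try:`. A minimal sketch of
        # what it likely does, assuming the standard Rick and Morty API
        # response shape ({'info': {'next': ...}, 'results': [...]}):
        res = requests.get(url)
        res.raise_for_status()
        data = res.json()
        return data['results'], data['info']['next']
    except requests.RequestException as e:
        logger.error(f'request to {url} failed: {e}')
        return [], None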
def initialisation():
    log.init_logger()