def get_handlers(self, log_name):
    logger_dir = LOGGER_CONF["path"]
    logger_fmt = LOGGER_CONF["format"]
    # logger_size = int(LOGGER_CONF["size"])
    logger_level = LOGGER_CONF["level"].upper()
    if not os.path.exists(logger_dir):
        os.makedirs(logger_dir)

    def log_type(record, handler):
        log = logger_fmt.format(
            date=record.time,                             # log timestamp
            level=record.level_name,                      # log level
            filename=os.path.split(record.filename)[-1],  # file name
            func_name=record.func_name,                   # function name
            lineno=record.lineno,                         # line number
            msg=record.message,                           # log message
            channel=record.channel,                       # channel
            pid=self._pid,
            ppid=self._ppid
        )
        return log

    # Print logs to the screen
    log_std = ColorizedStderrHandler(bubble=True, level=logger_level)
    log_std.formatter = log_type
    # Write logs to a file
    log_file = TimedRotatingFileHandler(
        os.path.join(logger_dir, '{}.log'.format(log_name)),
        date_format='%Y-%m-%d',
        rollover_format='{basename}_{timestamp}{ext}',
        bubble=True, level=logger_level, encoding='utf-8')
    log_file.formatter = log_type
    logbook.set_datetime_format("local")
    return [log_std, log_file]
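# The two-argument formatter used above is logbook's plug-in point: logbook
# calls handler.formatter(record, handler) for each record and writes the
# returned string. A minimal, self-contained sketch of that mechanism
# (names here are illustrative, not taken from the snippet above):
import sys
from logbook import Logger, StreamHandler

def demo_formatter(record, handler):
    # Any callable taking (record, handler) and returning a string works.
    return '[{}] [{}] {}'.format(record.time, record.level_name, record.message)

demo_handler = StreamHandler(sys.stdout)
demo_handler.formatter = demo_formatter
with demo_handler.applicationbound():
    Logger('demo').info('formatted by demo_formatter')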
def __init__(self, conf):
    self.conf = conf
    handler = TimedRotatingFileHandler(conf.log_file, date_format="%Y-%m-%d")
    handler.push_application()
    self.logger = Logger("Firetower-server")
    self.queue = redis_util.get_redis_conn(
        host=conf.redis_host, port=conf.redis_port, redis_db=conf.redis_db)
    self.classifier = classifier.Levenshtein()
    self.last_archive = None
def __init__(self):
    handler = TimedRotatingFileHandler('../logs/filesystem_recovery.log')
    handler.push_application()
    self.logger = Logger(name='filesystem recovery')
    self.path_lists, regex_express = self.get_conf()
    self.reg_express = re.compile(regex_express)
    self.tmp_path = os.path.join(BASE_DIR, 'data')
    self.file_title = '.walden'
def defLogging():
    global debug
    global logPath
    global loggingLevel
    global logging_setup
    try:
        if debug:
            logging_setup = NestedSetup([
                # make sure we never bubble up to the stderr handler
                # if we run out of setup handling
                NullHandler(),
                StreamHandler(
                    sys.stdout,
                    bubble=False,
                    level=loggingLevel
                ),
                TimedRotatingFileHandler(
                    logPath,
                    level=0,
                    backup_count=3,
                    bubble=True,
                    date_format='%Y-%m-%d',
                ),
            ])
        else:
            logging_setup = NestedSetup([
                # make sure we never bubble up to the stderr handler
                # if we run out of setup handling
                NullHandler(),
                FingersCrossedHandler(
                    TimedRotatingFileHandler(
                        logPath,
                        level=0,
                        backup_count=3,
                        bubble=False,
                        date_format='%Y-%m-%d',
                    ),
                    action_level=ERROR,
                    buffer_size=1000,
                    # pull_information=True,
                    # reset=False,
                )
            ])
    except (KeyboardInterrupt, SystemExit):
        raise
    except Exception:
        print("Critical error attempting to set up logging. Falling back to console only.")
        logging_setup = NestedSetup([
            # make sure we never bubble up to the stderr handler
            # if we run out of setup handling
            NullHandler(),
            StreamHandler(
                sys.stdout,
                bubble=False
            )
        ])
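# FingersCrossedHandler (the non-debug branch above) buffers records and only
# flushes them to the wrapped handler once a record at action_level arrives.
# A minimal sketch of that behaviour, with a plain StreamHandler standing in
# for the rotating file handler:
import sys
from logbook import Logger, StreamHandler, FingersCrossedHandler, ERROR

inner = StreamHandler(sys.stdout, bubble=False)
crossed = FingersCrossedHandler(inner, action_level=ERROR, buffer_size=1000)
with crossed.applicationbound():
    log = Logger('demo')
    log.info('buffered; nothing is printed yet')
    log.error('triggers the flush; both records now appear')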
def __init__(self):
    handler = TimedRotatingFileHandler('../logs/images_manager.log')
    handler.push_application()
    self.logger = Logger(name='Docker Images Manage Api', level=11)
    # Environment variables must be strings, not floats
    os.environ['DOCKER_API_VERSION'] = '1.39'
    self.logger.debug(os.environ.get('DOCKER_API_VERSION'))
    DOCKER_API_VERSION = '1.39'
    self.docker_client = docker.from_env()
def __init__(self):
    handler = TimedRotatingFileHandler('../logs/nodes_discovery.log')
    handler.push_application()
    # logbook's canonical level names are upper-case
    self.logger = Logger(name='nodes discovery', level='INFO')
    (self.node_hosts, self.nodes_port, self.file_sd_filename,
     self.nodes_file_backup_name, self.exclude_file,
     self.metric_filename, self.metric_store_path) = self.get_conf()
    self.nodes = {}
    self.ips = {}
    self.nodes_list = []
    self.ips_list = []
def __init__(self):
    handler = TimedRotatingFileHandler('../logs/filesystem_recovery.log')
    handler.push_application()
    self.logger = Logger(name='filesystem recovery')
    self.node_hosts, self.nodes_port, self.file_sd_filename = self.get_conf()
    self.nodes = {}
    self.ips = {}
    self.nodes_list = []
    self.ips_list = []
def __init__(self):
    handler = TimedRotatingFileHandler('../logs/nodes_metrics.log')
    handler.push_application()
    # logbook's canonical level names are upper-case
    self.logger = Logger(name='nodes metrics', level='INFO')
    (self.nodes_filename, self.nodes_file_backup_name, self.exclude_file,
     self.url, self.query_all_ip, self.query_current_ip) = self.get_conf()
    self.nodes = {}
    self.ips = {}
    self.nodes_list = []
    self.ips_list = []
def __init__(self, conf):
    """conf: dict, yaml parameters."""
    self.conf = conf
    handler = TimedRotatingFileHandler(
        conf.log_file, date_format='%Y-%m-%d')
    handler.push_application()
    self.logger = Logger('Firetower-admin')
    self.queue = redis_util.get_redis_conn(
        host=conf.redis_host,
        port=conf.redis_port,
        redis_db=conf.redis_db
    )
    self.classifier = classifier.Levenshtein()
def print_handler():
    # Log path: create a 'log' directory under the project root
    Log_Dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'log')
    if not os.path.exists(Log_Dir):
        os.makedirs(Log_Dir)
    # Handler that prints to the screen
    user_std_handler = ColorizedStderrHandler(bubble=True)
    user_std_handler.formatter = log_formatter
    # Handler that writes to a file
    file_handler = TimedRotatingFileHandler(
        os.path.join(Log_Dir, "%s.log" % "test_log"),
        date_format="%Y%m%d", bubble=True)
    file_handler.formatter = log_formatter
    return user_std_handler, file_handler
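# A hedged wiring sketch for print_handler(): the returned handlers can be
# attached directly to a Logger's handler list (log_formatter is assumed to
# be a module-level formatter callable, as in the function above).
from logbook import Logger

user_std_handler, file_handler = print_handler()
test_log = Logger('test_log')
test_log.handlers = [user_std_handler, file_handler]
test_log.info('written to stderr and to log/test_log.log')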
def __init__(self):
    cp = ConfigParser.SafeConfigParser()
    with codecs.open('config/config.ini', 'r', encoding='utf-8') as f:
        cp.readfp(f)
    # self.dir = cp.get('xml', 'dir').strip()
    # self.xml_searchids_file = cp.get('files', 'xml_searchids_file').strip()
    self.pool = threadpool.ThreadPool(30)
    self.files_rights = {}
    self.lock = threading.Lock()
    self.q = Queue.Queue()
    handler = TimedRotatingFileHandler('../logs/get_rights.log')
    handler.push_application()
    self.logger = Logger(name='get rights')
def __init__(self, conf):
    self.conf = conf
    handler = TimedRotatingFileHandler(
        conf.log_file, date_format='%Y-%m-%d')
    handler.push_application()
    self.logger = Logger('Firetower-server')
    self.queue = redis_util.get_redis_conn(
        host=conf.redis_host,
        port=conf.redis_port,
        redis_db=conf.redis_db
    )
    self.classifiers = []
    for classifier_name in conf.class_order:
        self.classifiers.append(getattr(classifier, classifier_name)())
    self.last_archive = None
def setup():
    if not os.path.exists(LOG_FILE_DIR):
        os.mkdir(LOG_FILE_DIR)
    file_handler = TimedRotatingFileHandler(
        filename=LOG_FILE_PATH,
        backup_count=config.get_logging_backup_count())
    stream_handler = StreamHandler(sys.stdout, level='CRITICAL')
    stream_handler.format_string = '{record.level_name}: {record.channel}: {record.message}'
    file_handler.push_application()
    stream_handler.push_application()
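# Usage sketch for setup() above. Note the push order: the stream handler is
# pushed last, so it sees records first; with logbook's default bubble=False,
# CRITICAL records stop at the stream handler while lower-level records fall
# through to the file handler (an inference from the code above, not a
# documented contract of this module).
from logbook import Logger

setup()
Logger('app').critical('printed to stdout')
Logger('app').info('written to the rotating file')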
def setup(name, path='log', enable_debug=False):
    """
    Prepare a NestedSetup.

    :param name: the channel name
    :param path: the path where the logs will be written
    :param enable_debug: do we want to save the messages at the DEBUG level
    :return: a NestedSetup
    """
    path_tmpl = os.path.join(path, '{name}_{level}.log')
    info = path_tmpl.format(name=name, level='info')
    warn = path_tmpl.format(name=name, level='warn')
    err = path_tmpl.format(name=name, level='err')
    crit = path_tmpl.format(name=name, level='crit')
    # a nested handler setup can be used to configure more complex setups
    setup = [
        # make sure we never bubble up to the stderr handler
        # if we run out of setup handling
        NullHandler(),
        # then write messages that are at least info to a logfile
        TimedRotatingFileHandler(info, level='INFO', encoding='utf-8',
                                 date_format='%Y-%m-%d'),
        # then write messages that are at least warnings to a logfile
        TimedRotatingFileHandler(warn, level='WARNING', encoding='utf-8',
                                 date_format='%Y-%m-%d'),
        # then write messages that are at least errors to a logfile
        TimedRotatingFileHandler(err, level='ERROR', encoding='utf-8',
                                 date_format='%Y-%m-%d'),
        # then write messages that are at least critical errors to a logfile
        TimedRotatingFileHandler(crit, level='CRITICAL', encoding='utf-8',
                                 date_format='%Y-%m-%d'),
    ]
    if enable_debug:
        debug = path_tmpl.format(name=name, level='debug')
        setup.insert(1, TimedRotatingFileHandler(debug, level='DEBUG',
                                                 encoding='utf-8',
                                                 date_format='%Y-%m-%d'))
    if src_server is not None and smtp_server is not None \
            and smtp_port != 0 and len(dest_mails) != 0:
        mail_tmpl = '{name}_error@{src}'
        from_mail = mail_tmpl.format(name=name, src=src_server)
        subject = 'Error in {}'.format(name)
        # errors should then be delivered by mail and also be kept
        # in the application log, so we let them bubble up.
        setup.append(MailHandler(from_mail, dest_mails, subject,
                                 level='ERROR', bubble=True,
                                 server_addr=(smtp_server, smtp_port)))
    return NestedSetup(setup)
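# A hedged usage sketch for the setup() factory above ('mychannel' is an
# arbitrary example name): the returned NestedSetup binds like a single
# handler, and a WARNING record then lands in log/mychannel_warn.log.
from logbook import Logger

nested = setup('mychannel', path='log')
with nested.applicationbound():
    Logger('mychannel').warn('written to the warn logfile')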
def __init__(self):
    handler = TimedRotatingFileHandler('../logs/nodes_discovery.log')
    handler.push_application()
    self.logger = Logger(name='nodes discovery')
    self.node_hosts, self.nodes_port, self.file_sd_filename = self.get_conf()
    self.nodes = {}
    self.ips = {}
def __init__(self):
    # Script logger
    self.run_log = Logger("script_log")
    # Directory where log files are stored
    self.LOG_DIR = log_path
    if not os.path.exists(self.LOG_DIR):
        os.makedirs(self.LOG_DIR)

    def log_type(record, handler):
        log = "[{date}] [{level}] [{filename}] [{func_name}] [{lineno}] {msg}".format(
            date=record.time,                             # log timestamp
            level=record.level_name,                      # log level
            filename=os.path.split(record.filename)[-1],  # file name
            func_name=record.func_name,                   # function name
            lineno=record.lineno,                         # line number
            msg=record.message)                           # log message
        return log

    # Print logs to the screen
    self.log_std = ColorizedStderrHandler(bubble=False)
    self.log_std.formatter = log_type
    # Write logs to a file
    self.log_file = TimedRotatingFileHandler(
        os.path.join(self.LOG_DIR, '%s.log' % 'log'),
        date_format='%Y-%m-%d', bubble=True, encoding='utf-8')
    self.log_file.formatter = log_type
def handle(level):
    return TimedRotatingFileHandler(
        path('torabot.%s.log' % level),
        date_format='%Y-%m-%d',
        level=getattr(logbook, level.upper()),
        bubble=True,
    )
def basicConfig(outputFile=None, level='INFO', redirectLogging=False,
                colorized=False, loop=None):
    if outputFile:
        if platform.system() == 'Linux' and 0:  # branch deliberately disabled
            handler = AsyncDatedFileHandler(outputFile, date_format='%Y-%m-%d')
        else:
            handler = TimedRotatingFileHandler(outputFile, level=level,
                                               bubble=True,
                                               date_format='%Y-%m-%d')
    else:
        if not colorized:
            handler = StreamHandler(sys.stderr, level=level, bubble=True)
        else:
            handler = ColorizedHandler(level=level, bubble=True)
            handler.force_color()
    handler.format_string = mainFormatString
    handler.push_application()
    if redirectLogging:
        redirect_logging()
        redirect_warnings()
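# Hedged usage of basicConfig() above; 'app.log' is an arbitrary path, and
# mainFormatString is a module-level format string the function assumes.
from logbook import Logger

basicConfig(outputFile='app.log', level='INFO', redirectLogging=True)
Logger('app').info('written to app.log through the pushed handler')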
def logger(file_name=LogName.DEFAULT):
    # Write logs to a file
    log_file = TimedRotatingFileHandler(
        os.path.join(LOG_DIR, '%s.log' % file_name),
        date_format='%Y-%m-%d',
        bubble=True,
        rollover_format='{basename}_{timestamp}{ext}',
        encoding='utf-8',
    )
    log_file.formatter = log_type
    logbook.set_datetime_format('local')
    run_log = Logger('script_log')
    run_log.handlers = []
    run_log.handlers.append(log_file)
    return run_log
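# Hedged usage of logger() above; LogName, LOG_DIR and log_type are
# module-level names the factory assumes, and 'nightly_job' is arbitrary.
job_log = logger('nightly_job')
job_log.info('written to LOG_DIR/nightly_job.log, rotated daily')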
def get_logger(name='LOGBOOK', log_path='', file_log=False):
    logbook.set_datetime_format('local')
    ColorizedStderrHandler(bubble=True).push_application()
    log_dir = os.path.join('log') if not log_path else log_path
    if not os.path.exists(log_dir):
        os.makedirs(log_dir)
    if file_log:
        TimedRotatingFileHandler(
            os.path.join(log_dir, '%s.log' % name.lower()),
            date_format='%Y-%m-%d', bubble=True).push_application()
    return Logger(name)
def handle(level, bubble=True):
    return TimedRotatingFileHandler(
        path('torabot.%s.log' % level),
        date_format='%Y-%m-%d',
        format_string=('[{record.time:%Y-%m-%d %H:%M:%S}] {record.level_name}: '
                       '{record.channel}: {record.message} {record.extra[context]}'),
        level=getattr(logbook, level.upper()),
        bubble=bubble,
    )
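# A hedged sketch composing the per-level handle() factories above into one
# NestedSetup (path() is the module's own helper; the level names map onto
# logbook's INFO/WARNING/ERROR constants via level.upper()):
from logbook import NestedSetup, NullHandler

log_setup = NestedSetup(
    [NullHandler()] + [handle(level) for level in ('info', 'warning', 'error')])
with log_setup.applicationbound():
    pass  # application code runs here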
def rotating_logger(name: str, level=INFO,
                    folder: str = default_path('test_logs')) -> Logger:
    set_datetime_format('local')
    TimedRotatingFileHandler(f'{folder}/{name}.log', date_format='%Y-%m-%d',
                             bubble=True, level=level,
                             backup_count=60).push_application()
    return Logger(name)
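# Hedged usage of rotating_logger(); 'worker' is an arbitrary channel name.
# With backup_count=60, logbook prunes rotated files beyond the 60 newest.
worker_log = rotating_logger('worker')
worker_log.info('rotates daily, keeping 60 backups')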
def generate_logger():
    # Logger name
    logger = Logger(PROJECT_NAME_EN, level=LOG_LEVEL)
    logbook.set_datetime_format("local")
    logger.handlers = []
    # Write logs to a file
    log_file = TimedRotatingFileHandler(
        os.path.join(LOG_DIR, '%s.log' % PROJECT_NAME_EN),
        date_format=DAY_FORMAT, bubble=True, encoding='utf-8')
    log_file.formatter = log_type
    logger.handlers.append(log_file)
    if DEBUG:
        # Also print logs to the screen
        log_std = ColorizedStderrHandler(bubble=True)
        log_std.formatter = log_type
        logger.handlers.append(log_std)
    return logger
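# Hedged usage sketch; PROJECT_NAME_EN, LOG_LEVEL, LOG_DIR, DAY_FORMAT and
# DEBUG are module-level settings generate_logger() assumes.
logger = generate_logger()
logger.info('goes to the rotating file, and to stderr when DEBUG is set')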
def __init__(self, log_dir=BASE_DIR, log_name='log.log', backup_count=10,
             log_type=log_type, stdOutFlag=False):
    if not os.path.exists(log_dir):
        os.mkdir(log_dir)
    self.log_dir = log_dir
    self.backup_count = backup_count
    handler = TimedRotatingFileHandler(
        filename=os.path.join(self.log_dir, log_name),
        date_format='%Y-%m-%d', backup_count=self.backup_count)
    self.handler = handler
    if log_type is not None:
        handler.formatter = log_type
    handler.push_application()
    if not stdOutFlag:
        return
    handler_std = ColorizedStderrHandler(bubble=True)
    if log_type is not None:
        handler_std.formatter = log_type
    handler_std.push_application()
def get_logger(logname):
    user_log = logbook.Logger('log')
    logbook.set_datetime_format("local")  # use local time for timestamps
    # Print logs to the screen
    log_std = ColorizedStderrHandler(bubble=True)
    log_std.formatter = log_type
    # Write logs to a date-rotated file (file name carries the date)
    log_file = TimedRotatingFileHandler(
        os.path.join(full_path, '%s.log' % logname),
        date_format='%Y-%m-%d', bubble=True, encoding='utf-8')
    log_file.formatter = log_type
    user_log.handlers = []
    user_log.handlers.append(log_file)
    user_log.handlers.append(log_std)
    return user_log
def rotating_logger_with_shell(
        name: str, stream_level=DEBUG, file_level=DEBUG,
        folder: str = default_path('test_logs')) -> Logger:
    set_datetime_format('local')
    StreamHandler(sys.stdout, level=stream_level, bubble=True).push_application()
    TimedRotatingFileHandler(f'{folder}/{name}.log', date_format='%Y-%m-%d',
                             bubble=True, level=file_level,
                             backup_count=60).push_application()
    return Logger(name)
def __init__(self, clazz):
    logbook.set_datetime_format("local")
    self.serverName = clazz.__name__[clazz.__name__.rfind('.') + 1:]
    if not os.path.exists(log_dir):
        os.makedirs(log_dir)
    self.log_file = TimedRotatingFileHandler(
        os.path.join(log_dir, '%s.log' % self.serverName),
        date_format='%Y-%m-%d', bubble=True, encoding='utf-8')
    self.log_std = ColorizedStderrHandler(bubble=True)
    self.log = Logger(self.serverName)
    self.__init_logger()
    self.__setting()
def __init__(self, logname, toscreen=False):
    # Log name
    self.logname = logname
    self.toscreen = toscreen
    # Log directory
    self.LOG_DIR = self.setpath()
    # Use local time for timestamps
    logbook.set_datetime_format("local")
    # Terminal output format
    self.log_standard = ColorizedStderrHandler(bubble=True)
    self.log_standard.formatter = self.logformat
    # File output format
    self.log_file = TimedRotatingFileHandler(
        os.path.join(self.LOG_DIR, '{}.log'.format(self.logname)),
        date_format='%Y-%m-%d', bubble=True, encoding='utf-8')
    self.log_file.formatter = self.logformat
    # Start logging
    self.log = Logger("SatanLogging")
    self.logrun()
                     'There are %s nodes stopped now' % stop_nodes)
    self.logger.info('************************************')
    return stop_nodes, failed_hosts


if __name__ == "__main__":
    url = 'https://prom.demo.com/prome/api/v1/query?'
    query_current_ip = 'query=node_virtual_type'
    # query_current_ip = 'query=avg({__name__=~"node_disk_read_time_(ms|seconds_total)",job="nodes",device=~"(dm-|sd[a-z]).*$"}) by (instance_ip)'
    # query_current_ip = 'query=node_filesystem_inode_used'
    # query_all_ip = 'query=avg(up{job=~"nodes"}) by (instance_ip)'
    query_all_ip = 'query=up{job="nodes"}'
    handler = TimedRotatingFileHandler('../logs/test.log')
    handler.push_application()
    logger = Logger(name='test')
    nodes_metrics = NodesMetrics()
    # get_lost_nodes()
    # get_lost_nodes_file()
    lost_nodes_by_discovery, failed_hosts = nodes_metrics.get_lost_nodes_discovery()
        func_name=record.func_name,
        lineno=record.lineno,
        msg=record.message)
    return log


LOG_DIR = os.path.join("Log")
if not os.path.exists(LOG_DIR):
    os.makedirs(LOG_DIR)

log_std = ColorizedStderrHandler()
log_std.formatter = log_type
log_file = TimedRotatingFileHandler(
    os.path.join(LOG_DIR, '%s.log' % 'log'),
    date_format='%Y-%m-%d', bubble=True, encoding='utf-8', level=ERROR)
log_file.formatter = log_type


def init_logger():
    """Write error-level info to the log file by default.

    Return the instance of the logbook logger.
    """
    filename = inspect.getframeinfo(inspect.currentframe().f_back).filename
    logger = Logger(filename)
    logbook.set_datetime_format("local")
    logger.handlers = []
    logger.handlers.append(log_file)
    logger.handlers.append(log_std)
    return logger
    config.logPath = os.path.join(config.savePath, savePath_filename)
    try:
        if options.debug:
            logging_mode = "Debug"
            logging_setup = NestedSetup([
                # make sure we never bubble up to the stderr handler
                # if we run out of setup handling
                NullHandler(),
                StreamHandler(sys.stdout, bubble=False, level=options.logginglevel),
                TimedRotatingFileHandler(
                    config.logPath,
                    level=0,
                    backup_count=3,
                    bubble=True,
                    date_format='%Y-%m-%d',
                ),
            ])
        else:
            logging_mode = "User"
            logging_setup = NestedSetup([
                # make sure we never bubble up to the stderr handler
                # if we run out of setup handling
                NullHandler(),
                FingersCrossedHandler(
                    TimedRotatingFileHandler(
                        config.logPath,
                        level=0,
                        backup_count=3,
@author: Amosun Sunday
'''
import requests
import service_config
import json
from logbook import Logger
from logbook import TimedRotatingFileHandler
import importlib
import utility.utility_functions as util_func
import ast
from multiprocessing import Pool

logger = Logger('ProcessControllerLogger')
log_handler = TimedRotatingFileHandler('ProcessController.log',
                                       date_format='%Y-%m-%d')
log_handler.push_application()

headers = {'Content-Type': 'application/json'}
url_process = service_config.HTTP_LONG_PROCESS_REQUEST
url_process_task = service_config.HTTP_LONG_PROCESS_TASK_LIST_REQUEST
process_count = service_config.PROCESS_COUNT


def load_module(full_module_path):
    module = importlib.import_module(full_module_path)
    return module


def load_class(full_class_string):
def main(run_name, settings, exec_mode):
    # File path and test data path
    fp = os.path.dirname(__file__)
    tdp = join(fp, "..", "tests", "test_data")
    composition = settings.get("composition_file", join(tdp, "composition.fa"))
    coverage = settings.get("coverage_file", join(tdp, "coverage"))
    result_path = settings.get("results_path_base", join(fp, "..", "tmp_out_test"))
    kmer_lengths = settings.get("kmer_lengths", [4])
    pcas = settings.get("total_percentage_pca", [80])
    thresholds = settings.get("length_threshold", [1000])
    cv_types = settings.get("covariance_type", ["full"])
    clusters = settings.get("clusters", "2,100,2")
    max_n_processors = settings.get("max_n_processors", 1)
    email = settings.get("email", None)
    log_path = settings.get("log_path",
                            join(os.path.expanduser("~"), "log", "concoctr.log"))
    handler = TimedRotatingFileHandler(log_path)
    logger = Logger(run_name)
    handler.push_application()

    result_rows = []
    indx = []
    con_ps = []
    if exec_mode == 'drmaa':
        s = drmaa.Session()
        s.initialize()
    result_dir = os.path.join(result_path, run_name)
    os.mkdir(result_dir)
    slurm_dir = os.path.join(result_dir, 'slurm')
    os.mkdir(slurm_dir)
    sbatch_dir = os.path.join(result_dir, 'sbatch')
    os.mkdir(sbatch_dir)
    concoct_dir = os.path.join(result_dir, 'concoct_output')
    os.mkdir(concoct_dir)
    for k in kmer_lengths:
        for pca in pcas:
            for thr in thresholds:
                for cv in cv_types:
                    job_name = "_".join(map(str, [k, pca, thr, cv]))
                    con_p = ConcoctParams(
                        composition, coverage,
                        kmer_length=k,
                        total_percentage_pca=pca,
                        length_threshold=thr,
                        covariance_type=cv,
                        basename=os.path.join(concoct_dir, job_name) + "/",
                        max_n_processors=max_n_processors,
                        clusters=clusters)
                    con_ps.append(con_p)
                    cr = ConcoctR()
                    if k > 9:
                        # Throw in some extra memory
                        n_cores = 4
                    else:
                        n_cores = 1
                    if exec_mode == 'drmaa':
                        jt = s.createJobTemplate()
                        jt.nativeSpecification = '-A b2010008 -p core -n {} -t 7-00:00:00'.format(n_cores)
                        jt.email = email
                        jt.workingDirectory = result_path
                        jobid = cr.run_concoct(con_p, drmaa_s=s, drmaa_jt=jt)
                    elif exec_mode == 'sbatch':
                        script_file = os.path.join(result_dir, 'sbatch', job_name)
                        sbatch_params = [
                            '-A b2010008', '-p core',
                            '-n {}'.format(n_cores),
                            '-t 7-00:00:00',
                            '-J {}'.format(job_name),
                            '-o {}'.format(os.path.join(result_dir, 'slurm', 'slurm-%j.out'))]
                        cr.generate_sbatch_script(con_p, sbatch_params, script_file)
                        jobid = cr.run_concoct(con_p, sbatch_script=script_file)
                    if jobid:
                        result_rows.append(con_p.options)
                        indx.append(jobid)
                        logger.info("Submitted jobid {0}".format(jobid))
    results_df = p.DataFrame(result_rows, index=indx)
    results_df.to_csv(os.path.join(result_path, run_name + "_all_results.csv"))
    handler.pop_application()
    )
    return log


std_handler = ColorizedStderrHandler(bubble=True)
std_handler.formatter = log_formatter

# Directory where log files are stored
LOG_DIR = env.log_dir
if not os.path.exists(LOG_DIR):
    os.makedirs(LOG_DIR)

# Write logs to files, one per level
log_file_debug = TimedRotatingFileHandler(
    os.path.join(LOG_DIR, '{}.log'.format('debug')),
    date_format='%Y-%m-%d', level=logbook.DEBUG, bubble=True, encoding='utf-8')
log_file_debug.formatter = log_formatter
log_file_info = TimedRotatingFileHandler(
    os.path.join(LOG_DIR, '{}.log'.format('info')),
    date_format='%Y-%m-%d', level=logbook.INFO, bubble=True, encoding='utf-8')
log_file_info.formatter = log_formatter
log_file_err = TimedRotatingFileHandler(
    os.path.join(LOG_DIR, '{}.log'.format('err')),
    date_format='%Y-%m-%d', level=logbook.WARNING, bubble=True, encoding='utf-8')
log_file_err.formatter = log_formatter


def init_logger(level=logbook.INFO):
    logbook.set_datetime_format("local")
    system_log = Logger("system_log")
    system_log.handlers = []
    system_log.handlers.append(log_file_debug)
    system_log.handlers.append(log_file_info)
    )

log_dir = os.getcwd()
print('123')
if os.path.exists(log_dir):
    print('file exists')
else:
    print('file does not exist')
# print(path)

# Print logs to the screen
log_led = ColorizedStderrHandler(bubble=True)  # handler that prints to the screen
log_led.formatter = log_type  # format for printed logs

# Write logs to a file
log_file = TimedRotatingFileHandler(
    os.path.join(log_dir, '%s.log' % 'log'),
    date_format='%Y-%m-%d', bubble=True, encoding='utf-8')
log_file.formatter = log_type

# Script logger
run_log = Logger("script_log")


def init_logger():
    logbook.set_datetime_format("local")
    run_log.handlers = []
    run_log.handlers.append(log_file)
    run_log.handlers.append(log_led)
    return run_log  # return the configured logger so the assignment below gets a value


logger = init_logger()
import simplejson as json
from logbook import Logger
from logbook import TimedRotatingFileHandler

import config
import redis_util

handler = TimedRotatingFileHandler('firetower-client.log',
                                   date_format='%Y-%m-%d')
handler.push_application()
log = Logger('Firetower-client')


class Client(object):
    """Basic Firetower Client."""

    def __init__(self, conf):
        self.conf = config.Config(conf)
        self.redis_host = self.conf.redis_host
        self.redis_port = self.conf.redis_port
        self.redis_db = self.conf.redis_db
        self.queue_key = self.conf.queue_key
        self.queue = redis_util.get_redis_conn(
            host=self.redis_host, port=self.redis_port,
            redis_db=self.redis_db)

    def push_event(self, event):
        self.queue.lpush(self.queue_key, event)

    def emit(self, event):
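# Hedged usage sketch for the client above; the conf argument is assumed to
# be a path understood by config.Config, and the event payload shape is
# illustrative only.
client = Client('firetower.yaml')
client.push_event(json.dumps({'sig': 'Example error line'}))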