def set_log_file(self, log_file):
    """Redirect all logging output to *log_file*, replacing existing handlers.

    :param log_file: path of the file to write log records to.
    """
    hdlr = logging.FileHandler(log_file)
    formatter = logging.Formatter('%(asctime)s %(levelname)s: %(message)s')
    hdlr.setFormatter(formatter)
    # Fix: the ``logging`` module has no ``handlers`` list, ``removeHandler``
    # or ``addHandler`` -- those live on Logger objects.  Use the root logger,
    # and iterate over a copy since we mutate the list while iterating.
    root = logging.getLogger()
    for hd in root.handlers[:]:  # remove all old handlers
        root.removeHandler(hd)
    root.addHandler(hdlr)
def initLog():
    """Configure the root logger from FLAGS: stderr, syslog and file output.

    Removes the temporary early-boot handler once real handlers are in place,
    then sets the root level from the verbosity flags.
    """
    global _early_log_handler
    log = logging.getLogger()
    if FLAGS.use_stderr:
        log.addHandler(logging.StreamHandler(sys.stderr))
    # Drop the provisional handler installed before flag parsing.
    if _early_log_handler is not None:
        log.removeHandler(_early_log_handler)
        _early_log_handler = None
    if FLAGS.use_syslog:
        syslog = logging.handlers.SysLogHandler(address='/dev/log')
        log.addHandler(syslog)
    log_file = _get_log_file()
    if log_file is not None:
        # Fix: ``logging.addHandler`` does not exist on the module; attach the
        # file handler to the root logger like the other handlers above.
        log.addHandler(logging.handlers.WatchedFileHandler(log_file))
        # NOTE(review): 'log_file_mnode' looks like a typo for 'log_file_mode',
        # but the flag is declared elsewhere -- confirm before renaming.
        mode = int(FLAGS.log_file_mnode, 8)
        os.chmod(log_file, mode)
    if FLAGS.verbose:
        log.setLevel(logging.DEBUG)
    elif FLAGS.default_log_level is not None:
        log.setLevel(FLAGS.default_log_level)
    else:
        log.setLevel(logging.INFO)
def setup_logging(self):
    """Setup Logging: console output when debugging, file output otherwise."""
    logging.basicConfig(format='%(asctime)s %(message)s',
                        datefmt='%m/%d/%Y %I:%M:%S %p',
                        level=logging.DEBUG)
    if self.debugging:  # read this from config / init
        # Fix: the handler was created here but never kept or attached.
        handler = logging.StreamHandler(sys.stdout)
    else:
        handler = logging.FileHandler('/var/log/wifiserver.log')
    # Fix: ``logging.addHandler`` does not exist on the module -- attach the
    # chosen handler to the root logger.
    logging.getLogger().addHandler(handler)
def log_init():
    """Initialise root logging from the parsed command-line ``args``.

    Adds a file handler at ``args.log_level`` when ``args.log_file`` is set.
    """
    # Fix: ``logging.setLevel`` does not exist on the module; configure the
    # root logger instead.
    root = logging.getLogger()
    root.setLevel(logging.DEBUG)
    if args.log_file is not False:
        #fh = logging.handlers.RotatingFileHandler(filename = args.log_file, mode='w+', maxBytes = 5000000)
        fh = logging.FileHandler(filename=args.log_file, mode='w+')
        fh.setLevel(getattr(logging, args.log_level))
        ff = logging.Formatter('%(asctime)s - %(levelname)8s - %(message)s')
        #ff = logging.Formatter('%(message)s')
        fh.setFormatter(ff)
        # Fix: ``addHandler`` lives on the logger, not the logging module.
        root.addHandler(fh)
def make_logger(name=str(os.getpid())):
    """Build and return a Logging Logger."""
    if not sys.platform.startswith("win") and sys.stderr.isatty():
        def add_color_emit_ansi(fn):
            """Add methods we need to the class."""
            def new(*args):
                """Method overload."""
                if len(args) == 2:
                    new_args = (args[0], copy(args[1]))
                else:
                    new_args = (args[0], copy(args[1]), args[2:])
                # File handlers keep the uncolored record.
                if hasattr(args[0], 'baseFilename'):
                    return fn(*args)
                levelno = new_args[1].levelno
                if levelno >= 50:
                    color = '\x1b[31;5;7m\n '  # blinking red with black
                elif levelno >= 40:
                    color = '\x1b[31m'  # red
                elif levelno >= 30:
                    color = '\x1b[33m'  # yellow
                elif levelno >= 20:
                    color = '\x1b[32m'  # green
                elif levelno >= 10:
                    color = '\x1b[35m'  # pink
                else:
                    color = '\x1b[0m'  # normal
                try:
                    new_args[1].msg = color + str(new_args[1].msg) + ' \x1b[0m'
                except Exception as reason:
                    print(reason)  # Do not use log here.
                return fn(*new_args)
            return new
        # all non-Windows platforms support ANSI Colors so we use them
        log.StreamHandler.emit = add_color_emit_ansi(log.StreamHandler.emit)
    else:
        log.debug("Colored Logs not supported on {0}.".format(sys.platform))
    log_file = os.path.join(gettempdir(), str(name).lower().strip() + ".log")
    log.basicConfig(level=-1, filemode="w", filename=log_file,
                    format="%(levelname)s:%(asctime)s %(message)s %(lineno)s")
    log.getLogger().addHandler(log.StreamHandler(sys.stderr))
    adrs = "/dev/log" if sys.platform.startswith("lin") else "/var/run/syslog"
    try:
        handler = log.handlers.SysLogHandler(address=adrs)
    except OSError:
        # Fix: narrowed the bare ``except`` -- SysLogHandler raises OSError
        # (socket errors) when the syslog socket is unavailable.
        log.debug("Unix SysLog Server not found,ignored Logging to SysLog.")
    else:
        # Fix: ``log.addHandler`` does not exist on the module and raised
        # AttributeError whenever syslog WAS reachable; use the root logger.
        log.getLogger().addHandler(handler)
    log.debug("Logger created with Log file at: {0}.".format(log_file))
    return log
def make_logger(name=str(os.getpid())):
    """Build and return a Logging Logger."""
    if not sys.platform.startswith("win") and sys.stderr.isatty():
        def add_color_emit_ansi(fn):
            """Add methods we need to the class."""
            def new(*args):
                """Method overload."""
                if len(args) == 2:
                    new_args = (args[0], copy(args[1]))
                else:
                    new_args = (args[0], copy(args[1]), args[2:])
                # File handlers keep the uncolored record.
                if hasattr(args[0], "baseFilename"):
                    return fn(*args)
                levelno = new_args[1].levelno
                if levelno >= 50:
                    color = "\x1b[31;5;7m\n "  # blinking red with black
                elif levelno >= 40:
                    color = "\x1b[31m"  # red
                elif levelno >= 30:
                    color = "\x1b[33m"  # yellow
                elif levelno >= 20:
                    color = "\x1b[32m"  # green
                elif levelno >= 10:
                    color = "\x1b[35m"  # pink
                else:
                    color = "\x1b[0m"  # normal
                try:
                    new_args[1].msg = color + str(new_args[1].msg) + " \x1b[0m"
                except Exception as reason:
                    print(reason)  # Do not use log here.
                return fn(*new_args)
            return new
        # all non-Windows platforms support ANSI Colors so we use them
        log.StreamHandler.emit = add_color_emit_ansi(log.StreamHandler.emit)
    else:
        log.debug("Colored Logs not supported on {0}.".format(sys.platform))
    log_file = os.path.join(gettempdir(), str(name).lower().strip() + ".log")
    log.basicConfig(
        level=-1,
        filemode="w",
        filename=log_file,
        format="%(levelname)s:%(asctime)s %(message)s %(lineno)s"
    )
    log.getLogger().addHandler(log.StreamHandler(sys.stderr))
    adrs = "/dev/log" if sys.platform.startswith("lin") else "/var/run/syslog"
    try:
        handler = log.handlers.SysLogHandler(address=adrs)
    except Exception:
        log.warning("Unix SysLog Server not found,ignored Logging to SysLog.")
    else:
        # Fix: ``log.addHandler`` does not exist on the module and raised
        # AttributeError whenever syslog WAS reachable; use the root logger.
        log.getLogger().addHandler(handler)
    log.debug("Logger created with Log file at: {0}.".format(log_file))
    return log
def setup_log(level):
    """Attach a console handler at *level* to the root logger.

    :param level: logging level for the console handler (e.g. logging.DEBUG).
    """
    # create console handler and set level to debug
    ch = logging.StreamHandler()
    ch.setLevel(level)
    # create formatter
    formatter = logging.Formatter(
        '%(asctime)s [%(levelname)s] %(name)s: %(message)s')
    # add formatter to ch
    ch.setFormatter(formatter)
    # Fix: ``logging.addHandler`` does not exist on the module -- attach the
    # handler to the root logger.
    logging.getLogger().addHandler(ch)
def setUpClass(cls):
    """Send this module's log records to a per-module file and the console.

    The root logger is capped at WARNING; the module logger records DEBUG,
    writing everything to the file while the console shows WARNING and up.
    """
    root_logger.getLogger('').setLevel(root_logger.WARNING)

    # Console: warnings and above only.
    console = root_logger.StreamHandler()
    console.setLevel(root_logger.WARNING)

    # File named after this module, truncated on each run.
    log_file_name = "log.{}".format(splitext(split(__file__)[1])[0])
    file_handler = root_logger.FileHandler(log_file_name, mode='w')
    file_handler.setLevel(root_logger.DEBUG)

    module_log = root_logger.getLogger(__name__)
    module_log.setLevel(root_logger.DEBUG)
    module_log.addHandler(console)
    module_log.addHandler(file_handler)
def init_logging():
    """Return the 'backup' logger wired to a colourised DEBUG console handler."""
    from colorlog import ColoredFormatter

    level = logging.DEBUG
    fmt = " %(log_color)s%(levelname)-8s%(reset)s | %(log_color)s%(message)s%(reset)s"

    logging.root.setLevel(level)

    console = logging.StreamHandler()
    console.setLevel(level)
    console.setFormatter(ColoredFormatter(fmt))

    backup_log = logging.getLogger('backup')
    backup_log.setLevel(level)
    backup_log.addHandler(console)
    return backup_log
def get_logger(self):
    """Create and return a customized logger object.

    :return log: the logger object.
    :rtype log: log.Logger
    """
    stream_handler = self.log.StreamHandler()
    stream_handler.setFormatter(MyLogFormatter())
    # Route warnings.warn() output through logging as well.
    self.log.captureWarnings(True)
    logger = self.log.getLogger("phasemap_fit")
    logger.addHandler(stream_handler)
    return logger
def __init__(self, nbadotcom_games=None, omit=None):
    '''
    Args:
        nbadotcom_games (dict): key-value pair of gamecode and nbacom_game_id
        omit (list): fields to omit from nbastuffer files
    '''
    # Silence this library's logger unless the application configures one.
    logging.getLogger(__name__).addHandler(logging.NullHandler())
    # Fix: the former ``logging.addHandler(logging.NullHandler())`` raised
    # AttributeError (the module has no addHandler); the line above already
    # covers this module's logger.
    self.names = NBATeamNames()
    # Fix: replaced the mutable default ``{}`` with None so instances never
    # share one dict object; passing an explicit dict behaves as before.
    self.nbadotcom_games = nbadotcom_games if nbadotcom_games is not None else {}
    if omit:
        self.omit = omit
    else:
        self.omit = ['teams', 'f', 'moneyline', 'moneyline_', 'movements',
                     'opening_odds', 'to to']
def create_logger(name, silent=False, to_disk=False, log_file=None):
    """Create a new logger.

    :param name: logger name passed to ``logging.getLogger``.
    :param silent: when False, add a DEBUG console handler on stdout.
    :param to_disk: when True, add INFO file handler(s).
    :param log_file: a path, or a list/tuple of paths; defaults to a
        timestamped file under ``log/`` when omitted.
    :return: the configured, non-propagating Logger.
    """
    # setup logger
    log = logging.getLogger(name)
    log.setLevel(logging.DEBUG)
    log.propagate = False
    formatter = logging.Formatter(fmt='%(message)s',
                                  datefmt='%Y/%m/%d %I:%M:%S')
    if not silent:
        ch = logging.StreamHandler(sys.stdout)
        ch.setLevel(logging.DEBUG)
        ch.setFormatter(formatter)
        log.addHandler(ch)
    if to_disk:
        log_file = log_file if log_file is not None else strftime(
            "log/log_%m%d_%H%M.txt", gmtime())
        # Fix idiom: isinstance() instead of ``type(x) == ...``; the single
        # path and list-of-paths cases now share one loop (tuples also work).
        paths = log_file if isinstance(log_file, (list, tuple)) else [log_file]
        for filename in paths:
            fh = logging.FileHandler(filename, mode='w')
            fh.setLevel(logging.INFO)
            fh.setFormatter(formatter)
            log.addHandler(fh)
    return log
def configure_logging():
    """Attach a colourised console handler and a plain file handler to the root logger."""
    logpath = config['smtpd']['log_file']

    # Console format: ANSI colour escapes substituted around each field.
    console_fmt = ('{blue1}%(asctime)s '
                   '{red1}%(filename)s:%(lineno)d '
                   '{yel1}%(levelname)s '
                   '{gre1}%(funcName)s() '
                   '{res}%(message)s').format(blue1=AnsiColor.blue,
                                              red1=AnsiColor.red,
                                              yel1=AnsiColor.yellow,
                                              res=AnsiColor.end,
                                              gre1=AnsiColor.magenta)
    # File format: identical fields without colour codes.
    file_fmt = ('%(asctime)s '
                '%(filename)s:%(lineno)d '
                '%(levelname)s '
                '%(funcName)s() '
                '%(message)s')

    root = loggingg.getLogger()
    root.setLevel(loggingg.INFO)

    file_handler = loggingg.FileHandler(logpath)
    file_handler.setFormatter(loggingg.Formatter(file_fmt))
    root.addHandler(file_handler)

    console_handler = loggingg.StreamHandler()
    console_handler.setFormatter(loggingg.Formatter(console_fmt))
    root.addHandler(console_handler)
import logging

# Per-site log file path; ``name`` and ``basedir`` are expected to be defined
# earlier in this file -- TODO confirm against the full module.
info_log = name + '.log'
info_log = os.path.join(basedir, info_log)

logging.basicConfig()
formatter = logging.Formatter("[%(asctime)s] %(levelname)s ==> %(message)s",
                              "%d-%m-%Y %H:%M:%S")

# Root logger records everything at DEBUG and above.
log = logging.getLogger()
log.setLevel(logging.DEBUG)

# Let urllib3's records bubble up to the root handlers.
req_log = logging.getLogger('requests.packages.urllib3')
req_log.setLevel(logging.DEBUG)
req_log.propagate = True

# Console handler: DEBUG and above.
console = logging.StreamHandler()
console.setLevel(logging.DEBUG)
console.setFormatter(formatter)
log.addHandler(console)

# File handler: only INFO and above reach the log file.
i_handler = logging.FileHandler(info_log)
i_handler.setLevel(logging.INFO)
i_handler.setFormatter(formatter)
log.addHandler(i_handler)


def handle_exception(exc_type, exc_value, exc_traceback):
    """Log uncaught exceptions; let KeyboardInterrupt terminate normally."""
    if issubclass(exc_type, KeyboardInterrupt):
        sys.__excepthook__(exc_type, exc_value, exc_traceback)
        return
    log.error("{}: uncaught".format(name))
    log.error("traceback:", exc_info=(exc_type, exc_value, exc_traceback))


# Route every uncaught exception through the logger above.
sys.excepthook = handle_exception

########################################

site = 'https://upornia.com'
ua_rand = 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.116 Safari/537.36'
# Local rotating file handler -- the handler object and formatters are
# created earlier in this file.
essLocalFileHandler.setLevel(LogLevel)
essLocalFileHandler.setFormatter(essFormatter1)
#essLocalFileHandler.doRollover()
###########################
# LocalConsoleHandler
essConsoleHandler = logging.StreamHandler(sys.stdout)
essConsoleHandler.setLevel(LogLevel)
essConsoleHandler.setFormatter(essFormatter2)
##########################
# Add handlers to default logger
if MultiProc:
    # Mirror everything on the multiprocessing logger as well.
    logger = multiprocessing.get_logger()
    logger.setLevel(LogLevel)
# NOTE(review): the name ``logging`` is rebound from the module to the root
# logger here; every ``logging.<...>`` reference below is the logger object.
logging = logging.getLogger('')
logging.setLevel(0)
logging.addHandler(essLocalFileHandler)
if MultiProc:
    logger.addHandler(essLocalFileHandler)
if Console:
    logging.addHandler(essConsoleHandler)
    if MultiProc:
        logger.addHandler(essConsoleHandler)
logging.debug('LogFile: ' + str(LogFile))
logging.debug('Time: ' + str(Time))
logging.debug('Status: ' + str(Status))
logging.debug('Run: ' + str(Run))
# Fetch agent configuration values from the ESS database.
AgentIdentifierValue = ESSDB.DB().action(
    'ESSConfig', 'GET', ('Value', ), ('Name', 'AgentIdentifierValue'))[0][0]
ExtDBupdate = int(ESSDB.DB().action('ESSConfig', 'GET', ('Value', ),
                                    ('Name', 'ExtDBupdate'))[0][0])
import sys
import logging
from flask.ext.script import Command
from botocore.exceptions import ClientError

from confidant import app
from confidant import iam
from confidant import kms
from confidant import keymanager
from confidant.models.service import Service

# Fix: the logging module has no addHandler()/setLevel(); configure the
# root logger instead (the former calls raised AttributeError on import).
logging.getLogger().addHandler(logging.StreamHandler(sys.stdout))
logging.getLogger().setLevel(logging.INFO)


class ManageGrants(Command):
    """Flask-Script command that re-syncs KMS grants for service IAM roles."""

    def run(self):
        """Ensure grants exist for every IAM role that backs a known service."""
        grants = keymanager.get_grants()
        try:
            roles = [x for x in iam.roles.all()]
        except ClientError:
            logging.error('Failed to fetch IAM roles.')
            return
        services = []
        for service in Service.data_type_date_index.query('service'):
            services.append(service.id)
        for role in roles:
            if role.name in services:
                logging.info('Managing grants for {0}.'.format(role.name))
                keymanager._ensure_grants(role, grants)
# Two loggers fetched via ``logger`` (a logging alias defined earlier in this
# file); each message is deliberately emitted twice here.
file_logger = logger.getLogger("log")
history_logger = logger.getLogger("history")
file_logger.info("file_logger")
file_logger.info("file_logger")
history_logger.info("history_logger")
history_logger.info("history_logger")
# NOTE(review): rebinds ``logger`` from the module alias to the root logger.
logger = logging.getLogger()
_format = "%(levelname)s: %(asctime)s: %(filename)s:%(lineno)d行 %(message)s"
formatter = logging.Formatter(_format)
# Rotate the file daily, keeping a single backup.
t_handler = handlers.TimedRotatingFileHandler("test.log", when='D', backupCount=1)
t_handler.setLevel(logging.DEBUG)
t_handler.setFormatter(formatter)
logger.addHandler(t_handler)
for l in logger.handlers:
    print(l)
#
# Remove absl's pre-init handler to suppress junk logs on the console; see
# https://github.com/tensorflow/tensorflow/issues/26691
import absl.logging
logging.root.removeHandler(absl.logging._absl_handler)
absl.logging._warn_preinit_stderr = False
print("xxxxxx")
import logging as log


class QgsLogHandler(log.StreamHandler):
    """
    A handler class which allows the cursor to stay on one line
    for selected messages
    """

    def emit(self, record):
        """Forward the formatted record to the QGIS message log."""
        try:
            msg = self.format(record)
            QgsMessageLog.logMessage(msg, "Mappy", 0, False)
            self.flush()
        except (KeyboardInterrupt, SystemExit):
            raise
        except Exception:
            # Fix: narrowed the bare ``except`` so interpreter exits
            # re-raised above cannot be swallowed by a later broad clause.
            self.handleError(record)


# Fix: ``log.handlers`` is the logging.handlers *submodule*, not the list of
# installed handlers, and the module object has no addHandler()/setLevel().
# Inspect and configure the root logger instead (the original also referenced
# the name ``logging`` although the module is imported as ``log``).
_root = log.getLogger()
if len(_root.handlers) == 0:
    # when reloading a plugin the python interpreter is the same so we dont
    # want to duplicate logging handlers. Not a clean way to do so, but it is
    # just for development
    _handler = QgsLogHandler()
    _handler.setLevel(log.DEBUG)
    _root.addHandler(_handler)
    _root.setLevel(log.DEBUG)
#logging.basicConfig(filename='./web-serv.log', filemode='a', format='%(levelname)s - %(asctime)s - %(message)s', level=logging.WARN) log_formatter = logging.Formatter( '%(asctime)s %(levelname)s (%(lineno)d) %(message)s') logFile = './web-serv.log' my_handler = RotatingFileHandler(logFile, mode='a', maxBytes=5 * 1024 * 1024, backupCount=2, encoding=None, delay=0) my_handler.setFormatter(log_formatter) my_handler.setLevel(logging.INFO) logging = logging.getLogger('root') # logging.setLevel(logging.INFO) # does not work, but why?? logging.addHandler(my_handler) config = configparser.ConfigParser() config.read('./config.ini') log = config.get('MODE', 'LOGLEVEL') location = config.get('MODE', 'LOCATION') print("[TokenJwt] location", location) if (location == "cloud") and (test == False): import ssl context = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER) context.load_cert_chain( '/etc/letsencrypt/live/otoplenok.ru/fullchain.pem', '/etc/letsencrypt/live/otoplenok.ru/privkey.pem')
import json
import os
import sys

from cilogger import consolehandler, filehandler
import logging

# NOTE: rebinds the module name ``logging`` to this module's logger; later
# references to ``logging`` in this file are the logger object.
logging = logging.getLogger(__name__)
logging.addHandler(consolehandler)
logging.addHandler(filehandler)


class Environment:
    """In-memory set of environment variables loaded from a dotenv-style file."""

    def __init__(self, env_file):
        self.env_vars = self.loadFromFile(env_file)

    def loadFromFile(self, filename):
        """Parse ``KEY=VALUE`` lines from *filename* into a dict.

        Lines containing ``#`` anywhere, or lacking ``=``, are skipped
        (preserving the original filter's semantics).
        """
        env = {}
        with open(filename, 'r') as envfile:
            lines = filter(lambda line: "#" not in line and '=' in line,
                           envfile.read().split("\n"))
            for line in lines:
                # Fix: split on the FIRST '=' only so values containing '='
                # are no longer truncated; also replaced the side-effect list
                # comprehension with a plain loop.
                key, _, value = line.partition("=")
                env[key.strip()] = value.strip()
        return env

    def addToEnv(self, env: dict):
        """Merge *env* into the in-memory variables."""
        self.env_vars.update(env)
import codecs, logging, sys
from fastapi import FastAPI
from api.core.config import cnf
from logging.handlers import RotatingFileHandler
from fastapi.middleware.cors import CORSMiddleware

logging.basicConfig(
    filename=cnf.LOG_FILE,
    format='[%(asctime)s %(name)s] : %(levelname)s - %(message)s',
    datefmt='%Y-%m-%d %H:%M:%S')

handler = RotatingFileHandler(cnf.LOG_FILE,
                              backupCount=10,
                              encoding='utf8',
                              maxBytes=(cnf.LOG_MAXFILESIZE * 1024))

# Fix: ``logging.handlers`` is the handlers *submodule* (always truthy), so
# the rotating handler was never attached -- and ``logging.addHandler`` does
# not exist on the module anyway.  Check and configure the root logger.
_root = logging.getLogger()
if not _root.handlers:
    _root.addHandler(handler)

logger = logging.getLogger('BACKEND-SERVICE')
# Fix: replaced the elif chain, which misspelled CRITICAL as ``CRTICAL`` and
# raised AttributeError for that level, with a single getattr lookup.
_level = getattr(logging, cnf.LOG_LEVEL.upper(), None)
if isinstance(_level, int):
    logger.setLevel(_level)
else:
    print(
        f"[!] Error occured while setting log level\n[*] Please set log level according to : DEBUG,INFO,WARNING,ERROR,CRITICAL"
    )
# Local rotating file handler -- the handler object and formatters are
# created earlier in this file.
essLocalFileHandler.setLevel(LogLevel)
essLocalFileHandler.setFormatter(essFormatter1)
#essLocalFileHandler.doRollover()
###########################
# LocalConsoleHandler
essConsoleHandler = logging.StreamHandler(sys.stdout)
essConsoleHandler.setLevel(LogLevel)
essConsoleHandler.setFormatter(essFormatter2)
##########################
# Add handlers to default logger
if MultiProc:
    # Mirror everything on the multiprocessing logger as well.
    logger = multiprocessing.get_logger()
    logger.setLevel(LogLevel)
# NOTE(review): the name ``logging`` is rebound from the module to the root
# logger here; every ``logging.<...>`` reference below is the logger object.
logging = logging.getLogger('')
logging.setLevel(0)
logging.addHandler(essLocalFileHandler)
if MultiProc:
    logger.addHandler(essLocalFileHandler)
if Console:
    logging.addHandler(essConsoleHandler)
    if MultiProc:
        logger.addHandler(essConsoleHandler)
logging.debug('LogFile: ' + str(LogFile))
logging.debug('Time: ' + str(Time))
logging.debug('Status: ' + str(Status))
logging.debug('Run: ' + str(Run))
# Fetch agent configuration values from the ESS database.
AgentIdentifierValue = ESSDB.DB().action('ESSConfig','GET',('Value',),('Name','AgentIdentifierValue'))[0][0]
ExtDBupdate = int(ESSDB.DB().action('ESSConfig','GET',('Value',),('Name','ExtDBupdate'))[0][0])
x=WorkingThread(ProcName)
# NOTE(review): the loop body continues beyond this excerpt.
while 1:
# NOTE(review): this excerpt begins mid-expression -- the lines below are the
# trailing keyword arguments of a colorlog ColoredFormatter(...) call whose
# opening lies before this chunk.
log_colors={
    'DEBUG': 'cyan',
    'INFO': 'green',
    'WARNING': 'yellow',
    'ERROR': 'red',
    'CRITICAL': 'white,bg_red',
},
    secondary_log_colors={},
    style='%'
)
stream = logging.StreamHandler()
stream.setLevel(LOG_LEVEL)
stream.setFormatter(formatter)
# NOTE(review): rebinds ``logging`` from the module to the 'pythonConfig'
# logger; later ``logging.<...>`` calls go through the logger object.
logging = logging.getLogger('pythonConfig')
logging.setLevel(LOG_LEVEL)
logging.addHandler(stream)
'''
______________________________________________________________________________
constants
______________________________________________________________________________
'''
TIME_SLOT_DURATION = global_setting.constants['time_slot_duration']  # duration of a time step in seconds
NUM_MOBILE_DEVICE = global_setting.constants['num_mobile_device']  #
NETWORK_BANDWIDTH = global_setting.constants['network_bandwidth']  # in Mbps
NUM_TIME_SLOT = global_setting.constants['num_time_slot']
RUN_NUM = global_setting.constants['run_num']
ALGORITHM = global_setting.constants['algorithm_name']
ORIGINAL_OUTPUT_DIR = OUTPUT_DIR = global_setting.constants['output_dir']
networkList = global_setting.constants['network_list']
SAVE_TO_FILE_FREQUENCY = global_setting.constants['save_to_file_frequency']  # 1 means every time slot, 10 means every 10 time slots...
PROBLEM_INSTANCE = global_setting.constants['problem_instance']
NUM_REPEAT = global_setting.constants['num_repeat']
'''
____________________________________________________________________
MobileDevice class definition
____________________________________________________________________
'''
# File handler capturing everything at DEBUG level (``logger`` is expected to
# be defined earlier in this file -- TODO confirm).
fh = logging.FileHandler('debug_log.log')
fh.setLevel(logging.DEBUG)
# create console handler with a higher log level
ch = logging.StreamHandler()
ch.setLevel(logging.DEBUG)
# create formatter and add it to the handlers
formatter = logging.Formatter(
    '%(asctime)s - %(name)s - %(levelname)s - %(message)s')
fh.setFormatter(formatter)
ch.setFormatter(formatter)
# add the handlers to the logger
logger.addHandler(fh)
logger.addHandler(ch)


def Linhao_traverse(main_root, matching_rule='c', matching_map=None):
    """
    Traverses the main_root directory, looking for all the '.tif/.TIF' files,
    performs name matching then iterates through the resulting matched
    dictionary.

    Matching assumption is that except for the matching keys, the names are
    identical.

    NOTE(review): the implementation body lies beyond this excerpt.

    :param main_root: folder from which will be traversed in depth
    :param matching_rule: name modification to type mapping. Currently '' for no matching, 'color' for colors
    :param matching_map: {'pattern in the file name': color channel number}
    :return:
    """
from django.core.handlers import wsgi
import pusher
import constraint
import logging as _

SETTINGS_PATH = "django_tornado.settings"

# Console handler and formatter on the root logger.
# NOTE(review): the name ``logging`` is bound to the root logger object here,
# not the logging module (which is aliased as ``_``).
_H = _.StreamHandler()
_F = _.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
logging = _.getLogger('')
logging.setLevel(_.DEBUG)
logging.addHandler(_H)
_H.setFormatter(_F)


def main():
    """Wire the Django WSGI app and the SockJS push router into a Tornado server."""
    wsgi_app = tornado.wsgi.WSGIContainer(wsgi.WSGIHandler())
    Router = sockjs.tornado.SockJSRouter(pusher.PushClient, '/stream')
    Router.urls.append((r'/static/(.*)$', tornado.web.StaticFileHandler, {'path': './static'}))
    Router.urls.append(('.*', tornado.web.FallbackHandler, dict(fallback=wsgi_app)))
    logging.debug("start")
    # Start the periodic pinger before entering the server loop.
    ping = pusher.Pinger()
    ping.start()
    tornado_app = tornado.web.Application(Router.urls)
    # NOTE(review): excerpt ends here; listen()/IOLoop start presumably follows.
    server = tornado.httpserver.HTTPServer(tornado_app)