def logging_setup():
    """Route root logging to the console and Loggly, tagging every record
    with the current request id via RequestIDLogFilter."""
    console = logging.StreamHandler()
    loggly = LogglyHandler(
        "https://logs-01.loggly.com/inputs/{}/tag/python".format(
            LOGGLY_CUSTOMER_TOKEN))
    # Each handler needs its own filter instance to inject %(request_id)s.
    for target in (console, loggly):
        target.addFilter(RequestIDLogFilter())
    logging.basicConfig(
        handlers=[console, loggly],
        level=logging.INFO,
        format="[%(request_id)s] [%(asctime)s] [%(name)s] [%(levelname)s] %(message)s",
        datefmt="%Y-%m-%d %H:%M:%S %z")
def get_logger(app, logfile_name="app", log_path="/tmp/sport-data-news/", level=logging.DEBUG):
    """Save logs to a different file per log level inside *log_path*.

    One midnight-rotated file handler is installed on the root logger for
    every level at or above *level*, each writing to its own file (e.g.
    ``app-error.log``).  Every handler carries RequestIDLogFilter so the
    ``%(request_id)s`` field in the format resolves.

    :param app: Flask application; RequestID tracking is installed on it.
    :param logfile_name: basename prefix for the per-level log files.
    :param log_path: directory for the log files; created if missing.
    :param level: minimum level that receives a dedicated file handler.
    :return: the configured root logger.
    """
    RequestID(app)
    logging.setLoggerClass(AppLogger)
    # Fix: create the target directory up front -- TimedRotatingFileHandler
    # raises FileNotFoundError if it does not exist.
    os.makedirs(log_path, exist_ok=True)
    formatter = logging.Formatter(
        '%(asctime)s, %(levelname)s, %(filename)s:%(lineno)d, request_id=%(request_id)s, %(message)s',
        '%Y-%m-%d %H:%M:%S')
    suffixes = {
        logging.DEBUG: '-debug.log',
        logging.INFO: '-info.log',
        logging.WARNING: '-warning.log',
        logging.ERROR: '-error.log',
        logging.CRITICAL: '-critical.log',
    }
    log_files = {lvl: os.path.join(log_path, logfile_name + suffix)
                 for lvl, suffix in suffixes.items()}
    logger = logging.getLogger('')
    logger.setLevel(logging.DEBUG)
    for log_level, log_file in log_files.items():
        if log_level < level:
            continue  # below the requested threshold: no dedicated file
        file_handler = logging.handlers.TimedRotatingFileHandler(
            log_file, 'midnight')
        file_handler.setLevel(log_level)
        file_handler.setFormatter(formatter)
        file_handler.addFilter(RequestIDLogFilter())
        logger.addHandler(file_handler)
    return logger
def initialize_logging():
    """Attach a request-id-aware stream handler to the root logger and
    lower the root level to DEBUG."""
    root = logging.getLogger()
    stream = logging.StreamHandler()
    stream.setFormatter(logging.Formatter(
        "%(asctime)s - %(name)s - level=%(levelname)s - request_id=%(request_id)s - %(message)s"))
    # Contextual filter supplies %(request_id)s for every record.
    stream.addFilter(RequestIDLogFilter())
    root.addHandler(stream)
    root.setLevel(logging.DEBUG)
def init_logger(app):
    """Configure logging for *app*.

    In debug mode, installs coloredlogs on the app, werkzeug and root
    loggers; otherwise installs one shared handler (rotating file when
    LOG_DIR is configured, stream otherwise) on the same three loggers.
    Every handler gets RequestIDLogFilter so %(request_id)s resolves.
    """
    if app.debug:
        fmt = '%(asctime)s %(hostname)s %(name)s[%(process)d] %(request_id)s %(levelname)s %(message)s'
        coloredlogs.install(logger=app.logger, level='DEBUG', fmt=fmt)
        coloredlogs.install(
            logger=logging.getLogger('werkzeug'), level='DEBUG', fmt=fmt)
        # werkzeug has its own colored handler now; stop double-logging
        # through the root logger.
        logging.getLogger('werkzeug').propagate = False
        coloredlogs.install(level='DEBUG', fmt=fmt)
        all_handlers = (app.logger.handlers
                        + logging.getLogger('werkzeug').handlers
                        + logging.getLogger().handlers)
        for h in all_handlers:
            h.addFilter(RequestIDLogFilter())
        return

    # Production path: one handler shared by app, werkzeug and root loggers.
    if app.config.get('LOG_DIR'):
        from logging.handlers import TimedRotatingFileHandler
        # https://docs.python.org/3.6/library/logging.handlers.html#timedrotatingfilehandler
        handler = TimedRotatingFileHandler(
            os.path.join(app.config['LOG_DIR'], 'TelecomMonitorWebTool.app.log'),
            'midnight'
        )
    else:
        handler = logging.StreamHandler()
    handler.setLevel(logging.DEBUG)
    handler.setFormatter(
        logging.Formatter('[%(asctime)s] %(process)d %(request_id)s %(levelname)s - %(message)s'))
    handler.addFilter(RequestIDLogFilter())
    app.logger.addHandler(handler)
    app.logger.setLevel(logging.INFO)
    logging.getLogger('werkzeug').addHandler(handler)
    logging.getLogger().addHandler(handler)
def setup_logging():
    """Drop werkzeug's own handlers and funnel all logging through one
    request-id-aware stream handler on the root logger."""
    stream = logging.StreamHandler()
    # Clear werkzeug's handlers; its records still reach the root handler
    # via propagation.
    logging.getLogger("werkzeug").handlers = []
    stream.addFilter(RequestIDLogFilter())
    stream.setFormatter(logging.Formatter(
        "%(asctime)s - %(name)s - %(levelname)s - request_id=%(request_id)s - %(message)s"
    ))
    logging.getLogger().addHandler(stream)
def init_logging(**kwargs):
    """Basic-config the root logger to stderr at kwargs['LOG_LEVEL']
    (default 'INFO'), then retrofit every root handler with the
    request-id filter and a verbose format."""
    logging.basicConfig(level=kwargs.get('LOG_LEVEL', 'INFO'), stream=sys.stderr)
    fmt = logging.Formatter(
        "%(asctime)s:%(request_id)s:%(levelname)s:%(name)s:%(funcName)s:%(lineno)d:%(message)s"
    )
    for root_handler in logging.getLogger().handlers:
        root_handler.addFilter(RequestIDLogFilter())
        root_handler.setFormatter(fmt)
def configure_logging(app):
    """Attach a file handler to app.logger, writing to
    LOGGING_DIR/LOGGING_FILE at the configured LOG_LEVEL with
    request-id and machine-info fields in each record."""
    target = f'{app.config["LOGGING_DIR"]}/{app.config["LOGGING_FILE"]}'
    file_handler = logging.FileHandler(target)
    file_handler.setFormatter(
        logging.Formatter(
            '%(asctime)s [%(levelname)s] %(machine_ip)s %(env)s '
            '%(request_id)s %(name)s %(funcName)s:%(lineno)d %(message)s'))
    file_handler.setLevel(getattr(logging, app.config["LOG_LEVEL"]))
    # Filters populate %(request_id)s and %(machine_ip)s/%(env)s.
    file_handler.addFilter(RequestIDLogFilter())
    file_handler.addFilter(MachineInfoLogFilter())
    app.logger.addHandler(file_handler)
def initialise_logger(name=None):
    """Return logger *name* with a request-id-aware stream handler.

    The logger's level is inherited from the 'gunicorn.error' logger:
    when invoked under Gunicorn this links our logs to the gunicorn
    logger, so gunicorn's configured log levels govern the output
    produced by this application.
    """
    stream = logging.StreamHandler()
    stream.setFormatter(logging.Formatter(
        "[%(asctime)s] %(name)s [%(request_id)s] [%(levelname)s] %(message)s"
    ))
    stream.addFilter(RequestIDLogFilter())
    log = logging.getLogger(name)
    log.addHandler(stream)
    log.setLevel(logging.getLogger('gunicorn.error').getEffectiveLevel())
    return log
def config_log() -> None:
    """Configure the application loggers to follow a json-like format,
    replacing any handlers previously installed on the root logger."""
    json_handler = logging.StreamHandler()
    json_handler.addFilter(RequestIDLogFilter())
    json_handler.setFormatter(JsonFormatter(
        "%(message)s %(request_id)s %(name)s %(levelname)s %(lineno)s %(pathname)s %(asctime)s"
    ))
    root = logging.getLogger()
    root.handlers.clear()  # drop previously installed root handlers
    root.addHandler(json_handler)
def configure_logging(flask_app):
    """Logging configuration: swap Flask's default handler for a leveled
    stream handler carrying request-id and machine-info fields."""
    stream = logging.StreamHandler()
    fmt = ('%(asctime)s [%(levelname)s] %(machine_ip)s %(env)s '
           '%(request_id)s %(name)s %(funcName)s:%(lineno)d %(message)s')
    stream.setFormatter(logging.Formatter(fmt))
    stream.setLevel(getattr(logging, flask_app.config["LOG_LEVEL"]))
    # Filters populate the request-id and machine-info format fields.
    stream.addFilter(RequestIDLogFilter())
    stream.addFilter(MachineInfoLogFilter())
    flask_app.logger.removeHandler(default_handler)
    flask_app.logger.addHandler(stream)
def setup_logging(app_name='stoq-server', is_debug=None):
    """Configure root logging for the application.

    Installs a stdout stream handler (DEBUG when *is_debug* is truthy,
    INFO otherwise), a syslog handler on POSIX systems, and -- on
    Windows -- tees stdout/stderr into per-process files and adds a
    plain file handler, since no supervisor/syslog captures output there.

    :param app_name: identifier used in the syslog line prefix.
    :param is_debug: truthy enables DEBUG on the console and root logger.
    """
    log_format = '%(asctime)s %(name)s [%(processName)s(%(process)s)]: %(levelname)s - %(message)s'
    ch = logging.StreamHandler(sys.stdout)
    ch.setLevel(logging.DEBUG if is_debug else logging.INFO)
    if RequestIDLogFilter:
        ch.addFilter(RequestIDLogFilter())
        # With the filter active, show the request id where the process
        # name would otherwise appear.
        log_format = log_format.replace('processName', 'request_id')
    formatter = logging.Formatter(log_format)
    ch.setFormatter(formatter)
    root = logging.getLogger()
    root.setLevel(logging.DEBUG if is_debug else logging.INFO)
    root.addHandler(ch)

    is_windows = platform.system() == 'Windows'
    if not is_windows:
        # Fix: /dev/log only exists on POSIX; opening it unconditionally
        # crashed before the Windows branch below could ever run.
        handler = SysLogHandler(address='/dev/log')
        handler.setLevel(logging.DEBUG)
        handler.setFormatter(
            logging.Formatter(app_name + '[%(process)d]: %(processName)s - %(message)s'))
        root.addHandler(handler)

    if is_windows:
        # FIXME: We need some kind of log rotation here
        log_dir = os.path.join(get_application_dir(), 'stoqserver-logs')
        if not os.path.exists(log_dir):
            os.makedirs(log_dir)
        log_filename = os.path.join(log_dir, multiprocessing.current_process().name)
        # Kept open for the process lifetime: it backs the stdout/stderr tee.
        stdout_file = open(log_filename + '-stdout.txt', 'a')
        # On windows, since it is not supervisor that is handling the logs,
        # and some places/plugins will do logging by printing info to stdout
        # (e.g. conector), we need to log them somewhere
        sys.stdout = _Tee(sys.stdout, stdout_file)
        sys.stderr = _Tee(sys.stderr, stdout_file)
        hdlr = logging.FileHandler(log_filename + '.txt')
        formatter = logging.Formatter('%(asctime)s %(levelname)s %(message)s')
        hdlr.setFormatter(formatter)
        root.addHandler(hdlr)
# Project-local blueprint and model imports.
from api.image_api import image_blue
from db.base_model import db

app = Flask(__name__)
# Install request-id tracking so RequestIDLogFilter can tag log records.
RequestID(app)

LOG_PATH = 'log/app.log'
# Daily-rotated application log keeping 30 days of backups.
HANDLER = TimedRotatingFileHandler(LOG_PATH, when="D", interval=1, backupCount=30)
FORMATTER = logging.Formatter(
    "[%(asctime)s][%(request_id)s] [%(pathname)s:%(lineno)d] %(levelname)s - %(message)s"
)
HANDLER.setFormatter(FORMATTER)
HANDLER.addFilter(RequestIDLogFilter())
app.logger.addHandler(HANDLER)
app.logger.setLevel(logging.DEBUG)

# URL format: scheme://user:password@host:port(optional)/database
app.config[
    "SQLALCHEMY_DATABASE_URI"] = "mysql://*****:*****@127.0.0.1/dockermanager"
# app.config["SQLALCHEMY_DATABASE_URI"] = "mysql://*****:*****@127.0.0.1/dockermanager"
# Dynamic tracking of database modifications is slow and will be removed in
# a future SQLAlchemy version; disabled here only to silence the console warning.
app.config["SQLALCHEMY_TRACK_MODIFICATIONS"] = False
app.config["SQLALCHEMY_ECHO"] = True
db.init_app(app)
app.register_blueprint(project_blue)
app.register_blueprint(docker_blue)
app.register_blueprint(image_blue)
import logging
import os
import sys

from flask_log_request_id import RequestIDLogFilter

# Application logger; level is taken from BACKEND_LOGLEVEL (default INFO).
logger = logging.getLogger('app')
level = logging.getLevelName(
    os.environ.get('BACKEND_LOGLEVEL', 'INFO').upper())
console_formatter = logging.Formatter(
    "[backend] [%(asctime)s] {%(pathname)s:%(lineno)d} %(levelname)s - "
    "request_id=%(request_id)s - %(message)s")
console = logging.StreamHandler(stream=sys.stdout)
# Filter supplies %(request_id)s to the formatter above.
console.addFilter(RequestIDLogFilter())
console.setFormatter(console_formatter)
logger.addHandler(console)
logger.setLevel(level)
logger.info('Logger set with level %s' % logging.getLevelName(level))
from helpers import process_test_plan, cancel_test_plan
import time

# def process_test_plan(test_plan):
#     time.sleep(10)
#     app.logger.debug('completed ' + test_plan)
#     return 'completed'

app = Flask(__name__)
# NOTE(review): app_context() only creates a context object; it is not
# pushed or entered here -- confirm this call is intentional.
app.app_context()
RequestID(app)

# Setup logging
handler = logging.StreamHandler()
handler.setFormatter(logging.Formatter(
    "[%(asctime)s] %(levelname)s %(module)s.%(funcName)s [req_%(request_id)s] - %(message)s"))
handler.addFilter(RequestIDLogFilter())  # << Add request id contextual filter
logging.getLogger().addHandler(handler)

API_ROOT = "api"
API_VERSION = "v1"
# HTTP status codes used by the API handlers.
OK = 200
CREATED = 201
ACCEPTED = 202
INTERNAL_ERROR = 500


@app.route('/')
def home():
    # Landing page for the service.
    return make_response("<h1>5GTango VnV Curator</h1>")
from flask import Flask
from flask import jsonify
from flask import request
from flask_log_request_id import RequestID
from flask_log_request_id import RequestIDLogFilter

# One route table per HTTP verb; each maps paths to handlers.
verbs = ['get', 'post', 'delete', 'put', 'patch']
routes = {v: defaultdict(dict) for v in verbs}

app = Flask(__name__)
RequestID(app)

logging.basicConfig(level='INFO', stream=sys.stderr)
# Reuse the handler basicConfig just installed and extend it with the
# request-id filter and a verbose format.
handler = logging.getLogger().handlers[0]
handler.addFilter(RequestIDLogFilter())
handler.setFormatter(
    logging.Formatter(
        "%(asctime)s:%(request_id)s:%(levelname)s:%(name)s:%(lineno)d:%(funcName)s:%(message)s"
    ))
# NOTE(review): this handler is already attached to the root logger;
# addHandler skips handlers that are present, so this line is a no-op.
logging.getLogger().addHandler(handler)
logger = logging.getLogger()


# Dispatches every path/verb combination; body continues beyond this chunk.
@app.route("/", defaults={'path': ''}, methods=['get', 'post', 'put', 'patch', 'delete'])
@app.route("/<path:path>", methods=['get', 'post', 'put', 'patch', 'delete'])
def catch_all(path, *args, **kwargs):
    verb = request.method
def init_loggers(app):
    """Reconfigure Flask's default handler with the module FORMAT and the
    request-id filter.  (*app* is accepted for interface symmetry.)"""
    flask_handler = default_handler
    flask_handler.setFormatter(logging.Formatter(FORMAT))
    flask_handler.addFilter(RequestIDLogFilter())
def __init__(self, app):
    """Install request-id tracking on *app* and configure its logger
    through the project's create_logger/AppLogger helpers."""
    RequestID(app)
    create_logger(
        log=app.logger,
        config=app.config,
        handler_filter=RequestIDLogFilter(),
    )
    AppLogger(app)