Example #1
File: logger.py Project: newsky/fuelweb
 def __init__(self, application):
     self.application = application
     log_file = WatchedFileHandler(settings.API_LOG)
     log_format = logging.Formatter(LOGFORMAT, DATEFORMAT)
     log_file.setFormatter(log_format)
     api_logger.setLevel(logging.DEBUG)
     api_logger.addHandler(log_file)
Example #2
    def __init__(self, *args, **kwargs):
        WatchedFileHandler.__init__(self, *args, **kwargs)

        def reopenLog(signal, frame):
            """the signal handler"""
            self.reopen_stream()
        signal.signal(signal.SIGUSR1, reopenLog)
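The reopen only happens when some outside process delivers SIGUSR1. A minimal sketch of how a log-rotation script might trigger it; the PID-file path below is an assumption for illustration, not part of the original code:

import os
import signal

# Hypothetical: read the daemon's PID from its pid file and signal it so the
# reopenLog handler registered above calls reopen_stream().
with open("/var/run/myapp.pid") as pid_file:
    pid = int(pid_file.read().strip())
os.kill(pid, signal.SIGUSR1)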
Example #3
def get_logger(logname, logfile, loglevel, propagate):
    """Create and return a logger object."""

    stream_handlers = {
        'STDOUT': sys.stdout,
        'STDERR': sys.stderr,
    }

    try:
        if logfile in stream_handlers:
            logger_handler = logging.StreamHandler(stream_handlers[logfile])
        else:
            logger_handler = WatchedFileHandler(logfile)
    except (PermissionError, FileNotFoundError) as err: # pytype: disable=name-error
        print(err)
        sys.exit(-1)

    logger = logging.getLogger(logname)
    log_fmt = '%(asctime)s %(name)-6s %(levelname)-8s %(message)s'
    logger_handler.setFormatter(
        logging.Formatter(log_fmt, '%b %d %H:%M:%S'))
    logger.addHandler(logger_handler)
    logger.propagate = propagate
    logger.setLevel(loglevel)
    return logger
Example #4
    def close(self):
        self._lock.acquire(timeout=2)
        try:
            WatchedFileHandler.close(self)

        finally:
            self._lock.release()
Example #5
    def emit(self, record):
        self._lock.acquire(timeout=2)
        try:
            WatchedFileHandler.emit(self, record)

        finally:
            self._lock.release()
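Both the close() and emit() overrides above assume a _lock attribute created elsewhere in the class. A minimal constructor sketch under that assumption (the class name and signature are illustrative, not taken from the original project):

import threading
from logging.handlers import WatchedFileHandler

class LockedWatchedFileHandler(WatchedFileHandler):
    """Hypothetical base for the close()/emit() overrides shown above."""

    def __init__(self, filename, **kwargs):
        WatchedFileHandler.__init__(self, filename, **kwargs)
        # Guards close() and emit() so rollover and writes do not interleave.
        self._lock = threading.Lock()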
Example #6
def getLogger(name, level=logging.INFO, handlers=[]):
	logger = logging.getLogger(name)

	if len(handlers) != 0:
		logger.setLevel(level)

	if "console" in handlers:
		strm = StreamHandler()
		fmt = logging.Formatter('%(message)s')
		strm.setLevel(level)
		strm.setFormatter(fmt)
		logger.addHandler(strm)

	if "file" in handlers:
		conf = handlers['file']
		fl = WatchedFileHandler(conf['logfile'])
		fl.setLevel(level)

		fmt = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
		fl.setFormatter(fmt)
		logger.addHandler(fl)

	if "syslog" in handlers:
		sysl = SysLogHandler(address='/dev/log', facility=SysLogHandler.LOG_SYSLOG)
		sysl.setLevel(level)

		formatter = logging.Formatter('%(name)s[' + str(os.getpid()) + '] %(levelname)-8s: %(message)s')
		sysl.setFormatter(formatter)
		logger.addHandler(sysl)

	return logger
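Note that although handlers defaults to a list, the body indexes it like a mapping (handlers['file']), so callers evidently pass a dict keyed by handler type. An illustrative call under that assumption (the log file path is made up):

import logging

log = getLogger("myapp", level=logging.DEBUG,
                handlers={"console": True,
                          "file": {"logfile": "/var/log/myapp.log"},
                          "syslog": True})
log.info("configured console, file and syslog handlers")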
Example #7
def main():
    # Set up our log level
    try:
        filename = config['server.logging_filename']
        handler = WatchedFileHandler(filename)
    except KeyError:
        handler = StreamHandler()
    handler.setFormatter(logging.Formatter(config['server.logging_format']))
    root_logger = logging.getLogger('')
    root_logger.setLevel(int(config['server.logging_level']))
    root_logger.addHandler(handler)

    settings = {
    }

    if 'debug' in config:
        log.info('Enabling Tornado Web debug mode')
        settings['debug'] = config['debug']

    host = config['server.socket_host']
    port = int(config['server.socket_port'])

    application = tornado.web.Application([
        (r"/event", EventHandler),
    ], **settings)

    if config.get('dry-run'):
        log.info('In dry-run mode')

    log.info('Starting corgi server http://%s:%d/' % (host, port))
    http_server = tornado.httpserver.HTTPServer(application)
    http_server.listen(port, host)
    tornado.ioloop.IOLoop.instance().start()
Example #8
    def __init__(self, name):
        self.name = name

        self.plugins.register(self.name, self)

        self._methods = {}

        self.debug = bool('DEBUG-%s' % self.name in get_vim_buffers_names())

        self.log = logging.getLogger(self.name)
        self.log.setLevel(logging.DEBUG if self.debug else logging.INFO)

        self.settings.option('LOG_PATH', default=None)

        if self.settings['LOG_PATH']:
            if not os.path.exists(self.settings['LOG_PATH']):
                os.makedirs(self.settings['LOG_PATH'])

            log_file_name = '%s.log' % os.path.join(self.settings['LOG_PATH'],
                                                    self.name)

            handler = WatchedFileHandler(log_file_name, 'w')

        else:
            handler = logging.FileHandler('/dev/null')

        fm = logging.Formatter(
            '%(asctime)s %(levelname)s: %(message)s',
            '%H:%M:%S'
        )
        handler.setFormatter(fm)
        self.log.addHandler(handler)

        self.log.debug('Plugin name: %r', self.name)
Example #9
def _logger(level, out_stream, name=None, log_file=None,
            log_file_level=logging.DEBUG, milliseconds=False):
    """Create the actual logger instance, logging at the given level

    If name is None, it is derived from sys.argv[0] with the extension
    stripped (e.g. gina). out_stream must be passed; the recommended value
    is sys.stderr.
    """
    if name is None:
        # Determine the logger name from the script name
        name = sys.argv[0]
        name = re.sub('.py[oc]?$', '', name)

    # We install our custom handlers and formatters on the root logger.
    # This means that if the root logger is used, we still get correct
    # formatting. The root logger should probably not be used.
    root_logger = logging.getLogger()

    # reset state of root logger
    reset_root_logger()

    # Make it print output in a standard format, suitable for
    # both command line tools and cron jobs (command line tools often end
    # up being run from inside cron, so this is a good thing).
    hdlr = logging.StreamHandler(out_stream)
    # We set the level on the handler rather than the logger, so other
    # handlers with different levels can be added for things like debug
    # logs.
    root_logger.setLevel(0)
    hdlr.setLevel(level)
    if milliseconds:
        # Python default datefmt includes milliseconds.
        formatter = LaunchpadFormatter(datefmt=None)
    else:
        # Launchpad default datefmt does not include milliseconds.
        formatter = LaunchpadFormatter()
    hdlr.setFormatter(formatter)
    root_logger.addHandler(hdlr)

    # Add an optional additional log file.
    if log_file is not None:
        handler = WatchedFileHandler(log_file, encoding="UTF8")
        handler.setFormatter(formatter)
        handler.setLevel(log_file_level)
        root_logger.addHandler(handler)

    # Create our logger
    logger = logging.getLogger(name)

    # Set the global log
    log._log = logger

    # Inform the user the extra log file is in operation.
    if log_file is not None:
        log.info(
            "Logging %s and higher messages to %s" % (
                logging.getLevelName(log_file_level), log_file))

    return logger
Example #10
def create_app():
    from server.views.frontend import frontend as blueprint_frontend
    from server.views.entry import entry as blueprint_entry
    from server.views.filter import filter as blueprint_filter
    from server.views.pinboard import pinboard as blueprint_pinboard
    from server.db import db
    from server.login import login_manager

    app = Flask(__name__, instance_relative_config=True)
    app.jinja_options = dict(app.jinja_options)
    app.jinja_env.add_extension('pyjade.ext.jinja.PyJadeExtension')

    app.config.from_pyfile("default_settings.py")
    app.config.from_envvar('PINBOARD_SETTINGS', silent=True)

    if not app.debug:
        file_handler = WatchedFileHandler(app.config.get("LOG_FILENAME",
            "pinboard.log"))
        file_handler.setLevel(logging.WARNING)
        app.logger.addHandler(file_handler)

    assets = Environment(app)

    js_assets = Bundle(
            "scripts/jquery-1.7.2.js",
            "scripts/jquery-ui-1.8.16.custom.min.js",
            #"scripts/chosen.jquery.min.js",
            "scripts/bootstrap.min.js",
            "scripts/angular-1.0.1.js",
            #"scripts/angular-cookies-1.0.0.js",
            #"scripts/taffy.js",
            "scripts/sugar-1.2.4.min.js",
            #"scripts/jquery.couch.js",
            Bundle("lib/*.coffee", filters=["coffeescript", ]),
            filters=["rjsmin", ],
            output="generated_app.js",
            )
    css_assets = Bundle(
            "stylesheets/jquery-ui-1.8.16.custom.css",
            Bundle(
                "stylesheets/app.less",
                filters=["less", ],
                ),
            filters=["cssmin", ],
            output="generated_app.css",
            )
    assets.register('js_all', js_assets)
    assets.register('css_all', css_assets)

    db.init_app(app)
    login_manager.setup_app(app)

    app.register_blueprint(blueprint_frontend)
    app.register_blueprint(blueprint_entry, url_prefix="/entry")
    app.register_blueprint(blueprint_filter, url_prefix="/filter")
    app.register_blueprint(blueprint_pinboard, url_prefix="/pinboards")

    return app
Example #11
 def emit(self, record):
     old_umask = os.umask(self.umask)
     try:
         # This does not work on py 2.6
         # WatchedFileHandler is probably old style class in 2.6
         # super(UmaskWatchedFileHandler, self).emit(record)
         WatchedFileHandler.emit(self, record)
     finally:
         os.umask(old_umask)
Example #12
def main(args):
    log = logging.getLogger("addisonarches.web")
    log.setLevel(args.log_level)

    formatter = logging.Formatter(
        "%(asctime)s %(levelname)-7s %(name)s|%(message)s")
    ch = logging.StreamHandler()

    if args.log_path is None:
        ch.setLevel(args.log_level)
    else:
        fh = WatchedFileHandler(args.log_path)
        fh.setLevel(args.log_level)
        fh.setFormatter(formatter)
        log.addHandler(fh)
        ch.setLevel(logging.WARNING)

    ch.setFormatter(formatter)
    log.addHandler(ch)

    loop = asyncio.SelectorEventLoop()
    asyncio.set_event_loop(loop)

    down = asyncio.Queue(loop=loop)
    up = asyncio.Queue(loop=loop)

    #TODO: Read service name from CLI
    service = "dev"  # Cf qa, demo, prod, etc
    tok = token(args.connect, service, APP_NAME)
    node = create_udp_node(loop, tok, down, up)
    loop.create_task(node(token=tok))

    app = aiohttp.web.Application()
    assets = Assets(app, **vars(args))
    reg = Registration(app, tok, down, up, **vars(args))
    transitions = Transitions(app, **vars(args))
    work = Workflow(app, tok, down, up, **vars(args))
    for svc in (assets, reg, transitions, work):
        log.info("{0.__class__.__name__} object serves {1}".format(
            svc, ", ".join(svc.routes.keys())))

    handler = app.make_handler()
    f = loop.create_server(handler, args.host, args.port)
    srv = loop.run_until_complete(f)

    log.info("Serving on {0[0]}:{0[1]}".format(srv.sockets[0].getsockname()))
    try:
        loop.run_forever()
    except KeyboardInterrupt:
        pass
    finally:
        loop.run_until_complete(handler.finish_connections(1.0))
        srv.close()
        loop.run_until_complete(srv.wait_closed())
        loop.run_until_complete(app.finish())
    loop.close()
Example #13
def create_logger(filename=settings.LOG_FILE, level=settings.LOG_LEVEL, name=settings.LOG_NAME):
    # WatchedFileHandler watches the file it is logging to.
    # If the file changes, it is closed and reopened using the file name.
    file_handler = WatchedFileHandler(filename)
    file_handler.setLevel(level)
    logger = logging.getLogger(name)
    logger.addHandler(file_handler)

    logger.setLevel(level)
    return logger
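A small standalone sketch of the reopen behaviour described in the comment above: after the file is renamed, as logrotate would do, the next emit notices the change and starts writing to a fresh file. File names here are illustrative only.

import logging
import os
from logging.handlers import WatchedFileHandler

logger = logging.getLogger("rotation-demo")
logger.setLevel(logging.INFO)
logger.addHandler(WatchedFileHandler("app.log"))

logger.info("goes to the original app.log")
os.rename("app.log", "app.log.1")   # simulate logrotate moving the file aside
logger.info("the handler detects the rename and writes to a new app.log")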
Example #14
def main():
    if len(sys.argv) < 5:
        usage(f = sys.stderr)
        sys.exit(-1)
        
    http_ip = sys.argv[1]
    http_port = int(sys.argv[2])
    socksip = sys.argv[3]
    socksport = int(sys.argv[4])
    
    opts, _ = getopt.gnu_getopt(sys.argv[5:], "hdp:l:",
                            ["help", "debug", "pidfile=", "logfile="])
    for o, a in opts:
        if o == "-h" or o == "--help":
            usage()
            sys.exit()
        if o == "-d" or o == "--debug":
            options.logginglevel = logging.DEBUG
        elif o == "-p" or o == "--pidfile":
            options.daemonize = True
            options.pidfile = a
        elif o == "-l" or o == "--logfile":
            options.daemonize = True
            options.logfile = a
            
    if options.daemonize:
        pid = os.fork()
        if pid != 0:
            # the parent process writes the pidfile
            f = open(options.pidfile, "w")
            print >> f, pid
            f.close()
            sys.exit(0)
    
    if options.daemonize:
        logger = logging.getLogger()
        logger.setLevel(options.logginglevel)
        ch = WatchedFileHandler(options.logfile)
        ch.setFormatter(logging.Formatter('[%(asctime)s][%(name)s][%(levelname)s] - %(message)s'))
        logger.addHandler(ch)
    else:
        logging.basicConfig(
            format='[%(asctime)s][%(name)s][%(levelname)s] - %(message)s',
            datefmt='%Y-%d-%m %H:%M:%S',
            level=options.logginglevel,    
        )
    
    socks = SocksServer(socksip, socksport, SocksRelayFactory(), timeout=30, maxclient=500)
    socks.start()

    globalvars.socksip = socksip
    globalvars.socksport = socksport
    globalvars.sockstimeout = 60
    WSGIServer((http_ip, http_port), meek_server_application, log=None).serve_forever()
Example #15
def getDaemonLogger(filePath, log_format=None, loglevel=logging.INFO):
    logger = logging.getLogger()
    logger.setLevel(loglevel)
    try:
        watchedHandler = WatchedFileHandler(filePath)
    except Exception as e:  # pylint: disable=broad-except
        return e, None

    watchedHandler.setFormatter(logging.Formatter(log_format or '%(asctime)s %(msg)s'))
    logger.addHandler(watchedHandler)
    return logger, watchedHandler
Example #16
 def emit(self, record):
     while True:
         try:
             WatchedFileHandler.emit(self, record)
             self.tries = 0
             return
         except IOError as err:
             if self.tries == self.max_tries:
                 raise
             self.stream.close()
             self.stream = self._open()
             self.tries += 1
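The retry loop above relies on tries and max_tries attributes that are initialised elsewhere. A hedged constructor sketch consistent with that emit() override (the class name and default are assumptions):

from logging.handlers import WatchedFileHandler

class RetryingWatchedFileHandler(WatchedFileHandler):
    """Hypothetical subclass matching the emit() override shown above."""

    def __init__(self, filename, max_tries=3, **kwargs):
        WatchedFileHandler.__init__(self, filename, **kwargs)
        self.tries = 0
        self.max_tries = max_tries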
Example #17
 def __init__(self, filename, umask=0112):
     if not isinstance(filename, basestring):
         raise ValueError('filename must be a string, got %r' % filename)
     self.umask = umask
     old_umask = os.umask(self.umask)
     try:
         # This does not work on py 2.6
         # WatchedFileHandler is probably old style class in 2.6
         # super(UmaskWatchedFileHandler, self).__init__(filename)
         WatchedFileHandler.__init__(self, filename)
     finally:
         os.umask(old_umask)
Example #18
def main():
    parser = argparse.ArgumentParser(description="Send out broker notifications")
    parser.add_argument("-c", "--config", dest="config",
                        help="location of the broker configuration file")
    parser.add_argument("--one_shot", action="store_true",
                        help="do just a single run and then exit")
    parser.add_argument("--debug", action="store_true",
                        help="turn on debug logs on stderr")

    opts = parser.parse_args()

    config = Config(configfile=opts.config)

    # These modules must be imported after the configuration has been
    # initialized
    from aquilon.aqdb.db_factory import DbFactory

    db = DbFactory()

    if opts.debug:
        level = logging.DEBUG
        logging.basicConfig(level=level, stream=sys.stderr,
                            format='%(asctime)s [%(levelname)s] %(message)s')
    else:
        level = logging.INFO
        logfile = os.path.join(config.get("broker", "logdir"), "aq_notifyd.log")

        handler = WatchedFileHandler(logfile)
        handler.setLevel(level)

        formatter = logging.Formatter('%(asctime)s [%(levelname)s] %(message)s')
        handler.setFormatter(formatter)

        rootlog = logging.getLogger()
        rootlog.addHandler(handler)
        rootlog.setLevel(level)

    # Apply configured log settings
    for logname, level in config.items("logging"):
        if level not in logging._levelNames:
            continue
        logging.getLogger(logname).setLevel(logging._levelNames[level])

    logger = logging.getLogger("aq_notifyd")

    if opts.one_shot:
        update_index_and_notify(config, logger, db)
    else:
        signal.signal(signal.SIGTERM, exit_handler)
        signal.signal(signal.SIGINT, exit_handler)

        run_loop(config, logger, db)
Example #19
File: logger.py Project: e0ne/fuel-web
def make_api_logger():
    """Make logger for REST API writes logs to the file
    """
    # Circular import dependency problem
    # we import logger module in settings
    from nailgun.settings import settings

    logger = logging.getLogger("nailgun-api")
    log_file = WatchedFileHandler(settings.API_LOG)
    log_file.setFormatter(formatter)
    logger.setLevel(logging.DEBUG)
    logger.addHandler(log_file)
    return logger
Example #20
    def configure_logger(self):
        logger = logging.getLogger(self.name)

        logger.setLevel(logging.INFO)
        formatter = logging.Formatter('%(levelname)s %(asctime)s %(message)s')

        logname = self.logname if hasattr(self, 'logname') else '{}.log'.format(self.name)
        handler = WatchedFileHandler('{}/{}'.format(self.logging_folder, logname))
        handler.setFormatter(formatter)
        handler.setLevel(logging.INFO)

        logger.addHandler(handler)

        return logger
Example #21
def main(args):
    log = logging.getLogger(APP_NAME)
    log.setLevel(args.log_level)

    formatter = logging.Formatter(
        "%(asctime)s %(levelname)-7s %(name)s|%(message)s")
    ch = logging.StreamHandler()

    if args.log_path is None:
        ch.setLevel(args.log_level)
    else:
        fh = WatchedFileHandler(args.log_path)
        fh.setLevel(args.log_level)
        fh.setFormatter(formatter)
        log.addHandler(fh)
        ch.setLevel(logging.WARNING)

    ch.setFormatter(formatter)
    log.addHandler(ch)

    loop = asyncio.SelectorEventLoop()
    asyncio.set_event_loop(loop)

    down = asyncio.Queue(loop=loop)
    up = asyncio.Queue(loop=loop)

    tok = token(args.connect, APP_NAME)
    node = create_udp_node(loop, tok, down, up)
    loop.create_task(node(token=tok))
    loop.create_task(queue_logger(loop, up))

    msg = parcel(
        tok,
        Alert(datetime.datetime.now(), "Hello World!"),
        via=Address(tok.namespace, tok.user, tok.service, turberfield.ipc.demo.router.APP_NAME)
    )
    log.info("Sending message: {}".format(msg))
    loop.call_soon_threadsafe(functools.partial(down.put_nowait, msg))

    try:
        loop.run_forever()
    except KeyboardInterrupt:
        for task in asyncio.Task.all_tasks(loop=loop):
            task.cancel()

        for resource in resources:
            resource.close()

    finally:
        loop.close()
Example #22
File: log.py Project: priy1237/yagi
 def __init__(self, name, level=None):
     formatter = logging.Formatter(FORMAT)
     logging.Logger.__init__(self, name,
         logging.getLevelName(yagi.config.get('logging', 'default_level')))
     handlers = []
     stream_handler = logging.StreamHandler()
     stream_handler.setFormatter(formatter)
     handlers.append(stream_handler)
     logfile = yagi.config.get('logging', 'logfile')
     if logfile:
         file_handler = WatchedFileHandler(filename=logfile)
         file_handler.setFormatter(formatter)
         handlers.append(file_handler)
     for handler in handlers:
         logging.Logger.addHandler(self, handler)
Example #23
def configure_logging(loglevel, logfile=None):
    loglevel = loglevel.upper()
    loglevels = ('DEBUG', 'INFO', 'WARNING', 'ERROR')
    if loglevel not in loglevels:
        raise Exception('Loglevel must be one of {}'.format(loglevels))

    logger.setLevel(getattr(logging, loglevel))
    if logfile:
        handler = WatchedFileHandler(logfile)
    else:
        handler = StreamHandler()
    handler.setFormatter(
        logging.Formatter('[%(asctime)s] %(levelname)s - %(message)s',
                          '%m-%d %H:%M:%S'))
    logger.addHandler(handler)
Example #24
def get_logger(name):
    logger = logging.getLogger(name)
    format = logging.Formatter("[%(levelname)s] %(asctime)s - %(name)s - %(message)s", datefmt="%m-%d-%Y %H:%M:%S")
    logger.propagate = False

    if PRINT_STDOUT:
        handler = logging.StreamHandler()
        handler.setLevel(LOG_LEVEL)
        handler.setFormatter(format)
        logger.addHandler(handler)

    handler = WatchedFileHandler(LOG_FILE)
    handler.setLevel(LOG_LEVEL)
    handler.setFormatter(format)
    logger.addHandler(handler)
    return logger
Example #25
File: app.py Project: kestava/log-server
def initialize_root_logger():
    logger = logging.getLogger()
    logger.setLevel(settings.config['logging.level'])
    
    # We'll use a WatchedFileHandler and utilize some external application to
    # rotate the logs periodically
    logFilePath = os.path.join(settings.config['logdir'], '{0}.log'.format(settings.config['server-producer']))
    handler = WatchedFileHandler(logFilePath)
    formatter = logging.Formatter(fmt='%(asctime)s|%(name)s|%(levelname)s|%(message)s')
    handler.setFormatter(formatter)
    logger.addHandler(handler)
    
    handler = logging.StreamHandler()
    formatter = logging.Formatter(fmt='%(asctime)s|%(levelname)s|%(message)s')
    handler.setFormatter(formatter)
    logger.addHandler(handler)
    return logger
Example #26
    def handle(self, *args, **options):
        self.validate()

        _action = options.get('action')
        if _action not in ('start', 'restart', 'stop'):
            raise Exception('{} is not supported action.'.format(_action))

        if not options.get('queue_alias'):
            raise Exception("Please set --queue-alias options.")

        _queue = RegisteredQueue(**options)

        # Set logger up.
        if not logger.handlers:
            _formatter = logging.Formatter(
                fmt='[%(levelname)s %(asctime)s %(module)s %(process)d %(thread)d ' +
                    django_sqs.PROJECT + '] %(message)s',
                datefmt='%Y-%m-%d %H:%M:%S')
            _handler = WatchedFileHandler(_queue.options['output_log_path'])
            _handler.setFormatter(_formatter)
            logger.addHandler(_handler)
            logger.setLevel(logging.DEBUG)
            logger.info('Set new logger up.')

        # Close the DB connection now and let Django reopen it when it
        # is needed again.  The goal is to make sure that every
        # process gets its own connection
        from django.db import connection
        connection.close()

        if 'start' == _action and os.path.isfile(_queue.options['pid_file_path']):
            with open(_queue.options['pid_file_path'], 'r') as _pid_file:
                for _pid in _pid_file:
                    try:
                        _pid = int(_pid.rstrip('\n'))
                    except (AttributeError, ValueError):
                        _pid = -1
                    logger.info('PID file exists already, so checking whether PID({}) is running.'.format(_pid))
                    if pid_exists(_pid):
                        logger.info('PID({}) is already running, so exit this process.'.format(_pid))
                        return

        _runner = CustomDaemonRunner(_queue, (__name__, _action))
        logger.info('Initiated daemon runner to {} {}.'.format(_action, _queue.options['queue_name']))
        _runner.do_action()
        logger.info('Exit process for {}.'.format(_queue))
Example #27
File: log.py Project: daemotron/backtory
    def __init__(self, name, mode):
        logging.Handler.__init__(self)

        self._handler = WatchedFileHandler(name, mode)
        self.queue = multiprocessing.Queue(-1)

        t = threading.Thread(target=self.receive)
        t.daemon = True
        t.start()
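The thread started above runs self.receive(), which is not shown in this snippet. A hedged sketch of what such a loop might do (the project's actual implementation may differ): drain records from the multiprocessing queue and emit them through the wrapped WatchedFileHandler.

    def receive(self):
        # Illustrative only: pull records queued by other processes and pass
        # them to the single file handler owned by this process.
        while True:
            try:
                record = self.queue.get()
                self._handler.emit(record)
            except (KeyboardInterrupt, SystemExit):
                raise
            except EOFError:
                break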
Example #28
def _create_handlers(filename=LOG_FILEPATH, level=LOG_LEVEL):
    formatter = logging.Formatter(format)

    # WatchedFileHandler watches the file it is logging to.
    # If the file changes, it is closed and reopened using the file name.
    file_handler = WatchedFileHandler(filename)
    file_handler.setFormatter(formatter)
    file_handler.setLevel(level)

    # Used by internal log monitoring applications
    syslog_handler = SysLogHandler(facility=LOG_LOCAL3)
    syslog_handler.setFormatter(formatter)
    syslog_handler.setLevel(level)

    global handlers
    handlers = [file_handler, syslog_handler]

    return handlers
Example #29
File: __init__.py Project: chawco/pyres
def setup_logging(log_level=logging.INFO, filename=None, stream=sys.stderr):
    if log_level == logging.NOTSET:
        return
    logger = logging.getLogger()
    logger.setLevel(log_level)
    if filename:
        try:
            from logging.handlers import WatchedFileHandler
            handler = WatchedFileHandler(filename)
        except:
            from logging.handlers import RotatingFileHandler
            handler = RotatingFileHandler(filename,maxBytes=52428800,
                                          backupCount=7)
    else:
        handler = logging.StreamHandler(stream)
    handler.setFormatter(logging.Formatter(
        '%(asctime)s %(levelname)-8s %(message)s', '%Y-%m-%d %H:%M:%S'))
    logger.addHandler(handler)
Example #30
    def init(cls, config):
        formatter = logging.Formatter(config.FORMAT)
        root = logging.getLogger('')
        root.setLevel(config.ROOT_LEVEL)

        if config.LOG_TO_CONSOLE:
            console_handler = logging.StreamHandler()
            console_handler.setLevel(config.CONSOLE_LEVEL)
            console_handler.setFormatter(formatter)
            root.addHandler(console_handler)
        if config.LOG_TO_FILE:
            file_name = cls.generate_log_file_name()
            file_handler = WatchedFileHandler(
                os.path.join(config.FILE_BASE, file_name),
                encoding='utf-8'
            )
            file_handler.setLevel(config.FILE_LEVEL)
            file_handler.setFormatter(formatter)
            root.addHandler(file_handler)
Example #31
    def set_logger(self):
        if self.debug:
            logging.level = logging.DEBUG
        else:
            logging.level = logging.INFO
        logFormatter = \
            logging.Formatter("%(asctime)s %(levelname)s %(message)s")
        rootLogger = logging.getLogger()
        if self.debug:
            rootLogger.setLevel(logging.DEBUG)
        else:
            rootLogger.setLevel(logging.INFO)

        logdir = os.path.dirname(self.logfile)
        if logdir and not os.path.isdir(logdir):
            os.makedirs(logdir)

        fileHandler = WatchedFileHandler(self.logfile)
        fileHandler.setFormatter(logFormatter)
        rootLogger.addHandler(fileHandler)
        consoleHandler = logging.StreamHandler()
        consoleHandler.setFormatter(logFormatter)
        rootLogger.addHandler(consoleHandler)
Example #32
    def _setup_logging(self):
        loglevel = environ.get('LOG_LEVEL', 'INFO')
        logdir = environ.get('LOG_DIR')
        numeric_level = getattr(logging, loglevel.upper(), None)

        if not isinstance(numeric_level, int):
            raise ValueError('Invalid log level: %s' % loglevel)
        name = getattr(self, "instance_id", type(self).__name__)
        self.logger = logging.getLogger(name)

        if logdir:
            formatter = logging.Formatter('%(asctime)s - %(levelname)s - %(message)s')
            fileName = join(logdir, name)+".log"
            fileHandler = WatchedFileHandler(fileName, mode='w')
            fileHandler.setFormatter(formatter)
            self.logger.addHandler(fileHandler)
            numeric_level = logging.DEBUG
        else:
            consoleHandler = logging.StreamHandler()
            consoleHandler.setFormatter(logging.Formatter('%(asctime)-15s {0} %(message)s'.format(name)))
            self.logger.addHandler(consoleHandler)

        self.logger.setLevel(numeric_level)
Example #33
def setup_log_file(log_file_path):
    from logging import DEBUG, Formatter, getLogger
    from logging.handlers import WatchedFileHandler
    if not log_file_path:
        return
    h = WatchedFileHandler(str(log_file_path))
    h.setLevel(DEBUG)
    h.setFormatter(Formatter(log_format))
    getLogger().addHandler(h)
Example #34
def get_logger(logname, logfile, loglevel, propagate):
    """Create and return a logger object."""

    stream_handlers = {
        'STDOUT': sys.stdout,
        'STDERR': sys.stderr,
    }

    try:
        if logfile in stream_handlers:
            logger_handler = logging.StreamHandler(stream_handlers[logfile])
        else:
            logger_handler = WatchedFileHandler(logfile)
    except PermissionError as err:  # pytype: disable=name-error
        print(err)
        sys.exit(-1)

    logger = logging.getLogger(logname)
    log_fmt = '%(asctime)s %(name)-6s %(levelname)-8s %(message)s'
    logger_handler.setFormatter(logging.Formatter(log_fmt, '%b %d %H:%M:%S'))
    logger.addHandler(logger_handler)
    logger.propagate = propagate
    logger.setLevel(loglevel)
    return logger
Example #35
def setup():
    cfg = parse_config()

    logging.getLogger().setLevel(getattr(logging, cfg.get('log', 'loglevel').upper()))
    logfile = cfg.get('log', 'logfile')
    if logfile != '':
        handler = WatchedFileHandler(logfile)
        handler.addFilter(RequestIdFilter())
        formatter = logging.Formatter(
            '%(asctime)s [%(process)d] %(levelname)-5s '
            '%(request_id)s %(name)s %(message)s'
        )
        handler.setFormatter(formatter)
        logging.getLogger().addHandler(handler)
    else:
        logging.basicConfig()

    if cfg.has_option("metrics", "sentry_dsn"):
        # Only import sentry if enabled
        import sentry_sdk
        from sentry_sdk.integrations.flask import FlaskIntegration
        sentry_sdk.init(
            dsn=cfg.get("metrics", "sentry_dsn"),
            integrations=[FlaskIntegration()],
        )

    if cfg.has_option("metrics", "prometheus_port"):
        prometheus_client.start_http_server(
            port=cfg.getint("metrics", "prometheus_port"),
            addr=cfg.get("metrics", "prometheus_addr"),
        )

    ctx = SygnalContext()
    ctx.database = sygnal.db.Db(cfg.get('db', 'dbfile'))

    for key,val in cfg.items('apps'):
        parts = key.rsplit('.', 1)
        if len(parts) < 2:
            continue
        if parts[1] == 'type':
            try:
                pushkins[parts[0]] = make_pushkin(val, parts[0])
            except:
                logger.exception("Failed to load module for kind %s", val)
                raise

    if len(pushkins) == 0:
        logger.error("No app IDs are configured. Edit sygnal.conf to define some.")
        sys.exit(1)

    for p in pushkins:
        pushkins[p].cfg = cfg
        pushkins[p].setup(ctx)
        logger.info("Configured with app IDs: %r", pushkins.keys())

    logger.error("Setup completed")
Example #36
def create_app():
    # Flask app
    app = Flask(__name__)

    app.config.from_object(config)

    # @app.before_first_request
    handler = WatchedFileHandler("/home/ato/lecturedocapp.log")
    app.logger.addHandler(handler)
    app.logger.setLevel(logging.DEBUG)

    # Blueprint
    app.register_blueprint(main_views.bp)

    return app
Example #37
async def init_app():
    # Setup logger
    if DEBUG_MODE:
        print("debug mode")
        logging.basicConfig(level=logging.DEBUG)
    else:
        root = logging.getLogger('aiohttp.server')
        logging.basicConfig(level=logging.WARN)
        handler = WatchedFileHandler(LOG_FILE)
        formatter = logging.Formatter("%(asctime)s;%(levelname)s;%(message)s",
                                      "%Y-%m-%d %H:%M:%S %z")
        handler.setFormatter(formatter)
        root.addHandler(handler)
        root.addHandler(
            TimedRotatingFileHandler(LOG_FILE,
                                     when="d",
                                     interval=1,
                                     backupCount=30))

    app = web.Application()
    # Global vars
    app['clients'] = {}  # Keep track of connected clients
    app['nodes'] = {}  # Keep track of connected nodes
    app['limit'] = {}  # Limit messages based on IP
    app['active_messages'] = set(
    )  # Avoid duplicate messages from being processed simultaneously
    app['subscriptions'] = {
    }  # Store subscription UUIDs, this is used for targeting callback accounts

    app.add_routes([web.get('/', client_handler)])  # All client WS requests
    app.add_routes([web.get(f'/callback/{CALLBACK_TOKEN}',
                            node_handler)])  # ws/wss callback from nodes
    app.add_routes([web.post('/debug',
                             debug_handler)])  # local debug interface

    return app
Example #38
 def __init__(self, path, isRotate=True, logSaveDays=10, isShortLog=False):
     self.__path = path
     self.__switch = True
     fmter = None
     if isShortLog:
         fmter = logging.Formatter(Log.__shortFmt)
     else:
         fmter = logging.Formatter(Log.__fmt)
     if isRotate:
         self.__fileHandler = handlers.TimedRotatingFileHandler(
             self.__path,
             when='D',
             interval=1,
             backupCount=logSaveDays,
             encoding='utf-8')
     else:
         self.__fileHandler = WatchedFileHandler(self.__path)
     self.__fileHandler.setFormatter(fmter)
     self.__logger = logging.getLogger(path)
     self.__logger.addHandler(self.__fileHandler)
     self.__logger.setLevel(Log.__level)
     self._errorCallBack = None
     self._warningCallBack = None
     self._isOutput = False
Example #39
 def add_file_handler(self, log_file):
     """
     Create, format & add the handler that will log to the log file
     """
     handler = WatchedFileHandler(log_file)
     handler.setLevel(self.logger.level)
     formatter = logging.Formatter(
         '%(asctime)s - %(levelname)s - %(message)s', "%Y-%m-%d %H:%M:%S")
     handler.setFormatter(formatter)
     self.logger.addHandler(handler)
Example #40
def setup_log(name=None, path='logs', rotating=True, backups=3, file_mode='a',
              disk_level=logging.DEBUG, screen_level=logging.INFO,
              encoding='utf-8'):
    '''This logs to screen if ``screen_level`` is not None, and logs to
    disk if ``disk_level`` is not None.

    If you do not pass a log ``name``, the root log is configured and
    returned.

    If ``rotating`` is True, a RotatingFileHandler is configured on the
    directory passed as ``path``.

    If ``rotating`` is False, a single log file will be created at ``path``.
    Its ``file_mode`` defaults to `a` (append), but you can set it to `w`.
    '''
    # If strings are passed in as levels, "decode" them first
    levels = dict(
        debug=logging.DEBUG,                         # 10
        info=logging.INFO,                            # 20
        warn=logging.WARN, warning=logging.WARNING,    # 30
        error=logging.ERROR,                            # 40
        critical=logging.CRITICAL, fatal=logging.FATAL,  # 50
    )
    if isinstance(disk_level, basestring):
        disk_level = levels[disk_level.lower()]
    if isinstance(screen_level, basestring):
        screen_level = levels[screen_level.lower()]
    # Set up logging
    log = logging.getLogger(name)
    if screen_level:
        h1 = logging.StreamHandler()
        h1.setLevel(screen_level)
        log.addHandler(h1)
    if disk_level:
        if rotating:
            try:
                os.mkdir(path)
            except OSError:
                pass
            h2 = RotatingFileHandler(os.path.join(
                path, name or 'root' + ".log.txt"), encoding=encoding,
                maxBytes=2 ** 22, backupCount=backups)
        else:
            h2 = WatchedFileHandler(path, mode=file_mode, encoding=encoding)
        h2.setLevel(disk_level)
        log.setLevel(disk_level)
        log.addHandler(h2)
    return log
Example #41
File: main.py Project: messa/ow2
def setup_log_file(log_file_path):
    from logging import DEBUG, Formatter, getLogger
    from logging.handlers import WatchedFileHandler
    from .util.logging import CustomFormatter
    if log_file_path:
        h = WatchedFileHandler(str(log_file_path))
        h.setLevel(DEBUG)
        h.setFormatter(
            CustomFormatter(strip_name_prefix=__name__.split('.')[0]))
        getLogger().addHandler(h)
Example #42
def setup_logging(log_file, verbose):
    from logging import INFO, DEBUG, StreamHandler, Formatter
    from logging.handlers import WatchedFileHandler
    root = logging.getLogger('')
    root.setLevel(DEBUG)

    h = StreamHandler()
    h.setFormatter(Formatter(log_format))
    h.setLevel(DEBUG if verbose else INFO)
    root.addHandler(h)

    if log_file:
        h = WatchedFileHandler(log_file)
        h.setFormatter(Formatter(log_format))
        h.setLevel(DEBUG)
        root.addHandler(h)
Example #43
def setup_logging(output_dir: str):
    global logger
    for handler in logger.handlers:
        logger.removeHandler(handler)
    logger.setLevel(logging.DEBUG)
    handler = WatchedFileHandler(f'{output_dir}/run_logs.log')
    handler.setLevel(logging.DEBUG)
    logger.addHandler(handler)
    handler = logging.StreamHandler(sys.stdout)
    handler.setLevel(logging.DEBUG)
    logger.addHandler(handler)
Example #44
def _create_file_handler(target, filename):
    """ 创建一个基于文件的 logging handler
    :param target: 一个 Path 对象,或者一个 path 字符串
    """
    logsdir = target if isinstance(target, Path) else Path(target)
    # Create the logs directory, or adjust its permissions, so other users
    # (e.g. nginx) can also write to it.
    # Note: to end up with 777 permissions you must use 0o40777, or call
    # os.umask(0) first.
    # 0o40777 matches the st_mode value reported by os.stat().
    if logsdir.exists():
        logsdir.chmod(0o40777)
    else:
        logsdir.mkdir(mode=0o40777)
    logfile = logsdir.joinpath(filename + '.log')
    if not logfile.exists():
        logfile.touch()
    # Use WatchedFileHandler so a new stream is opened automatically when the
    # file changes; intended to work together with logrotate.
    return WatchedFileHandler(logfile, encoding='utf8')
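A short usage sketch for the helper above (the directory and logger names are illustrative): attach the returned handler to a logger together with a formatter of your choosing.

import logging

handler = _create_file_handler('/var/log/myapp', 'worker')
handler.setFormatter(logging.Formatter('%(asctime)s %(levelname)s %(message)s'))
logging.getLogger('worker').addHandler(handler)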
Example #45
def get_logger(name):
    logger = logging.getLogger(name)
    if logger and logger.handlers:
        return logger
    logger = logging.getLogger(name=name)
    if logger and logger.handlers:
        return logger
    logger.setLevel(logging.DEBUG)
    logger.info('use default logger level: DEBUG')
    logger.propagate = False
    if name == "test_scheduler":
        file_handler = TimedRotatingFileHandler(LOGGER_FILE_PATH, when="MIDNIGHT", interval=1, backupCount=5)
    else:
        file_handler = WatchedFileHandler(LOGGER_FILE_PATH)
    file_handler.setFormatter(logging.Formatter(LOGGER_FORMAT))
    logger.addHandler(file_handler)
    return logger
Example #46
def create_flask():
    """ Create the Flask app """

    # print 'sqlmongo::create_flask()'

    # create app
    app = Flask(__name__)

    # configure settings
    app.config.from_pyfile('config.py')

    # setup SQL database handler
    db.app = app
    db.init_app(app)

    # set up mongo database handler
    mongo.app = app
    mongo.init_app(app)

    # configure logger
    # http://flask.pocoo.org/docs/0.11/api/#flask.Flask.logger
    # https://docs.python.org/dev/library/logging.html#logging.Logger
    handler = WatchedFileHandler(app.config['DEBUG_LOG_FILE'])
    handler.setLevel(logging.INFO)
    # http://flask.pocoo.org/docs/0.11/errorhandling/#controlling-the-log-format
    handler.setFormatter(
        Formatter('%(asctime)s [%(levelname)s] %(message)s '
                  '[%(pathname)s : %(lineno)d]'))
    app.logger.addHandler(handler)
    app.logger.setLevel('INFO')

    # register the module controllers
    # sets up URL collections, that we wrote in CONTROLLER file
    from modules.Countries.controller import countries
    from modules.Ceramics.controller import ceramics

    app.register_blueprint(countries)
    app.register_blueprint(ceramics)

    # http://flask.pocoo.org/docs/0.11/api/#flask.Flask.route
    @app.route('/')
    def home():
        """Default homepage

        Args:
            None
        Returns:
            The homepage HTML. Currently just 'Hello World from SQLMongo'.

        """
        return render_template('home.html')

    return app
Example #47
def make_app(import_name=__name__,
             config='homebank.settings.Configuration',
             debug=False):

    app = Flask(import_name)
    app.config.from_object(config)
    app.config.from_envvar('FLASK_SETTINGS', silent=True)
    app.debug = debug
    app.jinja_env.filters['currency'] = \
        lambda x: "{:,.2f} %s".format(x).replace(",", " ").replace(".", ",") % (
            app.config.get('CURRENCY', '')
        )

    if app.debug:
        import_string('flask.ext.debugtoolbar:DebugToolbarExtension')(app)

    @app.errorhandler(404)
    def not_found(ex):
        return render_template("404.html"), 404

    for blueprint in ['__init__', 'accounts', 'transactions']:
        app.register_blueprint(
            import_string('homebank.blueprints.%s:root' % blueprint))

    login_manager = LoginManager(app=app)
    login_manager.login_view = "index.login"
    login_manager.session_protection = "strong"

    @login_manager.user_loader
    def load_user(uid):
        if uid != app.config['PINCODE']:
            return None
        return User()

    if not app.debug:
        handler = StreamHandler()
        if 'ERROR_LOG' in app.config:
            handler = WatchedFileHandler(app.config['ERROR_LOG'])

        handler.setLevel(WARNING)
        handler.setFormatter(
            Formatter("%(asctime)s - %(name)s - %(levelname)s - %(message)s"))
        app.logger.addHandler(handler)

    return app
Example #48
def main(args):

    log = logging.getLogger("pyspike")
    log.setLevel(args.log_level)

    formatter = logging.Formatter(
        "%(asctime)s %(levelname)-7s %(name)s|%(message)s")
    ch = logging.StreamHandler()

    if args.log_path is None:
        ch.setLevel(args.log_level)
    else:
        fh = WatchedFileHandler(args.log_path)
        fh.setLevel(args.log_level)
        fh.setFormatter(formatter)
        log.addHandler(fh)
        ch.setLevel(logging.WARNING)

    ch.setFormatter(formatter)
    log.addHandler(ch)

    if args.target not in pyspike.ops.misc.targets: 
        log.warning("No build defined for target '{}'.".format(args.target))
        return 1

    locn = os.path.abspath(os.path.expanduser(args.work))
    log.info("Calculated working directory as {0}".format(locn))
    os.chdir(locn)
    for url in pyspike.ops.misc.targets[args.target]:
        project = pyspike.ops.misc.url_to_project(url)
        if os.path.exists(os.path.join(locn, project)):
            success = pyspike.ops.misc.git_pull(locn, project)
            success = success and pyspike.ops.misc.git_checkout(locn, project)
        else: 
            success = pyspike.ops.misc.git_clone(args.work, url)

        if success:
            pyspike.ops.misc.pip_uninstall(locn, project)
            success = pyspike.ops.misc.pip_install(locn, project)
            # TODO: python -m unittest discover <namespace> 

        if not success:
            return 1

    if not args.command:
        log.info("No command supplied.")

    elif args.command == "docker":
        log.info("Docker command supplied.")

    log.info(sys.executable)
    return 0
Example #49
    def generate_logger(clazz_name):
        logger = logging.getLogger(clazz_name)
        formatter = logging.Formatter(
            '%(asctime)s - %(name)s - %(lineno)d - %(levelname)s - %(message)s'
        )

        handler = logging.StreamHandler()
        handler.setLevel(INFO)
        handler.setFormatter(formatter)
        logger.addHandler(handler)

        file_handler = WatchedFileHandler('log/app.log')
        file_handler.setLevel(INFO)
        file_handler.setFormatter(formatter)
        file_handler.addFilter(LoggingFilter(ERROR))
        logger.addHandler(file_handler)

        logger.setLevel(INFO)
        logger.propagate = False

        return logger
Example #50
    def run(self):

        handler = WatchedFileHandler('/tmp/cake.log')
        self.logger = logging.getLogger()
        self.logger.addHandler(handler)
        try:
            config = yamlreader.yaml_load('/etc/cake.conf.d/')
            self.logger.exception(config)
            while True:
                for test_name, test_id in config['tests'].iteritems():
                    self.logger.info('{0}: fetching data for {1}:{2}'.format(
                        time.strftime("%a, %d %b %Y %H:%M:%S +0000",
                                      time.gmtime()), test_name, test_id))
                    data_dict = gather_data(config['apikey'],
                                            config['username'], test_id)
                    push_data(data_dict, test_name, config['graphitehost'])
                time.sleep(600)
        except Exception:
            self.logger.exception('succubus doing some strange stuff')
Example #51
def logging_config():
    level = logging.ERROR
    if config.get('log', 'level') == 'DEBUG':
        level = logging.DEBUG
    elif config.get('log', 'level') == 'INFO':
        level = logging.INFO
    elif config.get('log', 'level') == 'WARNING':
        level = logging.WARNING

    log_file = None
    if config.get('log', 'file'):
        log_file = config.get('log', 'file')

    if log_file:
        handlers = [WatchedFileHandler(log_file)]
    else:
        handlers = [logging.StreamHandler()]

    logging.basicConfig(level=level, handlers=handlers, format=LOG_FORMAT)
Example #52
def main(args):
    log = logging.getLogger(APP_NAME)
    log.setLevel(args.log_level)

    formatter = logging.Formatter(
        "%(asctime)s %(levelname)-7s %(name)s|%(message)s")
    ch = logging.StreamHandler()

    if args.log_path is None:
        ch.setLevel(args.log_level)
    else:
        fh = WatchedFileHandler(args.log_path)
        fh.setLevel(args.log_level)
        fh.setFormatter(formatter)
        log.addHandler(fh)
        ch.setLevel(logging.WARNING)

    ch.setFormatter(formatter)
    log.addHandler(ch)

    loop = asyncio.SelectorEventLoop()
    asyncio.set_event_loop(loop)

    down = asyncio.Queue(loop=loop)
    up = asyncio.Queue(loop=loop)

    tok = token(args.connect, APP_NAME)
    node = create_udp_node(loop, tok, down, up)
    loop.create_task(node(token=tok))
    loop.create_task(queue_logger(loop, up))

    msg = parcel(
        tok,
        Alert(datetime.datetime.now(), "Hello World!"),
        via=Address(tok.namespace, tok.user, tok.service, turberfield.ipc.demo.router.APP_NAME)
    )
    log.info("Sending message: {}".format(msg))
    loop.call_soon_threadsafe(functools.partial(down.put_nowait, msg))

    try:
        loop.run_forever()
    except KeyboardInterrupt:
        for task in asyncio.Task.all_tasks(loop=loop):
            task.cancel()

        for resource in resources:
            resource.close()

    finally:
        loop.close()
Example #53
def main():
    logs.mkdir(exist_ok=True)

    logging.basicConfig(level=logging.INFO,
                        format='%(asctime)s %(levelname)-8s %(message)s',
                        datefmt='%Y-%m-%d %H:%M:%S',
                        handlers=[
                            WatchedFileHandler(str(logs / "aio2influxdb.log")),
                            logging.StreamHandler(sys.stdout)
                        ])
    logging.info('Startup')

    if EXAMPLE_FILE:
        logging.info('Using example file as source')

    dbclient = None
    if USE_INFLUXDB:
        dbclient = InfluxDBClient(host='localhost',
                                  port=8086,
                                  username='******',
                                  password='******',
                                  database=DB_NAME)
        logging.info('Database connection established')

        dbclient.create_database(DB_NAME)
        logging.info("Ensure database: " + DB_NAME)

    exceptions = 0
    while exceptions < MAX_NUMBER_OF_EXCEPTIONS:
        try:
            html = fetch_html()
            parse_and_write(dbclient, html)

            time.sleep(FREQUENCY)
        except Exception:
            exceptions = exceptions + 1
            timestring = time.strftime('%Y_%m_%d-%H_%M_%S')
            logging.exception("Exception at " + timestring)

            filepath = logs / "exception-{}.html".format(timestring)
            with filepath.open("w") as myfile:
                myfile.write(html)
Example #54
    def setup_logging(self):
        root = logging.getLogger()
        root.handlers = []
        root.name, _ = os.path.splitext(os.path.basename(__main__.__file__))

        output = self.get("log", "STDERR")

        handler = None
        formatter = logging.Formatter(
            "%(asctime)s %(name)s [%(process)s] %(levelname)s: %(message)s {%(filename)s:%(lineno)s}"
        )

        if output == "STDERR":
            handler = StreamHandler(sys.stderr)
            handler.setFormatter(formatter)
        elif output == "STDOUT":
            handler = StreamHandler(sys.stdout)
            handler.setFormatter(formatter)
        elif output == "SYSLOG":
            handler = SysLogHandler("/dev/log")
            handler.setFormatter(
                logging.Formatter(
                    "%(name)s[%(process)s] %(levelname)s: %(message)s {%(filename)s:%(lineno)s}"
                ))
        else:
            handler = WatchedFileHandler(output)
            handler.setFormatter(formatter)

        level = None
        try:
            level = {
                "DEBUG": logging.DEBUG,
                "INFO": logging.INFO,
                "WARNING": logging.WARNING,
                "ERROR": logging.ERROR,
                "CRITICAL": logging.CRITICAL
            }[self.get("log_level", "DEBUG")]
        except KeyError as e:
            level = logging.DEBUG

        root.addHandler(handler)
        root.setLevel(level)
Example #55
    def get_logger(self, name):
        logger = logging.getLogger(name)

        if not logger.handlers:
            logger.setLevel(logging.INFO)

            # create formatter and handler
            formatter = logging.Formatter('%(asctime)s;%(message)s')
            handler = WatchedFileHandler(os.path.join(log_file_path, name))
            # combine
            handler.setLevel(logging.INFO)
            handler.setFormatter(formatter)
            logger.addHandler(handler)

        return logger
Example #56
def get_logger(stdout: bool = False):
    logger = logging.getLogger("bpow")
    logging.basicConfig(level=logging.INFO)
    formatter = logging.Formatter("%(asctime)s - %(levelname)s - %(filename)s@%(funcName)s:%(lineno)s:%(message)s", "%Y-%m-%d %H:%M:%S %z")
    if not stdout:
        log_file = "/tmp/bpow.log"
        handler = WatchedFileHandler(log_file)
        handler.setFormatter(formatter)
        logger.addHandler(handler)
        logger.addHandler(TimedRotatingFileHandler(log_file, when="d", interval=1, backupCount=100))
    else:
        handler = logging.StreamHandler(sys.stdout)
        handler.setFormatter(formatter)
        logger.addHandler(handler)
    return logger
Example #57
    def init_statuslog(self):
        self.statuslog = logging.getLogger('statuslog')
        self.statuslog.setLevel(logging.DEBUG)
        statuslog_file_handler = WatchedFileHandler("%s/main-status.log" %
                                                    (self.config.LOG_DIR))
        statuslog_file_handler.setFormatter(
            logging.Formatter('%(asctime)s %(levelname)s: %(message)s '))
        statuslog_file_handler.setLevel(logging.INFO)
        self.statuslog.addHandler(statuslog_file_handler)

        statuslog_stream_handler = logging.StreamHandler()
        statuslog_stream_handler.setFormatter(
            logging.Formatter('%(asctime)s %(levelname)s: %(message)s '))
        statuslog_stream_handler.setLevel(logging.DEBUG)
        self.statuslog.addHandler(statuslog_stream_handler)
Example #58
def get_scripts_logger(filename='/tmp/log.txt', level=logging.DEBUG):
    log_formatter = logging.Formatter(
        "%(asctime)s[%(levelname)s][%(name)s][%(module)s-%(lineno)s]-%(process)d %(message)s"
    )
    root_logger = logging.getLogger('app')
    console_handler = logging.StreamHandler(sys.stdout)
    console_handler.setLevel(level)
    console_handler.setFormatter(log_formatter)
    root_logger.handlers = []
    root_logger.addHandler(console_handler)
    file_handler = WatchedFileHandler(filename)
    file_handler.setFormatter(log_formatter)
    file_handler.setLevel(level)
    root_logger.addHandler(file_handler)
    root_logger.setLevel(level=level)
Example #59
def get_daemon_logger(filepath,
                      log_format=None,
                      loglevel=logging.INFO,
                      journal=False):
    logger = logging.getLogger()
    logger.setLevel(loglevel)
    try:
        if journal:
            from systemd.journal import JournalHandler
            handler = JournalHandler(SYSLOG_IDENTIFIER=basename(sys.argv[0]))
        elif filepath:
            handler = WatchedFileHandler(filepath)
        else:
            handler = StreamHandler()
    except Exception as e:  # pylint: disable=broad-except
        print("Fatal error creating client_logger: " + str(e))
        sys.exit(os.EX_OSERR)

    if (log_format):
        handler.setFormatter(logging.Formatter(log_format))
    logger.addHandler(handler)
    return logger, handler
Example #60
def setup_logger(logger):
    """Setup the Flask app's logger

    :param logger: Flask app's logger
    """
    cfy_config = config.instance

    # setting up the app logger with a watched file handler, in addition to
    #  the built-in flask logger which can be helpful in debug mode.
    # log rotation is handled by logrotate.
    additional_log_handlers = [
        WatchedFileHandler(filename=cfy_config.rest_service_log_path)
    ]

    _setup_python_logger(logger=logger,
                         logger_level=cfy_config.rest_service_log_level,
                         handlers=additional_log_handlers,
                         remove_existing_handlers=False)

    # log all warnings passed to function
    for w in cfy_config.warnings:
        logger.warning(w)
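_setup_python_logger is not shown in this snippet. A hedged sketch of what such a helper could look like (the project's real implementation may differ): attach the given handlers at the requested level, optionally clearing existing ones first.

import logging

def _setup_python_logger(logger, logger_level, handlers,
                         remove_existing_handlers=True):
    """Illustrative only: wire the given handlers onto an existing logger."""
    if remove_existing_handlers:
        for existing in list(logger.handlers):
            logger.removeHandler(existing)
    for handler in handlers:
        handler.setFormatter(logging.Formatter(
            '%(asctime)s [%(levelname)s] %(message)s'))
        logger.addHandler(handler)
    logger.setLevel(logger_level)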