Example #1
def configure_logging(app):

    formatter = logging.Formatter('%(asctime)s %(levelname)s: %(message)s '
                                  '[in %(pathname)s:%(lineno)d]')

    # Errors could also be sent out via email, e.g. with an SMTPHandler.
    log_file = os.path.join(os.path.dirname(__file__), 'audio-anno.log')

    max_size = 1024 * 1024 * 20  # Max Size for a log file: 20MB
    log_handler = RotatingFileHandler(log_file, maxBytes=max_size,
                                      backupCount=10)

    try:
        if config.LOG_LEVEL:
            log_level = config.LOG_LEVEL
        else:
            log_level = 'DEBUG'
    except AttributeError:
        log_level = 'DEBUG'

    log_handler.setLevel(log_level)
    log_handler.setFormatter(formatter)

    app.logger.addHandler(log_handler)
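
As the comment above suggests, errors could additionally be mailed out. A minimal sketch of that idea, assuming hypothetical MAIL_SERVER and ADMINS attributes on the same config object (they are not part of the original example):

import logging
from logging.handlers import SMTPHandler

def add_error_mail_handler(app):
    # config.MAIL_SERVER and config.ADMINS are assumed, hypothetical settings.
    mail_handler = SMTPHandler(mailhost=config.MAIL_SERVER,
                               fromaddr='no-reply@' + config.MAIL_SERVER,
                               toaddrs=config.ADMINS,
                               subject='Application error')
    mail_handler.setLevel(logging.ERROR)  # only ERROR and above get emailed
    mail_handler.setFormatter(logging.Formatter(
        '%(asctime)s %(levelname)s: %(message)s [in %(pathname)s:%(lineno)d]'))
    app.logger.addHandler(mail_handler)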
Example #2
def init(verbose=0, quiet=False, filename='activity.log'):
    """
    Initialize the logger
    * verbose (int): verbosity level of the standard output
      0 (default) ~ ERROR, 1 ~ WARN & WARNING, 2 ~ INFO, 3 ~ DEBUG
    * quiet (bool): suppress all console output regardless of the verbosity level
    """
    if not os.path.exists('log'):
        os.mkdir('log')

    with open("log/" + filename, 'w'):
        pass

    logger = logging.getLogger()
    logger.propagate = False
    logger.setLevel(min([
        conf['logging']['log_file_level'], 
        conf['logging']['log_console_level'], 
        verbose]))

    formatter = logging.Formatter(
        '%(asctime)s :: %(levelname)s :: ' +
        '%(filename)s:%(funcName)s[%(lineno)d] :: %(message)s')
    file_handler = RotatingFileHandler("log/" + filename, 'w', 10000000, 10)
    file_handler.setLevel(conf['logging']['log_file_level'])
    file_handler.setFormatter(formatter)
    logger.addHandler(file_handler)

    formatter = logging.Formatter(
        '%(asctime)s :: %(levelname)s :: ' +
        '%(filename)s:%(funcName)s[%(lineno)d] :: %(message)s')
    file_handler = RotatingFileHandler("log/errors.log", 'w', 10000000, 10)
    file_handler.setLevel(logging.ERROR)
    file_handler.setFormatter(formatter)
    logger.addHandler(file_handler)

    formatter = logging.Formatter(
        '%(levelname)s :: %(filename)s :: %(message)s')
    stream_handler = logging.StreamHandler()
    if verbose == -1:
        stream_handler.setLevel(conf['logging']['log_file_level'])
    elif verbose == 0:
        stream_handler.setLevel(logging.ERROR)
    elif verbose == 1:
        stream_handler.setLevel(logging.WARNING)
    elif verbose == 2:
        stream_handler.setLevel(logging.INFO)
    elif verbose == 3:
        stream_handler.setLevel(logging.DEBUG)
    elif verbose == 4:
        stream_handler.setLevel(0)
    else:
        stream_handler.setLevel(conf['logging']['log_console_level'])
    stream_handler.setFormatter(formatter)
    if not quiet:
        logger.addHandler(stream_handler)

    logging.info("=" * 80)
    logging.info('Logging system started: verbose=%d, quiet=%s' %
                 (verbose, str(quiet)))
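
A quick usage sketch of the verbosity mapping documented in the docstring, assuming the module-level conf dict has already been loaded:

init(verbose=2, quiet=False, filename='activity.log')   # console shows INFO and above
logging.getLogger(__name__).info('pipeline started')    # also written to log/activity.log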
Example #3
File: logger.py  Project: LeGaulois/soc
class log(object):
    def __init__(self,fichier,instance):
        self.logger = logging.getLogger(instance)
        
        self.logger.setLevel(logging.DEBUG)
        formatter = logging.Formatter('%(asctime)s :: %(levelname)s :: %(message)s')
        self.file_handler = RotatingFileHandler(fichier, 'a', 5000000, 1)
        self.file_handler.setLevel(logging.DEBUG)   
        self.file_handler.setFormatter(formatter)
        self.file_handler.createLock()
        self.logger.addHandler(self.file_handler)
        
    def ecrire(self,message,niveau):
        if niveau=='critical':
            self.logger.critical(message)

        elif niveau=='error':
            self.logger.error(message)

        elif niveau=='warning':
            self.logger.warning(message)

        elif niveau=='info':
            self.logger.info(message)
        else:        
            self.logger.debug(message)

    def fermer(self):
        self.file_handler.close()
Example #4
def get_logger(name, level='INFO', terminal_log=True, file_log=False,
               file_name=None, file_max_bytes=1048576, file_backup_count=3):
    """Copy from PyExpLabSys.common.utilities. See that module for details."""
    # Get the root logger and set the level
    log_level = getattr(logging, level.upper())
    root_logger = logging.getLogger('')
    root_logger.setLevel(log_level)

    handlers = []
    # Form the handler(s) and set the level
    if terminal_log:
        stream_handler = logging.StreamHandler()
        stream_handler.setLevel(log_level)
        handlers.append(stream_handler)

    # Create rotating file handler
    if file_log:
        if file_name is None:
            file_name = name + '.log'
        file_handler = RotatingFileHandler(file_name, maxBytes=file_max_bytes,
                                           backupCount=file_backup_count)
        file_handler.setLevel(log_level)
        handlers.append(file_handler)

    # Add formatters to the handlers and add the handlers to the root_logger
    formatter = logging.Formatter(
        '%(asctime)s:%(name)s: %(levelname)s: %(message)s')
    for handler in handlers:
        handler.setFormatter(formatter)
        root_logger.addHandler(handler)

    # Create a named logger and return it
    logger = logging.getLogger(name)
    return logger
Example #5
def start(host,port,allow_agent=False):
    import argparse
    from gevent.pywsgi import WSGIServer
    from geventwebsocket.handler import WebSocketHandler
    from jinja2 import FileSystemLoader
    import os

    root_path = os.path.dirname(wssh.__file__)
 #   root_path = '/home/bob/test/wssh/wssh'#os.path.dirname(wssh.__file__)
#    print "RootPath===>",root_path
    app.jinja_loader = FileSystemLoader(os.path.join(root_path, 'templates'))
    app.static_folder = os.path.join(root_path, 'static')


#    global wssh_server_log_file
    handler = RotatingFileHandler(wssh_server_log_file, maxBytes=10000000, backupCount=5)
    handler.setLevel(logging.DEBUG)
    app.logger.addHandler(handler)

    app.config['WSSH_ALLOW_SSH_AGENT'] = allow_agent

    agent = 'wsshd/{0}'.format(wssh.__version__)

    print('{0} running on {1}:{2}'.format(agent, host, port))

    app.debug = True
    http_server = WSGIServer((host, port), app,
        log=None,
        handler_class=WebSocketHandler)
    try:
        http_server.serve_forever()
    except KeyboardInterrupt:
        pass
Example #6
File: cli.py  Project: levi-rs/chirp
def configure_logger():
    """
    Creates a rotating log

    :param dir_path: String, path to current directory
    """
    # Formatting
    formatter = logging.Formatter('[%(levelname)s %(asctime)s] %(message)s')

    # Set up STDOUT handler
    stdout_handler = logging.StreamHandler(sys.stdout)
    stdout_handler.setLevel(logging.DEBUG)
    stdout_handler.setFormatter(formatter)

    # Set up file logging with rotating file handler
    rotate_fh = RotatingFileHandler(LOG_FILE, backupCount=5, maxBytes=1000000)
    rotate_fh.setLevel(logging.DEBUG)
    rotate_fh.setFormatter(formatter)

    # Create Logger object
    logger = logging.getLogger(__name__)
    logger.setLevel(logging.DEBUG)
    logger.addHandler(stdout_handler)
    logger.addHandler(rotate_fh)

    return logger
Example #7
File: app.py  Project: coco413/walle-web
def register_logging(app):
    # TODO https://blog.csdn.net/zwxiaoliu/article/details/80890136
    # email errors to the administrators
    import logging
    from logging.handlers import RotatingFileHandler
    # Formatter
    formatter = logging.Formatter(
            '%(asctime)s %(levelname)s %(pathname)s %(lineno)s %(module)s.%(funcName)s %(message)s')

    # log dir
    if not os.path.exists(app.config['LOG_PATH']):
        os.makedirs(app.config['LOG_PATH'])

    # FileHandler Info
    file_handler_info = RotatingFileHandler(filename=app.config['LOG_PATH_INFO'])
    file_handler_info.setFormatter(formatter)
    file_handler_info.setLevel(logging.INFO)
    info_filter = InfoFilter()
    file_handler_info.addFilter(info_filter)
    app.logger.addHandler(file_handler_info)

    # FileHandler Error
    file_handler_error = RotatingFileHandler(filename=app.config['LOG_PATH_ERROR'])
    file_handler_error.setFormatter(formatter)
    file_handler_error.setLevel(logging.ERROR)
    app.logger.addHandler(file_handler_error)
Example #8
    def __init__(self):
        super(Actuasim, self).__init__()
        self.logger = logging.getLogger()
        self.logger.setLevel(logging.DEBUG)
        formatter = logging.Formatter('%(asctime)s :: %(levelname)s :: %(message)s')
        file_handler = RotatingFileHandler('actuasim.log', 'a', 10000000, 1)
        file_handler.setLevel(logging.DEBUG)
        file_handler.setFormatter(formatter)
        self.logger.addHandler(file_handler)
        self.logger.info('=======================================')
        self.logger.info('           ACTUASIM START')
        self.logger.info('=======================================')
        self.ui = Ui_MainWindow()
        self.ui.setupUi(self)
        self.resize(1700, 900)
        self.classrooms = []
        self.tabs = QTabWidget()
        self.setCentralWidget(self.tabs)
        self.file_menu = self.ui.menubar.addMenu("&File")
        self.save_action = QAction("&Save", self, triggered=self.save)
        self.file_menu.addAction(self.save_action)
        self.load_action = QAction("&Load", self, triggered=self.load)
        self.file_menu.addAction(self.load_action)
        self.command_handler = CommandHandler(self)

        # endpoints, status, id
        self.control_endpoint = ('0.0.0.0', 0)
        self.data_endpoint = ('0.0.0.0', 0)
        self.status = 0
        self.channel_id = random.randint(0, 255)  # TODO: handle multiple channel

        # server
        self.knxserver = Knxserver()
        self.knxserver.trigger.connect(self.frame_received)
        self.knxserver.start()
Example #9
def main():
    i = Ingestor()
    todaydate = time.strftime("%m-%d-%Y")
    filename = 'logs/' + str(todaydate) + '.log'
    handler = RotatingFileHandler(filename, mode='a', backupCount=1)
    handler.setLevel(logging.INFO)
    app.logger.addHandler(handler)
Example #10
File: PoleLog.py  Project: itjp/pole
def setup_logging(app):
    log = logging.getLogger(app)
    log.setLevel(logging.INFO)

    formatter = logging.Formatter("%(levelname)s %(asctime)s %(name)s "
                                  "%(filename)s(%(lineno)d): %(message)s")

    sh = logging.StreamHandler()
    sh.setFormatter(formatter)
    log.addHandler(sh)

    log_dir = os.path.expanduser('~')
    log_file = os.path.join(log_dir, '.erp', app + '.log')
    try:
        fh = RotatingFileHandler(log_file, maxBytes=_MAX_LOG_SIZE,
                                 backupCount=_MAX_LOG_BACKUP)
    except IOError:
        logging.exception('Could not set up file logging.')
        fh = None

    if fh:
        fh.setLevel(logging.INFO)
        fh.setFormatter(formatter)
        log.addHandler(fh)

    if os.getenv('DEBUG_SESSION_POLE', 0) == '1':
        log.setLevel(logging.DEBUG)
        log.debug('Debug enabled.')

        try:
            fh.setLevel(logging.DEBUG)
        except Exception as e:
            log.error(e)
Example #11
def enable_cli_log(debug=0):
    """
    Use this helper to add a rotating file handler to the 'ochopod' logger. This file will be
    located in /var/log so that the CLI can go get it. This is typically used when your pod is simply running
    another python script (e.g you can log from that script and see it in the CLI).

    :type debug: boolean
    :param debug: true to switch debug logging on
    """

    #
    # - add a small capacity rotating log
    # - this will be persisted in the container's filesystem and retrieved via /log requests
    # - an IOError here would mean we don't have the permission to write to /var/log for some reason (just skip)
    #
    logger = logging.getLogger('ochopod')
    try:
        handler = RotatingFileHandler(LOG, maxBytes=32764, backupCount=3)
        handler.setLevel(INFO)
        handler.setFormatter(Formatter('%(asctime)s - %(levelname)s - %(message)s'))
        logger.addHandler(handler)

    except IOError:
        pass

    #
    # - switch all handlers to DEBUG if requested
    #
    if debug:
        for handler in logger.handlers:
            handler.setLevel(DEBUG)
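
A usage sketch, assuming the module-level LOG constant points to a writable path under /var/log:

enable_cli_log(debug=1)
logging.getLogger('ochopod').info('pod script started')  # retrievable later via the CLI /log requests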
Example #12
 def setup_logging():
     if not app.debug:
         from logging.handlers import RotatingFileHandler
         if app.config.get('ERROR_LOG'):
             error_fh = RotatingFileHandler(app.config['ERROR_LOG'], maxBytes=1024*1024*10, backupCount=10, encoding='utf_8')
             error_fh.setLevel(logging.ERROR)
             app.logger.addHandler(error_fh)
Example #13
def initialize_logger(redfish_logfile):
    """Return api version.

    :param redfish_logfile: redfish log
    :type str
    :returns:  True

    """
    global logger
    logger = logging.getLogger()
    
    logger.setLevel(logging.DEBUG)
    formatter = logging.Formatter(
        '%(asctime)s :: %(levelname)s :: %(message)s'
        )
    file_handler = RotatingFileHandler(redfish_logfile, 'a', 1000000, 1)

    # First handler: log to file
    file_handler.setLevel(logging.DEBUG)
    file_handler.setFormatter(formatter)
    logger.addHandler(file_handler)

    # Second handler: log to console
    stream_handler = logging.StreamHandler()
    stream_handler.setLevel(logging.DEBUG)
    logger.addHandler(stream_handler)
    return True
Example #14
def setup_logger(cfg):
    if 'LOGFILE' in cfg:
        file_handler = RotatingFileHandler(cfg['LOGFILE'], 'a', 1 * 1024 * 1024, 10)
        file_handler.setFormatter(logging.Formatter('%(asctime)s %(levelname)s: %(message)s [in %(pathname)s:%(lineno)d]'))
        file_handler.setLevel(getattr(logging, cfg['DEBUG']))
        # only attach the file handler when a LOGFILE is actually configured
        logger.addHandler(file_handler)
    logger.setLevel(getattr(logging, cfg['DEBUG']))
Example #15
def add_logger():
    log_file = app.config.get("LOGGING_FILE", "prioritizer.log")
    handler = RotatingFileHandler(log_file, maxBytes=10000, backupCount=10)
    handler.setLevel(logging.INFO)
    app.logger.addHandler(handler)

    app.logger.info(type(app.logger))
Example #16
File: sncli.py  Project: Qu4tro/sncli
class sncli:

    def __init__(self, do_server_sync, verbose=False):
        self.config         = Config()
        self.do_server_sync = do_server_sync
        self.verbose        = verbose
        self.do_gui         = False

        if not os.path.exists(self.config.get_config('db_path')):
            os.mkdir(self.config.get_config('db_path'))

        # configure the logging module
        self.logfile = os.path.join(self.config.get_config('db_path'), 'sncli.log')
        self.loghandler = RotatingFileHandler(self.logfile, maxBytes=100000, backupCount=1)
        self.loghandler.setLevel(logging.DEBUG)
        self.loghandler.setFormatter(logging.Formatter(fmt='%(asctime)s [%(levelname)s] %(message)s'))
        self.logger = logging.getLogger()
        self.logger.setLevel(logging.DEBUG)
        self.logger.addHandler(self.loghandler)
        self.config.logfile = self.logfile

        logging.debug('sncli logging initialized')

        self.logs = []

        try:
            self.ndb = NotesDB(self.config, self.log, self.gui_update_view)
        except Exception as e:
            self.log(str(e))
            sys.exit(1)
Example #17
    def start(self):
        # remove all handlers
        self.handlers = []

        # console log handler
        if self.consoleLogging:
            console = logging.StreamHandler()
            console.setFormatter(
                logging.Formatter('%(asctime)s %(levelname)s::%(threadName)s::%(message)s', '%H:%M:%S'))
            console.setLevel(self.logLevels['INFO'] if not self.debugLogging else self.logLevels['DEBUG'])
            self.addHandler(console)

        # rotating log file handlers
        if self.logFile and makeDir(os.path.dirname(self.logFile)):
            rfh = RotatingFileHandler(
                filename=self.logFile,
                maxBytes=self.logSize,
                backupCount=self.logNr
            )

            rfh_errors = RotatingFileHandler(
                filename=self.logFile.replace('.log', '.error.log'),
                maxBytes=self.logSize,
                backupCount=self.logNr
            )

            rfh.setFormatter(
                logging.Formatter('%(asctime)s %(levelname)s::%(threadName)s::%(message)s', '%Y-%m-%d %H:%M:%S'))
            rfh.setLevel(self.logLevels['INFO'] if not self.debugLogging else self.logLevels['DEBUG'])
            self.addHandler(rfh)

            rfh_errors.setFormatter(
                logging.Formatter('%(asctime)s %(levelname)s::%(threadName)s::%(message)s', '%Y-%m-%d %H:%M:%S'))
            rfh_errors.setLevel(self.logLevels['ERROR'])
            self.addHandler(rfh_errors)
Example #18
 def _setup_file_log(self):
     """Add a file log handler."""
     
     file = os.path.abspath(os.path.expanduser(self._meta.file))
     log_dir = os.path.dirname(file)
     if not os.path.exists(log_dir):
         os.makedirs(log_dir)
         
     if self._meta.rotate:
         from logging.handlers import RotatingFileHandler
         file_handler = RotatingFileHandler(
             file, 
             maxBytes=int(self._meta.max_bytes), 
             backupCount=int(self._meta.max_files),
             )
     else:
         from logging import FileHandler
         file_handler = FileHandler(file)
     
     if self.get_level() == logging.getLevelName(logging.DEBUG):
         format = logging.Formatter(self._meta.debug_format)
     else:
         format = logging.Formatter(self._meta.file_format)
     file_handler.setFormatter(format)   
     file_handler.setLevel(getattr(logging, self.get_level())) 
     self.backend.addHandler(file_handler)
Example #19
def main():
    ini = Recall()
    #
    # setup logging
    #
    logger = logging.getLogger()
    logger.setLevel(logging.INFO)
    formatter = logging.Formatter('%(asctime)s :: %(levelname)s :: %(message)s')
    file_handler = RotatingFileHandler('ridirect.log', 'a', 1000000, 1)

    file_handler.setLevel(logging.DEBUG)
    file_handler.setFormatter(formatter)
    logger.addHandler(file_handler)

    stream_handler = logging.StreamHandler()
    stream_handler.setLevel(logging.DEBUG)
    formatter = logging.Formatter('%(asctime)s \t %(filename)s \t %(levelname)s \t %(message)s', "%H:%M:%S")
    stream_handler.setFormatter(formatter)
    logger.addHandler(stream_handler)

    r = Measurement(None, ini)
    print("Time out: ", r.time_out)
    logging.info("Start" + r.version)
    r.collect_new()
    r.dispatch_new()
    logging.info("Stop")
    return True
Example #20
def _get_handler():
    # we only need one global handler
    global handler
    if handler is not None:
        return handler

    path = '/var/log/rhsm/rhsm.log'
    try:
        if not os.path.isdir("/var/log/rhsm"):
            os.mkdir("/var/log/rhsm")
    except Exception:
        pass

    # Try to write to /var/log, fallback on console logging:
    try:
        handler = RotatingFileHandler(path, maxBytes=0x100000, backupCount=5, encoding='utf-8')
    except IOError:
        handler = logging.StreamHandler()
    except Exception:
        handler = logging.StreamHandler()

    handler.setFormatter(logging.Formatter(LOG_FORMAT))
    handler.setLevel(LOG_LEVEL)

    return handler
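
A usage sketch (logger name is illustrative): the module-level handler global is presumably initialized to None, and callers simply attach the shared handler to their logger.

log = logging.getLogger('rhsm-app')
log.addHandler(_get_handler())
log.info('rhsm logging initialized')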
Example #21
def init_log(filter=None):
    log = logging.getLogger()
    log.setLevel(logging.DEBUG)

    formatter = logging.Formatter(FORMAT)

    # add file rotation handler
    file_handler = RotatingFileHandler(
        filename="/var/log/mediaplat/mediaplat.log", maxBytes=1024 * 1024, backupCount=5, mode="a+"
    )

    stream_handler = logging.StreamHandler()

    if filter:
        file_handler.addFilter(filter)
        stream_handler.addFilter(filter)

    # log to file
    file_handler.setLevel(logging.DEBUG)
    file_handler.setFormatter(formatter)
    log.addHandler(file_handler)

    if DEBUG:
        # duplicate log to stdout with color
        stream_handler.setLevel(logging.DEBUG)
        stream_handler.setFormatter(ColoredFormatter())
        log.addHandler(stream_handler)
Example #22
class Logger(object):
    
    m_logger = None

    def __init__(self, project):
        self._log_dir = CONF.log_dir
        self._log_file = CONF.log_file
        self._log_file_size = CONF.log_file_size
        self._log_file_count = CONF.log_file_count

        if not os.path.exists(self._log_dir):
            os.makedirs(self._log_dir)

        log_file_name = os.path.join(self._log_dir, self._log_file)
        self._log_handler = RotatingFileHandler(log_file_name,
                                                maxBytes=self._log_file_size,
                                                backupCount=self._log_file_count)
        
        Logger.m_logger = logging.getLogger(project)
        self._log_handler.setFormatter(logging.Formatter(CONF.log_formatter))

        self._enable_debug = CONF.debug
        if self._enable_debug:
            self._log_handler.setLevel(logging.DEBUG)
            Logger.m_logger.setLevel(logging.DEBUG)

        Logger.m_logger.addHandler(self._log_handler)    
Example #23
File: __init__.py  Project: zhuhj89/DRMS
def configure_logging(app):
    formatter = logging.Formatter(
        '%(asctime)s %(levelname)s: %(message)s '
        '[in %(pathname)s:%(lineno)d]')
    print(app.config['DEBUG_LOG'])
    debug_log = os.path.join(app.root_path,
                             app.config['DEBUG_LOG'])

    debug_file_handler = \
        RotatingFileHandler(debug_log, mode='w+',
                            maxBytes=100000,
                            backupCount=10)

    debug_file_handler.setLevel(logging.DEBUG)
    debug_file_handler.setFormatter(formatter)
    app.logger.addHandler(debug_file_handler)

    error_log = os.path.join(app.root_path,
                             app.config['ERROR_LOG'])

    error_file_handler = \
        RotatingFileHandler(error_log,
                            maxBytes=100000,
                            backupCount=10)

    error_file_handler.setLevel(logging.ERROR)
    error_file_handler.setFormatter(formatter)
    app.logger.addHandler(error_file_handler)
Example #24
def init_logging(stdout_enabled=True):
    root_logger = logging.getLogger()
    root_logger.setLevel(logging.INFO)

    logging.getLogger('requests.packages.urllib3.connectionpool').setLevel(logging.ERROR)
    logging.getLogger('boto').setLevel(logging.ERROR)
    logging.getLogger('urllib3.connectionpool').setLevel(logging.WARN)

    log_formatter = logging.Formatter(
            fmt='%(asctime)s | ' + execution_id + ' | %(name)s | %(levelname)s | %(message)s',
            datefmt='%m/%d/%Y %I:%M:%S %p')

    stdout_logger = logging.StreamHandler(sys.stdout)
    stdout_logger.setFormatter(log_formatter)
    stdout_logger.setLevel(logging.CRITICAL)
    root_logger.addHandler(stdout_logger)

    if stdout_enabled:
        stdout_logger.setLevel(logging.INFO)

    # base log file

    log_dir = './'
    log_file_name = '%s/usergrid_iterator.log' % log_dir

    # ConcurrentLogHandler
    rotating_file = RotatingFileHandler(filename=log_file_name,
                                        mode='a',
                                        maxBytes=404857600,
                                        backupCount=0)
    rotating_file.setFormatter(log_formatter)
    rotating_file.setLevel(logging.INFO)

    root_logger.addHandler(rotating_file)
Example #25
def configure_logging(app):
    '''Setup file(info) and email(error) logging'''
    # Return if in debug or testing mode
    if app.debug or app.testing:
        return

    import logging
    from logging.handlers import RotatingFileHandler, SMTPHandler

    # Set logging level to info
    app.logger.setLevel(logging.INFO)

    # Rotating file logging for (info) level
    debug_log = os.path.join(app.root_path, app.config['DEBUG_LOG'])
    file_handler = RotatingFileHandler(debug_log, maxBytes=100000, backupCount=10)
    file_handler.setLevel(logging.DEBUG)
    file_handler.setFormatter(logging.Formatter(
        '%(asctime)s %(processName)s\t | %(levelname)s: %(message)s '
        '[in %(pathname)s:%(lineno)d]')
    )
    app.logger.addHandler(file_handler)

    # Mail logging handler for (error) level
    mail_handler = SMTPHandler(app.config['MAIL_SERVER'],
                               app.config['MAIL_USERNAME'],
                               app.config['SITE_ADMINS'],
                               'O_ops... %s failed!' % app.config['SITE_NAME'],
                               (app.config['MAIL_USERNAME'],
                                app.config['MAIL_PASSWORD']))
    mail_handler.setLevel(logging.ERROR)
    mail_handler.setFormatter(logging.Formatter(
        '%(asctime)s %(processName)s\t | %(levelname)s: %(message)s '
        '[in %(pathname)s:%(lineno)d]')
    )
    app.logger.addHandler(mail_handler)
Example #26
    def _setup_file_log(self):
        """Add a file log handler."""

        file_path = self.app.config.get(self._meta.config_section, 'file')
        rotate = self.app.config.get(self._meta.config_section, 'rotate')
        max_bytes = self.app.config.get(self._meta.config_section,
                                        'max_bytes')
        max_files = self.app.config.get(self._meta.config_section,
                                        'max_files')
        if file_path:
            file_path = fs.abspath(file_path)
            log_dir = os.path.dirname(file_path)
            if not os.path.exists(log_dir):
                os.makedirs(log_dir)

            if rotate:
                from logging.handlers import RotatingFileHandler
                file_handler = RotatingFileHandler(
                    file_path,
                    maxBytes=int(max_bytes),
                    backupCount=int(max_files),
                )
            else:
                from logging import FileHandler
                file_handler = FileHandler(file_path)

            format = self._get_file_format()
            formatter = self._get_file_formatter(format)
            file_handler.setFormatter(formatter)
            file_handler.setLevel(getattr(logging, self.get_level()))
        else:
            file_handler = NullHandler()

        self.backend.addHandler(file_handler)
Example #27
File: app.py  Project: weso/CWR-Validator
def create_app(config_object=DevConfig):
    config = CWRValidatorConfiguration().get_config()

    app = Flask(__name__)
    api = Api(app)

    app.config.from_object(config_object)

    _register_resources(api)
    _load_services(app, config)

    app.wsgi_app = ProxyFix(app.wsgi_app)

    if app.config['DEBUG']:
        log = config['log.folder']
        if len(log) == 0:
            log = 'mera_ws.log'

        handler = RotatingFileHandler(log, maxBytes=10000, backupCount=1)
        handler.setLevel(logging.DEBUG)
        handler.setFormatter(
            Formatter('[%(levelname)s][%(asctime)s] %(message)s'))

        logging.basicConfig(level=logging.DEBUG)
        logging.getLogger('').addHandler(handler)

        app.logger.addHandler(handler)

    return app
Example #28
File: beacon.py  Project: jdq/beacon
def add_logging_file_handler(filename, logformat, loglevel=logging.INFO):
    file_handler = RotatingFileHandler(filename,
                    maxBytes=10 * 1024 * 1024, backupCount=3)
    file_handler.setFormatter(logging.Formatter(logformat))
    file_handler.setLevel(loglevel)
    logging.getLogger().addHandler(file_handler)
    return file_handler
Example #29
def _add_file_handler(app, filename, max_bytes=512 * 1024, backup_count=100,
                      level=logging.NOTSET):
    """Adds file logging."""
    file_handler = RotatingFileHandler(filename, maxBytes=max_bytes,
                                       backupCount=backup_count)
    file_handler.setLevel(level)
    app.logger.addHandler(file_handler)
Example #30
File: app.py  Project: nict-isp/scn-admin
def configure_logging(app):
    """
    Configure logger.

    Logger object set to utils.logging class.

    :param app: Flask object
    """
    formatter = logging.Formatter(
        '%(asctime)s %(levelname)s: %(message)s '
        '[in %(pathname)s:%(lineno)d]'
    )

    root_path = os.path.dirname(app.root_path)
    debug_log = os.path.join(root_path, app.config['DEBUG_LOG'])

    debug_file_handler = RotatingFileHandler(
        debug_log, maxBytes=100000, backupCount=100000
    )

    debug_file_handler.setLevel(app.config['LOG_LEVEL'])
    debug_file_handler.setFormatter(formatter)
    app.logger.addHandler(debug_file_handler)

    error_log = os.path.join(root_path, app.config['ERROR_LOG'])
    error_file_handler = RotatingFileHandler(
        error_log, maxBytes=100000, backupCount=10
    )

    error_file_handler.setLevel(logging.ERROR)
    error_file_handler.setFormatter(formatter)
    app.logger.addHandler(error_file_handler)
Example #31
CORS(app, supports_credentials=True)
excel.init_excel(app)
# Initialize third-party libraries
mongo = PyMongo(app)
wx_mongo = PyMongo(app, uri=app.config['WX_MONGO_URI'])
celery = make_celery(app)
redis = Redis(decode_responses=True,
              host=app.config['REDIS_HOST'],
              password=app.config['REDIS_PASSWORD'],
              db=0)

# Logging
handler = RotatingFileHandler('app.log', maxBytes=10000, backupCount=1)
handler.setFormatter(
    logging.Formatter('%(asctime)s %(levelname)s: %(message)s '
                      '[in %(pathname)s:%(lineno)d]'))
handler.setLevel(logging.WARNING)
app.logger.addHandler(handler)

# Routes
from .router.gas import *
from .router.ding_talk import *
from .router.pic import *
from .router.plus import *
from .router.wx.wx_cron import *
from .router.lazy.material import *
from .router.lazy.keyword import *
from .router.lazy.follow import *
from .router.lazy.gas import *
from .router.lazy.login_sys import *
Example #32
def create_app(config_class=Config):  # Factory function
    app = Flask(__name__)
    app.config.from_object(config_class)

    db.init_app(app)
    migrate.init_app(app, db)
    login.init_app(app)
    mail.init_app(app)
    bootstrap.init_app(app)
    moment.init_app(app)
    babel.init_app(app)

    from app.errors import bp as errors_bp
    app.register_blueprint(errors_bp)

    # Register the auth blueprint with the application. The register_blueprint() call in
    # this case has an extra argument, url_prefix. This is entirely optional, but Flask
    # gives you the option to attach a blueprint under a URL prefix, so any routes
    # defined in the blueprint get this prefix in their URLs.
    from app.auth import bp as auth_bp
    app.register_blueprint(auth_bp, url_prefix='/auth')

    from app.main import bp as main_bp
    app.register_blueprint(main_bp)

    # The code below creates an SMTPHandler instance, sets its level so that it only
    # reports errors and not warnings, informational or debugging messages, and finally
    # attaches it to the app.logger object from Flask.
    if not app.debug and not app.testing:
        if app.config['MAIL_SERVER']:
            auth = None
            if app.config['MAIL_USERNAME'] or app.config['MAIL_PASSWORD']:
                auth = (app.config['MAIL_USERNAME'],
                        app.config['MAIL_PASSWORD'])
            secure = None
            if app.config['MAIL_USE_TLS']:
                secure = ()
            mail_handler = SMTPHandler(
                mailhost=(app.config['MAIL_SERVER'], app.config['MAIL_PORT']),
                fromaddr='no-reply@' + app.config['MAIL_SERVER'],
                toaddrs=app.config['ADMINS'],
                subject='Microblog Failure',
                credentials=auth,
                secure=secure)
            mail_handler.setLevel(logging.ERROR)
            app.logger.addHandler(mail_handler)

        if not os.path.exists('logs'):
            os.mkdir('logs')
        # I'm writing the log file with name microblog.log in a logs directory, which I
        # create if it doesn't already exist. The RotatingFileHandler class is nice
        # because it rotates the logs, ensuring that the log files do not grow too large
        # when the application runs for a long time. In this case I'm limiting the size
        # of the log file to 10KB, and I'm keeping the last ten log files as backup.
        file_handler = RotatingFileHandler(
            'logs/microblog.log', maxBytes=10240, backupCount=10
        )
        # The logging.Formatter class provides custom formatting for the log messages.
        # Since these messages are going to a file, I want them to have as much
        # information as possible: the timestamp, the logging level, the message and the
        # source file and line number from where the log entry originated.
        file_handler.setFormatter(
            logging.Formatter('%(asctime)s %(levelname)s: %(message)s '
                              '[in %(pathname)s:%(lineno)d]')
        )
        # To make the logging more useful, I'm also lowering the logging level to the
        # INFO category, both in the application logger and the file logger handler.
        # The logging categories are DEBUG, INFO, WARNING, ERROR and CRITICAL in
        # increasing order of severity.
        file_handler.setLevel(logging.INFO)
        app.logger.addHandler(file_handler)

        app.logger.setLevel(logging.INFO)
        app.logger.info('Microblog startup')

    return app
Example #33
        if app.config['MAIL_USERNAME'] or app.config['MAIL_PASSWORD']:
            auth = (app.config['MAIL_USERNAME'], app.config['MAIL_PASSWORD'])
        secure = None
        if app.config['MAIL_USE_TLS']:
            secure = ()
        mail_handler = SMTPHandler(
            mailhost=(app.config['MAIL_SERVER'], app.config['MAIL_PORT']),
            fromaddr='no-reply@' + app.config['MAIL_SERVER'],
            toaddrs=app.config['ADMINS'],
            subject='BAGLFailure',
            credentials=auth,
            secure=secure)
        mail_handler.setLevel(logging.ERROR)
        app.logger.addHandler(mail_handler)
    if not os.path.exists('logs'):
        os.mkdir('logs')
    file_handler = RotatingFileHandler('logs/bagl.log',
                                       maxBytes=10240,
                                       backupCount=10)
    file_handler.setFormatter(
        logging.Formatter(
            '%(asctime)s %(levelname)s: %(message)s [in %(pathname)s:%(lineno)d]'
        ))
    file_handler.setLevel(logging.INFO)
    app.logger.addHandler(file_handler)

    app.logger.setLevel(logging.INFO)
    app.logger.info('BAGL startup')

from app import routes, models, errors
Example #34
def create_app(config_class=Config):
    app = Flask(__name__)
    app.config.from_object(config_class)

    db.init_app(app)
    migrate.init_app(app, db)
    login.init_app(app)
    mail.init_app(app)
    bootstrap.init_app(app)
    moment.init_app(app)
    babel.init_app(app)
    app.elasticsearch = Elasticsearch([app.config['ELASTICSEARCH_URL']]) \
                if app.config['ELASTICSEARCH_URL'] else None

    from app.errors import bp as errors_bp
    app.register_blueprint(errors_bp)
    from app.auth import bp as auth_bp
    app.register_blueprint(auth_bp, url_prefix='/auth')
    from app.main import bp as main_bp
    app.register_blueprint(main_bp)

    if not app.debug and not app.testing:
        if app.config['MAIL_SERVER']:
            auth = None
            if app.config['MAIL_USERNAME'] or app.config['MAIL_PASSWORD']:
                auth = (app.config['MAIL_USERNAME'],
                        app.config['MAIL_PASSWORD'])
            secure = None
            if app.config['MAIL_USE_TLS']:
                secure = ()
            mail_handler = SMTPHandler(
                mailhost=(app.config['MAIL_SERVER'], app.config['MAIL_PORT']),
                fromaddr='no-reply@' + app.config['MAIL_SERVER'],
                toaddrs=app.config['ADMINS'],
                subject='Microblog Failure',
                credentials=auth,
                secure=secure)
            mail_handler.setLevel(logging.ERROR)
            app.logger.addHandler(mail_handler)

        if app.config['LOG_TO_STDOUT']:
            stream_handler = logging.StreamHandler()
            stream_handler.setLevel(logging.INFO)
            app.logger.addHandler(stream_handler)
        else:
            if not os.path.exists('logs'):
                os.mkdir('logs')
            file_handler = RotatingFileHandler('logs/microblog.log',
                                               maxBytes=10240,
                                               backupCount=10)
            file_handler.setFormatter(
                logging.Formatter(
                    '%(asctime)s %(levelname)s: %(message)s [in %(pathname)s:%(lineno)d]'
                ))
            file_handler.setLevel(logging.INFO)
            app.logger.addHandler(file_handler)

            app.logger.setLevel(logging.INFO)
            app.logger.info('Microblog startup')

    return app
Example #35
log = logging.getLogger('bot_monitor')
log.setLevel(logging.DEBUG)
formatter = logging.Formatter(
    '%(asctime)s | %(name)s | %(levelname)s | %(message)s')
handler_stdout = logging.StreamHandler(sys.stdout)
handler_stdout.setLevel(logging.DEBUG)
handler_stdout.setFormatter(formatter)
log.addHandler(handler_stdout)
handler_file = RotatingFileHandler('twitter_bot_monitor.log',
                                   mode='a',
                                   maxBytes=1048576,
                                   backupCount=9,
                                   encoding='UTF-8',
                                   delay=True)
handler_file.setLevel(logging.DEBUG)
handler_file.setFormatter(formatter)
log.addHandler(handler_file)

path = os.path.dirname(os.path.abspath(inspect.getfile(
    inspect.currentframe())))

try:
    # read config
    config = configparser.SafeConfigParser()
    config.read(os.path.join(path, "config"))
except IOError:
    log.critical('configuration file is missing')
    sys.exit(0)

try:
Example #36
class UploadFiles(object):
    """Class responsible for uploading files to the cloud"""
    def __init__(self):
        self.log_formatter = logging.Formatter('%(asctime)s %(levelname)s %(funcName)s(%(lineno)d) %(message)s')

        self.my_handler = RotatingFileHandler('/home/pi/Desktop/code_1/logs/upload_files.log', mode='a', maxBytes=5*1024*1024, backupCount=1, encoding=None, delay=0)
        self.my_handler.setFormatter(self.log_formatter)
        self.my_handler.setLevel(logging.INFO)

        self.app_log = logging.getLogger('root')
        self.app_log.setLevel(logging.INFO)

        self.app_log.addHandler(self.my_handler)

        self.settings_database = ptc_database.SettingsDatabase(self.app_log)
 #       self.settings_database.download()
        self.dbx = dropbox.Dropbox(self.settings_database.get_setting('dropbox_access_token'))
        self.app_log.info("Initialized dropbox")
        
        #initiate the database
        self.settings_database = ptc_database.SettingsDatabase(self.app_log)
        self.settings_database.download()

    def process(self):
        """Method responsible for looping through the files to upload"""
        # file extension(s) of interest (note the trailing comma: a tuple, not a bare string)
        extensions = ('.ready',)

        #Loop through the files  in the current working directory
        for sub_dir, dirs, files in os.walk(os.getcwd()):
            for ready_file_name in files:
                ext = os.path.splitext(ready_file_name)[-1].lower()
                if ext in extensions:
                    try:
                        pre, ext = os.path.splitext(ready_file_name)
                        image_file_name = pre + ".jpg"
                        self.upload_file(sub_dir, image_file_name)
                        os.remove(os.path.join(sub_dir,ready_file_name))
                    except Exception as exception:
                        self.app_log.exception('Exception: %s', exception)

    def upload_file(self, source_sub_directory, image_file_name):
        """Method responsible for processing the uploading of files"""
        try:
            source_full_path = os.path.join(source_sub_directory, image_file_name)
            destination_full_path = os.path.join(self.settings_database.get_setting('dropbox_destination_folder'), image_file_name)

            with open(source_full_path, 'rb') as source_file:
                self.dbx.files_upload(source_file.read(), destination_full_path)

            self.app_log.info("uploaded file from  " + source_full_path + " to " + destination_full_path)
 
            os.remove(source_full_path)
        except Exception as exception:
            self.app_log.exception('Exception: %s', exception)

    def process_loop(self):
        """Method responsible for the control loop for looking for files to upload"""
        while True:
            try:
                self.app_log.info("Looking for files to upload")
                self.process()
            except Exception as exception:
                self.app_log.exception('Exception: %s', exception)
            finally:
                time.sleep(SLEEP_TIME)
Example #37
    response = requests.get(payload_url, auth=(account_sid, auth_token)).json()
    results = 'GET request successful'

    #	print(resp)
    recKeywords = response["media"]["keywords"]
    recTranscripts = response["media"]["transcripts"]

    #	text = results['media']["transcripts"]["text"]

    rendering = render_template('index.html',
                                keywords=recKeywords,
                                transcripts=recTranscripts)

    #	response = requests.post("http://613dc9ad.ngrok.io/", rendering)

    return response


if __name__ == "__main__":
    handler = RotatingFileHandler('hermes.log', maxBytes=10000, backupCount=1)
    handler.setLevel(logging.INFO)
    app.logger.addHandler(handler)
    app.run()


@app.errorhandler(404)
def page_not_found(e):
    app.logger.error(str(e))
    return
Example #38
def create_app(config_class=Config):
    app = Flask(__name__)
    app.config.from_object(config_class)

    db.init_app(app)
    migrate.init_app(app, db)
    login.init_app(app)
    mail.init_app(app)
    bootstrap.init_app(app)
    moment.init_app(app)

    from app.errors import bp as errors_bp
    app.register_blueprint(errors_bp)

    from app.contract import bp as contract_bp
    app.register_blueprint(contract_bp)

    from app.tender import bp as tender_bp
    app.register_blueprint(tender_bp)

    from app.auth import bp as auth_bp
    app.register_blueprint(auth_bp, url_prefix='/auth')

    from app.main import bp as main_bp
    app.register_blueprint(main_bp)

    from app.clients import bp as clients_bp
    app.register_blueprint(clients_bp)

    configure_uploads(app, images)

    app.add_url_rule('/uploads/<filename>', 'uploaded_file',
                     build_only=True)
    app.wsgi_app = SharedDataMiddleware(app.wsgi_app, {
        '/uploads':  app.config['UPLOAD_FOLDER']
    })

    app.wsgi_app = ProxyFix(app.wsgi_app)


    app.config['MAX_CONTENT_LENGTH'] = 2 * 1024 * 1024

    if not app.debug and not app.testing:
        if app.config['MAIL_SERVER']:
            auth = None
            if app.config['MAIL_USERNAME'] or app.config['MAIL_PASSWORD']:
                auth = (app.config['MAIL_USERNAME'], app.config['MAIL_PASSWORD'])
            secure = None
            if app.config['MAIL_USE_TLS']:
                secure = ()
            mail_handler = SMTPHandler(
                mailhost=(app.config['MAIL_SERVER'], app.config['MAIL_PORT']),
                fromaddr='no-reply@' + app.config['MAIL_SERVER'],
                toaddrs=app.config['ADMINS'], subject='Microblog Failure',
                credentials=auth, secure=secure)
            mail_handler.setLevel(logging.ERROR)
            app.logger.addHandler(mail_handler)

        if not os.path.exists('logs'):
            os.mkdir('logs')
        file_handler = RotatingFileHandler('logs/microblog.log', maxBytes=10240,
                                           backupCount=10)
        file_handler.setFormatter(logging.Formatter(
            '%(asctime)s %(levelname)s: %(message)s [in %(pathname)s:%(lineno)d]'))
        file_handler.setLevel(logging.INFO)
        app.logger.addHandler(file_handler)

        app.logger.setLevel(logging.INFO)
        app.logger.info('Microblog startup')

    return app
Example #39
def setup_logger(name=None,
                 logfile=None,
                 level=logging.DEBUG,
                 formatter=None,
                 maxBytes=0,
                 backupCount=0,
                 fileLoglevel=None,
                 disableStderrLogger=False):
    """
    Configures and returns a fully configured logger instance, no hassles.
    If a logger with the specified name already exists, it returns the existing instance,
    else creates a new one.

    If you set the ``logfile`` parameter with a filename, the logger will save the messages to the logfile,
    but does not rotate by default. If you want to enable log rotation, set both ``maxBytes`` and ``backupCount``.

    Usage:

    .. code-block:: python

        from logzero import setup_logger
        logger = setup_logger()
        logger.info("hello")

    :arg string name: Name of the `Logger object <https://docs.python.org/2/library/logging.html#logger-objects>`_. Multiple calls to ``setup_logger()`` with the same name will always return a reference to the same Logger object. (default: ``__name__``)
    :arg string logfile: If set, also write logs to the specified filename.
    :arg int level: Minimum `logging-level <https://docs.python.org/2/library/logging.html#logging-levels>`_ to display (default: ``logging.DEBUG``).
    :arg Formatter formatter: `Python logging Formatter object <https://docs.python.org/2/library/logging.html#formatter-objects>`_ (by default uses the internal LogFormatter).
    :arg int maxBytes: Size of the logfile when rollover should occur. Defaults to 0, rollover never occurs.
    :arg int backupCount: Number of backups to keep. Defaults to 0, rollover never occurs.
    :arg int fileLoglevel: Minimum `logging-level <https://docs.python.org/2/library/logging.html#logging-levels>`_ for the file logger (if not set, it will use the loglevel from the ``level`` argument)
    :arg bool disableStderrLogger: Should the default stderr logger be disabled. Defaults to False.
    :return: A fully configured Python logging `Logger object <https://docs.python.org/2/library/logging.html#logger-objects>`_ you can use with ``.debug("msg")``, etc.
    """
    _logger = logging.getLogger(name or __name__)
    _logger.propagate = False
    _logger.setLevel(level)

    # Reconfigure existing handlers
    stderr_stream_handler = None
    for handler in list(_logger.handlers):
        if hasattr(handler, LOGZERO_INTERNAL_LOGGER_ATTR):
            if isinstance(handler, logging.FileHandler):
                # Internal FileHandler needs to be removed and re-setup to be able
                # to set a new logfile.
                _logger.removeHandler(handler)
                continue
            elif isinstance(handler, logging.StreamHandler):
                stderr_stream_handler = handler

        # reconfigure handler
        handler.setLevel(level)
        handler.setFormatter(formatter or LogFormatter())

    # remove the stderr handler (stream_handler) if disabled
    if disableStderrLogger:
        if stderr_stream_handler is not None:
            _logger.removeHandler(stderr_stream_handler)
    elif stderr_stream_handler is None:
        stderr_stream_handler = logging.StreamHandler()
        setattr(stderr_stream_handler, LOGZERO_INTERNAL_LOGGER_ATTR, True)
        stderr_stream_handler.setLevel(level)
        stderr_stream_handler.setFormatter(formatter or LogFormatter())
        _logger.addHandler(stderr_stream_handler)

    if logfile:
        rotating_filehandler = RotatingFileHandler(filename=logfile,
                                                   maxBytes=maxBytes,
                                                   backupCount=backupCount)
        setattr(rotating_filehandler, LOGZERO_INTERNAL_LOGGER_ATTR, True)
        rotating_filehandler.setLevel(fileLoglevel or level)
        rotating_filehandler.setFormatter(formatter
                                          or LogFormatter(color=False))
        _logger.addHandler(rotating_filehandler)

    return _logger
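
A usage sketch of the rotation options described in the docstring (file name and sizes are illustrative):

import logging
logger = setup_logger(name='worker', logfile='worker.log',
                      maxBytes=1000000, backupCount=3,
                      fileLoglevel=logging.INFO)
logger.debug('stderr only: the file handler level is INFO')
logger.info('written to stderr and worker.log, rotating at ~1 MB')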
Example #40
if not os.path.isdir(DIR_LOGS):
    os.mkdir(DIR_LOGS)

logging.basicConfig(
    level=LOGGER_LEVEL,
    format=FORMAT_LOGGER_TEXT
    )

formatter = logging.Formatter(FORMAT_LOGGER_TEXT)
fileHandler = RotatingFileHandler(
    # filename=f'{DIR_LOGS}/{FILE_LOG}',
    filename=os.path.join(DIR_LOGS, FILE_LOG),
    maxBytes=MAX_BYTES,
    backupCount=BACKUP_COUNT)
fileHandler.setLevel(LOGGER_LEVEL)
fileHandler.setFormatter(formatter)

logging = logging


def setup_custom_logger(_name):
    _logger = logging.getLogger(_name)
    _logger.addHandler(fileHandler)
    return _logger


logger = setup_custom_logger(__name__)
logger.info(f"Se configura modulo log en modo: {logging.getLevelName(LOGGER_LEVEL)}")
logger.info(f"Ruta de archivo logs: {os.path.join(DIR_LOGS, FILE_LOG)}")
if logger.isEnabledFor(logging.DEBUG):
Example #41
from logging.handlers import RotatingFileHandler

# Logging settings for this Django project.
LOG_PATH = ROOT_PATH
LOG_LEVEL = logging.DEBUG
LOG_FILENAME = os.path.join(LOG_PATH, 'appraise.log')
LOG_FORMAT = "[%(asctime)s] %(name)s::%(levelname)s %(message)s"
LOG_DATE = "%m/%d/%Y @ %H:%M:%S"
LOG_FORMATTER = logging.Formatter(LOG_FORMAT, LOG_DATE)

LOG_HANDLER = RotatingFileHandler(filename=LOG_FILENAME,
                                  mode="a",
                                  maxBytes=1024 * 1024,
                                  backupCount=5,
                                  encoding="utf-8")
LOG_HANDLER.setLevel(level=LOG_LEVEL)
LOG_HANDLER.setFormatter(LOG_FORMATTER)

LOGIN_URL = '/{0}login/'.format(DEPLOYMENT_PREFIX)
LOGIN_REDIRECT_URL = '/{0}'.format(DEPLOYMENT_PREFIX)
LOGOUT_URL = '/{0}logout/'.format(DEPLOYMENT_PREFIX)

# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# If running in a Windows environment this must be set to the same as your
# system time zone.
TIME_ZONE = 'Europe/Berlin'

# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
Example #42
host = "localhost"
port = 12000
# HEADERSIZE = 10
"""
set up fantom logging
"""
fantom_logger = logging.getLogger()
fantom_logger.setLevel(logging.DEBUG)
formatter = logging.Formatter("%(asctime)s :: %(levelname)s :: %(message)s",
                              "%H:%M:%S")
# file
if os.path.exists("./logs/fantom.log"):
    os.remove("./logs/fantom.log")
file_handler = RotatingFileHandler('./logs/fantom.log', 'a', 1000000, 1)
file_handler.setLevel(logging.DEBUG)
file_handler.setFormatter(formatter)
fantom_logger.addHandler(file_handler)
# stream
stream_handler = logging.StreamHandler()
stream_handler.setLevel(logging.DEBUG)
fantom_logger.addHandler(stream_handler)


class dotdict(dict):
    def __getattr__(self, name):
        return self[name]


class Player():
    def __init__(self):
Example #43
import logging
import os
import sys

from logging import Formatter
from logging.handlers import RotatingFileHandler

SECRET_KEY = os.environ['FLASK_SECRET_KEY']
ATK_PATH = os.path.dirname(os.path.abspath(__file__))
API_KEY = os.environ['ATK_API_KEY']

sys.path.append(ATK_PATH)
dirname = os.path.dirname
logfile = os.path.join(dirname(__file__), 'logs', 'app.log')
handler = RotatingFileHandler(logfile, maxBytes=10240, backupCount=10)
handler.setLevel(logging.DEBUG)
handler.setFormatter(
    Formatter(
        '%(asctime)s %(levelname)s %(message)s [in %(pathname)s:%(lineno)d]'))
# Add additional logger handlers here, add to LOG_HANDLERS list

LOG_HANDLERS = [
    handler,
]

# Add your custom settings below
CHAIN_HISTORY_LENGTH = 1
Example #44
    def __init__(self):
        # Init logging facility
        # From : http://sametmax.com/ecrire-des-logs-en-python/
        logger = logging.getLogger()
        logger.setLevel(logging.DEBUG)
        formatter = logging.Formatter('%(asctime)s :: %(levelname)s :: %(message)s')
        file_handler = RotatingFileHandler('api_log.log', 'a', 1000000, 1)
        file_handler.setLevel(logging.DEBUG)
        file_handler.setFormatter(formatter)
        logger.addHandler(file_handler)

        stream_handler = logging.StreamHandler()
        stream_handler.setLevel(logging.DEBUG)
        logger.addHandler(stream_handler)

        # Signals
        self.signal_MCU_state_changed = Signal(args=['alive'])
        self.signal_received_descriptor = Signal(args=['var_id','var_type','var_name','var_writeable','group_id'])
        self.signal_received_group_descriptor = Signal(args=['group_id','group_name'])
        self.signal_received_value = Signal(args=['var_id'])

        self.distantio = distantio_protocol()
        self.protocol = Protocol(self.unused)

        # Queue holding received characters to be processed by worker process
        self.input_queue = mp.Queue()
        # Queue holding decoded frames
        self.output_queue = mp.Queue()
        # Conditions for controlling run process
        self.condition_new_rx_data = mp.Event()
        self.condition_new_rx_data.clear()
        self.condition_run_process = mp.Event()
        self.condition_run_process.clear()

        # Worker process for decoding characters
        self.producer_conn, self.consumer_conn = mp.Pipe()
        self.worker = Worker(self.input_queue,self.producer_conn,self.condition_new_rx_data,self.condition_run_process)
        self.worker.start()

        # Array containing buffers with MCU variables values
        self.variables_values = dict()
        # max size of the buffers
        self.buffer_length = 128
        # Array containing last time each individual variable was updated
        self.last_variables_update = dict()
        # Min delay in seconds between two emit value received signal
        self.emit_signal_delay = 0.1
        self.time_start = time.time()

        # Timer for monitoring MCU alive
        self.mcu_died_delay = 2.0
        self.mcu_alive_timer = threading.Timer(self.mcu_died_delay,self.on_mcu_lost_connection)

        self.variable_list = dict()
        self.connected = False

        self.datalogger = Datalogger()

        # Start MCU timer
        self.mcu_alive_timer = threading.Timer(self.mcu_died_delay,self.on_mcu_lost_connection)
        self.mcu_alive_timer.start()

        logging.info('DistantIO API initialized successfully.')
Example #45
def create_app(config_class=Config):
    app = Flask(__name__)
    app.config.from_object(config_class)
    app.redis = Redis.from_url(app.config['REDIS_URL'])
    app.task_queue = rq.Queue('microblog-tasks', connection=app.redis)

    db.init_app(app)
    migrate.init_app(app, db)
    login.init_app(app)
    mail.init_app(app)
    bootstrap.init_app(app)
    moment.init_app(app)
    babel.init_app(app)
    app.elasticsearch = (Elasticsearch([app.config["ELASTICSEARCH_URL"]])
                         if app.config["ELASTICSEARCH_URL"] else None)

    from app.errors import bp as errors_bp
    app.register_blueprint(errors_bp)

    from app.auth import bp as auth_bp
    app.register_blueprint(auth_bp, url_prefix="/auth")

    from app.main import bp as main_bp
    app.register_blueprint(main_bp)

    from app.api import bp as api_bp
    app.register_blueprint(api_bp, url_prefix='/api')

    if not app.debug and not app.testing:
        if app.config["MAIL_SERVER"]:
            auth = None
            if app.config["MAIL_USERNAME"] or app.config["MAIL_PASSWORD"]:
                auth = (app.config["MAIL_USERNAME"],
                        app.config["MAIL_PASSWORD"])
            secure = None
            if app.config["MAIL_USE_TLS"]:
                secure = ()
            mail_handler = SMTPHandler(
                mailhost=(app.config["MAIL_SERVER"], app.config["MAIL_PORT"]),
                fromaddr="no-reply@" + app.config["MAIL_SERVER"],
                toaddrs=app.config["ADMINS"],
                subject="Microblog Failure",
                credentials=auth,
                secure=secure,
            )
            mail_handler.setLevel(logging.ERROR)
            app.logger.addHandler(mail_handler)

        if not os.path.exists("logs"):
            os.mkdir("logs")
        file_handler = RotatingFileHandler("logs/microblog.log",
                                           maxBytes=10240,
                                           backupCount=10)
        file_handler.setFormatter(
            logging.Formatter("%(asctime)s %(levelname)s: %(message)s "
                              "[in %(pathname)s:%(lineno)d]"))
        file_handler.setLevel(logging.INFO)
        app.logger.addHandler(file_handler)

        app.logger.setLevel(logging.INFO)
        app.logger.info("Microblog startup")

    return app
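
A hedged usage sketch for an application factory like the one above (the module layout is an assumption; in practice the app is more commonly served through flask run or a WSGI server):

from app import create_app

app = create_app()

if __name__ == '__main__':
    app.run()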
Example #46
handler.setFormatter(formatter)
logger.addHandler(handler)

logger.info("Start print log")
logger.debug("Do something")
logger.warning("Something maybe fail.")
logger.info("Finish")

# 3. rotate
import logging
from logging.handlers import RotatingFileHandler
logger = logging.getLogger(__name__)
logger.setLevel(level=logging.INFO)
# Define a RotatingFileHandler: keep at most 3 backup log files, each at most 1 KB
rHandler = RotatingFileHandler("log.txt", maxBytes=1 * 1024, backupCount=3)
rHandler.setLevel(logging.INFO)
formatter = logging.Formatter(
    '%(asctime)s - %(name)s - %(levelname)s - %(message)s')
rHandler.setFormatter(formatter)

console = logging.StreamHandler()
console.setLevel(logging.INFO)
console.setFormatter(formatter)

logger.addHandler(rHandler)
logger.addHandler(console)

logger.info("Start print log")
logger.debug("Do something")
logger.warning("Something maybe fail.")
logger.info("Finish")
Example #47
File: HS100.py  Project: wolfspyre/Glance
class HS100(object):

    # Predefined Smart Plug Commands
    commands = {
        'info': '{"system":{"get_sysinfo":{}}}',
        'on': '{"system":{"set_relay_state":{"state":1}}}',
        'off': '{"system":{"set_relay_state":{"state":0}}}',
        'cloudinfo': '{"cnCloud":{"get_info":{}}}',
        'wlanscan': '{"netif":{"get_scaninfo":{"refresh":0}}}',
        'time': '{"time":{"get_time":{}}}',
        'schedule': '{"schedule":{"get_rules":{}}}',
        'countdown': '{"count_down":{"get_rules":{}}}',
        'antitheft': '{"anti_theft":{"get_rules":{}}}',
        'reboot': '{"system":{"reboot":{"delay":1}}}',
        'reset': '{"system":{"reset":{"delay":1}}}'
    }

    def __init__(self, main_app_log):

        if main_app_log is None:

            self.log_formatter = logging.Formatter(
                '%(asctime)s %(levelname)s %(funcName)s(%(lineno)d) %(message)s'
            )

            self.logFile = './logs/HS100.log'

            self.my_handler = RotatingFileHandler(self.logFile,
                                                  mode='a',
                                                  maxBytes=5 * 1024 * 1024,
                                                  backupCount=1,
                                                  encoding=None,
                                                  delay=0)
            self.my_handler.setFormatter(self.log_formatter)
            self.my_handler.setLevel(logging.INFO)

            self.app_log = logging.getLogger('root')
            self.app_log.setLevel(logging.INFO)

            self.app_log.addHandler(self.my_handler)

        else:
            self.app_log = main_app_log

        self._db = DB.DB()
        self._db.load_settings()

        self.socket_count = 0

        # Set up the socket IP addresses from the database
        while (self.socket_count < 10):
            ip = self._db.get_value("hs100ip" + str(self.socket_count))
            if ip == "":
                break

            ips[self.socket_count] = ip
            self.socket_count = self.socket_count + 1

        self.start_mos()
        self.process_loop()

    # Encryption and Decryption of TP-Link Smart Home Protocol
    # XOR Autokey Cipher with starting key = 171
    def encrypt(self, string):
        key = 171
        result = "\0\0\0\0"
        for i in string:
            a = key ^ ord(i)
            key = a
            result += chr(a)
        return result

    def decrypt(self, string):
        key = 171
        result = ""
        for i in string:
            a = key ^ ord(i)
            key = ord(i)
            result += chr(a)
        return result

    def send_command(self, in_ip, in_cmd):
        ip = in_ip
        cmd = self.commands[in_cmd]

        # Send command and receive reply
        try:
            self.app_log.info("Sending command to HS100 -  " + ip + " " + cmd)
            sock_tcp = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            sock_tcp.connect((ip, 9999))
            sock_tcp.settimeout(2)
            sock_tcp.send(self.encrypt(cmd))
            data = sock_tcp.recv(2048)
            sock_tcp.close()
            self.app_log.info("Command sent -  " + ip + " " + cmd)
            return self.decrypt(data[4:])
        except (socket.error, socket.timeout) as err:
            self.app_log.exception('Exception: %s', err)
            return -1

    def get_relay_state(self, ip):
        self.app_log.info("get_relay_state with ip - " + ip)
        value = self.send_command(ip, "info")
        if value == -1:  #error
            return value
        data = json.loads(value)
        return data['system']['get_sysinfo']['relay_state']

    def on_connect(self, mosclient, userdata, flags, rc):
        self.app_log.info("Subscribing to topic: " +
                          self._db.get_value("mostopic"))
        mosclient.subscribe(self._db.get_value("mostopic"))

    def on_message(self, mosclient, userdata, msg):
        messageparts = str(msg.payload).split("/")
        if len(messageparts) == 3 and messageparts[1] == "HS100":
            full_command = messageparts[2]
            commd = full_command[-1]
            self.app_log.info("Message received on mqtt: " + full_command)
            ip = full_command[0:len(messageparts[2]) - 1]
            self.app_log.info(ip + " - " + full_command)
            if commd == "+":
                self.send_command(ip, 'on')
            elif commd == "-":
                self.send_command(ip, 'off')

    def on_disconnect(self, client, userdata, rc):
        if rc != 0:
            self.app_log.info("Unexpected disconnection")

    def on_publish(self, client, userdata, mid):
        self.app_log.info("on_publish - published " + str(mid))

    def start_mos(self):
        self.mos_client = mqtt.Client()
        self.mos_client.on_connect = self.on_connect
        self.mos_client.on_message = self.on_message
        self.mos_client.on_disconnect = self.on_disconnect
        self.mos_client.on_publish = self.on_publish

        if len(self._db.get_value("mospassword")) > 0:
            self.mos_client.username_pw_set(self._db.get_value("mosusername"),
                                            self._db.get_value("mospassword"))

        mos_broker_address = self._db.get_value("mosbrokeraddress")

        self.app_log.info("Connecting to: " + mos_broker_address)

        self.mos_client.connect(mos_broker_address,
                                int(self._db.get_value("mosbrokerport")), 60)

        self.app_log.info("Connected")
        self.mos_client.loop_start()
        # self.mos_client.loop_forever()

    def broadcast_send(self, data_item, value):
        result = 0
        mid = 0

        if data_item is None:
            self.app_log.info("data_item is None")
            return

        if value is None:
            self.app_log.info("value is None")
            return

        self.app_log.info("publishing: " + data_item + " " + value)

        try:
            message = self._db.get_value(
                "name") + "/" + data_item + "/" + value
            result, mid = self.mos_client.publish(
                self._db.get_value("mostopic"), message)
            if result == mqtt.MQTT_ERR_SUCCESS:
                self.app_log.info("published OK, Message ID = " + str(mid))
            elif result == mqtt.MQTT_ERR_NO_CONN:
                self.app_log.info("publish failed, no connection")
            else:
                self.app_log.info("publish failed, result code = " +
                                  str(result))
        except Exception as e:
            self.app_log.exception('Exception: %s', e)

    def process_loop(self):
        x = 0
        # Poll for state changes of the device so we can keep the switches updated with the status, in case another type of switch was used.
        while True:
            value = ""

            try:
                self.app_log.info("getting relay state for: " + ips[x])
                value = self.get_relay_state(ips[x])
            except Exception as e:
                self.app_log.exception('Exception: %s', e)

            self.app_log.info("value is " + str(value))

            #send a message on change of state of the device
            if value == 1:
                self.broadcast_send("HS100_STATE", ips[x] + "+")
                self.app_log.info("sending relay state - " + "/HS100_STATE/" +
                                  ips[x] + "+")
            elif value == 0 or value == -1:
                self.broadcast_send("HS100_STATE", ips[x] + "-")
                self.app_log.info("sending relay state - " + "/HS100_STATE/" +
                                  ips[x] + "-")

            time.sleep(SLEEP_TIME)

            if x == self.socket_count - 1:
                x = 0
            else:
                x = x + 1
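
The encrypt/decrypt pair above is a plain XOR autokey cipher seeded with 171; a standalone round-trip sketch of the same scheme (module-level copies, purely illustrative; the four leading zero bytes simply mirror the placeholder header used in the class above):

def xor_encrypt(plain):
    key = 171
    out = "\0\0\0\0"          # placeholder for the 4-byte header
    for ch in plain:
        key = key ^ ord(ch)   # autokey: each ciphertext byte becomes the next key
        out += chr(key)
    return out

def xor_decrypt(cipher):
    key = 171
    out = ""
    for ch in cipher:
        out += chr(key ^ ord(ch))
        key = ord(ch)
    return out

payload = '{"system":{"get_sysinfo":{}}}'
assert xor_decrypt(xor_encrypt(payload)[4:]) == payload   # round trip, skipping the header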
Example #48
formatter = logging.Formatter('[%(asctime)s][%(levelname)s] %(message)s')

log = logging.getLogger('AutoSneknet')
log.setLevel(logging.DEBUG)

console = logging.StreamHandler()
console.setFormatter(formatter)
console.setLevel(logging.ERROR)
log.addHandler(console)

file = RotatingFileHandler(__main__.__file__ + '.log',
                           mode='a',
                           maxBytes=5 * 1024 * 1024,
                           backupCount=0,
                           encoding=None,
                           delay=0)
file.setFormatter(formatter)
file.setLevel(logging.DEBUG)
log.addHandler(file)


def except_handler(type_, value, tb):
    if type_ is KeyboardInterrupt:
        return

    t = ''.join(traceback.format_tb(tb))
    log.critical(f'FATAL ERROR: {type_.__name__}\n\n{t}\n{value}')


sys.excepthook = except_handler
Example #49
File: dxmarket.py  Project: HYBG/DX
g_home = os.getenv('IKNOW_HOME','/var/data/iknow')
if not g_home:
    raise Exception('IKNOW_HOME not found!')

sys.path.append(os.path.join(g_home,'lib'))
from dxdb import dxdblib

logd = os.path.join(g_home, 'log')
if not os.path.isdir(logd):
    os.makedirs(logd, 0o777)
g_logger = logging.getLogger('market')
formatstr = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'
logfile = os.path.join(logd,'market.log')
rh = RotatingFileHandler(logfile, maxBytes=100 * 1024 * 1024, backupCount=50)
rh.setLevel(logging.INFO)
fmter = logging.Formatter(formatstr)
rh.setFormatter(fmter)
g_logger.addHandler(rh)
g_logger.setLevel(logging.INFO)

class dxmarket:
    def __init__(self):
        self._db = dxdblib('localhost','root','123456','dx','utf8')

    def __del__(self):
        pass

    def GET(self):
        try:
            fromip = self._db.exesqlone('select count(*) from dx_global where name=%s and value=%s',(web.ctx.ip,'1'))
Example #50
def init_logger():
    handler = RotatingFileHandler('yummybox.log', maxBytes=10000, backupCount=1)
    handler.setLevel(logging.INFO)
    app.logger.addHandler(handler)
Example #51
# Create the logger object we will use to write to the logs
logger = logging.getLogger()
today = time.strftime("%d%m%Y", time.localtime())
# Set the logger level (INFO here) so that everything at that level and above is written
logger.setLevel(logging.INFO)
# Create a formatter that adds the time and the level
# of each message when a message is written to the log
formatter = logging.Formatter('%(asctime)s :: %(levelname)s :: %(message)s')
# Create a handler that redirects log writes to
# a file in 'append' mode, with 1 backup and a max size of 1 MB

file_name1 = catalogue_log + "\\" + today + ".log"
file_handler1 = RotatingFileHandler(file_name1, 'a', 1000000, 1)
# Set its level to INFO, tell it to use the formatter
# created above, and add this handler to the logger
file_handler1.setLevel(logging.INFO)
file_handler1.setFormatter(formatter)
logger.addHandler(file_handler1)

file_name2 = catalogue_log_debug + "\\" + today + ".log"
file_handler2 = RotatingFileHandler(file_name2, 'a', 1000000, 1)
# Set its level to DEBUG, tell it to use the formatter
# created above, and add this handler to the logger
file_handler2.setLevel(logging.DEBUG)
file_handler2.setFormatter(formatter)
logger.addHandler(file_handler2)

# Create a second handler that redirects each log write
# to the console
stream_handler = logging.StreamHandler()
stream_handler.setLevel(logging.WARNING)
Example #52
from flask import Flask, request, jsonify, render_template
from backtesting.backtest import Backtester

app = Flask(__name__,
            static_folder='../www/static',
            template_folder='../www/static/templates')

# Add a rotating file handler to keep track of error logging
if not app.debug:
    import logging
    from logging.handlers import RotatingFileHandler
    file_handler = RotatingFileHandler('errors.log',
                                       maxBytes=1024 * 1024 * 100,
                                       backupCount=20)
    file_handler.setLevel(logging.ERROR)
    formatter = logging.Formatter(
        "%(asctime)s - %(name)s - %(levelname)s - %(message)s")
    file_handler.setFormatter(formatter)
    app.logger.addHandler(file_handler)


@app.route("/")
def index():
    return render_template("index.html")


@app.route("/backtest", methods=['POST'])
def backtesting():

    coin_pair = request.args.get('pair')
    period_length = request.args.get('period')
Example #53
            else:
                msglist.append("<font color=%s>%s</font>" %
                               (colors[item], str(getattr(record, item))))
        message = ' '.join(msglist)
        print(message)
        return message
        # print self.format(record)
        # return self.format(record)


logging.root.setLevel(logging.DEBUG)
logging.root.propagate = 0
# Write the log to a rotating file
logpath = os.sep.join([os.getcwd(), 'Log', 'main.log'])
fh = RotatingFileHandler(logpath, maxBytes=10 * 1024 * 1024, backupCount=100)
fh.setLevel(logging.INFO)
# Write the log to the console
ch = logging.StreamHandler()
ch.setLevel(logging.INFO)

wh = LoggerHandler()
wh.setLevel(logging.DEBUG)
# Log record formatter
formatter = logging.Formatter(
    '%(asctime)s %(levelname)8s [%(filename)16s:%(lineno)04s] %(message)s')
fh.setFormatter(formatter)
ch.setFormatter(formatter)
logging.root.addHandler(fh)
# logging.root.addHandler(ch)
logging.root.addHandler(wh)
logger = logging.root
Example #54
def create_app():
    app = Flask(__name__)
    app.config.from_object(Config)
    bootstrap = Bootstrap(app)

    db.init_app(app)
    migrate.init_app(app, db)

    from app.models import User
    user_manager = UserManager(app, db, User)

    from app.errors import bp as errors_bp
    app.register_blueprint(errors_bp)

    from app.api import bp as bp_api
    app.register_blueprint(bp_api, url_prefix='/api')

    from app.main import bp as bp_main
    app.register_blueprint(bp_main)

    from app.report import bp as bp_report
    app.register_blueprint(bp_report)

    from app.collect import bp as bp_survey
    app.register_blueprint(bp_survey)

    @app.context_processor
    def context_processor():
        return dict(user_manager=user_manager)

    if not app.debug:
        if not os.path.exists('logs'):
            os.mkdir('logs')

        file_handler = RotatingFileHandler('logs/application.log',
                                           maxBytes=10240,
                                           backupCount=10)

        file_handler.setFormatter(
            logging.Formatter(
                '%(asctime)s %(levelname)s: %(message)s [in %(pathname)s:%(lineno)d]'
            ))

        file_handler.setLevel(logging.INFO)
        app.logger.addHandler(file_handler)

        app.logger.setLevel(logging.INFO)
        app.logger.info('Application startup')

        if app.config['MAIL_SERVER']:
            auth = None

            if app.config['MAIL_USERNAME'] or app.config['MAIL_PASSWORD']:
                auth = (app.config['MAIL_USERNAME'],
                        app.config['MAIL_PASSWORD'])
            secure = None

            if app.config['MAIL_USE_TLS']:
                secure = ()

            mail_handler = SMTPHandler(
                mailhost=(app.config['MAIL_SERVER'],
                          app.config['INCOMING_MAIL_PORT']),
                fromaddr=app.config['USER_EMAIL_SENDER_EMAIL'],
                toaddrs=app.config['ADMINS'],
                subject='%s Error' % app.config['USER_APP_NAME'],
                credentials=auth,
                secure=secure)

            mail_handler.setLevel(logging.ERROR)
            app.logger.addHandler(mail_handler)

    return app
Example #55
# Set logging parameters
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
formatter = logging.Formatter(
    "%(asctime)s — %(name)s — %(levelname)s — %(funcName)s:%(lineno)d — %(message)s"
)
debug = RotatingFileHandler("file/logs/DebugKommuneLog.log",
                            maxBytes=10 * 1024 * 1024,
                            backupCount=2)
inf = RotatingFileHandler("file/logs/InfoKommuneLog.log",
                          maxBytes=10 * 1024 * 1024,
                          backupCount=2)
err = RotatingFileHandler("file/logs/ErrorKommuneLog.log",
                          maxBytes=10 * 1024 * 1024,
                          backupCount=2)
inf.setLevel(logging.INFO)
err.setLevel(logging.ERROR)
debug.setLevel(logging.DEBUG)
inf.setFormatter(formatter)
err.setFormatter(formatter)
debug.setFormatter(formatter)
logger.addHandler(err)
logger.addHandler(inf)
logger.addHandler(debug)
disable_warnings()

try:
    logger.info("Loading Proxie settings", exc_info=True)
    proxies = json.load(open("file/config_.json", "r"))["proxies"]
except FileNotFoundError:
    logger.exception(
Example #56
            secure = ()
        mail_handler = SMTPHandler(
            mailhost=(app.config['MAIL_SERVER'], app.config['MAIL_PORT']),
            fromaddr='no-reply@' + app.config['MAIL_SERVER'],
            toaddrs=app.config['ADMINS'],
            subject='Microblog Failure',
            credentials=auth,
            secure=secure)
        mail_handler.setLevel(logging.ERROR)
        app.logger.addHandler(mail_handler)
        if not os.path.exists('logs'):
            os.mkdir('logs')

        # The log file is written as logs/microblog.log, in a logs directory
        # created above if it doesn't already exist. RotatingFileHandler is
        # nice because it rotates the logs, ensuring the files do not grow too
        # large when the application runs for a long time: here each file is
        # limited to 10KB and the last ten files are kept as backups.
        file_handler = RotatingFileHandler(
            'logs/microblog.log', maxBytes=10240, backupCount=10)
        # logging.Formatter provides custom formatting for the log messages.
        # Since these messages go to a file, they carry as much information as
        # possible: the timestamp, the logging level, the message, and the
        # source file and line number where the log entry originated.
        file_handler.setFormatter(
            logging.Formatter(
                '%(asctime)s %(levelname)s: %(message)s [in %(pathname)s:%(lineno)d]'
            ))
        # Lower the logging level to INFO on both the application logger and
        # the file handler. The logging categories are DEBUG, INFO, WARNING,
        # ERROR and CRITICAL, in increasing order of severity.
        file_handler.setLevel(logging.INFO)
        app.logger.addHandler(file_handler)

        app.logger.setLevel(logging.INFO)
        app.logger.info('Microblog startup')

from app import routes, models, errors
# 'app' here is the package directory in which this file lives; models is the new module that defines the database structure
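
The comment above on logging categories lists the standard severity order (DEBUG < INFO < WARNING < ERROR < CRITICAL); a tiny standalone sketch, independent of the application above, showing how a handler level filters records:

import logging

demo = logging.getLogger('level-demo')
demo.setLevel(logging.DEBUG)          # the logger lets everything through

handler = logging.StreamHandler()
handler.setLevel(logging.INFO)        # the handler drops anything below INFO
demo.addHandler(handler)

demo.debug('filtered out by the handler')
demo.info('emitted to the console')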
Example #57
formatter = logging.Formatter(
    '[%(asctime)s][pid:%(process)s-tid:%(thread)s] %(module)s.%(funcName)s: %(levelname)s: %(message)s'
)

# StreamHandler for print log to console
hdr = logging.StreamHandler()
hdr.setFormatter(formatter)
hdr.setLevel(logging.DEBUG)

# RotatingFileHandler for the analysis log
fhr_ana = RotatingFileHandler('%s/analysis.log' % (log_dir_path),
                              maxBytes=10 * 1024 * 1024,
                              backupCount=3)
fhr_ana.setFormatter(formatter)
fhr_ana.setLevel(logging.DEBUG)

# RotatingFileHandler for the process log
fhr_pro = RotatingFileHandler('%s/process.log' % (log_dir_path),
                              maxBytes=10 * 1024 * 1024,
                              backupCount=3)
fhr_pro.setFormatter(formatter)
fhr_pro.setLevel(logging.DEBUG)

# RotatingFileHandler for the model log
fhr_model = RotatingFileHandler('%s/model.log' % (log_dir_path),
                                maxBytes=10 * 1024 * 1024,
                                backupCount=3)
fhr_model.setFormatter(formatter)
fhr_model.setLevel(logging.DEBUG)
Example #58
logging_level = logging.INFO
if setting.logging_debug_level:
    logging_level = logging.DEBUG

logFormatter = logging.Formatter(fmt=setting.logging_format,
                                 datefmt=setting.logging_datefmt)

fileHandler = RotatingFileHandler(filename=setting.logging_filename,
                                  mode='a',
                                  backupCount=2,
                                  maxBytes=setting.logging_file_maxBytes,
                                  encoding=None,
                                  delay=0)
fileHandler.setFormatter(logFormatter)
fileHandler.setLevel(logging_level)

consoleHandler = logging.StreamHandler()
consoleHandler.setFormatter(logFormatter)

rootLogger = logging.getLogger('')  # root logger
rootLogger.setLevel(logging.NOTSET)
# Remove any pre-existing handlers from the root logger,
# otherwise messages would appear more than once.
while rootLogger.handlers:
    rootLogger.handlers.pop()
rootLogger.addHandler(fileHandler)
rootLogger.addHandler(consoleHandler)

# Disable log messages from the Requests library
logging.getLogger("urllib3").setLevel(logging.WARNING)
logging.getLogger("requests").setLevel(logging.WARNING)
Example #59
log = app.logger
log.setLevel(logging.INFO)
LOG_PATH = os.path.abspath("../../logging") \
           if not hasattr(config, 'LOG_PATH') \
           else config.LOG_PATH
os.makedirs(LOG_PATH, exist_ok=True)
error_handler = RotatingFileHandler(os.path.join(LOG_PATH, "errors.log"),
                                    maxBytes=10 * 1024 * 1024,
                                    backupCount=5)
info_handler = RotatingFileHandler(os.path.join(LOG_PATH, "general.log"),
                                   maxBytes=10 * 1024 * 1024,
                                   backupCount=5)
# console_handler = logging.StreamHandler()
# console_handler.setFormatter(formatter)
# console_handler.setLevel(logging.DEBUG)
error_handler.setLevel(logging.ERROR)
error_handler.setFormatter(formatter)
info_handler.setLevel(logging.INFO)
info_handler.setFormatter(formatter)
log.addHandler(error_handler)
# log.addHandler(console_handler)
log.addHandler(info_handler)
log.setLevel(logging.INFO)

HIDE = ['SECRET_KEY']
log.info("##### CONFIGURATION VALUES ###################\n%s" % \
         "\n".join(["\t\t\t%s: %s" % (key, value)
                    for key, value in app.config.items()
                    if key not in HIDE]))

CURRENT_DIR = os.path.abspath(os.curdir)
Example #60
class SiisLog(object):
    """
    Siis logger initialized based on python logger.
    """
    def __init__(self, options, style=''):
        # if init before terminal
        colorama.init()

        # stderr to terminal in info level
        self.console = TerminalHandler()  #  logging.StreamHandler()
        self.console.setLevel(logging.INFO)

        # self.term_formatter = logging.Formatter('- %(name)-12s: %(levelname)-8s %(message)s')
        self.term_formatter = ColoredFormatter('%(name)-s%(message)s', style)
        self.console.setFormatter(self.term_formatter)

        # add the handler to the root logger
        logging.getLogger('').addHandler(self.console)

        # default log file formatter
        self.file_formatter = logging.Formatter(
            '%(asctime)s %(levelname)s %(message)s')

        # a siis logger with siis.log
        self.file_logger = RotatingFileHandler(options['log-path'] + '/' +
                                               options['log-name'],
                                               maxBytes=1024 * 1024,
                                               backupCount=5)
        # self.file_logger = logging.FileHandler(options['log-path'] + '/' + options['log-name'])
        self.file_logger.setFormatter(self.file_formatter)
        self.file_logger.setLevel(logging.DEBUG)

        self.add_file_logger('siis', self.file_logger)

        # a siis logger with exec.siis.log
        # self.exec_file_logger = logging.FileHandler(options['log-path'] + '/' + "exec." + options['log-name'])
        self.exec_file_logger = RotatingFileHandler(
            options['log-path'] + '/' + "exec." + options['log-name'],
            maxBytes=1024 * 1024,
            backupCount=5)
        self.exec_file_logger.setFormatter(self.file_formatter)
        self.exec_file_logger.setLevel(logging.INFO)

        # don't propagate execution to siis logger
        self.add_file_logger('siis.exec', self.exec_file_logger, propagate=False)

        # a siis logger with error.siis.log
        # self.error_file_logger = logging.FileHandler(options['log-path'] + '/' + "error." + options['log-name'])
        self.error_file_logger = RotatingFileHandler(
            options['log-path'] + '/' + "error." + options['log-name'],
            maxBytes=1024 * 1024,
            backupCount=5)
        self.error_file_logger.setFormatter(self.file_formatter)
        self.error_file_logger.setLevel(logging.INFO)

        # don't propagate error trade to siis logger
        self.add_file_logger('siis.error', self.error_file_logger, propagate=False)

        # a siis logger with signal.siis.log
        # self.signal_file_logger = logging.FileHandler(options['log-path'] + '/' + "signal." + options['log-name'])
        self.signal_file_logger = RotatingFileHandler(
            options['log-path'] + '/' + "signal." + options['log-name'],
            maxBytes=1024 * 1024,
            backupCount=5)
        self.signal_file_logger.setFormatter(self.file_formatter)
        self.signal_file_logger.setLevel(logging.INFO)

        # don't propagate signal trade to siis logger
        self.add_file_logger('siis.signal', self.signal_file_logger, propagate=False)

    def add_file_logger(self,
                        name,
                        handler,
                        level=logging.DEBUG,
                        propagate=True):
        my_logger = logging.getLogger(name)

        my_logger.addHandler(handler)
        my_logger.setLevel(level)
        my_logger.propagate = propagate

        return my_logger
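
A hedged construction sketch for the class above (the option keys come from the code itself; the path and filename values, and the availability of the project's TerminalHandler and ColoredFormatter, are assumptions):

options = {'log-path': './logs', 'log-name': 'siis.log'}
siis_log = SiisLog(options)

logging.getLogger('siis').info('written to siis.log and echoed to the terminal')
logging.getLogger('siis.error').error('written to error.siis.log only, since propagate is False')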