Example #1
def get_logger(name,
               to_file=True,
               to_stdout=False,
               log_path=STD_LOGPATH,
               format_str=STD_FORMAT):

    logger = logging.getLogger(name)

    if name in LOGGERS:
        return logger

    logger.setLevel(logging.INFO)
    formatter = logging.Formatter(format_str)

    if to_file:
        os.makedirs(log_path, exist_ok=True)
        file_path = os.path.join(log_path, '%s.log' % name)
        handler = RotatingFileHandler(file_path, maxBytes=5e6, backupCount=5)
        handler.setFormatter(formatter)
        logger.addHandler(handler)
    if to_stdout:
        handler = logging.StreamHandler(sys.stdout)
        handler.setFormatter(formatter)
        logger.addHandler(handler)
    LOGGERS.add(name)
    return logger
Example #2
    def __init__(self, filename):
        pwd = os.path.abspath(os.path.dirname(__file__))

        directory = os.path.join(pwd, self.folder)
        if not os.path.exists(directory):
            os.mkdir(directory)

        self.file_path = directory + '/' + filename

        self.log = logging.getLogger(filename)
        self.log.setLevel(self.level)

        handler = ConcurrentRotatingFileHandler(self.file_path,
                                                'a',
                                                1024 * 1024 * 100,
                                                backupCount=5,
                                                encoding='utf-8')
        # handler.suffix = "%Y-%m-%d"

        # Set the output format
        # format_log = "%(asctime)s %(threadName)s %(funcName)s %(filename)s:%(lineno)s %(levelname)s %(message)s"
        formatter = logging.Formatter(
            '%(asctime)s [%(processName)s %(threadName)s %(levelname)s %(module)s:%(funcName)s:%(lineno)d] %(message)s'
        )
        # fmt = logging.Formatter(formatter)
        handler.setFormatter(formatter)

        self.log.addHandler(handler)

        # 控制台输出
        stream = logging.StreamHandler()
        stream.setFormatter(formatter)

        self.log.addHandler(stream)
Example #3
def set_logger():
    log_level = __conf["log"]["level"].upper()
    log_path = os.path.abspath(
        os.path.join(os.path.split(__file__)[0], "../logs/threat-detection"))
    log_size = 4 * 1024 * 1024

    log = logging.getLogger("threat_intelligence")

    if len(log.handlers) == 0:
        rotate_handler = ConcurrentRotatingFileHandler(log_path, "a", log_size,
                                                       5)

        if log_level == "DEBUG":
            log.setLevel(logging.DEBUG)
        elif log_level == "INFO":
            log.setLevel(logging.INFO)
        elif log_level == "WARNING":
            log.setLevel(logging.WARNING)
        elif log_level == "ERROR":
            log.setLevel(logging.ERROR)
        else:
            raise ValueError("logLevel should be DEBUG/INFO/WARNING/ERROR.")

        # set logs formatter
        formatter = logging.Formatter(
            '%(asctime)s - %(levelname)s - %(message)s')
        rotate_handler.setFormatter(formatter)

        # add handler to logger
        log.addHandler(rotate_handler)

    return log
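The if/elif ladder above maps a level name to a logging constant; a more compact equivalent (a sketch, not from the original project) resolves the name with getattr on the logging module:

import logging

def parse_level(name):
    # logging exposes DEBUG/INFO/WARNING/ERROR as module attributes
    level = getattr(logging, name.upper(), None)
    if not isinstance(level, int):
        raise ValueError("logLevel should be DEBUG/INFO/WARNING/ERROR.")
    return level

log = logging.getLogger("threat_intelligence")
log.setLevel(parse_level("info"))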
Example #4
def get_logger(ENV, BASE_DIR):
 
  # temporary 
  is_email_script = pathlib.Path(sys.argv[0]).name == "email_db_report.py"

  # use Airbrake in production
  if(ENV=="production" and not is_email_script):
    log = airbrake.getLogger()
    log.setLevel(logging.INFO)
  else:
    log = logging.getLogger(__name__)
    log.setLevel(logging.DEBUG)

  # print all debug and higher to STDOUT
  # if the environment is development
  if(ENV=="development"): 
    stdoutHandler = logging.StreamHandler(sys.stdout)
    stdoutHandler.setLevel(logging.DEBUG)
    log.addHandler(stdoutHandler)

  logfile = os.path.abspath(BASE_DIR + "/logs/CivilServant_" + ENV + ".log")
  print("Logging to " + BASE_DIR + "/logs/CivilServant_" + ENV + ".log")
  formatter = logging.Formatter('%(asctime)s - %(name)s({env}) - %(levelname)s - %(message)s'.format(env=ENV))

  rotateHandler = ConcurrentRotatingFileHandler(logfile, "a", 32 * 1000 * 1024, 5)
  rotateHandler.setLevel(logging.DEBUG)
  rotateHandler.setFormatter(formatter)
  log.addHandler(rotateHandler)
  return log
Example #5
def create_app():
    app = Flask(__name__)

    # Logging module
    rotateHandler = ConcurrentRotatingFileHandler('%s/logs/service.log' %
                                                  PROJECT_PATH,
                                                  'a',
                                                  800 * 1024 * 1024,
                                                  backupCount=10,
                                                  encoding='utf-8')
    datefmt_str = '%Y-%m-%d %H:%M:%S'
    format_str = '%(asctime)s %(levelname)s %(module)s.%(funcName)s Line:%(lineno)d %(message)s'
    formatter = logging.Formatter(format_str, datefmt_str)
    rotateHandler.setFormatter(formatter)
    app.logger.addHandler(rotateHandler)
    app.logger.setLevel(logging.DEBUG)

    app.config.from_object(config)
    config.init_app(app)

    # Initialize the db
    db.init_app(app)

    # Initialize the cache
    cache.init_app(app)

    return app
Example #6
def setupLogHandlers(fname, formatter=None, **kwargs):
    """
    Create a RotatingFileHandler to be used by a logger, and possibly a
    GELFHandler.

    By default the RotatingFileHandler stores 100 MB before starting a new
    log file and the last 10 log files are kept. The default formatter shows
    the logging level, current time, the function that created the log entry,
    and the specified message.

    :param str fname: path to the filename where logs will be written to
    :param logging.Formatter formatter: a custom formatter for this logger
    :param kwargs: custom parameters for the RotatingFileHandler
    :rtype: tuple
    """
    if formatter is None:
        formatter = logging.Formatter('%(levelname)s [%(asctime)s] %(funcName)s: %(message)s')

    opts = {'maxBytes': 100 * 1024 * 1024, 'backupCount': 10, 'debug': False}
    opts.update(kwargs)
    handler = RotatingFileHandler(os.path.join(pikaconfig.LOG_DIR, fname), **opts)
    handler.setFormatter(formatter)

    handlers = (handler, )
    if USE_GELF:
        gelf_handler = GELFHandler(**USE_GELF)
        gelf_handler.setLevel(logging.INFO)  # Ignore DEBUG messages.
        handlers += (gelf_handler, )

    return handlers
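A self-contained sketch of the same pattern using only the standard library (LOG_DIR stands in for pikaconfig.LOG_DIR, and the optional GELF branch is omitted):

import logging
import os
from logging.handlers import RotatingFileHandler

LOG_DIR = './logs'  # stand-in for pikaconfig.LOG_DIR

def setup_log_handlers(fname, formatter=None, **kwargs):
    if formatter is None:
        formatter = logging.Formatter('%(levelname)s [%(asctime)s] %(funcName)s: %(message)s')
    os.makedirs(LOG_DIR, exist_ok=True)
    opts = {'maxBytes': 100 * 1024 * 1024, 'backupCount': 10}
    opts.update(kwargs)
    handler = RotatingFileHandler(os.path.join(LOG_DIR, fname), **opts)
    handler.setFormatter(formatter)
    return (handler,)

logger = logging.getLogger('worker')
for h in setup_log_handlers('worker.log'):
    logger.addHandler(h)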
Example #7
def init_http_logger(config):
    logger = logging.getLogger(config.LOGGER_HTTP_NAME)
    del logger.handlers[:]
    Rthandler = ConcurrentRotatingFileHandler('/tmp/snapperhttp.log', maxBytes=(100*1024), backupCount=1)
    Rthandler.setFormatter(Formatter(config.LOG_FORMAT))
    logger.addHandler(Rthandler)
    logger.setLevel(logging.DEBUG)
Example #8
def log_config(f_level=logging.INFO,
               c_level=logging.CRITICAL,
               out_path='',
               filename='info',
               fix=False):
    logfile = os.path.join(out_path, filename) + '-' + time.strftime('%Y_%m%d_%H%M%S', time.localtime()) + '.log' \
        if not fix else os.path.join(out_path, filename) + '.log'
    print("2:", logfile)
    logger = logging.getLogger(logfile)
    if logger.handlers:
        for handler in list(logger.handlers):
            logger.removeHandler(handler)
    logger.setLevel(f_level)

    fh = LogHandler(logfile, maxBytes=100 * 1024 * 1024, backupCount=50)
    fh.setLevel(f_level)

    ch = logging.StreamHandler()
    ch.setLevel(c_level)

    formatter = logging.Formatter(
        '[%(levelname)s]--%(asctime)s--[%(filename)s %(funcName)s %(lineno)d]: %(message)s'
    )
    fh.setFormatter(formatter)
    ch.setFormatter(formatter)

    logger.addHandler(fh)
    logger.addHandler(ch)

    return logger, logfile
Example #9
    def __init__(self, path, clevel=logging.DEBUG, Flevel=logging.DEBUG, when='M', backCount=5,
                 fmt='%(asctime)s - %(pathname)s[line:%(lineno)d] - %(levelname)s: %(message)s'):
        self.logger = logging.getLogger(path)
        self.logger.setLevel(logging.DEBUG)
        fmt = logging.Formatter('%(asctime)s | %(message)s', '%Y-%m-%d %H:%M:%S')
        # fmt = logging.Formatter('%(asctime)s | %(message)s')
        # Use an absolute path to prevent file rotation trouble.
        logfile = os.path.abspath(path)
        # Rotate log after reaching 10 GB, keep backCount old copies.
        rh = ConcurrentRotatingFileHandler(logfile, "a", 10 * 1024 * 1024 * 1024, backCount)
        # th = handlers.TimedRotatingFileHandler(filename=logfile, when=when, backupCount=backCount, encoding='utf-8')
        rh.setFormatter(fmt)

        # Set up console logging
        sh = logging.StreamHandler()
        sh.setFormatter(fmt)
        sh.setLevel(clevel)
        # Set up file logging
        fh = logging.FileHandler(path, encoding='utf-8')
        fh.setFormatter(fmt)
        fh.setLevel(Flevel)
        self.logger.addHandler(sh)
        # self.logger.addHandler(fh)
        # self.logger.addHandler(th)
        self.logger.addHandler(rh)
Example #10
def add_rotating_file_logger(logger, logfile, log_level=None, format=None, mode="a", 
    maxBytes=10*(1024**2), backupCount=5):
    """Add a rotating file logger to the logger."""
    log_level = log_level or logging.DEBUG
    format = format or BRIEF_LOG_FORMAT

    # touch the logfile
    if not os.path.exists(logfile):
        try:
            fo = open(logfile, "w")
            fo.close()
        except (ValueError, IOError):
            return

    # is the logfile really a file?
    if not os.path.isfile(logfile):
        return

    # check if the logfile is writable
    if not os.access(logfile, os.W_OK):
        return

    handler = RFHandler(logfile, maxBytes=maxBytes, backupCount=backupCount, mode=mode)
    handler.setFormatter(logging.Formatter(format, datefmt="%Y-%m-%d %H:%M:%S"))
    handler.setLevel(log_level)
    logger.addHandler(handler)
Example #11
def configure_logging(logger, log_filename, log_level=logging.INFO, 
        stderr_level=logging.ERROR, log_dir=LOG_DIR):
    """Configures logging for given logger using the given filename.

    :return None.
    """
    # If the logging directory doesn't exist, create it.
    if not os.path.exists(log_dir):
        os.makedirs(log_dir)

    # Determine path to log file.
    log_path = os.path.join(log_dir, log_filename)

    # Create an IdentityFilter.
    identity = get_identifier()
    identity_filter = IdentityFilter(identity=identity)

    # Create a log handler and formatter and apply them to the logger.
    hdlr = ConcurrentRotatingFileHandler(filename=log_path,
                                         maxBytes=1000000,
                                         backupCount=5)
    hdlr.addFilter(identity_filter)
    formatter = logging.Formatter(LOG_FORMAT)
    hdlr.setFormatter(formatter)
    logger.addHandler(hdlr)
    logger.setLevel(log_level)

    # Attach a stderr handler to the log.
    stderr_hdlr = logging.StreamHandler(sys.stderr)
    stderr_hdlr.setLevel(stderr_level)
    stderr_hdlr.setFormatter(formatter)
    logger.addHandler(stderr_hdlr)
Example #12
def startlogging(log, logfile, loglevel=logging.INFO, consolelevel=None):
    """Start the logging system to store rotational file based log."""
    
    try:
        from cloghandler import ConcurrentRotatingFileHandler as RFHandler
    except ImportError:
        # Next 2 lines are optional: issue a warning to the user
        from warnings import warn
        warn("ConcurrentLogHandler package not installed.  Using builtin log handler")
        from logging.handlers import RotatingFileHandler as RFHandler

    
    if not consolelevel:
        consolelevel = loglevel

    log.setLevel(loglevel)
    #create file handler and set level to debug
    fh = RFHandler(filename=logfile, maxBytes=2**20, backupCount=50)
    fh.setLevel(loglevel)
    #create console handler and set level to error
    ch = logging.StreamHandler()
    ch.setLevel(consolelevel)
    #create formatter
    formatter = logging.Formatter("%(asctime)s - %(name)s - %(levelname)s - %(message)s")
    #add formatter to fh
    fh.setFormatter(formatter)
    #add formatter to ch
    ch.setFormatter(formatter)
    #add fh to logger
    log.addHandler(fh)
    #add ch to logger
    log.addHandler(ch)
    log.debug("Logging started at level %d", loglevel)
    return log
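Hypothetical usage of startlogging (the logger name, file name, and levels are illustrative):

import logging

log = startlogging(logging.getLogger('myapp'), 'myapp.log',
                   loglevel=logging.INFO, consolelevel=logging.ERROR)
log.info('goes to the rotating file only')
log.error('goes to the rotating file and the console')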
Example #13
def log(message, path=None, level=None, filename=None, log_type=None):
    if not filename:
        filename = 'python_logs'
    if not path:
        path = './logs/'
        if not os.path.exists(path):
            os.makedirs(path)
    logger = logging.getLogger(filename)

    if not level:
        logger.setLevel(logging.INFO)
    elif level == 'debug':
        logger.setLevel(logging.DEBUG)

    #  If logger.handlers is empty, add a handler; otherwise just write, to avoid duplicate log entries
    if not logger.handlers:
        filehandler = ConcurrentRotatingFileHandler(path + filename + '.log')
        formatter = logging.Formatter(
            '%(asctime)s|%(levelname)s|%(name)s|%(message)s')
        filehandler.setFormatter(formatter)
        logger.addHandler(filehandler)

    if not log_type:
        logger.info(message)
    elif log_type == 'error':
        logger.error(message)
    elif log_type == 'warning':
        logger.warning(message)
    elif log_type == 'debug':
        logger.debug(message)
Example #14
def setup_logging(file_, name_, level=config['LOGGING_LEVEL']):
    """
    Sets up generic logging to file with rotating files on disk

    :param file_: the __file__ doc of python module that called the logging
    :param name_: the name of the file that called the logging
    :param level: the level of the logging DEBUG, INFO, WARN
    :return: logging instance
    """

    level = getattr(logging, level)

    logfmt = '%(levelname)s\t%(process)d [%(asctime)s]:\t%(message)s'
    datefmt = '%m/%d/%Y %H:%M:%S'
    formatter = logging.Formatter(fmt=logfmt, datefmt=datefmt)
    logging_instance = logging.getLogger(name_)
    fn_path = os.path.join(os.path.dirname(file_), PROJ_HOME, 'logs')
    if not os.path.exists(fn_path):
        os.makedirs(fn_path)
    fn = os.path.join(fn_path, '{0}.log'.format(name_))
    rfh = ConcurrentRotatingFileHandler(filename=fn,
                                        maxBytes=2097152,
                                        backupCount=5,
                                        mode='a',
                                        encoding='UTF-8')  # 2MB file
    rfh.setFormatter(formatter)
    logging_instance.handlers = []
    logging_instance.addHandler(rfh)
    logging_instance.setLevel(level)

    return logging_instance
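Hypothetical usage of setup_logging (config, PROJ_HOME and the module layout are assumptions carried over from the example):

logger = setup_logging(__file__, 'ingest', level='INFO')
logger.info('written to a 2MB rotating file under the project logs folder')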
Example #15
 def __init__(self):
     """ Setup configuration and logging """
     try:
         debug = getattr(settings, 'DEBUG', False)
         self.mysql_host = getattr(settings, 'MYSQL_HOST')
         self.mysql_db = getattr(settings, 'MYSQL_DB')
         self.mysql_user = getattr(settings, 'MYSQL_USER')
         self.mysql_pass = getattr(settings, 'MYSQL_PASS')
         self.mysql_ssl = getattr(settings, 'MYSQL_SSL_CA', None)
     except AttributeError as ex:
         print('Missing or invalid configuration. Check settings.py')
         print(str(ex))
     if debug:
         ltype = 'DEBUG'
         level = logging.DEBUG
     else:
         ltype = 'INFO'
         level = logging.INFO
     maxsize = 16 * 1024 * 1024
     handler = RFHandler(logfile, 'a', maxBytes=maxsize, backupCount=9)
     form = '%(asctime)s [%(process)d] %(levelname)s: %(message)s'
     fmat = logging.Formatter(form)
     fmat.converter = time.gmtime
     global logg
     logg = logging.getLogger('ClassExample')
     logg.propagate = True
     logg.setLevel(level)
     handler.setFormatter(fmat)
     logg.addHandler(handler)
     form = 'ClassExample %s process starting in %s mode'
     args = (__version__, ltype)
     logg.info(form % args)
Example #16
def build_logger_env(worker_name, log_level=logging.NOTSET):
    """规范化log输出"""

    logdir_path = os.path.join(os.path.dirname(__file__),
                                    os.pardir,
                                    'log'
                                    )
    if not os.path.exists(logdir_path):
        os.makedirs(logdir_path)
    logger = __loggers.get(worker_name)
    if logger:
        return logger
    logger = logging.getLogger(worker_name)
    logger.propagate = 0                    # do not propagate records to the parent logger
    logger.setLevel(log_level)
    ch = ConcurrentRotatingFileHandler(os.path.join(logdir_path, '%s.log'%worker_name),
                                      'a',
                                      50*1024*1024,
                                      5
                                      )
    ch.setLevel(log_level)
    formatter = logging.Formatter('%(asctime)s %(levelname)s %(module)s.%(funcName)s[%(lineno)d] MSG:%(message)s')
    ch.setFormatter(formatter)
    logger.addHandler(ch)
    __loggers.setdefault(worker_name, logger)
    return logger
Example #17
def f_log_concurrent(log_file):
    LEVELS = {
        'debug': logging.DEBUG,
        'info': logging.INFO,
        'error': logging.ERROR
    }
    log = logging.getLogger()
    level = LEVELS.get(LOGLEVEL, logging.NOTSET)

    # Use an absolute path to prevent file rotation trouble.
    logfile = os.path.abspath(log_file)
    # Rotate log after reaching 1G, keep 60 old copies.
    rotateHandler = ConcurrentRotatingFileHandler(logfile,
                                                  "a",
                                                  1024 * 1024 * 1024,
                                                  60,
                                                  encoding="utf-8")
    fm = logging.Formatter(
        "%(asctime)s %(filename)s[line:%(lineno)d] %(levelname)s %(message)s",
        "%Y-%m-%d %H:%M:%S",
    )
    rotateHandler.setFormatter(fm)
    log.addHandler(rotateHandler)
    log.setLevel(level)
    return log
Example #18
    def get_instance(tag="test"):
        logging.config.dictConfig(LOGGING)
        logger = logging.getLogger("clogger")

        if not os.path.exists(os.path.join(LOG_PATH)):
            os.makedirs(os.path.join(LOG_PATH))

        logfile = os.path.join(LOG_PATH) + '%s.log' % tag

        fh = RFHandler(logfile,
                       maxBytes=1024 * 1024 * 100,
                       backupCount=10,
                       delay=0.05)
        formatter = logging.Formatter(
            '[%(asctime)s - %(levelno)s] - %(message)s')
        fh.setFormatter(formatter)
        fh.setLevel(logging.DEBUG)

        logger.addHandler(fh)

        #         error_logfile = os.path.join(LOG_PATH) + '%s_error.log' % tag
        #         efh = RFHandler(error_logfile, maxBytes=1024 * 1024 * 100, backupCount=10, delay=0.05)
        #         efh.setFormatter(formatter)
        #         efh.setLevel(logging.ERROR)
        #         logger.addHandler(efh)

        return logger
Example #19
def setup_logging(file_, name_, level='DEBUG'):
    """
    Sets up generic logging to file with rotating files on disk

    :param file_: the __file__ doc of python module that called the logging
    :param name_: the name of the file that called the logging
    :param level: the level of the logging DEBUG, INFO, WARN
    :return: logging instance
    """

    level = getattr(logging, level)

    logfmt = '%(levelname)s\t%(process)d [%(asctime)s]:\t%(message)s'
    datefmt = '%m/%d/%Y %H:%M:%S'
    formatter = logging.Formatter(fmt=logfmt, datefmt=datefmt)
    logging_instance = logging.getLogger(name_)
    fn_path = os.path.join(os.path.abspath(os.path.join(os.path.dirname(__file__), '..')), 'logs')
    if not os.path.exists(fn_path):
        os.makedirs(fn_path)
    fn = os.path.join(fn_path, '{0}.log'.format(name_))
    rfh = ConcurrentRotatingFileHandler(filename=fn,
                                        maxBytes=2097152,
                                        backupCount=5,
                                        mode='a',
                                        encoding='UTF-8')  # 2MB file
    rfh.setFormatter(formatter)
    logging_instance.handlers = []
    logging_instance.addHandler(rfh)
    logging_instance.setLevel(level)

    return logging_instance
Example #20
def configure_logging(app):
    """Configure logging."""

    try:
        from cloghandler import ConcurrentRotatingFileHandler as RotatingFileHandler
    except ImportError:
        RotatingFileHandler = logging.handlers.RotatingFileHandler 

    def log_exception(exc_info):
        """
        Override default Flask.log_exception for more verbose logging on
        exceptions.
        """
        try:
            oauth_user = request.oauth
        except AttributeError:
            oauth_user = None

        app.logger.error(
            """
            Request:     {method} {path}
            IP:          {ip}
            Agent:       {agent_platform} | {agent_browser} {agent_browser_version}
            Raw Agent:   {agent}
            Oauth2:      {oauth_user}
            """.format(
                method=request.method,
                path=request.path,
                ip=request.remote_addr,
                agent_platform=request.user_agent.platform,
                agent_browser=request.user_agent.browser,
                agent_browser_version=request.user_agent.version,
                agent=request.user_agent.string,
                oauth_user=oauth_user,
                ), exc_info=exc_info
        )
    app.log_exception = log_exception

    fn = app.config.get('LOG_FILE')
    if fn is None:
        fn = os.path.join(app.instance_path, 'logs/adsws.log')

    if not os.path.exists(os.path.dirname(fn)):
        os.makedirs(os.path.dirname(fn))

    rfh = RotatingFileHandler(fn, maxBytes=1000000, backupCount=10)
    rfh.setFormatter(logging.Formatter(
        '%(asctime)s %(levelname)s: %(message)s '
        '[in %(pathname)s:%(lineno)d]')
    )
    # NOTE:
    # Setting the level on just the handler seems to have *no* effect;
    # setting the level on app.logger seems to have the desired effect.
    # I do not understand this behavior
    # rfh.setLevel(app.config.get('LOG_LEVEL', logging.INFO))
    app.logger.setLevel((app.config.get('LOG_LEVEL', logging.INFO)))
    if rfh not in app.logger.handlers:
        app.logger.addHandler(rfh)
    app.logger.debug("Logging initialized")
Example #21
def get_json_log_handler(path, app_name, json_fields):
    handler = ConcurrentRotatingFileHandler(path, "a", 2 * 1024 * 1024 * 1024,
                                            1)
    formatter = LogstashFormatter()
    formatter.defaults['@tags'] = ['collector', app_name]
    formatter.defaults['@fields'] = json_fields
    handler.setFormatter(formatter)
    return handler
Example #22
def get_json_log_handler(path, app_name, json_fields):
    handler = ConcurrentRotatingFileHandler(
        path, "a", 2 * 1024 * 1024 * 1024, 1)
    formatter = LogstashFormatter()
    formatter.defaults['@tags'] = ['collector', app_name]
    formatter.defaults['@fields'] = json_fields
    handler.setFormatter(formatter)
    return handler
Example #23
def setup_logging(name_, level=None, proj_home=None, attach_stdout=False):
    """
    Sets up generic logging to file with rotating files on disk

    :param: name_: the name of the logfile (not the destination!)
    :param: level: the level of the logging DEBUG, INFO, WARN
    :param: proj_home: optional, starting dir in which we'll
            check for (and create) 'logs' folder and set the
            logger there
    :return: logging instance
    """

    if level is None:
        config = load_config(extra_frames=1,
                             proj_home=proj_home,
                             app_name=name_)
        level = config.get('LOGGING_LEVEL', 'INFO')

    level = getattr(logging, level)

    logfmt = u'%(asctime)s %(msecs)03d %(levelname)-8s [%(process)d:%(threadName)s:%(filename)s:%(lineno)d] %(message)s'
    datefmt = TIMESTAMP_FMT
    # formatter = logging.Formatter(fmt=logfmt, datefmt=datefmt)

    formatter = MultilineMessagesFormatter(fmt=logfmt, datefmt=datefmt)
    formatter.multiline_marker = u''
    formatter.multiline_fmt = u'     %(message)s'

    formatter.converter = time.gmtime
    logging_instance = logging.getLogger(name_)
    logging_instance.propagate = False  # logging messages are not passed to the handlers of ancestor loggers (i.e., gunicorn)

    if proj_home:
        proj_home = os.path.abspath(proj_home)
        fn_path = os.path.join(proj_home, u'logs')
    else:
        fn_path = os.path.join(_get_proj_home(), u'logs')

    if not os.path.exists(fn_path):
        os.makedirs(fn_path)

    fn = os.path.join(fn_path, u'{0}.log'.format(name_.split(u'.log')[0]))
    rfh = ConcurrentRotatingFileHandler(filename=fn,
                                        maxBytes=10485760,
                                        backupCount=10,
                                        mode=u'a',
                                        encoding=u'UTF-8')  # 10MB file
    rfh.setFormatter(formatter)
    logging_instance.handlers = []
    logging_instance.addHandler(rfh)
    logging_instance.setLevel(level)

    if attach_stdout:
        stdout = logging.StreamHandler(sys.stdout)
        stdout.formatter = get_json_formatter()
        logging_instance.addHandler(stdout)

    return logging_instance
Example #24
 def __init__(self,
              filename,
              mode='a',
              maxBytes=0,
              backupCount=0,
              encoding=None,
              delay=0):
     filename = _relativeToLogPath(filename)
     ParentHandler.__init__(self, filename, mode, maxBytes, backupCount,
                            encoding, delay)
Example #25
    def __init__(self, app):
        self.log = logging.getLogger(app)

        formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')

        rotateHandler = ConcurrentRotatingFileHandler(LOGFILE, "a", 512 * 1024, 5)
        rotateHandler.setFormatter(formatter)

        self.log.addHandler(rotateHandler)
        self.log.setLevel(logging.DEBUG)
Example #26
def get_logger():
    logger = logging.getLogger('job')
    log_format = '%(asctime)s %(filename)s %(lineno)d %(levelname)s %(message)s'
    formatter = logging.Formatter(log_format)
    logfile = os.path.join(collector_agent_path, 'log/job.log')
    rotate_handler = ConcurrentRotatingFileHandler(logfile, "a", 2000000, 7)
    rotate_handler.setFormatter(formatter)
    logger.addHandler(rotate_handler)
    logger.setLevel(logging.DEBUG)
    return logger
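Note that, unlike Examples 3, 13 and 51, this version attaches a fresh handler on every call, so calling it repeatedly duplicates output. A guarded sketch (collector_agent_path is the same assumed module-level path):

def get_logger():
    logger = logging.getLogger('job')
    if not logger.handlers:  # configure only once
        log_format = '%(asctime)s %(filename)s %(lineno)d %(levelname)s %(message)s'
        logfile = os.path.join(collector_agent_path, 'log/job.log')
        rotate_handler = ConcurrentRotatingFileHandler(logfile, "a", 2000000, 7)
        rotate_handler.setFormatter(logging.Formatter(log_format))
        logger.addHandler(rotate_handler)
        logger.setLevel(logging.DEBUG)
    return logger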
Example #27
 def __init__(self, filename, mode, encoding=None, delay=0):
     """
     Use the specified filename for streamed logging
     """
     if codecs is None:
         encoding = None

     ConcurrentRotatingFileHandler.__init__(self, filename, mode,
                                            encoding=encoding, delay=delay)
     self.suffix = "%Y-%m-%d"
     self.suffix_time = ""
Example #28
    def __init__(self, json=False, stdout=True, name='scrapy-cluster',
                 dir='logs', file='main.log', bytes=25000000, backups=5,
                 level='INFO',
                 format='%(asctime)s [%(name)s] %(levelname)s: %(message)s',
                 propagate=False):
        '''
        @param stdout: Flag to write logs to stdout or file
        @param json: Flag to write json logs with objects or just the messages
        @param name: The logger name
        @param dir: The directory to write logs into
        @param file: The file name
        @param bytes: The max file size in bytes
        @param backups: The number of backups to keep of the file
        @param level: The logging level string
        @param format: The log format
        @param propagate: Allow the log to propagate to other ancestor loggers

        '''
        # set up logger
        self.logger = logging.getLogger(name)
        self.logger.setLevel(logging.DEBUG)
        self.logger.propagate = propagate
        self.json = json
        self.log_level = level
        self.format_string = format

        if stdout:
            # set up to std out
            stream_handler = logging.StreamHandler(sys.stdout)
            stream_handler.setLevel(logging.DEBUG)
            formatter = self._get_formatter(json)
            stream_handler.setFormatter(formatter)
            self.logger.addHandler(stream_handler)
            self._check_log_level(level)
            self.debug("Logging to stdout")
        else:
            # set up to file
            try:
                # try to make dir
                os.makedirs(dir)
            except OSError as exception:
                if exception.errno != errno.EEXIST:
                    raise

            file_handler = ConcurrentRotatingFileHandler(dir + '/' + file,
                                                         maxBytes=bytes,
                                                         backupCount=backups)
            file_handler.setLevel(logging.DEBUG)
            formatter = self._get_formatter(json)
            file_handler.setFormatter(formatter)
            self.logger.addHandler(file_handler)
            self._check_log_level(level)
            self.debug("Logging to file: {file}".format(
                    file=dir+'/'+file))
Example #29
def get_logger():
    """Setup and tune the application logger"""
    app_log = logging.getLogger("tornado.application")
    tornado.log.enable_pretty_logging()
    # TODO: user dir
    logfile = path.abspath('node.log')
    # Rotate log after reaching 512K, keep 5 old copies.
    rotate_handler = ConcurrentRotatingFileHandler(logfile, 'a', 512 * 1024, 5)
    formatter = logging.Formatter('%(asctime)s %(levelname)s %(message)s')
    rotate_handler.setFormatter(formatter)
    app_log.addHandler(rotate_handler)
    return app_log
Example #30
    def __init__(self, app):
        self.log = logging.getLogger(app)

        formatter = logging.Formatter(
            '%(asctime)s - %(name)s - %(levelname)s - %(message)s')

        rotateHandler = ConcurrentRotatingFileHandler(LOGFILE, "a", 512 * 1024,
                                                      5)
        rotateHandler.setFormatter(formatter)

        self.log.addHandler(rotateHandler)
        self.log.setLevel(logging.DEBUG)
Example #31
 def setup_logging(self,level=logging.DEBUG):
   logfmt = '%(levelname)s\t%(process)d [%(asctime)s]:\t%(message)s'
   datefmt= '%m/%d/%Y %H:%M:%S'
   self.formatter = logging.Formatter(fmt=logfmt,datefmt=datefmt)
   LOGGER = logging.getLogger(self.__class__.__name__)
   fn = os.path.join(os.path.dirname(__file__),'..','logs','%s.log' % self.__class__.__name__)   
   rfh = ConcurrentRotatingFileHandler(filename=fn,maxBytes=2097152,backupCount=5,mode='a') #2MB file
   rfh.setFormatter(self.formatter)
   LOGGER.handlers = []
   LOGGER.addHandler(rfh)
   LOGGER.setLevel(level)
   return LOGGER
Example #32
def get_handler(filename, level=None):
    ext = logging.getLevelName(level).lower() if level is not None else 'log'
    handler = ConcurrentRotatingFileHandler(''.join([LOG_PATH, filename, '.', ext]),
                                            mode='a',
                                            maxBytes=MAX_FILE_SIZE,
                                            backupCount=5,
                                            encoding='utf-8')
    handler.setFormatter(formatter)
    if level is not None:
        handler.setLevel(level)

    return handler
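Hypothetical usage of get_handler (LOG_PATH, MAX_FILE_SIZE and formatter are module-level names the function assumes; 'app' is illustrative):

logger = logging.getLogger('app')
logger.addHandler(get_handler('app'))                 # app.log, all records
logger.addHandler(get_handler('app', logging.ERROR))  # app.error, ERROR and up
logger.setLevel(logging.DEBUG)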
Example #33
    def __init__(self, filename, backupCount=0, when='D', interval=1,
                 utc=False, mode='a', delay=False, debug=False, suffix='log'):
        # Call __init__ of ConcurrentRotatingFileHandler
        ConcurrentRotatingFileHandler.__init__(self, filename,
                                               backupCount=backupCount,
                                               mode=mode, debug=debug,
                                               supress_abs_warn=True,
                                               maxBytes=0)
        self.when = when.upper()
        self.utc = utc
        if not suffix:
            suffix = 'log'
        if self.when == 'S':
            self.interval = 1  # one second
            self.suffix = "%Y-%m-%d_%H-%M-%S." + suffix
            self.extMatch = (r"^\d{4}-\d{2}-\d{2}_\d{2}-\d{2}-\d{2}(?=." +
                             suffix + ")")
        elif self.when == 'M':
            self.interval = 60  # one minute
            self.suffix = "%Y-%m-%d_%H-%M." + suffix
            self.extMatch = (r"^\d{4}-\d{2}-\d{2}_\d{2}-\d{2}(?=." + suffix +
                             ")")
        elif self.when == 'H':
            self.interval = 60 * 60  # one hour
            self.suffix = "%Y-%m-%d_%H." + suffix
            self.extMatch = r"^\d{4}-\d{2}-\d{2}_\d{2}(?=." + suffix + ")"
        elif self.when == 'D' or self.when == 'MIDNIGHT':
            self.interval = 60 * 60 * 24  # one day
            self.suffix = "%Y-%m-%d." + suffix
            self.extMatch = r"^\d{4}-\d{2}-\d{2}(?=." + suffix + ")"
        elif self.when.startswith('W'):
            self.interval = 60 * 60 * 24 * 7  # one week
            if len(self.when) != 2:
                raise ValueError("You must specify a day for weekly rollover "
                                 "from 0 to 6 (0 is Monday): %s" % self.when)
            if self.when[1] < '0' or self.when[1] > '6':
                raise ValueError("Invalid day specified for weekly rollover: "
                                 "%s" % self.when)
            self.dayOfWeek = int(self.when[1])
            self.suffix = "%Y-%m-%d." + suffix
            self.extMatch = r"^\d{4}-\d{2}-\d{2}(?=." + suffix + ")"
        else:
            raise ValueError("Invalid rollover interval specified: %s" %
                             self.when)

        self.extMatch = re.compile(self.extMatch)
        self.interval = self.interval * interval  # multiply by units requested
        if os.path.exists(filename):
            t = os.stat(filename)[ST_MTIME]
        else:
            t = int(time.time())
        self.rolloverAt = self.computeRollover(t)
Example #34
def main():
    """
    Multithreaded post downloader: downloads posts with multiple threads,
    grouping files into folders by blog name and then by publication date.
    python3 download.py 1000 10  -> start 10 threads to download 1000 posts
    """
    args = sys.argv
    if len(args) == 2:
        limit = int(args[1])
        thread_num = 8
    elif len(args) == 3:
        limit = int(args[1])
        thread_num = int(args[2])
    else:
        limit = 1
        thread_num = 1
    begin = time.time()

    # Logging initialization
    if not os.path.isdir('log'):
        os.mkdir('log')
    log_file_name = '%s-%s.log' % (os.path.basename(__file__).replace(
        '.py', ''), datetime.date.today())
    log_full_file_name = os.path.join('log', log_file_name)
    log = getLogger()
    rotateHandler = ConcurrentRotatingFileHandler(log_full_file_name, "a",
                                                  512 * 1024, 0, 'utf-8')
    datefmt_str = '%Y-%m-%d %H:%M:%S'
    format_str = "[%(asctime)s - %(levelname)s - %(filename)s - LINE:%(lineno)d] %(message)s"
    formatter = Formatter(format_str, datefmt_str)
    rotateHandler.setFormatter(formatter)
    log.addHandler(rotateHandler)
    log.setLevel(INFO)

    log.info('Starting: launching %s threads to download %s posts' % (thread_num, limit))
    # Create a thread lock
    lock = threading.Lock()
    # Create a thread pool
    pool = threadpool.ThreadPool(thread_num)
    global tqdm_list
    for x in range(thread_num):
        tqdm_list[x] = 0
    # print(tqdm_list)
    # print(get_position())
    # return False
    requests_list = []
    progress = tqdm(total=limit, desc='total')
    for x in range(limit):
        requests_list.append(([x, lock, log, progress, thread_num], None))
    requests_res = threadpool.makeRequests(download, requests_list)
    [pool.putRequest(req) for req in requests_res]
    pool.wait()
Example #35
def configuration_logging(**kwargs):
    log_dir = TORNADO_LOG_SETTINGS['log_file_prefix']
    TORNADO_LOG_SETTINGS.update(**kwargs)
    port = settings.TORNADO_SERVER_SETTINGS['port']
    for log_name, logger in [
        ('tornado_access_%s.log' % port, tornado.log.access_log),
        ('tornado_app_%s.log' % port, tornado.log.app_log),
        ('tornado_gen_%s.log' % port, tornado.log.gen_log)
    ]:
        log_file = os.path.join(log_dir, log_name)
        logger.setLevel(
            getattr(logging, TORNADO_LOG_SETTINGS['logging'].upper()))
        if log_file:
            rotate_mode = TORNADO_LOG_SETTINGS['log_rotate_mode']
            if rotate_mode == 'size':
                channel = RFHandler(
                    log_file, "a", TORNADO_LOG_SETTINGS['log_file_max_size'],
                    TORNADO_LOG_SETTINGS['log_file_num_backups'])
                #
                # channel = logging.handlers.RotatingFileHandler(
                #     filename=log_file,
                #     maxBytes=TORNADO_LOG_SETTINGS['log_file_max_size'],
                #     backupCount=TORNADO_LOG_SETTINGS['log_file_num_backups']
                # )
            elif rotate_mode == 'time':
                channel = logging.handlers.TimedRotatingFileHandler(
                    filename=log_file,
                    when=TORNADO_LOG_SETTINGS['log_rotate_when'],
                    interval=TORNADO_LOG_SETTINGS['log_rotate_interval'],
                    backupCount=TORNADO_LOG_SETTINGS['log_file_num_backups'])
            else:
                error_message = 'The value of log_rotate_mode option should be ' + \
                                '"size" or "time", not "%s".' % rotate_mode
                raise ValueError(error_message)
            channel.setFormatter(
                tornado.log.LogFormatter(
                    fmt=TORNADO_LOG_SETTINGS['log_fmt'],
                    datefmt=TORNADO_LOG_SETTINGS['log_datefmt'],
                    color=False))
            logger.addHandler(channel)

        if (TORNADO_LOG_SETTINGS['log_to_stderr']
                or (TORNADO_LOG_SETTINGS['log_to_stderr'] is None
                    and not logger.handlers)):
            # Set up color if we are in a tty and curses is installed
            channel = logging.StreamHandler()
            channel.setFormatter(
                tornado.log.LogFormatter(
                    fmt=TORNADO_LOG_SETTINGS['log_fmt'],
                    datefmt=TORNADO_LOG_SETTINGS['log_datefmt']))
            logger.addHandler(channel)
Example #36
    def __init__(self, name):
        logging.Logger.__init__(self, name, self.level)

        if self.filename is not None:
            # Rotate log after reaching sizelimit, keep 25 old copies.
            handler = ConcurrentRotatingFileHandler(self.filename, "a", self.sizelimit, 25)
        else:
            handler = logging.StreamHandler()

        if self.level == logging.DEBUG:
            handler.setFormatter(self.debug_formatter())
        else:
            handler.setFormatter(self.info_formatter())
        self.addHandler(handler)
Example #37
def init_logger(config):
    # Remove exist handlers
    logger = logging.getLogger(config.LOGGER_NAME)
    del logger.handlers[:]
    Rthandler = ConcurrentRotatingFileHandler('/tmp/snapperapp.log', maxBytes=config.LOG_FILE_SIZE, backupCount=config.LOG_BKUP_FILES)
    Rthandler.setFormatter(Formatter(config.LOG_FORMAT))
    logger.addHandler(Rthandler)
    logger.setLevel(config.LOG_LEVEL)

    if config.LOG_CONSOLE == True:
        console = logging.StreamHandler()
        console.setFormatter(Formatter(config.LOG_FORMAT))
        logger.addHandler(console)
        logger.setLevel(config.LOG_CONSOLE_LEVEL)
Example #38
 def emit(self, record):
     """
     Emit a record.

     Always check time 
     """
     try:
         if self.check_baseFilename(record):
             self.build_baseFilename()
         ConcurrentRotatingFileHandler.emit(self, record)
     except (KeyboardInterrupt, SystemExit):
         raise
     except:
         self.handleError(record)
Example #39
    def init_logger(cls, port):
        formatter = LogFormatter(fmt=cls.__fmt.format(port=port),
                                 datefmt="",
                                 color=False)

        access_log_handler = ConcurrentRotatingFileHandler(
            filename=os.path.join(ServerConfig["log_dir"], "access.log"))
        access_log_handler.setFormatter(formatter)
        access_log.addHandler(access_log_handler)

        server_log_handler = ConcurrentRotatingFileHandler(
            filename=os.path.join(ServerConfig['log_dir'], 'server.log'),
            maxBytes=128 * 1024 * 1024,
            backupCount=5,
            encoding='utf8')
        server_log_handler.setFormatter(formatter)
        gen_log.addHandler(server_log_handler)
        app_log.addHandler(server_log_handler)

        access_log.setLevel(logging.INFO)
        gen_log.setLevel(getattr(logging, ServerConfig['log_level'].upper()))
        app_log.setLevel(getattr(logging, ServerConfig['log_level'].upper()))

        access_log.propagate = app_log.propagate = gen_log.propagate = False
        return
Example #40
def setup_logging(name_, level=None, proj_home=None):
    """
    Sets up generic logging to file with rotating files on disk

    :param: name_: the name of the logfile (not the destination!)
    :param: level: the level of the logging DEBUG, INFO, WARN
    :param: proj_home: optional, starting dir in which we'll
            check for (and create) 'logs' folder and set the
            logger there
    :return: logging instance
    """

    if level is None:
        config = load_config(extra_frames=1, proj_home=proj_home)
        level = config.get('LOGGING_LEVEL', 'INFO')

    level = getattr(logging, level)

    logfmt = u'%(asctime)s,%(msecs)03d %(levelname)-8s [%(process)d:%(threadName)s:%(filename)s:%(lineno)d] %(message)s'
    datefmt = u'%Y-%m-%d %H:%M:%S'
    #formatter = logging.Formatter(fmt=logfmt, datefmt=datefmt)

    formatter = MultilineMessagesFormatter(fmt=logfmt, datefmt=datefmt)
    formatter.multiline_marker = ''
    formatter.multiline_fmt = '     %(message)s'

    formatter.converter = time.gmtime
    logging_instance = logging.getLogger(name_)

    if proj_home:
        proj_home = os.path.abspath(proj_home)
        fn_path = os.path.join(proj_home, 'logs')
    else:
        fn_path = os.path.join(_get_proj_home(), 'logs')

    if not os.path.exists(fn_path):
        os.makedirs(fn_path)

    fn = os.path.join(fn_path, '{0}.log'.format(name_.split('.log')[0]))
    rfh = ConcurrentRotatingFileHandler(filename=fn,
                                        maxBytes=10485760,
                                        backupCount=10,
                                        mode='a',
                                        encoding='UTF-8')  # 10MB file
    rfh.setFormatter(formatter)
    logging_instance.handlers = []
    logging_instance.addHandler(rfh)
    logging_instance.setLevel(level)

    return logging_instance
Example #41
    def initialize_logger(self):

        # Invoke logging with a concurrent logging module since many of these
        # processes will likely be writing to scan.log at the same time
        self.dbg_h = logging.getLogger('dbg_log')
        dbglog = '%s/%s' % (self.log_path, 'dbg.log')
        dbg_rotateHandler = ConcurrentRotatingFileHandler(dbglog, "a")
        self.dbg_h.addHandler(dbg_rotateHandler)
        self.dbg_h.setLevel(logging.ERROR)

        self.scan_h = logging.getLogger('scan_log')
        scanlog = '%s/%s' % (self.log_path, 'scan.log')
        scan_rotateHandler = ConcurrentRotatingFileHandler(scanlog, "a")
        self.scan_h.addHandler(scan_rotateHandler)
        self.scan_h.setLevel(logging.INFO)
Example #42
    def __cmd__(self, lock, command):

        log = getLogger()

        # build log path
        self.__log_id__ = self.__runner_requestor__ + '_' + time.strftime(
            "%Y_%H_%M_%S") + '.log'
        logfile = os.path.join(log_path, self.__log_id__)

        # Rotate log after reaching 10M, keep 5 old copies.
        rotateHandler = ConcurrentRotatingFileHandler(logfile, "a",
                                                      1024 * 1024 * 10, 5)
        log.addHandler(rotateHandler)

        p = subprocess.Popen(command,
                             shell=True,
                             universal_newlines=True,
                             stdout=subprocess.PIPE,
                             stderr=subprocess.PIPE)
        (stdout, errout) = p.communicate()
        if stdout:
            lock.acquire()
            self.__return_content__ += command + ':' + stdout
            lock.release()
            log.setLevel(INFO)
            log.info(
                time.strftime("%b %d %Y %H:%M:%S:  ") + command + ':' + stdout)
        if errout:
            lock.acquire()
            self.__return_content__ += command + ':' + errout
            lock.release()
            log.setLevel(ERROR)
            log.error(
                time.strftime("%b %d %Y %H:%M:%S:  ") + command + ':' + errout)
Example #43
def set_app_log(app):
    """
    Configure Flask's built-in logger
    :param app:
    :return:
    """
    l_format = logging.Formatter(
        "%(asctime)s [%(levelname)s] [%(filename)s %(funcName)s %(lineno)d]: %(message)s "
    )
    r_handler = LogHandler("app.log",
                           maxBytes=20480000,
                           backupCount=10,
                           encoding='UTF-8')
    r_handler.setLevel(logging.INFO)
    r_handler.setFormatter(l_format)
    app.logger.addHandler(r_handler)
Example #44
def log_add():
    logfile = os.path.abspath("mylogfile.log")
    log = getLogger()
    rotate_handler = ConcurrentRotatingFileHandler(logfile, "a", 1024*1024, 5)
    log.addHandler(rotate_handler)
    log.setLevel(INFO)
    log.info("Here is a very exciting log message for you.")
Example #45
 def __init__(self, logfile, log_size_limit, log_rotate_num, log_level):
     self.logger = logging.getLogger()
     try:
         self.rotateHandler = ConcurrentRotatingFileHandler(
             logfile, "a", log_size_limit, log_rotate_num)
     except Exception as e:
         print('INTERNAL_ERR')
Example #46
 def initializeLogging(self,**kwargs):
   logfmt = '%(levelname)s\t%(process)d [%(asctime)s]:\t%(message)s'
   datefmt= '%m/%d/%Y %H:%M:%S'
   formatter = logging.Formatter(fmt=logfmt,datefmt=datefmt)
   LOGGER = logging.getLogger('PipelineMongoConnection')
   if not LOGGER.handlers:
     default_fn = os.path.join(os.path.dirname(__file__),'..','logs','PipelineMongoConnection.log')   
     fn = kwargs.get('logfile',default_fn)
     rfh = ConcurrentRotatingFileHandler(filename=fn,maxBytes=2097152,backupCount=10,mode='a') #2MB file
     rfh.setFormatter(formatter)
     ch = logging.StreamHandler() #console handler
     ch.setFormatter(formatter)
     #LOGGER.addHandler(ch)
     LOGGER.addHandler(rfh)
     LOGGER.setLevel(logging.DEBUG)
   self.logger = LOGGER
Example #47
    def __init__(self):
        logger = logging.getLogger()
        logger.propagate = False
        formatter = logging.Formatter('%(levelname)s-%(asctime)s-%(funcName)s-%(message)s')

        filehandler = RFHandler("/var/log/fanclley.log",'a',1*1024*1024, 10000)
        filehandler.setFormatter(formatter)
        filehandler.suffix = "%Y%m%d-%H%M.log"
        logger.setLevel(logging.INFO)
        logger.addHandler(filehandler)

        terminal_handler = logging.StreamHandler()
        terminal_handler.setFormatter(formatter)
        terminal_handler.setLevel(logging.DEBUG)
        logger.addHandler(terminal_handler)

        self._logger = logger
Example #48
def init_logging(stdout_enabled=True):
    root_logger = logging.getLogger()
    root_logger.setLevel(logging.getLevelName(config.get('log_level', 'INFO')))

    # root_logger.setLevel(logging.WARN)

    logging.getLogger('requests.packages.urllib3.connectionpool').setLevel(logging.ERROR)
    logging.getLogger('boto').setLevel(logging.ERROR)
    logging.getLogger('urllib3.connectionpool').setLevel(logging.WARN)

    log_formatter = logging.Formatter(
            fmt='%(asctime)s | ' + ECID + ' | %(name)s | %(processName)s | %(levelname)s | %(message)s',
            datefmt='%m/%d/%Y %I:%M:%S %p')

    stdout_logger = logging.StreamHandler(sys.stdout)
    stdout_logger.setFormatter(log_formatter)
    root_logger.addHandler(stdout_logger)

    if stdout_enabled:
        stdout_logger.setLevel(logging.getLevelName(config.get('log_level', 'INFO')))

    # base log file

    log_file_name = '%s/migrator.log' % config.get('log_dir')

    # ConcurrentRotatingFileHandler
    rotating_file = ConcurrentRotatingFileHandler(filename=log_file_name,
                                                  mode='a',
                                                  maxBytes=404857600,
                                                  backupCount=0)
    rotating_file.setFormatter(log_formatter)
    rotating_file.setLevel(logging.INFO)

    root_logger.addHandler(rotating_file)

    error_log_file_name = '%s/migrator_errors.log' % config.get('log_dir')
    error_rotating_file = ConcurrentRotatingFileHandler(filename=error_log_file_name,
                                                        mode='a',
                                                        maxBytes=404857600,
                                                        backupCount=0)
    error_rotating_file.setFormatter(log_formatter)
    error_rotating_file.setLevel(logging.ERROR)

    root_logger.addHandler(error_rotating_file)
Example #49
def setup_logger(log_filename):
    logger = logging.getLogger()
    numeric_level = getattr(logging, settings.LOGGING['level'].upper(), None)
    if not isinstance(numeric_level, int):
        raise ValueError('incorrect log level %s in settings' % settings.LOGGING['level'])

    logger.setLevel(numeric_level)
    formatter = logging.Formatter('[%(asctime)s - %(name)s - %(levelname)s] %(message)s')
    # console handler
    ch = logging.StreamHandler()
    ch.setFormatter(formatter)
    logger.addHandler(ch)

    # file handler
    log_filepath = os.path.join(settings.LOGGING['dirname'], log_filename)
    rotate_handler = RFHandler(log_filepath, "a", settings.LOGGING['max_size'] * 1024 * 1024, settings.LOGGING['backup_num'])
    rotate_handler.setFormatter(formatter)
    logger.addHandler(rotate_handler)
Example #50
def configure_logger(logger, log_level, docker_id=None, log_format=LOG_FORMAT,
                     log_dir=LOG_DIR):
    """
    Configures logging to the file 'calico.log' in the specified log directory

    If the logs are not coming from calico_kubernetes.py, format the log to
     include the filename of origin

    Additionally configures a stderr handler which logs INFO and
    above to stderr.

    :param logger: logger object to configure
    :param log_level: level at which logger starts logging.
    :param log_format: Indicates which logging scheme to use.
    :param log_dir: Directory where calico.log lives. If None set to default
    :return:
    """
    if not os.path.exists(log_dir):
        os.makedirs(log_dir)

    formatter = logging.Formatter(log_format)
    docker_filter = IdentityFilter(identity=docker_id)

    file_hdlr = ConcurrentRotatingFileHandler(filename=os.path.join(log_dir, 'calico.log'),
                                              maxBytes=1000000,
                                              backupCount=5)
    file_hdlr.setFormatter(formatter)
    if docker_id:
        file_hdlr.addFilter(docker_filter)

    # Add file handler and set log level.
    logger.addHandler(file_hdlr)
    logger.setLevel(log_level)

    # Create a stderr handler and apply it to the logger.
    # This only logs INFO and above to stderr.
    stderr_hdlr = logging.StreamHandler(sys.stderr)
    stderr_hdlr.setLevel(logging.INFO)
    stderr_hdlr.setFormatter(formatter)
    logger.addHandler(stderr_hdlr)
Example #51
    def __init__(self, module=''):

        today_datetime = dt.now()
        today_date = dt.date(today_datetime)
        string_date = str(today_date)

        if module == '':
            file_name = LOGGER_FILE + string_date
        else:
            file_name = LOGGER_FILE + module + '-' + string_date

        logger = logging.getLogger(file_name)  # log_namespace can be replaced with your namespace
        logger.setLevel(logging.DEBUG)
        if not logger.handlers:
            file_name = os.path.join(LOGGING_DIR,
                                     '%s.log' % file_name)  # usually I keep the LOGGING_DIR defined in some global settings file
            handler = ConcurrentRotatingFileHandler(file_name)
            formatter = logging.Formatter('%(asctime)s %(levelname)s:%(module)s:%(message)s')
            handler.setFormatter(formatter)
            handler.setLevel(logging.DEBUG)
            logger.addHandler(handler)
        self._logger = logger
Example #52
    def __init__(self, url, dirname, secret):
        log_level = logging.DEBUG 
        log_dir = os.getcwd()
        log_max_size = 78 * 1024 * 1024
        log_max_rotate = 9
        
        LOG_LONG_FORMAT = '[%(module)s][%(funcName)s][%(lineno)d][%(levelname)s][%(message)s]'
        LOG_RECORD_TIME = '[%(asctime)s]'
        LOG_ROOT_NAME = 'server-node.log'
        
        log = logging.getLogger()
        log_format = LOG_RECORD_TIME + \
                    '[' + time.strftime("%Z", time.localtime()) + ']' + \
                    '[%(process)d]' + \
                    LOG_LONG_FORMAT
        log_name = LOG_ROOT_NAME + '.' + str(socket.gethostname())
        log_file = os.path.join(log_dir, log_name)
        formatter = logging.Formatter(log_format)
        try:
            rotate_handler = ConcurrentRotatingFileHandler(log_file, mode="a", maxBytes=log_max_size, backupCount=log_max_rotate)
            rotate_handler.setFormatter(formatter)
            log.addHandler(rotate_handler)
        except:
            logging.basicConfig(filename=log_file, level=logging.DEBUG, format='[%(asctime)s ][%(levelname)s][%(message)s]', datefmt='%m/%d/%Y %I:%M:%S %p')
        
        log.setLevel(log_level)
        logging.info('Logging level has been set to DEBUG mode')
        logging.info('New node started with url <{}> serving directory <{}> with secret<{}>'.format(url, dirname, secret))
    
        #log_file = "server-node.log"
        #logging.basicConfig(filename=log_file, level=logging.DEBUG, format='[%(asctime)s ][%(levelname)s][%(message)s]', datefmt='%m/%d/%Y %I:%M:%S %p')

        self.url = url
        self.dirname = dirname
        self.secret = secret
        self.known = set()
Example #53
def configure_logger(logger, log_level, log_format=LOG_FORMAT, 
                     log_to_stdout=True, log_dir=LOG_DIR):
    """
    Configures logging to the file 'calico.log' in the specified log directory

    If the logs are not coming from calico_kubernetes.py, format the log to
     include the filename of origin

    Additionally configures a stdout handler which logs INFO and
    above to stdout.

    :param logger: logger object to configure
    :param log_level: level at which logger starts logging.
    :param log_format: Indicates which logging scheme to use.
    :param log_to_stdout: If True, configure the stdout stream handler.
    :param log_dir: Directory where calico.log lives. If None set to default
    :return:
    """
    if not os.path.exists(log_dir):
        os.makedirs(log_dir)

    formatter = logging.Formatter(log_format)

    file_hdlr = ConcurrentRotatingFileHandler(filename=os.path.join(log_dir, 'calico.log'),
                                              maxBytes=1000000,
                                              backupCount=5)
    file_hdlr.setFormatter(formatter)

    logger.addHandler(file_hdlr)
    logger.setLevel(log_level)

    # Create an stdout handler and apply it to the logger
    if log_to_stdout:
        stdout_hdlr = logging.StreamHandler(sys.stdout)
        stdout_hdlr.setLevel(log_level)
        stdout_hdlr.setFormatter(formatter)
        logger.addHandler(stdout_hdlr)
Example #54
    try:
        from ads.ADSCachedExports import ADSRecords, init_lookers_cache
        from lib import conversions
    except ImportError:
        print "Unable to import ads.ADSExports.ADSRecords!"
        print "We will be unable to query ADS-classic for records!"

INIT_LOOKERS_CACHE = init_lookers_cache

logfmt = "%(levelname)s\t%(process)d [%(asctime)s]:\t%(message)s"
datefmt = "%m/%d/%Y %H:%M:%S"
formatter = logging.Formatter(fmt=logfmt, datefmt=datefmt)
logger = logging.getLogger("ReadRecords")
if not logger.handlers:
    fn = os.path.join(os.path.dirname(__file__), "..", "logs", "ReadRecords.log")
    rfh = ConcurrentRotatingFileHandler(filename=fn, maxBytes=2097152, backupCount=10, mode="a")  # 2MB file
    rfh.setFormatter(formatter)
    ch = logging.StreamHandler()  # console handler
    ch.setFormatter(formatter)
    #  logger.addHandler(ch)
    logger.addHandler(rfh)
logger.setLevel(logging.INFO)


def canonicalize_records(records, targets=None):
    """
  Takes a dict of {bibcode:fingerprint} and resolves each bibcode to its canonical.

  Finds all alternates associated with that bibcode and constructs the full JSON_fingerprint
  from all of these associated records
  """
Example #55
def get_log_file_handler(path):
    handler = ConcurrentRotatingFileHandler(
        path, "a", 2 * 1024 * 1024 * 1024, 1)
    handler.setFormatter(logging.Formatter(
        "%(asctime)s [%(levelname)s] -> %(message)s"))
    return handler
Example #56
def init():

    logger['root_logger'] = logging.getLogger()

    logfile = os.path.abspath('debug.log')
    debug_handler = ConcurrentRotatingFileHandler(logfile, 'a', 10485760, 5)
    debug_handler.setLevel(logging.DEBUG)
    debug_handler.setFormatter(default_formatter)
    logger['root_logger'].addHandler(debug_handler)

    logfile = os.path.abspath('info.log')
    info_handler = ConcurrentRotatingFileHandler(logfile, 'a', 10485760, 5)
    info_handler.setLevel(logging.INFO)
    info_handler.setFormatter(default_formatter)
    logger['root_logger'].addHandler(info_handler)

    logfile = os.path.abspath('error.log')
    error_handler = ConcurrentRotatingFileHandler(logfile, 'a', 10485760, 5)
    error_handler.setLevel(logging.ERROR)
    error_handler.setFormatter(default_formatter)
    logger['root_logger'].addHandler(error_handler)
Example #57
        session = requests.Session()
        if self.auth_token:
            _log.debug("Using Auth Token: %s", self.auth_token)
            session.headers.update({'Authorization': 'Bearer ' + self.auth_token})
        response = session.get(path, verify=False)
        return json.loads(response.text)


if __name__ == '__main__':
    # Configure logging.
    log_file = "/var/log/calico/kubernetes/policy/agent.log"
    if not os.path.exists(os.path.dirname(log_file)):
        os.makedirs(os.path.dirname(log_file))
    formatter = logging.Formatter(LOG_FORMAT)
    file_hdlr = ConcurrentRotatingFileHandler(filename=log_file,
                                              maxBytes=1000000,
                                              backupCount=5)
    file_hdlr.setFormatter(formatter)
    _log.addHandler(file_hdlr)
    _log.setLevel(LOG_LEVEL)

    # Log to stderr as well.
    stdout_hdlr = logging.StreamHandler(sys.stderr)
    stdout_hdlr.setLevel(LOG_LEVEL)
    stdout_hdlr.setFormatter(formatter)
    _log.addHandler(stdout_hdlr)

    try:
        PolicyAgent().run()
    except Exception:
        # Log the exception
Example #58
from lustre import lustreapi

import subprocess

from celery import Celery
from celery.decorators import periodic_task
from billiard import current_process

import memcache

import shutil

clib = ctypes.CDLL('libc.so.6', use_errno=True)

logger = logging.getLogger(__name__)
rotateHandler = ConcurrentRotatingFileHandler("/var/log/cmover_del.log", "a", 128*1024*1024)
formatter = logging.Formatter('%(asctime)s - %(levelname)s [%(filename)s:%(lineno)s - %(funcName)20s()] - %(message)s')
rotateHandler.setFormatter(formatter)
logger.addHandler(rotateHandler)

REPORT_INTERVAL = 30 # seconds

with open('rabbitmq/rabbitmq.conf','r') as f:
    rabbitmq_server = f.read().rstrip()

with open('rabbitmq/rabbitmq_%s.conf'%USERNAME,'r') as f:
    rabbitmq_password = f.read().rstrip()

app = Celery(USERNAME, broker='amqp://%s:%s@%s/%s'%(USERNAME, rabbitmq_password, rabbitmq_server, USERNAME))
app.config_from_object(settings)
Example #59
def getLogger(logname='root'):
    
  logger = logging.getLogger(logname)
  logger.setLevel(logging.DEBUG)
                            
  DIR = '../log/'
  if DIR: # not none
    os.makedirs(DIR, exist_ok=True)
  #================================
  # File Handler
  #================================
  LOG_FILENAME = os.path.abspath(DIR + logname + '.err')
  handler = ConcurrentRotatingFileHandler(LOG_FILENAME, "a", 200*1024*1024, 5)
  handler.setLevel(logging.WARN)
  formatter = logging.Formatter("%(asctime)s\t%(name)s-%(process)s-%(threadName)s\t%(levelname)s\t%(message)s")
  handler.setFormatter(formatter)
  logger.addHandler(handler)
  
  #================================
  # Standard Output Handler: INFO ONLY
  #================================
  # handler = logging.StreamHandler(sys.stdout)
  LOG_FILENAME = os.path.abspath(DIR + logname + '.info')
  handler = ConcurrentRotatingFileHandler(LOG_FILENAME, "a", 200*1024*1024, 5)
  handler.setLevel(logging.DEBUG)
  formatter = logging.Formatter("%(asctime)s\t%(name)s-%(process)s-%(threadName)s\t%(message)s")
  handler.setFormatter(formatter)
  handler.addFilter(LevelFilter(logging.INFO))
  logger.addHandler(handler)
  return logger
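LevelFilter is used above but not defined in the snippet; given the "INFO ONLY" comment, it presumably passes only records at a single level. A minimal sketch of such a filter:

import logging

class LevelFilter(logging.Filter):
    def __init__(self, level):
        logging.Filter.__init__(self)
        self.level = level

    def filter(self, record):
        # keep only records at exactly the configured level
        return record.levelno == self.level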
Example #60
#!/usr/bin/env python
#-*- coding: utf-8 -*-

import logging
import logging.handlers
from cloghandler import ConcurrentRotatingFileHandler
import time
from random import choice
logger = logging.getLogger()
#handler = logging.handlers.RotatingFileHandler("logs/output.log","a",1024*1024*100,50)
handler = ConcurrentRotatingFileHandler("logs/output.log", "a", 1024*1024*100, 50)
formatter = logging.Formatter('%(asctime)s - %(levelname)s: %(message)s')
handler.setFormatter(formatter)
logger.addHandler(handler)
logger.setLevel(logging.DEBUG)

ip_array = ['172.16.0.111','192.168.2.9','10.8.0.3','172.15.3.22']
protocol_array = ['tcp','http','udp','icmp']
method_arry = ['PUT','GET','POST']
request_array = ['/','/hello/test','/happy/go','/sos/test']
header_arry = ['NULL','main:default:WAP:LABEL_CX-G_NORMAL_IP',\
                'apikey=7f8c4da3ce9849ffb2134f075201c45a&language=zh-CN&details=true',\
                'tcpmux: peer(218.8.127.165:9800) has 40 connections',\
                'iid=9529137899&device_id=35501623937&ac=4g&channel=store_aliyunos&aid=32&app_name=video_article&version_code=588&version_name=5.8.8&device_platform=android&user_version=1.1.8&ab_version=126489%2C125853%2C124728&ab_feature=z1&ssmix=a&device_type=Bird+D10&device_brand=BIRD&language=zh&os_api=22&os_version=5.1&uuid=862807030595172&openudid=32f5f18b092ba71e&manifest_version_code=118&resolution=720*1280&dpi=320&update_version_code=5881&_rticket=1496198125276']

while 1:
    num = choice(range(1,20000))
    for i in range(num):
        message = choice(ip_array) + ' ' + choice(protocol_array) + ' ' +\
        choice(method_arry) + ' ' + choice(request_array) + ' ' + choice(header_arry)
        logger.info(message)