def adjust_log_level(self):
    """
    Change the log level of all handlers if needed
    """
    self.logger.debug("Adjust log level if needed")
    if not self.parsed_arguments.verbose:
        self.logger.debug("Log level remains unchanged")
        return
    self.logger.debug("Changing log level to " + str(logging.DEBUG))
    logging.getLogger(self.base_logger_name).setLevel(logging.DEBUG)
    for handler in logging.getLogger(self.base_logger_name).handlers:
        # When upgrading to python 2.7
        # handler.setStream(sys.stderr)
        if self.parsed_arguments.verbose == 1:
            handler.setFormatter(
                coloredlogs.ColoredFormatter(
                    self.log_format_verbose_1,
                    level_styles=self.log_level_styles,
                    field_styles=self.log_field_styles))
        elif self.parsed_arguments.verbose >= 2:
            handler.setLevel(logging.DEBUG)
            handler.setFormatter(
                coloredlogs.ColoredFormatter(
                    self.log_format_verbose_2,
                    level_styles=self.log_level_styles,
                    field_styles=self.log_field_styles))
    self.logger.debug("Log level changed to " + str(logging.DEBUG))
    def initLogger(self):

        self.logger.handlers = []
        if config.params['debug']:
            self.logger.setLevel(logging.DEBUG)
            formatter = logging.Formatter('%(asctime)s ' +
                    '%(levelname)s - %(filename)s:%(lineno)s - %(message)s')
        else:
            self.logger.setLevel(logging.INFO)
            formatter = logging.Formatter('%(asctime)s %(levelname)s - %(message)s')
        
        fn = self.logDir + '/' + self.baseName + '.log'
            
        if os.path.exists(fn):
            os.rename(fn, fn + '.prev')
            
        fh = logging.FileHandler(filename=fn)
        fh.setLevel(logging.DEBUG)
        fh.setFormatter(formatter)
        self.logger.addHandler(fh)
        
        if config.params['verbose'] is True:
            if config.params['debug']:
                #sformatter = logging.Formatter('%(asctime)s %(levelname)s - %(filename)s:%(lineno)s - %(message)s')
                sformatter = coloredlogs.ColoredFormatter(fmt='%(asctime)s,%(msecs)03d %(levelname)s - %(filename)s:%(lineno)s - %(message)s',
                                                field_styles={'levelname': {'color': 'cyan'}})
            else:
                #sformatter = logging.Formatter('%(asctime)s %(levelname)s - %(message)s')
                sformatter = coloredlogs.ColoredFormatter(fmt='%(asctime)s,%(msecs)03d %(levelname)s - %(message)s',
                                                field_styles={'levelname': {'color': 'cyan'}})
            sh = logging.StreamHandler()
            sh.setLevel(logging.DEBUG)
            sh.setFormatter(sformatter)
            self.logger.addHandler(sh) 
Example #3
def init_log(logger, loglevel=0, no_ansi=False):
    """
    Initializes logging.
    Prints logs to console with level defined by loglevel
    Also prints verbose log to the multiqc data directory if available.
    (multiqc_data/multiqc.log)

    Args:
        loglevel (str): Determines the level of the log output.
    """
    # File for logging
    global log_tmp_dir, log_tmp_fn
    log_tmp_dir = tempfile.mkdtemp()
    log_tmp_fn = os.path.join(log_tmp_dir, "multiqc.log")

    # Logging templates
    debug_template = "[%(asctime)s] %(name)-50s [%(levelname)-7s]  %(message)s"
    info_template = "|%(module)18s | %(message)s"

    # Base level setup
    logger.setLevel(getattr(logging, "DEBUG"))

    # Automatically set no_ansi if not a tty terminal
    if not no_ansi:
        if not sys.stderr.isatty() and not force_term_colors():
            no_ansi = True

    # Set up the console logging stream
    console = logging.StreamHandler()
    console.setLevel(getattr(logging, loglevel))
    level_styles = coloredlogs.DEFAULT_LEVEL_STYLES
    level_styles["debug"] = {"faint": True}
    field_styles = coloredlogs.DEFAULT_FIELD_STYLES
    field_styles["module"] = {"color": "blue"}
    if loglevel == "DEBUG":
        if no_ansi:
            console.setFormatter(logging.Formatter(debug_template))
        else:
            console.setFormatter(
                coloredlogs.ColoredFormatter(fmt=debug_template,
                                             level_styles=level_styles,
                                             field_styles=field_styles))
    else:
        if no_ansi:
            console.setFormatter(logging.Formatter(info_template))
        else:
            console.setFormatter(
                coloredlogs.ColoredFormatter(fmt=info_template,
                                             level_styles=level_styles,
                                             field_styles=field_styles))
    logger.addHandler(console)

    # Now set up the file logging stream if we have a data directory
    file_handler = logging.FileHandler(log_tmp_fn, encoding="utf-8")
    file_handler.setLevel(getattr(logging,
                                  "DEBUG"))  # always DEBUG for the file
    file_handler.setFormatter(logging.Formatter(debug_template))
    logger.addHandler(file_handler)
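A minimal usage sketch for the init_log() above (the logger name and level are illustrative; no_ansi=True is passed so the force_term_colors() helper, which lives elsewhere in MultiQC, is never needed):

import logging

# Hypothetical caller: init_log() and its module-level imports (tempfile, os,
# sys, coloredlogs) are assumed to be in scope as shown above.
logger = logging.getLogger("multiqc")
init_log(logger, loglevel="INFO", no_ansi=True)

logger.info("shown on the console at INFO level")
logger.debug("written only to the temporary multiqc.log file")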
Example #4
File: log.py  Project: z395802609/MultiQC
def init_log(logger, loglevel=0, no_ansi=False):
    """
    Initializes logging.
    Prints logs to console with level defined by loglevel
    Also prints verbose log to the multiqc data directory if available.
    (multiqc_data/multiqc.log)

    Args:
        loglevel (str): Determines the level of the log output.
    """
    # File for logging
    global log_tmp_dir, log_tmp_fn
    log_tmp_dir = tempfile.mkdtemp()
    log_tmp_fn = os.path.join(log_tmp_dir, 'multiqc.log')

    # Logging templates
    debug_template = '[%(asctime)s] %(name)-50s [%(levelname)-7s]  %(message)s'
    info_template = '[%(levelname)-7s] %(module)15s : %(message)s'

    # Base level setup
    logger.setLevel(getattr(logging, 'DEBUG'))

    # Set up the console logging stream
    console = logging.StreamHandler()
    console.setLevel(getattr(logging, loglevel))
    level_styles = coloredlogs.DEFAULT_LEVEL_STYLES
    level_styles['debug'] = {'faint': True}
    if loglevel == 'DEBUG':
        if no_ansi or not sys.stderr.isatty():
            console.setFormatter(logging.Formatter(debug_template))
        else:
            console.setFormatter(
                coloredlogs.ColoredFormatter(fmt=debug_template,
                                             level_styles=level_styles))
    else:
        if no_ansi or not sys.stderr.isatty():
            console.setFormatter(logging.Formatter(info_template))
        else:
            console.setFormatter(
                coloredlogs.ColoredFormatter(fmt=info_template,
                                             level_styles=level_styles))
    logger.addHandler(console)

    # Now set up the file logging stream if we have a data directory
    file_handler = logging.FileHandler(log_tmp_fn, encoding='utf-8')
    file_handler.setLevel(getattr(logging,
                                  'DEBUG'))  # always DEBUG for the file
    file_handler.setFormatter(logging.Formatter(debug_template))
    logger.addHandler(file_handler)
Example #5
    def __init__(self):
        super(OutputLogger, self).__init__()
        self.logger = logging.getLogger()
        self.logger.setLevel(logging.NOTSET)

        self.handler = logging.StreamHandler()
        self.handler.setLevel(logging.NOTSET)

        try:
            import coloredlogs
            self.formatter = coloredlogs.ColoredFormatter("%(asctime)s "
                                                          "[%(threadName)s] "
                                                          "[%(name)s] "
                                                          "[%(levelname)s] "
                                                          "%(message)s")
        except ImportError:
            self.formatter = logging.Formatter("%(asctime)s "
                                               "[%(threadName)s] "
                                               "[%(name)s] "
                                               "[%(levelname)s] "
                                               "%(message)s")

        self.handler.setFormatter(self.formatter)
        self.logger.addHandler(self.handler)
        self.logger.info("Logger enabled")
    def setup_logging(self):
        logging_path = "../../.mlsc/"
        logging_file = "mlsc.log"

        format_string_file = "%(asctime)s - %(levelname)-8s - %(name)-30s - %(message)s"
        format_string_console = "%(levelname)-8s - %(name)-30s - %(message)s"

        logging_level_root = logging.NOTSET
        logging_level_console = logging.INFO
        logging_level_file = logging.INFO
        logging_file_enabled = False

        logging_level_map = {
            "notset": logging.NOTSET,
            "debug": logging.DEBUG,
            "info": logging.INFO,
            "warning": logging.WARNING,
            "error": logging.ERROR,
            "critical": logging.CRITICAL
        }

        if self.config is not None:
            try:
                logging_level_console = logging_level_map[
                    self.config["general_settings"]["log_level_console"]]
                logging_level_file = logging_level_map[
                    self.config["general_settings"]["log_level_file"]]
                logging_file_enabled = self.config["general_settings"][
                    "log_file_enabled"]
            except Exception as e:
                print(f"Could not load logging settings. Exception {e}")
                pass

        if not os.path.exists(logging_path):
            Path(logging_path).mkdir(exist_ok=True)

        root_logger = logging.getLogger()

        # Reset Handlers
        root_logger.handlers = []
        root_logger.setLevel(logging_level_root)

        if logging_file_enabled:
            file_formatter = logging.Formatter(format_string_file)
            rotating_file_handler = RotatingFileHandler(
                logging_path + logging_file,
                mode='a',
                maxBytes=5 * 1024 * 1024,
                backupCount=5,
                encoding='utf-8')
            rotating_file_handler.setLevel(logging_level_file)
            rotating_file_handler.setFormatter(file_formatter)
            root_logger.addHandler(rotating_file_handler)

        console_formatter = coloredlogs.ColoredFormatter(
            fmt=format_string_console)
        stream_handler = logging.StreamHandler(stream=sys.stderr)
        stream_handler.setFormatter(console_formatter)
        stream_handler.setLevel(logging_level_console)
        root_logger.addHandler(stream_handler)
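setup_logging() reads its levels from a nested config dictionary; a config that enables the file handler and sets both levels might look like this (keys taken from the lookups above, values illustrative):

# Illustrative shape of self.config expected by setup_logging() above;
# only the keys it actually reads are shown.
example_config = {
    "general_settings": {
        "log_level_console": "info",   # looked up in logging_level_map
        "log_level_file": "debug",
        "log_file_enabled": True,      # turns on the RotatingFileHandler
    }
}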
Example #7
async def run(url, girder_api_key):
    from stemworker import socketio

    global _pipelines
    root = logging.getLogger()
    root.setLevel(logging.DEBUG)

    handler = logging.StreamHandler(sys.stdout)
    handler.setLevel(logging.INFO)
    formatter = coloredlogs.ColoredFormatter(
        '%(asctime)s,%(msecs)03d - %(name)s - %(levelname)s - %(message)s')
    handler.setFormatter(formatter)
    root.addHandler(handler)

    logger.info('Loading pipelines.')
    load_pipelines()

    comm = MPI.COMM_WORLD
    rank = comm.Get_rank()

    worker_id = None
    if rank == 0:
        worker_id = uuid.uuid4().hex

    worker_id = comm.bcast(worker_id, root=0)

    cookie = await authenticate(url, girder_api_key)
    await socketio.connect(_pipelines, worker_id, url, cookie)
Example #8
def get_logger(log_to_console=True, log_to_file=True):
    """Initialize Python logger that outputs to file and console."""
    assert log_to_console or log_to_file

    logger = logging.getLogger("main_logger")
    logger.setLevel(logging.DEBUG)
    formatter = coloredlogs.ColoredFormatter(
        "%(asctime)s | %(filename)12s | %(levelname)8s | %(message)s")

    if log_to_file:
        fh = logging.FileHandler("run.log")
        fh.setLevel(logging.DEBUG)
        fh.setFormatter(formatter)
        logger.addHandler(fh)
    if log_to_console:
        ch = logging.StreamHandler()
        ch.setLevel(logging.INFO)
        ch.setFormatter(formatter)
        logger.addHandler(ch)

    # Fix TensorFlow doubling logs
    # https://stackoverflow.com/questions/33662648/tensorflow-causes-logging-messages-to-double
    logger.propagate = False

    return logger
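A quick usage sketch for get_logger() above; with both sinks enabled, DEBUG records reach run.log while only INFO and above reach the console:

# Assumes get_logger() and its imports (logging, coloredlogs) are in scope.
logger = get_logger(log_to_console=True, log_to_file=True)
logger.debug("written to run.log only")
logger.info("written to run.log and shown on the console")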
Example #9
def setup_logging(stream_log_level=None,
                  log_path=None,
                  file_log_level=None,
                  **kwargs):
    global PKG_FILE_HANDLER, PKG_STREAM_HANDLER

    if log_path:
        if PKG_FILE_HANDLER:
            PKG_LOGGER.removeHandler(PKG_FILE_HANDLER)
        PKG_FILE_HANDLER = logging.FileHandler(log_path)
        ROOT_LOGGER.addHandler(PKG_FILE_HANDLER)
    if file_log_level:
        PKG_FILE_HANDLER.setLevel(file_log_level)
    if stream_log_level:
        if not PKG_STREAM_HANDLER:
            PKG_STREAM_HANDLER = logging.StreamHandler()
            if os.name != 'nt':
                PKG_STREAM_HANDLER.setFormatter(coloredlogs.ColoredFormatter())
            PKG_STREAM_HANDLER.addFilter(coloredlogs.HostNameFilter())
            PKG_STREAM_HANDLER.addFilter(coloredlogs.ProgramNameFilter())
            ROOT_LOGGER.addHandler(PKG_STREAM_HANDLER)
        PKG_STREAM_HANDLER.setLevel(stream_log_level)

    # TODO: maybe process quiet differently so that it does not add a stream handler
    if PKG_STREAM_HANDLER:
        logging.info("stream log level: %s", PKG_STREAM_HANDLER.level)
    if PKG_FILE_HANDLER:
        logging.info("file log level: %s", PKG_FILE_HANDLER.level)
Example #10
 def __new__(cls,
             name,
             level,
             file=None,
             logger_format=None,
             file_format=None,
             file_level=None):
     levels = {
         'debug': logging.DEBUG,
         'info': logging.INFO,
         'warning': logging.WARNING,
         'error': logging.ERROR,
         'critical': logging.CRITICAL
     }
     logger = logging.getLogger(name)
     level = levels[level.lower()]
     stream_handler = logging.StreamHandler()
     stream_handler.setLevel(level)
     stream_format = logger_format or '%(name)s - (%(levelname)s) - %(message)s'
     stream_handler.setFormatter(
         coloredlogs.ColoredFormatter(stream_format))
     logger.addHandler(stream_handler)
     if file is not None:
         file_handler = logging.FileHandler(file)
         if file_level is not None:
             file_handler.setLevel(levels[file_level.lower()])
         else:
             file_handler.setLevel(level)
         file_format = file_format or '%(asctime)s - %(name)s - (%(levelname)s) - %(message)s'
         file_handler.setFormatter(logging.Formatter(file_format))
         logger.addHandler(file_handler)
     coloredlogs.install(fmt=stream_format, logger=logger, level=level)
     return logger
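Because this is a __new__ override, instantiating the class (named hypothetically below, since the class definition itself is not shown) returns a configured logging.Logger rather than an instance of the wrapper:

# AppLogger is a placeholder name for the class that defines __new__ above.
log = AppLogger("myapp", "info", file="myapp.log")
log.info("emitted by the console handler(s) and written to myapp.log")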
Example #11
 def __init__(self, log_file_name):
     """
     Initializes the logging manager:
     - Uses a different colored format for each log level (see LEVEL_STYLES)
     - Defines a FileHandler with the given log_file_name.
     - Defines a StreamHandler for stdout.
     - Defines a logger for real time logs with those handlers
     Args:
         log_file_name: The path to where the log file will be saved
     """
     if not hasattr(logging, 'success'):
         _add_logging_level('SUCCESS', 25)
     self.log_file_name = log_file_name
     formatter = coloredlogs.ColoredFormatter(fmt=LOGGING_FORMAT,
                                              level_styles=LEVEL_STYLES)
     self.console_handler = logging.StreamHandler(sys.stdout)
     self.console_handler.setFormatter(formatter)
     log_file_path = os.path.join(
         ARTIFACTS_PATH, 'logs', log_file_name) if os.path.exists(
             os.path.join(ARTIFACTS_PATH, 'logs')) else os.path.join(
                 ARTIFACTS_PATH, log_file_name)
     self.file_handler = logging.FileHandler(log_file_path)
     self.file_handler.setFormatter(formatter)
     self.console_handler.setLevel(logging.INFO)
     self.file_handler.setLevel(logging.DEBUG)
     self.real_time_logger = logging.getLogger(f'real_time-{log_file_path}')
     self.real_time_logger.addHandler(self.file_handler)
     self.real_time_logger.addHandler(self.console_handler)
     self.real_time_logger.setLevel(logging.DEBUG)
     self.loggers = {}
     self.listeners = {}
     self.logs_lock = Lock()
     self.thread_names = set()
Example #12
def getFileHandler():
    log_file_formatter = coloredlogs.ColoredFormatter(
        log_format, field_styles=field_styles, level_styles=level_styles)
    log_file_handler = TimedRotatingFileHandler(LOG_FILE, when='midnight')
    log_file_handler.addFilter(coloredlogs.ProgramNameFilter())
    log_file_handler.setFormatter(log_file_formatter)
    return log_file_handler
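getFileHandler() relies on module-level settings that are not part of the snippet (it also needs TimedRotatingFileHandler imported from logging.handlers); a hedged sketch of what those globals might look like, plus wiring the handler onto a logger (names match the snippet, values are illustrative):

import logging
import coloredlogs

# Assumed module-level configuration referenced by getFileHandler() above.
LOG_FILE = "app.log"
# %(programname)s is filled in by the ProgramNameFilter added in getFileHandler()
log_format = "%(asctime)s %(programname)s %(levelname)s %(message)s"
level_styles = coloredlogs.DEFAULT_LEVEL_STYLES
field_styles = coloredlogs.DEFAULT_FIELD_STYLES

logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
logger.addHandler(getFileHandler())
logger.info("written (with ANSI color codes) to app.log, rotated at midnight")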
Example #13
def setup(config: DynaBox, name: str):
    fmt = "%(asctime)s %(levelname)-8s [%(name)s] %(message)s"
    colored_formatter = coloredlogs.ColoredFormatter(fmt)
    plain_formatter = logging.Formatter(fmt)
    logger = logging.getLogger(name)
    if config.file:
        fh = logging.FileHandler(config.filename)
        fhLevel = logging.getLevelName(config.file_verbosity.upper())
        logger.setLevel(fhLevel)
        fh.setLevel(fhLevel)
        fh.setFormatter(plain_formatter)
        logger.addHandler(fh)
    if config.console:
        ch = logging.StreamHandler()
        chLevel = logging.getLevelName(config.console_verbosity.upper())
        ch.setLevel(chLevel)
        if logger.level > chLevel or logger.level == 0:
            logger.setLevel(chLevel)
        ch.setFormatter(colored_formatter)
        logger.addHandler(ch)

    class ShutdownHandler(logging.Handler):
        """Exit application with CRITICAL logs"""

        def emit(self, record):
            logging.shutdown()
            sys.exit(1)

    sh = ShutdownHandler(level=50)
    sh.setFormatter(colored_formatter)
    logger.addHandler(sh)
    return logger
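setup() only reads a handful of attributes from its config, so for a quick experiment a stand-in namespace can take the place of a real dynaconf DynaBox (attribute names come from the code above; values are illustrative):

from types import SimpleNamespace

# Stand-in for the DynaBox config; only the attributes setup() reads are set.
fake_config = SimpleNamespace(
    console=True, console_verbosity="info",
    file=False, filename=None, file_verbosity="debug",
)
logger = setup(fake_config, "myapp")
logger.info("colored console output")
# Any CRITICAL record would hit the ShutdownHandler and terminate the process.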
Example #14
    def __init__(self, name, sub_directory=''):
        self._ensure_logs_directory_exists(
            os.path.join(self.LOGS_DIRECTORY, sub_directory))

        if type(name) is not str:
            name = name.__class__.__name__

        self._logger = logging.getLogger(name)
        if len(self._logger.handlers) > 0:
            return

        self._logger.setLevel(logging.DEBUG)

        stream_handler = logging.StreamHandler()
        stream_handler.setFormatter(
            coloredlogs.ColoredFormatter(
                '[%(name)s] [%(levelname)s] %(message)s'))
        self._logger.addHandler(stream_handler)

        file_name = utils.camel_case_to_underscore(name) + '.log'
        file_path = os.path.join(self.LOGS_DIRECTORY, sub_directory, file_name)
        file_handler = logging.FileHandler(file_path)
        file_handler.setFormatter(
            logging.Formatter('%(asctime)s [%(levelname)s] %(message)s'))
        self._logger.addHandler(file_handler)
Example #15
    def __init__(self,
                 game_state_interface,
                 cache_count=30,
                 log_level=logging.DEBUG):  # this is cheaper to store
        '''
        :type self.gsi: GameStateInterface

        :param game_state_interface: GameStateInterface
        :type game_state_interface: GameStateInterface
        :param cache_count: How many score ticks to cache
        :type cache_count: int
        '''
        self.gsi = game_state_interface

        self.single_tick_score_cache = LRUCache(cache_count)
        self._total_scores = {}

        log = logging.getLogger('gamebot_scoring')
        log.setLevel(log_level)
        log_formatter = coloredlogs.ColoredFormatter(ScoringInterface.LOG_FMT)
        log_handler = logging.StreamHandler()
        log_handler.setFormatter(log_formatter)
        log.addHandler(log_handler)

        self.log = log
Example #16
def setup_logging(
    verbosity='INFO',
    log_file='simulation.log',
    log_file_verbosity='DEBUG',
    results_dir='',
):
    root_logger = logging.getLogger()
    root_logger.setLevel(logging.DEBUG)  # possibly overridden from args later
    # ^ TODO: how does this impact performance?
    stream_handler = logging.StreamHandler(stream=sys.stdout)
    stream_handler.setFormatter(
        coloredlogs.ColoredFormatter(
            style='{',
            fmt='{asctime} {levelname} {name}:\n\t{message}',
            datefmt='%Y-%m-%d %H:%M:%S'))
    stream_handler.setLevel(verbosity)
    root_logger.addHandler(stream_handler)

    if results_dir != '':
        os.makedirs(results_dir, exist_ok=True)
    log_file = os.path.join(results_dir, log_file)
    file_handler = logging.FileHandler(filename=log_file, mode='w')
    file_handler.setFormatter(
        logging.Formatter(style='{',
                          fmt='{asctime} {levelname} {name}: {message}',
                          datefmt='%Y-%m-%d %H:%M:%S'))
    file_handler.setLevel(log_file_verbosity)
    root_logger.addHandler(file_handler)
Example #17
    def __init__(self, db_api, log_level=logging.INFO):

        self.db_api = db_api

        # Set up logging
        log = logging.getLogger('gamebot_scripts')
        log.addHandler(logstash.TCPLogstashHandler(LOGSTASH_IP, LOGSTASH_PORT, version=1))
        log.setLevel(log_level)
        log_formatter = coloredlogs.ColoredFormatter(ScriptsFacade.LOG_FMT)
        log_handler = logging.StreamHandler()
        log_handler.setFormatter(log_formatter)
        log.addHandler(log_handler)
        self.log = log

        # Test connection to RabbitMQ server
        host        = settings.RABBIT_ENDPOINT
        username    = settings.RABBIT_USERNAME
        password    = settings.RABBIT_PASSWORD
        credentials = pika.PlainCredentials(username, password)
        self.conn_params = pika.ConnectionParameters(host=host, credentials=credentials)
        while True:
            try:
                connection = pika.BlockingConnection(self.conn_params)
                log.info("Connection to RabbitMQ dispatcher verified")
                break
            except pika.exceptions.AMQPConnectionError as ex:
                log.info("The RabbitMQ server is not ready yet...")
                time.sleep(5)
                continue
        connection.close()
Example #18
def setupLogger(settings):
    # Define the log formats up front so that both branches below can use them
    fmt = "%(asctime)s [%(levelname)s] %(message)s"
    date_fmt = "%Y-%m-%d %H:%M"

    if settings.logfile is not None:
        try:
            consoleFormatter = coloredlogs.ColoredFormatter(fmt, date_fmt)
        except NameError:
            consoleFormatter = logging.Formatter(fmt, date_fmt)
        consoleHandler = logging.StreamHandler()
        consoleHandler.setLevel(settings.loglevel_stderr)
        consoleHandler.setFormatter(consoleFormatter)

        fileFormatter = logging.Formatter(fmt, date_fmt)
        fileHandler = logging.FileHandler(settings.logfile)
        fileHandler.setLevel(settings.loglevel_file)
        fileHandler.setFormatter(fileFormatter)

        logging.getLogger().setLevel(0)
        logging.getLogger().addHandler(fileHandler)
        logging.getLogger().addHandler(consoleHandler)
    else:
        try:
            coloredlogs.install(fmt=fmt,
                                datefmt=date_fmt,
                                level=settings.loglevel_stderr)
        except NameError:
            logging.basicConfig(format=fmt,
                                datefmt=date_fmt,
                                level=settings.loglevel_stderr)
Example #19
File: config.py  Project: ekadofong/hugs
    def setup_logger(self, tract, patch):
        """
        Setup the python logger.
        """

        name = 'hugs-pipe: {} | {}'.format(tract, patch)
        self.logger = logging.getLogger(name)
        self.logger.setLevel(getattr(logging, self.log_level.upper()))
        fmt = '%(name)s: %(asctime)s %(levelname)s: %(message)s'
        try:
            formatter = coloredlogs.ColoredFormatter(fmt=fmt,
                                                     datefmt='%m/%d %H:%M:%S')
        except:
            formatter = logging.Formatter(fmt=fmt, datefmt='%m/%d %H:%M:%S')

        if not self.logger.handlers:
            sh = logging.StreamHandler(sys.stdout)
            sh.setFormatter(formatter)
            self.logger.addHandler(sh)

            if self.log_fn:
                fh = logging.FileHandler(self.log_fn)
                fh.setFormatter(formatter)
                self.logger.addHandler(fh)

            msg = 'starting hugs-pipe with config file ' + self.config_fn
            self.logger.info(msg)
Example #20
def install_logging(log_file_name: str, include_process_name=False) -> str:
    """
    This method installs the logging mechanism so that INFO-level logs are sent to the console and DEBUG-level
    logs are written only to the log_file_name file.
    Args:
        include_process_name: Whether to include the process name in the logs format, Should be used when
            using multiprocessing
        log_file_name: The name of the file in which the debug logs will be saved
    """
    if not hasattr(logging, 'success'):
        _add_logging_level('SUCCESS', 25)
    logging_format = LOGGING_FORMAT
    if include_process_name:
        logging_format = '[%(asctime)s] - [%(processName)s] - [%(threadName)s] - [%(levelname)s] - %(message)s'
    formatter = coloredlogs.ColoredFormatter(fmt=logging_format,
                                             level_styles=LEVEL_STYLES)
    ch = logging.StreamHandler(sys.stdout)
    ch.setFormatter(formatter)
    log_file_path = os.path.join(
        ARTIFACTS_PATH, 'logs', log_file_name) if os.path.exists(
            os.path.join(ARTIFACTS_PATH, 'logs')) else os.path.join(
                ARTIFACTS_PATH, log_file_name)
    fh = logging.FileHandler(log_file_path)
    fh.setFormatter(formatter)
    ch.setLevel(logging.INFO)
    fh.setLevel(logging.DEBUG)
    logging.basicConfig(level=logging.DEBUG, handlers=[ch, fh], force=True)
    return log_file_path
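A hypothetical call to install_logging() above; it assumes ARTIFACTS_PATH, LOGGING_FORMAT, LEVEL_STYLES and _add_logging_level() are defined in the surrounding module:

import logging

log_file_path = install_logging("run_debug.log")
logging.info("shown on the console and written to the debug log")
logging.debug("written to the debug log only")   # the console handler is INFO-level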
Example #21
def configure_logging():
    tf.logging.set_verbosity(tf.logging.INFO)
    coloredlogs.install(level="INFO")
    coloredlogs.DEFAULT_LEVEL_STYLES = {
        "debug": {
            "color": "white",
            "bold": False
        },
        "info": {
            "color": "white",
            "bold": True
        },
        "warning": {
            "color": "yellow",
            "bold": True
        },
        "error": {
            "color": "red",
            "bold": True
        },
        "fatal": {
            "color": "magenta",
            "bold": True
        },
    }
    logger = logging.getLogger("tensorflow")
    log_format = "%(asctime)s %(levelname)s %(message)s"
    formatter = coloredlogs.ColoredFormatter(log_format)

    for handler in logger.handlers:
        handler.setFormatter(formatter)
    logger.propagate = False
Example #22
def init():
    initialize_logger(os.getcwd())
    coloredlogs.install(level='DEBUG')
    coloredlogs.ColoredFormatter()
    c = Cassandra()
    syncTables()
    g = Goodreads()
    return (c, g)
Example #23
def setup_logging(level: str = 'DEBUG'):
    log = logging.getLogger('prairiedog')
    coloredlogs.install(level=level)

    fmt = '%(asctime)s %(name)s[%(process)d] %(levelname)s %(message)s'
    formatter = coloredlogs.ColoredFormatter(fmt)
    fh = logging.FileHandler('prairiedog.log')
    fh.setFormatter(formatter)
    log.addHandler(fh)
Example #24
 def __init__(self, color=False):
     if not color:
         self._formatter = logging.Formatter(
             "%(asctime)s.%(msecs)03d | %(source)s %(type)s %(threadName)s: "
             "%(message)s", "%H:%M:%S")
     else:
         self._formatter = coloredlogs.ColoredFormatter(
             "%(asctime)s.%(msecs)03d | %(source)s %(type)s %(threadName)s: "
             "%(message)s", "%H:%M:%S", LEVEL_FORMATS, FIELD_STYLES)
Example #25
def main():
    root = logging.getLogger()
    root.setLevel(logging.DEBUG)

    handler = logging.StreamHandler(sys.stdout)
    handler.setLevel(logging.DEBUG)
    formatter = coloredlogs.ColoredFormatter('%(asctime)s,%(msecs)03d - %(name)s - %(levelname)s - %(message)s')
    handler.setFormatter(formatter)
    root.addHandler(handler)
Example #26
def main():
    # logger initialize
    logger = logging.getLogger(__name__)
    handler = logging.StreamHandler()
    handler.setLevel(logging.INFO)
    handler.setFormatter(
        # logging.Formatter("[%(asctime)s] [%(threadName)s] %(message)s")
        coloredlogs.ColoredFormatter(
            fmt="[%(asctime)s] [%(threadName)s] %(message)s"))
    logger.setLevel(logging.DEBUG)
    logger.addHandler(handler)
    logger.propagate = False

    # initialize photostand configure
    photostand_config = PhotostandConfig()

    try:
        photostand_config.read()

    except (FailedToReadSerialNumber):
        logger.critical('Failed to read cpu serial number.')
        exit(1)

    except (FileNotFoundError, ParsingError, MissingSectionHeaderError,
            NoSectionError, NoOptionError, ValueError):

        logger.critical('Failed to read photostand config.\n' + format_exc())

        exit(1)

    if not photostand_config.get_sensor_is_active():
        logger.info('Sensor daemon disabled by photostand_config.')

        exit(0)

    # initialize last sensor variable
    last_sensor_value = LastSensorValue(logger)

    # initialize socket server thread
    server_thread = AfUnixServerThread(logger, photostand_config,
                                       last_sensor_value)

    server_thread.start()

    try:
        # Initialize serial port watcher
        serial_port_watcher = SerialPortWatcher(
            logger,
            photostand_config,
            last_sensor_value,
        )

        serial_port_watcher.main()

    except KeyboardInterrupt:
        server_thread.stop()
Example #27
def setup_logging(level):
    level = logging.getLevelName(level)
    root_logger = logging.getLogger()
    root_logger.setLevel(level)
    handler = logging.StreamHandler()
    handler.setLevel(level)
    formatter = coloredlogs.ColoredFormatter(
        "%(asctime)s %(levelname)s %(message)s")
    handler.setFormatter(formatter)
    root_logger.addHandler(handler)
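An illustrative call to the setup_logging() above; any level name accepted by logging.getLevelName() works:

import logging

setup_logging("INFO")
logging.getLogger(__name__).info("colored, timestamped console output")
logging.getLogger(__name__).debug("filtered out: both the root logger and the handler are at INFO")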
Example #28
def main():
    """The core code of the program"""
    # Logs initialization
    log = logging.getLogger(__name__)
    logging.root.setLevel(config["Logging"]["level"])
    stream_handler = logging.StreamHandler()
    if coloredlogs is not None:
        stream_handler.formatter = coloredlogs.ColoredFormatter(
            config["Logging"]["format"],
            style="{",
            level_styles=logs_level_styles)
    else:
        stream_handler.formatter = logging.Formatter(
            config["Logging"]["format"], style="{")
    logging.root.handlers.clear()
    logging.root.addHandler(stream_handler)
    log.debug("Logging setup successfully!")

    # Ignore most python-telegram-bot logs, as they are useless most of the time
    logging.getLogger("urllib3.connectionpool").setLevel("ERROR")
    # logging.getLogger("Telegram").setLevel("ERROR")

    # Trying to connect with the Telegram bot by the token
    try:
        bot = telebot.TeleBot(config["Telegram"]["token"])
        bot.get_me()
        log.debug('Connection to the bot was successful!')
    except telebot.apihelper.ApiException:
        log.error('A request to the Telegram API was unsuccessful.')
        log.fatal(
            'Write the valid token in the config file and restart the script')
        sys.exit(1)
    log.debug("Bot connection is valid!")

    log.info("avarice is started!")

    @bot.message_handler(commands=["start"])
    def start(message):
        bf.sending_start_message(bot, message, types)
        bf.start_func(message)

    @bot.message_handler(commands=["help"])
    def helping(message):
        bf.sending_help_message(bot, message)

    @bot.message_handler(content_types=["text"])
    def message_handler(message):
        if message.chat.type == 'private':
            bf.handler(bot, types, message, None)

    @bot.callback_query_handler(func=lambda call: True)
    def callback_inline(call):
        bf.handler(bot, types, None, call)

    bot.polling(none_stop=True)
Example #29
async def startup_event():
  global logger

  if ver == 'local':
    log_folder = 'logs'
    if not os.path.exists(log_folder): os.mkdir(log_folder)

    # config logger
    console_formatter = coloredlogs.ColoredFormatter('%(asctime)s [%(levelname)s] (%(name)s:%(funcName)s) %(message)s', "%H:%M:%S")
    console_handler = logging.StreamHandler()
    console_handler.setFormatter(console_formatter)

    time_formatter = logging.Formatter('%(asctime)s [%(levelname)s] (%(name)s:%(funcName)s) %(message)s', "%y-%m-%d %H:%M:%S")
    # time_handler = TimedRotatingFileHandler(f'{log_folder}/slides_ws.log', when='D', backupCount=7) # m H D : minutes hours days
    time_handler = logging.FileHandler(f'{log_folder}/slides_ws.log')
    time_handler.setFormatter(time_formatter)

    # clear uvicorn logger
    logging.getLogger("uvicorn").handlers.clear()
    access_log = logging.getLogger("uvicorn.access")
    access_log.handlers.clear()
    access_log.addHandler(console_handler)
    access_log.addHandler(time_handler)

    logging.basicConfig(
      level=logging.DEBUG,
      handlers=[
        time_handler,   # log file handler
        console_handler # console stream handler
      ]
    )
  else:
    log_folder = '/output/logs'
    if not os.path.exists(log_folder): os.makedirs(log_folder)

    time_formatter = logging.Formatter('%(asctime)s [%(process)d] [%(levelname)s] (%(name)s) %(message)s', "%y%m%d %H:%M:%S")
    # time_handler = TimedRotatingFileHandler(f'{log_folder}/slides_ws.log', when='D', backupCount=7) # m H D : minutes hours days
    time_handler = logging.FileHandler(f'{log_folder}/slides_ws.log')
    time_handler.setFormatter(time_formatter)

    logging.basicConfig(
      level=logging.DEBUG,
      handlers=[
        time_handler
      ]
    )

  logging.getLogger('matplotlib').setLevel(logging.WARNING)
  logging.getLogger('urllib3').setLevel(logging.WARNING)
  logging.getLogger('shapely').setLevel(logging.WARNING)
  logging.getLogger('passlib').setLevel(logging.WARNING)
  logging.getLogger('multipart').setLevel(logging.WARNING)
  logger = logging.getLogger('slides_ws')
  logger.info(f'Setting logger config for version : {ver}')
Example #30
def main(api_url, api_key, data_file, chunk_size):
    root = logging.getLogger()
    root.setLevel(logging.INFO)

    handler = logging.StreamHandler(sys.stdout)
    handler.setLevel(logging.INFO)
    formatter = coloredlogs.ColoredFormatter(
        '%(asctime)s,%(msecs)03d - %(name)s - %(levelname)s - %(message)s')
    handler.setFormatter(formatter)
    root.addHandler(handler)
    asyncio.run(ingest(api_url, api_key, data_file, chunk_size))