예제 #1
0
def setup(
    base_logger: logging.Logger = logging.getLogger(),
    token: str = '',
    users: "Optional[List[int]]" = None,
    timeout: int = 10,
    tg_format: str = '<b>%(name)s:%(levelname)s</b> - <code>%(message)s</code>'
):
    """
    Setup TgLogger

    :param base_logger: base logging.Logger obj
    :param token: tg bot token to log form
    :param users: list of user_id to log to (defaults to an empty list)
    :param timeout: seconds for retrying to send log if an error occurred
    :param tg_format: logging format for tg messages (html parse mode)

    :return: logging.StreamHandler
    """
    # BUG FIX: the previous mutable default ([]) was a single list shared by
    # every call; resolve a fresh list per call instead.  The annotation is a
    # string so it stays harmless even if typing.Optional is not imported.
    if users is None:
        users = []

    # Logging format
    formatter = logging.Formatter(tg_format)

    # Setup TgLoggerHandler
    tg_handler = TgLoggerHandler(
        token=token,  # tg bot token
        users=users,  # list of user_id
        timeout=timeout  # default value is 10 seconds
    )
    tg_handler.setFormatter(formatter)
    base_logger.addHandler(tg_handler)

    return tg_handler
예제 #2
0
    def log_to_file(self, **kargs):
        """
        Write the given key:value pairs as one record to the log file.
        Note: Logger name in the log file is fixed (ignis_logging)

        :param kargs: Keyword parameters to be logged (e.g t1=0.02,
        qubits=[1,2,4])
        """
        if not self._file_logging_enabled:
            # Warn about disabled file logging only on the first attempt.
            if not self._warning_omitted:
                msg = "File logging is disabled"
                if not self._conf_file_exists:
                    msg += ": no config file"
                logging.getLogger(__name__).warning(msg)
                self._warning_omitted = True
            return

        # The file handler is created lazily because its __init__ has the
        # side effect of creating the log file.
        if self._file_handler is None:
            self._file_handler = IgnisLogging().get_file_handler()

        assert (self._file_handler is not None), "file_handler is not set"

        # Temporarily swap the stream handler for the file handler so this
        # record goes to the file only.
        Logger.removeHandler(self, self._stream_handler)
        Logger.addHandler(self, self._file_handler)

        logstr = "".join("'{}':'{}' ".format(key, val)
                         for key, val in kargs.items())
        Logger.log(self, 100, logstr)

        # Restore the original stream handler.
        Logger.removeHandler(self, self._file_handler)
        Logger.addHandler(self, self._stream_handler)
예제 #3
0
파일: logfile.py 프로젝트: hathawsh/slowlog
def make_file_logger(logfile, maxBytes=int(1e7), backupCount=10):
    """Create a logger that mimics the format of Products.LongRequestLogger"""
    if isinstance(logfile, Logger):
        # Already a fully configured Logger: nothing to build.
        return logfile

    logger = Logger('slowlog')

    if isinstance(logfile, Handler):
        # A ready-made handler was supplied; attach it unchanged.
        handler = logfile
    else:
        if hasattr(logfile, 'write'):
            # An open file-like object: stream straight into it.
            handler = StreamHandler(logfile)
        else:
            # Otherwise treat it as a filename and rotate the file.
            handler = RotatingFileHandler(logfile,
                                          maxBytes=maxBytes,
                                          backupCount=backupCount)
        handler.setFormatter(Formatter('%(asctime)s - %(message)s'))

    logger.addHandler(handler)
    return logger
예제 #4
0
class ExpDateCSVParser(object):
    """Parse expansion and date info from a CSV file and update the
       database with the correct dates"""

    # pylint: disable-msg=R0913
    # we may need all these arguments for some files
    def __init__(self, oLogHandler):
        """
        :param oLogHandler: optional logging.Handler attached to the
            progress logger (may be None).
        """
        self.oLogger = Logger('exp date parser')
        if oLogHandler is not None:
            self.oLogger.addHandler(oLogHandler)
        self.oLogHandler = oLogHandler

    def parse(self, fIn):
        """Process the CSV file line into the CardSetHolder

        Each row is (expansion name, YYYYMMDD date); the release date is
        stored on the matching Expansion database object.
        """
        oCsvFile = csv.reader(fIn)
        aRows = list(oCsvFile)
        if hasattr(self.oLogHandler, 'set_total'):
            # Let progress-style handlers know how many rows to expect.
            self.oLogHandler.set_total(len(aRows))
        for sExp, sDate in aRows:
            try:
                oExp = IExpansion(sExp)
            except SQLObjectNotFound:
                # This error is non-fatal - the user may not have imported
                # the extra card lists, so we can legimately encounter
                # expansions here which aren't in the database
                # Lazy %-args: don't format unless the record is emitted.
                self.oLogger.info('Skipped Expansion: %s', sExp)
                continue
            oDate = datetime.datetime.strptime(sDate, "%Y%m%d").date()
            oExp.releasedate = oDate
            oExp.syncUpdate()
            self.oLogger.info('Added Expansion: %s', sExp)
예제 #5
0
class WhiteWolfTextParser(object):
    """Actual Parser for the WW cardlist text file(s)."""

    def __init__(self, oLogHandler):
        # Optional handler for progress/diagnostic messages.
        self.oLogger = Logger('White wolf card parser')
        if oLogHandler is not None:
            self.oLogger.addHandler(oLogHandler)
        # Current state-machine state; set by reset().
        self._oState = None
        self.reset()

    def reset(self):
        """Reset the parser"""
        # Fresh initial state with an empty shared-data dict.
        self._oState = WaitingForCardName({}, self.oLogger)

    def parse(self, fIn):
        """Feed lines to the state machine"""
        for sLine in fIn:
            self.feed(sLine)
        # Ensure we flush any open card text states
        self.feed('')
        # A state without flush() means the input ended mid-card.
        if hasattr(self._oState, 'flush'):
            self._oState.flush()
        else:
            raise IOError('Failed to parse card list - '
                    'unexpected state at end of file.\n'
                    'Card list probably truncated.')

    def feed(self, sLine):
        """Feed the line to the current state"""
        # Strip BOM from line start
        # NOTE(review): .decode('utf8') implies sLine is a byte string, so
        # this looks like Python 2 code (and feed('') above relies on
        # str having .decode) -- confirm target interpreter before porting.
        sLine = sLine.decode('utf8').lstrip(u'\ufeff')
        self._oState = self._oState.transition(sLine, None)
예제 #6
0
def get_logger():
    """Return a fresh Logger named "log" with one formatted StreamHandler."""
    log = Logger(name="log")
    stream = StreamHandler()
    stream.setFormatter(Formatter("%(asctime)s|%(levelname)s| %(message)s"))
    log.addHandler(stream)
    return log
예제 #7
0
파일: logging.py 프로젝트: renaynay/trinity
def setup_trinity_file_and_queue_logging(
        logger: Logger,
        handler_stream: StreamHandler,
        logfile_path: Path,
        level: int=None) -> Tuple[Logger, 'Queue[str]', QueueListener]:
    """Attach a rotating file handler to *logger* and build (but do not
    start) a QueueListener that fans queued records out to both the
    stream handler and the new file handler.

    :param logger: logger that receives the rotating file handler
    :param handler_stream: already-configured stream/console handler
    :param logfile_path: destination path of the log file
    :param level: threshold for the logger and file handler; None -> DEBUG
    :return: (configured logger, multiprocessing log queue, listener)
    """
    # Deferred import: avoids importing .mp at module load time.
    from .mp import ctx

    if level is None:
        level = logging.DEBUG

    # Queue built from the multiprocessing context so child processes can
    # ship records to this listener.
    log_queue = ctx.Queue()

    handler_file = RotatingFileHandler(
        str(logfile_path),
        maxBytes=(10000000 * LOG_MAX_MB),
        backupCount=LOG_BACKUP_COUNT
    )

    handler_file.setLevel(level)
    handler_file.setFormatter(LOG_FORMATTER)

    logger.addHandler(handler_file)
    logger.setLevel(level)

    # respect_handler_level=True lets each handler apply its own threshold
    # to records pulled off the queue.
    listener = QueueListener(
        log_queue,
        handler_stream,
        handler_file,
        respect_handler_level=True,
    )

    return logger, log_queue, listener
예제 #8
0
def add_file_out_to_logger(logger: logging.Logger = None,
                           log_file_path: str = "/tmp/log.txt"):
    """Attach an INFO-level FileHandler writing to *log_file_path*.

    :param logger: logger to extend; defaults to ``get_default_logger()``,
        resolved at call time (the old default was evaluated once at
        import time, freezing whichever logger existed then).
    :param log_file_path: path of the log file to append to.
    """
    if logger is None:
        # Resolve lazily so every call picks up the current default logger.
        logger = get_default_logger()
    fh = logging.FileHandler(log_file_path)
    fh.setLevel(logging.INFO)
    fh.setFormatter(formatter)  # module-level formatter
    logger.addHandler(fh)
    # Lazy %-args: the message is only built if DEBUG is enabled.
    logger.debug("init logger to %s", log_file_path)
예제 #9
0
    def _initialize_logger(logger: logging.Logger,
                           log_file: Optional[str]) -> None:
        """
        初始化传入的 Logger 对象,
        将 INFO 以上的日志输出到屏幕,将所有日志存入文件。
        :param logger: Logger 对象
        :param log_file: 日志文件路径
        :return: None
        """
        logger.setLevel(logging.DEBUG)

        # 将日志输出到控制台
        sh = logging.StreamHandler(sys.stdout)
        sh.setLevel(logging.INFO)
        sh.setFormatter(logging.Formatter('[%(levelname)s] %(message)s'))
        logger.addHandler(sh)

        # 将日志输出到文件
        if log_file:
            fh = logging.FileHandler(log_file, encoding='utf-8')
            fh.setLevel(logging.DEBUG)
            fh.setFormatter(
                logging.Formatter(
                    '%(asctime)s - %(name)s - %(levelname)s - %(message)s'))
            logger.addHandler(fh)
예제 #10
0
def basic_config(logger: logging.Logger = logging.root, level=logging.INFO):
    """
    Configures a logger to log <=INFO to stdout and >INFO to stderr

    :param logger: Logger to configure, defaults to logging.root
    :param level: Defaults to INFO
    :return: configured logger (logger from parameters)
    """
    logger.setLevel(level)

    class InfoFilter(logging.Filter):
        # Only DEBUG/INFO pass, so stdout never shows warnings or errors.
        def filter(self, rec):
            return rec.levelno in (logging.DEBUG, logging.INFO)

    fmt = logging.Formatter(
        "%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        "%d/%m/%Y %H:%M:%S")

    stdout_handler = logging.StreamHandler(sys.stdout)
    stdout_handler.setLevel(logging.DEBUG)
    stdout_handler.setFormatter(fmt)
    stdout_handler.addFilter(InfoFilter())
    logger.addHandler(stdout_handler)

    stderr_handler = logging.StreamHandler()
    stderr_handler.setLevel(logging.WARNING)
    stderr_handler.setFormatter(fmt)
    logger.addHandler(stderr_handler)

    return logger
예제 #11
0
def conf_logging(logger: logging.Logger,
                 loglevel: str,
                 no_stdout: bool,
                 path=None) -> logging.Logger:
    """Configure *logger* with an optional console and/or file handler.

    :param logger: logger to configure.
    :param loglevel: level name ("CRITICAL".."DEBUG"), case-insensitive.
    :param no_stdout: suppress the stdout console handler.
    :param path: optional log-file path; the file is truncated first.
    :raises ValueError: on an unknown level, or when both outputs are off.
    :return: the configured logger.
    """
    if loglevel.upper() not in ("CRITICAL", "ERROR", "WARNING", "INFO",
                                "DEBUG"):
        raise ValueError("Invalid logging level")

    if path is None and no_stdout:
        raise ValueError("No file output and no console?")

    # BUG FIX: use the upper-cased name -- getLevelName("info") returns the
    # junk string "Level info", which made setLevel raise for lowercase input.
    level = logging.getLevelName(loglevel.upper())
    logger.setLevel(level)

    handlers = []
    if not no_stdout:
        console_formatter = logging.Formatter("[%(levelname)s] %(message)s")
        console = logging.StreamHandler(sys.stdout)
        console.setFormatter(console_formatter)
        handlers.append(console)

    if path is not None:
        # Truncate any previous log file.
        open(path, "w").close()
        file_formatter = logging.Formatter(
            ("%(asctime)s [%(levelname)s] %(name)s.%(funcName)s"
             "@ L%(lineno)d\n  %(message)s"))
        file_handler = logging.FileHandler(path, encoding="utf-8")
        file_handler.setFormatter(file_formatter)
        handlers.append(file_handler)

    # BUG FIX: the original added the console handler and then added the
    # same `handler` variable again at the end, duplicating it (and every
    # message) whenever path was None.  Add each handler exactly once.
    for handler in handlers:
        handler.setLevel(level)
        logger.addHandler(handler)

    return logger
예제 #12
0
def logging_config(logger: logging.Logger, level: int = 20) -> logging.Logger:
    '''
    configure logging
    '''
    # Copying the logger would be pointless: logging objects are singletons.
    logger.setLevel(level)

    handler = logging.StreamHandler()
    # The handler passes everything; filtering is left to the logger level.
    handler.setLevel(logging.DEBUG)

    fmt = logging.Formatter(
        '%(asctime)s - %(name)s - %(levelname)s - %(message)s')
    fmt.converter = time.gmtime  # timestamps rendered in UTC
    handler.setFormatter(fmt)
    logger.addHandler(handler)

    logger.debug('configured logger, time is logged in UTC')
    return logger
예제 #13
0
    def add_file_handler(logger: logging.Logger, path: str, formatter: Optional[logging.Formatter] = None,
                         level: Optional[int] = None) -> Optional[logging.FileHandler]:
        """Adds file handler to the given logger.

        Args:
            logger: Logger object to add the handler to.
            path: Path to a log file.
            formatter: Formatter object used to format logged messages.
            level: Severity threshold.

        Returns:
            Created file handler instance or None if creation failed.

        """
        try:
            file_handler = logging.FileHandler(path, 'w')
            # BUG FIX: explicit None checks -- the old truthiness tests
            # silently ignored level=logging.NOTSET (0).
            if level is not None:
                file_handler.setLevel(level)
            if formatter is not None:
                file_handler.setFormatter(formatter)
            logger.addHandler(file_handler)
        except (IOError, OSError):
            logger.warning('Can not create log in %s', path)
            return None
        else:
            return file_handler
예제 #14
0
파일: worker.py 프로젝트: zhaijf1992/faust
 def _setup_spinner_handler(
         self, logger: logging.Logger, level: int) -> None:
     """When a progress spinner is active, quieten the primary handler
     and mirror all records through a SpinnerHandler instead.

     NOTE(review): assumes logger.handlers[0] exists (the primary
     handler) -- confirm callers configure one before this runs.

     :param logger: logger to reconfigure.
     :param level: new threshold for the existing primary handler.
     """
     if self.spinner:
         # Raise the first handler's threshold so regular output does
         # not interleave with the spinner display.
         logger.handlers[0].setLevel(level)
         logger.addHandler(
             terminal.SpinnerHandler(self.spinner, level=logging.DEBUG))
         # The logger itself goes to DEBUG so the spinner sees everything.
         logger.setLevel(logging.DEBUG)
 def log_config(self, logger: logging.Logger):
     """Temporarily mirror *logger*'s records into the GUI log widget.

     Generator-based context: records emitted while suspended at the
     ``yield`` are forwarded to a GuiLogHandler wrapping ``self.log``,
     and the handler is always detached afterwards.
     NOTE(review): presumably decorated with @contextmanager at a site
     outside this view -- confirm before using it in a ``with`` block.
     """
     gui_logger = GuiLogHandler(self.log)
     try:
         logger.addHandler(gui_logger)
         yield
     finally:
         # Always detach, even if the body raises.
         logger.removeHandler(gui_logger)
예제 #16
0
def setup_logger(logger: logging.Logger, level: LevelIsh) -> None:
    """Configure *logger* with a single handler at *level*.

    Prefers logzero's colored formatter when that package is installed,
    warning (once per call) otherwise and falling back to a plain stdlib
    Formatter.  When COLLAPSE_DEBUG_LOGS is set, a CollapseDebugHandler
    is installed regardless of logzero's availability.
    """
    lvl = mklevel(level)
    try:
        import logzero  # type: ignore[import]
        formatter = logzero.LogFormatter(
            fmt=FORMAT_COLOR,
            datefmt=DATEFMT,
        )
        use_logzero = True
    except ModuleNotFoundError:
        warnings.warn(
            "You might want to install 'logzero' for nice colored logs!")
        formatter = logging.Formatter(fmt=FORMAT_NOCOLOR, datefmt=DATEFMT)
        use_logzero = False

    if use_logzero and not COLLAPSE_DEBUG_LOGS:  # all set, nothing to do
        # 'simple' setup
        logzero.setup_logger(logger.name, level=lvl, formatter=formatter)
        return

    # Manual setup: collapse-aware handler or a plain stream handler.
    h = CollapseDebugHandler(
    ) if COLLAPSE_DEBUG_LOGS else logging.StreamHandler()
    logger.setLevel(lvl)
    h.setLevel(lvl)
    h.setFormatter(formatter)
    logger.addHandler(h)
    logger.propagate = False  # ugh. otherwise it duplicates log messages
예제 #17
0
def set_logger(logger: logging.Logger) -> None:
    """Configure the given logger

    Replaces any existing handlers with a console handler plus a file
    handler writing to LOGS_PATH, and sets the level to ERROR.

    Args:
        logger: the logger to configure.

    Example:

        >>> logger = logging.getLogger('MyLogger')
        ... set_logger(logger)
    """
    # BUG FIX: iterate over a copy -- removing from logger.handlers while
    # iterating it directly skips every other handler, leaving stale ones
    # attached.
    for hdlr in list(logger.handlers):
        logger.removeHandler(hdlr)
    log_formatter = logging.Formatter(
        '[%(name)s][%(filename)s:%(lineno)d][%(asctime)s][%(levelname)-5.5s]: %(message)s'
    )

    console_handler = logging.StreamHandler()
    console_handler.setFormatter(log_formatter)

    file_handler = logging.FileHandler(f'{LOGS_PATH}', mode='a')
    file_handler.setFormatter(log_formatter)

    logger.addHandler(file_handler)
    logger.addHandler(console_handler)
    logger.setLevel(logging.ERROR)
예제 #18
0
def setup_logger(
        logger: logging.Logger,
        log_name: str = None,
        log_format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
        log_dir='./logs'):
    """Setup logger with ``INFO`` level stream output + ``DEBUG`` level rotating file log

    :param logger: logger to be wrapped. Do not wrap same logger twice!
    :param log_name: name of logfile to be created
    :param log_format: format of log output
    :param log_dir: path to rotating file log directory
    """
    fmt = logging.Formatter(log_format)
    name = log_name if log_name is not None else logger.name.lower()
    try:
        # DEBUG+ records are persisted to a midnight-rotated UTF-8 file.
        os.makedirs(log_dir, exist_ok=True)
        file_hdl = TimedRotatingFileHandler(f'{log_dir}/{name}.log',
                                            encoding='utf-8',
                                            backupCount=10,
                                            when='midnight',
                                            utc=True)
        file_hdl.setLevel(logging.DEBUG)
        file_hdl.setFormatter(fmt)
        logger.addHandler(file_hdl)
    except OSError:
        # Best effort: without a usable log dir we still log to the stream.
        pass
    # INFO+ records are echoed to the stream.
    stream_hdl = logging.StreamHandler()
    stream_hdl.setLevel(logging.INFO)
    stream_hdl.setFormatter(fmt)
    logger.addHandler(stream_hdl)
예제 #19
0
def set_logger(logger: logging.Logger,
               save_dir: str = None,
               quiet: bool = False):
    """
    Sets up a logger with a stream handler and two file handlers.

    The stream handler prints to the screen depending on the value of `quiet`.
    One file handler (verbose.log) saves all logs, the other (quiet.log) only saves important info.

    :param logger: A logger.
    :param save_dir: The directory in which to save the logs.
    :param quiet: Whether the stream handler should be quiet (i.e. print only important info).
    """
    # Console verbosity follows the `quiet` flag.
    console = logging.StreamHandler()
    console.setLevel(logging.INFO if quiet else logging.DEBUG)
    logger.addHandler(console)

    if save_dir is not None:
        # verbose.log gets everything; quiet.log only INFO and above.
        verbose_log = logging.FileHandler(
            os.path.join(save_dir, 'verbose.log'))
        verbose_log.setLevel(logging.DEBUG)
        quiet_log = logging.FileHandler(os.path.join(save_dir, 'quiet.log'))
        quiet_log.setLevel(logging.INFO)

        logger.addHandler(verbose_log)
        logger.addHandler(quiet_log)
예제 #20
0
    def test_thread_context_filter(
        log_store: MockLoggingHandler,
        logger: Logger,
        thread_context_filter: ThreadContextFilter,
        message: Any,
        expected_message: Any,
    ) -> None:
        """Log the same message before, during and after the filter is
        attached and verify only the middle record was transformed.

        NOTE(review): eval() reconstructs the captured string into a
        Python object for comparison; acceptable only because the test
        itself controls what is logged.
        """
        logger.addHandler(log_store)

        # 1st record: filter not yet attached.
        logger.info(message)

        logger.addFilter(thread_context_filter)

        # 2nd record: filter active.
        logger.info(message)

        logger.removeFilter(thread_context_filter)

        # 3rd record: filter detached again.
        logger.info(message)

        logged_messages: List[str] = log_store.messages["info"]
        assert len(logged_messages) == 3

        for i, logged_message in enumerate(logged_messages):
            try:
                # Plain strings raise NameError and are compared as-is.
                logged_message = eval(logged_message)
            except NameError:
                pass
            if i == 1:
                # message affected with the thread context logger
                assert logged_message == expected_message
            else:
                # message before/after the thread context logger
                assert logged_message == message

        log_store.reset()
예제 #21
0
파일: logging.py 프로젝트: solversa/py-evm
def setup_trinity_file_and_queue_logging(
        logger: Logger,
        formatter: Formatter,
        handler_stream: StreamHandler,
        chain_config: ChainConfig,
        level: int = logging.DEBUG
) -> Tuple[Logger, 'Queue[str]', QueueListener]:
    """Attach a rotating file handler to *logger* and build (but do not
    start) a QueueListener fanning records out to the stream and file
    handlers.

    :param logger: logger that receives the rotating file handler
    :param formatter: formatter applied to the file handler
    :param handler_stream: already-configured stream/console handler
    :param chain_config: supplies the logfile path
    :param level: threshold for the file handler
    :return: (logger, multiprocessing log queue, queue listener)
    """
    # Deferred import: avoids importing .mp at module load time.
    from .mp import ctx

    # Queue from the multiprocessing context so child processes can ship
    # records to this listener.
    log_queue = ctx.Queue()

    handler_file = RotatingFileHandler(str(chain_config.logfile_path),
                                       maxBytes=(10000000 * LOG_MAX_MB),
                                       backupCount=LOG_BACKUP_COUNT)

    handler_file.setLevel(level)
    handler_file.setFormatter(formatter)

    logger.addHandler(handler_file)

    # respect_handler_level=True lets each handler apply its own threshold.
    listener = QueueListener(
        log_queue,
        handler_stream,
        handler_file,
        respect_handler_level=True,
    )

    return logger, log_queue, listener
예제 #22
0
def add_file(logger: Logger, filename: str, verbose: str = "info"):
    """
    set up file handler to the logger with handlers

    :param logger: the logger
    :param filename: name of the logfile
    :type filename: str
    :param verbose: verbose level
    :type verbose: str
    """
    # Nothing to do if the logger already writes to a file.
    if any(isinstance(hdl, FileHandler) for hdl in logger.handlers):
        return

    # Keep a backup of any previous log file.
    if isfile(filename):
        movefile(filename, filename + "-bak")

    handler = FileHandler(filename)
    # Logger threshold comes from the verbose name; the file handler
    # itself accepts everything the logger lets through.
    logger.setLevel(getattr(logging, verbose.upper()))
    handler.setLevel(logging.DEBUG)
    logger.addHandler(handler)
예제 #23
0
    def __init__(self,
                 logger: logging.Logger,
                 level: str,
                 broker: opentrons.broker.Broker) -> None:
        """ Build the scraper.

        :param logger: The :py:class:`logging.logger` to scrape
        :param level: The log level to scrape ('none' disables scraping)
        :param broker: Which broker to subscribe to
        """
        self._logger = logger
        self._broker = broker
        # Queue fed by the AccumulatingHandler attached below.
        self._queue = queue.Queue()  # type: ignore
        if level != 'none':
            # NOTE(review): `level` is rebound from a name (str) to its
            # numeric value; unknown names silently fall back to WARNING.
            level = getattr(logging, level.upper(), logging.WARNING)
            self._logger.setLevel(level)
            logger.addHandler(
                AccumulatingHandler(
                    level,
                    self._queue))
        # Nesting depth of the currently open command.
        self._depth = 0
        # Accumulated command payloads, in arrival order.
        self._commands: List[Mapping[str, Mapping[str, Any]]] = []
        # Unsubscribe callback returned by the broker.
        self._unsub = self._broker.subscribe(
            opentrons.commands.command_types.COMMAND,
            self._command_callback)
예제 #24
0
def add_stream_handler(_logger: logging.Logger,
                       level: str = 'DEBUG') -> None:
    """Attach a console StreamHandler at *level* using the module formatter.

    :param _logger: logger to extend.
    :param level: level name for the new handler (default 'DEBUG').
    """
    handler = logging.StreamHandler()
    handler.setFormatter(formatter)  # module-level formatter
    handler.setLevel(level)

    _logger.addHandler(handler)
예제 #25
0
def add_file_handler(
    logger: logging.Logger,
    file_path: str,
    format: str = reconplogger_format,
    level: Optional[str] = 'DEBUG',
) -> logging.FileHandler:
    """Adds a file handler to a given logger.

    Args:
        logger: Logger object where to add the file handler.
        file_path: Path to log file for handler.
        format: Format for logging.
        level: Logging level for the handler.

    Returns:
        The handler object which could be used for removeHandler.
    """
    handler = logging.FileHandler(file_path)
    handler.setFormatter(logging.Formatter(format))
    if level is not None:
        # Validate the level name before applying it.
        if level not in logging_levels:
            raise ValueError('Invalid logging level: "' + str(level) + '".')
        handler.setLevel(logging_levels[level])
    logger.addHandler(handler)
    return handler
예제 #26
0
def init_logger(log: logging.Logger,
                cog_name: str,
                package_name: Optional[str] = None):
    """
    Prepare the logger for laggron cogs.

    Parameters
    ----------
    log: logging.Logger
        The logger object.
    cog_name: str
        The CamelCase name of the cog, used for cog data path.
    package_name: Optional[str]
        The name of the package, used for file names. Defaults to the lowercase cog name.
    """
    if package_name is None:
        package_name = cog_name.lower()
    formatter = logging.Formatter(
        "[{asctime}] [{levelname}] {name}: {message}",
        datefmt="%Y-%m-%d %H:%M:%S",
        style="{")
    # logging to a log file
    # file is automatically created by the module, if the parent foler exists
    # NOTE(review): this RotatingFileHandler accepts stem=/directory=
    # kwargs, so it must be a project subclass rather than
    # logging.handlers.RotatingFileHandler -- confirm at the import site.
    cog_path = cog_data_path(raw_name=cog_name)
    if cog_path.exists():
        file_handler = RotatingFileHandler(
            stem=package_name,
            directory=cog_path,
            maxBytes=1_000_000,
            backupCount=8,
            encoding="utf-8",
        )
        file_handler.setLevel(logging.DEBUG)
        file_handler.setFormatter(formatter)
        log.addHandler(file_handler)
예제 #27
0
def attempt_database_upgrade(oLogHandler=None):
    """Attempt to upgrade the database, going via a temporary memory copy.

    :param oLogHandler: optional logging.Handler for progress reporting.
    :return: True if the upgrade succeeded, False otherwise.
    """
    # Work on an in-memory SQLite copy first so a failed upgrade cannot
    # damage the real database before the final copy step.
    oTempConn = connectionForURI("sqlite:///:memory:")
    oLogger = Logger('attempt upgrade')
    if oLogHandler:
        oLogger.addHandler(oLogHandler)
    (bOK, aMessages) = create_memory_copy(oTempConn, oLogHandler)
    # Guard clauses + truthiness checks, consistent with the sibling
    # method-based implementation of this routine.
    if not bOK:
        oLogger.error("Unable to create memory copy. Database not upgraded.")
        if aMessages:
            oLogger.error("Errors reported %s", aMessages)
        return False
    oLogger.info("Copied database to memory, performing upgrade.")
    if aMessages:
        oLogger.info("Messages reported: %s", aMessages)
    (bOK, aMessages) = create_final_copy(oTempConn, oLogHandler)
    if not bOK:
        oLogger.critical("Unable to perform upgrade.")
        if aMessages:
            oLogger.error("Errors reported: %s", aMessages)
        oLogger.critical("!!YOUR DATABASE MAY BE CORRUPTED!!")
        return False
    oLogger.info("Everything seems to have gone OK")
    if aMessages:
        oLogger.info("Messages reported %s", aMessages)
    return True
예제 #28
0
def configure_logger_for_colour(logger: logging.Logger,
                                level: int = logging.INFO,
                                remove_existing: bool = False,
                                extranames: List[str] = None,
                                with_process_id: bool = False,
                                with_thread_id: bool = False) -> None:
    """
    Applies a preconfigured datetime/colour scheme to a logger.

    Should ONLY be called from the ``if __name__ == 'main'`` script;
    see https://docs.python.org/3.4/howto/logging.html#library-config.

    Args:
        logger: logger to modify
        level: log level to set
        remove_existing: remove existing handlers from logger first?
        extranames: additional names to append to the logger's name
            (effectively Optional; None means no extra names)
        with_process_id: include the process ID in the logger's name?
        with_thread_id: include the thread ID in the logger's name?
    """
    if remove_existing:
        logger.handlers = []  # http://stackoverflow.com/questions/7484454
    # Both the handler and the logger are set to the same level so records
    # below it are dropped as early as possible.
    handler = get_colour_handler(extranames,
                                 with_process_id=with_process_id,
                                 with_thread_id=with_thread_id)
    handler.setLevel(level)
    logger.addHandler(handler)
    logger.setLevel(level)
예제 #29
0
    def __init__(
        self,
        logger: logging.Logger = None,
        handlers: List[logging.Handler] = None,
        log_level: int = logging.INFO,
        correlation_attr_name: str = 'correlation_id'
    ):
        """
        Create new logging setting.

        Parameters
        ----------
        logger: logging.Logger
            Logger (defaults to ``ContextLogger('jeffy')``)
        handlers: List[logging.Handler]
            Logging handlers (defaults to a single StreamHandler)
        log_level: int = logging.INFO
            Log level
        correlation_attr_name: str = 'correlation_id'
            The attribute name of log records for correlation
        """
        # BUG FIX: the old defaults (ContextLogger(...) and a list holding
        # one StreamHandler) were built once at import time and shared by
        # every instance -- the classic mutable-default bug.  Resolve them
        # per call instead.
        if logger is None:
            logger = ContextLogger('jeffy')    # type: ignore
        if handlers is None:
            handlers = [logging.StreamHandler()]
        f = JsonFormatter()
        for h in handlers:
            h.setFormatter(f)
            logger.addHandler(h)
        logger.setLevel(log_level)
        self.logger = logger
        self.correlation_attr_name = correlation_attr_name
예제 #30
0
def _add_handler(logger: logging.Logger,
                 log_directory: Optional[str] = None,
                 **kwargs) -> None:
    """Adds a handler to a logger, either a logging.StreamHandler if log_directory is None
    otherwise a logging.FileHandler piped to the directory specified.

    Args:
        logger: the logging.Logger class to which you would like to add a handler
        log_directory: Optional log directory to pass. If not None a FileHandler is added,
            otherwise a StreamHandler
        **kwargs: Keyword arguments for the _get_log_filename
     """
    if log_directory is None:
        handler = logging.StreamHandler()
    else:
        if not os.path.exists(log_directory):
            os.makedirs(log_directory)

        handler = logging.FileHandler(
            os.path.join(log_directory, _get_log_filename(**kwargs)))

    # for some reason you need to set both of these - setting to same value to avoid confusion
    logger.setLevel(logging.INFO)
    handler.setLevel(logging.INFO)
    formatter = logging.Formatter(
        "%(levelname)s <%(thread)d> [%(asctime)s] %(name)s <%(filename)s:%(lineno)d> %(message)s"
    )
    handler.setFormatter(formatter)
    logger.addHandler(handler)
 def attempt_database_upgrade(self, oLogHandler=None):
     """Attempt to upgrade the database,
        going via a temporary memory copy.

     :param oLogHandler: optional logging.Handler attached to the
         progress logger (may be None).
     :return: True on success, False otherwise.
     """
     # Work on an in-memory SQLite copy first so a failed upgrade cannot
     # damage the real database before the final copy step.
     oTempConn = connectionForURI("sqlite:///:memory:")
     oLogger = Logger('attempt upgrade')
     if oLogHandler:
         oLogger.addHandler(oLogHandler)
     (bOK, aMessages) = self.create_memory_copy(oTempConn, oLogHandler)
     if bOK:
         oLogger.info("Copied database to memory, performing upgrade.")
         if aMessages:
             oLogger.info("Messages reported: %s", aMessages)
         # Write the upgraded in-memory copy back over the real database.
         (bOK, aMessages) = self.create_final_copy(oTempConn, oLogHandler)
         if bOK:
             oLogger.info("Everything seems to have gone OK")
             if aMessages:
                 oLogger.info("Messages reported %s", aMessages)
             return True
         # Failure here may leave the real database partially written.
         oLogger.critical("Unable to perform upgrade.")
         if aMessages:
             oLogger.error("Errors reported: %s", aMessages)
         oLogger.critical("!!YOUR DATABASE MAY BE CORRUPTED!!")
     else:
         oLogger.error(
             "Unable to create memory copy. Database not upgraded.")
         if aMessages:
             oLogger.error("Errors reported %s", aMessages)
     return False
예제 #32
0
def setup_logger(logger: logging.Logger, debug_mode, log_file):
    """Configure *logger* with a stdout handler and an optional file log.

    :param logger: logger to configure.
    :param debug_mode: if truthy, echo DEBUG records (with timestamps)
        to stdout; otherwise the console shows INFO and above.
    :param log_file: path of a DEBUG log file, or a falsy value to
        disable file logging.
    """
    logger.setLevel(logging.DEBUG)

    debug_formatter = logging.Formatter(
        '%(asctime)s - %(name)s - %(levelname)s - %(message)s')

    std_handler = logging.StreamHandler(sys.stdout)
    # BUG FIX: the original tested ``log_file == True``, which is False
    # for any path string, so the file handler was never attached.
    if log_file:
        file_handler = logging.FileHandler(filename=log_file)
        file_handler.setLevel(logging.DEBUG)
        file_handler.setFormatter(debug_formatter)
        logger.addHandler(file_handler)

    if debug_mode:
        print("Debug mode")
        std_handler.setLevel(logging.DEBUG)
        std_handler.setFormatter(debug_formatter)
    else:
        std_handler.setLevel(logging.INFO)
        logger.setLevel(logging.INFO)

    logger.addHandler(std_handler)
예제 #33
0
    def configure(self, logger: logging.Logger, verbosity: int = 0) -> int:
        """
        Add all configured handlers to the supplied logger. If verbosity > 0 then make sure we have a console logger
        and force the level of the console logger based on the verbosity.

        :param logger: The logger to add the handlers to
        :param verbosity: The verbosity level given as command line argument
        :return: The lowest log level that is going to be handled
        """
        # Drop anything from a previous configuration; iterate a copy
        # because removeHandler mutates the list.
        for old in list(logger.handlers):
            logger.removeHandler(old)

        # Instantiate and attach each handler, remembering the console one.
        console = None
        for factory in self.handlers:
            built = factory()
            logger.addHandler(built)

            if isinstance(factory, ConsoleHandlerFactory):
                console = built

        # Force console verbosity per the command-line flag.
        set_verbosity_logger(logger, verbosity, console)

        # Lowest handled level (capped at CRITICAL, matching the original
        # accumulator's starting value), so callers can pre-filter lower
        # priority messages earlier where appropriate.
        return min([hdl.level for hdl in logger.handlers]
                   + [logging.CRITICAL])
예제 #34
0
def setup_formatter(logger: logging.Logger) -> None:
    """Set up the console formatter for a given logger."""

    # Deregister any previous console loggers.
    if hasattr(logger, "streamlit_console_handler"):
        logger.removeHandler(
            logger.streamlit_console_handler)  # type: ignore[attr-defined]

    # The handler is stashed on the logger object itself so the next call
    # can locate and replace it.
    logger.streamlit_console_handler = logging.StreamHandler(
    )  # type: ignore[attr-defined]

    if config._config_options:
        # logger is required in ConfigOption.set_value
        # Getting the config option before the config file has been parsed
        # can create an infinite loop
        message_format = config.get_option("logger.messageFormat")
    else:
        message_format = DEFAULT_LOG_MESSAGE
    formatter = logging.Formatter(fmt=message_format)
    # Millisecond timestamps use a dot separator (e.g. 12:00:00.123).
    formatter.default_msec_format = "%s.%03d"
    logger.streamlit_console_handler.setFormatter(
        formatter)  # type: ignore[attr-defined]

    # Register the new console logger.
    logger.addHandler(
        logger.streamlit_console_handler)  # type: ignore[attr-defined]
예제 #35
0
def main():
    """Upload files into GridFS, queue one pipeline per file via the Manager,
    and block until every pipeline broadcasts completion.

    Usage: script.py <file-to-add-1> [<file-to-add-2> ...]
    Exits with status 1 when no filenames are given.
    """
    if len(sys.argv) == 1:
        print('ERROR: use: {} <file-to-add-1[ file-to-add-2[ ...]]>'\
              .format(sys.argv[0]))
        sys.exit(1)

    logger = Logger('Manager')
    handler = StreamHandler(sys.stdout)
    formatter = Formatter('%(asctime)s - %(name)s - %(levelname)s - '
                          '%(message)s')
    handler.setFormatter(formatter)
    logger.addHandler(handler)

    logger.info('Connecting to Manager...')
    client = ManagerClient()
    client.connect(('localhost', 5555), ('localhost', 5556))

    logger.info('Getting configuration and connecting to MongoDB...')
    client.send_api_request({'command': 'get configuration'})
    conf = client.get_api_reply()
    connection = Connection(host=conf['db']['host'], port=conf['db']['port'])
    database = connection[conf['db']['database']]
    gridfs = GridFS(database, collection=conf['db']['gridfs_collection'])

    logger.info('Inserting files...')
    document_ids = []
    for filename in sys.argv[1:]:
        logger.info('  {}'.format(filename))
        # Context manager guarantees the handle is closed even if read()
        # raises (the previous open/read/close leaked it on error).
        with open(filename, 'r') as fp:
            contents = fp.read()
        document_ids.append(gridfs.put(contents, filename=filename))

    logger.info('Creating pipelines...')
    pipeline_ids = []
    for index, document_id in enumerate(document_ids):
        pipeline = {'id': str(index), '_id': str(document_id)}
        client.send_api_request({'command': 'add pipeline', 'data': pipeline})
        logger.info('Sent pipeline: {}'.format(pipeline))
        reply = client.get_api_reply()
        logger.info('Received reply: {}'.format(reply))
        # Subscribe before tracking so no completion broadcast is missed.
        subscribe_message = 'pipeline finished: {}'.format(reply['pipeline id'])
        client.broadcast_subscribe(subscribe_message)
        pipeline_ids.append(reply['pipeline id'])

    try:
        # Drain completion broadcasts until every queued pipeline is done.
        while True:
            message = client.broadcast_receive()
            logger.info('Received from manager broadcast: {}'.format(message))
            if message.startswith('pipeline finished: '):
                pipeline_id = message.split(': ')[1]
                if pipeline_id in pipeline_ids:
                    pipeline_ids.remove(pipeline_id)
                if not pipeline_ids:
                    break
    except KeyboardInterrupt:
        client.close_sockets()
예제 #36
0
def copy_database(oOrigConn, oDestConnn, oLogHandler=None):
    """Copy the database, with no attempts to upgrade.

       This is a straight copy, with no provision for funky stuff
       Compatibility of database structures is assumed, but not checked.

       :param oOrigConn: connection to the source database
       :param oDestConnn: connection to the destination database
       :param oLogHandler: optional logging handler used for progress
           reporting (may expose set_total for progress bars)

       NOTE(review): bRes and aMessages are maintained but never
       returned from this function -- confirm whether a trailing
       ``return (bRes, aMessages)`` was lost.
       """
    # Not checking versions probably should be fixed
    # Copy tables needed before we can copy AbstractCard
    flush_cache()
    oVer = DatabaseVersion()
    oVer.expire_cache()
    oLogger = Logger('copy DB')
    if oLogHandler:
        oLogger.addHandler(oLogHandler)
        if hasattr(oLogHandler, 'set_total'):
            # 14 lookup tables plus one progress tick per card / card-set row
            iTotal = 14 + AbstractCard.select(connection=oOrigConn).count() + \
                    PhysicalCard.select(connection=oOrigConn).count() + \
                    PhysicalCardSet.select(connection=oOrigConn).count()
            oLogHandler.set_total(iTotal)
    bRes = True
    aMessages = []
    oTrans = oDestConnn.transaction()
    # (copy function, human-readable table name, whether the copy function
    #  does its own per-row logging and therefore takes the logger itself)
    aToCopy = [
            (copy_rarity, 'Rarity table', False),
            (copy_expansion, 'Expansion table', False),
            (copy_discipline, 'Discipline table', False),
            (copy_clan, 'Clan table', False),
            (copy_creed, 'Creed table', False),
            (copy_virtue, 'Virtue table', False),
            (copy_card_type, 'CardType table', False),
            (copy_ruling, 'Ruling table', False),
            (copy_discipline_pair, 'DisciplinePair table', False),
            (copy_rarity_pair, 'RarityPair table', False),
            (copy_sect, 'Sect table', False),
            (copy_title, 'Title table', False),
            (copy_artist, 'Artist table', False),
            (copy_keyword, 'Keyword table', False),
            (copy_abstract_card, 'AbstractCard table', True),
            (copy_physical_card, 'PhysicalCard table', True),
            (copy_physical_card_set, 'PhysicalCardSet table', True),
            ]
    for fCopy, sName, bPassLogger in aToCopy:
        try:
            # Once one table fails we skip the rest but still fall through,
            # so aMessages accumulates only the first failure.
            if bRes:
                if bPassLogger:
                    fCopy(oOrigConn, oTrans, oLogger)
                else:
                    fCopy(oOrigConn, oTrans)
        except SQLObjectNotFound, oExp:
            bRes = False
            aMessages.append('Unable to copy %s: Aborting with error: %s'
                    % (sName, oExp))
        else:
            # Commit after each successful table copy so a later failure
            # does not roll back the tables already copied.
            oTrans.commit()
            oTrans.cache.clear()
            if not bPassLogger:
                oLogger.info('%s copied' % sName)
예제 #37
0
def log_to_file(logger: Logger,
                filename: str,
                log_format: str="%(asctime)s %(levelname)-8s %(message)s",
                ) -> None:
    """Attach a FileHandler writing to *filename*, formatted with *log_format*.

    Note: `filename` should be declared in zproject/settings.py in ZULIP_PATHS."""
    file_handler = logging.FileHandler(filename)
    file_handler.setFormatter(logging.Formatter(log_format))
    logger.addHandler(file_handler)
예제 #38
0
def write_combos():
    """Append every first/last name combination to namelist.txt, logging long ones."""
    log = Logger('name', 20)
    log.addHandler(FileHandler('flog.log'))
    combos = ("{} {}".format(first, last)
              for first in names(0, 'names.txt')
              for last in names(1, 'names.txt'))
    with open('namelist.txt', 'a') as out:
        for combo in combos:
            # Combinations longer than 17 characters get flagged in the log.
            if len(combo) > 17:
                log.info('{} is {} characters long'.format(combo, len(combo)))
            out.write('{}\n'.format(combo))
예제 #39
0
def main():
    """Configure a stdout logger and run a Pipeliner against the local router."""
    console = StreamHandler(stdout)
    console.setFormatter(Formatter('%(asctime)s - %(name)s - %(levelname)s - '
                                   '%(message)s'))
    logger = Logger('Pipeliner')
    logger.addHandler(console)
    pipeliner = Pipeliner(api='tcp://localhost:5555',
                          broadcast='tcp://localhost:5556', logger=logger)
    pipeliner.start()
def configure_logger_for_colour(log: logging.Logger,
                                remove_existing: bool = True) -> None:
    """
    Applies a preconfigured datetime/colour scheme to a logger.

    Should ONLY be called from the "if __name__ == 'main'" script:
        https://docs.python.org/3.4/howto/logging.html#library-config
    """
    if remove_existing:
        # Detach every previously registered handler.
        # http://stackoverflow.com/questions/7484454
        log.handlers = []
    log.addHandler(COLOUR_HANDLER)
예제 #41
0
파일: my_router.py 프로젝트: NAMD/pypelinin
def main():
    """Start a Router on ports 5555/5556, logging to stdout."""
    log = Logger("My Router")
    console = StreamHandler(stdout)
    console.setFormatter(Formatter("%(asctime)s - %(name)s - %(levelname)s - " "%(message)s"))
    log.addHandler(console)
    default_config = {"store": {"monitoring filename": "/tmp/monitoring.log"}, "monitoring interval": 60}
    router = Router(("*", 5555), ("*", 5556), default_config, log)
    router.start()
예제 #42
0
def create_logger(name):
    """Return a Logger named *name* with one DEBUG-level console handler attached."""
    console = StreamHandler()
    console.setLevel(DEBUG)
    # Attach the timestamp/name/level formatter to the handler.
    console.setFormatter(
        Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s'))
    logger = Logger(name)
    logger.addHandler(console)
    return logger
예제 #43
0
파일: my_router.py 프로젝트: NAMD/pypelinin
def main():
    """Start a test Router on ports 5555/5556 with a stdout logger."""
    log = Logger('Test Router')
    console = StreamHandler(stdout)
    console.setFormatter(Formatter('%(asctime)s - %(name)s - %(levelname)s - '
                                   '%(message)s'))
    log.addHandler(console)
    default_config = {'store': {'data': 'test'}, 'monitoring interval': 60, }
    router = Router(('*', 5555), ('*', 5556), default_config, log)
    router.start()
예제 #44
0
파일: my_router.py 프로젝트: NAMD/pypelinin
def main():
    """Start a Router on ports 12345/12346, logging to stdout."""
    log = Logger('My Router')
    console = StreamHandler(stdout)
    console.setFormatter(Formatter('%(asctime)s - %(name)s - %(levelname)s - '
                                   '%(message)s'))
    log.addHandler(console)
    config = {'store': {'monitoring filename': '/tmp/monitoring.log'},
              'monitoring interval': 60, }
    router = Router(('*', 12345), ('*', 12346), config, log)
    router.start()
예제 #45
0
def configure_logger_for_colour(log: logging.Logger,
                                level: int = logging.INFO,
                                remove_existing: bool = False,
                                extranames: List[str] = None) -> None:
    """
    Applies a preconfigured datetime/colour scheme to a logger.

    Should ONLY be called from the "if __name__ == 'main'" script:
        https://docs.python.org/3.4/howto/logging.html#library-config

    :param log: logger to configure
    :param level: logging level applied to the logger (default INFO)
    :param remove_existing: if True, detach all pre-existing handlers first
    :param extranames: extra names forwarded to get_colour_handler
        (NOTE(review): annotation should be Optional[List[str]] since the
        default is None -- confirm typing imports before changing)
    """
    if remove_existing:
        log.handlers = []  # http://stackoverflow.com/questions/7484454
    log.addHandler(get_colour_handler(extranames))
    log.setLevel(level)
예제 #46
0
    def __init__(self,
                 level: int = logging.INFO,
                 window_title: str = "Python log",
                 logger: logging.Logger = None,
                 min_width: int = 800,
                 min_height: int = 400,
                 maximum_block_count: int = 1000) -> None:
        """
        Build the log-viewer window: a read-only text area plus
        Clear/Copy buttons, and optionally attach our HTML handler
        to *logger*.

        :param level: logging level passed to the HtmlColorHandler
        :param window_title: window title text
        :param logger: if given, this window's handler is added to it
        :param min_width: minimum window width in pixels (0/falsy = unset)
        :param min_height: minimum window height in pixels (0/falsy = unset)
        :param maximum_block_count: cap on retained log lines in the widget
        """
        super().__init__()
        self.setStyleSheet(LOGEDIT_STYLESHEET)

        self.handler = HtmlColorHandler(self.log_message, level)
        self.may_close = False
        self.set_may_close(self.may_close)

        self.setWindowTitle(window_title)
        if min_width:
            self.setMinimumWidth(min_width)
        if min_height:
            self.setMinimumHeight(min_height)

        log_group = StyledQGroupBox("Log")
        log_layout_1 = QVBoxLayout()
        log_layout_2 = QHBoxLayout()
        self.log = QPlainTextEdit()
        # QPlainTextEdit better than QTextEdit because it supports
        # maximumBlockCount while still allowing HTML (via appendHtml,
        # not insertHtml).
        self.log.setReadOnly(True)
        self.log.setLineWrapMode(QPlainTextEdit.NoWrap)
        self.log.setMaximumBlockCount(maximum_block_count)
        log_clear_button = QPushButton('Clear log')
        log_clear_button.clicked.connect(self.log.clear)
        log_copy_button = QPushButton('Copy to clipboard')
        log_copy_button.clicked.connect(self.copy_whole_log)
        log_layout_2.addWidget(log_clear_button)
        log_layout_2.addWidget(log_copy_button)
        log_layout_2.addStretch()
        log_layout_1.addWidget(self.log)
        log_layout_1.addLayout(log_layout_2)
        log_group.setLayout(log_layout_1)

        main_widget = QWidget(self)
        self.setCentralWidget(main_widget)
        main_layout = QVBoxLayout(main_widget)
        main_layout.addWidget(log_group)

        # Route log records through a Qt signal so handler calls from other
        # threads are marshalled onto the GUI thread.
        self.emit_msg.connect(self.log_internal)

        if logger:
            logger.addHandler(self.get_handler())
예제 #47
0
def fetch_data(oFile, oOutFile=None, sHash=None, oLogHandler=None,
        fErrorHandler=None):
    """Fetch data from a file'ish object (WwFile, urlopen or file)

       :param oFile: source object; when it exposes a callable info()
           (urllib-style), the Content-Length header drives progress logging
       :param oOutFile: optional destination file; when given, data is
           streamed into it and sData stays None
       :param sHash: NOTE(review): accepted but never read in this
           function -- presumably used by callers/overrides; confirm
       :param oLogHandler: optional logging handler for download progress
       :param fErrorHandler: optional callback invoked on urllib2.URLError
           (the error is re-raised when no handler is supplied)

       NOTE(review): sData is computed on every path but this function has
       no return statement -- confirm whether `return sData` was lost.
       """
    try:
        if hasattr(oFile, 'info') and callable(oFile.info):
            sLength = oFile.info().getheader('Content-Length')
        else:
            sLength = None

        if sLength:
            # Known size: read in 10000-byte chunks with progress logging
            oLogger = Logger('Sutekh data fetcher')
            if oLogHandler is not None:
                oLogger.addHandler(oLogHandler)
            aData = []
            iLength = int(sLength)
            if hasattr(oLogHandler, 'set_total'):
                # We promote to next integer, as we emit a signal
                # for any left over bits
                oLogHandler.set_total((iLength + 9999) // 10000)
            iTotal = 0
            bCont = True
            while bCont:
                sInf = oFile.read(10000)
                iTotal += len(sInf)
                if sInf:
                    oLogger.info('%d downloaded', iTotal)
                    if oOutFile:
                        oOutFile.write(sInf)
                    else:
                        aData.append(sInf)
                else:
                    # Empty read signals end of stream
                    bCont = False
            if oOutFile:
                sData = None
            else:
                sData = ''.join(aData)
        else:
            # Just try and download
            if oOutFile:
                oOutFile.write(oFile.read())
                sData = None
            else:
                sData = oFile.read()
    except urllib2.URLError, oExp:
        if fErrorHandler:
            fErrorHandler(oExp)
            sData = None
        else:
            raise
예제 #48
0
def copy_to_new_abstract_card_db(oOrigConn, oNewConn, oCardLookup,
        oLogHandler=None):
    """Copy the card sets to a new Physical Card and Abstract Card List.

      Given an existing database, and a new database created from
      a new cardlist, copy the CardSets, going via CardSetHolders, so we
      can adapt to changed names, etc.

      :param oOrigConn: connection to the existing database
      :param oNewConn: connection to the freshly created database
      :param oCardLookup: lookup object used by create_pcs to resolve cards
      :param oLogHandler: optional logging handler for progress reporting
      :return: (True, []) -- success flag and (always empty) message list
      """
    # pylint: disable-msg=R0914
    # we need a lot of variables here
    aPhysCardSets = []
    oOldConn = sqlhub.processConnection
    sqlhub.processConnection = oOrigConn
    # Copy Physical card sets
    oLogger = Logger('copy to new abstract card DB')
    if oLogHandler:
        oLogger.addHandler(oLogHandler)
        if hasattr(oLogHandler, 'set_total'):
            # One tick for the in-memory copy plus one per card set created
            iTotal = 1 + PhysicalCardSet.select(connection=oOrigConn).count()
            oLogHandler.set_total(iTotal)
    aSets = list(PhysicalCardSet.select(connection=oOrigConn))
    bDone = False
    aDone = []
    # Ensure we only process a set after its parent
    while not bDone:
        aToDo = []
        for oSet in aSets:
            if oSet.parent is None or oSet.parent in aDone:
                oCS = make_card_set_holder(oSet, oOrigConn)
                aPhysCardSets.append(oCS)
                aDone.append(oSet)
            else:
                # Parent not copied yet; retry on the next pass
                aToDo.append(oSet)
        if not aToDo:
            bDone = True
        else:
            aSets = aToDo
    # Save the current mapping
    oLogger.info('Memory copies made')
    # Create the cardsets from the holders
    dLookupCache = {}
    sqlhub.processConnection = oNewConn
    for oSet in aPhysCardSets:
        # create_pcs will manage transactions for us
        oSet.create_pcs(oCardLookup, dLookupCache)
        oLogger.info('Physical Card Set: %s', oSet.name)
        sqlhub.processConnection.cache.clear()
    sqlhub.processConnection = oOldConn
    return (True, [])
예제 #49
0
 def do_read_list(self, oFile, dSelected, iClashMode):
     """Read the selected list of card sets

        :param oFile: zip file wrapper to read the card sets from
        :param dSelected: card sets selected for import
        :param iClashMode: how to handle name clashes with existing sets

        Shows a progress dialog and loops until every selected set has
        been read (parents may need to be read before children).
        """
     oLogHandler = SutekhCountLogHandler()
     oProgressDialog = ProgressDialog()
     oProgressDialog.set_description("Importing Files")
     oLogger = Logger('Read zip file')
     oLogger.addHandler(oLogHandler)
     oLogHandler.set_dialog(oProgressDialog)
     oLogHandler.set_total(len(dSelected))
     oProgressDialog.show()
     bDone = False
     # _read_heart returns the sets it could not read yet; loop until empty
     while not bDone:
         dSelected = self._read_heart(oFile, dSelected, oLogger, iClashMode)
         bDone = len(dSelected) == 0
     oProgressDialog.destroy()
예제 #50
0
class RulingParser(SutekhBaseHTMLParser):
    """Actual Parser for the WW rulings HTML files."""

    def __init__(self, oLogHandler):
        """
        :param oLogHandler: optional logging handler attached to the
            parser's internal logger (None to leave the logger bare)
        """
        # super().__init__ calls reset, so we need this first
        self.oLogger = Logger('WW Rulings parser')
        if oLogHandler is not None:
            self.oLogger.addHandler(oLogHandler)
        super(RulingParser, self).__init__()
        # No need to touch self._oState, reset will do that for us

    def reset(self):
        """Reset the parser"""
        super(RulingParser, self).reset()
        # Parsing always restarts outside any recognised section
        self._oState = NoSection(self.oLogger)
예제 #51
0
파일: web.py 프로젝트: wwqgtxx/wwqLyParse
def run_app(app: Union[Application, Awaitable[Application]], *,
            host: Optional[str]=None,
            port: Optional[int]=None,
            path: Optional[str]=None,
            sock: Optional[socket.socket]=None,
            shutdown_timeout: float=60.0,
            ssl_context: Optional[SSLContext]=None,
            print: Callable[..., None]=print,
            backlog: int=128,
            access_log_class: Type[AbstractAccessLogger]=AccessLogger,
            access_log_format: str=AccessLogger.LOG_FORMAT,
            access_log: logging.Logger=access_logger,
            handle_signals: bool=True,
            reuse_address: Optional[bool]=None,
            reuse_port: Optional[bool]=None) -> None:
    """Run an app locally

    Blocks the calling thread on the event loop until the server exits
    (GracefulExit or KeyboardInterrupt), then cancels outstanding tasks,
    shuts down async generators (Python 3.6+) and closes the loop.
    All keyword arguments are forwarded to ``_run_app``.
    """
    loop = asyncio.get_event_loop()

    # Configure if and only if in debugging mode and using the default logger
    if loop.get_debug() and access_log.name == 'aiohttp.access':
        if access_log.level == logging.NOTSET:
            access_log.setLevel(logging.DEBUG)
        if not access_log.hasHandlers():
            access_log.addHandler(logging.StreamHandler())

    try:
        loop.run_until_complete(_run_app(app,
                                         host=host,
                                         port=port,
                                         path=path,
                                         sock=sock,
                                         shutdown_timeout=shutdown_timeout,
                                         ssl_context=ssl_context,
                                         print=print,
                                         backlog=backlog,
                                         access_log_class=access_log_class,
                                         access_log_format=access_log_format,
                                         access_log=access_log,
                                         handle_signals=handle_signals,
                                         reuse_address=reuse_address,
                                         reuse_port=reuse_port))
    except (GracefulExit, KeyboardInterrupt):  # pragma: no cover
        pass
    finally:
        # Best-effort cleanup so the process exits with no pending tasks
        _cancel_all_tasks(loop)
        if sys.version_info >= (3, 6):  # don't use PY_36 to pass mypy
            loop.run_until_complete(loop.shutdown_asyncgens())
        loop.close()
예제 #52
0
 def do_dump_list_to_zip(self, aCSList, oLogHandler=None):
     """Handle dumping a list of cards to the zip file with log fiddling

        :param aCSList: card sets to write (list or select-result object)
        :param oLogHandler: optional logging handler for progress output
        :return: the list produced by write_pcs_list_to_zip
        """
     self.__open_zip_for_write()
     oLogger = Logger('Write zip file')
     if oLogHandler is not None:
         oLogger.addHandler(oLogHandler)
         if hasattr(oLogHandler, 'set_total'):
             if hasattr(aCSList, 'count'):
                 # Handle case we have a select result list
                 iTotal = aCSList.count()
                 oLogHandler.set_total(iTotal)
             else:
                 oLogHandler.set_total(len(aCSList))
     aPCSList = self.write_pcs_list_to_zip(aCSList, oLogger)
     self.__close_zip()
     return aPCSList
예제 #53
0
파일: log.py 프로젝트: bdarnell/tornado
def enable_pretty_logging(options: Any = None, logger: logging.Logger = None) -> None:
    """Turns on formatted logging output as configured.

    This is called automatically by `tornado.options.parse_command_line`
    and `tornado.options.parse_config_file`.

    :param options: options object carrying the logging settings;
        defaults to the global ``tornado.options.options``
    :param logger: logger to configure; defaults to the root logger
    :raises ValueError: when log_rotate_mode is neither "size" nor "time"
    """
    if options is None:
        import tornado.options

        options = tornado.options.options
    # "none" (any case) or an unset logging option disables configuration
    if options.logging is None or options.logging.lower() == "none":
        return
    if logger is None:
        logger = logging.getLogger()
    logger.setLevel(getattr(logging, options.logging.upper()))
    if options.log_file_prefix:
        # File logging: rotate either by size or by time
        rotate_mode = options.log_rotate_mode
        if rotate_mode == "size":
            channel = logging.handlers.RotatingFileHandler(
                filename=options.log_file_prefix,
                maxBytes=options.log_file_max_size,
                backupCount=options.log_file_num_backups,
                encoding="utf-8",
            )  # type: logging.Handler
        elif rotate_mode == "time":
            channel = logging.handlers.TimedRotatingFileHandler(
                filename=options.log_file_prefix,
                when=options.log_rotate_when,
                interval=options.log_rotate_interval,
                backupCount=options.log_file_num_backups,
                encoding="utf-8",
            )
        else:
            error_message = (
                "The value of log_rotate_mode option should be "
                + '"size" or "time", not "%s".' % rotate_mode
            )
            raise ValueError(error_message)
        # File output is never colourised
        channel.setFormatter(LogFormatter(color=False))
        logger.addHandler(channel)

    # Log to stderr when requested, or by default when no other handler exists
    if options.log_to_stderr or (options.log_to_stderr is None and not logger.handlers):
        # Set up color if we are in a tty and curses is installed
        channel = logging.StreamHandler()
        channel.setFormatter(LogFormatter())
        logger.addHandler(channel)
예제 #54
0
class logWriter(object):
    """Thin wrapper around a syslog-backed Logger.

    Messages are prefixed with a textual level name (via ``logLevelStr``)
    and sent to the local syslog daemon through /dev/log.
    """

    def __init__(self, logFacility = SysLogHandler.LOG_LOCAL0):
        """
        :param logFacility: syslog facility to log under
        """
        # Renamed from `format`, which shadowed the builtin of that name.
        formatter = Formatter("%(levelname)-12s %(asctime)s %(process)d %(message)s")

        # NOTE(review): Logger() normally takes a name string; passing the
        # int SysLogHandler.LOG_DEBUG here looks suspicious -- confirm intent.
        self.sLogger = Logger(SysLogHandler.LOG_DEBUG)

        self.sysHandler = SysLogHandler(address = '/dev/log', facility = logFacility)
        self.sysHandler.setFormatter(formatter)
        self.sLogger.addHandler(self.sysHandler)

    def __del__(self):
        # Flush any buffered records and release the syslog socket.
        self.sysHandler.flush()
        self.sysHandler.close()

    def sendMsg(self, level, msg):
        """Log *msg* at *level*, prefixed with the level's string name."""
        self.sLogger.log(level, logLevelStr[level] + msg)
예제 #55
0
파일: my_broker.py 프로젝트: NAMD/pypelinin
def main():
    """Start a Broker wired to the local router, logging to stdout."""
    console = StreamHandler(stdout)
    console.setFormatter(Formatter('%(asctime)s - %(name)s - %(levelname)s - '
                                   '%(message)s'))
    logger = Logger('Broker')
    logger.addHandler(console)
    broker = Broker(api='tcp://localhost:12345',       # router API endpoint
                    broadcast='tcp://localhost:12346', # router broadcast endpoint
                    # store class used to fetch/save worker data
                    store_class=NullStore,
                    logger=logger,
                    # module containing the worker implementations
                    workers='workers',
                    # run 4 workers per CPU core
                    number_of_workers=cpu_count() * 4)
    broker.start()
예제 #56
0
def get_debug_logger(name, strm=None):
    """Create a debug-print function backed by a private Logger.

    Returns the logger's ``debug`` call wrapped in a function carrying
    extra attributes:

    * ``return.logger``  -- the underlying ``logging.Logger``
    * ``return.handler`` -- the attached ``StreamHandler`` (writing to *strm*)
    * ``return.pp``      -- pretty-printing variant of the log call

    A fresh ``Logger`` instance is constructed directly (NOT via
    ``logging.getLogger``), so repeated calls with the same *name* get
    independent loggers and never emit duplicate output.
    """
    from logging import Logger, StreamHandler, DEBUG
    from pprint import PrettyPrinter

    private_logger = Logger(name)

    def debug(*args, **kwargs):
        return private_logger.debug(*args, **kwargs)

    stream_handler = StreamHandler(stream=strm)
    private_logger.addHandler(stream_handler)

    pformat = PrettyPrinter(indent=2).pformat
    debug.logger = private_logger
    debug.handler = stream_handler
    debug.pp = lambda *args, **kwargs: debug(pformat(*args, **kwargs))
    return debug
예제 #57
0
 def do_restore_from_zip(self, oCardLookup=DEFAULT_LOOKUP,
         oLogHandler=None):
     """Recover data from the zip file

        Validates that the zip contains at least one PhysicalCardSet (or
        an old-style PhysicalCard list), refreshes the card-set tables,
        then restores every card set, ensuring parents are read before
        children.

        :param oCardLookup: lookup used when recreating the card sets
        :param oLogHandler: optional logging handler for progress output
        :raises IOError: when the zip holds no valid card sets
        """
     self._aWarnings = []
     bTablesRefreshed = False
     bOldStyle = False
     self.__open_zip_for_read()
     oLogger = Logger('Restore zip file')
     if oLogHandler is not None:
         oLogger.addHandler(oLogHandler)
         if hasattr(oLogHandler, 'set_total'):
             oLogHandler.set_total(len(self.oZip.infolist()))
     # We do this so we can accomodate user created zipfiles,
     # that don't nessecarily have the ordering we want
     oIdParser = IdentifyXMLFile()
     # check that the zip file contains at least 1 PCS or the old
     # PhysicalCard list
     for oItem in self.oZip.infolist():
         oData = self.oZip.read(oItem.filename)
         _parse_string(oIdParser, oData, None)
         if ((oIdParser.type == 'PhysicalCard' or
                 oIdParser.type == 'PhysicalCardSet') and not
                 bTablesRefreshed):
             # We delete the Physical Card Sets
             # Since this is restoring the contents of a zip file,
             # hopefully this is safe to do
             # if we fail, the database will be in an inconsitent state,
             # but that's going to be true anyway
             refresh_tables(PHYSICAL_SET_LIST, sqlhub.processConnection)
             bTablesRefreshed = True
         if oIdParser.type == 'PhysicalCard':
             bOldStyle = True
     if not bTablesRefreshed:
         raise IOError("No valid card sets found in the zip file.")
     # We try and restore the old PCS's ensuring parents exist
     dLookupCache = {}
     aToRead = self.oZip.infolist()
     # read_items returns the entries it could not restore yet (missing
     # parents); loop until everything has been read
     while len(aToRead) > 0:
         aToRead = self.read_items(aToRead, oCardLookup, oLogger, bOldStyle,
                 dLookupCache)
     self.__close_zip()
예제 #58
0
    def __init__(self):
        """
        Initialise the logging utility: an INFO-level logger writing to a
        daily-named UTF-8 log file.
        """

        # Build the log file name: <logfile_path>YYYY-MM-DD.log
        filename = logfile_path + datetime.now().strftime('%Y-%m-%d') + ".log"
        # Log record layout
        formatter = Formatter("%(asctime)s  - %(levelname)s - %(message)s")
        # File handler; delay=True defers creating the file until first write
        filehandler = FileHandler(filename, mode='a', encoding='utf-8', delay=True)
        # Apply the record layout to the handler
        filehandler.setFormatter(formatter)
        # Create the logger instance
        logg = Logger("logger")
        # Emit only records at INFO level or above
        logg.setLevel(INFO)
        # Attach the file handler
        logg.addHandler(filehandler)
        # Keep the configured logger for use by the rest of the class
        self.__loghandle = logg
예제 #59
0
 def _unzip_heart(self, oFile, bExcludeStoryDecks, bExcludeDemoDecks):
     """Heart of the reading loop - ensure we read parents before
        children, and correct for renames that occur.

        :param oFile: zip file wrapper holding the starter decks
        :param bExcludeStoryDecks: skip story decks during import
        :param bExcludeDemoDecks: skip demo decks during import
        :return: True on success, False when no starters are present or
            an unzip error occurred
        """
     oLogHandler = SutekhCountLogHandler()
     oProgressDialog = ProgressDialog()
     oProgressDialog.set_description("Importing Starters")
     oLogger = Logger('Read zip file')
     aExistingList = [x.name for x in PhysicalCardSet.select()]
     dList = oFile.get_all_entries()
     # Check that we match starter regex
     bOK = False
     for sName in dList:
         oMatch = self.oStarterRegex.match(sName)
         if oMatch:
             bOK = True
             break
     if not bOK:
         oProgressDialog.destroy()
         return False  # No starters in zip file
     oLogger.addHandler(oLogHandler)
     oLogHandler.set_dialog(oProgressDialog)
     oLogHandler.set_total(len(dList))
     oProgressDialog.show()
     bDone = False
     # _unzip_list defers sets whose parents aren't read yet; loop until
     # nothing remains (or bail out on error)
     while not bDone:
         dRemaining = {}
         if self._unzip_list(oFile, dList, oLogger, dRemaining,
                 bExcludeStoryDecks, bExcludeDemoDecks):
             bDone = len(dRemaining) == 0
             dList = dRemaining
         else:
             self.reload_pcs_list()
             oProgressDialog.destroy()
             return False  # Error
     # Cleanup
     self._clean_empty(oFile.get_all_entries(), aExistingList)
     self.reload_pcs_list()
     oProgressDialog.destroy()
     return True