Exemplo n.º 1
0
def logger_init(dirpath=None):
    """Set up queue-based logging backed by a timestamped log file.

    Creates a 'pandarus-worker-<timestamp>.log' file (inside ``dirpath``
    when given), starts a ``QueueListener`` that forwards queued records
    to that file, and also attaches the file handler to the root logger.

    Adapted from http://stackoverflow.com/a/34964369/164864

    Returns:
        tuple: (queue_listener, logging_queue)
    """
    logging_queue = multiprocessing.Queue()

    # Timestamped file name so successive runs do not clobber each other.
    timestamp = datetime.datetime.now().strftime("%d-%B-%Y-%I-%M%p")
    filepath = "{}-{}.log".format('pandarus-worker', timestamp)
    if dirpath is not None:
        filepath = os.path.join(dirpath, filepath)

    handler = logging.FileHandler(filepath, encoding='utf-8')
    handler.setFormatter(
        logging.Formatter("%(asctime)s %(levelname)s %(lineno)d %(message)s"))

    # The listener drains the queue in a background thread and hands
    # each record to the file handler.
    queue_listener = QueueListener(logging_queue, handler)
    queue_listener.start()

    root = logging.getLogger()
    root.setLevel(logging.INFO)
    root.addHandler(handler)

    return queue_listener, logging_queue
def logger_init(file_location="multi.log"):
    """Configure queue-based logging to both the console and a file.

    Starts a ``QueueListener`` that dispatches queued records to a stream
    handler and a file handler (honouring each handler's own level), and
    attaches both handlers to the root logger so records emitted in this
    process are handled directly as well.

    Returns:
        tuple: (queue_listener, queue)
    """
    record_queue = multiprocessing.Queue()

    formatter = logging.Formatter(
        "%(levelname)s: %(asctime)s - %(process)s - [%(filename)s:%(lineno)s] - %(message)s"
    )

    console_handler = logging.StreamHandler()
    console_handler.setFormatter(formatter)

    file_handler = logging.FileHandler(file_location, encoding="utf8")
    file_handler.setFormatter(formatter)

    # Background thread: drain the queue and dispatch to both handlers.
    listener = QueueListener(record_queue,
                             console_handler,
                             file_handler,
                             respect_handler_level=True)
    listener.start()

    root = logging.getLogger()
    root.setLevel(logging.INFO)
    root.addHandler(console_handler)
    root.addHandler(file_handler)

    return listener, record_queue
Exemplo n.º 3
0
class QueueLogger:
    """Routes log records through a multiprocessing queue to a listener.

    Typical usage: ``setup_queue_logging(logger, handlers)``, then
    ``start()``; call ``stop()`` during shutdown.
    """

    def __init__(self):
        self.structlog_q = None          # multiprocessing.Queue carrying records
        self.structlog_q_handler = None  # QueueHandler attached to loggers
        self.structlog_listener = None   # QueueListener draining the queue
        self.initialized = False

    def initialize_q(self):
        """Create the record queue and the QueueHandler that feeds it."""
        self.structlog_q = multiprocessing.Queue(-1)
        self.structlog_q_handler = QueueHandler(self.structlog_q)
        self.initialized = True

    def format_logger(self, logger):
        """Attach the queue handler so *logger*'s records go to the queue."""
        logger.addHandler(self.structlog_q_handler)

    def configure_listener(self, handlers):
        """Create the listener that forwards queued records to *handlers*."""
        self.structlog_listener = QueueListener(self.structlog_q, *handlers, respect_handler_level=True)

    def setup_queue_logging(self, logger, handlers):
        """Convenience wrapper: create queue, wire *logger*, build listener."""
        self.initialize_q()
        self.format_logger(logger)
        self.configure_listener(handlers)

    def start(self):
        """Start the background listener thread."""
        self.structlog_listener.start()

    def stop(self):
        """Stop the listener if one was actually configured.

        Bug fix: previously only ``initialized`` was checked, so calling
        ``stop()`` after ``initialize_q()`` but before
        ``configure_listener()`` raised AttributeError on ``None.stop()``.
        """
        if self.initialized and self.structlog_listener is not None:
            self.structlog_listener.stop()
Exemplo n.º 4
0
def _setup_logging():
    """Configure the 'sento-crawler' logger with queue-based handlers.

    Depending on the configured output target, records end up either on
    the console or in a midnight-rotating file under ./logs.  The
    module-level ``_queue_listener`` must be stopped when execution
    finishes.
    """
    global _logger
    global _queue_listener

    logging_cfg = _get_logging_settings()
    log_queue = Queue(-1)

    _logger = logging.getLogger('sento-crawler')
    _logger.setLevel(logging_cfg.get('level'))

    formatter = logging.Formatter(LOG_FORMAT)
    # Emit timestamps in UTC rather than local time.
    formatter.converter = time.gmtime

    if logging_cfg.get('output') == VALID_OUTPUTS[0]:
        out_handler = logging.StreamHandler()  # type: logging.Handler
    else:
        # Make sure the log directory exists before opening the file.
        Path('./logs').mkdir(exist_ok=True)
        out_handler = TimedRotatingFileHandler(
            filename='logs/sento_crawler.log', when='midnight', utc=True)

    out_handler.setLevel(logging.INFO)
    out_handler.setFormatter(formatter)

    _logger.addHandler(QueueHandler(log_queue))
    _queue_listener = QueueListener(log_queue, out_handler)

    # The queue listener must be stopped when execution finishes
    # This line spawns a listener in another thread!
    _queue_listener.start()
Exemplo n.º 5
0
def processes():
    # Fixture-style generator: spawn three worker processes, each wired
    # with a duplex pipe and a shared manager queue used for centralized
    # logging; yield them to the caller, then tear everything down.
    ctx = mp.get_context("spawn")
    pipes = [mp.Pipe(duplex=True) for _ in [0, 1, 2]]
    man = ctx.Manager()
    queue = man.Queue()
    # NOTE(review): `Process` accepts a `ctx` kwarg here, so it is
    # presumably a project wrapper rather than multiprocessing.Process —
    # confirm against its definition.
    processes = [Process(target=process_main,
                         args=(pipes[i][0], queue), ctx=ctx)
                 for i in [0, 1, 2]]

    # Forward log records queued by the workers to the local console.
    handler = logging.StreamHandler()
    handler.setFormatter(logging.Formatter(
        "[%(asctime)s] <%(levelname)s>:%(name)s:%(message)s"))

    ql = QueueListener(queue, handler)
    ql.start()
    default_logger.addHandler(handler)

    for p, i in zip(processes, [0, 1, 2]):
        default_logger.info("processes {} started".format(i))
        p.start()
    # Hand the processes and the parent ends of the pipes to the caller.
    yield processes, [pi[1] for pi in pipes]
    for p, pi, i in zip(processes, pipes, [0, 1, 2]):
        # try graceful shutdown first
        pi[1].send(dill.dumps((exit, 0, {})))
        p.join(timeout=1)
        if p.is_alive():
            # ungraceful shutdown
            default_logger.info("processes {} ungraceful shutdown".format(i))
            p.terminate()
            p.join()
    default_logger.removeHandler(handler)
    ql.stop()
    man.shutdown()
    man.join()
    default_logger.info("processes stopped")
Exemplo n.º 6
0
class CentralizedLogHandler(QueueHandler):
    """
    A queue handler to centralize multiple worker logging.

    Records emitted through this handler are enqueued and drained by a
    background ``QueueListener`` that dispatches them to *handlers*.

    https://docs.python.org/3/library/logging.handlers.html#queuehandler
    """
    __slots__ = "queue", "_listener"

    def __init__(self,
                 handlers: List[logging.Handler],
                 queue: Any = None,
                 respect_handler_level: bool = True):
        """
        Initialize queued log handler.

        :param list of logging.Handler handlers: Logging handlers
        :param queue.Queue queue: transport queue
        :param bool respect_handler_level: respect handler levels
        """
        self.queue = queue or Queue()
        self._listener = QueueListener(
            self.queue, *handlers, respect_handler_level=respect_handler_level)
        self._listener.start()
        # Bug fix: the previous `atexit.register(lambda: self._listener.stop)`
        # returned the bound method without calling it, so the listener was
        # never actually stopped at interpreter exit.
        atexit.register(self._listener.stop)
        super().__init__(self.queue)
Exemplo n.º 7
0
    def test_on_node_error_hook_is_called_with_parallel_runner(
            self, tmp_path, mocker, logging_hooks):
        # Capture every record the hooks emit through the logging queue.
        log_records = []

        class LogHandler(logging.Handler):  # pylint: disable=abstract-method
            def handle(self, record):
                log_records.append(record)

        # Patch load_context so ParallelRunner workers receive the broken
        # context whose nodes raise ValueError("broken").
        broken_context_with_hooks = _create_broken_context_with_hooks(
            tmp_path, mocker, logging_hooks)
        mocker.patch(
            "kedro.framework.context.context.load_context",
            return_value=broken_context_with_hooks,
        )
        logs_queue_listener = QueueListener(logging_hooks.queue, LogHandler())
        logs_queue_listener.start()

        with pytest.raises(ValueError, match="broken"):
            broken_context_with_hooks.run(runner=ParallelRunner(max_workers=2),
                                          node_names=["node1", "node2"])
        logs_queue_listener.stop()

        # Both nodes are broken, so on_node_error must fire once per node.
        on_node_error_records = [
            r for r in log_records if r.funcName == "on_node_error"
        ]
        assert len(on_node_error_records) == 2

        for call_record in on_node_error_records:
            self._assert_hook_call_record_has_expected_parameters(
                call_record,
                ["error", "node", "catalog", "inputs", "is_async", "run_id"],
            )
            expected_error = ValueError("broken")
            assert_exceptions_equal(call_record.error, expected_error)
Exemplo n.º 8
0
    def default(cls) -> logging.Logger:
        """Defines non-blocking application logger.
        Inspiration: https://www.zopatista.com/python/2019/05/11/asyncio-logging/

        Returns:
            logging.Logger: Root logger
        """
        root = logging.getLogger()
        root.setLevel(logging.DEBUG)

        formatter = logging.Formatter(
            fmt="%(asctime)s | %(levelname)8s | %(message)60s | %(filename)s:%(lineno)d at %(name)s",
            datefmt="%Y-%m-%d %H:%M:%S",
        )

        stdout_handler = logging.StreamHandler(stream=sys.stdout)
        stdout_handler.setLevel(logging.DEBUG)
        stdout_handler.setFormatter(formatter)

        # Records are only enqueued on the logger; a background listener
        # thread writes them to stdout, keeping the caller unblocked.
        record_queue = SimpleQueue()
        root.addHandler(QueueHandler(record_queue))

        listener = QueueListener(
            record_queue, stdout_handler, respect_handler_level=True)
        listener.start()

        return root
Exemplo n.º 9
0
def configure_log_listener(console: bool = True,
                           log_path: str = "main.log") -> QueueListener:
    """
    Configure log queue listener to log into file and console.
    Args:
        console (bool): whether to log on console
        log_path (str): path of log file
    Returns:
        log_qlistener (logging.handlers.QueueListener): configured log queue listener
    """
    global log_qlistener

    # Tear down any previously-configured listener before replacing it.
    try:
        atexit.unregister(log_qlistener.stop)
        log_qlistener.stop()
    except (AttributeError, NameError):
        pass

    handlers: List[logging.Handler] = []
    if log_path:
        # rotating file handler
        handlers.append(_get_file_handler(log_path))
    if console:
        # console handler
        handlers.append(_get_stdout_handler())

    log_qlistener = QueueListener(log_queue, *handlers,
                                  respect_handler_level=True)
    log_qlistener.start()
    atexit.register(log_qlistener.stop)
    return log_qlistener
Exemplo n.º 10
0
    def test_before_and_after_node_run_hooks_are_called_with_parallel_runner(
            self, context_with_hooks, dummy_dataframe, logs_queue):
        # Capture every record the hooks emit through the shared logs queue.
        log_records = []

        class LogHandler(logging.Handler):  # pylint: disable=abstract-method
            def handle(self, record):
                log_records.append(record)

        logs_queue_listener = QueueListener(logs_queue, LogHandler())
        logs_queue_listener.start()
        # Seed the catalog inputs required by node1/node2, then run both
        # nodes under the parallel runner.
        context_with_hooks.catalog.save("cars", dummy_dataframe)
        context_with_hooks.catalog.save("boats", dummy_dataframe)
        context_with_hooks.run(runner=ParallelRunner(),
                               node_names=["node1", "node2"])
        logs_queue_listener.stop()

        # before_node_run must fire once per executed node.
        before_node_run_log_records = [
            r for r in log_records if r.funcName == "before_node_run"
        ]
        assert len(before_node_run_log_records) == 2
        for record in before_node_run_log_records:
            assert record.getMessage() == "About to run node"
            assert record.node.name in ["node1", "node2"]
            assert set(record.inputs.keys()) <= {"cars", "boats"}

        # after_node_run must fire once per executed node as well.
        after_node_run_log_records = [
            r for r in log_records if r.funcName == "after_node_run"
        ]
        assert len(after_node_run_log_records) == 2
        for record in after_node_run_log_records:
            assert record.getMessage() == "Ran node"
            assert record.node.name in ["node1", "node2"]
            assert set(record.outputs.keys()) <= {"planes", "ships"}
Exemplo n.º 11
0
def start_logger():
    """Wire the 'cranehook' logger to Discord and the console via a queue.

    A QueueHandler on the logger enqueues records; a background
    QueueListener dispatches them to a Discord webhook handler (INFO and
    above) and a stream handler (DEBUG and above).
    """
    record_queue = SimpleQueue()

    formatter = logging.Formatter(
        "%(asctime)s - %(name)s - %(levelname)s - %(message)s"
        )

    logger = logging.getLogger("cranehook")
    logger.setLevel(settings.LOG_LEVEL)

    stream_handler = logging.StreamHandler()
    stream_handler.setFormatter(formatter)
    stream_handler.setLevel(logging.DEBUG)

    discord_handler = DiscordHandler(settings.DISCORD_WEBHOOK_URL, "cranehook")
    discord_handler.setFormatter(formatter)
    discord_handler.setLevel(logging.INFO)

    listener = QueueListener(
        record_queue, discord_handler, stream_handler,
        respect_handler_level=True,
    )

    logger.addHandler(QueueHandler(record_queue))
    listener.start()
Exemplo n.º 12
0
    def __init__(self, url, level=NOTSET):
        # Queue transporting records from this handler to the listener.
        self._log_queue = queue.Queue(-1)
        super().__init__(self._log_queue)

        # Forward queued records to a TeamsHandler in a background thread.
        # NOTE(review): the listener reference is discarded here — it stays
        # alive only via its worker thread and is never stopped explicitly;
        # confirm that is intended.
        teams_handler = TeamsHandler(url, level)
        teams_log_listener = QueueListener(self._log_queue, teams_handler)
        teams_log_listener.start()
Exemplo n.º 13
0
class sfLogger:
    """Queue-backed logger whose messages are consumed via ``loggenerator``.

    NOTE(review): the QueueListener below is constructed with the
    QueueHandler itself as its target handler, so any record the listener
    dequeues would be re-enqueued into the same queue it reads from.
    Presumably records are meant to be drained by ``loggenerator`` instead
    — confirm this design before relying on ``start``.
    """

    def __init__(self, logger_name):
        # Message-only format; records are read raw from the queue below.
        self.format = logging.Formatter("%(message)s")
        self.log_queue = queue.Queue()
        self.queue_handler = QueueHandler(self.log_queue)
        self.queue_handler.setFormatter(self.format)
        self.logger = logging.getLogger(logger_name)
        self.logger.addHandler(self.queue_handler)
        self.logger.setLevel(logging.DEBUG)
        self.listener = QueueListener(self.log_queue, self.queue_handler)
        self.isStop = False

    def start(self):
        # Start the listener thread and allow loggenerator to run.
        #print("logger.start()")
        self.listener.start()
        self.isStop = False

    def loggenerator(self):
        # Yield formatted messages straight off the queue until stopped.
        # Blocks on Queue.get when no records are pending.
        #print("logger.loggenerator()")
        while self.isStop == False:
            yield self.log_queue.get().getMessage()

    def stop(self):
        # Stop the listener, then drain any records left in the queue.
        #print("logger.stop()")
        self.listener.stop()
        self.isStop = True
        while self.log_queue.empty() == False:
            self.log_queue.get().getMessage()
Exemplo n.º 14
0
def logger_init(log_fname):
    """Start queue-based logging into *log_fname*.

    Falls back to a StreamHandler when the log file cannot be opened.

    Returns:
        tuple: (started QueueListener, queue for worker QueueHandlers)
    """
    record_queue = Queue()

    try:
        handler = logging.FileHandler(log_fname)
    except PermissionError as e:
        print("logger_init: Error = ", str(e))
        handler = logging.StreamHandler()
        print("logger_init: StreamHandler selected.")
    except Exception as e:
        print("logger_init: Unexpected Error = ", str(e))
        handler = logging.StreamHandler()

    handler.setFormatter(
        logging.Formatter(
            "%(levelname)s: %(asctime)s - %(process)s - %(message)s"))

    # Background thread: drain the queue and hand records to the handler.
    listener = QueueListener(record_queue, handler)
    listener.start()

    root = logging.getLogger()
    root.setLevel(logging.DEBUG)
    root.addHandler(handler)

    return listener, record_queue
Exemplo n.º 15
0
class WorkflowTest(unittest.TestCase):
    """Starts a remote workflow process per test, with queue-based logging."""

    def setUp(self):

        # communications channels
        parent_workflow_conn, child_workflow_conn = multiprocessing.Pipe()
        running_event = multiprocessing.Event()

        # logging
        log_q = multiprocessing.Queue()

        # Re-dispatch records coming from the remote process to a local
        # logger of the same name, honouring that logger's level.
        def handle(record):
            logger = logging.getLogger(record.name)
            if logger.isEnabledFor(record.levelno):
                logger.handle(record)

        handler = CallbackHandler(handle)
        self.queue_listener = QueueListener(log_q, handler)
        self.queue_listener.start()

        remote_process = multiprocessing.Process(
            target=remote_main,
            name="remote process",
            args=[parent_workflow_conn, log_q, running_event])

        # Daemonize so a failing test cannot leave the process behind.
        remote_process.daemon = True
        remote_process.start()
        # Block until the remote process signals it is up and running.
        running_event.wait()

        self.workflow = LocalWorkflow(child_workflow_conn)
        self.remote_process = remote_process

    def tearDown(self):
        self.workflow.shutdown_remote_process(self.remote_process)
        self.queue_listener.stop()
Exemplo n.º 16
0
    def test_on_node_error_hook_parallel_runner(self, tmp_path, logging_hooks):
        session = KedroSession.create(MOCK_PACKAGE_NAME, tmp_path)
        # Capture every record the hooks emit through the logging queue.
        log_records = []

        class LogHandler(logging.Handler):  # pylint: disable=abstract-method
            def handle(self, record):
                log_records.append(record)

        logs_queue_listener = QueueListener(logging_hooks.queue, LogHandler())
        logs_queue_listener.start()

        with pytest.raises(ValueError, match="broken"):
            try:
                session.run(runner=ParallelRunner(max_workers=2),
                            node_names=["node1", "node2"])
            finally:
                # Stop the listener even though the run is expected to raise.
                logs_queue_listener.stop()

        # Both nodes raise, so on_node_error must fire once per node.
        on_node_error_records = [
            r for r in log_records if r.funcName == "on_node_error"
        ]
        assert len(on_node_error_records) == 2

        for call_record in on_node_error_records:
            _assert_hook_call_record_has_expected_parameters(
                call_record,
                ["error", "node", "catalog", "inputs", "is_async", "run_id"],
            )
            expected_error = ValueError("broken")
            assert_exceptions_equal(call_record.error, expected_error)
Exemplo n.º 17
0
def config_logging():
    # When DingTalk is configured, add a queue-backed handler so webhook
    # delivery happens on a background thread rather than on the logging
    # call path.
    if settings.DINGTALK_WEBHOOK and settings.DINGTALK_SECRET:
        dingtalk_queue = Queue()
        DEFAULT_LOGGING["handlers"]["dingtalk"] = {
            "level": logging.INFO,
            "class": "logging.handlers.QueueHandler",
            "queue": dingtalk_queue,
            "formatter": "simple",
        }
        DEFAULT_LOGGING["loggers"]["notifier"]["handlers"] = [
            "console",
            "file",
            "websocket",
            "dingtalk",
        ]
        dingtalk_handler = DingTalkHandler(
            webhook_url=settings.DINGTALK_WEBHOOK, secret=settings.DINGTALK_SECRET
        )
        dingtalk_listener = QueueListener(dingtalk_queue, dingtalk_handler)
        dingtalk_listener.start()

    # Ensure the log directory exists before dictConfig opens file handlers.
    Path(LOG_LOCATION).parent.mkdir(parents=True, exist_ok=True)
    logging.config.dictConfig(DEFAULT_LOGGING)
    # Stream records to the robot websocket endpoint in the background.
    ws_handler = YuFuRobotStreamWebsocketHandler(ws_uri=settings.WS_ROBOT_STREAM_URI)
    ws_listener = WebsocketListener(ws_queue, ws_handler)
    ws_listener.start()
Exemplo n.º 18
0
def initialize_logging(
        command_name: str,
        log_queue: "Queue[logging.LogRecord]") -> Callable[[], None]:
    """Initialize logging handlers and configuration.

    Args:
        command_name: Name of the command that is being logged.
        log_queue: Logging queue to collect log messages from sub-processes.

    Returns:
        Callback to stop the log queue listener when shutting down the platform.

    """
    _configure_logging(command_name)
    _log_unhandled_exceptions()

    # Forward records queued by sub-processes to whatever handlers the
    # root logger currently has, honouring each handler's level.
    root_handlers = logging.getLogger().handlers
    log_listener = QueueListener(log_queue, *root_handlers,
                                 respect_handler_level=True)
    log_listener.start()

    logger.debug(
        f"Initialized logging for main process (pid={os.getpid()}, parent={os.getppid()}, platform={platform()})"
    )

    def cleanup_callback() -> None:
        log_listener.stop()

    return cleanup_callback
Exemplo n.º 19
0
    def __init__(self, n_cpu, **kwargs):
        # Task queue shared with the worker processes, plus a separate
        # queue carrying their log records back to this process.
        self.queue = JoinableQueue()
        self.log_queue = Queue()
        # Shared integer counter of outstanding tasks, visible to workers.
        self.n_tasks = Value('i', 0)
        kwargs["n_tasks"] = self.n_tasks

        self.processes = [
            Process(target=self.run_trial, kwargs=kwargs)
            for _ in range(int(n_cpu))
        ]

        # Pre-compute expected event counts for each season's injector
        # before any worker starts.
        self.mh = MinimisationHandler.create(kwargs["mh_dict"])
        for season in self.mh.seasons.keys():
            inj = self.mh.get_injector(season)
            inj.calculate_n_exp()
        self.mh_dict = kwargs["mh_dict"]
        self.scales = []

        handler = logging.StreamHandler()
        handler.setFormatter(
            logging.Formatter(
                "%(levelname)s: %(asctime)s - %(process)s - %(message)s"))
        # ql gets records from the queue and sends them to the handler
        # NOTE(review): the listener reference is discarded — it survives
        # only via its worker thread and is never stopped explicitly.
        ql = QueueListener(self.log_queue, handler)
        ql.start()

        for p in self.processes:
            p.start()
Exemplo n.º 20
0
def configure_logging(name, level=logging.INFO):
    """Route root-logger records through a queue to a rotating file and stdout.

    Args:
        name: path of the rotating log file.
        level: root logger level (defaults to INFO).
    """
    line_fmt = "%(asctime)s %(name)s[%(lineno)d] - %(levelname)s: %(message)s"
    datefmt = "%Y-%m-%d %H:%M:%S"

    file_handler = RotatingFileHandler(name,
                                       mode="a+",
                                       maxBytes=48000,
                                       backupCount=1)
    file_handler.setFormatter(FileFormatter(line_fmt, datefmt=datefmt))

    stream_handler = StreamHandler(stream=sys.stdout)
    stream_handler.setFormatter(ColouredFormatter(line_fmt, datefmt=datefmt))

    record_queue = Queue()

    root_logger = logging.getLogger()
    root_logger.setLevel(level)
    # Root logger only enqueues; the listener below does the actual I/O.
    root_logger.addHandler(LocalQueueHandler(record_queue))

    listener = QueueListener(record_queue,
                             file_handler,
                             stream_handler,
                             respect_handler_level=True)
    listener.start()
Exemplo n.º 21
0
class LokiQueueHandler(QueueHandler):
    """This handler automatically creates listener and `LokiHandler` to handle logs queue."""

    def __init__(self, queue: Queue, **kwargs):
        """Create new logger handler with the specified queue and kwargs for the `LokiHandler`."""
        super().__init__(queue)
        # Records land in `queue`; the listener forwards them to Loki.
        loki_handler = LokiHandler(**kwargs)  # noqa: WPS110
        self.handler = loki_handler
        self.listener = QueueListener(queue, loki_handler)
        self.listener.start()
Exemplo n.º 22
0
 def setup_threaded_logging(self):
     """Start a QueueListener that echoes queued records to the console.

     Stores the queue on ``self.logging_queue`` (for handing to worker
     processes) and returns the started listener.
     """
     self.logging_queue = MPQueue(-1)
     console_handler = logging.StreamHandler()
     console_handler.setFormatter(
         logging.Formatter('[%(name)s] %(levelname)s: %(message)s'))
     listener = QueueListener(self.logging_queue, console_handler)
     listener.start()
     return listener
Exemplo n.º 23
0
def setup_main_logging(config) -> mp.Queue:
    """Start a listener that ships INFO+ records from a queue to Neptune.

    Args:
        config: configuration passed through to the Neptune log handler.

    Returns:
        The queue that worker processes should log into.
    """
    log_queue = mp.Queue()
    neptune_handler = NeptuneLogHandler(config)
    neptune_handler.setLevel(logging.INFO)
    # The listener drains the queue on a background thread.
    listener = QueueListener(log_queue, neptune_handler,
                             respect_handler_level=True)
    listener.start()
    return log_queue
Exemplo n.º 24
0
def _configure_log_process():
    """Start queue-based logging into the multiprocessing risk-extractor log.

    Returns:
        tuple: (manager-backed queue to log into, started QueueListener)
    """
    record_queue = multiprocessing.Manager().Queue(-1)
    log_path = os.path.join(Config.log_dir(),
                            'risk_extractor_multiprocessing.log')
    file_handler = logging.FileHandler(log_path)
    fmt = '%(asctime)s-[%(levelname)s]-%(name)s-%(filename)s.' \
          '%(funcName)s(%(lineno)d)-%(message)s'
    file_handler.setFormatter(logging.Formatter(fmt))
    listener = QueueListener(record_queue, file_handler)
    listener.start()
    return record_queue, listener
Exemplo n.º 25
0
class QueueListenerHandler(QueueHandler):
    """QueueHandler that owns its queue and a started QueueListener.

    Records emitted through this handler are drained by ``self.listener``
    and dispatched to *handlers* (handler levels are respected).  The
    listener is stopped automatically at interpreter exit.
    """

    def __init__(self, handlers):
        super().__init__(Queue())
        self._start_listener(self.queue, handlers)

    def _start_listener(self, queue, handlers) -> QueueListener:
        # Build, start, and register cleanup for the background listener.
        listener = QueueListener(queue, *handlers, respect_handler_level=True)
        self.listener = listener
        listener.start()
        atexit.register(listener.stop)
        return listener
Exemplo n.º 26
0
class LokiQueueHandler(QueueHandler):
    """
    This handler automatically creates listener and `LokiHandler` to handle logs queue.
    """

    def __init__(self, queue: Queue, url: str, tags: Optional[dict] = None, auth: BasicAuth = None):
        # Records emitted into `queue` are drained by the listener below
        # and shipped to Loki by the wrapped handler.
        super().__init__(queue)
        loki_handler = LokiHandler(url, tags, auth)
        self.handler = loki_handler
        self.listener = QueueListener(queue, loki_handler)
        self.listener.start()
Exemplo n.º 27
0
def log_worker():
    """ Starts a thread in parent process that listens to log queue and writes logs to file on disk"""
    file_handler = logging.FileHandler('logs.txt')
    pid_log_msg = f'handled by PID {os.getpid()}'
    file_handler.setFormatter(
        logging.Formatter(
            f'%(name)-15s %(levelname)-8s %(message)s {pid_log_msg}'))
    listener = QueueListener(log_queue, file_handler)
    listener.start()
    # Hand control back to the caller; stop the listener on teardown.
    yield
    listener.stop()
Exemplo n.º 28
0
def setup_logging(basic=False, prefix=None):
    """
        Setup logging module.

        In GUI mode a queue-based logger is configured that writes to both
        the log file and the console; otherwise only basic console logging
        is set up.

        :param basic: requested sparse logging; note the effective value is
            overridden below by ``not settings.GUI_MODE``.
        :param prefix: optional string prepended to the log format.
    """
    from pyxrd.data import settings

    # Whether PyXRD should spew out debug messages
    debug = settings.DEBUG
    # Filename used for storing the logged messages
    log_file = settings.LOG_FILENAME
    # Flag indicating if a full logger should be setup (False) or
    # if simple, sparse logging is enough (True)
    basic = not settings.GUI_MODE

    fmt = '%(name)s - %(levelname)s: %(message)s'
    if prefix is not None:
        fmt = prefix + " " + fmt

    if log_file is not None and not os.path.exists(os.path.dirname(log_file)):
        os.makedirs(os.path.dirname(log_file))

    if not basic:
        # Setup file log:
        file_handler = logging.FileHandler(log_file, 'w')
        disk_fmt = logging.Formatter(
            '%(asctime)s %(levelname)-8s %(name)-40s %(message)s',
            datefmt='%m-%d %H:%M')
        file_handler.setFormatter(disk_fmt)

        # Setup console log:
        log_handler = logging.StreamHandler()
        full = logging.Formatter(fmt)
        log_handler.setFormatter(full)

        # Setup queue handler:
        log_que = queue.Queue(-1)
        queue_handler = QueueHandler(log_que)
        queue_listener = QueueListener(log_que,
                                       file_handler,
                                       log_handler,
                                       respect_handler_level=True)
        queue_listener.start()

        # Add queue handler:
        logger = logging.getLogger('')
        logger.setLevel(logging.DEBUG if debug else logging.INFO)
        logger.addHandler(queue_handler)

        # Bug fix: the finalizer is only registered when a listener was
        # actually created; previously this line ran unconditionally and
        # raised NameError in the basic branch.
        settings.FINALIZERS.append(queue_listener.stop)
    else:
        # Very basic output for the root object.
        # Bug fix: the old code also did `logger.addHandler(queue_handler)`
        # here, but `queue_handler` is never defined in this branch and
        # raised NameError.
        logging.basicConfig(format=fmt)
Exemplo n.º 29
0
def _setup_logging_queue(*handlers: Handler) -> QueueHandler:
    """Create a new LocalQueueHandler and start an associated QueueListener."""
    record_queue: Queue = Queue()

    # Background listener forwards queued records to *handlers*,
    # honouring each handler's level.
    listener = QueueListener(record_queue, *handlers,
                             respect_handler_level=True)
    listener.start()

    # Callers attach this handler to their loggers.
    return LocalQueueHandler(record_queue)
Exemplo n.º 30
0
class AutomatonRunner(object):
    """Runs a RiskExamAutomaton periodically, streaming its logs over Rx.

    Log records pass through a queue to a LoggingRxHandler that feeds a
    ReplaySubject, so late subscribers still see recent output.
    """

    def __init__(self, app, interval=1800, buffer_size=20):
        self.app = app
        self.is_running = False  # guards against overlapping runs
        self.interval = interval  # seconds between automatic runs
        self.logger = logging.getLogger("AutomatonRunner")
        self.pool = ThreadPoolExecutor()
        self.logging_queue = queue.Queue(-1)

        # All runner/automaton records are enqueued here first.
        self.logger.addHandler(QueueHandler(self.logging_queue))

        pool_scheduler = AsyncIOScheduler()
        # Replay the last `buffer_size` log events to new subscribers.
        self.log_source = ReplaySubject(buffer_size=buffer_size,
                                        scheduler=pool_scheduler)
        logging_handler = LoggingRxHandler(self.log_source)
        logging_handler.setFormatter(
            logging.Formatter(
                '\033[34m%(asctime)s \033[91m%(name)s\033[0m %(message)s'))
        self.logging_queue_listener = QueueListener(self.logging_queue,
                                                    logging_handler)
        self.logging_queue_listener.start()

    def __del__(self):
        # Best-effort cleanup; __del__ timing is interpreter-dependent.
        self.logging_queue_listener.stop()

    async def run_forever_in_background(self, delay=0):
        # Run the automaton every `interval` seconds until cancelled.
        try:
            await asyncio.sleep(delay)
            while True:
                if not self.is_running:
                    fn = functools.partial(self.run)
                    await self.app.loop.run_in_executor(self.pool, fn)
                await asyncio.sleep(self.interval)
        except asyncio.CancelledError:
            self.logger.info("Runner stopping")

    def run_once_no_wait(self):
        # Kick off a single run in the thread pool without awaiting it.
        if not self.is_running:
            fn = functools.partial(self.run)
            self.logger.info("Start running.")
            self.app.loop.run_in_executor(self.pool, fn)

    def run(self):
        # Execute one automaton pass; exceptions are logged, not raised.
        self.is_running = True
        automaton = examautomaton.RiskExamAutomaton(
            logging_queue=self.logging_queue)
        try:
            automaton.run()
        except Exception as ex:
            self.logger.warning(
                "Automaton encountered an error: {0}".format(ex))

        del automaton
        self.is_running = False
def logger_init():
    """Start queue-based console logging for multi-process use.

    Returns:
        tuple: (started QueueListener, queue for worker QueueHandlers)
    """
    record_queue = multiprocessing.Queue()

    console_handler = logging.StreamHandler()
    console_handler.setFormatter(logging.Formatter("%(levelname)s: %(asctime)s - %(process)s - %(message)s"))

    # Background thread: drain the queue and hand records to the handler.
    listener = QueueListener(record_queue, console_handler)
    listener.start()

    root = logging.getLogger()
    root.setLevel(logging.INFO)
    # Handle records emitted in this process directly as well.
    root.addHandler(console_handler)

    return listener, record_queue
Exemplo n.º 32
0
def get_logging_queue():
    #This probably will have to be refactored to get access to manager as well.
    # Lazily create, once per process, a manager-backed queue whose records
    # are forwarded to the root logger's handlers by a QueueListener.
    global __queue, __manager
    if __queue is None:
        m = multiprocessing.Manager()
        __manager = m
        q = m.Queue(-1)
        #https://docs.python.org/3/howto/logging-cookbook.html
        # NOTE(review): the handler list is captured once here; handlers
        # added to the root logger afterwards will not see queued records.
        listener = QueueListener(q, *logging.getLogger().handlers)
        listener.start()
        def exithandler():
            # Wait for outstanding records, then stop the listener and
            # shut the manager down.
            q.join()
            listener.stop()
            #Seems to help silencing bugs...
            import time; time.sleep(0.2)
            m.shutdown()

        atexit.register(exithandler)
        __queue = q
        return q
    return __queue
Exemplo n.º 33
0
class JobLoggingManager:

    """
    Manages logging for a Vesper job.

    Any process of a job can submit log records through a logger
    (typically the root logger) set up with the `configure_logger`
    static method. Such a logger pushes each record onto a
    multiprocessing queue; a listener thread in the main job process
    drains that queue and writes the messages to the job's log file,
    to stderr, and into a record-count tally.
    """

    @staticmethod
    def configure_logger(logger, logging_config):

        """
        Configures `logger` to write log records to this job's queue.

        `logging_config` is the `(level, queue)` pair exposed by a
        `JobLoggingManager`'s `logging_config` property. It is
        picklable, so the main job process can hand it to any child
        process it starts, e.g. as an argument to the process's
        target function.
        """

        level, queue = logging_config
        logger.setLevel(level)
        logger.addHandler(QueueHandler(queue))

    def __init__(self, job, level):

        self.job = job
        self.level = level

        # Queue through which log records travel from the job's various
        # processes and threads to the listener thread below.
        self.queue = Queue()

        fmt = Formatter('%(asctime)s %(levelname)-8s %(message)s')

        # Handler that appends formatted messages to the job log file.
        os_utils.create_parent_directory(job.log_file_path)
        log_file_handler = FileHandler(job.log_file_path, 'w')
        log_file_handler.setFormatter(fmt)

        # Handler that mirrors formatted messages to stderr.
        console_handler = StreamHandler()
        console_handler.setFormatter(fmt)

        # Handler that tallies record counts by level.
        self._record_counts_handler = _RecordCountsHandler()

        # Listener thread that feeds queued records to all three
        # handlers; started/stopped via the methods below.
        self._listener = QueueListener(
            self.queue, log_file_handler, console_handler,
            self._record_counts_handler)

    @property
    def logging_config(self):
        # Picklable `(level, queue)` pair for `configure_logger`.
        return (self.level, self.queue)

    @property
    def record_counts(self):
        # Snapshot copy so callers cannot mutate the live tally.
        return dict(self._record_counts_handler.record_counts)

    def start_up_logging(self):
        self._listener.start()

    def shut_down_logging(self):
        # Stop the listener thread, then flush and close all handlers.
        self._listener.stop()
        logging.shutdown()
# Example no. 34
    def run(self, p_processors_nb_threads, p_writer_nb_threads=None):
        """Run the swallow read/process/write pipeline with multiprocessing workers.

        Spawns one process per reader, `p_processors_nb_threads` processor
        processes and `p_writer_nb_threads` writer processes, wires them
        together through the in/out queues, waits for completion (poison
        pills mark end-of-stream), then logs throughput statistics.

        Args:
            p_processors_nb_threads: number of processor worker processes.
            p_writer_nb_threads: number of writer worker processes;
                defaults to `p_processors_nb_threads` when None.
        """
        # All log messages come and go by this queue
        log_queue = Queue()
        logger = logging.getLogger('swallow')

        # Logger.warn is deprecated (removed in Python 3.13); use warning().
        if len(logger.handlers) > 1:
            logger.warning("Several handlers detected on swallow logger but can't log to more than a single handler in multiprocessing mode. Only the first one will be used.")
        elif len(logger.handlers) == 0:
            logger.warning("No handler defined for swallow logger. Log to console with info level.")
            # Handler console
            stream_handler = logging.StreamHandler()
            stream_handler.setLevel(logging.INFO)
            logger.addHandler(stream_handler)

        # each log_listener gets records from the queue and sends them to a specific handler
        handler = logger.handlers[0]
        formatter = handler.formatter
        listener = QueueListener(log_queue, handler)
        listener.start()

        if p_writer_nb_threads is None:
            p_writer_nb_threads = p_processors_nb_threads

        logger.info('Running swallow process. Processor on %i threads / Writers on %i threads', p_processors_nb_threads, p_writer_nb_threads)

        start_time = datetime.datetime.now()

        # Set extra properties to readers so their child processes can
        # relay log records through the shared queue.
        for reader in self.readers:
            reader['reader'].counters = self.counters
            reader['reader'].log_queue = log_queue
            reader['reader'].log_level = logger.level
            reader['reader'].formatter = formatter

        # Set extra properties to writer
        if self.writer is not None:
            self.writer.counters = self.counters
            self.writer.log_queue = log_queue
            self.writer.log_level = logger.level
            self.writer.formatter = formatter

        read_worker = [Process(target=reader['reader'].scan_and_queue, args=(self.in_queue,), kwargs=(reader['args'])) for reader in self.readers]
        process_worker = [Process(target=get_and_parse, args=(self.in_queue, self.out_queue, self.process, self.counters, log_queue, logger.level, formatter), kwargs=(self.process_args)) for i in range(p_processors_nb_threads)]

        # writers are optional
        if self.writer is not None:
            write_worker = [Process(target=self.writer.dequeue_and_store, args=(self.out_queue,), kwargs=(self.writer_store_args)) for i in range(p_writer_nb_threads)]
        else:
            write_worker = []

        # Running workers
        for work in read_worker:
            work.start()
        for work in process_worker:
            work.start()
        for work in write_worker:
            work.start()

        # Waiting for workers to end :
        # worker.join() blocks the program till the worker ends
        logger.info('Waiting for reader to finish')
        for work in read_worker:
            # Waiting for all reader to finish their jobs
            work.join()

        # At this point, reading is finished. We add a poison pill for each consumer of read queue :
        for i in range(len(process_worker)):
            self.in_queue.put(None)

        logger.info('Waiting for processors to finish')
        for work in process_worker:
            # Waiting for all processors to finish their jobs
            work.join()

        # At this point, processing is finished. We add a poison pill for each consumer of write queue :
        for i in range(len(write_worker)):
            self.out_queue.put(None)

        logger.info('Waiting for writers to finish')
        for work in write_worker:
            # Waiting for all writers to finish their jobs
            work.join()

        # Fixed: local was misspelled "elsapsed_time"; also use lazy
        # %-args instead of eager interpolation in the log call.
        elapsed_time = datetime.datetime.now() - start_time
        logger.info('Elapsed time : %ss', elapsed_time.total_seconds())

        avg_time = 0
        nb_items = self.counters['nb_items_scanned'].value
        if nb_items:
            avg_time = 1000*self.counters['scan_time'].value / nb_items
        logger.info('{0} items scanned ({1}ms)'.format(nb_items, avg_time))

        avg_time = 0
        avg_time_idle = 0
        nb_items = self.counters['nb_items_processed'].value
        if nb_items:
            avg_time = 1000*self.counters['real_process_time'].value / nb_items
            avg_time_idle = 1000*self.counters['idle_process_time'].value / nb_items
        logger.info('{0} items processed (process : {1}ms / idle : {2}ms)'.format(nb_items, avg_time, avg_time_idle))

        avg_time = 0
        nb_items = self.counters['nb_items_stored'].value
        if nb_items:
            avg_time = 1000*self.counters['whole_storage_time'].value / nb_items
        logger.info('{0} items stored ({1}ms)'.format(nb_items, avg_time))

        nb_items = self.counters['nb_items_error'].value
        logger.info('{0} items error'.format(nb_items))

        # Stop listening for log messages
        listener.stop()
# Example no. 35
# Blocking Handlers
import queue
import logging
from logging.handlers import QueueHandler, QueueListener

# Unbounded queue decouples the emitting thread from the slow handler.
que = queue.Queue(-1)
queue_handler = QueueHandler(que)
root = logging.getLogger()
root.addHandler(queue_handler)

# Console handler serviced by the listener's background thread.
handler = logging.StreamHandler()
formatter = logging.Formatter("%(threadName)s: %(message)s")
handler.setFormatter(formatter)
listener = QueueListener(que, handler)

listener.start()
root.warning("Look Out")
listener.stop()