def __init__(self, logfile_path, console=False):
     """
     Logger API at Client Side to store the logs locally and sent to Central Logger MQ
     Parameters - RMQ - Create a RabbitMQ Object and pass it 
                - logfile_path - Path where to create log file
                - console - whether to diaplay log messages on screen - Default false
     """
     self.RMQ = RabbitMQ()
     #Creating queue and logger
     self.log_queue = queue.Queue(-1)  #infinite size
     self.queue_handler = QueueHandler(self.log_queue)
     self.logger = logging.getLogger()
     self.logger.addHandler(self.queue_handler)
     #formatter
     self.formatter = logging.Formatter(' %(message)s')
     #file handler - write to file
     self.file_handler_loc = logging.FileHandler(logfile_path)
     self.file_handler_loc.setFormatter(self.formatter)
     #console handler - print on screen
     if console:
         self.console_handler = logging.StreamHandler()
         self.console_handler.setFormatter(self.formatter)
         self.listener = QueueListener(self.log_queue, self.console_handler,
                                       self.file_handler_loc)
     else:
         self.listener = QueueListener(self.log_queue,
                                       self.file_handler_loc)
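Nothing above starts the listener; a minimal sketch of the underlying pattern this class wraps, assuming the listener is started right after setup and stopped at interpreter exit (file name and variable names are illustrative):

import atexit
import logging
import queue
from logging.handlers import QueueHandler, QueueListener

log_queue = queue.Queue(-1)                       # unbounded queue between producers and the listener
logging.getLogger().addHandler(QueueHandler(log_queue))

file_handler = logging.FileHandler("client.log")  # real I/O happens on the listener thread
listener = QueueListener(log_queue, file_handler)
listener.start()
atexit.register(listener.stop)                    # flush remaining records on exit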
Example #2
 def start(self, save_result=True):
     self.__logger.addHandler(self.__qh)
     self.__counter = self.CounterHandler(save_result)
     if self.debug:
         self.__logger.addHandler(self.__stdout)
         self.__listener = QueueListener(self.__q, self.__counter)
     else:
         self.__listener = QueueListener(self.__q, self.__counter,
                                         self.__file_handler)
     self.__listener.start()
     self.__time = time.time()
Example #3
    def __init__(self, url, level=NOTSET):
        self._log_queue = queue.Queue(-1)
        super().__init__(self._log_queue)

        teams_handler = TeamsHandler(url, level)
        teams_log_listener = QueueListener(self._log_queue, teams_handler)
        teams_log_listener.start()
Example #4
def _setup_logging():
    global _logger
    global _queue_listener

    logging_cfg = _get_logging_settings()
    log_queue = Queue(-1)

    _logger = logging.getLogger('sento-crawler')

    _logger.setLevel(logging_cfg.get('level'))

    logger_formatter = logging.Formatter(LOG_FORMAT)
    logger_formatter.converter = time.gmtime
    out_handler = None  # type: logging.Handler

    if logging_cfg.get('output') == VALID_OUTPUTS[0]:
        out_handler = logging.StreamHandler()
    else:
        logs_path = Path('./logs')
        logs_path.mkdir(exist_ok=True)

        out_handler = TimedRotatingFileHandler(
            filename='logs/sento_crawler.log', when='midnight', utc=True)

    out_handler.setLevel(logging.INFO)
    out_handler.setFormatter(logger_formatter)

    logger_handler = QueueHandler(log_queue)
    _queue_listener = QueueListener(log_queue, out_handler)

    _logger.addHandler(logger_handler)

    # The queue listener must be stopped when execution finishes
    # This line spawns a listener in another thread!
    _queue_listener.start()
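The comment above says the listener must be stopped when execution finishes; one way to guarantee that, assuming _setup_logging() has already run, is an atexit hook (a sketch, not part of the original module):

import atexit

def _teardown_logging() -> None:
    # Stop the listener thread and flush whatever is still in the queue.
    _queue_listener.stop()

atexit.register(_teardown_logging)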
Example #5
    def test_on_node_error_hook_parallel_runner(self, tmp_path, logging_hooks):
        session = KedroSession.create(MOCK_PACKAGE_NAME, tmp_path)
        log_records = []

        class LogHandler(logging.Handler):  # pylint: disable=abstract-method
            def handle(self, record):
                log_records.append(record)

        logs_queue_listener = QueueListener(logging_hooks.queue, LogHandler())
        logs_queue_listener.start()

        with pytest.raises(ValueError, match="broken"):
            try:
                session.run(runner=ParallelRunner(max_workers=2),
                            node_names=["node1", "node2"])
            finally:
                logs_queue_listener.stop()

        on_node_error_records = [
            r for r in log_records if r.funcName == "on_node_error"
        ]
        assert len(on_node_error_records) == 2

        for call_record in on_node_error_records:
            _assert_hook_call_record_has_expected_parameters(
                call_record,
                ["error", "node", "catalog", "inputs", "is_async", "run_id"],
            )
            expected_error = ValueError("broken")
            assert_exceptions_equal(call_record.error, expected_error)
Example #6
def setup_multi_logging(
        queue: Queue,
        debug: bool = False) -> Tuple[logging.Logger, QueueListener]:

    top_log: logging.Logger = logging.getLogger()

    if debug:
        level = logging.DEBUG
        fmt: str = ("{levelname} | {name} | "
                    "function: {funcName} "
                    "| line: {lineno} | {message}")

        style: str = "{"
    else:
        level = logging.INFO
        fmt = "{asctime} | {name} | {levelname} " "| {message}"
        style = "{"

    queue_handler: QueueHandler = QueueHandler(queue)
    top_log.addHandler(queue_handler)
    top_log.setLevel(level)

    formatter: logging.Formatter = logging.Formatter(fmt=fmt, style=style)
    console: logging.StreamHandler = logging.StreamHandler(stream=stdout)
    console.setFormatter(formatter)

    listener: QueueListener = QueueListener(queue, console)

    return top_log, listener
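setup_multi_logging builds the logger and listener but does not start the listener; a hedged usage sketch, assuming the queue is a multiprocessing.Queue shared with worker processes:

from multiprocessing import Queue

log_queue = Queue()
log, listener = setup_multi_logging(log_queue, debug=True)
listener.start()
try:
    log.info("records go through the queue and are printed by the listener thread")
finally:
    listener.stop()  # drain remaining records before exiting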
Example #7
def initialize_logging(
        command_name: str,
        log_queue: "Queue[logging.LogRecord]") -> Callable[[], None]:
    """Initialize logging handlers and configuration.

    Args:
        command_name: Name of the command that is being logged.
        log_queue: Logging queue to collect log messages from sub-processes.

    Returns:
        Callback to stop the log queue listener when shutting down the platform.

    """
    _configure_logging(command_name)
    _log_unhandled_exceptions()

    log_listener = QueueListener(log_queue,
                                 *logging.getLogger().handlers,
                                 respect_handler_level=True)
    log_listener.start()

    def cleanup_callback() -> None:
        log_listener.stop()

    logger.debug(
        f"Initialized logging for main process (pid={os.getpid()}, parent={os.getppid()}, platform={platform()})"
    )

    return cleanup_callback
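The returned callback is meant to be called once when the platform shuts down; a usage sketch, assuming a multiprocessing queue is what gets handed to sub-processes (the command name is illustrative):

from multiprocessing import Queue

log_queue = Queue()
stop_logging = initialize_logging("my-command", log_queue)
try:
    ...  # run the platform; sub-processes log via QueueHandler(log_queue)
finally:
    stop_logging()  # stops the QueueListener started above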
Example #8
    def test_on_node_error_hook_is_called_with_parallel_runner(
            self, tmp_path, mocker, logging_hooks):
        log_records = []

        class LogHandler(logging.Handler):  # pylint: disable=abstract-method
            def handle(self, record):
                log_records.append(record)

        broken_context_with_hooks = _create_broken_context_with_hooks(
            tmp_path, mocker, logging_hooks)
        mocker.patch(
            "kedro.framework.context.context.load_context",
            return_value=broken_context_with_hooks,
        )
        logs_queue_listener = QueueListener(logging_hooks.queue, LogHandler())
        logs_queue_listener.start()

        with pytest.raises(ValueError, match="broken"):
            broken_context_with_hooks.run(runner=ParallelRunner(max_workers=2),
                                          node_names=["node1", "node2"])
        logs_queue_listener.stop()

        on_node_error_records = [
            r for r in log_records if r.funcName == "on_node_error"
        ]
        assert len(on_node_error_records) == 2

        for call_record in on_node_error_records:
            self._assert_hook_call_record_has_expected_parameters(
                call_record,
                ["error", "node", "catalog", "inputs", "is_async", "run_id"],
            )
            expected_error = ValueError("broken")
            assert_exceptions_equal(call_record.error, expected_error)
Example #9
def setup_logging_queues():
    if sys.version_info.major < 3:
        raise RuntimeError("This feature requires Python 3.")

    queue_listeners = []

    # Q: What about loggers created after this is called?
    # A: if they don't attach their own handlers they should be fine
    for logger in get_all_logger_names(include_root=True):
        logger = logging.getLogger(logger)
        if logger.handlers:
            log_queue = queue.Queue(-1)  # No limit on size

            queue_handler = QueueHandler(log_queue)
            queue_listener = QueueListener(log_queue,
                                           respect_handler_level=True)

            queuify_logger(logger, queue_handler, queue_listener)
            # print("Replaced logger %s with queue listener: %s" % (
            #     logger, queue_listener
            # ))
            queue_listeners.append(queue_listener)

    for listener in queue_listeners:
        listener.start()

    atexit.register(stop_queue_listeners, *queue_listeners)
    return
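queuify_logger is not shown here; conceptually it should hand the logger's existing handlers to the listener and leave only the queue handler attached, roughly like this (a sketch under that assumption, not the original implementation):

def queuify_logger(logger, queue_handler, queue_listener):
    # Move the logger's current handlers onto the listener, then make the
    # queue handler the logger's only handler so emits become non-blocking.
    queue_listener.handlers = tuple(logger.handlers)
    for handler in list(logger.handlers):
        logger.removeHandler(handler)
    logger.addHandler(queue_handler)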
Example #10
def start_logger():
    queue = SimpleQueue()

    formatter = logging.Formatter(
        "%(asctime)s - %(name)s - %(levelname)s - %(message)s"
        )

    logger = logging.getLogger("cranehook")
    logger.setLevel(settings.LOG_LEVEL)

    stream_handler = logging.StreamHandler()
    stream_handler.setFormatter(formatter)
    stream_handler.setLevel(logging.DEBUG)

    discord_handler = DiscordHandler(settings.DISCORD_WEBHOOK_URL, "cranehook")
    discord_handler.setFormatter(formatter)
    discord_handler.setLevel(logging.INFO)
    queue_listener = QueueListener(
        queue, discord_handler, stream_handler, respect_handler_level=True
    )

    queue_handler = QueueHandler(queue)
    logger.addHandler(queue_handler)

    queue_listener.start()
Example #11
def logger_init(file_location="multi.log"):
    q = multiprocessing.Queue()
    # this is the handler for all log records
    stream_handler = logging.StreamHandler()
    formatter = logging.Formatter(
        "%(levelname)s: %(asctime)s - %(process)s - [%(filename)s:%(lineno)s] - %(message)s"
    )
    stream_handler.setFormatter(formatter)
    file_handler = logging.FileHandler(file_location, encoding="utf8")
    file_handler.setFormatter(formatter)

    # ql gets records from the queue and sends them to the handler
    ql = QueueListener(q,
                       stream_handler,
                       file_handler,
                       respect_handler_level=True)
    ql.start()

    logger = logging.getLogger()
    logger.setLevel(logging.INFO)
    # add the handler to the logger so records from this process are handled
    logger.addHandler(stream_handler)
    logger.addHandler(file_handler)

    return ql, q
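The returned queue is what worker processes should log into; a hedged sketch of the worker-side setup (worker_init is an illustrative name, typically passed to multiprocessing.Pool as initializer=worker_init, initargs=(q,)):

import logging
from logging.handlers import QueueHandler

def worker_init(q):
    # Run once per worker process: route every record into the shared queue so
    # the QueueListener in the parent does the formatting and file/console I/O.
    handler = QueueHandler(q)
    root = logging.getLogger()
    root.setLevel(logging.INFO)
    root.addHandler(handler)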
Example #12
    def __init__(
            self,
            setup: Callable[..., WorkerSetupResult],
            function: Callable[[WorkerSetupResult], WorkerFunctionResult],
            cleanup: Callable[[WorkerSetupResult], None],
            setup_args: Optional[Tuple] = None,
            setup_kwargs: Optional[Dict] = None,
            log_handlers: Iterable[Handler] = (),
    ):
        self._running = True

        self._task_queue = mp.Queue(maxsize=500)  # TODO: figure out good value

        logging_queue = mp.Queue()
        self._log_listener = QueueListener(logging_queue, *log_handlers)
        self._log_listener.start()

        self._should_terminate_flag = mp.Value(c_bool, 0)

        self._process = mp.Process(
            name="Pye3D Background Process",
            daemon=True,
            target=BackgroundProcess._worker,
            kwargs=dict(
                setup=setup,
                function=function,
                cleanup=cleanup,
                task_queue=self._task_queue,
                should_terminate_flag=self._should_terminate_flag,
                logging_queue=logging_queue,
                setup_args=setup_args if setup_args else (),
                setup_kwargs=setup_kwargs if setup_kwargs else {},
            ),
        )
        self._process.start()
Example #13
def logger_init(dirpath=None):
    # Adapted from http://stackoverflow.com/a/34964369/164864
    logging_queue = multiprocessing.Queue()
    # this is the handler for all log records
    filepath = "{}-{}.log".format(
        'pandarus-worker',
        datetime.datetime.now().strftime("%d-%B-%Y-%I-%M%p"))
    if dirpath is not None:
        filepath = os.path.join(dirpath, filepath)
    handler = logging.FileHandler(
        filepath,
        encoding='utf-8',
    )
    handler.setFormatter(
        logging.Formatter("%(asctime)s %(levelname)s %(lineno)d %(message)s"))

    # queue_listener gets records from the queue and sends them to the handler
    queue_listener = QueueListener(logging_queue, handler)
    queue_listener.start()

    logger = logging.getLogger()
    logger.setLevel(logging.INFO)
    logger.addHandler(handler)

    return queue_listener, logging_queue
Example #14
    def _load(self, settings):
        disabled = settings.get("disabled", list())
        handlers = list()
        if "stream" not in disabled:
            stream = settings.get("stream", dict())
            stream_level = stream.get("level", None)
            stream_formatter = stream.get("formatter", None)
            stream_date_format = stream.get("date_format", None)
            stream_args = stream_level, stream_formatter, stream_date_format
            stream_handler = Stream(*stream_args)
            handlers.append(stream_handler)

        if "file" not in disabled:
            rotated = settings.get("file", dict())
            rotated_filename = rotated.get("filename", f"{self.name}.log")
            rotated_when = rotated.get("when", "midnight")
            rotated_interval = rotated.get("interval", 1)
            rotated_backup_count = rotated.get("backup_count", 5)
            rotated_level = rotated.get("level", None)
            rotated_formatter = rotated.get("formatter", None)
            rotated_date_format = rotated.get("date_format", None)
            rotated_args = (rotated_filename, rotated_when, rotated_interval,
                            rotated_backup_count, rotated_level,
                            rotated_formatter, rotated_date_format)
            rotated_handler = Rotated(*rotated_args)
            handlers.append(rotated_handler)

        self.queue = Queue()
        self.queue_handler = QueueHandler(self.queue)

        args = tuple(handlers)
        kwargs = dict()
        kwargs["respect_handler_level"] = True
        self.listener = QueueListener(self.queue, *args, **kwargs)
        self.core.addHandler(self.queue_handler)
Example #15
    def __init__(self, n_cpu, **kwargs):
        self.queue = JoinableQueue()
        self.log_queue = Queue()
        self.n_tasks = Value('i', 0)
        kwargs["n_tasks"] = self.n_tasks

        self.processes = [
            Process(target=self.run_trial, kwargs=kwargs)
            for _ in range(int(n_cpu))
        ]

        self.mh = MinimisationHandler.create(kwargs["mh_dict"])
        for season in self.mh.seasons.keys():
            inj = self.mh.get_injector(season)
            inj.calculate_n_exp()
        self.mh_dict = kwargs["mh_dict"]
        self.scales = []

        handler = logging.StreamHandler()
        handler.setFormatter(
            logging.Formatter(
                "%(levelname)s: %(asctime)s - %(process)s - %(message)s"))
        # ql gets records from the queue and sends them to the handler

        ql = QueueListener(self.log_queue, handler)
        ql.start()

        for p in self.processes:
            p.start()
Example #16
 def __init__(self, chat_ids: List[str], token: str, proxies: Optional[Dict[str, str]] = None,
              disable_web_page_preview: bool = False, disable_notification: bool = False,
              reply_to_message_id: Optional[int] = None,
              reply_markup: Optional[Dict[str, Any]] = None) -> None:
     """
     Initialization.
     :param token: Telegram token.
     :optional proxies: Proxy for requests. Format proxies corresponds format proxies 
     in requests library.
     Parameters for message to telegram, see https://core.telegram.org/bots/api#sendmessage
     :optional disable_web_page_preview: Disables link previews for links in this message.
     :optional disable_notification: Sends the message silently. 
     Users will receive a notification with no sound.
     :optional reply_to_message_id: If the message is a reply, ID of the original message.
     :optional reply_markup: Additional interface options. 
     A JSON-serialized object for an inline keyboard, custom reply keyboard,
     instructions to remove reply keyboard or to force a reply from the user.
     """
     self.queue = Queue(-1)  # type: Queue
     super().__init__(self.queue)
     self.handler = TelegramMessageHandler(
         chat_ids,
         token,
         proxies=proxies,
         disable_web_page_preview=disable_web_page_preview,
         disable_notification=disable_notification,
         reply_to_message_id=reply_to_message_id,
         reply_markup=reply_markup
     )
     # Set default formatter
     self.handler.setFormatter(TelegramHtmlFormatter())
     self.listener = QueueListener(self.queue, self.handler)
     self.listener.start()
Example #17
def configure_log_listener(console: bool = True,
                           log_path: str = "main.log") -> QueueListener:
    """
    Configure log queue listener to log into file and console.
    Args:
        console (bool): whether to log on console
        log_path (str): path of log file
    Returns:
        log_qlistener (logging.handlers.QueueListener): configured log queue listener
    """
    global log_qlistener
    try:
        atexit.unregister(log_qlistener.stop)
        log_qlistener.stop()
    except (AttributeError, NameError):
        pass

    handlers: List[logging.Handler] = []

    # rotating file handler
    if log_path:
        file_handler = _get_file_handler(log_path)
        handlers.append(file_handler)

    # console handler
    if console:
        stdout_handler = _get_stdout_handler()
        handlers.append(stdout_handler)

    log_qlistener = QueueListener(log_queue,
                                  *handlers,
                                  respect_handler_level=True)
    log_qlistener.start()
    atexit.register(log_qlistener.stop)
    return log_qlistener
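configure_log_listener only builds the consuming side; loggers still need a QueueHandler attached to the same module-level log_queue for records to reach it. A sketch of that producer side, assuming log_queue is defined next to this function (the helper name is illustrative):

import logging
from logging.handlers import QueueHandler

def configure_logger(name: str = "") -> logging.Logger:
    # Attach a QueueHandler once so records flow into the listener above.
    logger = logging.getLogger(name)
    logger.setLevel(logging.DEBUG)
    if not any(isinstance(h, QueueHandler) for h in logger.handlers):
        logger.addHandler(QueueHandler(log_queue))
    return logger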
Example #18
def config_logging():
    if settings.DINGTALK_WEBHOOK and settings.DINGTALK_SECRET:
        dingtalk_queue = Queue()
        DEFAULT_LOGGING["handlers"]["dingtalk"] = {
            "level": logging.INFO,
            "class": "logging.handlers.QueueHandler",
            "queue": dingtalk_queue,
            "formatter": "simple",
        }
        DEFAULT_LOGGING["loggers"]["notifier"]["handlers"] = [
            "console",
            "file",
            "websocket",
            "dingtalk",
        ]
        dingtalk_handler = DingTalkHandler(
            webhook_url=settings.DINGTALK_WEBHOOK, secret=settings.DINGTALK_SECRET
        )
        dingtalk_listener = QueueListener(dingtalk_queue, dingtalk_handler)
        dingtalk_listener.start()

    Path(LOG_LOCATION).parent.mkdir(parents=True, exist_ok=True)
    logging.config.dictConfig(DEFAULT_LOGGING)
    ws_handler = YuFuRobotStreamWebsocketHandler(ws_uri=settings.WS_ROBOT_STREAM_URI)
    ws_listener = WebsocketListener(ws_queue, ws_handler)
    ws_listener.start()
Example #19
def make_async_logging(log_config):
    # Now we have our user specified logging config, pipe all logging messages
    # through a queue to make it asynchronous

    # These are the handlers for our root logger, they should go through a queue
    root_handlers = log_config["root"].pop("handlers")

    # Create a new handler to replace all the above that just pops messages on
    # a queue, and set it as the handler for the root logger (and children)
    q = queue.SimpleQueue()
    log_config["handlers"]["queue"] = {
        "class": "logging.handlers.QueueHandler",
        "queue": q,
    }
    log_config["root"]["handlers"] = ["queue"]
    configurator = logging.config.DictConfigurator(log_config)
    configurator.configure()

    # Our handlers can be got from the converted config dict
    handlers = [configurator.config["handlers"][h] for h in root_handlers]

    # Now make a queue listener that consumes messages on the queue and forwards
    # them to any of the appropriate original root handlers
    listener = QueueListener(q, *handlers, respect_handler_level=True)
    return listener
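The caller is responsible for starting and stopping the returned listener; a usage sketch with a minimal, hypothetical dict config (note that make_async_logging mutates the dict it is given):

import logging

log_config = {
    "version": 1,
    "handlers": {"console": {"class": "logging.StreamHandler"}},
    "root": {"level": "INFO", "handlers": ["console"]},
}
listener = make_async_logging(log_config)
listener.start()
try:
    logging.getLogger(__name__).info("handled on the listener thread")
finally:
    listener.stop()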
Example #20
    def get_logger(cls, name):
        """
        This uses a trick to identify the main process from its children so that only the main one set up the config.
        """
        logger = logging.getLogger(name)
        if not logger.handlers:
            if cls.config:
                cls.config.set_logger_queue(cls.logger_queue)
                if cls.pid:  # main process
                    # it is time to create the log directories if they do not exist
                    cls._check_or_create_directory()
                    try:
                        logging.config.dictConfig(cls.config_logger['root'])
                    except ValueError as e:
                        raise Exception(
                            'Error while processing logger config: {}'.format(e))
                    # end try/except ValueError

                    # set up queue listener
                    if not cls.logger_queue_listener:
                        cls.logger_queue_listener = QueueListener(
                            cls.logger_queue, SimpleHandler())
                        cls.logger_queue_listener.start()
                    # end if not cls.logger_queue_listener
                # end clause if cls.config, doing else now
                else:
                    # this is called by a child process.
                    logging.config.dictConfig(cls.config_logger['subprocess'])
                # end if cls.pid
            # end if cls.config
        # end if not logger.handlers
        return logger
Example #21
def logger_init(log_fname):
    q = Queue()

    try:
        handler = logging.FileHandler(log_fname)
    except PermissionError as e:
        print("logger_init: Error = ", str(e))
        handler = logging.StreamHandler()
        print("logger_init: StreamHandler selected.")
    except Exception as e:
        print("logger_init: Unexpected Error = ", str(e))
        handler = logging.StreamHandler()

    handler.setFormatter(
        logging.Formatter(
            "%(levelname)s: %(asctime)s - %(process)s - %(message)s"))

    ql = QueueListener(q, handler)
    ql.start()

    logger = logging.getLogger()
    logger.setLevel(logging.DEBUG)
    logger.addHandler(handler)

    return ql, q
Example #22
 def __init__(self, job, level):
     
     self.job = job
     self.level = level
     
     # Create queue through which log records can be sent from various
     # processes and threads to the logging thread.
     self.queue = Queue()
     
     formatter = Formatter('%(asctime)s %(levelname)-8s %(message)s')
     
     # Create handler that writes log messages to the job log file.
     os_utils.create_parent_directory(job.log_file_path)
     file_handler = FileHandler(job.log_file_path, 'w')
     file_handler.setFormatter(formatter)
     
     # Create handler that writes log messages to stderr.
     stderr_handler = StreamHandler()
     stderr_handler.setFormatter(formatter)
     
     self._record_counts_handler = _RecordCountsHandler()
     
     # Create logging listener that will run on its own thread and log
     # messages sent to it via the queue.
     self._listener = QueueListener(
         self.queue, file_handler, stderr_handler,
         self._record_counts_handler)
Example #23
    def default(cls) -> logging.Logger:
        """Defines non-blocking application logger.
        Inspiration: https://www.zopatista.com/python/2019/05/11/asyncio-logging/

        Returns:
            logging.Logger: Root logger
        """
        # get root logger
        logger = logging.getLogger()
        logger.setLevel(logging.DEBUG)

        formatter = logging.Formatter(
            fmt="%(asctime)s | %(levelname)8s | %(message)60s | %(filename)s:%(lineno)d at %(name)s",
            datefmt="%Y-%m-%d %H:%M:%S",
        )

        stdout_handler = logging.StreamHandler(stream=sys.stdout)
        stdout_handler.setLevel(logging.DEBUG)
        stdout_handler.setFormatter(formatter)

        queue = SimpleQueue()
        queue_handler = QueueHandler(queue)
        logger.addHandler(queue_handler)

        listener = QueueListener(queue, stdout_handler, respect_handler_level=True)

        listener.start()

        return logger
Example #24
    def initLogger(self):
        if not self._capture_output:
            return

        # Regular logs go to the console
        if self._stream is None:
            self._stream = EmittingStream(textWritten=self.normalOutputWritten)
        self.log_handler = logging.StreamHandler(self._stream)
        FORMAT = logging.Formatter(
            '%(asctime)-15s [%(levelname)s] - %(message)s [%(name)s::%(funcName)s]'
        )
        self.log_handler.setFormatter(FORMAT)
        add_class_logger_handler(
            [
                MyMainWindow,
                CollectSpotThread,  #CollectToMySQLThread, CollectToMemThread,
                UsePytdxImportToH5Thread,
                UseTdxImportToH5Thread,
                ImportTdxToH5Task,
                SchedImportThread
            ],
            logging.INFO)
        for name in logging.Logger.manager.loggerDict.keys():
            logger = logging.getLogger(name)
            logger.addHandler(self.log_handler)
            logger.setLevel(logging.DEBUG)

        # Multiprocessing log queue
        self.mp_log_q = multiprocessing.Queue()
        self.mp_log_q_lisener = QueueListener(self.mp_log_q, self.log_handler)
        self.mp_log_q_lisener.start()
Example #25
def configure_logging(name, level=logging.INFO):
    file_handler = RotatingFileHandler(name,
                                       mode="a+",
                                       maxBytes=48000,
                                       backupCount=1)
    file_handler.setFormatter(
        FileFormatter(
            "%(asctime)s %(name)s[%(lineno)d] - %(levelname)s: %(message)s",
            datefmt="%Y-%m-%d %H:%M:%S",
        ))
    stream_handler = StreamHandler(stream=sys.stdout)
    stream_handler.setFormatter(
        ColouredFormatter(
            "%(asctime)s %(name)s[%(lineno)d] - %(levelname)s: %(message)s",
            datefmt="%Y-%m-%d %H:%M:%S",
        ))

    log_queue = Queue()
    queue_handler = LocalQueueHandler(log_queue)

    root_logger = logging.getLogger()
    root_logger.setLevel(level)
    root_logger.addHandler(queue_handler)

    listener = QueueListener(log_queue,
                             file_handler,
                             stream_handler,
                             respect_handler_level=True)
    listener.start()
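LocalQueueHandler is not part of the standard library; a common definition, assuming its purpose is to skip QueueHandler.prepare() because records never leave the process (the usual asyncio-friendly variant):

import logging
import logging.handlers

class LocalQueueHandler(logging.handlers.QueueHandler):
    def emit(self, record: logging.LogRecord) -> None:
        # Enqueue the record untouched; no prepare() needed since it stays in-process.
        try:
            self.enqueue(record)
        except Exception:
            self.handleError(record)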
Example #26
    def test_before_and_after_node_run_hooks_are_called_with_parallel_runner(
            self, context_with_hooks, dummy_dataframe, logs_queue):
        log_records = []

        class LogHandler(logging.Handler):  # pylint: disable=abstract-method
            def handle(self, record):
                log_records.append(record)

        logs_queue_listener = QueueListener(logs_queue, LogHandler())
        logs_queue_listener.start()
        context_with_hooks.catalog.save("cars", dummy_dataframe)
        context_with_hooks.catalog.save("boats", dummy_dataframe)
        context_with_hooks.run(runner=ParallelRunner(),
                               node_names=["node1", "node2"])
        logs_queue_listener.stop()

        before_node_run_log_records = [
            r for r in log_records if r.funcName == "before_node_run"
        ]
        assert len(before_node_run_log_records) == 2
        for record in before_node_run_log_records:
            assert record.getMessage() == "About to run node"
            assert record.node.name in ["node1", "node2"]
            assert set(record.inputs.keys()) <= {"cars", "boats"}

        after_node_run_log_records = [
            r for r in log_records if r.funcName == "after_node_run"
        ]
        assert len(after_node_run_log_records) == 2
        for record in after_node_run_log_records:
            assert record.getMessage() == "Ran node"
            assert record.node.name in ["node1", "node2"]
            assert set(record.outputs.keys()) <= {"planes", "ships"}
Example #27
    def setUp(self):

        # communications channels
        parent_workflow_conn, child_workflow_conn = multiprocessing.Pipe()
        running_event = multiprocessing.Event()

        # logging
        log_q = multiprocessing.Queue()

        def handle(record):
            logger = logging.getLogger(record.name)
            if logger.isEnabledFor(record.levelno):
                logger.handle(record)

        handler = CallbackHandler(handle)
        self.queue_listener = QueueListener(log_q, handler)
        self.queue_listener.start()

        remote_process = multiprocessing.Process(
            target=remote_main,
            name="remote process",
            args=[parent_workflow_conn, log_q, running_event])

        remote_process.daemon = True
        remote_process.start()
        running_event.wait()

        self.workflow = LocalWorkflow(child_workflow_conn)
        self.remote_process = remote_process
Example #28
def setup_trinity_file_and_queue_logging(
        logger: Logger,
        formatter: Formatter,
        handler_stream: StreamHandler,
        chain_config: ChainConfig,
        level: int = logging.DEBUG
) -> Tuple[Logger, 'Queue[str]', QueueListener]:
    from .mp import ctx

    log_queue = ctx.Queue()

    handler_file = RotatingFileHandler(str(chain_config.logfile_path),
                                       maxBytes=(10000000 * LOG_MAX_MB),
                                       backupCount=LOG_BACKUP_COUNT)

    handler_file.setLevel(level)
    handler_file.setFormatter(formatter)

    logger.addHandler(handler_file)

    listener = QueueListener(
        log_queue,
        handler_stream,
        handler_file,
        respect_handler_level=True,
    )

    return logger, log_queue, listener
Example #29
def setup_trinity_file_and_queue_logging(
        logger: Logger,
        handler_stream: StreamHandler,
        logfile_path: Path,
        level: int = None) -> Tuple[Logger, 'Queue[str]', QueueListener]:
    from .mp import ctx

    if level is None:
        level = logging.DEBUG

    log_queue = ctx.Queue()

    handler_file = RotatingFileHandler(
        str(logfile_path),
        maxBytes=(10000000 * LOG_MAX_MB),
        backupCount=LOG_BACKUP_COUNT
    )

    handler_file.setLevel(level)
    handler_file.setFormatter(LOG_FORMATTER)

    logger.addHandler(handler_file)
    logger.setLevel(level)

    listener = QueueListener(
        log_queue,
        handler_stream,
        handler_file,
        respect_handler_level=True,
    )

    return logger, log_queue, listener
Example #30
def processes():
    ctx = mp.get_context("spawn")
    pipes = [mp.Pipe(duplex=True) for _ in [0, 1, 2]]
    man = ctx.Manager()
    queue = man.Queue()
    processes = [Process(target=process_main,
                         args=(pipes[i][0], queue), ctx=ctx)
                 for i in [0, 1, 2]]

    handler = logging.StreamHandler()
    handler.setFormatter(logging.Formatter(
        "[%(asctime)s] <%(levelname)s>:%(name)s:%(message)s"))

    ql = QueueListener(queue, handler)
    ql.start()
    default_logger.addHandler(handler)

    for p, i in zip(processes, [0, 1, 2]):
        default_logger.info("processes {} started".format(i))
        p.start()
    yield processes, [pi[1] for pi in pipes]
    for p, pi, i in zip(processes, pipes, [0, 1, 2]):
        # try graceful shutdown first
        pi[1].send(dill.dumps((exit, 0, {})))
        p.join(timeout=1)
        if p.is_alive():
            # ungraceful shutdown
            default_logger.info("processes {} ungraceful shutdown".format(i))
            p.terminate()
            p.join()
    default_logger.removeHandler(handler)
    ql.stop()
    man.shutdown()
    man.join()
    default_logger.info("processes stopped")