Example #1
def test_use_root_logger_by_default_and_write_to_custom_tqdm(self):
    logger = logging.root
    CustomTqdm.messages = []
    with tqdm_logging_redirect(total=1, tqdm_class=CustomTqdm) as pbar:
        assert isinstance(pbar, CustomTqdm)
        logger.info('test')
        assert CustomTqdm.messages == ['test']
Example #2
def test_should_add_and_remove_handler_from_root_logger_by_default(self):
    original_handlers = list(logging.root.handlers)
    with tqdm_logging_redirect(total=1) as pbar:
        assert isinstance(logging.root.handlers[-1], TqdmLoggingHandler)
        LOGGER.info('test')
        pbar.update(1)
    assert logging.root.handlers == original_handlers
Example #3
def test_should_add_and_remove_handler_from_custom_logger(self):
    logger = logging.Logger('test')
    with tqdm_logging_redirect(total=1, loggers=[logger]) as pbar:
        assert len(logger.handlers) == 1
        assert isinstance(logger.handlers[0], TqdmLoggingHandler)
        logger.info('test')
        pbar.update(1)
    assert not logger.handlers
Example #4
def test_should_format_message(self):
    logger = logging.Logger('test')
    console_handler = logging.StreamHandler(sys.stdout)
    console_handler.setFormatter(logging.Formatter(r'prefix:%(message)s'))
    logger.handlers = [console_handler]
    CustomTqdm.messages = []
    with tqdm_logging_redirect(loggers=[logger], tqdm_class=CustomTqdm):
        logger.info('test')
    assert CustomTqdm.messages == ['prefix:test']
Example #5
def test_should_not_fail_with_logger_without_console_handler(self):
    logger = logging.Logger('test')
    logger.handlers = []
    with tqdm_logging_redirect(total=1, loggers=[logger]):
        logger.info('test')
    assert not logger.handlers
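
The five excerpts above are methods of a single test class, and the enclosing module supplies the imports, the module-level LOGGER, and the CustomTqdm test double they rely on. A minimal sketch of that scaffolding, assuming current tqdm.contrib.logging (the handler alias and the CustomTqdm definition are reconstructions, not the original module's code):

import logging
import sys

from tqdm import tqdm
from tqdm.contrib.logging import tqdm_logging_redirect
# Handler class that the context manager installs; imported here under the name
# the tests check against (the exact alias in the original module is an assumption).
from tqdm.contrib.logging import _TqdmLoggingHandler as TqdmLoggingHandler

LOGGER = logging.getLogger(__name__)


class CustomTqdm(tqdm):
    """Test double: records everything routed through tqdm.write() in a class list."""
    messages = []

    @classmethod
    def write(cls, s, file=None, end="\n", nolock=False):
        CustomTqdm.messages.append(s)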
Example #6
    async def run(self):
        """
        Run application.
        """

        exit_code = ExitCodes.EXIT_SUCCESS

        self.logger.info(f"{self.PROG}: Version v{__version__}")
        self.logger.debug(f"Configuration: {dict(self.config)!r}")

        try:
            pid_lock, got_pid_lock = self._get_pid_lock(
                self.config["path_pidfile"]
            )

            net_codes = ",".join(self.config["network"])
            sta_codes = ",".join(self.config["station"])
            loc_codes = ",".join(self.config["location"])
            cha_codes = ",".join(self.config["channel"])

            connector = aiohttp.TCPConnector(
                limit=self.config["worker_pool_size"]
            )
            timeout = aiohttp.ClientTimeout(total=self.config["timeout"])

            async with aiohttp.ClientSession(
                connector=connector, headers=self._HEADERS
            ) as session:

                # download stream epochs from eidaws-stationlite
                stream_epoch_dict = {}
                for level in self.config["level"]:
                    self.logger.debug(
                        f"Request stream epochs for level: {level!r}"
                    )

                    # https://github.com/aio-libs/aiohttp/issues/4549
                    # TODO(damb): Debug with tcpdump
                    await asyncio.sleep(0.02)
                    stream_epochs = await self._emerge_stream_epochs(
                        session,
                        net_codes,
                        sta_codes,
                        loc_codes,
                        cha_codes,
                        level,
                    )
                    if not stream_epochs:
                        self.logger.debug(
                            f"No stream epochs received for level: {level!r}"
                        )
                        continue

                    stream_epoch_dict[level] = stream_epochs
                    self.logger.debug(
                        f"Received {len(stream_epoch_dict[level])} stream "
                        "epoch(s)."
                    )

                if not stream_epoch_dict:
                    self.logger.info("Nothing to do")
                    return

                _history_dump = None
                if self.config["history_json_dump"]:
                    _history_dump = []

                start = timer()
                stats_counter = Counter()
                lock = asyncio.Lock()
                crawled_total = 0
                with tqdm_logging_redirect(
                    tqdm_class=tqdm,
                    loggers=[logging.root, logging.getLogger("eidaws")],
                    disable=not self.config["progress_bar"],
                ) as pbar:
                    async with Pool(
                        worker_coro=Worker(
                            session,
                            stats_counter,
                            lock,
                            delay=self.config["delay"],
                            pbar=pbar,
                            history=_history_dump,
                        ).run,
                        max_workers=self.config["worker_pool_size"],
                    ) as pool:
                        crawled_total = await self._crawl(
                            pool,
                            stream_epoch_dict,
                            formats=self.config["format"],
                            pbar=pbar,
                            timeout=timeout,
                            headers=self._HEADERS,
                        )

                    if crawled_total:
                        self.logger.info(
                            "Crawling HTTP response code statistics "
                            f"(total requests: {sum(stats_counter.values())}): "
                            f"{dict(stats_counter)!r}"
                        )
                        self.logger.info(
                            "Finished crawling successfully in "
                            f"{round(timer() - start, 6)}s"
                        )
                    else:
                        self.logger.info("Nothing to do")
                        return

                if _history_dump:
                    self.logger.debug(
                        "Dumping crawling history to {!r}".format(
                            self.config["history_json_dump"]
                            if self.config["history_json_dump"] != "-"
                            else "stdout"
                        )
                    )

                    for entry in _history_dump:
                        entry["stream"] = _serialize_stream_epoch(
                            entry["stream"]
                        )

                    ofd = (
                        open(self.config["history_json_dump"], "w")
                        if self.config["history_json_dump"] != "-"
                        else sys.stdout
                    )
                    json.dump(_history_dump, ofd)
                    if ofd is not sys.stdout:
                        ofd.close()

        except Error as err:
            self.logger.error(err)
            exit_code = ExitCodes.EXIT_ERROR
        except Exception as err:
            exc_type, exc_value, exc_traceback = sys.exc_info()
            self.logger.critical("Local Exception: %s" % err)
            self.logger.critical(
                "Traceback information: "
                + repr(
                    traceback.format_exception(
                        exc_type, exc_value, exc_traceback
                    )
                )
            )
            exit_code = ExitCodes.EXIT_ERROR
        finally:

            try:
                if got_pid_lock:
                    pid_lock.release()
            except NameError:
                pass

        sys.exit(exit_code)
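
Example #6 wraps an asyncio worker pool in tqdm_logging_redirect so that log records from the crawler are routed through the progress bar instead of breaking it. A minimal, self-contained sketch of that pattern (crawl_one and URLS are illustrative stand-ins, not the application's code):

import asyncio
import logging

from tqdm import tqdm
from tqdm.contrib.logging import tqdm_logging_redirect

logging.basicConfig(level=logging.INFO)
logger = logging.getLogger("eidaws")

URLS = [f"https://example.org/resource/{i}" for i in range(20)]


async def crawl_one(url, pbar):
    await asyncio.sleep(0.1)        # stands in for the real HTTP request
    logger.info("crawled %s", url)  # emitted via the redirected tqdm handler
    pbar.update(1)


async def main():
    # By default the root logger is redirected (see Examples #1 and #2);
    # extra tqdm kwargs such as total or disable are forwarded to the
    # underlying tqdm_class, just as in Example #6.
    with tqdm_logging_redirect(total=len(URLS), tqdm_class=tqdm) as pbar:
        await asyncio.gather(*(crawl_one(url, pbar) for url in URLS))


if __name__ == "__main__":
    asyncio.run(main())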