Example #1
    @classmethod
    def setUpClass(cls):
        arg_parser = argparse.ArgumentParser(add_help=False)
        cli.add_argument_parser_logging(arg_parser,
                                        default_log_level=LogLevel.DEBUG)
        opts = arg_parser.parse_args(args=[])
        log_config.setup_logging(
            opts.log_format,
            opts.log_level,
            default_logger_names=[
                "bxcommon", "bxgateway", "bxrelay", "bxgateway_internal",
                "bxapi"
            ],
            log_level_overrides=opts.log_level_overrides,
            enable_fluent_logger=opts.log_fluentd_enable,
            fluentd_host=opts.log_fluentd_host,
            fluentd_queue_size=opts.log_fluentd_queue_size,
            third_party_loggers=node_runner.THIRD_PARTY_LOGGERS,
            fluent_log_level=opts.log_level_fluentd,
            stdout_log_level=opts.log_level_stdout,
        )
        log_config.set_level([LogRecordType.Config.value], LogLevel.WARNING)

        # Stub out external HTTP calls and memory-statistics recording so the
        # tests do not touch real services.
        http_service.get_json = MagicMock()
        http_service.post_json = MagicMock()
        http_service.patch_json = MagicMock()
        http_service.delete_json = MagicMock()
        memory_statistics.start_recording = MagicMock()

        # Prevent metrics collectors from being registered for real, and set
        # up extension task-pool parallelism for the test run.
        REGISTRY.register = MagicMock()
        helpers.set_extensions_parallelism()
Example #2
    def test_custom_logger(self):
        log_config.setup_logging(log_format=log_config.LogFormat.JSON,
                                 default_log_level=log_config.LogLevel.TRACE,
                                 default_logger_names="",
                                 log_level_overrides={},
                                 enable_fluent_logger=True,
                                 fluentd_host="fluentd",
                                 third_party_loggers=[
                                     logging.LoggerConfig(
                                         "test_logging", "{",
                                         logging.LogLevel.TRACE,
                                         handler_type.HandlerType.Fluent)
                                 ])
        logger = logging.get_logger("test_logging")
        handlers = self._get_handlers(logger)
        self.assertEqual(len(handlers), 1)
        stream_handlers = [
            handler for handler in handlers
            if isinstance(handler, StreamHandler)
        ]
        fluentd_handlers = [
            handler for handler in handlers
            if isinstance(handler, FluentHandler)
        ]
        self.assertEqual(len(stream_handlers), 0)
        self.assertEqual(len(fluentd_handlers), 1)
        for handler in handlers:
            self.assertEqual(handler.level, 0)

        fluentd_handler = fluentd_handlers[0]
        self.assertIsInstance(fluentd_handler.formatter,
                              formatters.FluentJSONFormatter)
Example #3
    def test_create_logger_fluentd(self):
        log_config.setup_logging(log_format=log_config.LogFormat.JSON,
                                 default_log_level=log_config.LogLevel.TRACE,
                                 default_logger_names="",
                                 log_level_overrides={},
                                 enable_fluent_logger=True,
                                 fluentd_host="fluentd")
        logger = logging.get_logger("test_logging")
        handlers = self._get_handlers(logger)
        self.assertEqual(len(handlers), 2)
        stream_handlers = [
            handler for handler in handlers
            if isinstance(handler, StreamHandler)
        ]
        fluentd_handlers = [
            handler for handler in handlers
            if isinstance(handler, FluentHandler)
        ]
        self.assertEqual(len(stream_handlers), 1)
        self.assertEqual(len(fluentd_handlers), 1)
        for handler in handlers:
            self.assertEqual(handler.level, 0)

        fluentd_handler = fluentd_handlers[0]
        stream_handler = stream_handlers[0]
        self.assertIsInstance(fluentd_handler.formatter,
                              formatters.JSONFormatter)
        self.assertIsInstance(stream_handler.formatter,
                              formatters.JSONFormatter)
Example #4
    def test_create_logger(self):
        log_config.setup_logging(log_format=log_config.LogFormat.JSON,
                                 default_log_level=log_config.LogLevel.TRACE,
                                 default_logger_names="",
                                 log_level_overrides={})

        logger = logging.get_logger("test_logging")
        handlers = self._get_handlers(logger)
        self.assertEqual(len(handlers), 1)
        for handler in handlers:
            self.assertIsInstance(handler, StreamHandler)
            self.assertIsInstance(handler.formatter, formatters.JSONFormatter)
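
Examples #2, #3, and #4 call a _get_handlers helper that is defined elsewhere in the test class and is not part of this excerpt. As a rough, hypothetical sketch only, assuming logging.get_logger returns an object that either is or wraps a standard-library logger, such a helper could collect the handlers attached to the logger and to its propagating ancestors:

    def _get_handlers(self, logger):
        # Hypothetical helper, not from the original test class: unwrap the
        # custom logger if it wraps a standard-library logger, then gather
        # handlers from it and from every ancestor it propagates to.
        current = getattr(logger, "logger", logger)
        handlers = []
        while current is not None:
            handlers.extend(current.handlers)
            if not current.propagate:
                break
            current = getattr(current, "parent", None)
        return handlers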
Example #5
def run_node(
    process_id_file_path: str,
    opts: OptsType,
    get_node_class: Callable[[], Type[AbstractNode]],
    node_type: NodeType,
    logger_names: Optional[Iterable[str]] = tuple(LOGGER_NAMES),
    ssl_service_factory: Callable[
        [NodeType, str, str, str],
        NodeSSLService] = default_ssl_service_factory,
    third_party_loggers: Optional[List[LoggerConfig]] = None,
    node_init_tasks: Optional[List[InitTaskType]] = None,
) -> None:

    if third_party_loggers is None:
        third_party_loggers = THIRD_PARTY_LOGGERS
    opts.logger_names = logger_names
    log_config.setup_logging(
        opts.log_format,
        opts.log_level,
        # pyre-fixme[6]: Expected `Iterable[str]` for 3rd param but got
        #  `Iterable[Optional[str]]`.
        logger_names,
        opts.log_level_overrides,
        enable_fluent_logger=opts.log_fluentd_enable,
        fluentd_host=opts.log_fluentd_host,
        fluentd_queue_size=opts.log_fluentd_queue_size,
        third_party_loggers=third_party_loggers,
        fluent_log_level=opts.log_level_fluentd,
        stdout_log_level=opts.log_level_stdout,
        fluentd_tag_suffix=node_type.name.lower())
    if node_init_tasks is None:
        node_init_tasks = common_init_tasks.init_tasks

    startup_param = sys.argv[1:]
    logger.info("Startup Parameters are: {}", " ".join(startup_param))

    _verify_environment()

    config.log_pid(process_id_file_path)
    gc.callbacks.append(gc_logger.gc_callback)
    # Disable automatic GC generational collection; gc.collect() is invoked
    # explicitly if memory usage exceeds the threshold.
    gc.disable()
    try:
        if opts.use_extensions:
            from bxcommon.utils.proxy import task_pool_proxy

            task_pool_proxy.init(opts.thread_pool_parallelism_degree)
            logger.debug(
                "Initialized task thread pool parallelism degree to {}.",
                task_pool_proxy.get_pool_size(),
            )

        _run_node(
            opts,
            get_node_class,
            node_type,
            node_init_tasks=node_init_tasks,
            ssl_service_factory=ssl_service_factory,
        )
    except TerminationError:
        logger.fatal("Node terminated")
    except HighMemoryError:
        logger.info("Restarting node due to high memory")
        _close_handles()
        python = sys.executable
        os.execl(python, python, *sys.argv)
    except Exception as e:  # pylint: disable=broad-except
        logger.fatal("Unhandled exception {} raised, terminating!", e)

    _close_handles()
Example #6
    def setUp(self) -> None:
        log_config.setup_logging(log_format=log_config.LogFormat.JSON,
                                 default_log_level=log_config.LogLevel.TRACE,
                                 default_logger_names="",
                                 log_level_overrides={})