def setup(config, setup_db=True, register_mq_exchanges=True, register_internal_trigger_types=False):
    """
    Perform common service bootstrap steps, in order:

    1. Register common CLI options and parse config / CLI arguments.
    2. Configure stdlib logging, filtering out AUDIT / DEBUG records unless
       --verbose (or --debug, which implies it) is used.
    3. Optionally establish the DB connection.
    4. Optionally register RabbitMQ exchanges.
    5. Optionally register internal trigger types (disabled by default).

    :param config: Config object to use to parse args.
    """
    register_common_cli_options()
    config.parse_args()

    # --debug implies --verbose
    if cfg.CONF.debug:
        cfg.CONF.verbose = True

    stdlib_logging.basicConfig(
        format="%(asctime)s %(levelname)s [-] %(message)s",
        level=stdlib_logging.DEBUG,
    )

    if not cfg.CONF.verbose:
        # Keep INFO / ERROR / CRITICAL visible; drop AUDIT and DEBUG records.
        suppressed_levels = [stdlib_logging.AUDIT, stdlib_logging.DEBUG]

        for root_handler in stdlib_logging.getLoggerClass().manager.root.handlers:
            root_handler.addFilter(LogLevelFilter(log_levels=suppressed_levels))

        # statsd logs everything under INFO by default, so silence it (and
        # lib2to3) unless verbose / debug mode is in effect.
        logging.ignore_statsd_log_messages()
        logging.ignore_lib2to3_log_messages()

    # Remaining setup requires parsed config and working logging.
    if setup_db:
        db_setup()

    if register_mq_exchanges:
        register_exchanges_with_retry()

    if register_internal_trigger_types:
        triggers.register_internal_trigger_types()
def setup(config, setup_db=True, register_mq_exchanges=True, register_internal_trigger_types=False): """ Common setup function. Currently it performs the following operations: 1. Parses config and CLI arguments 2. Establishes DB connection 3. Suppress DEBUG log level if --verbose flag is not used 4. Registers RabbitMQ exchanges 5. Registers internal trigger types (optional, disabled by default) :param config: Config object to use to parse args. """ # Register common CLI options register_common_cli_options() # Parse args to setup config config.parse_args() if cfg.CONF.debug: cfg.CONF.verbose = True # Set up logging log_level = stdlib_logging.DEBUG stdlib_logging.basicConfig(format='%(asctime)s %(levelname)s [-] %(message)s', level=log_level) if not cfg.CONF.verbose: # Note: We still want to print things at the following log levels: INFO, ERROR, CRITICAL exclude_log_levels = [stdlib_logging.AUDIT, stdlib_logging.DEBUG] handlers = stdlib_logging.getLoggerClass().manager.root.handlers for handler in handlers: handler.addFilter(LogLevelFilter(log_levels=exclude_log_levels)) # NOTE: statsd logger logs everything by default under INFO so we ignore those log # messages unless verbose / debug mode is used logging.ignore_statsd_log_messages() logging.ignore_lib2to3_log_messages() # All other setup code which requires config to be parsed and logging to be correctly setup if setup_db: db_setup() if register_mq_exchanges: register_exchanges_with_retry() if register_internal_trigger_types: triggers.register_internal_trigger_types()
def setup(self):
    """
    One-time sensor initialization: establish the DB connection and build a
    reusable Mistral API client from the keystone settings in config.
    """
    # Setup stuff goes here. For example, you might establish connections
    # to external system once and reuse it. This is called only once by the system.
    setup.db_setup()
    self.logger = self.sensor_service.get_logger(__name__)
    # Poll interval in seconds — presumably consumed by the sensor's polling
    # loop; TODO confirm against the base sensor class.
    self._poll_interval = 3
    # Normalize the configured Mistral v2 endpoint (strip any trailing slash).
    self._base_url = url_util.get_url_without_trailing_slash(cfg.CONF.mistral.v2_base_url)
    # Construct the Mistral client once; credentials and TLS options all come
    # from the [mistral] config section.
    self._client = mistral.client(
        mistral_url=self._base_url,
        username=cfg.CONF.mistral.keystone_username,
        api_key=cfg.CONF.mistral.keystone_password,
        project_name=cfg.CONF.mistral.keystone_project_name,
        auth_url=cfg.CONF.mistral.keystone_auth_url,
        cacert=cfg.CONF.mistral.cacert,
        insecure=cfg.CONF.mistral.insecure)
def __init__(self, pack, file_path, parameters=None, user=None, parent_args=None):
    """
    :param pack: Name of the pack this action belongs to.
    :type pack: ``str``

    :param file_path: Path to the action module.
    :type file_path: ``str``

    :param parameters: action parameters.
    :type parameters: ``dict`` or ``None``

    :param user: Name of the user who triggered this action execution.
    :type user: ``str``

    :param parent_args: Command line arguments passed to the parent process.
    :type parent_args: ``list``
    """
    self._pack = pack
    self._file_path = file_path
    self._parameters = parameters or {}
    self._user = user
    self._parent_args = parent_args or []
    # Resolved lazily — presumably set once the action class is loaded from
    # file_path; TODO confirm against the loader code.
    self._class_name = None
    self._logger = logging.getLogger('PythonActionWrapper')

    # Best-effort: config may already be parsed (or args may be invalid in a
    # subprocess context), so failures are logged at DEBUG and ignored.
    try:
        config.parse_args(args=self._parent_args)
    except Exception as e:
        LOG.debug('Failed to parse config using parent args (parent_args=%s): %s' %
                  (str(self._parent_args), str(e)))

    # We don't need to ensure indexes every subprocess because they should already be created
    # and ensured by other services
    db_setup(ensure_indexes=False)

    # Note: We can only set a default user value if one is not provided after parsing the
    # config
    if not self._user:
        self._user = cfg.CONF.system_user.user
def __init__(self, pack, file_path, parameters=None, user=None, parent_args=None):
    """
    :param pack: Name of the pack this action belongs to.
    :type pack: ``str``

    :param file_path: Path to the action module.
    :type file_path: ``str``

    :param parameters: action parameters.
    :type parameters: ``dict`` or ``None``

    :param user: Name of the user who triggered this action execution.
    :type user: ``str``

    :param parent_args: Command line arguments passed to the parent process.
    :type parent_args: ``list``
    """
    self._pack = pack
    self._file_path = file_path
    self._parameters = parameters if parameters else {}
    self._user = user
    self._parent_args = parent_args if parent_args else []
    self._class_name = None
    self._logger = logging.getLogger('PythonActionWrapper')

    # Parsing the config with the parent's CLI args is best-effort: any
    # failure is logged at DEBUG level and otherwise ignored.
    try:
        config.parse_args(args=self._parent_args)
    except Exception as e:
        LOG.debug('Failed to parse config using parent args (parent_args=%s): %s' %
                  (str(self._parent_args), str(e)))

    # Index creation is skipped here — other services are expected to have
    # created and ensured the DB indexes already, so subprocesses don't repeat it.
    db_setup(ensure_indexes=False)

    # Fall back to the configured system user only when no explicit user was
    # supplied (the config must be parsed before this default is available).
    if not self._user:
        self._user = cfg.CONF.system_user.user
def setup(config, setup_db=True, register_mq_exchanges=True):
    """
    Perform common service bootstrap steps, in order:

    1. Register common CLI options and parse config / CLI arguments.
    2. Configure stdlib logging, filtering AUDIT / DEBUG records unless
       --verbose (or --debug, which implies it) is used.
    3. Optionally establish the DB connection.
    4. Optionally register RabbitMQ exchanges.

    :param config: Config object to use to parse args.
    """
    register_common_cli_options()
    config.parse_args()

    # --debug implies --verbose
    if cfg.CONF.debug:
        cfg.CONF.verbose = True

    stdlib_logging.basicConfig(
        format='%(asctime)s %(levelname)s [-] %(message)s',
        level=stdlib_logging.DEBUG,
    )

    if not cfg.CONF.verbose:
        # Keep INFO / ERROR / CRITICAL visible; drop AUDIT and DEBUG records.
        suppressed_levels = [stdlib_logging.AUDIT, stdlib_logging.DEBUG]

        for root_handler in stdlib_logging.getLoggerClass().manager.root.handlers:
            root_handler.addFilter(LogLevelFilter(log_levels=suppressed_levels))

    # Remaining setup requires parsed config and working logging.
    if setup_db:
        db_setup()

    if register_mq_exchanges:
        register_exchanges_with_retry()
def setup(config, setup_db=True, register_mq_exchanges=True):
    """
    Common setup function. Currently it performs the following operations:

    1. Parses config and CLI arguments
    2. Establishes DB connection
    3. Suppress DEBUG log level if --verbose flag is not used
    4. Registers RabbitMQ exchanges

    :param config: Config object to use to parse args.
    :param setup_db: True to establish a database connection.
    :param register_mq_exchanges: True to register RabbitMQ exchanges (with retry).
    """
    # Register common CLI options
    register_common_cli_options()

    # Parse args to setup config
    config.parse_args()

    # Consistency fix: every other setup() variant treats --debug as implying
    # --verbose. Without this, DEBUG log records would be filtered out below
    # even when --debug is explicitly passed.
    if cfg.CONF.debug:
        cfg.CONF.verbose = True

    # Set up logging
    log_level = stdlib_logging.DEBUG
    stdlib_logging.basicConfig(format='%(asctime)s %(levelname)s [-] %(message)s',
                               level=log_level)

    if not cfg.CONF.verbose:
        # Note: We still want to print things at the following log levels: INFO, ERROR, CRITICAL
        exclude_log_levels = [stdlib_logging.AUDIT, stdlib_logging.DEBUG]
        handlers = stdlib_logging.getLoggerClass().manager.root.handlers

        for handler in handlers:
            handler.addFilter(LogLevelFilter(log_levels=exclude_log_levels))

    # All other setup code which requires config to be parsed and logging to be correctly setup
    if setup_db:
        db_setup()

    if register_mq_exchanges:
        register_exchanges_with_retry()
def setup(service, config, setup_db=True, register_mq_exchanges=True,
          register_signal_handlers=True, register_internal_trigger_types=False,
          run_migrations=True, config_args=None):
    """
    Common setup function. Currently it performs the following operations:

    1. Parses config and CLI arguments
    2. Establishes DB connection
    3. Set log level for all the loggers to DEBUG if --debug flag is present or
       if system.debug config option is set to True.
    4. Registers RabbitMQ exchanges
    5. Registers common signal handlers
    6. Register internal trigger types

    :param service: Name of the service.
    :param config: Config object to use to parse args.
    :param config_args: Optional explicit CLI args to parse instead of sys.argv.
    """
    # Set up logger which logs everything which happens during and before config
    # parsing to sys.stdout
    logging.setup(DEFAULT_LOGGING_CONF_PATH, excludes=None)

    # Parse args to setup config.
    if config_args:
        config.parse_args(config_args)
    else:
        config.parse_args()

    config_file_paths = cfg.CONF.config_file
    config_file_paths = [os.path.abspath(path) for path in config_file_paths]
    LOG.debug('Using config files: %s', ','.join(config_file_paths))

    # Setup logging.
    logging_config_path = config.get_logging_config_path()
    logging_config_path = os.path.abspath(logging_config_path)
    LOG.debug('Using logging config: %s', logging_config_path)
    logging.setup(logging_config_path, redirect_stderr=cfg.CONF.log.redirect_stderr,
                  excludes=cfg.CONF.log.excludes)

    if cfg.CONF.debug or cfg.CONF.system.debug:
        enable_debugging()

    if cfg.CONF.profile:
        enable_profiling()

    # All other setup which requires config to be parsed and logging to
    # be correctly setup.
    if setup_db:
        db_setup()

    if register_mq_exchanges:
        register_exchanges_with_retry()

    if register_signal_handlers:
        register_common_signal_handlers()

    if register_internal_trigger_types:
        triggers.register_internal_trigger_types()

    # TODO: This is a "not so nice" workaround until we have a proper migration system in place
    if run_migrations:
        run_all_rbac_migrations()

    if cfg.CONF.rbac.enable and not cfg.CONF.auth.enable:
        # Fix: the original implicit string concatenation was missing a space,
        # producing "...enabled.You can either...".
        msg = ('Authentication is not enabled. RBAC only works when authentication is enabled. '
               'You can either enable authentication or disable RBAC.')
        raise Exception(msg)
def setup(service, config, setup_db=True, register_mq_exchanges=True,
          register_signal_handlers=True, register_internal_trigger_types=False,
          run_migrations=True, config_args=None):
    """
    Common setup function. Currently it performs the following operations:

    1. Parses config and CLI arguments
    2. Establishes DB connection
    3. Set log level for all the loggers to DEBUG if --debug flag is present or
       if system.debug config option is set to True.
    4. Registers RabbitMQ exchanges
    5. Registers common signal handlers
    6. Register internal trigger types

    :param service: Name of the service.
    :param config: Config object to use to parse args.
    :param config_args: Optional explicit CLI args to parse instead of sys.argv.
    """
    # Set up logger which logs everything which happens during and before config
    # parsing to sys.stdout
    logging.setup(DEFAULT_LOGGING_CONF_PATH, excludes=None)

    # Parse args to setup config.
    if config_args:
        config.parse_args(config_args)
    else:
        config.parse_args()

    config_file_paths = cfg.CONF.config_file
    config_file_paths = [os.path.abspath(path) for path in config_file_paths]
    LOG.debug('Using config files: %s', ','.join(config_file_paths))

    # Setup logging.
    logging_config_path = config.get_logging_config_path()
    logging_config_path = os.path.abspath(logging_config_path)
    LOG.debug('Using logging config: %s', logging_config_path)

    try:
        logging.setup(logging_config_path, redirect_stderr=cfg.CONF.log.redirect_stderr,
                      excludes=cfg.CONF.log.excludes)
    except KeyError as e:
        tb_msg = traceback.format_exc()
        if 'log.setLevel' in tb_msg:
            # A lowercase level name in the logging config trips "log.setLevel";
            # surface a friendlier message while preserving the original one.
            msg = 'Invalid log level selected. Log level names need to be all uppercase.'
            msg += '\n\n' + getattr(e, 'message', str(e))
            raise KeyError(msg)
        else:
            # Fix: bare "raise" re-raises the active exception with its original
            # traceback intact; "raise e" would add a redundant frame.
            raise

    if cfg.CONF.debug or cfg.CONF.system.debug:
        enable_debugging()

    if cfg.CONF.profile:
        enable_profiling()

    # All other setup which requires config to be parsed and logging to
    # be correctly setup.
    if setup_db:
        db_setup()

    if register_mq_exchanges:
        register_exchanges_with_retry()

    if register_signal_handlers:
        register_common_signal_handlers()

    if register_internal_trigger_types:
        triggers.register_internal_trigger_types()

    # TODO: This is a "not so nice" workaround until we have a proper migration system in place
    if run_migrations:
        run_all_rbac_migrations()

    metrics_initialize()
def __init__(self, config, action_service=None):
    """
    :param config: Action config object passed through to the base action.
    :param action_service: Optional action service instance (passed to the base
                           class unchanged).
    """
    super(UpdateWorkflowStatusAction, self).__init__(config=config,
                                                     action_service=action_service)
    # Establish the DB connection up front so the action can update workflow
    # status records directly.
    setup.db_setup()
def setup(service, config, setup_db=True, register_mq_exchanges=True,
          register_signal_handlers=True, register_internal_trigger_types=False,
          run_migrations=True, register_runners=True, config_args=None):
    """
    Common setup function. Currently it performs the following operations:

    1. Parses config and CLI arguments
    2. Establishes DB connection
    3. Set log level for all the loggers to DEBUG if --debug flag is present or
       if system.debug config option is set to True.
    4. Registers RabbitMQ exchanges
    5. Registers common signal handlers
    6. Register internal trigger types
    7. Register all the runners which are installed inside StackStorm virtualenv.

    :param service: Name of the service.
    :param config: Config object to use to parse args.
    :param config_args: Optional explicit CLI args to parse instead of sys.argv.
    """
    # Set up logger which logs everything which happens during and before config
    # parsing to sys.stdout
    logging.setup(DEFAULT_LOGGING_CONF_PATH, excludes=None)

    # Parse args to setup config.
    if config_args:
        config.parse_args(config_args)
    else:
        config.parse_args()

    version = '%s.%s.%s' % (sys.version_info[0], sys.version_info[1], sys.version_info[2])
    LOG.debug('Using Python: %s (%s)' % (version, sys.executable))

    config_file_paths = cfg.CONF.config_file
    config_file_paths = [os.path.abspath(path) for path in config_file_paths]
    LOG.debug('Using config files: %s', ','.join(config_file_paths))

    # Setup logging.
    logging_config_path = config.get_logging_config_path()
    logging_config_path = os.path.abspath(logging_config_path)
    LOG.debug('Using logging config: %s', logging_config_path)

    is_debug_enabled = (cfg.CONF.debug or cfg.CONF.system.debug)

    try:
        logging.setup(logging_config_path, redirect_stderr=cfg.CONF.log.redirect_stderr,
                      excludes=cfg.CONF.log.excludes)
    except KeyError as e:
        tb_msg = traceback.format_exc()
        if 'log.setLevel' in tb_msg:
            msg = 'Invalid log level selected. Log level names need to be all uppercase.'
            msg += '\n\n' + getattr(e, 'message', str(e))
            raise KeyError(msg)
        else:
            # Fix: bare "raise" re-raises the active exception with its original
            # traceback intact; "raise e" would add a redundant frame.
            raise

    exclude_log_levels = [stdlib_logging.AUDIT]
    handlers = stdlib_logging.getLoggerClass().manager.root.handlers

    for handler in handlers:
        # If log level is not set to DEBUG we filter out "AUDIT" log messages. This way we avoid
        # duplicate "AUDIT" messages in production deployments where default service log level is
        # set to "INFO" and we already log messages with level AUDIT to a special dedicated log
        # file.
        ignore_audit_log_messages = (handler.level >= stdlib_logging.INFO and
                                     handler.level < stdlib_logging.AUDIT)
        if not is_debug_enabled and ignore_audit_log_messages:
            LOG.debug('Excluding log messages with level "AUDIT" for handler "%s"' % (handler))
            handler.addFilter(LogLevelFilter(log_levels=exclude_log_levels))

    if not is_debug_enabled:
        # NOTE: statsd logger logs everything by default under INFO so we ignore those log
        # messages unless verbose / debug mode is used
        logging.ignore_statsd_log_messages()
        logging.ignore_lib2to3_log_messages()

    if is_debug_enabled:
        enable_debugging()
    else:
        # Add global ignore filters, such as "heartbeat_tick" messages which are logged every 2
        # ms which cause too much noise
        add_global_filters_for_all_loggers()

    if cfg.CONF.profile:
        enable_profiling()

    # All other setup which requires config to be parsed and logging to be correctly setup.
    if setup_db:
        db_setup()

    if register_mq_exchanges:
        register_exchanges_with_retry()

    if register_signal_handlers:
        register_common_signal_handlers()

    if register_internal_trigger_types:
        triggers.register_internal_trigger_types()

    # TODO: This is a "not so nice" workaround until we have a proper migration system in place
    if run_migrations:
        run_all_rbac_migrations()

    if register_runners:
        runnersregistrar.register_runners()

    register_kombu_serializers()

    metrics_initialize()
def setup(
    service,
    config,
    setup_db=True,
    register_mq_exchanges=True,
    register_signal_handlers=True,
    register_internal_trigger_types=False,
    run_migrations=True,
    register_runners=True,
    service_registry=False,
    capabilities=None,
    config_args=None,
):
    """
    Common setup function. Currently it performs the following operations:

    1. Parses config and CLI arguments
    2. Establishes DB connection
    3. Set log level for all the loggers to DEBUG if --debug flag is present or
       if system.debug config option is set to True.
    4. Registers RabbitMQ exchanges
    5. Registers common signal handlers
    6. Register internal trigger types
    7. Register all the runners which are installed inside StackStorm virtualenv.
    8. Register service in the service registry with the provided capabilities

    :param service: Name of the service.
    :param config: Config object to use to parse args.
    :param capabilities: Optional dict of capabilities to advertise in the
                         service registry.
    :param config_args: Optional explicit CLI args to parse instead of sys.argv.
    """
    capabilities = capabilities or {}

    # Set up logger which logs everything which happens during and before config
    # parsing to sys.stdout
    logging.setup(DEFAULT_LOGGING_CONF_PATH, excludes=None)

    # Parse args to setup config.
    if config_args is not None:
        config.parse_args(config_args)
    else:
        config.parse_args()

    version = "%s.%s.%s" % (
        sys.version_info[0],
        sys.version_info[1],
        sys.version_info[2],
    )

    # We print locale related info to make it easier to troubleshoot issues where locale is not
    # set correctly (e.g. using C / ascii, but services are trying to work with unicode data
    # would result in things blowing up)
    fs_encoding = sys.getfilesystemencoding()
    default_encoding = sys.getdefaultencoding()
    lang_env = os.environ.get("LANG", "unknown")

    try:
        language_code, encoding = locale.getdefaultlocale()

        if language_code and encoding:
            used_locale = ".".join([language_code, encoding])
        else:
            used_locale = "unable to retrieve locale"
    except Exception as e:
        used_locale = "unable to retrieve locale: %s " % (str(e))

    LOG.info("Using Python: %s (%s)" % (version, sys.executable))
    LOG.info(
        "Using fs encoding: %s, default encoding: %s, LANG env variable: %s, locale: %s"
        % (fs_encoding, default_encoding, lang_env, used_locale))

    config_file_paths = cfg.CONF.config_file
    config_file_paths = [os.path.abspath(path) for path in config_file_paths]
    LOG.info("Using config files: %s", ",".join(config_file_paths))

    # Setup logging.
    logging_config_path = config.get_logging_config_path()
    logging_config_path = os.path.abspath(logging_config_path)
    LOG.info("Using logging config: %s", logging_config_path)

    is_debug_enabled = cfg.CONF.debug or cfg.CONF.system.debug

    try:
        logging.setup(
            logging_config_path,
            redirect_stderr=cfg.CONF.log.redirect_stderr,
            excludes=cfg.CONF.log.excludes,
        )
    except KeyError as e:
        tb_msg = traceback.format_exc()
        if "log.setLevel" in tb_msg:
            msg = (
                "Invalid log level selected. Log level names need to be all uppercase."
            )
            msg += "\n\n" + getattr(e, "message", six.text_type(e))
            raise KeyError(msg)
        else:
            # Fix: bare "raise" re-raises the active exception with its original
            # traceback intact; "raise e" would add a redundant frame.
            raise

    exclude_log_levels = [stdlib_logging.AUDIT]
    handlers = stdlib_logging.getLoggerClass().manager.root.handlers

    for handler in handlers:
        # If log level is not set to DEBUG we filter out "AUDIT" log messages. This way we avoid
        # duplicate "AUDIT" messages in production deployments where default service log level is
        # set to "INFO" and we already log messages with level AUDIT to a special dedicated log
        # file.
        ignore_audit_log_messages = (handler.level >= stdlib_logging.INFO and
                                     handler.level < stdlib_logging.AUDIT)
        if not is_debug_enabled and ignore_audit_log_messages:
            LOG.debug(
                'Excluding log messages with level "AUDIT" for handler "%s"' % (handler))
            handler.addFilter(LogLevelFilter(log_levels=exclude_log_levels))

    if not is_debug_enabled:
        # NOTE: statsd logger logs everything by default under INFO so we ignore those log
        # messages unless verbose / debug mode is used
        logging.ignore_statsd_log_messages()
        logging.ignore_lib2to3_log_messages()

    if is_debug_enabled:
        enable_debugging()
    else:
        # Add global ignore filters, such as "heartbeat_tick" messages which are logged every 2
        # ms which cause too much noise
        add_global_filters_for_all_loggers()

    if cfg.CONF.profile:
        enable_profiling()

    # All other setup which requires config to be parsed and logging to be correctly setup.
    if setup_db:
        db_setup()

    if register_mq_exchanges:
        register_exchanges_with_retry()

    if register_signal_handlers:
        register_common_signal_handlers()

    if register_internal_trigger_types:
        triggers.register_internal_trigger_types()

    # TODO: This is a "not so nice" workaround until we have a proper migration system in place
    if run_migrations:
        run_all_rbac_migrations()

    if register_runners:
        runnersregistrar.register_runners()

    register_kombu_serializers()

    metrics_initialize()

    # Register service in the service registry
    if cfg.CONF.coordination.service_registry and service_registry:
        # NOTE: It's important that we pass start_heart=True to start the hearbeat process
        register_service_in_service_registry(service=service,
                                             capabilities=capabilities,
                                             start_heart=True)

    if sys.version_info[0] == 2:
        LOG.warning(PYTHON2_DEPRECATION)
def setup(service, config, setup_db=True, register_mq_exchanges=True,
          register_signal_handlers=True, register_internal_trigger_types=False,
          run_migrations=True, register_runners=True, service_registry=False,
          capabilities=None, config_args=None):
    """
    Common setup function. Currently it performs the following operations:

    1. Parses config and CLI arguments
    2. Establishes DB connection
    3. Set log level for all the loggers to DEBUG if --debug flag is present or
       if system.debug config option is set to True.
    4. Registers RabbitMQ exchanges
    5. Registers common signal handlers
    6. Register internal trigger types
    7. Register all the runners which are installed inside StackStorm virtualenv.
    8. Register service in the service registry with the provided capabilities

    :param service: Name of the service.
    :param config: Config object to use to parse args.
    :param capabilities: Optional dict of capabilities to advertise in the
                         service registry.
    :param config_args: Optional explicit CLI args to parse instead of sys.argv.
    """
    capabilities = capabilities or {}

    # Set up logger which logs everything which happens during and before config
    # parsing to sys.stdout
    logging.setup(DEFAULT_LOGGING_CONF_PATH, excludes=None)

    # Parse args to setup config.
    if config_args is not None:
        config.parse_args(config_args)
    else:
        config.parse_args()

    version = '%s.%s.%s' % (sys.version_info[0], sys.version_info[1], sys.version_info[2])
    LOG.debug('Using Python: %s (%s)' % (version, sys.executable))

    config_file_paths = cfg.CONF.config_file
    config_file_paths = [os.path.abspath(path) for path in config_file_paths]
    LOG.debug('Using config files: %s', ','.join(config_file_paths))

    # Setup logging.
    logging_config_path = config.get_logging_config_path()
    logging_config_path = os.path.abspath(logging_config_path)
    LOG.debug('Using logging config: %s', logging_config_path)

    is_debug_enabled = (cfg.CONF.debug or cfg.CONF.system.debug)

    try:
        logging.setup(logging_config_path, redirect_stderr=cfg.CONF.log.redirect_stderr,
                      excludes=cfg.CONF.log.excludes)
    except KeyError as e:
        tb_msg = traceback.format_exc()
        if 'log.setLevel' in tb_msg:
            msg = 'Invalid log level selected. Log level names need to be all uppercase.'
            msg += '\n\n' + getattr(e, 'message', six.text_type(e))
            raise KeyError(msg)
        else:
            # Fix: bare "raise" re-raises the active exception with its original
            # traceback intact; "raise e" would add a redundant frame.
            raise

    exclude_log_levels = [stdlib_logging.AUDIT]
    handlers = stdlib_logging.getLoggerClass().manager.root.handlers

    for handler in handlers:
        # If log level is not set to DEBUG we filter out "AUDIT" log messages. This way we avoid
        # duplicate "AUDIT" messages in production deployments where default service log level is
        # set to "INFO" and we already log messages with level AUDIT to a special dedicated log
        # file.
        ignore_audit_log_messages = (handler.level >= stdlib_logging.INFO and
                                     handler.level < stdlib_logging.AUDIT)
        if not is_debug_enabled and ignore_audit_log_messages:
            LOG.debug('Excluding log messages with level "AUDIT" for handler "%s"' % (handler))
            handler.addFilter(LogLevelFilter(log_levels=exclude_log_levels))

    if not is_debug_enabled:
        # NOTE: statsd logger logs everything by default under INFO so we ignore those log
        # messages unless verbose / debug mode is used
        logging.ignore_statsd_log_messages()
        logging.ignore_lib2to3_log_messages()

    if is_debug_enabled:
        enable_debugging()
    else:
        # Add global ignore filters, such as "heartbeat_tick" messages which are logged every 2
        # ms which cause too much noise
        add_global_filters_for_all_loggers()

    if cfg.CONF.profile:
        enable_profiling()

    # All other setup which requires config to be parsed and logging to be correctly setup.
    if setup_db:
        db_setup()

    if register_mq_exchanges:
        register_exchanges_with_retry()

    if register_signal_handlers:
        register_common_signal_handlers()

    if register_internal_trigger_types:
        triggers.register_internal_trigger_types()

    # TODO: This is a "not so nice" workaround until we have a proper migration system in place
    if run_migrations:
        run_all_rbac_migrations()

    if register_runners:
        runnersregistrar.register_runners()

    register_kombu_serializers()

    metrics_initialize()

    # Register service in the service registry
    if cfg.CONF.coordination.service_registry and service_registry:
        # NOTE: It's important that we pass start_heart=True to start the hearbeat process
        register_service_in_service_registry(service=service,
                                             capabilities=capabilities,
                                             start_heart=True)
def setup(
    config,
    setup_db=True,
    register_mq_exchanges=True,
    register_internal_trigger_types=False,
    ignore_register_config_opts_errors=False,
):
    """
    Perform common service bootstrap steps, in order:

    1. Register common CLI options and parse config / CLI arguments.
    2. Configure stdlib logging, filtering AUDIT / DEBUG records unless
       --verbose (or --debug, which implies it) is used.
    3. Optionally establish the DB connection.
    4. Optionally register RabbitMQ exchanges.
    5. Optionally register internal trigger types (disabled by default).

    :param config: Config object to use to parse args.
    :param ignore_register_config_opts_errors: When parsing the shared
        st2common config module, tolerate duplicate option registration errors.
    """
    register_common_cli_options()

    # NOTE: This code is not the best, but it's only realistic option we have at this point.
    # Refactoring all the code and config modules to avoid import time side affects would be a big
    # rabbit hole. Luckily registering same options twice is not really a big deal or fatal error
    # so we simply ignore such errors.
    tolerate_opt_errors = (
        config.__name__ == "st2common.config" and ignore_register_config_opts_errors
    )
    if tolerate_opt_errors:
        config.parse_args(ignore_errors=True)
    else:
        config.parse_args()

    # --debug implies --verbose
    if cfg.CONF.debug:
        cfg.CONF.verbose = True

    stdlib_logging.basicConfig(
        format="%(asctime)s %(levelname)s [-] %(message)s",
        level=stdlib_logging.DEBUG,
    )

    if not cfg.CONF.verbose:
        # Keep INFO / ERROR / CRITICAL visible; drop AUDIT and DEBUG records.
        suppressed_levels = [stdlib_logging.AUDIT, stdlib_logging.DEBUG]

        for root_handler in stdlib_logging.getLoggerClass().manager.root.handlers:
            root_handler.addFilter(LogLevelFilter(log_levels=suppressed_levels))

        # statsd logs everything under INFO by default, so silence it (and
        # lib2to3) unless verbose / debug mode is in effect.
        logging.ignore_statsd_log_messages()
        logging.ignore_lib2to3_log_messages()

    # Remaining setup requires parsed config and working logging.
    if setup_db:
        db_setup()

    if register_mq_exchanges:
        register_exchanges_with_retry()

    if register_internal_trigger_types:
        triggers.register_internal_trigger_types()