Example #1
def _setup():
    # Set up logger which logs everything which happens during and before config
    # parsing to sys.stdout
    logging.setup(DEFAULT_LOGGING_CONF_PATH)

    # 1. parse config args
    config.parse_args()

    # 2. setup logging.
    logging.setup(cfg.CONF.sensorcontainer.logging)

    # 3. all other setup which requires config to be parsed and logging to
    # be correctly setup.
    username = cfg.CONF.database.username if hasattr(cfg.CONF.database,
                                                     'username') else None
    password = cfg.CONF.database.password if hasattr(cfg.CONF.database,
                                                     'password') else None
    db_setup(cfg.CONF.database.db_name,
             cfg.CONF.database.host,
             cfg.CONF.database.port,
             username=username,
             password=password)
    register_exchanges()
    register_common_signal_handlers()

    # 4. Register internal triggers
    # Note: We have to import here because st2common.triggers depends on the
    # configuration having been parsed already.
    from st2common.triggers import register_internal_trigger_types
    register_internal_trigger_types()
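
Each of these _setup() variants repeats the same boilerplate for reading optional database credentials before calling db_setup(). A small helper like the one below (hypothetical, not part of st2) simply factors out the hasattr pattern already shown above:

def _get_db_credentials(database_cfg):
    # Mirrors the pattern in the examples: username/password are optional config
    # options, so fall back to None when they are not present.
    username = database_cfg.username if hasattr(database_cfg, 'username') else None
    password = database_cfg.password if hasattr(database_cfg, 'password') else None
    return username, password

# Usage sketch: username, password = _get_db_credentials(cfg.CONF.database)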
Example #2
File: api.py Project: timff/st2
def _setup():
    # Set up logger which logs everything which happens during and before config
    # parsing to sys.stdout
    logging.setup(DEFAULT_LOGGING_CONF_PATH)

    # 1. parse args to setup config.
    config.parse_args()

    # 2. setup logging.
    logging.setup(cfg.CONF.auth.logging)

    if cfg.CONF.auth.mode not in VALID_MODES:
        raise ValueError('Valid modes are: %s' % (','.join(VALID_MODES)))

    # 3. all other setup which requires config to be parsed and logging to
    # be correctly setup.
    username = cfg.CONF.database.username if hasattr(cfg.CONF.database,
                                                     'username') else None
    password = cfg.CONF.database.password if hasattr(cfg.CONF.database,
                                                     'password') else None
    db_setup(cfg.CONF.database.db_name,
             cfg.CONF.database.host,
             cfg.CONF.database.port,
             username=username,
             password=password)
Example #3
    def __init__(self, pack, file_path, class_name, trigger_types,
                 poll_interval=None, parent_args=None):
        """
        :param pack: Name of the pack this sensor belongs to.
        :type pack: ``str``

        :param file_path: Path to the sensor module file.
        :type file_path: ``str``

        :param class_name: Sensor class name.
        :type class_name: ``str``

        :param trigger_types: A list of references to trigger types which
                                  belong to this sensor.
        :type trigger_types: ``list`` of ``str``

        :param poll_interval: Sensor poll interval (in seconds).
        :type poll_interval: ``int`` or ``None``

        :param parent_args: Command line arguments passed to the parent process.
        :type parent_args: ``list``
        """
        self._pack = pack
        self._file_path = file_path
        self._class_name = class_name
        self._trigger_types = trigger_types or []
        self._poll_interval = poll_interval
        self._parent_args = parent_args or []
        self._trigger_names = {}

        # 1. Parse the config with inherited parent args
        try:
            config.parse_args(args=self._parent_args)
        except Exception:
            pass

        # 2. Establish DB connection
        username = cfg.CONF.database.username if hasattr(cfg.CONF.database, 'username') else None
        password = cfg.CONF.database.password if hasattr(cfg.CONF.database, 'password') else None
        db_setup_with_retry(cfg.CONF.database.db_name, cfg.CONF.database.host,
                            cfg.CONF.database.port, username=username, password=password)

        # 3. Instantiate the watcher
        self._trigger_watcher = TriggerWatcher(create_handler=self._handle_create_trigger,
                                               update_handler=self._handle_update_trigger,
                                               delete_handler=self._handle_delete_trigger,
                                               trigger_types=self._trigger_types,
                                               queue_suffix='sensorwrapper_%s_%s' %
                                               (self._pack, self._class_name),
                                               exclusive=True)

        # 4. Set up logging
        self._logger = logging.getLogger('SensorWrapper.%s.%s' %
                                         (self._pack, self._class_name))
        logging.setup(cfg.CONF.sensorcontainer.logging)

        if '--debug' in self._parent_args:
            set_log_level_for_all_loggers()

        self._sensor_instance = self._get_sensor_instance()
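
The logger name above ('SensorWrapper.%s.%s') suggests the enclosing class is a sensor wrapper, e.g. SensorWrapper. A minimal instantiation sketch with made-up placeholder values for the constructor arguments documented in the docstring:

wrapper = SensorWrapper(pack='examples',                                                   # placeholder pack name
                        file_path='/opt/stackstorm/packs/examples/sensors/my_sensor.py',  # placeholder path
                        class_name='MySensor',                                            # placeholder sensor class
                        trigger_types=['examples.my_trigger'],                            # placeholder trigger refs
                        poll_interval=30,
                        parent_args=['--config-file', '/etc/st2/st2.conf'])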
Example #4
def _setup():
    # Set up logger which logs everything which happens during and before config
    # parsing to sys.stdout
    logging.setup(DEFAULT_LOGGING_CONF_PATH)

    # 1. parse config args
    config.parse_args()

    # 2. setup logging.
    logging.setup(cfg.CONF.sensorcontainer.logging)

    # 3. all other setup which requires config to be parsed and logging to
    # be correctly setup.
    username = cfg.CONF.database.username if hasattr(cfg.CONF.database, 'username') else None
    password = cfg.CONF.database.password if hasattr(cfg.CONF.database, 'password') else None
    db_setup(cfg.CONF.database.db_name, cfg.CONF.database.host, cfg.CONF.database.port,
             username=username, password=password)
    register_exchanges()
    register_common_signal_handlers()

    # 4. Register internal triggers
    # Note: We have to import here because st2common.triggers depends on the
    # configuration having been parsed already.
    from st2common.triggers import register_internal_trigger_types
    register_internal_trigger_types()
Example #5
def setup(service, config, setup_db=True, register_mq_exchanges=True,
          register_signal_handlers=True, run_migrations=True):
    """
    Common setup function.

    Currently it performs the following operations:

    1. Parses config and CLI arguments
    2. Establishes DB connection
    3. Sets the log level for all loggers to DEBUG if the --debug flag is present
    4. Registers RabbitMQ exchanges
    5. Registers common signal handlers

    :param service: Name of the service.
    :param config: Config object to use to parse args.
    """
    # Set up logger which logs everything which happens during and before config
    # parsing to sys.stdout
    logging.setup(DEFAULT_LOGGING_CONF_PATH)

    # Parse args to setup config.
    config.parse_args()

    config_file_paths = cfg.CONF.config_file
    config_file_paths = [os.path.abspath(path) for path in config_file_paths]
    LOG.debug('Using config files: %s', ','.join(config_file_paths))

    # Setup logging.
    logging_config_path = config.get_logging_config_path()
    logging_config_path = os.path.abspath(logging_config_path)

    LOG.debug('Using logging config: %s', logging_config_path)
    logging.setup(logging_config_path)

    if cfg.CONF.debug:
        set_log_level_for_all_loggers(level=stdlib_logging.DEBUG)

    # All other setup which requires config to be parsed and logging to
    # be correctly setup.
    if setup_db:
        db_setup()

    if register_mq_exchanges:
        register_exchanges()

    if register_signal_handlers:
        register_common_signal_handlers()

    # TODO: This is a "not so nice" workaround until we have a proper migration system in place
    if run_migrations:
        insert_system_roles()

    if cfg.CONF.rbac.enable and not cfg.CONF.auth.enable:
        msg = ('Authentication is not enabled. RBAC only works when authentication is enabled. '
               'You can either enable authentication or disable RBAC.')
        raise Exception(msg)
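
A service entry point would call this common setup() function once at startup, before starting any servers or workers. A minimal sketch follows; the import paths and service name are illustrative assumptions, not taken from the examples above:

from st2common import service_setup          # assumed location of the setup() shown above
from myservice import config                 # hypothetical config module exposing parse_args()

def main():
    service_setup.setup(service='myservice', config=config, setup_db=True,
                        register_mq_exchanges=True, register_signal_handlers=True,
                        run_migrations=True)
    # ... start the actual service here ...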
Example #6
 def test_log_critical(self):
     """Test that CRITICAL log entry does not go to the audit log."""
     logging.setup(self.cfg_path)
     log = logging.getLogger(__name__)
     msg = uuid.uuid4().hex
     log.critical(msg)
     info_log_entries = open(self.info_log_path).read()
     self.assertIn(msg, info_log_entries)
     audit_log_entries = open(self.audit_log_path).read()
     self.assertNotIn(msg, audit_log_entries)
Example #7
 def test_log_audit(self):
     """Test that AUDIT log entry goes to the audit log."""
     logging.setup(self.cfg_path)
     log = logging.getLogger(__name__)
     msg = uuid.uuid4().hex
     log.audit(msg)
     info_log_entries = open(self.info_log_path).read()
     self.assertIn(msg, info_log_entries)
     audit_log_entries = open(self.audit_log_path).read()
     self.assertIn(msg, audit_log_entries)
Example #8
def _setup():
    # 1. parse args to setup config.
    config.parse_args()

    # 2. setup logging.
    logging.setup(cfg.CONF.api.logging)

    # 3. all other setup which requires config to be parsed and logging to
    # be correctly setup.
    username = cfg.CONF.database.username if hasattr(cfg.CONF.database, 'username') else None
    password = cfg.CONF.database.password if hasattr(cfg.CONF.database, 'password') else None
    db_setup(cfg.CONF.database.db_name, cfg.CONF.database.host, cfg.CONF.database.port,
             username=username, password=password)
Example #9
 def test_logger_set_level(self):
     logging.setup(self.cfg_path)
     log = logging.getLogger(__name__)
     self.assertEqual(log.getEffectiveLevel(), logbase.DEBUG)
     log.setLevel(logbase.INFO)
     self.assertEqual(log.getEffectiveLevel(), logbase.INFO)
     log.setLevel(logbase.WARN)
     self.assertEqual(log.getEffectiveLevel(), logbase.WARN)
     log.setLevel(logbase.ERROR)
     self.assertEqual(log.getEffectiveLevel(), logbase.ERROR)
     log.setLevel(logbase.CRITICAL)
     self.assertEqual(log.getEffectiveLevel(), logbase.CRITICAL)
     log.setLevel(logbase.AUDIT)
     self.assertEqual(log.getEffectiveLevel(), logbase.AUDIT)
Example #10
def setup(service,
          config,
          setup_db=True,
          register_mq_exchanges=True,
          register_signal_handlers=True):
    """
    Common setup function.

    Currently it performs the following operations:

    1. Parses config and CLI arguments
    2. Establishes DB connection
    3. Sets the log level for all loggers to DEBUG if the --debug flag is present
    4. Registers RabbitMQ exchanges
    5. Registers common signal handlers

    :param service: Name of the service.
    :param config: Config object to use to parse args.
    """
    # Set up logger which logs everything which happens during and before config
    # parsing to sys.stdout
    logging.setup(DEFAULT_LOGGING_CONF_PATH)

    # Parse args to setup config.
    config.parse_args()

    config_file_paths = cfg.CONF.config_file
    config_file_paths = [os.path.abspath(path) for path in config_file_paths]
    LOG.debug('Using config files: %s', ','.join(config_file_paths))

    # Setup logging.
    logging_config_path = config.get_logging_config_path()
    logging_config_path = os.path.abspath(logging_config_path)

    LOG.debug('Using logging config: %s', logging_config_path)
    logging.setup(logging_config_path)

    if cfg.CONF.debug:
        set_log_level_for_all_loggers(level=stdlib_logging.DEBUG)

    # All other setup which requires config to be parsed and logging to
    # be correctly setup.
    if setup_db:
        db_setup()

    if register_mq_exchanges:
        register_exchanges()

    if register_signal_handlers:
        register_common_signal_handlers()
Example #11
def _setup():
    # Set up logger which logs everything which happens during and before config
    # parsing to sys.stdout
    logging.setup(DEFAULT_LOGGING_CONF_PATH)

    # 1. parse args to setup config.
    config.parse_args()
    # 2. setup logging.
    logging.setup(cfg.CONF.resultstracker.logging)
    # 3. all other setup which requires config to be parsed and logging to
    # be correctly setup.
    username = cfg.CONF.database.username if hasattr(cfg.CONF.database, 'username') else None
    password = cfg.CONF.database.password if hasattr(cfg.CONF.database, 'password') else None
    db_setup(cfg.CONF.database.db_name, cfg.CONF.database.host, cfg.CONF.database.port,
             username=username, password=password)
Example #12
    def __init__(self, pack, file_path, class_name, trigger_types,
                 poll_interval=None, parent_args=None):
        """
        :param pack: Name of the pack this sensor belongs to.
        :type pack: ``str``

        :param file_path: Path to the sensor module file.
        :type file_path: ``str``

        :param class_name: Sensor class name.
        :type class_name: ``str``

        :param trigger_types: A list of references to trigger types which
                                  belong to this sensor.
        :type trigger_types: ``list`` of ``str``

        :param poll_interval: Sensor poll interval (in seconds).
        :type poll_interval: ``int`` or ``None``

        :param parent_args: Command line arguments passed to the parent process.
        :type parent_args: ``list``
        """
        self._pack = pack
        self._file_path = file_path
        self._class_name = class_name
        self._trigger_types = trigger_types or []
        self._poll_interval = poll_interval
        self._parent_args = parent_args or []
        self._trigger_names = {}

        # 1. Parse the config with inherited parent args
        try:
            config.parse_args(args=self._parent_args)
        except Exception:
            pass

        # 2. Instantiate the watcher
        self._trigger_watcher = TriggerWatcher(create_handler=self._handle_create_trigger,
                                               update_handler=self._handle_update_trigger,
                                               delete_handler=self._handle_delete_trigger,
                                               trigger_types=self._trigger_types)

        # 3. Set up logging
        self._logger = logging.getLogger('SensorWrapper.%s' %
                                         (self._class_name))
        logging.setup(cfg.CONF.sensorcontainer.logging)

        self._sensor_instance = self._get_sensor_instance()
Example #13
def _setup():
    # Set up logger which logs everything which happens during and before config
    # parsing to sys.stdout
    logging.setup(DEFAULT_LOGGING_CONF_PATH)

    # 1. parse args to setup config.
    config.parse_args()
    # 2. setup logging.
    logging.setup(cfg.CONF.notifier.logging)
    # 3. all other setup which requires config to be parsed and logging to
    # be correctly setup.
    username = cfg.CONF.database.username if hasattr(cfg.CONF.database, 'username') else None
    password = cfg.CONF.database.password if hasattr(cfg.CONF.database, 'password') else None
    db_setup(cfg.CONF.database.db_name, cfg.CONF.database.host, cfg.CONF.database.port,
             username=username, password=password)
    register_exchanges()
Example #14
def _setup():
    # 1. parse args to setup config.
    config.parse_args()
    # 2. setup logging.
    logging.setup(cfg.CONF.actionrunner.logging)
    # 3. all other setup which requires config to be parsed and logging to
    # be correctly setup.
    username = cfg.CONF.database.username if hasattr(cfg.CONF.database,
                                                     'username') else None
    password = cfg.CONF.database.password if hasattr(cfg.CONF.database,
                                                     'password') else None
    db_setup(cfg.CONF.database.db_name,
             cfg.CONF.database.host,
             cfg.CONF.database.port,
             username=username,
             password=password)
Example #15
def setup(service, config, setup_db=True, register_mq_exchanges=True,
          register_signal_handlers=True):
    """
    Common setup function.

    Currently it performs the following operations:

    1. Parses config and CLI arguments
    2. Establishes DB connection
    3. Sets the log level for all loggers to DEBUG if the --debug flag is present
    4. Registers RabbitMQ exchanges
    5. Registers common signal handlers

    :param service: Name of the service.
    :param config: Config object to use to parse args.
    """
    # Set up logger which logs everything which happens during and before config
    # parsing to sys.stdout
    logging.setup(DEFAULT_LOGGING_CONF_PATH)

    # Parse args to setup config.
    config.parse_args()

    config_file_paths = cfg.CONF.config_file
    config_file_paths = [os.path.abspath(path) for path in config_file_paths]
    LOG.debug('Using config files: %s', ','.join(config_file_paths))

    # Setup logging.
    logging_config_path = config.get_logging_config_path()
    logging_config_path = os.path.abspath(logging_config_path)

    LOG.debug('Using logging config: %s', logging_config_path)
    logging.setup(logging_config_path)

    if cfg.CONF.debug:
        set_log_level_for_all_loggers(level=stdlib_logging.DEBUG)

    # All other setup which requires config to be parsed and logging to
    # be correctly setup.
    if setup_db:
        db_setup()

    if register_mq_exchanges:
        register_exchanges()

    if register_signal_handlers:
        register_common_signal_handlers()
Example #16
File: wsgi.py Project: langelee/st2
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from pecan import load_app
from oslo_config import cfg

from st2api import config  # noqa
from st2common import log as logging
from st2common.models import db


cfg.CONF(args=["--config-file", "/etc/st2/st2.conf"])

logging.setup(cfg.CONF.api.logging)

username = cfg.CONF.database.username if hasattr(cfg.CONF.database, "username") else None
password = cfg.CONF.database.password if hasattr(cfg.CONF.database, "password") else None
db.db_setup(
    cfg.CONF.database.db_name, cfg.CONF.database.host, cfg.CONF.database.port, username=username, password=password
)

pecan_config = {
    "app": {
        "root": "st2api.controllers.root.RootController",
        "modules": ["st2api"],
        "debug": cfg.CONF.api_pecan.debug,
        "errors": {"__force_dict__": True},
    }
}
Example #17
    def __init__(
        self,
        pack,
        file_path,
        class_name,
        trigger_types,
        poll_interval=None,
        parent_args=None,
        db_ensure_indexes=True,
    ):
        """
        :param pack: Name of the pack this sensor belongs to.
        :type pack: ``str``

        :param file_path: Path to the sensor module file.
        :type file_path: ``str``

        :param class_name: Sensor class name.
        :type class_name: ``str``

        :param trigger_types: A list of references to trigger types which
                                  belong to this sensor.
        :type trigger_types: ``list`` of ``str``

        :param poll_interval: Sensor poll interval (in seconds).
        :type poll_interval: ``int`` or ``None``

        :param parent_args: Command line arguments passed to the parent process.
        :type parent_args: ``list``

        :param db_ensure_indexes: True to ensure indexes. This should really only be set to False
                                  in tests to speed things up.
        """
        self._pack = pack
        self._file_path = file_path
        self._class_name = class_name
        self._trigger_types = trigger_types or []
        self._poll_interval = poll_interval
        self._parent_args = parent_args or []
        self._trigger_names = {}

        # 1. Parse the config with inherited parent args
        try:
            config.parse_args(args=self._parent_args)
        except Exception as e:
            LOG.exception("Failed to parse config using parent args "
                          '(parent_args=%s): "%s".' % (str(self._parent_args), str(e)))

        # 2. Establish DB connection
        username = (cfg.CONF.database.username if hasattr(
            cfg.CONF.database, "username") else None)
        password = (cfg.CONF.database.password if hasattr(
            cfg.CONF.database, "password") else None)
        db_setup_with_retry(
            cfg.CONF.database.db_name,
            cfg.CONF.database.host,
            cfg.CONF.database.port,
            username=username,
            password=password,
            ensure_indexes=db_ensure_indexes,
            ssl=cfg.CONF.database.ssl,
            ssl_keyfile=cfg.CONF.database.ssl_keyfile,
            ssl_certfile=cfg.CONF.database.ssl_certfile,
            ssl_cert_reqs=cfg.CONF.database.ssl_cert_reqs,
            ssl_ca_certs=cfg.CONF.database.ssl_ca_certs,
            authentication_mechanism=cfg.CONF.database.authentication_mechanism,
            ssl_match_hostname=cfg.CONF.database.ssl_match_hostname,
        )

        # 3. Instantiate the watcher
        self._trigger_watcher = TriggerWatcher(
            create_handler=self._handle_create_trigger,
            update_handler=self._handle_update_trigger,
            delete_handler=self._handle_delete_trigger,
            trigger_types=self._trigger_types,
            queue_suffix="sensorwrapper_%s_%s" %
            (self._pack, self._class_name),
            exclusive=True,
        )

        # 4. Set up logging
        self._logger = logging.getLogger("SensorWrapper.%s.%s" %
                                         (self._pack, self._class_name))
        logging.setup(cfg.CONF.sensorcontainer.logging)

        if "--debug" in parent_args:
            set_log_level_for_all_loggers()
        else:
            # NOTE: statsd logger logs everything by default under INFO so we ignore those log
            # messages unless verbose / debug mode is used
            logging.ignore_statsd_log_messages()

        self._sensor_instance = self._get_sensor_instance()
Example #18
def setup(service, config, setup_db=True, register_mq_exchanges=True,
          register_signal_handlers=True, register_internal_trigger_types=False,
          run_migrations=True, config_args=None):
    """
    Common setup function.

    Currently it performs the following operations:

    1. Parses config and CLI arguments
    2. Establishes DB connection
    3. Sets the log level for all loggers to DEBUG if the --debug flag is present or
       if the system.debug config option is set to True.
    4. Registers RabbitMQ exchanges
    5. Registers common signal handlers
    6. Registers internal trigger types

    :param service: Name of the service.
    :param config: Config object to use to parse args.
    """
    # Set up logger which logs everything which happens during and before config
    # parsing to sys.stdout
    logging.setup(DEFAULT_LOGGING_CONF_PATH, excludes=None)

    # Parse args to setup config.
    if config_args:
        config.parse_args(config_args)
    else:
        config.parse_args()

    config_file_paths = cfg.CONF.config_file
    config_file_paths = [os.path.abspath(path) for path in config_file_paths]
    LOG.debug('Using config files: %s', ','.join(config_file_paths))

    # Setup logging.
    logging_config_path = config.get_logging_config_path()
    logging_config_path = os.path.abspath(logging_config_path)

    LOG.debug('Using logging config: %s', logging_config_path)

    try:
        logging.setup(logging_config_path, redirect_stderr=cfg.CONF.log.redirect_stderr,
                      excludes=cfg.CONF.log.excludes)
    except KeyError as e:
        tb_msg = traceback.format_exc()
        if 'log.setLevel' in tb_msg:
            msg = 'Invalid log level selected. Log level names need to be all uppercase.'
            msg += '\n\n' + getattr(e, 'message', str(e))
            raise KeyError(msg)
        else:
            raise e

    if cfg.CONF.debug or cfg.CONF.system.debug:
        enable_debugging()

    if cfg.CONF.profile:
        enable_profiling()

    # All other setup which requires config to be parsed and logging to
    # be correctly setup.
    if setup_db:
        db_setup()

    if register_mq_exchanges:
        register_exchanges_with_retry()

    if register_signal_handlers:
        register_common_signal_handlers()

    if register_internal_trigger_types:
        triggers.register_internal_trigger_types()

    # TODO: This is a "not so nice" workaround until we have a proper migration system in place
    if run_migrations:
        run_all_rbac_migrations()

    metrics_initialize()
Example #19
def setup(service, config, setup_db=True, register_mq_exchanges=True,
          register_signal_handlers=True, register_internal_trigger_types=False,
          run_migrations=True, register_runners=True, service_registry=False,
          capabilities=None, config_args=None):
    """
    Common setup function.

    Currently it performs the following operations:

    1. Parses config and CLI arguments
    2. Establishes DB connection
    3. Sets the log level for all loggers to DEBUG if the --debug flag is present or
       if the system.debug config option is set to True.
    4. Registers RabbitMQ exchanges
    5. Registers common signal handlers
    6. Registers internal trigger types
    7. Registers all the runners which are installed inside the StackStorm virtualenv.
    8. Registers the service in the service registry with the provided capabilities

    :param service: Name of the service.
    :param config: Config object to use to parse args.
    """
    capabilities = capabilities or {}

    # Set up logger which logs everything which happens during and before config
    # parsing to sys.stdout
    logging.setup(DEFAULT_LOGGING_CONF_PATH, excludes=None)

    # Parse args to setup config.
    if config_args is not None:
        config.parse_args(config_args)
    else:
        config.parse_args()

    version = '%s.%s.%s' % (sys.version_info[0], sys.version_info[1], sys.version_info[2])
    LOG.debug('Using Python: %s (%s)' % (version, sys.executable))

    config_file_paths = cfg.CONF.config_file
    config_file_paths = [os.path.abspath(path) for path in config_file_paths]
    LOG.debug('Using config files: %s', ','.join(config_file_paths))

    # Setup logging.
    logging_config_path = config.get_logging_config_path()
    logging_config_path = os.path.abspath(logging_config_path)

    LOG.debug('Using logging config: %s', logging_config_path)

    is_debug_enabled = (cfg.CONF.debug or cfg.CONF.system.debug)

    try:
        logging.setup(logging_config_path, redirect_stderr=cfg.CONF.log.redirect_stderr,
                      excludes=cfg.CONF.log.excludes)
    except KeyError as e:
        tb_msg = traceback.format_exc()
        if 'log.setLevel' in tb_msg:
            msg = 'Invalid log level selected. Log level names need to be all uppercase.'
            msg += '\n\n' + getattr(e, 'message', six.text_type(e))
            raise KeyError(msg)
        else:
            raise e

    exclude_log_levels = [stdlib_logging.AUDIT]
    handlers = stdlib_logging.getLoggerClass().manager.root.handlers

    for handler in handlers:
        # If log level is not set to DEBUG we filter out "AUDIT" log messages. This way we avoid
        # duplicate "AUDIT" messages in production deployments where default service log level is
        # set to "INFO" and we already log messages with level AUDIT to a special dedicated log
        # file.
        ignore_audit_log_messages = (handler.level >= stdlib_logging.INFO and
                                     handler.level < stdlib_logging.AUDIT)
        if not is_debug_enabled and ignore_audit_log_messages:
            LOG.debug('Excluding log messages with level "AUDIT" for handler "%s"' % (handler))
            handler.addFilter(LogLevelFilter(log_levels=exclude_log_levels))

    if not is_debug_enabled:
        # NOTE: statsd logger logs everything by default under INFO so we ignore those log
        # messages unless verbose / debug mode is used
        logging.ignore_statsd_log_messages()

    logging.ignore_lib2to3_log_messages()

    if is_debug_enabled:
        enable_debugging()
    else:
        # Add global ignore filters, such as "heartbeat_tick" messages which are logged every 2
        # ms which cause too much noise
        add_global_filters_for_all_loggers()

    if cfg.CONF.profile:
        enable_profiling()

    # All other setup which requires config to be parsed and logging to be correctly setup.
    if setup_db:
        db_setup()

    if register_mq_exchanges:
        register_exchanges_with_retry()

    if register_signal_handlers:
        register_common_signal_handlers()

    if register_internal_trigger_types:
        triggers.register_internal_trigger_types()

    # TODO: This is a "not so nice" workaround until we have a proper migration system in place
    if run_migrations:
        run_all_rbac_migrations()

    if register_runners:
        runnersregistrar.register_runners()

    register_kombu_serializers()

    metrics_initialize()

    # Register service in the service registry
    if cfg.CONF.coordination.service_registry and service_registry:
        # NOTE: It's important that we pass start_heart=True to start the heartbeat process
        register_service_in_service_registry(service=service, capabilities=capabilities,
                                             start_heart=True)
Example #20
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from pecan import load_app
from oslo.config import cfg

from st2auth import config  # noqa
from st2common import log as logging
from st2common.models import db


cfg.CONF(args=['--config-file', '/etc/st2/st2.conf'])

logging.setup(cfg.CONF.auth.logging)

username = cfg.CONF.database.username if hasattr(cfg.CONF.database, 'username') else None
password = cfg.CONF.database.password if hasattr(cfg.CONF.database, 'password') else None
db.db_setup(cfg.CONF.database.db_name, cfg.CONF.database.host, cfg.CONF.database.port,
            username=username, password=password)

pecan_config = {
    'app': {
        'root': 'st2auth.controllers.root.RootController',
        'modules': ['st2auth'],
        'debug': cfg.CONF.auth.debug,
        'errors': {'__force_dict__': True}
    }
}
Example #21
def setup(
    service,
    config,
    setup_db=True,
    register_mq_exchanges=True,
    register_signal_handlers=True,
    register_internal_trigger_types=False,
    run_migrations=True,
    register_runners=True,
    service_registry=False,
    capabilities=None,
    config_args=None,
):
    """
    Common setup function.

    Currently it performs the following operations:

    1. Parses config and CLI arguments
    2. Establishes DB connection
    3. Sets the log level for all loggers to DEBUG if the --debug flag is present or
       if the system.debug config option is set to True.
    4. Registers RabbitMQ exchanges
    5. Registers common signal handlers
    6. Registers internal trigger types
    7. Registers all the runners which are installed inside the StackStorm virtualenv.
    8. Registers the service in the service registry with the provided capabilities

    :param service: Name of the service.
    :param config: Config object to use to parse args.
    """
    capabilities = capabilities or {}

    # Set up logger which logs everything which happens during and before config
    # parsing to sys.stdout
    logging.setup(DEFAULT_LOGGING_CONF_PATH, excludes=None)

    # Parse args to setup config.
    if config_args is not None:
        config.parse_args(config_args)
    else:
        config.parse_args()

    version = "%s.%s.%s" % (
        sys.version_info[0],
        sys.version_info[1],
        sys.version_info[2],
    )

    # We print locale related info to make it easier to troubleshoot issues where the locale is
    # not set correctly (e.g. the locale is C / ascii while services try to work with unicode
    # data, which would result in errors)

    fs_encoding = sys.getfilesystemencoding()
    default_encoding = sys.getdefaultencoding()
    lang_env = os.environ.get("LANG", "unknown")

    try:
        language_code, encoding = locale.getdefaultlocale()

        if language_code and encoding:
            used_locale = ".".join([language_code, encoding])
        else:
            used_locale = "unable to retrieve locale"
    except Exception as e:
        used_locale = "unable to retrieve locale: %s " % (str(e))

    LOG.info("Using Python: %s (%s)" % (version, sys.executable))
    LOG.info(
        "Using fs encoding: %s, default encoding: %s, LANG env variable: %s, locale: %s"
        % (fs_encoding, default_encoding, lang_env, used_locale))

    config_file_paths = cfg.CONF.config_file
    config_file_paths = [os.path.abspath(path) for path in config_file_paths]
    LOG.info("Using config files: %s", ",".join(config_file_paths))

    # Setup logging.
    logging_config_path = config.get_logging_config_path()
    logging_config_path = os.path.abspath(logging_config_path)

    LOG.info("Using logging config: %s", logging_config_path)

    is_debug_enabled = cfg.CONF.debug or cfg.CONF.system.debug

    try:
        logging.setup(
            logging_config_path,
            redirect_stderr=cfg.CONF.log.redirect_stderr,
            excludes=cfg.CONF.log.excludes,
        )
    except KeyError as e:
        tb_msg = traceback.format_exc()
        if "log.setLevel" in tb_msg:
            msg = (
                "Invalid log level selected. Log level names need to be all uppercase."
            )
            msg += "\n\n" + getattr(e, "message", six.text_type(e))
            raise KeyError(msg)
        else:
            raise e

    exclude_log_levels = [stdlib_logging.AUDIT]
    handlers = stdlib_logging.getLoggerClass().manager.root.handlers

    for handler in handlers:
        # If log level is not set to DEBUG we filter out "AUDIT" log messages. This way we avoid
        # duplicate "AUDIT" messages in production deployments where default service log level is
        # set to "INFO" and we already log messages with level AUDIT to a special dedicated log
        # file.
        ignore_audit_log_messages = (handler.level >= stdlib_logging.INFO
                                     and handler.level < stdlib_logging.AUDIT)
        if not is_debug_enabled and ignore_audit_log_messages:
            LOG.debug(
                'Excluding log messages with level "AUDIT" for handler "%s"' %
                (handler))
            handler.addFilter(LogLevelFilter(log_levels=exclude_log_levels))

    if not is_debug_enabled:
        # NOTE: statsd logger logs everything by default under INFO so we ignore those log
        # messages unless verbose / debug mode is used
        logging.ignore_statsd_log_messages()

    logging.ignore_lib2to3_log_messages()

    if is_debug_enabled:
        enable_debugging()
    else:
        # Add global ignore filters, such as "heartbeat_tick" messages which are logged every 2
        # ms which cause too much noise
        add_global_filters_for_all_loggers()

    if cfg.CONF.profile:
        enable_profiling()

    # All other setup which requires config to be parsed and logging to be correctly setup.
    if setup_db:
        db_setup()

    if register_mq_exchanges:
        register_exchanges_with_retry()

    if register_signal_handlers:
        register_common_signal_handlers()

    if register_internal_trigger_types:
        triggers.register_internal_trigger_types()

    # TODO: This is a "not so nice" workaround until we have a proper migration system in place
    if run_migrations:
        run_all_rbac_migrations()

    if register_runners:
        runnersregistrar.register_runners()

    register_kombu_serializers()

    metrics_initialize()

    # Register service in the service registry
    if cfg.CONF.coordination.service_registry and service_registry:
        # NOTE: It's important that we pass start_heart=True to start the heartbeat process
        register_service_in_service_registry(service=service,
                                             capabilities=capabilities,
                                             start_heart=True)

    if sys.version_info[0] == 2:
        LOG.warning(PYTHON2_DEPRECATION)