コード例 #1
0
ファイル: data_migration.py プロジェクト: ISCAS-VDI/aodh-base
def main():
    """Migrate alarm definitions (and optionally their change history)
    from the NoSQL storage backend into the SQL backend.

    Connection URLs, the debug flag and the migrate-history switch all
    come from the CLI arguments produced by get_parser().  Entries that
    already exist in the SQL store are skipped with a warning so the
    tool can be re-run safely.
    """
    args = get_parser().parse_args()

    # Set up logging to use the console
    console = logging.StreamHandler(sys.stderr)
    formatter = logging.Formatter(
        '[%(asctime)s] %(levelname)-8s %(message)s')
    console.setFormatter(formatter)
    root_logger.addHandler(console)
    if args.debug:
        root_logger.setLevel(logging.DEBUG)
    else:
        root_logger.setLevel(logging.INFO)

    _validate_conn_options(args)

    # Separate ConfigOpts instances so the two backends can be
    # configured with different connection URLs.
    nosql_conf = cfg.ConfigOpts()
    db_options.set_defaults(nosql_conf, args.nosql_conn)
    nosql_conf.register_opts(storage.OPTS, 'database')
    nosql_conn = storage.get_connection_from_config(nosql_conf)

    sql_conf = cfg.ConfigOpts()
    db_options.set_defaults(sql_conf, args.sql_conn)
    sql_conf.register_opts(storage.OPTS, 'database')
    sql_conn = storage.get_connection_from_config(sql_conf)

    root_logger.info(
        _LI("Starting to migrate alarms data from NoSQL to SQL..."))

    count = 0
    for alarm in nosql_conn.get_alarms():
        root_logger.debug("Migrating alarm %s..." % alarm.alarm_id)
        try:
            sql_conn.create_alarm(alarm)
            count += 1
        except exception.DBDuplicateEntry:
            # Alarm already migrated on a previous run; not counted.
            root_logger.warning(_LW("Duplicated alarm %s found, skipped."),
                                alarm.alarm_id)
        if not args.migrate_history:
            continue

        # History is migrated even for duplicated alarms, so a rerun
        # can complete a partially-migrated history.
        history_count = 0
        for history in nosql_conn.get_alarm_changes(alarm.alarm_id, None):
            history_data = history.as_dict()
            root_logger.debug("    Migrating alarm history data with"
                              " event_id %s..." % history_data['event_id'])
            try:
                sql_conn.record_alarm_change(history_data)
                history_count += 1
            except exception.DBDuplicateEntry:
                root_logger.warning(
                    _LW("    Duplicated alarm history %s found, skipped."),
                    history_data['event_id'])
        root_logger.info(_LI("    Migrated %(count)s history data of alarm "
                             "%(alarm_id)s"),
                         {'count': history_count, 'alarm_id': alarm.alarm_id})

    root_logger.info(_LI("End alarms data migration from NoSQL to SQL, %s"
                         " alarms have been migrated."), count)
コード例 #2
0
ファイル: test_get_connection.py プロジェクト: sileht/aodh
 def test_get_connection_no_such_engine(self):
     """An unknown storage engine must raise RuntimeError.

     The original try/except silently passed when no exception was
     raised; assertRaises makes a missing error fail the test.
     """
     self.CONF.set_override('connection', 'no-such-engine://localhost',
                            group='database')
     self.CONF.set_override('max_retries', 0, 'database')
     # testtools' assertRaises returns the raised exception so its
     # message can be inspected.
     err = self.assertRaises(RuntimeError,
                             storage.get_connection_from_config,
                             self.CONF)
     self.assertIn('no-such-engine', six.text_type(err))
コード例 #3
0
ファイル: test_get_connection.py プロジェクト: openstack/aodh
 def test_get_connection_no_such_engine(self):
     """An unknown storage engine must raise RuntimeError.

     The original try/except silently passed when no exception was
     raised; assertRaises makes a missing error fail the test.
     """
     self.CONF.set_override('connection', 'no-such-engine://localhost',
                            group='database', enforce_type=True)
     self.CONF.set_override('max_retries', 0, 'database',
                            enforce_type=True)
     # testtools' assertRaises returns the raised exception so its
     # message can be inspected.
     err = self.assertRaises(RuntimeError,
                             storage.get_connection_from_config,
                             self.CONF)
     self.assertIn('no-such-engine', six.text_type(err))
コード例 #4
0
ファイル: test_get_connection.py プロジェクト: chungg/aodh
 def test_retries(self):
     """Connecting to an unknown engine retries before failing.

     The original try/except silently passed (and skipped every
     assertion) when no exception was raised; assertRaises makes a
     missing error fail the test.
     """
     with mock.patch.object(retrying.time, 'sleep') as retry_sleep:
         self.CONF.set_override("connection", "no-such-engine://",
                                group="database")
         err = self.assertRaises(RuntimeError,
                                 storage.get_connection_from_config,
                                 self.CONF)
         self.assertIn('no-such-engine', six.text_type(err))
         # 9 sleeps of 10.0s between the connection attempts.
         self.assertEqual(9, retry_sleep.call_count)
         retry_sleep.assert_called_with(10.0)
コード例 #5
0
ファイル: app.py プロジェクト: chungg/aodh
def setup_app(pecan_config=None, extra_hooks=None):
    """Build and return the Pecan WSGI application for the API.

    :param pecan_config: pecan configuration; defaults to
                         get_pecan_config() when not given
    :param extra_hooks: optional extra pecan hooks appended to the
                        standard set
    """
    # FIXME: Replace DBHook with a hooks.TransactionHook
    storage_conn = storage.get_connection_from_config(cfg.CONF, 'alarm')
    app_hooks = [
        hooks.ConfigHook(),
        hooks.DBHook(storage_conn),
        hooks.TranslationHook(),
    ]
    if extra_hooks:
        app_hooks += list(extra_hooks)

    pecan_config = pecan_config or get_pecan_config()

    pecan.configuration.set_config(dict(pecan_config), overwrite=True)

    # NOTE(sileht): pecan debug won't work in multi-process environment
    pecan_debug = CONF.api.pecan_debug
    if pecan_debug and service.get_workers('api') != 1:
        pecan_debug = False
        LOG.warning(_LW('pecan_debug cannot be enabled, if workers is > 1, '
                        'the value is overrided with False'))

    return pecan.make_app(
        pecan_config.app.root,
        debug=pecan_debug,
        force_canonical=getattr(pecan_config.app, 'force_canonical', True),
        hooks=app_hooks,
        wrap_app=middleware.ParsableErrorMiddleware,
        guess_content_type_from_ext=False)
コード例 #6
0
ファイル: db.py プロジェクト: pczerkas/aodh
    def setUp(self):
        """Prepare a database-backed test: pick the engine from db_url,
        start the matching driver manager and open the alarm storage
        connection (upgrading its schema).
        """
        super(TestBase, self).setUp()
        engine = urlparse.urlparse(self.db_url).scheme

        # NOTE(Alexei_987) Shortcut to skip expensive db setUp
        test_method = self._get_test_method()
        if (hasattr(test_method, '_run_with')
                and engine not in test_method._run_with):
            raise testcase.TestSkipped(
                'Test is not applicable for %s' % engine)

        conf = service.prepare_service([])
        self.CONF = self.useFixture(fixture_config.Config(conf)).conf
        self.CONF.set_override('connection', self.db_url, group="database")

        # ValueError here means no driver manager exists for the engine.
        try:
            self.db_manager = self._get_driver_manager(engine)(self.CONF)
        except ValueError as exc:
            self.skipTest("missing driver manager: %s" % exc)
        self.useFixture(self.db_manager)

        # Point the config at the URL actually provisioned by the manager.
        self.CONF.set_override('connection', self.db_manager.url,
                               group="database")

        self.alarm_conn = storage.get_connection_from_config(self.CONF)
        self.alarm_conn.upgrade()

        # Hand this test's connection to any code requesting a new one.
        self.useFixture(mockpatch.Patch(
            'aodh.storage.get_connection_from_config',
            side_effect=self._get_connection))
コード例 #7
0
ファイル: app.py プロジェクト: paperandsoap/aodh
def setup_app(pecan_config=PECAN_CONFIG, conf=None):
    """Assemble the Pecan WSGI application for the aodh API.

    :param pecan_config: pecan configuration mapping
    :param conf: aodh configuration object (mandatory)
    :raises RuntimeError: when *conf* is not supplied
    """
    if conf is None:
        # NOTE(jd) That sucks but pecan forces us to use kwargs :(
        raise RuntimeError("Config is actually mandatory")

    # FIXME: Replace DBHook with a hooks.TransactionHook
    storage_connection = storage.get_connection_from_config(conf)
    app_hooks = [
        hooks.ConfigHook(conf),
        hooks.DBHook(storage_connection),
        hooks.TranslationHook(),
    ]

    pecan.configuration.set_config(dict(pecan_config), overwrite=True)

    # NOTE(sileht): pecan debug won't work in multi-process environment
    pecan_debug = conf.api.pecan_debug
    if pecan_debug and conf.api.workers != 1:
        pecan_debug = False
        LOG.warning(
            _LW('pecan_debug cannot be enabled, if workers is > 1, '
                'the value is overrided with False'))

    return pecan.make_app(pecan_config['app']['root'],
                          debug=pecan_debug,
                          hooks=app_hooks,
                          wrap_app=middleware.ParsableErrorMiddleware,
                          guess_content_type_from_ext=False)
コード例 #8
0
 def test_only_default_url(self):
     """A bare log:// connection URL selects the log storage driver."""
     self.CONF.set_override("connection", "log://", group="database",
                            enforce_type=True)
     connection = storage.get_connection_from_config(self.CONF)
     self.assertIsInstance(connection, impl_log.Connection)
コード例 #9
0
ファイル: db.py プロジェクト: sileht/aodh
    def setUp(self):
        """Prepare the database backend selected by self.db_url and
        open an upgraded alarm storage connection for the test.
        """
        super(TestBase, self).setUp()
        engine = urlparse.urlparse(self.db_url).scheme

        # NOTE(Alexei_987) Shortcut to skip expensive db setUp
        test_method = self._get_test_method()
        if (hasattr(test_method, '_run_with')
                and engine not in test_method._run_with):
            raise testcase.TestSkipped('Test is not applicable for %s' %
                                       engine)

        conf = service.prepare_service([])
        self.CONF = self.useFixture(fixture_config.Config(conf)).conf
        self.CONF.set_override('connection', self.db_url, group="database")

        # ValueError here means no driver manager exists for the engine.
        try:
            self.db_manager = self._get_driver_manager(engine)(self.CONF)
        except ValueError as exc:
            self.skipTest("missing driver manager: %s" % exc)
        self.useFixture(self.db_manager)

        # Switch to the URL actually provisioned by the manager.
        self.CONF.set_override('connection',
                               self.db_manager.url,
                               group="database")

        self.alarm_conn = storage.get_connection_from_config(self.CONF)
        self.alarm_conn.upgrade()

        # Hand this test's connection to any code requesting a new one.
        self.useFixture(
            mockpatch.Patch('aodh.storage.get_connection_from_config',
                            side_effect=self._get_connection))
コード例 #10
0
def setup_app(pecan_config=PECAN_CONFIG, conf=None):
    """Create the Pecan WSGI application for the aodh API.

    :param pecan_config: pecan configuration mapping
    :param conf: aodh configuration object (mandatory)
    :raises RuntimeError: when *conf* is not supplied
    """
    if conf is None:
        # NOTE(jd) That sucks but pecan forces us to use kwargs :(
        raise RuntimeError("Config is actually mandatory")

    # FIXME: Replace DBHook with a hooks.TransactionHook
    db_hook = hooks.DBHook(storage.get_connection_from_config(conf))
    app_hooks = [hooks.ConfigHook(conf), db_hook, hooks.TranslationHook()]

    pecan.configuration.set_config(dict(pecan_config), overwrite=True)

    # NOTE(sileht): pecan debug won't work in multi-process environment
    pecan_debug = conf.api.pecan_debug
    if pecan_debug and conf.api.workers != 1:
        pecan_debug = False
        LOG.warning(_LW('pecan_debug cannot be enabled, if workers is > 1, '
                        'the value is overrided with False'))

    app = pecan.make_app(pecan_config['app']['root'],
                         debug=pecan_debug,
                         hooks=app_hooks,
                         wrap_app=middleware.ParsableErrorMiddleware,
                         guess_content_type_from_ext=False)

    return app
コード例 #11
0
ファイル: db.py プロジェクト: scottwedge/OpenStack-Stein
    def setUp(self):
        """Provision the alarm database for a test run.

        The backend URL comes from AODH_TEST_STORAGE_URL (in-memory
        sqlite by default); mysql:// URLs are rewritten to the PyMySQL
        dialect.
        """
        super(TestBase, self).setUp()
        db_url = os.environ.get('AODH_TEST_STORAGE_URL',
                                'sqlite://').replace("mysql://",
                                                     "mysql+pymysql://")
        engine = urlparse.urlparse(db_url).scheme
        # In case some drivers have additional specification, for example:
        # PyMySQL will have scheme mysql+pymysql.
        self.engine = engine.split('+')[0]

        conf = service.prepare_service(argv=[], config_files=[])
        self.CONF = self.useFixture(fixture_config.Config(conf)).conf
        self.CONF.set_override('connection', db_url, group="database")

        manager = self.DRIVER_MANAGERS.get(self.engine)
        if not manager:
            self.skipTest("missing driver manager: %s" % self.engine)

        self.db_manager = manager(self.CONF)

        self.useFixture(self.db_manager)

        # Use the URL actually provisioned by the driver manager.
        self.CONF.set_override('connection',
                               self.db_manager.url,
                               group="database")

        self.alarm_conn = storage.get_connection_from_config(self.CONF)
        self.alarm_conn.upgrade()

        # Hand this test's connection to any code requesting a new one.
        self.useFixture(
            fixtures.MockPatch('aodh.storage.get_connection_from_config',
                               side_effect=self._get_connection))
コード例 #12
0
 def test_get_connection(self):
     """A log://localhost URL yields an impl_log.Connection."""
     self.CONF.set_override('connection', 'log://localhost',
                            group='database', enforce_type=True)
     conn = storage.get_connection_from_config(self.CONF)
     self.assertIsInstance(conn, impl_log.Connection)
コード例 #13
0
ファイル: event.py プロジェクト: pczerkas/aodh
 def __init__(self, conf):
     """Initialize the event-alarm evaluation service.

     Opens the storage connection and builds the evaluator with an
     RPC alarm notifier.

     :param conf: aodh configuration object
     """
     super(EventAlarmEvaluationService, self).__init__()
     self.conf = conf
     # Storage connection used to look up alarm definitions.
     self.storage_conn = storage.get_connection_from_config(self.conf)
     self.evaluator = event.EventAlarmEvaluator(
         self.conf,
         rpc.RPCAlarmNotifier(self.conf))
コード例 #14
0
ファイル: db.py プロジェクト: openstack/aodh
    def setUp(self):
        """Provision the alarm database for a test run.

        AODH_TEST_STORAGE_URL selects the backend (in-memory sqlite by
        default); mysql:// is rewritten to the PyMySQL dialect.
        """
        super(TestBase, self).setUp()
        db_url = os.environ.get("AODH_TEST_STORAGE_URL", "sqlite://").replace("mysql://", "mysql+pymysql://")
        engine = urlparse.urlparse(db_url).scheme
        # In case some drivers have additional specification, for example:
        # PyMySQL will have scheme mysql+pymysql.
        self.engine = engine.split("+")[0]

        conf = service.prepare_service(argv=[], config_files=[])
        self.CONF = self.useFixture(fixture_config.Config(conf)).conf
        self.CONF.set_override("connection", db_url, group="database", enforce_type=True)

        manager = self.DRIVER_MANAGERS.get(self.engine)
        if not manager:
            self.skipTest("missing driver manager: %s" % self.engine)

        self.db_manager = manager(self.CONF)

        self.useFixture(self.db_manager)

        # Switch to the URL actually provisioned by the driver manager.
        self.CONF.set_override("connection", self.db_manager.url, group="database", enforce_type=True)

        self.alarm_conn = storage.get_connection_from_config(self.CONF)
        self.alarm_conn.upgrade()

        # Hand this test's connection to any code requesting a new one.
        self.useFixture(mockpatch.Patch("aodh.storage.get_connection_from_config", side_effect=self._get_connection))
コード例 #15
0
ファイル: alarm_conversion.py プロジェクト: cug-heshun/aodh
def conversion():
    """Convert legacy 'combination' alarms into 'composite' alarms.

    Asks for interactive confirmation first.  Each combination alarm
    is cloned into a new composite alarm named
    'From-combination: <alarm_id>'; alarms already converted, with a
    missing dependent alarm, or depending on unsupported sub-alarm
    types are skipped with a warning.  With --delete-combination-alarm
    the original combination alarms are removed afterwards.
    """
    confirm = moves.input("This tool is used for converting the combination "
                          "alarms to composite alarms, please type 'yes' to "
                          "confirm: ")
    if confirm != 'yes':
        print("Alarm conversion aborted!")
        return
    args = get_parser().parse_args()
    conf = service.prepare_service()
    conn = storage.get_connection_from_config(conf)
    combination_alarms = list(conn.get_alarms(alarm_type='combination',
                                              alarm_id=args.alarm_id or None))
    count = 0
    for alarm in combination_alarms:
        new_name = 'From-combination: %s' % alarm.alarm_id
        # Skip alarms that were already converted on a previous run.
        n_alarm = list(conn.get_alarms(name=new_name, alarm_type='composite'))
        if n_alarm:
            LOG.warning(_LW('Alarm %(alarm)s has been already converted as '
                            'composite alarm: %(n_alarm_id)s, skipped.'),
                        {'alarm': alarm.alarm_id,
                         'n_alarm_id': n_alarm[0].alarm_id})
            continue
        try:
            composite_rule = _generate_composite_rule(conn, alarm)
        except DependentAlarmNotFound as e:
            LOG.warning(_LW('The dependent alarm %(dep_alarm)s of alarm %'
                            '(com_alarm)s not found, skipped.'),
                        {'com_alarm': e.com_alarm_id,
                         'dep_alarm': e.dependent_alarm_id})
            continue
        except UnsupportedSubAlarmType as e:
            LOG.warning(_LW('Alarm conversion from combination to composite '
                            'only support combination alarms depending '
                            'threshold alarms, the type of alarm %(alarm)s '
                            'is: %(type)s, skipped.'),
                        {'alarm': e.sub_alarm_id, 'type': e.sub_alarm_type})
            continue
        # Clone the source alarm, then override its identity fields.
        new_alarm = models.Alarm(**alarm.as_dict())
        new_alarm.alarm_id = str(uuid.uuid4())
        new_alarm.name = new_name
        new_alarm.type = 'composite'
        new_alarm.description = ('composite alarm converted from combination '
                                 'alarm: %s' % alarm.alarm_id)
        new_alarm.rule = composite_rule
        new_alarm.timestamp = datetime.datetime.now()
        conn.create_alarm(new_alarm)
        LOG.info(_LI('End Converting combination alarm %(s_alarm)s to '
                     'composite alarm %(d_alarm)s'),
                 {'s_alarm': alarm.alarm_id, 'd_alarm': new_alarm.alarm_id})
        count += 1
    if args.delete_combination_alarm:
        for alarm in combination_alarms:
            LOG.info(_LI('Deleting the combination alarm %s...'),
                     alarm.alarm_id)
            conn.delete_alarm(alarm.alarm_id)
    LOG.info(_LI('%s combination alarms have been converted to composite '
                 'alarms.'), count)
コード例 #16
0
ファイル: storage.py プロジェクト: openstack/aodh
def expirer():
    """Purge alarm-history rows older than the configured TTL."""
    conf = service.prepare_service()

    ttl = conf.database.alarm_history_time_to_live
    if ttl <= 0:
        # A non-positive TTL disables history expiration entirely.
        LOG.info(_LI("Nothing to clean, database alarm history time to live "
                     "is disabled"))
        return

    LOG.debug("Clearing expired alarm history data")
    storage_conn = storage.get_connection_from_config(conf)
    storage_conn.clear_expired_alarm_history_data(ttl)
コード例 #17
0
ファイル: fixtures.py プロジェクト: paperandsoap/aodh
    def start_fixture(self):
        """Set up config.

        Prepares an isolated aodh configuration and database for a
        gabbi run: mocks service.prepare_service() to keep returning
        one tweaked conf object, derives a unique per-run database URL,
        creates the database for mysql/postgresql backends and upgrades
        the storage schema.
        """

        self.conf = None
        self.conn = None

        # Determine the database connection.
        db_url = os.environ.get(
            'AODH_TEST_STORAGE_URL',
            os.environ.get("OVERTEST_URL",
                           'sqlite://').replace("mysql://",
                                                "mysql+pymysql://"))
        if not db_url:
            raise case.SkipTest('No database connection configured')

        conf = service.prepare_service([], config_files=[])
        # NOTE(jd): prepare_service() is called twice: first by load_app() for
        # Pecan, then Pecan calls pastedeploy, which starts the app, which has
        # no way to pass the conf object so that Paste apps calls again
        # prepare_service. In real life, that's not a problem, but here we want
        # to be sure that the second time the same conf object is returned
        # since we tweaked it. To that, once we called prepare_service() we
        # mock it so it returns the same conf object.
        self.prepare_service = service.prepare_service
        service.prepare_service = mock.Mock()
        service.prepare_service.return_value = conf
        conf = fixture_config.Config(conf).conf
        self.conf = conf
        opts.set_defaults(self.conf)

        conf.set_override('policy_file',
                          os.path.abspath('aodh/tests/open-policy.json'),
                          group='oslo_policy',
                          enforce_type=True)
        conf.set_override(
            'paste_config',
            os.path.abspath('aodh/tests/functional/gabbi/gabbi_paste.ini'),
            group='api',
        )

        conf.set_override('pecan_debug', True, group='api', enforce_type=True)

        # Append a unique suffix to the database path/name so parallel
        # runs do not collide.
        parsed_url = list(urlparse.urlparse(db_url))
        parsed_url[2] += '-%s' % str(uuid.uuid4()).replace('-', '')
        db_url = urlparse.urlunparse(parsed_url)

        conf.set_override('connection',
                          db_url,
                          group='database',
                          enforce_type=True)

        # Server-based backends need the database created first.
        if (parsed_url[0].startswith("mysql")
                or parsed_url[0].startswith("postgresql")):
            sqlalchemy_utils.create_database(conf.database.connection)

        self.conn = storage.get_connection_from_config(self.conf)
        self.conn.upgrade()
コード例 #18
0
ファイル: fixtures.py プロジェクト: ISCAS-VDI/aodh-base
    def start_fixture(self):
        """Set up config.

        Prepares an isolated aodh configuration and database for a
        gabbi run: mocks service.prepare_service() to keep returning
        one tweaked conf object, derives a unique per-run database URL
        (except for sqlite), creates the database for mysql/postgresql
        backends and upgrades the storage schema.
        """

        self.conf = None
        self.conn = None

        # Determine the database connection.
        db_url = os.environ.get(
            'AODH_TEST_STORAGE_URL', "").replace(
                "mysql://", "mysql+pymysql://")
        if not db_url:
            self.fail('No database connection configured')

        conf = service.prepare_service([], config_files=[])
        # NOTE(jd): prepare_service() is called twice: first by load_app() for
        # Pecan, then Pecan calls pastedeploy, which starts the app, which has
        # no way to pass the conf object so that Paste apps calls again
        # prepare_service. In real life, that's not a problem, but here we want
        # to be sure that the second time the same conf object is returned
        # since we tweaked it. To that, once we called prepare_service() we
        # mock it so it returns the same conf object.
        self.prepare_service = service.prepare_service
        service.prepare_service = mock.Mock()
        service.prepare_service.return_value = conf
        conf = fixture_config.Config(conf).conf
        self.conf = conf
        opts.set_defaults(self.conf)

        conf.set_override('policy_file',
                          os.path.abspath(
                              'aodh/tests/open-policy.json'),
                          group='oslo_policy',
                          enforce_type=True)
        conf.set_override(
            'paste_config',
            os.path.abspath('aodh/tests/functional/gabbi/gabbi_paste.ini'),
            group='api',
        )

        conf.set_override('pecan_debug', True, group='api',
                          enforce_type=True)

        # Append a unique suffix to non-sqlite database names so
        # parallel runs do not collide.
        parsed_url = urlparse.urlparse(db_url)
        if parsed_url.scheme != 'sqlite':
            parsed_url = list(parsed_url)
            parsed_url[2] += '-%s' % str(uuid.uuid4()).replace('-', '')
            db_url = urlparse.urlunparse(parsed_url)

        conf.set_override('connection', db_url, group='database',
                          enforce_type=True)

        # Server-based backends need the database created first.
        if (parsed_url[0].startswith("mysql")
           or parsed_url[0].startswith("postgresql")):
            sqlalchemy_utils.create_database(conf.database.connection)

        self.conn = storage.get_connection_from_config(self.conf)
        self.conn.upgrade()
コード例 #19
0
ファイル: fixtures.py プロジェクト: bopopescu/OpenStack-Ocata
    def start_fixture(self):
        """Set up config.

        Prepares an isolated aodh configuration and database for a
        gabbi run, then records the tweaked conf in the module-level
        LOAD_APP_KWARGS so the app loader can pick it up.
        """

        # Written at the end so load_app() receives this fixture's conf.
        global LOAD_APP_KWARGS

        self.conf = None
        self.conn = None

        # Determine the database connection.
        db_url = os.environ.get('AODH_TEST_STORAGE_URL',
                                "").replace("mysql://", "mysql+pymysql://")
        if not db_url:
            self.fail('No database connection configured')

        conf = service.prepare_service([], config_files=[])
        # NOTE(jd): prepare_service() is called twice: first by load_app() for
        # Pecan, then Pecan calls pastedeploy, which starts the app, which has
        # no way to pass the conf object so that Paste apps calls again
        # prepare_service. In real life, that's not a problem, but here we want
        # to be sure that the second time the same conf object is returned
        # since we tweaked it. To that, once we called prepare_service() we
        # mock it so it returns the same conf object.
        self.prepare_service = service.prepare_service
        service.prepare_service = mock.Mock()
        service.prepare_service.return_value = conf
        conf = fixture_config.Config(conf).conf
        self.conf = conf
        opts.set_defaults(self.conf)

        conf.set_override('policy_file',
                          os.path.abspath('aodh/tests/open-policy.json'),
                          group='oslo_policy',
                          enforce_type=True)
        # NOTE(review): auth_mode=None presumably disables API
        # authentication for the functional tests — verify.
        conf.set_override('auth_mode', None, group='api')

        # Append a unique suffix to non-sqlite database names so
        # parallel runs do not collide.
        parsed_url = urlparse.urlparse(db_url)
        if parsed_url.scheme != 'sqlite':
            parsed_url = list(parsed_url)
            parsed_url[2] += '-%s' % uuidutils.generate_uuid(dashed=False)
            db_url = urlparse.urlunparse(parsed_url)

        conf.set_override('connection',
                          db_url,
                          group='database',
                          enforce_type=True)

        # Server-based backends need the database created first.
        if (parsed_url[0].startswith("mysql")
                or parsed_url[0].startswith("postgresql")):
            sqlalchemy_utils.create_database(conf.database.connection)

        self.conn = storage.get_connection_from_config(self.conf)
        self.conn.upgrade()

        LOAD_APP_KWARGS = {
            'conf': conf,
        }
コード例 #20
0
def expirer():
    """Remove alarm-history entries older than the configured TTL."""
    conf = service.prepare_service()

    time_to_live = conf.database.alarm_history_time_to_live
    if time_to_live > 0:
        LOG.debug("Clearing expired alarm history data")
        connection = storage.get_connection_from_config(conf)
        connection.clear_expired_alarm_history_data(time_to_live)
    else:
        # A non-positive TTL disables history expiration.
        LOG.info("Nothing to clean, database alarm history time to live "
                 "is disabled")
コード例 #21
0
def setup_app(root, conf):
    """Create the Pecan app rooted at *root*, wired with the standard
    config, database and translation hooks built from *conf*.
    """
    storage_conn = storage.get_connection_from_config(conf)
    app_hooks = [
        hooks.ConfigHook(conf),
        hooks.DBHook(storage_conn),
        hooks.TranslationHook(),
    ]
    return pecan.make_app(root,
                          hooks=app_hooks,
                          wrap_app=middleware.ParsableErrorMiddleware,
                          guess_content_type_from_ext=False)
コード例 #22
0
ファイル: event.py プロジェクト: yi-cloud/aodh
 def __init__(self, worker_id, conf):
     """Initialize the event-driven alarm evaluation service.

     Opens the storage connection, builds the evaluator and starts a
     batch notification listener on the configured event-alarm topic.

     :param worker_id: worker identifier passed to the base service
     :param conf: aodh configuration object
     """
     super(EventAlarmEvaluationService, self).__init__(worker_id)
     self.conf = conf
     self.storage_conn = storage.get_connection_from_config(self.conf)
     self.evaluator = event.EventAlarmEvaluator(self.conf)
     # Batch size and timeout come from the [listener] config section.
     self.listener = messaging.get_batch_notification_listener(
         messaging.get_transport(self.conf),
         [oslo_messaging.Target(
             topic=self.conf.listener.event_alarm_topic)],
         [EventAlarmEndpoint(self.evaluator)], False,
         self.conf.listener.batch_size,
         self.conf.listener.batch_timeout)
     self.listener.start()
コード例 #23
0
ファイル: event.py プロジェクト: paperandsoap/aodh
 def start(self):
     """Start the service: open the storage connection, build the
     evaluator and begin listening for event notifications in batches.
     """
     super(EventAlarmEvaluationService, self).start()
     self.storage_conn = storage.get_connection_from_config(self.conf)
     self.evaluator = event.EventAlarmEvaluator(self.conf)
     # Batch size and timeout come from the [listener] config section.
     self.listener = messaging.get_batch_notification_listener(
         messaging.get_transport(self.conf),
         [oslo_messaging.Target(
             topic=self.conf.listener.event_alarm_topic)],
         [EventAlarmEndpoint(self.evaluator)], False,
         self.conf.listener.batch_size,
         self.conf.listener.batch_timeout)
     self.listener.start()
     # Add a dummy thread to have wait() working
     self.tg.add_timer(604800, lambda: None)
コード例 #24
0
def main(argv):
    """Upgrade and/or wipe the HBase test alarm storage.

    Only acts when AODH_TEST_STORAGE_URL is set; "--upgrade" and
    "--clear" arguments are honoured in the order they appear.
    """
    cfg.CONF([], project='aodh')
    base_url = os.getenv("AODH_TEST_STORAGE_URL")
    if not base_url:
        return
    prefix = os.getenv("AODH_TEST_HBASE_TABLE_PREFIX", "test")
    url = "%s?table_prefix=%s" % (base_url, prefix)
    cfg.CONF.set_override("connection", url, group="database",
                          enforce_type=True)
    alarm_conn = storage.get_connection_from_config(cfg.CONF)
    for arg in argv:
        if arg == "--upgrade":
            alarm_conn.upgrade()
        if arg == "--clear":
            alarm_conn.clear()
コード例 #25
0
 def setUp(self):
     """Prepare an in-memory SQLite target store for the migration
     tests, then populate source alarms and alarm-change history.
     """
     sql_conf = service.prepare_service(argv=[], config_files=[])
     self.sql_conf = self.useFixture(fixture_config.Config(sql_conf)).conf
     # using sqlite to represent the type of SQL dbs
     self.sql_conf.set_override('connection', "sqlite://",
                                group="database", enforce_type=True)
     # NOTE(review): "sql_namager" looks like a typo for "sql_manager";
     # renaming the attribute could break other references to it, so
     # verify before changing.
     self.sql_namager = tests_db.SQLiteManager(self.sql_conf)
     self.useFixture(self.sql_namager)
     # Switch to the URL actually provisioned by the manager.
     self.sql_conf.set_override('connection', self.sql_namager.url,
                                group="database", enforce_type=True)
     self.sql_alarm_conn = storage.get_connection_from_config(self.sql_conf)
     self.sql_alarm_conn.upgrade()
     super(TestDataMigration, self).setUp()
     self.add_some_alarms()
     self._add_some_alarm_changes()
コード例 #26
0
def main(argv):
    """Maintenance entry point for the HBase test storage backend.

    With AODH_TEST_STORAGE_URL set, builds a prefixed connection URL
    and applies "--upgrade" / "--clear" actions from *argv* in order.
    """
    cfg.CONF([], project='aodh')
    if not os.getenv("AODH_TEST_STORAGE_URL"):
        return
    url = "%s?table_prefix=%s" % (
        os.getenv("AODH_TEST_STORAGE_URL"),
        os.getenv("AODH_TEST_HBASE_TABLE_PREFIX", "test"))
    cfg.CONF.set_override("connection",
                          url,
                          group="database",
                          enforce_type=True)
    alarm_conn = storage.get_connection_from_config(cfg.CONF)
    actions = {"--upgrade": alarm_conn.upgrade,
               "--clear": alarm_conn.clear}
    for arg in argv:
        action = actions.get(arg)
        if action is not None:
            action()
コード例 #27
0
ファイル: __init__.py プロジェクト: openstack/aodh
    def __init__(self, worker_id, conf):
        """Initialize the alarm evaluation service.

        Loads evaluator extensions, opens the storage connection, joins
        the partition-coordination group and schedules the periodic
        evaluation (and, when coordination is active, heartbeat) tasks
        on a background daemon thread.

        :param worker_id: worker identifier passed to the base service
        :param conf: aodh configuration object
        """
        super(AlarmEvaluationService, self).__init__(worker_id)
        self.conf = conf

        ef = lambda: futures.ThreadPoolExecutor(max_workers=10)
        self.periodic = periodics.PeriodicWorker.create(
            [], executor_factory=ef)

        self.evaluators = extension.ExtensionManager(
            namespace=self.EVALUATOR_EXTENSIONS_NAMESPACE,
            invoke_on_load=True,
            invoke_args=(self.conf,)
        )
        self.storage_conn = storage.get_connection_from_config(self.conf)

        self.partition_coordinator = coordination.PartitionCoordinator(
            self.conf)
        self.partition_coordinator.start()
        self.partition_coordinator.join_group(self.PARTITIONING_GROUP_NAME)

        # allow time for coordination if necessary
        delay_start = self.partition_coordinator.is_active()

        if self.evaluators:
            @periodics.periodic(spacing=self.conf.evaluation_interval,
                                run_immediately=not delay_start)
            def evaluate_alarms():
                self._evaluate_assigned_alarms()

            self.periodic.add(evaluate_alarms)

        if self.partition_coordinator.is_active():
            # Cap heartbeat spacing at a quarter of the evaluation
            # interval.
            heartbeat_interval = min(self.conf.coordination.heartbeat,
                                     self.conf.evaluation_interval / 4)

            @periodics.periodic(spacing=heartbeat_interval,
                                run_immediately=True)
            def heartbeat():
                self.partition_coordinator.heartbeat()

            self.periodic.add(heartbeat)

        # Run the periodic worker in the background so __init__ returns.
        t = threading.Thread(target=self.periodic.start)
        t.daemon = True
        t.start()
コード例 #28
0
    def __init__(self, worker_id, conf):
        """Initialize the alarm evaluation service.

        Loads evaluator extensions, opens the storage connection, joins
        the partition-coordination group and schedules the periodic
        evaluation (and, when coordination is active, heartbeat) tasks
        on a background daemon thread.

        :param worker_id: worker identifier passed to the base service
        :param conf: aodh configuration object
        """
        super(AlarmEvaluationService, self).__init__(worker_id)
        self.conf = conf

        ef = lambda: futures.ThreadPoolExecutor(max_workers=10)  # noqa: E731
        self.periodic = periodics.PeriodicWorker.create([],
                                                        executor_factory=ef)

        self.evaluators = extension.ExtensionManager(
            namespace=self.EVALUATOR_EXTENSIONS_NAMESPACE,
            invoke_on_load=True,
            invoke_args=(self.conf, ))
        self.storage_conn = storage.get_connection_from_config(self.conf)

        self.partition_coordinator = coordination.PartitionCoordinator(
            self.conf)
        self.partition_coordinator.start()
        self.partition_coordinator.join_group(self.PARTITIONING_GROUP_NAME)

        # allow time for coordination if necessary
        delay_start = self.partition_coordinator.is_active()

        if self.evaluators:

            @periodics.periodic(spacing=self.conf.evaluation_interval,
                                run_immediately=not delay_start)
            def evaluate_alarms():
                self._evaluate_assigned_alarms()

            self.periodic.add(evaluate_alarms)

        if self.partition_coordinator.is_active():
            # Cap heartbeat spacing at a quarter of the evaluation
            # interval.
            heartbeat_interval = min(self.conf.coordination.heartbeat,
                                     self.conf.evaluation_interval / 4)

            @periodics.periodic(spacing=heartbeat_interval,
                                run_immediately=True)
            def heartbeat():
                self.partition_coordinator.heartbeat()

            self.periodic.add(heartbeat)

        # Run the periodic worker in the background so __init__ returns.
        t = threading.Thread(target=self.periodic.start)
        t.daemon = True
        t.start()
コード例 #29
0
    def setUp(self):
        """Provision the alarm database for a test run.

        The URL comes from AODH_TEST_STORAGE_URL or OVERTEST_URL
        (in-memory sqlite by default); mysql:// is rewritten to the
        PyMySQL dialect.
        """
        super(TestBase, self).setUp()
        db_url = os.environ.get(
            'AODH_TEST_STORAGE_URL',
            os.environ.get("OVERTEST_URL",
                           'sqlite://').replace("mysql://",
                                                "mysql+pymysql://"))
        engine = urlparse.urlparse(db_url).scheme
        # In case some drivers have additional specification, for example:
        # PyMySQL will have scheme mysql+pymysql.
        engine = engine.split('+')[0]

        # NOTE(Alexei_987) Shortcut to skip expensive db setUp
        test_method = self._get_test_method()
        if (hasattr(test_method, '_run_with')
                and engine not in test_method._run_with):
            raise testcase.TestSkipped('Test is not applicable for %s' %
                                       engine)

        conf = service.prepare_service(argv=[], config_files=[])
        self.CONF = self.useFixture(fixture_config.Config(conf)).conf
        self.CONF.set_override('connection',
                               db_url,
                               group="database",
                               enforce_type=True)

        manager = self.DRIVER_MANAGERS.get(engine)
        if not manager:
            self.skipTest("missing driver manager: %s" % engine)

        self.db_manager = manager(self.CONF)

        self.useFixture(self.db_manager)

        # Use the URL actually provisioned by the driver manager.
        self.CONF.set_override('connection',
                               self.db_manager.url,
                               group="database",
                               enforce_type=True)

        self.alarm_conn = storage.get_connection_from_config(self.CONF)
        self.alarm_conn.upgrade()

        # Hand this test's connection to any code requesting a new one.
        self.useFixture(
            mockpatch.Patch('aodh.storage.get_connection_from_config',
                            side_effect=self._get_connection))
コード例 #30
0
ファイル: app.py プロジェクト: openstack/aodh
def setup_app(pecan_config=PECAN_CONFIG, conf=None):
    """Build and return the Pecan WSGI application for the API."""
    # NOTE(jd) That sucks but pecan forces us to use kwargs :(
    if conf is None:
        raise RuntimeError("Config is actually mandatory")

    # FIXME: Replace DBHook with a hooks.TransactionHook
    storage_conn = storage.get_connection_from_config(conf)
    app_hooks = [
        hooks.ConfigHook(conf),
        hooks.DBHook(storage_conn),
        hooks.TranslationHook(),
    ]

    pecan.configuration.set_config(dict(pecan_config), overwrite=True)

    return pecan.make_app(
        pecan_config['app']['root'],
        hooks=app_hooks,
        wrap_app=middleware.ParsableErrorMiddleware,
        guess_content_type_from_ext=False,
    )
コード例 #31
0
    def setUp(self):
        super(TestBase, self).setUp()
        fallback = os.environ.get(
            "OVERTEST_URL", 'sqlite://').replace("mysql://",
                                                 "mysql+pymysql://")
        db_url = os.environ.get('AODH_TEST_STORAGE_URL', fallback)

        # Reduce e.g. "mysql+pymysql" to the bare backend name "mysql".
        scheme = urlparse.urlparse(db_url).scheme
        engine = scheme.split('+')[0]

        # NOTE(Alexei_987) Shortcut to skip expensive db setUp
        test_method = self._get_test_method()
        if (hasattr(test_method, '_run_with')
                and engine not in test_method._run_with):
            raise testcase.TestSkipped(
                'Test is not applicable for %s' % engine)

        conf = service.prepare_service(argv=[], config_files=[])
        self.CONF = self.useFixture(fixture_config.Config(conf)).conf
        self.CONF.set_override('connection', db_url, group="database",
                               enforce_type=True)

        driver_manager = self.DRIVER_MANAGERS.get(engine)
        if not driver_manager:
            self.skipTest("missing driver manager: %s" % engine)

        self.db_manager = driver_manager(self.CONF)

        self.useFixture(self.db_manager)

        self.CONF.set_override('connection', self.db_manager.url,
                               group="database", enforce_type=True)

        self.alarm_conn = storage.get_connection_from_config(self.CONF)
        self.alarm_conn.upgrade()

        # Route subsequent connection requests through the test helper.
        self.useFixture(mockpatch.Patch(
            'aodh.storage.get_connection_from_config',
            side_effect=self._get_connection))
コード例 #32
0
ファイル: __init__.py プロジェクト: sileht/aodh
    def start(self):
        """Connect storage, join the coordination group and arm the timers."""
        super(AlarmEvaluationService, self).start()
        self.storage_conn = storage.get_connection_from_config(self.conf)
        self.partition_coordinator.start()
        self.partition_coordinator.join_group(self.PARTITIONING_GROUP_NAME)

        # allow time for coordination if necessary
        delay_start = self.partition_coordinator.is_active()

        if self.evaluators:
            period = self.conf.evaluation_interval
            initial = period if delay_start else None
            self.tg.add_timer(period,
                              self._evaluate_assigned_alarms,
                              initial_delay=initial)
        if self.partition_coordinator.is_active():
            beat = min(self.conf.coordination.heartbeat,
                       self.conf.evaluation_interval / 4)
            self.tg.add_timer(beat, self.partition_coordinator.heartbeat)
        # Add a dummy thread to have wait() working
        self.tg.add_timer(604800, lambda: None)
コード例 #33
0
ファイル: storage.py プロジェクト: yi-cloud/aodh
def expirer():
    """Purge expired alarm-history rows, honoring the configured batch size."""
    conf = service.prepare_service()

    ttl = conf.database.alarm_history_time_to_live
    if ttl <= 0:
        LOG.info("Nothing to clean, database alarm history time to live "
                 "is disabled")
        return

    LOG.debug("Clearing expired alarm history data")
    conn = storage.get_connection_from_config(conf)
    max_count = conf.database.alarm_histories_delete_batch_size
    try:
        if max_count > 0:
            conn.clear_expired_alarm_history_data(ttl, max_count)
        else:
            # No explicit batch size configured: delete in chunks of 100
            # until a pass removes nothing.
            deleted = max_count = 100
            while deleted and deleted > 0:
                deleted = conn.clear_expired_alarm_history_data(ttl,
                                                                max_count)
    except TypeError:
        LOG.warning("Storage driver does not support "
                    "'alarm_histories_delete_batch_size' config option.")
コード例 #34
0
 def stop_fixture(self):
     """Drop stored test data, reset config, restore prepare_service."""
     if self.conf:
         conn = storage.get_connection_from_config(self.conf)
         conn.clear()
         self.conf.reset()
     service.prepare_service = self.prepare_service
コード例 #35
0
 def _storage_conn(self):
     """Return the storage connection, lazily creating and caching it."""
     conn = self.storage_conn
     if not conn:
         conn = storage.get_connection_from_config(self.conf)
         self.storage_conn = conn
     return conn
コード例 #36
0
ファイル: storage.py プロジェクト: chungg/aodh
def dbsync():
    """Upgrade the alarm storage schema to the latest version."""
    service.prepare_service()
    conn = storage.get_connection_from_config(cfg.CONF, 'alarm')
    conn.upgrade()
コード例 #37
0
ファイル: __init__.py プロジェクト: ISCAS-VDI/aodh-base
 def _storage_conn(self):
     """Memoized accessor for the storage connection."""
     if not self.storage_conn:
         # First access: build the connection from the service config.
         self.storage_conn = storage.get_connection_from_config(self.conf)
     return self.storage_conn
コード例 #38
0
ファイル: test_get_connection.py プロジェクト: openstack/aodh
 def test_only_default_url(self):
     """A bare log:// URL must resolve to the log driver connection."""
     self.CONF.set_override("connection", "log://", group="database",
                            enforce_type=True)
     connection = storage.get_connection_from_config(self.CONF)
     self.assertIsInstance(connection, impl_log.Connection)
コード例 #39
0
def dbsync():
    """Prepare the service configuration and upgrade the database schema."""
    conf = service.prepare_service()
    conn = storage.get_connection_from_config(conf)
    conn.upgrade()
コード例 #40
0
ファイル: fixtures.py プロジェクト: pczerkas/aodh
 def stop_fixture(self):
     """Reset the config, wipe data, and un-patch prepare_service."""
     conf = self.conf
     if conf:
         storage.get_connection_from_config(conf).clear()
         conf.reset()
     service.prepare_service = self.prepare_service
コード例 #41
0
ファイル: test_get_connection.py プロジェクト: openstack/aodh
 def test_get_connection(self):
     """A log://host URL must yield an impl_log.Connection instance."""
     self.CONF.set_override('connection', 'log://localhost',
                            group='database', enforce_type=True)
     conn = storage.get_connection_from_config(self.CONF)
     self.assertIsInstance(conn, impl_log.Connection)
コード例 #42
0
def dbsync():
    """Run schema migrations against the configured storage backend."""
    storage.get_connection_from_config(service.prepare_service()).upgrade()
コード例 #43
0
 def __init__(self, conf):
     """Store the config and set up storage access and the evaluator."""
     super(EventAlarmEvaluationService, self).__init__()
     self.conf = conf
     self.storage_conn = storage.get_connection_from_config(conf)
     self.evaluator = event.EventAlarmEvaluator(conf)
コード例 #44
0
def conversion():
    """Convert 'combination' alarms into equivalent 'composite' alarms.

    Reads the target alarm id (optional) and the delete flag from the
    command line, converts each matching combination alarm into a new
    composite alarm, and optionally deletes the originals afterwards.
    """
    args = get_parser().parse_args()
    conf = service.prepare_service([])
    conn = storage.get_connection_from_config(conf)
    # With no --alarm-id argument, convert every combination alarm.
    combination_alarms = list(
        conn.get_alarms(alarm_type='combination',
                        alarm_id=args.alarm_id or None))
    count = 0
    for alarm in combination_alarms:
        new_name = 'From-combination: %s' % alarm.alarm_id
        # The derived name doubles as an idempotency marker: if a composite
        # alarm with it already exists, this alarm was converted before.
        n_alarm = list(conn.get_alarms(name=new_name, alarm_type='composite'))
        if n_alarm:
            LOG.warning(
                _LW('Alarm %(alarm)s has been already converted as '
                    'composite alarm: %(n_alarm_id)s, skipped.'), {
                        'alarm': alarm.alarm_id,
                        'n_alarm_id': n_alarm[0].alarm_id
                    })
            continue
        try:
            composite_rule = _generate_composite_rule(conn, alarm)
        except DependentAlarmNotFound as e:
            # A sub-alarm referenced by the combination rule is missing.
            LOG.warning(
                _LW('The dependent alarm %(dep_alarm)s of alarm %'
                    '(com_alarm)s not found, skipped.'), {
                        'com_alarm': e.com_alarm_id,
                        'dep_alarm': e.dependent_alarm_id
                    })
            continue
        except UnsupportedSubAlarmType as e:
            # Only threshold sub-alarms can be expressed as composite rules.
            LOG.warning(
                _LW('Alarm conversion from combination to composite '
                    'only support combination alarms depending '
                    'threshold alarms, the type of alarm %(alarm)s '
                    'is: %(type)s, skipped.'), {
                        'alarm': e.sub_alarm_id,
                        'type': e.sub_alarm_type
                    })
            continue
        # Clone the original alarm, then override the identity fields and
        # the rule so it becomes a fresh composite alarm.
        new_alarm = models.Alarm(**alarm.as_dict())
        new_alarm.alarm_id = uuidutils.generate_uuid()
        new_alarm.name = new_name
        new_alarm.type = 'composite'
        new_alarm.description = ('composite alarm converted from combination '
                                 'alarm: %s' % alarm.alarm_id)
        new_alarm.rule = composite_rule
        new_alarm.timestamp = datetime.datetime.now()
        conn.create_alarm(new_alarm)
        LOG.info(
            _LI('End Converting combination alarm %(s_alarm)s to '
                'composite alarm %(d_alarm)s'), {
                    's_alarm': alarm.alarm_id,
                    'd_alarm': new_alarm.alarm_id
                })
        count += 1
    if args.delete_combination_alarm:
        # Originals are removed only after all conversions completed.
        for alarm in combination_alarms:
            LOG.info(_LI('Deleting the combination alarm %s...'),
                     alarm.alarm_id)
            conn.delete_alarm(alarm.alarm_id)
    LOG.info(
        _LI('%s combination alarms have been converted to composite '
            'alarms.'), count)