Example #1
class Connection(base.Connection):
    """Put the data into a SQLAlchemy database. """
    CAPABILITIES = base.update_nested(base.Connection.CAPABILITIES,
                                      AVAILABLE_CAPABILITIES)
    STORAGE_CAPABILITIES = base.update_nested(
        base.Connection.STORAGE_CAPABILITIES,
        AVAILABLE_STORAGE_CAPABILITIES,
    )

    def __init__(self, conf, url):
        # Set max_retries to 0: oslo.db may otherwise retry the db connection
        # up to max_retries ^ 2 times on failure, and db reconnection is
        # already handled in storage.__init__.get_connection_from_config.
        options = dict(conf.database.items())
        options['max_retries'] = 0
        # oslo.db doesn't support options defined by Aodh
        for opt in storage.OPTS:
            options.pop(opt.name, None)
        self._engine_facade = db_session.EngineFacade(url, **options)
        self.conf = conf

    def disconnect(self):
        self._engine_facade.get_engine().dispose()

    def _get_alembic_config(self):
        cfg = config.Config("%s/sqlalchemy/alembic/alembic.ini" %
                            os.path.dirname(__file__))
        cfg.set_main_option('sqlalchemy.url', self.conf.database.connection)
        return cfg

    def upgrade(self, nocreate=False):
        cfg = self._get_alembic_config()
        cfg.conf = self.conf
        if nocreate:
            command.upgrade(cfg, "head")
        else:
            engine = self._engine_facade.get_engine()
            ctxt = migration.MigrationContext.configure(engine.connect())
            current_version = ctxt.get_current_revision()
            if current_version is None:
                models.Base.metadata.create_all(engine)
                command.stamp(cfg, "head")
            else:
                command.upgrade(cfg, "head")

    def clear(self):
        engine = self._engine_facade.get_engine()
        for table in reversed(models.Base.metadata.sorted_tables):
            engine.execute(table.delete())
        engine.dispose()

    def _retrieve_data(self, filter_expr, orderby, limit, table):
        if limit == 0:
            return []

        session = self._engine_facade.get_session()
        engine = self._engine_facade.get_engine()
        query = session.query(table)
        transformer = sql_utils.QueryTransformer(table,
                                                 query,
                                                 dialect=engine.dialect.name)
        if filter_expr is not None:
            transformer.apply_filter(filter_expr)

        transformer.apply_options(orderby, limit)

        retrieve = {
            models.Alarm: self._retrieve_alarms,
            models.AlarmChange: self._retrieve_alarm_history
        }
        return retrieve[table](transformer.get_query())

    @staticmethod
    def _row_to_alarm_model(row):
        return alarm_api_models.Alarm(
            alarm_id=row.alarm_id,
            enabled=row.enabled,
            type=row.type,
            name=row.name,
            description=row.description,
            timestamp=row.timestamp,
            user_id=row.user_id,
            project_id=row.project_id,
            state=row.state,
            state_timestamp=row.state_timestamp,
            ok_actions=row.ok_actions,
            alarm_actions=row.alarm_actions,
            insufficient_data_actions=row.insufficient_data_actions,
            rule=row.rule,
            time_constraints=row.time_constraints,
            repeat_actions=row.repeat_actions,
            severity=row.severity)

    def _retrieve_alarms(self, query):
        return (self._row_to_alarm_model(x) for x in query.all())

    def get_alarms(self,
                   name=None,
                   user=None,
                   state=None,
                   meter=None,
                   project=None,
                   enabled=None,
                   alarm_id=None,
                   alarm_type=None,
                   severity=None,
                   exclude=None):
        """Yields a lists of alarms that match filters.

        :param name: Optional name for alarm.
        :param user: Optional ID for user that owns the resource.
        :param state: Optional string for alarm state.
        :param meter: Optional string for alarms associated with meter.
        :param project: Optional ID for project that owns the resource.
        :param enabled: Optional boolean to list disabled alarms.
        :param alarm_id: Optional alarm_id to return one alarm.
        :param alarm_type: Optional alarm type.
        :param severity: Optional alarm severity.
        :param exclude: Optional dict for inequality constraint.
        """

        session = self._engine_facade.get_session()
        query = session.query(models.Alarm)
        if name is not None:
            query = query.filter(models.Alarm.name == name)
        if enabled is not None:
            query = query.filter(models.Alarm.enabled == enabled)
        if user is not None:
            query = query.filter(models.Alarm.user_id == user)
        if project is not None:
            query = query.filter(models.Alarm.project_id == project)
        if alarm_id is not None:
            query = query.filter(models.Alarm.alarm_id == alarm_id)
        if state is not None:
            query = query.filter(models.Alarm.state == state)
        if alarm_type is not None:
            query = query.filter(models.Alarm.type == alarm_type)
        if severity is not None:
            query = query.filter(models.Alarm.severity == severity)
        if exclude is not None:
            for key, value in six.iteritems(exclude):
                query = query.filter(getattr(models.Alarm, key) != value)

        query = query.order_by(desc(models.Alarm.timestamp))
        alarms = self._retrieve_alarms(query)

        # TODO(cmart): improve this by using sqlalchemy.func factory
        if meter is not None:
            alarms = filter(
                lambda row: row.rule.get('meter_name', None) == meter, alarms)

        return alarms

    def create_alarm(self, alarm):
        """Create an alarm.

        :param alarm: The alarm to create.
        """
        session = self._engine_facade.get_session()
        with session.begin():
            alarm_row = models.Alarm(alarm_id=alarm.alarm_id)
            alarm_row.update(alarm.as_dict())
            session.add(alarm_row)

        return self._row_to_alarm_model(alarm_row)

    def update_alarm(self, alarm):
        """Update an alarm.

        :param alarm: the new Alarm to update
        """
        session = self._engine_facade.get_session()
        with session.begin():
            alarm_row = session.merge(models.Alarm(alarm_id=alarm.alarm_id))
            alarm_row.update(alarm.as_dict())

        return self._row_to_alarm_model(alarm_row)

    def delete_alarm(self, alarm_id):
        """Delete an alarm and its history data.

        :param alarm_id: ID of the alarm to delete
        """
        session = self._engine_facade.get_session()
        with session.begin():
            session.query(models.Alarm).filter(
                models.Alarm.alarm_id == alarm_id).delete()
            # FIXME(liusheng): we should use delete cascade
            session.query(models.AlarmChange).filter(
                models.AlarmChange.alarm_id == alarm_id).delete()

    @staticmethod
    def _row_to_alarm_change_model(row):
        return alarm_api_models.AlarmChange(event_id=row.event_id,
                                            alarm_id=row.alarm_id,
                                            type=row.type,
                                            detail=row.detail,
                                            user_id=row.user_id,
                                            project_id=row.project_id,
                                            on_behalf_of=row.on_behalf_of,
                                            timestamp=row.timestamp)

    def query_alarms(self, filter_expr=None, orderby=None, limit=None):
        """Yields a lists of alarms that match filter."""
        return self._retrieve_data(filter_expr, orderby, limit, models.Alarm)

    def _retrieve_alarm_history(self, query):
        return (self._row_to_alarm_change_model(x) for x in query.all())

    def query_alarm_history(self, filter_expr=None, orderby=None, limit=None):
        """Return an iterable of model.AlarmChange objects."""
        return self._retrieve_data(filter_expr, orderby, limit,
                                   models.AlarmChange)

    def get_alarm_changes(self,
                          alarm_id,
                          on_behalf_of,
                          user=None,
                          project=None,
                          alarm_type=None,
                          severity=None,
                          start_timestamp=None,
                          start_timestamp_op=None,
                          end_timestamp=None,
                          end_timestamp_op=None):
        """Yields list of AlarmChanges describing alarm history

        Changes are always sorted in reverse order of occurrence, given
        the importance of currency.

        Segregation for non-administrative users is done on the basis
        of the on_behalf_of parameter. This allows such users to have
        visibility on both the changes initiated by themselves directly
        (generally creation, rule changes, or deletion) and also on those
        changes initiated on their behalf by the alarming service (state
        transitions after alarm thresholds are crossed).

        :param alarm_id: ID of alarm to return changes for
        :param on_behalf_of: ID of tenant to scope changes query (None for
                             administrative user, indicating all projects)
        :param user: Optional ID of user to return changes for
        :param project: Optional ID of project to return changes for
        :param alarm_type: Optional change type
        :param severity: Optional alarm severity
        :param start_timestamp: Optional modified timestamp start range
        :param start_timestamp_op: Optional timestamp start range operation
        :param end_timestamp: Optional modified timestamp end range
        :param end_timestamp_op: Optional timestamp end range operation
        """
        session = self._engine_facade.get_session()
        query = session.query(models.AlarmChange)
        query = query.filter(models.AlarmChange.alarm_id == alarm_id)

        if on_behalf_of is not None:
            query = query.filter(
                models.AlarmChange.on_behalf_of == on_behalf_of)
        if user is not None:
            query = query.filter(models.AlarmChange.user_id == user)
        if project is not None:
            query = query.filter(models.AlarmChange.project_id == project)
        if alarm_type is not None:
            query = query.filter(models.AlarmChange.type == alarm_type)
        if severity is not None:
            query = query.filter(models.AlarmChange.severity == severity)
        if start_timestamp:
            if start_timestamp_op == 'gt':
                query = query.filter(
                    models.AlarmChange.timestamp > start_timestamp)
            else:
                query = query.filter(
                    models.AlarmChange.timestamp >= start_timestamp)
        if end_timestamp:
            if end_timestamp_op == 'le':
                query = query.filter(
                    models.AlarmChange.timestamp <= end_timestamp)
            else:
                query = query.filter(
                    models.AlarmChange.timestamp < end_timestamp)

        query = query.order_by(desc(models.AlarmChange.timestamp))
        return self._retrieve_alarm_history(query)

    def record_alarm_change(self, alarm_change):
        """Record alarm change event."""
        session = self._engine_facade.get_session()
        with session.begin():
            alarm_change_row = models.AlarmChange(
                event_id=alarm_change['event_id'])
            alarm_change_row.update(alarm_change)
            session.add(alarm_change_row)

    def clear_expired_alarm_history_data(self, alarm_history_ttl):
        """Clear expired alarm history data from the backend storage system.

        Clearing occurs according to the time-to-live.

        :param alarm_history_ttl: Number of seconds to keep alarm history
                                  records for.
        """
        session = self._engine_facade.get_session()
        with session.begin():
            valid_start = (timeutils.utcnow() -
                           datetime.timedelta(seconds=alarm_history_ttl))
            deleted_rows = (session.query(models.AlarmChange).filter(
                models.AlarmChange.timestamp < valid_start).delete())
            LOG.info(_LI("%d alarm histories are removed from database"),
                     deleted_rows)
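
A minimal usage sketch for the driver above. The configuration object, the
in-memory SQLite URL, and the alarm id are assumptions for illustration; the
calls themselves follow the method signatures shown in Example #1.

conn = Connection(conf, "sqlite://")   # "conf" is a hypothetical oslo.config object
conn.upgrade()                         # create the schema and stamp the alembic head
for alarm in conn.get_alarms(state="alarm", severity="critical"):
    print(alarm.alarm_id, alarm.name)
# History since a point in time; 'gt' makes the lower bound exclusive, any
# other value keeps it inclusive, mirroring the filter logic above.
changes = conn.get_alarm_changes("some-alarm-id", None,
                                 start_timestamp=datetime.datetime(2020, 1, 1),
                                 start_timestamp_op="gt")
conn.clear_expired_alarm_history_data(86400)   # drop history older than one day
conn.disconnect()
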
Example #2
class Connection(hbase_base.Connection, base.Connection):
    """Put the alarm data into a HBase database

    Collections:

    - alarm:

      - row_key: uuid of alarm
      - Column Families:

        f: contains the raw incoming alarm data

    - alarm_h:

      - row_key: uuid of alarm + ":" + reversed timestamp
      - Column Families:

        f: raw incoming alarm_history data. The timestamp defaults to now()
          if not provided
    """

    CAPABILITIES = base.update_nested(base.Connection.CAPABILITIES,
                                      AVAILABLE_CAPABILITIES)
    STORAGE_CAPABILITIES = base.update_nested(
        base.Connection.STORAGE_CAPABILITIES,
        AVAILABLE_STORAGE_CAPABILITIES,
    )
    _memory_instance = None

    ALARM_TABLE = "alarm"
    ALARM_HISTORY_TABLE = "alarm_h"

    def upgrade(self):
        tables = [self.ALARM_HISTORY_TABLE, self.ALARM_TABLE]
        column_families = {'f': dict()}
        with self.conn_pool.connection() as conn:
            hbase_utils.create_tables(conn, tables, column_families)
            hbase_migration.migrate_tables(conn, tables)

    def clear(self):
        LOG.debug('Dropping HBase schema...')
        with self.conn_pool.connection() as conn:
            for table in [self.ALARM_TABLE, self.ALARM_HISTORY_TABLE]:
                try:
                    conn.disable_table(table)
                except Exception:
                    LOG.debug('Cannot disable table; ignoring the error')
                try:
                    conn.delete_table(table)
                except Exception:
                    LOG.debug('Cannot delete table; ignoring the error')

    def update_alarm(self, alarm, upsert=False):
        """Create an alarm.

        :param alarm: The alarm to create. It is Alarm object, so we need to
          call as_dict()
        """
        _id = alarm.alarm_id
        alarm_to_store = hbase_utils.serialize_entry(alarm.as_dict())
        with self.conn_pool.connection() as conn:
            alarm_table = conn.table(self.ALARM_TABLE)
            if not upsert:
                q = hbase_utils.make_query(alarm_id=alarm.alarm_id)
                query_alarm = alarm_table.scan(filter=q)
                if len(list(query_alarm)) == 0:
                    raise storage.AlarmNotFound(alarm.alarm_id)
            alarm_table.put(_id, alarm_to_store)
            stored_alarm = hbase_utils.deserialize_entry(alarm_table.row(_id))
        return models.Alarm(**stored_alarm)

    def create_alarm(self, alarm):
        return self.update_alarm(alarm, upsert=True)

    def delete_alarm(self, alarm_id):
        """Delete an alarm and its history data."""
        with self.conn_pool.connection() as conn:
            alarm_table = conn.table(self.ALARM_TABLE)
            alarm_table.delete(alarm_id)
            q = hbase_utils.make_query(alarm_id=alarm_id)
            alarm_history_table = conn.table(self.ALARM_HISTORY_TABLE)
            for alarm_id, ignored in alarm_history_table.scan(filter=q):
                alarm_history_table.delete(alarm_id)

    def get_alarms(self,
                   name=None,
                   user=None,
                   state=None,
                   meter=None,
                   project=None,
                   enabled=None,
                   alarm_id=None,
                   alarm_type=None,
                   severity=None,
                   exclude=None,
                   pagination=None):
        if pagination:
            raise aodh.NotImplementedError('Pagination query not implemented')
        if meter:
            raise aodh.NotImplementedError('Filter by meter not implemented')

        q = hbase_utils.make_query(alarm_id=alarm_id,
                                   name=name,
                                   enabled=enabled,
                                   user_id=user,
                                   project_id=project,
                                   state=state,
                                   type=alarm_type,
                                   severity=severity,
                                   exclude=exclude)

        with self.conn_pool.connection() as conn:
            alarm_table = conn.table(self.ALARM_TABLE)
            gen = alarm_table.scan(filter=q)
            alarms = [
                hbase_utils.deserialize_entry(data) for ignored, data in gen
            ]
            for alarm in sorted(alarms,
                                key=operator.itemgetter('timestamp'),
                                reverse=True):
                yield models.Alarm(**alarm)

    def get_alarm_changes(self,
                          alarm_id,
                          on_behalf_of,
                          user=None,
                          project=None,
                          alarm_type=None,
                          severity=None,
                          start_timestamp=None,
                          start_timestamp_op=None,
                          end_timestamp=None,
                          end_timestamp_op=None,
                          pagination=None):
        if pagination:
            raise aodh.NotImplementedError('Pagination query not implemented')
        q = hbase_utils.make_query(alarm_id=alarm_id,
                                   on_behalf_of=on_behalf_of,
                                   type=alarm_type,
                                   user_id=user,
                                   project_id=project,
                                   severity=severity)
        start_row, end_row = hbase_utils.make_timestamp_query(
            hbase_utils.make_general_rowkey_scan,
            start=start_timestamp,
            start_op=start_timestamp_op,
            end=end_timestamp,
            end_op=end_timestamp_op,
            bounds_only=True,
            some_id=alarm_id)
        with self.conn_pool.connection() as conn:
            alarm_history_table = conn.table(self.ALARM_HISTORY_TABLE)
            gen = alarm_history_table.scan(filter=q,
                                           row_start=start_row,
                                           row_stop=end_row)
            for ignored, data in gen:
                stored_entry = hbase_utils.deserialize_entry(data)
                yield models.AlarmChange(**stored_entry)

    def record_alarm_change(self, alarm_change):
        """Record alarm change event."""
        alarm_change_dict = hbase_utils.serialize_entry(alarm_change)
        ts = alarm_change.get('timestamp') or datetime.datetime.now()
        rts = hbase_utils.timestamp(ts)
        with self.conn_pool.connection() as conn:
            alarm_history_table = conn.table(self.ALARM_HISTORY_TABLE)
            alarm_history_table.put(
                hbase_utils.prepare_key(alarm_change.get('alarm_id'), rts),
                alarm_change_dict)
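
The alarm_h row key described in the class docstring pairs the alarm uuid with
a reversed timestamp so that a plain HBase scan returns the newest history
entries first. The real encoding lives in hbase_utils (timestamp/prepare_key);
the helpers below are only a hypothetical sketch of the general idea.

import calendar
import datetime

_MAX_TS = 0x7fffffffffffffff

def reversed_timestamp(ts):
    # Newer timestamps map to smaller numbers, so the lexicographic row-key
    # ordering used by HBase becomes newest-first.
    millis = calendar.timegm(ts.utctimetuple()) * 1000 + ts.microsecond // 1000
    return _MAX_TS - millis

def history_row_key(alarm_id, ts):
    # Zero-pad so that string comparison matches numeric ordering.
    return "%s:%019d" % (alarm_id, reversed_timestamp(ts))

# history_row_key("4l3rt-uuid", datetime.datetime.utcnow())
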
Example #3
class Connection(base.Connection):
    """Put the data into a SQLAlchemy database. """
    CAPABILITIES = base.update_nested(base.Connection.CAPABILITIES,
                                      AVAILABLE_CAPABILITIES)
    STORAGE_CAPABILITIES = base.update_nested(
        base.Connection.STORAGE_CAPABILITIES,
        AVAILABLE_STORAGE_CAPABILITIES,
    )

    def __init__(self, conf, url):
        # Set max_retries to 0: oslo.db may otherwise retry the db connection
        # up to max_retries ^ 2 times on failure, and db reconnection is
        # already handled in storage.__init__.get_connection_from_config.
        options = dict(conf.database.items())
        options['max_retries'] = 0
        # oslo.db doesn't support options defined by Aodh
        for opt in storage.OPTS:
            options.pop(opt.name, None)
        self._engine_facade = enginefacade.LegacyEngineFacade(
            self.dress_url(url), **options)

        if osprofiler_sqlalchemy:
            osprofiler_sqlalchemy.add_tracing(sqlalchemy,
                                              self._engine_facade.get_engine(),
                                              'db')
        self.conf = conf

    @staticmethod
    def dress_url(url):
        # If no explicit driver has been set, we default to pymysql
        if url.startswith("mysql://"):
            url = sqlalchemy_url.make_url(url)
            url = url.set(drivername="mysql+pymysql")
            return str(url)
        return url

    def disconnect(self):
        self._engine_facade.get_engine().dispose()

    def _get_alembic_config(self):
        cfg = config.Config("%s/sqlalchemy/alembic/alembic.ini" %
                            os.path.dirname(__file__))
        cfg.set_main_option('sqlalchemy.url',
                            self.conf.database.connection.replace("%", "%%"))
        return cfg

    def upgrade(self, nocreate=False):
        cfg = self._get_alembic_config()
        cfg.conf = self.conf
        if nocreate:
            command.upgrade(cfg, "head")
        else:
            engine = self._engine_facade.get_engine()
            ctxt = migration.MigrationContext.configure(engine.connect())
            current_version = ctxt.get_current_revision()
            if current_version is None:
                models.Base.metadata.create_all(engine, checkfirst=False)
                command.stamp(cfg, "head")
            else:
                command.upgrade(cfg, "head")

    def clear(self):
        engine = self._engine_facade.get_engine()
        for table in reversed(models.Base.metadata.sorted_tables):
            engine.execute(table.delete())
        engine.dispose()

    def _retrieve_data(self, filter_expr, orderby, limit, table):
        if limit == 0:
            return []

        session = self._engine_facade.get_session()
        engine = self._engine_facade.get_engine()
        query = session.query(table)
        transformer = sql_utils.QueryTransformer(table,
                                                 query,
                                                 dialect=engine.dialect.name)
        if filter_expr is not None:
            transformer.apply_filter(filter_expr)

        transformer.apply_options(orderby, limit)

        retrieve = {
            models.Alarm: self._retrieve_alarms,
            models.AlarmChange: self._retrieve_alarm_history
        }
        return retrieve[table](transformer.get_query())

    @staticmethod
    def _row_to_alarm_model(row):
        return alarm_api_models.Alarm(
            alarm_id=row.alarm_id,
            enabled=row.enabled,
            type=row.type,
            name=row.name,
            description=row.description,
            timestamp=row.timestamp,
            user_id=row.user_id,
            project_id=row.project_id,
            state=row.state,
            state_timestamp=row.state_timestamp,
            state_reason=row.state_reason,
            ok_actions=row.ok_actions,
            alarm_actions=row.alarm_actions,
            insufficient_data_actions=row.insufficient_data_actions,
            rule=row.rule,
            time_constraints=row.time_constraints,
            repeat_actions=row.repeat_actions,
            severity=row.severity,
            evaluate_timestamp=row.evaluate_timestamp)

    def _retrieve_alarms(self, query):
        return [self._row_to_alarm_model(x) for x in query.all()]

    @staticmethod
    def _get_pagination_query(session, query, pagination, api_model, model):
        if not pagination.get('sort'):
            pagination['sort'] = api_model.DEFAULT_SORT
        marker = None
        if pagination.get('marker'):
            key_attr = getattr(model, api_model.PRIMARY_KEY)
            marker_query = copy.copy(query)
            marker_query = marker_query.filter(
                key_attr == pagination['marker'])
            try:
                marker = marker_query.limit(1).one()
            except exc.NoResultFound:
                raise storage.InvalidMarker('Marker %s not found.' %
                                            pagination['marker'])
        limit = pagination.get('limit')
        # we sort by "severity" by its semantic than its alphabetical
        # order when "severity" specified in sorts.
        for sort_key, sort_dir in pagination['sort'][::-1]:
            if sort_key == 'severity':
                engine = session.connection()
                if engine.dialect.name != "mysql":
                    raise aodh.NotImplementedError
                sort_dir_func = {'asc': asc, 'desc': desc}[sort_dir]
                query = query.order_by(
                    sort_dir_func(
                        func.field(getattr(model, sort_key), 'low', 'moderate',
                                   'critical')))
                pagination['sort'].remove((sort_key, sort_dir))

        sort_keys = [s[0] for s in pagination['sort']]
        sort_dirs = [s[1] for s in pagination['sort']]
        return oslo_sql_utils.paginate_query(query,
                                             model,
                                             limit,
                                             sort_keys,
                                             sort_dirs=sort_dirs,
                                             marker=marker)

    def get_alarms(self, meter=None, pagination=None, **kwargs):
        """Yields a lists of alarms that match filters."""
        pagination = pagination or {}
        session = self._engine_facade.get_session()
        query = session.query(models.Alarm)
        query = apply_filters(query, models.Alarm, **kwargs)
        query = self._get_pagination_query(session, query, pagination,
                                           alarm_api_models.Alarm,
                                           models.Alarm)

        alarms = self._retrieve_alarms(query)

        # TODO(cmart): improve this by using sqlalchemy.func factory
        if meter is not None:
            alarms = filter(
                lambda row: row.rule.get('meter_name', None) == meter, alarms)

        return alarms

    def create_alarm(self, alarm):
        """Create an alarm.

        :param alarm: The alarm to create.
        """
        session = self._engine_facade.get_session()
        with session.begin():
            alarm_row = models.Alarm(alarm_id=alarm.alarm_id)
            alarm_row.update(alarm.as_dict())
            session.add(alarm_row)

        return self._row_to_alarm_model(alarm_row)

    def update_alarm(self, alarm):
        """Update an alarm.

        :param alarm: the new Alarm to update
        """
        session = self._engine_facade.get_session()
        with session.begin():
            count = session.query(models.Alarm).filter(
                models.Alarm.alarm_id == alarm.alarm_id).update(
                    alarm.as_dict())
            if not count:
                raise storage.AlarmNotFound(alarm.alarm_id)
        return alarm

    def delete_alarm(self, alarm_id):
        """Delete an alarm and its history data.

        :param alarm_id: ID of the alarm to delete
        """
        session = self._engine_facade.get_session()
        with session.begin():
            session.query(models.Alarm).filter(
                models.Alarm.alarm_id == alarm_id).delete()
            # FIXME(liusheng): we should use delete cascade
            session.query(models.AlarmChange).filter(
                models.AlarmChange.alarm_id == alarm_id).delete()

    @staticmethod
    def _row_to_alarm_change_model(row):
        return alarm_api_models.AlarmChange(event_id=row.event_id,
                                            alarm_id=row.alarm_id,
                                            type=row.type,
                                            detail=row.detail,
                                            user_id=row.user_id,
                                            project_id=row.project_id,
                                            on_behalf_of=row.on_behalf_of,
                                            timestamp=row.timestamp,
                                            severity=row.severity)

    def query_alarms(self, filter_expr=None, orderby=None, limit=None):
        """Yields a lists of alarms that match filter."""
        return self._retrieve_data(filter_expr, orderby, limit, models.Alarm)

    def _retrieve_alarm_history(self, query):
        return (self._row_to_alarm_change_model(x) for x in query.all())

    def query_alarm_history(self, filter_expr=None, orderby=None, limit=None):
        """Return an iterable of model.AlarmChange objects."""
        return self._retrieve_data(filter_expr, orderby, limit,
                                   models.AlarmChange)

    def get_alarm_changes(self,
                          alarm_id,
                          on_behalf_of,
                          user=None,
                          project=None,
                          alarm_type=None,
                          severity=None,
                          start_timestamp=None,
                          start_timestamp_op=None,
                          end_timestamp=None,
                          end_timestamp_op=None,
                          pagination=None):
        """Yields list of AlarmChanges describing alarm history

        Changes are always sorted in reverse order of occurrence, given
        the importance of currency.

        Segregation for non-administrative users is done on the basis
        of the on_behalf_of parameter. This allows such users to have
        visibility on both the changes initiated by themselves directly
        (generally creation, rule changes, or deletion) and also on those
        changes initiated on their behalf by the alarming service (state
        transitions after alarm thresholds are crossed).

        :param alarm_id: ID of alarm to return changes for
        :param on_behalf_of: ID of tenant to scope changes query (None for
                             administrative user, indicating all projects)
        :param user: Optional ID of user to return changes for
        :param project: Optional ID of project to return changes for
        :param alarm_type: Optional change type
        :param severity: Optional alarm severity
        :param start_timestamp: Optional modified timestamp start range
        :param start_timestamp_op: Optional timestamp start range operation
        :param end_timestamp: Optional modified timestamp end range
        :param end_timestamp_op: Optional timestamp end range operation
        :param pagination: Pagination query parameters.
        """
        pagination = pagination or {}
        session = self._engine_facade.get_session()
        query = session.query(models.AlarmChange)
        query = query.filter(models.AlarmChange.alarm_id == alarm_id)

        if on_behalf_of is not None:
            query = query.filter(
                models.AlarmChange.on_behalf_of == on_behalf_of)
        if user is not None:
            query = query.filter(models.AlarmChange.user_id == user)
        if project is not None:
            query = query.filter(models.AlarmChange.project_id == project)
        if alarm_type is not None:
            query = query.filter(models.AlarmChange.type == alarm_type)
        if severity is not None:
            query = query.filter(models.AlarmChange.severity == severity)
        if start_timestamp:
            if start_timestamp_op == 'gt':
                query = query.filter(
                    models.AlarmChange.timestamp > start_timestamp)
            else:
                query = query.filter(
                    models.AlarmChange.timestamp >= start_timestamp)
        if end_timestamp:
            if end_timestamp_op == 'le':
                query = query.filter(
                    models.AlarmChange.timestamp <= end_timestamp)
            else:
                query = query.filter(
                    models.AlarmChange.timestamp < end_timestamp)

        query = self._get_pagination_query(session, query, pagination,
                                           alarm_api_models.AlarmChange,
                                           models.AlarmChange)
        return self._retrieve_alarm_history(query)

    def record_alarm_change(self, alarm_change):
        """Record alarm change event."""
        session = self._engine_facade.get_session()
        with session.begin():
            alarm_change_row = models.AlarmChange(
                event_id=alarm_change['event_id'])
            alarm_change_row.update(alarm_change)
            session.add(alarm_change_row)

    def clear_expired_alarm_history_data(self, ttl, max_count=100):
        """Clear expired alarm history data from the backend storage system.

        Clearing occurs according to the time-to-live.

        :param ttl: Number of seconds to keep alarm history records for.
        :param max_count: Maximum number of records to delete per call.
        """
        session = self._engine_facade.get_session()
        with session.begin():
            end = timeutils.utcnow() - datetime.timedelta(seconds=ttl)
            alarm_history_q = (session.query(
                models.AlarmChange.event_id).filter(
                    models.AlarmChange.timestamp < end))
            event_ids = [i[0] for i in alarm_history_q.limit(max_count)]
            deleted_rows = session.query(models.AlarmChange).filter(
                models.AlarmChange.event_id.in_(event_ids)).delete(
                    synchronize_session="fetch")
            LOG.info("%d alarm histories are removed from database",
                     deleted_rows)

    def conditional_update(self, model, values, expected_values, filters=None):
        """Compare-and-swap conditional update SQLAlchemy implementation."""
        filters = filters or {}
        filters.update(expected_values)

        session = self._engine_facade.get_session()
        query = session.query(model)
        if filters:
            query = query.filter_by(**filters)

        update_args = {'synchronize_session': False}

        result = query.update(values, **update_args)
        return 0 != result

    @staticmethod
    def _row_to_quota_model(row):
        return alarm_api_models.Quota(
            project_id=row.project_id,
            resource=row.resource,
            limit=row.limit,
        )

    def _retrieve_quotas(self, query):
        return [self._row_to_quota_model(x) for x in query.all()]

    def get_quotas(self, project_id):
        """Get resource quota for the given project."""
        filters = {'project_id': project_id}
        session = self._engine_facade.get_session()
        query = session.query(models.Quota).filter_by(**filters)
        return self._retrieve_quotas(query)

    def set_quotas(self, project_id, quotas):
        """Set resource quota for the given user."""
        session = self._engine_facade.get_session()

        with session.begin():
            for q in quotas:
                values = {
                    'project_id': project_id,
                    'resource': q['resource'],
                }

                quota = session.query(models.Quota).filter_by(**values).first()
                if not quota:
                    new_quota = models.Quota(project_id=project_id,
                                             resource=q['resource'],
                                             limit=q['limit'])
                    session.add(new_quota)
                else:
                    values['limit'] = q['limit']
                    quota.update(values.copy())

        filters = {'project_id': project_id}
        query = session.query(models.Quota).filter_by(**filters)
        return self._retrieve_quotas(query)

    def delete_quotas(self, project_id):
        filters = {'project_id': project_id}
        session = self._engine_facade.get_session()
        session.query(models.Quota).filter_by(**filters).delete()
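
A pagination sketch for the driver above ("conf" and the URL are again
hypothetical). get_alarms() takes a pagination dict with optional "sort"
(a list of (key, direction) pairs), "limit", and "marker", which is assumed
here to be the alarm_id of the last item on the previous page.

conn = Connection(conf, "sqlite://")
page = {"sort": [("timestamp", "desc")], "limit": 50}
alarms = conn.get_alarms(pagination=dict(page))
if alarms:
    # Request the next page by passing the last alarm's id as the marker.
    next_page = dict(page, marker=alarms[-1].alarm_id)
    more = conn.get_alarms(pagination=next_page)

# Compare-and-swap via conditional_update(): move the alarm to 'alarm' state
# only if it is still 'ok' (the values here are hypothetical).
swapped = conn.conditional_update(models.Alarm,
                                  {'state': 'alarm'},
                                  {'alarm_id': 'a-1', 'state': 'ok'})
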
Example #4
class Connection(base.Connection):
    """Base Alarm Connection class for MongoDB driver."""
    CAPABILITIES = base.update_nested(base.Connection.CAPABILITIES,
                                      COMMON_AVAILABLE_CAPABILITIES)

    STORAGE_CAPABILITIES = base.update_nested(
        base.Connection.STORAGE_CAPABILITIES,
        AVAILABLE_STORAGE_CAPABILITIES,
    )

    def upgrade(self):
        # create collection if not present
        if 'alarm' not in self.db.conn.collection_names():
            self.db.conn.create_collection('alarm')
        if 'alarm_history' not in self.db.conn.collection_names():
            self.db.conn.create_collection('alarm_history')

    def update_alarm(self, alarm):
        """Update alarm."""
        data = alarm.as_dict()

        self.db.alarm.update(
            {'alarm_id': alarm.alarm_id},
            {'$set': data},
            upsert=True)

        stored_alarm = self.db.alarm.find({'alarm_id': alarm.alarm_id})[0]
        del stored_alarm['_id']
        self._ensure_encapsulated_rule_format(stored_alarm)
        self._ensure_time_constraints(stored_alarm)
        return models.Alarm(**stored_alarm)

    create_alarm = update_alarm

    def delete_alarm(self, alarm_id):
        """Delete an alarm and its history data."""
        self.db.alarm.remove({'alarm_id': alarm_id})
        self.db.alarm_history.remove({'alarm_id': alarm_id})

    def record_alarm_change(self, alarm_change):
        """Record alarm change event."""
        self.db.alarm_history.insert(alarm_change.copy())

    def get_alarms(self, name=None, user=None, state=None, meter=None,
                   project=None, enabled=None, alarm_id=None,
                   alarm_type=None, severity=None, exclude=None):
        """Yields a lists of alarms that match filters.

        :param name: Optional name for alarm.
        :param user: Optional ID for user that owns the resource.
        :param state: Optional string for alarm state.
        :param meter: Optional string for alarms associated with meter.
        :param project: Optional ID for project that owns the resource.
        :param enabled: Optional boolean to list disabled alarms.
        :param alarm_id: Optional alarm_id to return one alarm.
        :param alarm_type: Optional alarm type.
        :param severity: Optional alarm severity.
        :param exclude: Optional dict for inequality constraint.
        """
        q = {}
        if user is not None:
            q['user_id'] = user
        if project is not None:
            q['project_id'] = project
        if name is not None:
            q['name'] = name
        if enabled is not None:
            q['enabled'] = enabled
        if alarm_id is not None:
            q['alarm_id'] = alarm_id
        if state is not None:
            q['state'] = state
        if meter is not None:
            q['rule.meter_name'] = meter
        if alarm_type is not None:
            q['type'] = alarm_type
        if severity is not None:
            q['severity'] = severity
        if exclude is not None:
            for key, value in six.iteritems(exclude):
                q[key] = {'$ne': value}

        return self._retrieve_alarms(q,
                                     [("timestamp",
                                       pymongo.DESCENDING)],
                                     None)

    def get_alarm_changes(self, alarm_id, on_behalf_of,
                          user=None, project=None, alarm_type=None,
                          severity=None, start_timestamp=None,
                          start_timestamp_op=None, end_timestamp=None,
                          end_timestamp_op=None):
        """Yields list of AlarmChanges describing alarm history

        Changes are always sorted in reverse order of occurrence, given
        the importance of currency.

        Segregation for non-administrative users is done on the basis
        of the on_behalf_of parameter. This allows such users to have
        visibility on both the changes initiated by themselves directly
        (generally creation, rule changes, or deletion) and also on those
        changes initiated on their behalf by the alarming service (state
        transitions after alarm thresholds are crossed).

        :param alarm_id: ID of alarm to return changes for
        :param on_behalf_of: ID of tenant to scope changes query (None for
                             administrative user, indicating all projects)
        :param user: Optional ID of user to return changes for
        :param project: Optional ID of project to return changes for
        :param alarm_type: Optional change type
        :param severity: Optional change severity
        :param start_timestamp: Optional modified timestamp start range
        :param start_timestamp_op: Optional timestamp start range operation
        :param end_timestamp: Optional modified timestamp end range
        :param end_timestamp_op: Optional timestamp end range operation
        """
        q = dict(alarm_id=alarm_id)
        if on_behalf_of is not None:
            q['on_behalf_of'] = on_behalf_of
        if user is not None:
            q['user_id'] = user
        if project is not None:
            q['project_id'] = project
        if alarm_type is not None:
            q['type'] = alarm_type
        if severity is not None:
            q['severity'] = severity
        if start_timestamp or end_timestamp:
            ts_range = pymongo_utils.make_timestamp_range(start_timestamp,
                                                          end_timestamp,
                                                          start_timestamp_op,
                                                          end_timestamp_op)
            if ts_range:
                q['timestamp'] = ts_range

        return self._retrieve_alarm_changes(q,
                                            [("timestamp",
                                              pymongo.DESCENDING)],
                                            None)

    def query_alarms(self, filter_expr=None, orderby=None, limit=None):
        """Return an iterable of model.Alarm objects."""
        return self._retrieve_data(filter_expr, orderby, limit,
                                   models.Alarm)

    def query_alarm_history(self, filter_expr=None, orderby=None, limit=None):
        """Return an iterable of model.AlarmChange objects."""
        return self._retrieve_data(filter_expr,
                                   orderby,
                                   limit,
                                   models.AlarmChange)

    def _retrieve_data(self, filter_expr, orderby, limit, model):
        if limit == 0:
            return []
        query_filter = {}
        orderby_filter = [("timestamp", pymongo.DESCENDING)]
        transformer = pymongo_utils.QueryTransformer()
        if orderby is not None:
            orderby_filter = transformer.transform_orderby(orderby)
        if filter_expr is not None:
            query_filter = transformer.transform_filter(filter_expr)

        retrieve = {models.Alarm: self._retrieve_alarms,
                    models.AlarmChange: self._retrieve_alarm_changes}
        return retrieve[model](query_filter, orderby_filter, limit)

    def _retrieve_alarms(self, query_filter, orderby, limit):
        if limit is not None:
            alarms = self.db.alarm.find(query_filter,
                                        limit=limit,
                                        sort=orderby)
        else:
            alarms = self.db.alarm.find(query_filter, sort=orderby)

        for alarm in alarms:
            a = {}
            a.update(alarm)
            del a['_id']
            self._ensure_encapsulated_rule_format(a)
            self._ensure_time_constraints(a)
            yield models.Alarm(**a)

    def _retrieve_alarm_changes(self, query_filter, orderby, limit):
        if limit is not None:
            alarms_history = self.db.alarm_history.find(query_filter,
                                                        limit=limit,
                                                        sort=orderby)
        else:
            alarms_history = self.db.alarm_history.find(
                query_filter, sort=orderby)

        for alarm_history in alarms_history:
            ah = {}
            ah.update(alarm_history)
            del ah['_id']
            yield models.AlarmChange(**ah)

    @classmethod
    def _ensure_encapsulated_rule_format(cls, alarm):
        """Ensure the alarm returned by the storage have the correct format.

        The previous format looks like:
        {
            'alarm_id': '0ld-4l3rt',
            'enabled': True,
            'name': 'old-alert',
            'description': 'old-alert',
            'timestamp': None,
            'meter_name': 'cpu',
            'user_id': 'me',
            'project_id': 'and-da-boys',
            'comparison_operator': 'lt',
            'threshold': 36,
            'statistic': 'count',
            'evaluation_periods': 1,
            'period': 60,
            'state': "insufficient data",
            'state_timestamp': None,
            'ok_actions': [],
            'alarm_actions': ['http://nowhere/alarms'],
            'insufficient_data_actions': [],
            'repeat_actions': False,
            'matching_metadata': {'key': 'value'}
            # or 'matching_metadata': [{'key': 'key', 'value': 'value'}]
        }
        """

        if isinstance(alarm.get('rule'), dict):
            return

        alarm['type'] = 'threshold'
        alarm['rule'] = {}
        alarm['matching_metadata'] = cls._decode_matching_metadata(
            alarm['matching_metadata'])
        for field in ['period', 'evaluation_periods', 'threshold',
                      'statistic', 'comparison_operator', 'meter_name']:
            if field in alarm:
                alarm['rule'][field] = alarm[field]
                del alarm[field]

        query = []
        for key in alarm['matching_metadata']:
            query.append({'field': key,
                          'op': 'eq',
                          'value': alarm['matching_metadata'][key],
                          'type': 'string'})
        del alarm['matching_metadata']
        alarm['rule']['query'] = query

    @staticmethod
    def _decode_matching_metadata(matching_metadata):
        if isinstance(matching_metadata, dict):
            # note(sileht): keep compatibility with alarms whose
            # matching_metadata is a dict
            return matching_metadata
        else:
            new_matching_metadata = {}
            for elem in matching_metadata:
                new_matching_metadata[elem['key']] = elem['value']
            return new_matching_metadata

    @staticmethod
    def _ensure_time_constraints(alarm):
        """Ensures the alarm has a time constraints field."""
        if 'time_constraints' not in alarm:
            alarm['time_constraints'] = []
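
A standalone illustration of the legacy-format upgrade performed by
_ensure_encapsulated_rule_format above: a pre-"rule" alarm document (the
fields listed in its docstring) is folded into the nested threshold-rule
layout that the current models expect.

legacy = {
    'alarm_id': '0ld-4l3rt',
    'meter_name': 'cpu',
    'comparison_operator': 'lt',
    'threshold': 36,
    'statistic': 'count',
    'evaluation_periods': 1,
    'period': 60,
    'matching_metadata': {'key': 'value'},
}
Connection._ensure_encapsulated_rule_format(legacy)
# legacy now carries type='threshold' and a nested rule:
# {'meter_name': 'cpu', 'comparison_operator': 'lt', 'threshold': 36,
#  'statistic': 'count', 'evaluation_periods': 1, 'period': 60,
#  'query': [{'field': 'key', 'op': 'eq', 'value': 'value', 'type': 'string'}]}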