def get_metrics(self, uuids):
    if not uuids:
        return []
    session = self.engine_facade.get_session()
    query = session.query(Metric).filter(Metric.id.in_(uuids)).options(
        sqlalchemy.orm.joinedload('resource'))
    metrics = list(query.all())
    session.expunge_all()
    return metrics
def get_resource(self, resource_type, resource_id, with_metrics=False):
    resource_cls = self._resource_type_to_class(resource_type)
    session = self.engine_facade.get_session()
    q = session.query(resource_cls).filter(resource_cls.id == resource_id)
    if with_metrics:
        q = q.options(sqlalchemy.orm.joinedload('metrics'))
    r = q.first()
    session.expunge_all()
    return r
def get_metrics(self, uuids, active_only=True, with_resource=False):
    if not uuids:
        return []
    session = self.engine_facade.get_session()
    query = session.query(Metric).filter(Metric.id.in_(uuids))
    if active_only:
        query = query.filter(Metric.status == 'active')
    if with_resource:
        query = query.options(sqlalchemy.orm.joinedload('resource'))
    metrics = list(query.all())
    session.expunge_all()
    return metrics
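# A minimal, self-contained sketch (not part of this driver) of why the
# methods here pair sqlalchemy.orm.joinedload() with session.expunge_all():
# once instances are detached, lazy loads raise DetachedInstanceError, so any
# relationship the caller needs must be eagerly loaded first. The Parent and
# Child models and the in-memory SQLite engine below are hypothetical.
import sqlalchemy
import sqlalchemy.orm
from sqlalchemy.ext.declarative import declarative_base

Base = declarative_base()


class Parent(Base):
    __tablename__ = 'parent'
    id = sqlalchemy.Column(sqlalchemy.Integer, primary_key=True)
    children = sqlalchemy.orm.relationship('Child')


class Child(Base):
    __tablename__ = 'child'
    id = sqlalchemy.Column(sqlalchemy.Integer, primary_key=True)
    parent_id = sqlalchemy.Column(sqlalchemy.Integer,
                                  sqlalchemy.ForeignKey('parent.id'))


engine = sqlalchemy.create_engine('sqlite://')
Base.metadata.create_all(engine)
session = sqlalchemy.orm.sessionmaker(bind=engine)()
session.add(Parent(id=1))
session.add(Child(id=1, parent_id=1))
session.commit()

p = session.query(Parent).options(
    sqlalchemy.orm.joinedload('children')).first()
session.expunge_all()
# Safe: 'children' was loaded eagerly before the instance was detached.
print([c.id for c in p.children])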
def create_archive_policy_rule(self, name, metric_pattern,
                               archive_policy_name):
    apr = ArchivePolicyRule(
        name=name,
        archive_policy_name=archive_policy_name,
        metric_pattern=metric_pattern
    )
    session = self.engine_facade.get_session()
    session.add(apr)
    try:
        session.flush()
    except exception.DBDuplicateEntry:
        raise indexer.ArchivePolicyRuleAlreadyExists(name)
    session.expunge_all()
    return apr
def create_archive_policy(self, archive_policy):
    ap = ArchivePolicy(
        name=archive_policy.name,
        back_window=archive_policy.back_window,
        definition=archive_policy.definition,
        aggregation_methods=list(archive_policy.aggregation_methods),
    )
    session = self.engine_facade.get_session()
    session.add(ap)
    try:
        session.flush()
    except exception.DBDuplicateEntry:
        raise indexer.ArchivePolicyAlreadyExists(archive_policy.name)
    session.expunge_all()
    return ap
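# A standalone sketch (hypothetical names throughout) of the error-translation
# pattern used by create_archive_policy() and create_archive_policy_rule():
# flush eagerly so a unique-constraint violation surfaces right here, then
# convert the low-level driver error into a domain-specific "already exists"
# exception. Plain SQLAlchemy's IntegrityError stands in for oslo.db's
# DBDuplicateEntry, which normally wraps it.
import sqlalchemy
import sqlalchemy.exc
import sqlalchemy.orm
from sqlalchemy.ext.declarative import declarative_base

Base = declarative_base()


class Policy(Base):
    __tablename__ = 'policy'
    name = sqlalchemy.Column(sqlalchemy.String(255), primary_key=True)


class PolicyAlreadyExists(Exception):
    pass


engine = sqlalchemy.create_engine('sqlite://')
Base.metadata.create_all(engine)
session = sqlalchemy.orm.sessionmaker(bind=engine)()


def create_policy(name):
    session.add(Policy(name=name))
    try:
        session.flush()
    except sqlalchemy.exc.IntegrityError:
        session.rollback()
        raise PolicyAlreadyExists(name)
    # Mirror the driver: detach so the identity map does not mask the
    # duplicate INSERT on a later call.
    session.expunge_all()


create_policy('low')
try:
    create_policy('low')
except PolicyAlreadyExists as e:
    print('duplicate: %s' % e)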
def list_metrics(self, user_id=None, project_id=None, details=False,
                 **kwargs):
    session = self.engine_facade.get_session()
    q = session.query(Metric).filter(Metric.status == 'active')
    if user_id is not None:
        q = q.filter(Metric.created_by_user_id == user_id)
    if project_id is not None:
        q = q.filter(Metric.created_by_project_id == project_id)
    for attr in kwargs:
        q = q.filter(getattr(Metric, attr) == kwargs[attr])
    if details:
        q = q.options(sqlalchemy.orm.joinedload('resource'))
    metrics = list(q.all())
    session.expunge_all()
    return metrics
def create_metric(self, id, created_by_user_id, created_by_project_id,
                  archive_policy_name, name=None, resource_id=None):
    m = Metric(id=id,
               created_by_user_id=created_by_user_id,
               created_by_project_id=created_by_project_id,
               archive_policy_name=archive_policy_name,
               name=name,
               resource_id=resource_id)
    session = self.engine_facade.get_session()
    session.add(m)
    try:
        session.flush()
    except exception.DBReferenceError as e:
        if (e.constraint ==
                'fk_metric_archive_policy_name_archive_policy_name'):
            raise indexer.NoSuchArchivePolicy(archive_policy_name)
        raise
    session.expunge_all()
    return m
def create_resource(self, resource_type, id,
                    created_by_user_id, created_by_project_id,
                    user_id=None, project_id=None,
                    started_at=None, ended_at=None, metrics=None,
                    **kwargs):
    resource_cls = self._resource_type_to_class(resource_type)
    if (started_at is not None
            and ended_at is not None
            and started_at > ended_at):
        raise ValueError("Start timestamp cannot be after end timestamp")
    r = resource_cls(
        id=id,
        type=resource_type,
        created_by_user_id=created_by_user_id,
        created_by_project_id=created_by_project_id,
        user_id=user_id,
        project_id=project_id,
        started_at=started_at,
        ended_at=ended_at,
        **kwargs)
    session = self.engine_facade.get_session()
    with session.begin():
        session.add(r)
        try:
            session.flush()
        except exception.DBDuplicateEntry:
            raise indexer.ResourceAlreadyExists(id)
        except exception.DBReferenceError as ex:
            raise indexer.ResourceValueError(r.type, ex.key,
                                             getattr(r, ex.key))
        if metrics is not None:
            self._set_metrics_for_resource(session, r, metrics)

    # NOTE(jd) Force load of metrics :)
    r.metrics

    session.expunge_all()
    return r
def list_resources(self, resource_type='generic',
                   attribute_filter=None,
                   details=False,
                   history=False,
                   limit=None,
                   marker=None,
                   sorts=None):
    sorts = sorts or []
    session = self.engine_facade.get_session()
    if history:
        target_cls = self._get_history_result_mapper(resource_type)
    else:
        target_cls = self._resource_type_to_class(resource_type)
    q = session.query(target_cls)
    if attribute_filter:
        engine = self.engine_facade.get_engine()
        try:
            f = QueryTransformer.build_filter(engine.dialect.name,
                                              target_cls,
                                              attribute_filter)
        except indexer.QueryAttributeError as e:
            # NOTE(jd) The QueryAttributeError does not know about
            # resource_type, so convert it
            raise indexer.ResourceAttributeError(resource_type,
                                                 e.attribute)
        q = q.filter(f)

    # Transform the API-WG sort representation into the oslo.db one
    sort_keys = []
    sort_dirs = []
    for sort in sorts:
        sort_key, __, sort_dir = sort.partition(":")
        sort_keys.append(sort_key.strip())
        sort_dirs.append(sort_dir or 'asc')

    # paginate_query requires at least one unique column
    if 'id' not in sort_keys:
        sort_keys.append('id')
        sort_dirs.append('asc')

    if marker:
        resource_marker = self.get_resource(resource_type, marker)
        if resource_marker is None:
            raise indexer.InvalidPagination(
                "Invalid marker: `%s'" % marker)
    else:
        resource_marker = None

    try:
        q = oslo_db_utils.paginate_query(q, target_cls, limit=limit,
                                         sort_keys=sort_keys,
                                         marker=resource_marker,
                                         sort_dirs=sort_dirs)
    except (exception.InvalidSortKey, ValueError) as e:
        raise indexer.InvalidPagination(e)

    # Always include metrics
    q = q.options(sqlalchemy.orm.joinedload("metrics"))

    all_resources = q.all()

    if details:
        grouped_by_type = itertools.groupby(
            all_resources, lambda r: (r.revision != -1, r.type))
        all_resources = []
        for (is_history, type), resources in grouped_by_type:
            if type == 'generic':
                # No need for a second query
                all_resources.extend(resources)
            else:
                if is_history:
                    target_cls = self._resource_type_to_class(type,
                                                              "history")
                    f = target_cls.revision.in_(
                        [r.revision for r in resources])
                else:
                    target_cls = self._resource_type_to_class(type)
                    f = target_cls.id.in_([r.id for r in resources])
                q = session.query(target_cls).filter(f)
                # Always include metrics
                q = q.options(sqlalchemy.orm.joinedload('metrics'))
                all_resources.extend(q.all())

    session.expunge_all()
    return all_resources
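# A pure-Python sketch of the sort-string handling in list_resources() above:
# each "key:dir" item is split with str.partition(), the direction defaults
# to "asc", and ('id', 'asc') is appended so oslo.db's paginate_query always
# gets at least one unique column for stable paging. The helper name and the
# sample input are made up for illustration.
def parse_sorts(sorts):
    sort_keys, sort_dirs = [], []
    for sort in sorts:
        sort_key, __, sort_dir = sort.partition(":")
        sort_keys.append(sort_key.strip())
        sort_dirs.append(sort_dir or 'asc')
    if 'id' not in sort_keys:
        sort_keys.append('id')
        sort_dirs.append('asc')
    return sort_keys, sort_dirs


# Prints (['started_at', 'user_id', 'id'], ['desc', 'asc', 'asc'])
print(parse_sorts(['started_at:desc', 'user_id']))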
def update_resource(self, resource_type,
                    resource_id, ended_at=_marker, metrics=_marker,
                    append_metrics=False,
                    **kwargs):
    now = utils.utcnow()
    resource_cls = self._resource_type_to_class(resource_type)
    resource_history_cls = self._resource_type_to_class(resource_type,
                                                        "history")
    session = self.engine_facade.get_session()
    try:
        with session.begin():
            # NOTE(sileht): We use FOR UPDATE, which is not Galera
            # friendly, but there is no other way to cleanly patch a
            # resource and store its history safely when two concurrent
            # calls are made.
            q = session.query(resource_cls).filter(
                resource_cls.id == resource_id).with_for_update()
            r = q.first()
            if r is None:
                raise indexer.NoSuchResource(resource_id)

            # Build history
            rh = resource_history_cls()
            for col in sqlalchemy.inspect(resource_cls).columns:
                setattr(rh, col.name, getattr(r, col.name))
            rh.revision_end = now
            session.add(rh)

            # Update the resource
            if ended_at is not _marker:
                # NOTE(jd) MySQL does not honor checks. I hate it.
                engine = self.engine_facade.get_engine()
                if engine.dialect.name == "mysql":
                    if r.started_at is not None and ended_at is not None:
                        if r.started_at > ended_at:
                            raise indexer.ResourceValueError(
                                resource_type, "ended_at", ended_at)
                r.ended_at = ended_at

            r.revision_start = now

            if kwargs:
                for attribute, value in six.iteritems(kwargs):
                    if hasattr(r, attribute):
                        setattr(r, attribute, value)
                    else:
                        raise indexer.ResourceAttributeError(
                            r.type, attribute)

            if metrics is not _marker:
                if not append_metrics:
                    session.query(Metric).filter(
                        Metric.resource_id == resource_id,
                        Metric.status == 'active').update(
                            {"resource_id": None})
                self._set_metrics_for_resource(session, r, metrics)
    except exception.DBConstraintError as e:
        if e.check_name == "ck_started_before_ended":
            raise indexer.ResourceValueError(
                resource_type, "ended_at", ended_at)
        raise

    # NOTE(jd) Force load of metrics; do it outside the transaction block!
    r.metrics

    session.expunge_all()
    return r
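# A standalone sketch of the history-snapshot trick in update_resource():
# sqlalchemy.inspect() on a mapped class returns its Mapper, whose .columns
# collection lists every mapped column, so the live row can be copied
# attribute by attribute into a history row before it is mutated. The Thing
# and ThingHistory models below are hypothetical stand-ins for a resource
# class and its generated history class.
import sqlalchemy
from sqlalchemy.ext.declarative import declarative_base

Base = declarative_base()


class Thing(Base):
    __tablename__ = 'thing'
    id = sqlalchemy.Column(sqlalchemy.Integer, primary_key=True)
    value = sqlalchemy.Column(sqlalchemy.String(255))


class ThingHistory(Base):
    __tablename__ = 'thing_history'
    revision = sqlalchemy.Column(sqlalchemy.Integer, primary_key=True)
    id = sqlalchemy.Column(sqlalchemy.Integer)
    value = sqlalchemy.Column(sqlalchemy.String(255))


t = Thing(id=1, value='before')
th = ThingHistory(revision=1)
# Copy every mapped column of Thing onto the history row.
for col in sqlalchemy.inspect(Thing).columns:
    setattr(th, col.name, getattr(t, col.name))
print(th.id, th.value)  # 1 before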
def get_archive_policy_rule(self, name):
    session = self.engine_facade.get_session()
    ap = session.query(ArchivePolicyRule).get(name)
    session.expunge_all()
    return ap
def list_archive_policy_rules(self):
    session = self.engine_facade.get_session()
    aps = session.query(ArchivePolicyRule).all()
    session.expunge_all()
    return aps
def list_archive_policy_rules(self):
    session = self.engine_facade.get_session()
    aps = session.query(ArchivePolicyRule).order_by(
        ArchivePolicyRule.metric_pattern.desc()).all()
    session.expunge_all()
    return aps
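# A sketch (with made-up rule data) of what the descending metric_pattern
# ordering above plausibly buys a first-match lookup: since '*' sorts low in
# ASCII, concrete patterns such as 'disk.io.*' come before the catch-all '*',
# so the catch-all only wins when nothing more specific matches.
import fnmatch

rules = sorted([('*', 'low'), ('disk.io.*', 'high'), ('cpu*', 'medium')],
               key=lambda r: r[0], reverse=True)


def archive_policy_for(metric_name):
    # First matching rule wins, mirroring a most-specific-first scan.
    for pattern, policy in rules:
        if fnmatch.fnmatch(metric_name, pattern):
            return policy


print(archive_policy_for('disk.io.rate'))  # high
print(archive_policy_for('memory.usage'))  # low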