def list_resources(self, resource_type='generic', attribute_filter=None,
                   details=False, history=False):
    """List resources of *resource_type*, ordered by revision start.

    :param resource_type: resource type name to list (default "generic").
    :param attribute_filter: optional filter expression converted to SQL
        through QueryTransformer.
    :param details: when True, re-fetch non-generic rows through their
        concrete mapper so type-specific columns are populated.
    :param history: when True, query the history mapper instead of the
        live-resource mapper.
    :raises indexer.ResourceAttributeError: if the filter references an
        unknown attribute.
    """
    session = self.engine_facade.get_session()
    if history:
        target_cls = self._get_history_result_mapper(resource_type)
    else:
        target_cls = self._resource_type_to_class(resource_type)
    q = session.query(target_cls)
    if attribute_filter:
        engine = self.engine_facade.get_engine()
        try:
            f = QueryTransformer.build_filter(engine.dialect.name,
                                              target_cls, attribute_filter)
        except indexer.QueryAttributeError as e:
            # NOTE(jd) The QueryAttributeError does not know about
            # resource_type, so convert it
            raise indexer.ResourceAttributeError(resource_type, e.attribute)
        q = q.filter(f)
    # Always include metrics
    q = q.options(sqlalchemy.orm.joinedload("metrics"))
    q = q.order_by(target_cls.revision_start)
    all_resources = q.all()
    if details:
        # Group contiguous rows by (is-history, type); groupby relies on the
        # query ordering above keeping equal keys adjacent.
        grouped_by_type = itertools.groupby(
            all_resources, lambda r: (r.revision != -1, r.type))
        all_resources = []
        for (is_history, type), resources in grouped_by_type:
            if type == 'generic':
                # No need for a second query
                all_resources.extend(resources)
            else:
                if is_history:
                    # History rows are keyed by revision, live rows by id.
                    target_cls = self._resource_type_to_class(type,
                                                              "history")
                    f = target_cls.revision.in_(
                        [r.revision for r in resources])
                else:
                    target_cls = self._resource_type_to_class(type)
                    f = target_cls.id.in_([r.id for r in resources])
                q = session.query(target_cls).filter(f)
                # Always include metrics
                q = q.options(
                    sqlalchemy.orm.joinedload(target_cls.metrics))
                all_resources.extend(q.all())
    return all_resources
def delete_resource(self, resource_id):
    """Delete a resource and flag its attached metrics for deletion.

    :raises indexer.NoSuchResource: if no resource matches *resource_id*.
    """
    session = self.engine_facade.get_session()
    with session.begin():
        # We are going to delete the resource; the on delete will set the
        # resource_id of the attached metrics to NULL, we just have to mark
        # their status as 'delete'
        session.query(Metric).filter(
            Metric.resource_id == resource_id).update({"status": "delete"})
        deleted = session.query(Resource).filter(
            Resource.id == resource_id).delete()
        if not deleted:
            raise indexer.NoSuchResource(resource_id)
def create_lock(context, lock_key, session):
    """Create a lock record keyed by *lock_key*.

    :returns: the lock key when the lock was acquired.
    :raises exception.Conflict: if a lock with this key already exists.

    NOTE(review): this is check-then-create, so two concurrent callers can
    both pass the query and race on save() — confirm the DB unique
    constraint covers that case.
    """
    try:
        session.query(models.ArtifactLock).filter_by(id=lock_key).one()
    except orm.exc.NoResultFound:
        # No existing lock: create and return it.
        lock = models.ArtifactLock()
        lock.id = lock_key
        lock.save(session=session)
        return lock.id
    # A row was found: somebody else holds the lock.
    msg = _("Cannot lock an item with key %s. "
            "Lock already acquired by other request") % lock_key
    raise exception.Conflict(msg)
def _set_metrics_for_resource(session, r, metrics):
    """Attach metrics to resource *r*.

    :param metrics: mapping of metric name to either an existing metric
        UUID (re-parent that metric) or a dict with an
        'archive_policy_name' key (create a new metric).
    :raises indexer.NamedMetricAlreadyExists: on duplicate metric name.
    :raises indexer.NoSuchMetric: if a given UUID matches no metric owned
        by the same creator as *r*.
    :raises indexer.NoSuchArchivePolicy: if the archive policy is unknown.
    """
    for name, value in six.iteritems(metrics):
        if isinstance(value, uuid.UUID):
            try:
                # Re-parent an existing metric; the creator must match the
                # resource's creator for the update to hit any row.
                update = session.query(Metric).filter(
                    Metric.id == value,
                    (Metric.created_by_user_id == r.created_by_user_id),
                    (Metric.created_by_project_id
                     == r.created_by_project_id),
                ).update({"resource_id": r.id, "name": name})
            except exception.DBDuplicateEntry:
                raise indexer.NamedMetricAlreadyExists(name)
            if update == 0:
                raise indexer.NoSuchMetric(value)
        else:
            # Create a brand-new metric for this resource.
            ap_name = value['archive_policy_name']
            m = Metric(id=uuid.uuid4(),
                       created_by_user_id=r.created_by_user_id,
                       created_by_project_id=r.created_by_project_id,
                       archive_policy_name=ap_name,
                       name=name,
                       resource_id=r.id)
            session.add(m)
            try:
                session.flush()
            except exception.DBDuplicateEntry:
                raise indexer.NamedMetricAlreadyExists(name)
            except exception.DBReferenceError as e:
                # Translate the FK violation on archive_policy_name.
                if (e.constraint ==
                        'fk_metric_archive_policy_name_archive_policy_name'):
                    raise indexer.NoSuchArchivePolicy(ap_name)
                raise
    # Invalidate the cached relationship so r.metrics is reloaded.
    session.expire(r, ['metrics'])
def process_update_network(self, plugin_context, data, result):
    """Persist an updated network_extension value and mirror it in result."""
    new_value = data.get('network_extension')
    record = (plugin_context.session.query(TestNetworkExtension)
              .filter_by(network_id=result['id'])
              .one())
    # Only write through when a truthy, different value was supplied.
    if new_value and new_value != record.value:
        record.value = new_value
    result['network_extension'] = record.value
def process_update_port(self, plugin_context, data, result):
    """Persist an updated port_extension value and mirror it in result."""
    new_value = data.get('port_extension')
    record = (plugin_context.session.query(TestPortExtension)
              .filter_by(port_id=result['id'])
              .one())
    # Only write through when a truthy, different value was supplied.
    if new_value and new_value != record.value:
        record.value = new_value
    result['port_extension'] = record.value
def process_update_address_scope(self, plugin_context, data, result):
    """Persist an updated address_scope_extension value into result."""
    new_value = data.get('address_scope_extension')
    record = (plugin_context.session.query(TestAddressScopeExtension)
              .filter_by(address_scope_id=result['id'])
              .one())
    # Only write through when a truthy, different value was supplied.
    if new_value and new_value != record.value:
        record.value = new_value
    result['address_scope_extension'] = record.value
def process_update_subnetpool(self, plugin_context, data, result):
    """Persist an updated subnetpool_extension value into result."""
    new_value = data.get('subnetpool_extension')
    record = (plugin_context.session.query(TestSubnetPoolExtension)
              .filter_by(subnetpool_id=result['id'])
              .one())
    # Only write through when a truthy, different value was supplied.
    if new_value and new_value != record.value:
        record.value = new_value
    result['subnetpool_extension'] = record.value
def list_metrics(self, user_id=None, project_id=None):
    """Return all metrics, optionally restricted by creator user/project."""
    session = self.engine_facade.get_session()
    query = session.query(Metric)
    criteria = []
    if user_id is not None:
        criteria.append(Metric.created_by_user_id == user_id)
    if project_id is not None:
        criteria.append(Metric.created_by_project_id == project_id)
    if criteria:
        query = query.filter(*criteria)
    return query.all()
def get_resource(self, resource_type, resource_id, with_metrics=False):
    """Fetch one resource by id; returns None when it does not exist."""
    resource_cls = self._resource_type_to_class(resource_type)
    session = self.engine_facade.get_session()
    query = session.query(resource_cls).filter(
        resource_cls.id == resource_id)
    if with_metrics:
        # Eager-load the metrics relationship in the same round trip.
        query = query.options(
            sqlalchemy.orm.joinedload(resource_cls.metrics))
    return query.first()
def get_metrics(self, uuids):
    """Return detached Metric rows whose ids are in *uuids*."""
    if not uuids:
        return []
    session = self.engine_facade.get_session()
    query = (session.query(Metric)
             .filter(Metric.id.in_(uuids))
             .options(sqlalchemy.orm.joinedload('resource')))
    metrics = list(query.all())
    # Detach everything so results stay usable after the session is gone.
    session.expunge_all()
    return metrics
def get_metrics(self, uuids):
    """Return Metric rows for *uuids* with policy and resource pre-loaded."""
    if not uuids:
        return []
    session = self.engine_facade.get_session()
    query = session.query(Metric).filter(Metric.id.in_(uuids))
    # Eager-load both relationships to avoid later lazy loads.
    query = query.options(
        sqlalchemy.orm.joinedload(Metric.archive_policy))
    query = query.options(
        sqlalchemy.orm.joinedload(Metric.resource))
    return list(query.all())
def delete_archive_policy_rule(self, name):
    """Delete the archive policy rule *name*.

    :raises indexer.NoSuchArchivePolicyRule: if no rule by that name exists.
    :raises indexer.ArchivePolicyRuleInUse: if the rule is referenced by a
        foreign key.
    """
    session = self.engine_facade.get_session()
    try:
        if session.query(ArchivePolicyRule).filter(
                ArchivePolicyRule.name == name).delete() == 0:
            raise indexer.NoSuchArchivePolicyRule(name)
    except exception.DBError as e:
        # TODO(prad): fix foreign key violations when oslo.db supports it
        if isinstance(e.inner_exception, sqlalchemy.exc.IntegrityError):
            raise indexer.ArchivePolicyRuleInUse(name)
        # Bug fix: previously any non-IntegrityError DBError was silently
        # swallowed here; re-raise so real database errors surface.
        raise
def delete_archive_policy(self, name):
    """Delete the archive policy *name*.

    :raises indexer.NoSuchArchivePolicy: if no policy by that name exists.
    :raises indexer.ArchivePolicyInUse: if metrics still reference it.
    """
    session = self.engine_facade.get_session()
    try:
        if session.query(ArchivePolicy).filter(
                ArchivePolicy.name == name).delete() == 0:
            raise indexer.NoSuchArchivePolicy(name)
    except exception.DBError as e:
        # TODO(jd) Add an exception in oslo.db to match foreign key
        # violations
        if isinstance(e.inner_exception, sqlalchemy.exc.IntegrityError):
            raise indexer.ArchivePolicyInUse(name)
        # Bug fix: previously a non-IntegrityError DBError was silently
        # swallowed; re-raise so real database errors surface.
        raise
def delete_archive_policy(self, name):
    """Delete the archive policy *name*.

    :raises indexer.NoSuchArchivePolicy: if no policy by that name exists.
    :raises indexer.ArchivePolicyInUse: if metrics still reference it.
    """
    session = self.engine_facade.get_session()
    try:
        deleted = session.query(ArchivePolicy).filter(
            ArchivePolicy.name == name).delete()
        if deleted == 0:
            raise indexer.NoSuchArchivePolicy(name)
    except exception.DBReferenceError as e:
        # A metric still points at this policy via its FK.
        if (e.constraint ==
                'fk_metric_archive_policy_name_archive_policy_name'):
            raise indexer.ArchivePolicyInUse(name)
        raise
def delete_resource(self, resource_id, delete_metrics=None):
    """Delete resource *resource_id*.

    :param delete_metrics: optional callable invoked with the resource's
        metrics (as returned by get_metrics) before the row is deleted, so
        the caller can clean up metric storage first.
    :raises indexer.NoSuchResource: if the resource does not exist.
    """
    session = self.engine_facade.get_session()
    with session.begin():
        # Eager-load metrics so delete_metrics() can inspect them.
        q = session.query(Resource).filter(
            Resource.id == resource_id).options(
                sqlalchemy.orm.joinedload(Resource.metrics))
        r = q.first()
        if r is None:
            raise indexer.NoSuchResource(resource_id)
        if delete_metrics is not None:
            delete_metrics(self.get_metrics([m.id for m in r.metrics]))
        # Bulk-delete through the same query object.
        q.delete()
def _set_metrics_for_resource(session, r, metrics):
    """Re-parent existing metrics (name -> metric id) onto resource *r*.

    :raises indexer.NamedMetricAlreadyExists: on duplicate metric name.
    :raises indexer.NoSuchMetric: if a metric id matches no row owned by
        the same creator as *r*.
    """
    for name, metric_id in six.iteritems(metrics):
        try:
            updated = session.query(Metric).filter(
                Metric.id == metric_id,
                Metric.created_by_user_id == r.created_by_user_id,
                Metric.created_by_project_id == r.created_by_project_id,
            ).update({"resource_id": r.id, "name": name})
        except exception.DBDuplicateEntry:
            raise indexer.NamedMetricAlreadyExists(name)
        if not updated:
            raise indexer.NoSuchMetric(metric_id)
    # Invalidate the cached relationship so r.metrics is reloaded.
    session.expire(r, ['metrics'])
def get_metrics(self, uuids, active_only=True, with_resource=False):
    """Return detached Metric rows for *uuids*.

    :param active_only: when True, only metrics with status 'active'.
    :param with_resource: when True, eager-load the owning resource.
    """
    if not uuids:
        return []
    session = self.engine_facade.get_session()
    query = session.query(Metric).filter(Metric.id.in_(uuids))
    if active_only:
        query = query.filter(Metric.status == 'active')
    if with_resource:
        query = query.options(sqlalchemy.orm.joinedload('resource'))
    result = list(query.all())
    # Detach the rows so callers can use them after the session is gone.
    session.expunge_all()
    return result
def revision_tag_get_all(revision_id, session=None):
    """Return list of tags for a revision.

    :param revision_id: ID corresponding to ``Revision`` DB object.
    :param session: Database session object.
    :returns: List of tags for ``revision_id``, ordered by the tag name by
        default.
    """
    session = session or get_session()
    tags = session.query(models.RevisionTag)\
        .filter_by(revision_id=revision_id)\
        .order_by(models.RevisionTag.tag)\
        .all()
    return [t.to_dict() for t in tags]
def get_by_instance(self, instance_id, session=None):
    """Return the VmExpire entity for *instance_id*.

    :param session: existing db session reference; a new one is obtained
        when None.
    :raises: the not-found error produced by _raise_entity_not_found when
        no row matches.
    """
    session = self.get_session(session)
    try:
        query = session.query(
            models.VmExpire
        ).filter_by(instance_id=instance_id)
        entity = query.one()
    except sa_orm.exc.NoResultFound:
        LOG.debug("Not found for %s", instance_id)
        # Bug fix: the former `entity = None` assignment here was dead
        # code — _raise_entity_not_found always raises.
        _raise_entity_not_found(self._do_entity_name(), instance_id)
    return entity
def list_metrics(self, user_id=None, project_id=None, details=False,
                 **kwargs):
    """List active metrics filtered by creator and arbitrary columns.

    :param details: when True, eager-load each metric's resource.
    :param kwargs: extra Metric column equality filters.
    """
    session = self.engine_facade.get_session()
    query = session.query(Metric).filter(Metric.status == 'active')
    if user_id is not None:
        query = query.filter(Metric.created_by_user_id == user_id)
    if project_id is not None:
        query = query.filter(Metric.created_by_project_id == project_id)
    for attr, value in kwargs.items():
        query = query.filter(getattr(Metric, attr) == value)
    if details:
        query = query.options(sqlalchemy.orm.joinedload('resource'))
    result = list(query.all())
    # Detach the rows before handing them back.
    session.expunge_all()
    return result
def revision_tag_create(revision_id, tag, data=None, session=None):
    """Create a revision tag.

    If a tag already exists by name ``tag``, its data is updated instead.

    :param revision_id: ID corresponding to ``Revision`` DB object.
    :param tag: Name of the revision tag.
    :param data: Dictionary of data to be associated with tag.
    :param session: Database session object.
    :returns: Dictionary representation of the created or updated tag.
    :raises RevisionTagBadFormat: If data is neither None nor dictionary.
    :raises RevisionTagNotFound: If the duplicate row vanished between the
        insert attempt and the follow-up lookup.
    """
    session = session or get_session()
    tag_model = models.RevisionTag()

    if data is None:
        data = {}
    if data and not isinstance(data, dict):
        raise errors.RevisionTagBadFormat(data=data)

    try:
        with session.begin():
            tag_model.update({
                'tag': tag,
                'data': data,
                'revision_id': revision_id
            })
            tag_model.save(session=session)
        resp = tag_model.to_dict()
    except db_exception.DBDuplicateEntry:
        # Update the revision tag if it already exists.
        LOG.debug(
            'Tag %s already exists for revision_id %s. Attempting to '
            'update the entry.', tag, revision_id)
        try:
            tag_to_update = session.query(models.RevisionTag)\
                .filter_by(tag=tag, revision_id=revision_id)\
                .one()
        except sa_orm.exc.NoResultFound:
            raise errors.RevisionTagNotFound(tag=tag, revision=revision_id)
        tag_to_update.update({'data': data})
        tag_to_update.save(session=session)
        resp = tag_to_update.to_dict()
    return resp
def bucket_get_all(session=None, **filters):
    """Return list of all buckets.

    :param session: Database session object.
    :param filters: attributes (including nested) used to filter buckets.
    :returns: List of dictionary representations of retrieved buckets.
    """
    session = session or get_session()
    buckets = session.query(models.Bucket)\
        .all()

    result = []
    for bucket in buckets:
        bucket_dict = bucket.to_dict()
        if utils.deepfilter(bucket_dict, **filters):
            # Bug fix: append the dictionary representation, as promised by
            # the docstring and done by the sibling *_get_all helpers — the
            # previous code appended the raw ORM object (and misnamed the
            # dict "revision_dict").
            result.append(bucket_dict)
    return result
def get_entities(self, expiration_filter=None, session=None):
    """Get all entities.

    :param session: existing db session reference. If None, gets session.
    :param expiration_filter: timestamp to compare expiration date with
    :returns: list of matching entities found otherwise returns empty list
              if no entity exists for a given project.
    """
    session = self.get_session(session)
    query = session.query(models.VmExpire)
    if expiration_filter:
        query = query.filter(models.VmExpire.expire < expiration_filter)
    LOG.debug(query)
    # Bug fix: a Query object is always truthy, so the previous
    # `if query: ... else: return []` else-branch was dead code; .all()
    # already returns an empty list when nothing matches.
    return query.all()
def revision_get_all(session=None, **filters):
    """Return list of all revisions.

    :param session: Database session object.
    :param filters: attributes (including nested) used to filter revisions.
    :returns: List of dictionary representations of retrieved revisions.
    """
    session = session or get_session()
    revisions = session.query(models.Revision)\
        .all()

    result = []
    for revision in revisions:
        revision_dict = revision.to_dict()
        if not utils.deepfilter(revision_dict, **filters):
            continue
        # Post-process the document list before returning.
        revision_dict['documents'] = _update_revision_history(
            revision_dict['documents'])
        result.append(revision_dict)
    return result
def revision_tag_get(revision_id, tag, session=None):
    """Retrieve tag details.

    :param revision_id: ID corresponding to ``Revision`` DB object.
    :param tag: Name of the revision tag.
    :param session: Database session object.
    :returns: Dictionary representation of the retrieved tag.
    :raises RevisionTagNotFound: If ``tag`` for ``revision_id`` was not
        found.
    """
    session = session or get_session()

    try:
        tag = session.query(models.RevisionTag)\
            .filter_by(tag=tag, revision_id=revision_id)\
            .one()
    except sa_orm.exc.NoResultFound:
        raise errors.RevisionTagNotFound(tag=tag, revision=revision_id)

    return tag.to_dict()
def create_or_update(context, artifact_id, values, session):
    """Create a new artifact or update an existing one from *values*.

    :param artifact_id: id of the artifact to update, or None to create.
    :param values: mutable dict of attributes; 'id', 'tags', 'properties'
        and 'blobs' are popped and handled specially.
    :returns: dictionary representation of the saved artifact.
    :raises exception.Conflict: when changing status while blobs are still
        uploading.
    """
    with session.begin():
        _drop_protected_attrs(models.Artifact, values)
        if artifact_id is None:
            # create new artifact
            artifact = models.Artifact()
            artifact.id = values.pop('id')
        else:
            # update the existing artifact
            artifact = _get(context, None, artifact_id, session)

        if 'version' in values:
            # Store versions in their parsed, comparable form.
            values['version'] = semver_db.parse(values['version'])

        if 'tags' in values:
            tags = values.pop('tags')
            artifact.tags = _do_tags(artifact, tags)

        if 'properties' in values:
            properties = values.pop('properties', {})
            artifact.properties = _do_properties(artifact, properties)

        if 'blobs' in values:
            blobs = values.pop('blobs')
            artifact.blobs = _do_blobs(artifact, blobs)

        artifact.updated_at = timeutils.utcnow()
        if 'status' in values:
            # Refuse a status change while any blob upload is in flight.
            if session.query(exists().where(and_(
                models.ArtifactBlob.status == 'saving',
                models.ArtifactBlob.artifact_id == artifact_id))
            ).one()[0]:
                raise exception.Conflict(
                    "You cannot change artifact status if it has "
                    "uploading blobs.")
            if values['status'] == 'active':
                artifact.activated_at = timeutils.utcnow()
        artifact.update(values)
        artifact.save(session=session)
        LOG.debug("Response from the database was received.")

        return artifact.to_dict()
def model_query(context, model, *args, **kwargs):
    """Query helper for simpler session usage.

    :param session: if present, the session to use
    """
    session = kwargs.get('session') or get_session()
    query = session.query(model, *args)

    # Hide soft-deleted rows unless explicitly requested.
    if not kwargs.get('read_deleted', False):
        query = query.filter_by(deleted=False)

    # Scope to the caller's project when the model supports it.
    if kwargs.get('project_only', True) and hasattr(model, 'project_id'):
        query = query.filter_by(project_id=context.project_id)

    return query
def revision_get_latest(session=None):
    """Return the latest revision.

    :param session: Database session object.
    :returns: Dictionary representation of latest revision.
    :raises RevisionNotFound: if the latest revision was not found.
    """
    session = session or get_session()

    newest = (session.query(models.Revision)
              .order_by(models.Revision.created_at.desc())
              .first())
    if not newest:
        raise errors.RevisionNotFound(revision='latest')

    revision_dict = newest.to_dict()
    revision_dict['documents'] = _update_revision_history(
        revision_dict['documents'])
    return revision_dict
def revision_get(revision_id=None, session=None):
    """Return the specified `revision_id`.

    :param revision_id: The ID corresponding to the ``Revision`` object.
    :param session: Database session object.
    :returns: Dictionary representation of retrieved revision.
    :raises RevisionNotFound: if the revision was not found.
    """
    session = session or get_session()

    try:
        model = (session.query(models.Revision)
                 .filter_by(id=revision_id)
                 .one())
    except sa_orm.exc.NoResultFound:
        raise errors.RevisionNotFound(revision_id=revision_id)

    revision = model.to_dict()
    revision['documents'] = _update_revision_history(revision['documents'])
    return revision
def _do_artifacts_query(context, session):
    """Build the query to get all artifacts based on the context"""
    query = session.query(models.Artifact)
    # Eager-load the child collections in one shot.
    for rel in (models.Artifact.properties,
                models.Artifact.tags,
                models.Artifact.blobs):
        query = query.options(joinedload(rel))

    # If admin, return everything.
    if context.is_admin:
        return query

    # If anonymous user, return only public artifacts.
    # However, if context.tenant has a value, return both
    # public and private artifacts of the owner.
    if context.tenant is None:
        return query.filter(models.Artifact.visibility == 'public')
    return query.filter(
        or_(models.Artifact.owner == context.tenant,
            models.Artifact.visibility == 'public'))
def bucket_get_or_create(bucket_name, session=None):
    """Retrieve or create bucket.

    Retrieve the ``Bucket`` DB object by ``bucket_name`` if it exists or
    else create a new ``Bucket`` DB object by ``bucket_name``.

    :param bucket_name: Unique identifier used for creating or retrieving
        a bucket.
    :param session: Database session object.
    :returns: Dictionary representation of created/retrieved bucket.
    """
    session = session or get_session()

    try:
        bucket = (session.query(models.Bucket)
                  .filter_by(name=bucket_name)
                  .one())
    except sa_orm.exc.NoResultFound:
        # No such bucket yet: create it on the fly.
        bucket = models.Bucket()
        bucket.update({'name': bucket_name})
        bucket.save(session=session)

    return bucket.to_dict()
def revision_get_latest(session=None):
    """Return the latest revision.

    :param session: Database session object.
    :returns: Dictionary representation of latest revision.
    """
    session = session or get_session()

    newest = (session.query(models.Revision)
              .order_by(models.Revision.created_at.desc())
              .first())
    if not newest:
        # If the latest revision doesn't exist, assume an empty revision
        # history and return a dummy revision instead for the purposes of
        # revision rollback.
        return {'documents': [], 'id': 0}

    revision_dict = newest.to_dict()
    revision_dict['documents'] = _update_revision_history(
        revision_dict['documents'])
    return revision_dict
def create_lock(context, lock_key, session):
    """Try to create lock record.

    :returns: the lock key when the lock was acquired (new or stolen).
    :raises exception.Conflict: if the lock is held and still fresh.
    """
    with session.begin():
        existing = session.query(models.ArtifactLock).get(lock_key)
        if existing is None:
            try:
                # No lock yet: insert one; a concurrent insert surfaces as
                # IntegrityError/DBDuplicateEntry and means we lost the race.
                lock = models.ArtifactLock()
                lock.id = lock_key
                lock.save(session=session)
                return lock.id
            except (sqlalchemy.exc.IntegrityError,
                    db_exception.DBDuplicateEntry):
                msg = _("Cannot lock an item with key %s. "
                        "Lock already acquired by other request") % lock_key
                raise exception.Conflict(msg)
        else:
            # Steal a stale lock (is_older_than(..., 5) — presumably 5
            # seconds; confirm against the oslo timeutils contract).
            if timeutils.is_older_than(existing.acquired_at, 5):
                existing.acquired_at = timeutils.utcnow()
                existing.save(session)
                return existing.id
            else:
                msg = _("Cannot lock an item with key %s. "
                        "Lock already acquired by other request") % lock_key
                raise exception.Conflict(msg)
def _do_artifacts_query(context, session):
    """Build the query to get all artifacts based on the context"""
    query = (session.query(models.Artifact)
             .options(joinedload(models.Artifact.properties))
             .options(joinedload(models.Artifact.tags))
             .options(joinedload(models.Artifact.blobs)))

    # If admin, return everything.
    if context.is_admin:
        return query

    # If anonymous user, return only public artifacts.
    # However, if context.tenant has a value, return both
    # public and private artifacts of the owner.
    if context.tenant is None:
        return query.filter(models.Artifact.visibility == 'public')
    return query.filter(
        or_(models.Artifact.owner == context.tenant,
            models.Artifact.visibility == 'public'))
def document_get(session=None, raw_dict=False, **filters):
    """Retrieve a document from the DB.

    :param session: Database session object.
    :param raw_dict: Whether to retrieve the exact way the data is stored in
        DB if ``True``, else the way users expect the data.
    :param filters: Dictionary attributes (including nested) used to filter
        out revision documents.
    :returns: Dictionary representation of retrieved document.
    :raises: DocumentNotFound if the document wasn't found.
    """
    session = session or get_session()

    # TODO(fmontei): Currently Deckhand doesn't support filtering by nested
    # JSON fields via sqlalchemy. For now, filter the documents using all
    # "regular" filters via sqlalchemy and all nested filters via Python.
    nested_filters = {}
    for f in filters.copy():
        if any([x in f for x in ('.', 'schema')]):
            nested_filters.setdefault(f, filters.pop(f))

    # Documents with the same metadata.name and schema can exist across
    # different revisions, so it is necessary to order documents by creation
    # date, then return the first document that matches all desired filters.
    documents = session.query(models.Document)\
        .filter_by(**filters)\
        .order_by(models.Document.created_at.desc())\
        .all()
    for doc in documents:
        d = doc.to_dict(raw_dict=raw_dict)
        if _apply_filters(d, **nested_filters):
            return d

    # Include the nested filters back in the error payload for context.
    filters.update(nested_filters)
    raise errors.DocumentNotFound(document=filters)
def get_archive_policy_rule(self, name):
    """Return the detached ArchivePolicyRule named *name*, or None."""
    session = self.engine_facade.get_session()
    rule = session.query(ArchivePolicyRule).get(name)
    # Detach so the caller can use the object beyond the session.
    session.expunge_all()
    return rule
def list_archive_policy_rules(self):
    """Return all archive policy rules, detached from the session."""
    session = self.engine_facade.get_session()
    rules = session.query(ArchivePolicyRule).all()
    session.expunge_all()
    return rules
def extend_address_scope_dict(self, session, address_scope, result):
    """Populate result['address_scope_extension'] from the DB record."""
    record = session.query(TestAddressScopeExtension).filter_by(
        address_scope_id=result['id']).one_or_none()
    # Fall back to an empty string when no extension row exists.
    result['address_scope_extension'] = (
        record.value if record is not None else '')
def storage_get(context, storage_id):
    """Return the Storage row matching *storage_id*, or None.

    :param context: request context (unused here, kept for API parity with
        sibling helpers).
    :param storage_id: primary key of the storage record to fetch.
    """
    model = Storage
    # Bug fix: the previous filter was `filter(id == 123)`, which compared
    # the *builtin* `id` function to a hard-coded 123 and ignored
    # `storage_id` entirely, so the query could never match the requested
    # row.
    storage_instance = session.query(model) \
        .filter(model.id == storage_id) \
        .first()
    return storage_instance
def update_resource(self, resource_type,
                    resource_id, ended_at=_marker, metrics=_marker,
                    append_metrics=False,
                    **kwargs):
    """Update a resource, archiving its previous state as a history row.

    :param ended_at: new end timestamp; _marker means "leave unchanged".
    :param metrics: new metrics mapping; _marker means "leave unchanged".
    :param append_metrics: when False, detach the resource's currently
        active metrics before attaching the new ones.
    :param kwargs: additional resource attributes to set.
    :raises indexer.NoSuchResource: if the resource does not exist.
    :raises indexer.ResourceValueError: if ended_at precedes started_at.
    :raises indexer.ResourceAttributeError: on an unknown attribute.
    """
    now = utils.utcnow()
    resource_cls = self._resource_type_to_class(resource_type)
    resource_history_cls = self._resource_type_to_class(resource_type,
                                                        "history")
    session = self.engine_facade.get_session()
    try:
        with session.begin():
            # NOTE(sileht): We use FOR UPDATE that is not galera friendly,
            # but they are no other way to cleanly patch a resource and
            # store the history that safe when two concurrent calls are
            # done.
            q = session.query(resource_cls).filter(
                resource_cls.id == resource_id).with_for_update()
            r = q.first()
            if r is None:
                raise indexer.NoSuchResource(resource_id)

            # Build history: copy every column of the current state into a
            # history row closed at `now`.
            rh = resource_history_cls()
            for col in sqlalchemy.inspect(resource_cls).columns:
                setattr(rh, col.name, getattr(r, col.name))
            rh.revision_end = now
            session.add(rh)

            # Update the resource
            if ended_at is not _marker:
                # NOTE(jd) MySQL does not honor checks. I hate it.
                engine = self.engine_facade.get_engine()
                if engine.dialect.name == "mysql":
                    if r.started_at is not None and ended_at is not None:
                        if r.started_at > ended_at:
                            raise indexer.ResourceValueError(
                                resource_type, "ended_at", ended_at)
                r.ended_at = ended_at
            r.revision_start = now
            if kwargs:
                for attribute, value in six.iteritems(kwargs):
                    if hasattr(r, attribute):
                        setattr(r, attribute, value)
                    else:
                        raise indexer.ResourceAttributeError(
                            r.type, attribute)
            if metrics is not _marker:
                if not append_metrics:
                    # Detach currently active metrics before re-attaching.
                    session.query(Metric).filter(
                        Metric.resource_id == resource_id,
                        Metric.status == 'active').update(
                            {"resource_id": None})
                self._set_metrics_for_resource(session, r, metrics)
    except exception.DBConstraintError as e:
        # Other dialects enforce the started<=ended check in the DB.
        if e.check_name == "ck_started_before_ended":
            raise indexer.ResourceValueError(
                resource_type, "ended_at", ended_at)
        raise
    # NOTE(jd) Force load of metrics – do it outside the session!
    r.metrics
    session.expunge_all()
    return r
def expunge_metric(self, id):
    """Completely remove metric *id* from the index.

    :raises indexer.NoSuchMetric: if no metric with that id exists.
    """
    session = self.engine_facade.get_session()
    # Bug fix: the previous code used `.delete == 0` (no parentheses),
    # comparing the bound method object to 0 — always False — so
    # NoSuchMetric was never raised and the delete never executed.
    if session.query(Metric).filter(Metric.id == id).delete() == 0:
        raise indexer.NoSuchMetric(id)
def delete_blob_data(context, uri, session):
    """Delete blob data from database."""
    with session.begin():
        # The row id is everything after the first 6 characters of the URI
        # (presumably a fixed-length scheme prefix — confirm with callers).
        blob_data_id = uri[6:]
        session.query(models.ArtifactBlobData).filter_by(
            id=blob_data_id).delete()
def _create_artifact_count_query(context, session, list_all_artifacts):
    """Build a query counting distinct artifacts visible to *context*."""
    count_column = func.count(
        distinct(models.Artifact.id)).label("total_count")
    base = session.query(count_column)
    return _apply_query_base_filters(base, context, list_all_artifacts)
def get_archive_policy(self, name):
    """Look up an ArchivePolicy by primary key; None when absent."""
    session = self.engine_facade.get_session()
    policy = session.query(ArchivePolicy).get(name)
    return policy
def delete_lock(context, lock_id, session):
    """Delete the lock *lock_id*.

    :raises exception.NotFound: if no lock with that id exists.
    """
    # Bug fix: Query.delete() returns the number of deleted rows and never
    # raises NoResultFound, so the previous `except orm.exc.NoResultFound`
    # clause was dead code and a missing lock went unreported. Check the
    # row count instead.
    deleted = session.query(models.ArtifactLock).filter_by(
        id=lock_id).delete()
    if deleted == 0:
        msg = _("Cannot delete a lock with id %s.") % lock_id
        raise exception.NotFound(msg)
def list_archive_policies(self):
    """Return every ArchivePolicy row."""
    session = self.engine_facade.get_session()
    policies = session.query(ArchivePolicy).all()
    return policies
def extend_subnetpool_dict(self, session, subnetpool_db, result):
    """Populate result['subnetpool_extension'] from the DB record."""
    record = session.query(TestSubnetPoolExtension).filter_by(
        subnetpool_id=result['id']).one_or_none()
    # Fall back to an empty string when no extension row exists.
    result['subnetpool_extension'] = (
        record.value if record is not None else '')
def delete_archive_policy_rule(self, name):
    """Delete the archive policy rule *name*.

    :raises indexer.NoSuchArchivePolicyRule: if no such rule exists.
    """
    session = self.engine_facade.get_session()
    deleted = session.query(ArchivePolicyRule).filter(
        ArchivePolicyRule.name == name).delete()
    if deleted == 0:
        raise indexer.NoSuchArchivePolicyRule(name)
def delete_lock(context, lock_id, session):
    """Remove the lock row identified by *lock_id* (no-op when absent)."""
    with session.begin():
        (session.query(models.ArtifactLock)
         .filter_by(id=lock_id)
         .delete())
def delete(context, artifact_id, session):
    """Delete the artifact row identified by *artifact_id*."""
    with session.begin():
        (session.query(models.Artifact)
         .filter_by(id=artifact_id)
         .delete())
def delete_metric(self, id):
    """Delete metric *id* and flush the session immediately."""
    session = self.engine_facade.get_session()
    session.query(Metric).filter(Metric.id == id).delete()
    # Push the delete to the DB right away.
    session.flush()
def print_volume():
    """Print the first Volume named 'vmax_vol' (None when missing)."""
    match = (session.query(Volume)
             .filter(Volume.name == 'vmax_vol')
             .first())
    print(match)
def list_resources(self, resource_type='generic',
                   attribute_filter=None,
                   details=False, history=False,
                   limit=None, marker=None, sorts=None):
    """List resources of *resource_type* with sorting and pagination.

    :param attribute_filter: optional filter expression converted to SQL
        through QueryTransformer.
    :param details: when True, re-fetch non-generic rows through their
        concrete mapper so type-specific columns are populated.
    :param history: when True, query the history mapper.
    :param limit: maximum number of rows to return.
    :param marker: id of the resource to start after (pagination marker).
    :param sorts: list of "key:dir" strings in api-wg form.
    :raises indexer.ResourceAttributeError: on an unknown filter attribute.
    :raises indexer.InvalidPagination: on a bad marker or sort key.
    """
    sorts = sorts or []

    session = self.engine_facade.get_session()
    if history:
        target_cls = self._get_history_result_mapper(resource_type)
    else:
        target_cls = self._resource_type_to_class(resource_type)
    q = session.query(target_cls)

    if attribute_filter:
        engine = self.engine_facade.get_engine()
        try:
            f = QueryTransformer.build_filter(engine.dialect.name,
                                              target_cls, attribute_filter)
        except indexer.QueryAttributeError as e:
            # NOTE(jd) The QueryAttributeError does not know about
            # resource_type, so convert it
            raise indexer.ResourceAttributeError(resource_type,
                                                 e.attribute)
        q = q.filter(f)

    # transform the api-wg representation to the oslo.db one
    sort_keys = []
    sort_dirs = []
    for sort in sorts:
        sort_key, __, sort_dir = sort.partition(":")
        sort_keys.append(sort_key.strip())
        sort_dirs.append(sort_dir or 'asc')

    # paginate_query require at list one uniq column
    if 'id' not in sort_keys:
        sort_keys.append('id')
        sort_dirs.append('asc')

    if marker:
        # The marker must reference an existing resource.
        resource_marker = self.get_resource(resource_type, marker)
        if resource_marker is None:
            raise indexer.InvalidPagination(
                "Invalid marker: `%s'" % marker)
    else:
        resource_marker = None

    try:
        q = oslo_db_utils.paginate_query(q, target_cls, limit=limit,
                                         sort_keys=sort_keys,
                                         marker=resource_marker,
                                         sort_dirs=sort_dirs)
    except (exception.InvalidSortKey, ValueError) as e:
        raise indexer.InvalidPagination(e)

    # Always include metrics
    q = q.options(sqlalchemy.orm.joinedload("metrics"))
    all_resources = q.all()

    if details:
        # Group contiguous rows by (is-history, type); groupby relies on
        # the pagination ordering keeping equal keys adjacent.
        grouped_by_type = itertools.groupby(
            all_resources, lambda r: (r.revision != -1, r.type))
        all_resources = []
        for (is_history, type), resources in grouped_by_type:
            if type == 'generic':
                # No need for a second query
                all_resources.extend(resources)
            else:
                if is_history:
                    # History rows are keyed by revision, live rows by id.
                    target_cls = self._resource_type_to_class(type,
                                                              "history")
                    f = target_cls.revision.in_(
                        [r.revision for r in resources])
                else:
                    target_cls = self._resource_type_to_class(type)
                    f = target_cls.id.in_([r.id for r in resources])
                q = session.query(target_cls).filter(f)
                # Always include metrics
                q = q.options(sqlalchemy.orm.joinedload('metrics'))
                all_resources.extend(q.all())
    session.expunge_all()
    return all_resources
def _do_build_get_query(self, entity_id, session):
    """Sub-class hook: build a retrieve query."""
    return (session.query(models.VmExclude)
            .filter_by(id=entity_id))
def delete_metric(self, id):
    """Flag metric *id* for deletion.

    :raises indexer.NoSuchMetric: if no metric with that id exists.
    """
    session = self.engine_facade.get_session()
    updated = session.query(Metric).filter(
        Metric.id == id).update({"status": "delete"})
    if updated == 0:
        raise indexer.NoSuchMetric(id)
def get_one_net():
    """Return the first Net row, or None when the table is empty."""
    session = get_session()
    return session.query(models.Net).first()