def test_transfer_accept_with_detail_records(self, mock_notify, mock_type_get):
    """Accepting a transfer populates the detail columns on the DB row."""
    volume_svc = self.start_service('volume', host='test_host')
    self.addCleanup(volume_svc.stop)
    transfers = transfer_api.API()
    vol = utils.create_volume(self.ctxt, updated_at=self.updated_at)
    xfer = transfers.create(self.ctxt, vol.id, 'Description')
    # A freshly created transfer only knows about the source project.
    self.assertEqual(vol.project_id, xfer['source_project_id'])
    self.assertIsNone(xfer['destination_project_id'])
    self.assertFalse(xfer['accepted'])
    # Accept the transfer as a different user/project.
    self.ctxt.user_id = fake.USER2_ID
    self.ctxt.project_id = fake.PROJECT2_ID
    transfers.accept(self.ctxt, xfer['id'], xfer['auth_key'])
    with db_api.main_context_manager.reader.using(self.ctxt):
        record = db_api.model_query(
            self.ctxt, models.Transfer,
            read_deleted='yes').filter_by(id=xfer['id']).first()
        self.assertEqual(vol.project_id, record['source_project_id'])
        self.assertTrue(record['accepted'])
        self.assertEqual(fake.PROJECT2_ID, record['destination_project_id'])
def remove_services(args):
    """Remove cinder services selected by the ``host`` action parameter.

    If ``host`` is "unused" or empty, every service whose host is not one of
    DEFAULT_SERVICES is removed; otherwise only services on the named host.
    Removed hosts are reported via ``action_set``; a failure records the
    traceback and fails the action.
    """
    load_config_file(os.path.join(os.path.sep, "etc", "cinder", "cinder.conf"))
    host = action_get(key="host")
    services = model_query({}, models.Service, read_deleted="no",
                           session=get_session())
    if host not in ("unused", "",):
        services = services.filter(models.Service.host == host)
    else:
        ands = []
        for service in DEFAULT_SERVICES:
            ands.append(and_(models.Service.host != service))
        services = services.filter(*ands)
    removed_services = []
    ctxt = context.get_admin_context()
    for service in services.all():
        log("Removing service:%d, hostname:%s" % (service.id, service.host))
        try:
            # liberty+ uses 'cinder-manage service remove'; older releases
            # must delete the service row directly.
            if CompareOpenStackReleases(os_release("cinder")) >= "liberty":
                cinder_manage_remove(service.binary, service.host)
            else:
                db.service_destroy(ctxt, service.id)
        except Exception:
            # Was a bare ``except:`` which also swallowed SystemExit and
            # KeyboardInterrupt; catch Exception so those propagate.
            action_set({'traceback': traceback.format_exc()})
            action_fail("Cannot remove service: %s" % service.host)
        else:
            removed_services.append(service.host)
    action_set({'removed': ",".join(removed_services)})
def remove_services():
    """Remove cinder services selected by the ``host`` action parameter.

    If ``host`` is "unused" or empty, every service whose host is not one of
    DEFAULT_SERVICES is removed; otherwise only services on the named host.
    Removed hosts are reported via ``action_set``; a failure records the
    traceback and fails the action.
    """
    load_config_file(os.path.join(os.path.sep, "etc", "cinder", "cinder.conf"))
    host = action_get(key="host")
    services = model_query({}, models.Service, read_deleted="no",
                           session=get_session())
    if host not in ("unused", "",):
        services = services.filter(models.Service.host == host)
    else:
        ands = []
        for service in DEFAULT_SERVICES:
            ands.append(and_(models.Service.host != service))
        services = services.filter(*ands)
    removed_services = []
    ctxt = context.get_admin_context()
    for service in services.all():
        log("Removing service:%d, hostname:%s" % (service.id, service.host))
        try:
            # NOTE(review): plain string comparison of release names is only
            # correct while codenames sort alphabetically with release order;
            # consider a CompareOpenStackReleases-style helper — TODO confirm.
            if os_release("cinder") >= "liberty":
                cinder_manage_remove(service.binary, service.host)
            else:
                db.service_destroy(ctxt, service.id)
        except Exception:
            # Was a bare ``except:`` which also swallowed SystemExit and
            # KeyboardInterrupt; catch Exception so those propagate.
            action_set({'traceback': traceback.format_exc()})
            action_fail("Cannot remove service: %s" % service.host)
        else:
            removed_services.append(service.host)
    action_set({'removed': ",".join(removed_services)})
def delete_snapshot(self, snapshot):
    """Remove a snapshot record, honoring the soft-delete setting."""
    if self.soft_deletes:
        LOG.debug('soft deleting snapshot %s', snapshot.id)
        self.db.snapshot_destroy(objects.CONTEXT, snapshot.id)
    else:
        LOG.debug('hard deleting snapshot %s', snapshot.id)
        sqla_api.model_query(
            objects.CONTEXT,
            models.Snapshot).filter_by(id=snapshot.id).delete()
    super(DBPersistence, self).delete_snapshot(snapshot)
def delete_volume(self, volume):
    """Remove a volume record, honoring the soft-delete setting."""
    if self.soft_deletes:
        LOG.debug('soft deleting volume %s', volume.id)
        self.db.volume_destroy(objects.CONTEXT, volume.id)
    else:
        LOG.debug('hard deleting volume %s', volume.id)
        sqla_api.model_query(
            objects.CONTEXT,
            models.Volume).filter_by(id=volume.id).delete()
    super(DBPersistence, self).delete_volume(volume)
def _get_reservations(self, ctxt, session, project_id, usage_id):
    """Get reservations for a given project and usage id."""
    query = db_api.model_query(ctxt, models.Reservation,
                               read_deleted="no", session=session)
    query = query.filter_by(project_id=project_id, usage_id=usage_id)
    # Lock the matching rows for the duration of the transaction.
    return query.with_for_update().all()
def set_volume(self, volume):
    """Persist a volume and its type/QoS/extra-specs state to the DB.

    Creates the volume row on first save (falling back to an update when a
    duplicate id already exists) and creates or updates the associated
    volume type, extra specs and QoS specs records.

    :param volume: volume OVO whose changed (or, if nothing changed, all)
                   fields are written to the database.
    """
    changed = self.get_changed_fields(volume)
    if not changed:
        changed = self.get_fields(volume)
    # These two live in separate tables, so keep them out of the volume row.
    extra_specs = changed.pop('extra_specs', None)
    qos_specs = changed.pop('qos_specs', None)
    # Since OVOs are not tracking QoS or Extra specs dictionary changes,
    # we only support setting QoS or Extra specs on creation or add them
    # later.
    if changed.get('volume_type_id'):
        vol_type_fields = {'id': volume.volume_type_id,
                           'name': volume.volume_type_id,
                           'extra_specs': extra_specs,
                           'is_public': True}
        if qos_specs:
            res = self.db.qos_specs_create(objects.CONTEXT,
                                           {'name': volume.volume_type_id,
                                            'consumer': 'back-end',
                                            'specs': qos_specs})
            # Cinder is automatically generating an ID, replace it
            query = sqla_api.model_query(objects.CONTEXT,
                                         models.QualityOfServiceSpecs)
            query.filter_by(id=res['id']).update(
                {'id': volume.volume_type.qos_specs_id})
        self.db.volume_type_create(objects.CONTEXT, vol_type_fields)
    else:
        # Type already exists: refresh its extra specs and QoS specs.
        if extra_specs is not None:
            self.db.volume_type_extra_specs_update_or_create(
                objects.CONTEXT, volume.volume_type_id, extra_specs)
        self.db.qos_specs_update(objects.CONTEXT,
                                 volume.volume_type.qos_specs_id,
                                 {'name': volume.volume_type_id,
                                  'consumer': 'back-end',
                                  'specs': qos_specs})
    # Create the volume
    if 'id' in changed:
        LOG.debug('set_volume creating %s', changed)
        try:
            self.db.volume_create(objects.CONTEXT, changed)
            changed = None
        except exception.DBDuplicateEntry:
            # Row already exists; drop the id and fall through to an update.
            del changed['id']
    if changed:
        LOG.debug('set_volume updating %s', changed)
        self.db.volume_update(objects.CONTEXT, volume.id, changed)
    super(DBPersistence, self).set_volume(volume)
def _get_reservations(self, context, project_id, usage_id):
    """Get reservations for a given project and usage id."""
    query = db_api.model_query(context, models.Reservation,
                               read_deleted="no")
    query = query.filter_by(project_id=project_id, usage_id=usage_id)
    # Lock the matching rows for the duration of the transaction.
    return query.with_for_update().all()
def delete_connection(self, connection):
    """Remove an attachment record, honoring the soft-delete setting."""
    if self.soft_deletes:
        LOG.debug('soft deleting connection %s', connection.id)
        self.db.attachment_destroy(objects.CONTEXT, connection.id)
    else:
        LOG.debug('hard deleting connection %s', connection.id)
        sqla_api.model_query(
            objects.CONTEXT,
            models.VolumeAttachment).filter_by(id=connection.id).delete()
    super(DBPersistence, self).delete_connection(connection)
def delete_volume(self, volume):
    """Remove a volume and, when it is not the default type, its volume
    type, extra specs and QoS specs.

    Honors ``self.soft_deletes``: soft delete via the Cinder DB API, or
    hard delete the rows directly with SQLAlchemy queries.
    """
    # Only delete the type when the volume has its own (non-default) one.
    delete_type = (volume.volume_type_id != self.DEFAULT_TYPE.id and
                   volume.volume_type_id)
    if self.soft_deletes:
        LOG.debug('soft deleting volume %s', volume.id)
        self.db.volume_destroy(objects.CONTEXT, volume.id)
        if delete_type:
            LOG.debug('soft deleting volume type %s', volume.volume_type_id)
            # BUG FIX: this called volume_destroy with a volume *type* id,
            # which would soft-delete the wrong table's row (or nothing).
            self.db.volume_type_destroy(objects.CONTEXT,
                                        volume.volume_type_id)
            if volume.volume_type.qos_specs_id:
                self.db.qos_specs_delete(objects.CONTEXT,
                                         volume.volume_type.qos_specs_id)
    else:
        LOG.debug('hard deleting volume %s', volume.id)
        # Metadata rows first, then the volume row itself.
        for model in (models.VolumeMetadata, models.VolumeAdminMetadata):
            query = sqla_api.model_query(objects.CONTEXT, model)
            query.filter_by(volume_id=volume.id).delete()
        query = sqla_api.model_query(objects.CONTEXT, models.Volume)
        query.filter_by(id=volume.id).delete()
        if delete_type:
            LOG.debug('hard deleting volume type %s', volume.volume_type_id)
            query = sqla_api.model_query(objects.CONTEXT,
                                         models.VolumeTypeExtraSpecs)
            query.filter_by(volume_type_id=volume.volume_type_id).delete()
            query = sqla_api.model_query(objects.CONTEXT, models.VolumeType)
            query.filter_by(id=volume.volume_type_id).delete()
            query = sqla_api.model_query(objects.CONTEXT,
                                         models.QualityOfServiceSpecs)
            qos_id = volume.volume_type.qos_specs_id
            if qos_id:
                # Delete both the QoS row and its key/value child rows.
                query.filter(sqla_api.or_(
                    models.QualityOfServiceSpecs.id == qos_id,
                    models.QualityOfServiceSpecs.specs_id == qos_id)
                ).delete()
    super(DBPersistence, self).delete_volume(volume)
def _get_usages(self, context, resources, project_id):
    """Get data necessary to check out of sync quota usage.

    Returns a list of QuotaUsage instances for the specific project,
    locked for update for the duration of the transaction.
    """
    query = db_api.model_query(context, db_api.models.QuotaUsage,
                               read_deleted="no")
    query = query.filter_by(project_id=project_id).with_for_update()
    return query.all()
def _top_usage(self, req, body):
    """ Return a list of project_id's with the most usage """
    # Resolve a project's limit for a resource: its explicit quota if one
    # exists, otherwise the class default (the literal string 'None' when
    # no default is known either).
    def get_limit(quota, resource_name):
        if quota:
            return quota.hard_limit
        return default_quotas.get(resource_name, 'None')
    # Get the user specified limit, else default to the top 200 projects
    limit = int(SafeDict(body).get('top-usage', {}).get('limit', 200))
    result = []
    # Get the context for this request
    context = req.environ['cinder.context']
    # Verify the user accessing this resource is allowed?
    authorize_top_usage(context)
    # Get all the quota defaults
    default_quotas = QUOTAS.get_defaults(context)
    # Fetch the projects with the most usage
    rows = model_query(context, models.QuotaUsage, read_deleted="no").\
        filter(models.QuotaUsage.resource == "gigabytes").\
        order_by(models.QuotaUsage.in_use.desc()).limit(limit).all()
    for row in rows:
        # For each project, fetch the usage and used
        # Outer join so usages without an explicit quota row still appear
        # (quota comes back as None and get_limit falls back to defaults).
        quotas = model_query(context, models.QuotaUsage, models.Quota,
                             read_deleted="no").\
            outerjoin(models.Quota,
                      and_(models.QuotaUsage.project_id ==
                           models.Quota.project_id,
                           models.QuotaUsage.resource ==
                           models.Quota.resource))\
            .filter(models.QuotaUsage.project_id == row.project_id)\
            .all()
        for usage, quota in quotas:
            result.append({
                'project_id': usage.project_id,
                'resource': usage.resource,
                'hard_limit': get_limit(quota, usage.resource),
                'in_use': usage.in_use
            })
    return dict(quotas=result)
def _top_usage(self, req, body):
    """ Return a list of project_id's with the most usage """
    def get_limit(quota, resource_name):
        # Explicit quota wins; otherwise fall back to the class default.
        if quota:
            return quota.hard_limit
        return default_quotas.get(resource_name, 'None')
    # Caller-supplied limit on how many projects to report (default 200).
    limit = int(SafeDict(body).get('top-usage', {}).get('limit', 200))
    result = []
    context = req.environ['cinder.context']
    authorize_top_usage(context)
    default_quotas = QUOTAS.get_defaults(context)
    # Projects ranked by gigabytes in use, largest first.
    top_rows = (model_query(context, models.QuotaUsage, read_deleted="no")
                .filter(models.QuotaUsage.resource == "gigabytes")
                .order_by(models.QuotaUsage.in_use.desc())
                .limit(limit).all())
    for top in top_rows:
        # Outer join keeps usages that lack an explicit quota row; those
        # come back with quota=None and get_limit supplies the default.
        join_cond = and_(
            models.QuotaUsage.project_id == models.Quota.project_id,
            models.QuotaUsage.resource == models.Quota.resource)
        pairs = (model_query(context, models.QuotaUsage, models.Quota,
                             read_deleted="no")
                 .outerjoin(models.Quota, join_cond)
                 .filter(models.QuotaUsage.project_id == top.project_id)
                 .all())
        for usage, quota in pairs:
            result.append({'project_id': usage.project_id,
                           'resource': usage.resource,
                           'hard_limit': get_limit(quota, usage.resource),
                           'in_use': usage.in_use})
    return dict(quotas=result)
def tearDown(self):
    """Wipe the tables these tests populate, then run the base teardown."""
    # Order matters for FK-style relationships: children before Volume.
    for model in (sqla_api.models.Snapshot,
                  sqla_api.models.VolumeAttachment,
                  sqla_api.models.Volume):
        sqla_api.model_query(self.context, model).delete()
    sqla_api.get_session().query(dbms.KeyValue).delete()
    super(TestMemoryDBPersistence, self).tearDown()
def _rename_volume_host(currenthost, newhost):
    """Re-home all volumes from ``currenthost`` to ``newhost``.

    Fails the action when ``currenthost`` has no service entries or when
    the cinder-manage update itself fails (recording the traceback).
    """
    load_config_file(os.path.join(os.path.sep, "etc", "cinder", "cinder.conf"))
    services = model_query({}, models.Service, read_deleted="no",
                           session=get_session())
    services = services.filter(models.Service.host == currenthost)
    if services.all():
        try:
            cinder_manage_volume_update_host(currenthost, newhost)
        except Exception:
            # Was a bare ``except:`` which also swallowed SystemExit and
            # KeyboardInterrupt; catch Exception so those propagate.
            action_set({'traceback': traceback.format_exc()})
            action_fail("Cannot update host {}".format(currenthost))
    else:
        action_fail(
            "Cannot update host attribute from {}, {} not found".format(
                currenthost, currenthost))
def _get_non_shared_target_hosts(ctxt):
    """Return hosts whose backend reports shared_targets=False, plus the
    number of volumes on them still flagged shared_targets=True."""
    hosts = []
    vols_to_update = 0
    rpc.init(CONF)
    rpcapi = volume_rpcapi.VolumeAPI()
    for svc in objects.ServiceList.get_all_by_topic(ctxt, 'cinder-volume'):
        caps = rpcapi.get_capabilities(ctxt, svc.host, True)
        # Missing capability defaults to shared targets, i.e. skip.
        if caps.get('shared_targets', True):
            continue
        hosts.append(svc.host)
        vols_to_update += db_api.model_query(
            ctxt, models.Volume).filter_by(
            shared_targets=True, service_uuid=svc.uuid).count()
    return hosts, vols_to_update
def _get_non_shared_target_hosts(ctxt):
    """Return hosts whose backend reports shared_targets=False, plus the
    number of volumes on them still flagged shared_targets=True."""
    hosts = []
    vols_to_update = 0
    rpc.init(CONF)
    rpcapi = volume_rpcapi.VolumeAPI()
    for svc in objects.ServiceList.get_all_by_topic(ctxt,
                                                    constants.VOLUME_TOPIC):
        caps = rpcapi.get_capabilities(ctxt, svc.host, True)
        # Missing capability defaults to shared targets, i.e. skip.
        if caps.get('shared_targets', True):
            continue
        hosts.append(svc.host)
        vols_to_update += db_api.model_query(
            ctxt, models.Volume).filter_by(
            shared_targets=True, service_uuid=svc.uuid).count()
    return hosts, vols_to_update
def _quota_usage(self, req, body):
    """ Return a list of all quotas in the db and how much of the
    quota is in use """
    context = req.environ['cinder.context']
    # Verify the user accessing this resource is allowed?
    authorize_quota_usage(context)
    rows = (model_query(context, models.Quota, models.QuotaUsage,
                        read_deleted="no")
            .filter(models.QuotaUsage.project_id == models.Quota.project_id)
            .filter(models.QuotaUsage.resource == models.Quota.resource)
            .order_by(models.Quota.project_id).all())
    result = []
    for quota, usage in rows:
        result.append({'project_id': quota.project_id,
                       'resource': quota.resource,
                       'hard_limit': quota.hard_limit,
                       'in_use': usage.in_use})
    return dict(quotas=result)
def _get_non_shared_target_hosts(ctxt):
    """Return hosts that do not use shared targets, plus the number of
    volumes on them still flagged shared_targets=True."""
    hosts = []
    vols_to_update = 0
    rpc.init(CONF)
    rpcapi = volume_rpcapi.VolumeAPI()
    for svc in objects.ServiceList.get_all_by_topic(ctxt,
                                                    constants.VOLUME_TOPIC):
        caps = rpcapi.get_capabilities(ctxt, svc.host, True)
        # Select only non iSCSI connections and iSCSI that are explicit
        not_iscsi = caps.get('storage_protocol') != 'iSCSI'
        if not_iscsi or not caps.get('shared_targets', True):
            hosts.append(svc.host)
            vols_to_update += db_api.model_query(
                ctxt, models.Volume).filter_by(
                shared_targets=True, service_uuid=svc.uuid).count()
    return hosts, vols_to_update
def test_transfer_accept(self):
    """Accepting a transfer fills in the destination project fields."""
    vol = utils.create_volume(self.ctxt)
    transfer_id = self._create_transfer(vol['id'], vol['project_id'])
    other_ctxt = context.RequestContext(user_id=fake.USER2_ID,
                                        project_id=fake.PROJECT2_ID)
    record = db.transfer_get(other_ctxt.elevated(), transfer_id)
    # Before acceptance only the source side is populated.
    self.assertEqual(vol.project_id, record['source_project_id'])
    self.assertFalse(record['accepted'])
    self.assertIsNone(record['destination_project_id'])
    db.transfer_accept(other_ctxt.elevated(), transfer_id,
                       fake.USER2_ID, fake.PROJECT2_ID)
    # Re-read the row including soft-deleted entries.
    record = db_api.model_query(
        other_ctxt.elevated(), models.Transfer,
        read_deleted='yes').filter_by(id=transfer_id).first()
    self.assertEqual(vol.project_id, record['source_project_id'])
    self.assertTrue(record['accepted'])
    self.assertEqual(fake.PROJECT2_ID, record['destination_project_id'])
def _get_non_shared_target_hosts(ctxt):
    """Return hosts that do not use shared targets, plus the number of
    volumes on them still flagged shared_targets=True."""
    hosts = []
    vols_to_update = 0
    rpc.init(CONF)
    rpcapi = volume_rpcapi.VolumeAPI()
    services = objects.ServiceList.get_all_by_topic(ctxt,
                                                    constants.VOLUME_TOPIC)
    for svc in services:
        caps = rpcapi.get_capabilities(ctxt, svc.host, True)
        # Select only non iSCSI connections and iSCSI that are explicit
        if (caps.get('storage_protocol') != 'iSCSI'
                or not caps.get('shared_targets', True)):
            hosts.append(svc.host)
            vols_to_update += db_api.model_query(
                ctxt, models.Volume).filter_by(
                shared_targets=True, service_uuid=svc.uuid).count()
    return hosts, vols_to_update
def test_transfer_accept(self):
    """Accepting a transfer fills in the destination project fields."""
    src_volume = utils.create_volume(self.ctxt)
    tid = self._create_transfer(src_volume['id'], src_volume['project_id'])
    dest_ctxt = context.RequestContext(user_id=fake.USER2_ID,
                                       project_id=fake.PROJECT2_ID)
    row = db.transfer_get(dest_ctxt.elevated(), tid)
    # Unaccepted transfer: source set, destination empty.
    self.assertEqual(src_volume.project_id, row['source_project_id'])
    self.assertFalse(row['accepted'])
    self.assertIsNone(row['destination_project_id'])
    db.transfer_accept(dest_ctxt.elevated(), tid,
                       fake.USER2_ID, fake.PROJECT2_ID)
    # Re-read the row including soft-deleted entries.
    row = db_api.model_query(
        dest_ctxt.elevated(), models.Transfer, read_deleted='yes'
    ).filter_by(id=tid).first()
    self.assertEqual(src_volume.project_id, row['source_project_id'])
    self.assertTrue(row['accepted'])
    self.assertEqual(fake.PROJECT2_ID, row['destination_project_id'])
def _get_quota_projects(self, context, project_id):
    """Get project ids that have quota_usage entries."""
    if project_id:
        model = models.QuotaUsage
        # Cheap existence probe before committing to work on one project.
        exists_clause = db_api.sql.exists().where(
            db_api.and_(model.project_id == project_id, ~model.deleted))
        if not context.session.query(exists_clause).scalar():
            print(
                'Project id %s has no quota usage. Nothing to do.'
                % project_id,
            )
            return []
        return [project_id]
    # No project requested: report every project with usage rows.
    rows = db_api.model_query(
        context, models.QuotaUsage,
        read_deleted="no").with_entities('project_id').distinct().all()
    return [row.project_id for row in rows]
def test_transfer_accept_with_detail_records(self, mock_notify):
    """Accepting a transfer populates the detail columns on the DB row."""
    volume_svc = self.start_service('volume', host='test_host')
    self.addCleanup(volume_svc.stop)
    transfers = transfer_api.API()
    vol = utils.create_volume(self.ctxt, updated_at=self.updated_at)
    xfer = transfers.create(self.ctxt, vol.id, 'Description')
    # A freshly created transfer only knows about the source project.
    self.assertEqual(vol.project_id, xfer['source_project_id'])
    self.assertIsNone(xfer['destination_project_id'])
    self.assertFalse(xfer['accepted'])
    # Accept the transfer as a different user/project.
    self.ctxt.user_id = fake.USER2_ID
    self.ctxt.project_id = fake.PROJECT2_ID
    transfers.accept(self.ctxt, xfer['id'], xfer['auth_key'])
    record = db_api.model_query(
        self.ctxt, models.Transfer,
        read_deleted='yes').filter_by(id=xfer['id']).first()
    self.assertEqual(vol.project_id, record['source_project_id'])
    self.assertTrue(record['accepted'])
    self.assertEqual(fake.PROJECT2_ID, record['destination_project_id'])
def model_query(context, model, *args, **kwargs):
    """ Wrappers the Base Model Query to provide PowerVC-specific logic

    Thin pass-through: forwards all positional and keyword arguments to
    ``cinder_db.model_query`` unchanged and returns its query object.
    """
    return cinder_db.model_query(context, model, *args, **kwargs)