def tearDown(self):
    """Purge every row the tests created, then run the parent tearDown."""
    # Order matters for FK integrity: snapshots/attachments before volumes.
    for model in (sqla_api.models.Snapshot,
                  sqla_api.models.VolumeAttachment,
                  sqla_api.models.Volume):
        sqla_api.model_query(self.context, model).delete()
    sqla_api.get_session().query(dbms.KeyValue).delete()
    super(TestMemoryDBPersistence, self).tearDown()
def test_set_key_values(self):
    """set_key_value persists a key/value pair into the key_value table."""
    # Table must start empty so the final comparison is meaningful.
    initial = sqla_api.get_session().query(dbms.KeyValue).all()
    self.assertListEqual([], initial)
    expected = [dbms.KeyValue(key='key', value='value')]
    self.persistence.set_key_value(expected[0])
    stored = sqla_api.get_session().query(dbms.KeyValue).all()
    self.assertListEqualObj(expected, stored)
def _ibm_volume_get_all_except_key_query(context, key=None, session=None):
    """Build a query for all volumes that do NOT carry ``key`` in metadata.

    :param context: request context (admin callers also get
        volume_admin_metadata eagerly loaded)
    :param key: metadata key used to exclude volumes (e.g. a boot marker)
    :param session: optional DB session; created lazily when omitted
    :returns: a SQLAlchemy query over cinder_models.Volume
    """
    # Find all the ids in volumes that has is_boot_volume key in
    # volume_metadata table. Create a subquery.
    if not session:
        # it takes ~ 4s to get_session for the first time.
        session = cinder_db.get_session()
    with session.begin():
        boot_vols = model_query(context, cinder_models.Volume.id,
                                session=session).\
            join("volume_metadata").\
            filter(cinder_models.VolumeMetadata.key == key).\
            subquery()
        # Build the base query once; the two original branches differed
        # only in the extra admin-metadata joinedload.
        query = model_query(context, cinder_models.Volume,
                            session=session).\
            options(joinedload('volume_metadata'))
        if cinder_db.is_admin_context(context):
            query = query.options(joinedload('volume_admin_metadata'))
        # Filter out all the boot volumes
        return query.filter(not_(cinder_models.Volume.id.in_(boot_vols)))
def test_default_volume_type_missing_in_db(self):
    """Ensure an empty dict is returned when the default volume type
    is not in the database.
    """
    # NOTE: the previous version fetched a DB session it never used,
    # and the docstring wrongly claimed an exception was expected.
    default_vol_type = volume_types.get_default_volume_type()
    self.assertEqual(default_vol_type, {})
def remove_services(args):
    """Remove cinder services for a given host (or all non-default hosts).

    Reads the target host from the action parameters; when the host is
    "unused"/empty, every service whose host is not in DEFAULT_SERVICES
    is removed instead. Reports removed hosts via action_set.
    """
    load_config_file(os.path.join(os.path.sep, "etc", "cinder",
                                  "cinder.conf"))
    host = action_get(key="host")
    services = model_query({}, models.Service, read_deleted="no",
                           session=get_session())
    if host not in ("unused", "",):
        services = services.filter(models.Service.host == host)
    else:
        ands = []
        for service in DEFAULT_SERVICES:
            ands.append(and_(models.Service.host != service))
        services = services.filter(*ands)
    removed_services = []
    ctxt = context.get_admin_context()
    for service in services.all():
        log("Removing service:%d, hostname:%s" % (service.id, service.host))
        try:
            # Liberty onwards uses cinder-manage; older releases go
            # straight to the DB API.
            if CompareOpenStackReleases(os_release("cinder")) >= "liberty":
                cinder_manage_remove(service.binary, service.host)
            else:
                db.service_destroy(ctxt, service.id)
        except Exception:
            # BUG FIX: was a bare `except:` which also swallowed
            # SystemExit/KeyboardInterrupt. Record failure and continue.
            action_set({'traceback': traceback.format_exc()})
            action_fail("Cannot remove service: %s" % service.host)
        else:
            removed_services.append(service.host)
    action_set({'removed': ",".join(removed_services)})
def set_key_value(self, key_value):
    """Upsert a key/value pair inside a single transaction."""
    session = sqla_api.get_session()
    with session.begin():
        # Reuse the existing row when present, otherwise build a new one.
        existing = self._get_kv(key_value.key, session)
        row = existing[0] if existing else KeyValue(key=key_value.key)
        row.value = key_value.value
        session.add(row)
def remove_services():
    """Remove cinder services for a given host (or all non-default hosts).

    Reads the target host from the action parameters; when the host is
    "unused"/empty, every service whose host is not in DEFAULT_SERVICES
    is removed instead. Reports removed hosts via action_set.
    """
    load_config_file(os.path.join(os.path.sep, "etc", "cinder",
                                  "cinder.conf"))
    host = action_get(key="host")
    services = model_query({}, models.Service, read_deleted="no",
                           session=get_session())
    if host not in ("unused", "",):
        services = services.filter(models.Service.host == host)
    else:
        ands = []
        for service in DEFAULT_SERVICES:
            ands.append(and_(models.Service.host != service))
        services = services.filter(*ands)
    removed_services = []
    ctxt = context.get_admin_context()
    for service in services.all():
        log("Removing service:%d, hostname:%s" % (service.id, service.host))
        try:
            # NOTE(review): plain string comparison of release names is
            # lexicographic, not chronological ("mitaka" > "liberty" works,
            # but e.g. "kilo" < "liberty" only by accident of spelling) —
            # consider CompareOpenStackReleases as in the sibling variant.
            if os_release("cinder") >= "liberty":
                cinder_manage_remove(service.binary, service.host)
            else:
                db.service_destroy(ctxt, service.id)
        except Exception:
            # BUG FIX: was a bare `except:` which also swallowed
            # SystemExit/KeyboardInterrupt. Record failure and continue.
            action_set({'traceback': traceback.format_exc()})
            action_fail("Cannot remove service: %s" % service.host)
        else:
            removed_services.append(service.host)
    action_set({'removed': ",".join(removed_services)})
def _get_kv(self, key=None, session=None):
    """Fetch KeyValue rows: all of them when key is None, else at most one
    (returned as a 0- or 1-element list)."""
    session = session or sqla_api.get_session()
    base = session.query(KeyValue)
    if key is None:
        return base.all()
    match = base.filter_by(key=key).first()
    return [match] if match else []
def ibm_hmc_delete(context, hmc_uuid, session=None):
    """Removes an existing HMC instance from the Database."""
    if not session:
        session = cinder_db.get_session()
    with session.begin():
        match = model_query(
            context, paxes_models.HmcDTO,
            session=session).filter_by(hmc_uuid=hmc_uuid).first()
        # Silently succeed when the HMC is already gone.
        if match:
            match.delete(session=session)
def _get_kv(self, key=None, session=None):
    """Return key/value entries.

    When the caller supplies a session, the raw ORM rows are returned so
    they can keep being used inside that session; otherwise the rows are
    converted to plain objects.KeyValue instances.

    :param key: optional key to filter by; None returns all entries
    :param session: optional DB session
    """
    # BUG FIX: remember whether the caller provided a session *before*
    # defaulting it — previously `if session:` was always true, so the
    # objects.KeyValue conversion below was dead code.
    caller_session = session is not None
    session = session or sqla_api.get_session()
    query = session.query(KeyValue)
    if key is not None:
        query = query.filter_by(key=key)
    res = query.all()
    # If we want to use the result as an ORM
    if caller_session:
        return res
    return [objects.KeyValue(r.key, r.value) for r in res]
def storage_node_update(context, storage_id, values, session=None):
    """Updates an existing Storage Node in the Database.

    Returns the updated node, or None when it does not exist.
    """
    if not session:
        session = cinder_db.get_session()
    with session.begin():
        node_ref = storage_node_get(context, storage_id, session=session)
        if node_ref:
            node_ref.update(values)
            node_ref.save(session=session)
        return node_ref
def ibm_hmc_update(context, hmc_uuid, values, session=None):
    """Updates an existing HMC instance in the Database.

    Returns the decoded, updated HMC record.
    """
    if not session:
        session = cinder_db.get_session()
    with session.begin():
        hmc_ref = model_query(
            context, paxes_models.HmcDTO,
            session=session).filter_by(hmc_uuid=hmc_uuid).first()
        # NOTE(review): raises AttributeError when the HMC is missing —
        # presumably callers guarantee it exists; confirm before hardening.
        encoded = _encode_hmc_values(values)
        hmc_ref.update(encoded)
        hmc_ref.save(session=session)
        return _decode_hmc_values(hmc_ref)
def volume_restricted_metadata_delete(context, volume_id, key):
    """Soft-delete one restricted-metadata item of a volume."""
    session = cinder_db.get_session()
    # Ensure that it exists, throw exception if it doesn't
    _volume_restricted_metadata_get_item(context, volume_id, key, session)
    # Flag the item as deleted rather than removing the row;
    # updated_at is pinned to its current value on purpose.
    query = _volume_restricted_metadata_query(context, volume_id)
    query.filter_by(key=key).update(
        {'deleted': True,
         'deleted_at': timeutils.utcnow(),
         'updated_at': literal_column('updated_at')})
def ibm_hmc_clusters_delete(context, hmc_uuid, host_name, session=None):
    """Dissociates an existing Cluster from an HMC in the Database."""
    if not session:
        session = cinder_db.get_session()
    with session.begin():
        match = model_query(
            context, paxes_models.HmcClustersDTO,
            session=session).filter_by(
                hmc_uuid=hmc_uuid, host_name=host_name).first()
        # No-op when the association does not exist.
        if match:
            match.delete(session=session)
def ___get_active_by_window_metadata(self, context, period_start,
                                     period_stop=None, project_id=None,
                                     metadata=None, use_slave=False):
    """Simulate bottom most layer

    Returns volumes (as dicts) that were active in the given time window,
    optionally restricted to a project and to exact metadata key/value
    pairs.

    :param context: wsgi context
    :param period_start: Datetime
    :param period_stop: Datetime
    :param project_id: String|None
    :param metadata: Dict|None
    :param use_slave: Boolean
    """
    # One VolumeMetadata alias per metadata filter so each key/value
    # pair is matched by its own join below.
    if metadata:
        aliases = [aliased(models.VolumeMetadata) for i in metadata]
    else:
        aliases = []
    session = get_session(use_slave=use_slave)
    query = session.query(
        models.Volume,
        *aliases
    )
    # "Active in the window": not terminated before the window started.
    query = query.filter(or_(models.Volume.terminated_at == null(),
                             models.Volume.terminated_at > period_start))
    if period_stop:
        query = query.filter(models.Volume.launched_at < period_stop)
    if project_id:
        query = query.filter_by(project_id=project_id)
    if metadata:
        for keypair, alias in zip(metadata.items(), aliases):
            query = query.filter(alias.key == keypair[0])
            query = query.filter(alias.value == keypair[1])
            query = query.filter(alias.volume_id == models.Volume.id)
            # Accept metadata rows that are live, or that were deleted
            # together with the volume itself.
            query = query.filter(or_(
                alias.deleted_at == null(),
                alias.deleted_at == models.Volume.deleted_at
            ))
    volumes = []
    for tup in query.all():
        # If no metadata filters, then no aliases.
        if aliases:
            volume = tup[0]
        else:
            volume = tup
        volumes.append(dict(volume))
    return volumes
def onboard_task_volume_update(context, task_id, vol_uuid, values,
                               session=None):
    """Updates the Volume record for the given On-board Task.

    :param task_id: id of the on-board task
    :param vol_uuid: UUID of the volume record to update
    :param values: column values to apply; None values are dropped
    :param session: optional DB session
    :returns: the updated task-volume record
    """
    # BUG FIX: strip entries whose *value* is None (the filter previously
    # tested the key, which is never None) — consistent with
    # onboard_task_update.
    values = dict([(k, v) for k, v in values.iteritems() if v is not None])
    if not session:
        session = cinder_db.get_session()
    with session.begin():
        query = model_query(
            context, paxes_models.OnboardTaskVolumeDTO, session=session)
        task_ref = query.filter_by(task_id=task_id,
                                   volume_uuid=vol_uuid).first()
        task_ref.update(values)
        task_ref.save(session=session)
        return task_ref
def volume_restricted_metadata_update_or_create(context, volume_id,
                                                metadata):
    """Create or update restricted-metadata items for a volume.

    Returns the metadata dict that was passed in.
    """
    session = cinder_db.get_session()
    item = None
    for key, value in metadata.iteritems():
        # EAFP: reuse the existing row; build a fresh DTO when absent.
        try:
            item = _volume_restricted_metadata_get_item(
                context, volume_id, key, session)
        except exception.VolumeMetadataNotFound:
            item = paxes_models.VolumeRestrictedMetadataDTO()
        item.update({"key": key,
                     "value": value,
                     "volume_id": volume_id,
                     "deleted": False})
        item.save(session=session)
    return metadata
def _rename_volume_host(currenthost, newhost):
    """Re-point volumes registered under ``currenthost`` to ``newhost``.

    Fails the action (via action_fail) when the current host is unknown
    or the update itself raises.
    """
    load_config_file(os.path.join(os.path.sep, "etc", "cinder",
                                  "cinder.conf"))
    services = model_query({}, models.Service, read_deleted="no",
                           session=get_session())
    services = services.filter(models.Service.host == currenthost)
    if services.all():
        try:
            cinder_manage_volume_update_host(currenthost, newhost)
        except Exception:
            # BUG FIX: was a bare `except:` which also swallowed
            # SystemExit/KeyboardInterrupt.
            action_set({'traceback': traceback.format_exc()})
            action_fail("Cannot update host {}".format(currenthost))
    else:
        action_fail(
            "Cannot update host attribute from {}, {} not found".format(
                currenthost, currenthost))
def onboard_task_update(context, task_id, values, session=None):
    """Updates the given On-board Task in the DB.

    None values are dropped; reaching a final status stamps the end time.
    """
    values = dict([(k, v) for k, v in values.iteritems() if v is not None])
    # If this is a final status, then set the end date/time.
    if values.get('status', '') in ('completed', 'failed'):
        values['ended'] = timeutils.utcnow()
    if not session:
        session = cinder_db.get_session()
    with session.begin():
        task_ref = model_query(
            context, paxes_models.OnboardTaskDTO,
            session=session).filter_by(id=task_id).first()
        task_ref.update(values)
        task_ref.save(session=session)
        return task_ref
def service_delete(context, service_id, session=None):
    """Deletes both a Service and the Storage Node from the Database."""
    if not session:
        session = cinder_db.get_session()
    with session.begin():
        # Remove the Service entry first, then its related Storage Node.
        service = model_query(
            context, cinder_models.Service,
            session=session).filter_by(id=service_id).first()
        if service:
            service.delete(session=session)
        node = model_query(
            context, paxes_models.StorageNodeDTO,
            session=session).filter_by(service_id=service_id).first()
        if node:
            node.delete(session=session)
def onboard_task_delete(context, task_id, session=None):
    """Deletes the given On-board Task from the DB."""
    if not session:
        session = cinder_db.get_session()
    with session.begin():
        # Cleanup both the Task and Task Volume tables for this Task ID.
        task = model_query(
            context, paxes_models.OnboardTaskDTO,
            session=session).filter_by(task_id=task_id).first()
        task_volumes = model_query(
            context, paxes_models.OnboardTaskVolumeDTO,
            session=session).filter_by(task_id=task_id).all()
        # Delete the child (volume) rows before the task itself.
        for task_volume in task_volumes:
            task_volume.delete(session=session)
        if task:
            task.delete(session=session)
def setUp(self):
    """Create six volumes (each with a metadata row) and soft-age four:
    two deleted 20 days ago, two 60 days ago."""
    super(PurgeDeletedTest, self).setUp()
    self.context = context.get_admin_context()
    self.engine = db_api.get_engine()
    self.session = db_api.get_session()
    self.conn = self.engine.connect()
    self.volumes = sqlalchemyutils.get_table(self.engine, "volumes")
    # The volume_metadata table has a FK of volume_id
    self.vm = sqlalchemyutils.get_table(self.engine, "volume_metadata")
    self.uuidstrs = [uuid.uuid4().hex for _ in range(6)]
    # Insert 6 rows into each table.
    for uuidstr in self.uuidstrs:
        self.conn.execute(self.volumes.insert().values(id=uuidstr))
        self.conn.execute(self.vm.insert().values(volume_id=uuidstr))
    # Rows [1:3] were deleted 20 days ago, rows [4:6] 60 days ago.
    old = datetime.now() - timedelta(days=20)
    older = datetime.now() - timedelta(days=60)
    for table, col in ((self.volumes, self.volumes.c.id),
                       (self.vm, self.vm.c.volume_id)):
        for ids, stamp in ((self.uuidstrs[1:3], old),
                           (self.uuidstrs[4:6], older)):
            stmt = table.update().where(col.in_(ids)).values(
                deleted_at=stamp)
            self.conn.execute(stmt)
def _get_quota_projects(self, ctxt, project_id):
    """Get project ids that have quota_usage entries.

    :param ctxt: request context used for the DB query
    :param project_id: when given, return [project_id] if it has usage
        rows (empty list otherwise); when falsy, return every project id
        with a non-deleted quota usage entry.
    """
    if project_id:
        model = models.QuotaUsage
        session = db_api.get_session()
        # If the project does not exist
        if not session.query(db_api.sql.exists().where(
                db_api.and_(model.project_id == project_id,
                            ~model.deleted))).scalar():
            print('Project id %s has no quota usage. Nothing to do.'
                  % project_id)
            return []
        return [project_id]
    # BUG FIX: pass the request context (ctxt); the previous code passed
    # the imported ``context`` module instead.
    projects = db_api.model_query(ctxt, models.QuotaUsage,
                                  read_deleted="no").\
        with_entities('project_id').\
        distinct().\
        all()
    project_ids = [row.project_id for row in projects]
    return project_ids
def setUp(self):
    """Build purge-test fixtures: 6 rows in each related table, with
    rows [1:3] soft-deleted 20 days ago and rows [4:6] 60 days ago
    (rows 0 and 3 are left untouched)."""
    super(PurgeDeletedTest, self).setUp()
    self.context = context.get_admin_context()
    self.engine = db_api.get_engine()
    self.session = db_api.get_session()
    self.conn = self.engine.connect()
    self.volumes = sqlalchemyutils.get_table(
        self.engine, "volumes")
    # The volume_metadata table has a FK of volume_id
    self.vm = sqlalchemyutils.get_table(
        self.engine, "volume_metadata")
    self.vol_types = sqlalchemyutils.get_table(
        self.engine, "volume_types")
    # The volume_type_projects table has a FK of volume_type_id
    self.vol_type_proj = sqlalchemyutils.get_table(
        self.engine, "volume_type_projects")
    self.snapshots = sqlalchemyutils.get_table(
        self.engine, "snapshots")
    self.sm = sqlalchemyutils.get_table(
        self.engine, "snapshot_metadata")
    self.vgm = sqlalchemyutils.get_table(
        self.engine, "volume_glance_metadata")
    self.uuidstrs = []
    for unused in range(6):
        self.uuidstrs.append(uuid.uuid4().hex)
    # Add 6 rows to table
    for uuidstr in self.uuidstrs:
        ins_stmt = self.volumes.insert().values(id=uuidstr)
        self.conn.execute(ins_stmt)
        ins_stmt = self.vm.insert().values(volume_id=uuidstr)
        self.conn.execute(ins_stmt)
        # volume_glance_metadata gets one row per volume ...
        ins_stmt = self.vgm.insert().values(
            volume_id=uuidstr, key='image_name', value='test')
        self.conn.execute(ins_stmt)
        ins_stmt = self.vol_types.insert().values(id=uuidstr)
        self.conn.execute(ins_stmt)
        ins_stmt = self.vol_type_proj.insert().\
            values(volume_type_id=uuidstr)
        self.conn.execute(ins_stmt)
        ins_stmt = self.snapshots.insert().values(
            id=uuidstr, volume_id=uuidstr)
        self.conn.execute(ins_stmt)
        ins_stmt = self.sm.insert().values(snapshot_id=uuidstr)
        self.conn.execute(ins_stmt)
        # ... and one row per snapshot.
        ins_stmt = self.vgm.insert().values(
            snapshot_id=uuidstr, key='image_name', value='test')
        self.conn.execute(ins_stmt)
    # Set 4 of them deleted, 2 are 60 days ago, 2 are 20 days ago
    old = timeutils.utcnow() - datetime.timedelta(days=20)
    older = timeutils.utcnow() - datetime.timedelta(days=60)
    make_vol_old = self.volumes.update().\
        where(self.volumes.c.id.in_(self.uuidstrs[1:3]))\
        .values(deleted_at=old)
    make_vol_older = self.volumes.update().\
        where(self.volumes.c.id.in_(self.uuidstrs[4:6]))\
        .values(deleted_at=older)
    make_vol_meta_old = self.vm.update().\
        where(self.vm.c.volume_id.in_(self.uuidstrs[1:3]))\
        .values(deleted_at=old)
    make_vol_meta_older = self.vm.update().\
        where(self.vm.c.volume_id.in_(self.uuidstrs[4:6]))\
        .values(deleted_at=older)
    make_vol_types_old = self.vol_types.update().\
        where(self.vol_types.c.id.in_(self.uuidstrs[1:3]))\
        .values(deleted_at=old)
    make_vol_types_older = self.vol_types.update().\
        where(self.vol_types.c.id.in_(self.uuidstrs[4:6]))\
        .values(deleted_at=older)
    make_vol_type_proj_old = self.vol_type_proj.update().\
        where(self.vol_type_proj.c.volume_type_id.in_(self.uuidstrs[1:3]))\
        .values(deleted_at=old)
    make_vol_type_proj_older = self.vol_type_proj.update().\
        where(self.vol_type_proj.c.volume_type_id.in_(self.uuidstrs[4:6]))\
        .values(deleted_at=older)
    make_snap_old = self.snapshots.update().\
        where(self.snapshots.c.id.in_(self.uuidstrs[1:3]))\
        .values(deleted_at=old)
    make_snap_older = self.snapshots.update().\
        where(self.snapshots.c.id.in_(self.uuidstrs[4:6]))\
        .values(deleted_at=older)
    make_snap_meta_old = self.sm.update().\
        where(self.sm.c.snapshot_id.in_(self.uuidstrs[1:3]))\
        .values(deleted_at=old)
    make_snap_meta_older = self.sm.update().\
        where(self.sm.c.snapshot_id.in_(self.uuidstrs[4:6]))\
        .values(deleted_at=older)
    make_vol_glance_meta_old = self.vgm.update().\
        where(self.vgm.c.volume_id.in_(self.uuidstrs[1:3]))\
        .values(deleted_at=old)
    make_vol_glance_meta_older = self.vgm.update().\
        where(self.vgm.c.volume_id.in_(self.uuidstrs[4:6]))\
        .values(deleted_at=older)
    make_snap_glance_meta_old = self.vgm.update().\
        where(self.vgm.c.snapshot_id.in_(self.uuidstrs[1:3]))\
        .values(deleted_at=old)
    make_snap_glance_meta_older = self.vgm.update().\
        where(self.vgm.c.snapshot_id.in_(self.uuidstrs[4:6]))\
        .values(deleted_at=older)
    self.conn.execute(make_vol_old)
    self.conn.execute(make_vol_older)
    self.conn.execute(make_vol_meta_old)
    self.conn.execute(make_vol_meta_older)
    self.conn.execute(make_vol_types_old)
    self.conn.execute(make_vol_types_older)
    self.conn.execute(make_vol_type_proj_old)
    self.conn.execute(make_vol_type_proj_older)
    self.conn.execute(make_snap_old)
    self.conn.execute(make_snap_older)
    self.conn.execute(make_snap_meta_old)
    self.conn.execute(make_snap_meta_older)
    self.conn.execute(make_vol_glance_meta_old)
    self.conn.execute(make_vol_glance_meta_older)
    self.conn.execute(make_snap_glance_meta_old)
    self.conn.execute(make_snap_glance_meta_older)
def test_get_all_volume_types(self):
    """Ensures that all volume types can be retrieved."""
    # Compare the API result against a direct row count from the DB.
    expected_count = db_api.get_session().query(models.VolumeTypes).count()
    retrieved = volume_types.get_all_types(self.ctxt)
    self.assertEqual(expected_count, len(retrieved))
def delete_key_value(self, key_value):
    """Delete the row matching key_value.key (no-op when absent)."""
    session = sqla_api.get_session()
    session.query(KeyValue).filter_by(key=key_value.key).delete()
def setUp(self):
    """Build purge-test fixtures: 6 rows in each related table, with
    rows [1:3] soft-deleted 20 days ago and rows [4:6] 60 days ago
    (rows 0 and 3 are left untouched)."""
    super(PurgeDeletedTest, self).setUp()
    self.context = context.get_admin_context()
    self.engine = db_api.get_engine()
    self.session = db_api.get_session()
    self.conn = self.engine.connect()
    self.volumes = sqlalchemyutils.get_table(self.engine, "volumes")
    # The volume_metadata table has a FK of volume_id
    self.vm = sqlalchemyutils.get_table(self.engine, "volume_metadata")
    self.vol_types = sqlalchemyutils.get_table(self.engine, "volume_types")
    # The volume_type_projects table has a FK of volume_type_id
    self.vol_type_proj = sqlalchemyutils.get_table(self.engine,
                                                   "volume_type_projects")
    self.snapshots = sqlalchemyutils.get_table(self.engine, "snapshots")
    self.sm = sqlalchemyutils.get_table(self.engine, "snapshot_metadata")
    self.vgm = sqlalchemyutils.get_table(self.engine,
                                         "volume_glance_metadata")
    self.uuidstrs = []
    for unused in range(6):
        self.uuidstrs.append(uuid.uuid4().hex)
    # Add 6 rows to table
    for uuidstr in self.uuidstrs:
        ins_stmt = self.volumes.insert().values(id=uuidstr)
        self.conn.execute(ins_stmt)
        ins_stmt = self.vm.insert().values(volume_id=uuidstr)
        self.conn.execute(ins_stmt)
        # volume_glance_metadata gets one row per volume ...
        ins_stmt = self.vgm.insert().values(volume_id=uuidstr,
                                            key='image_name', value='test')
        self.conn.execute(ins_stmt)
        ins_stmt = self.vol_types.insert().values(id=uuidstr)
        self.conn.execute(ins_stmt)
        ins_stmt = self.vol_type_proj.insert().\
            values(volume_type_id=uuidstr)
        self.conn.execute(ins_stmt)
        ins_stmt = self.snapshots.insert().values(id=uuidstr,
                                                  volume_id=uuidstr)
        self.conn.execute(ins_stmt)
        ins_stmt = self.sm.insert().values(snapshot_id=uuidstr)
        self.conn.execute(ins_stmt)
        # ... and one row per snapshot.
        ins_stmt = self.vgm.insert().values(snapshot_id=uuidstr,
                                            key='image_name', value='test')
        self.conn.execute(ins_stmt)
    # Set 4 of them deleted, 2 are 60 days ago, 2 are 20 days ago
    old = timeutils.utcnow() - datetime.timedelta(days=20)
    older = timeutils.utcnow() - datetime.timedelta(days=60)
    make_vol_old = self.volumes.update().\
        where(self.volumes.c.id.in_(self.uuidstrs[1:3]))\
        .values(deleted_at=old)
    make_vol_older = self.volumes.update().\
        where(self.volumes.c.id.in_(self.uuidstrs[4:6]))\
        .values(deleted_at=older)
    make_vol_meta_old = self.vm.update().\
        where(self.vm.c.volume_id.in_(self.uuidstrs[1:3]))\
        .values(deleted_at=old)
    make_vol_meta_older = self.vm.update().\
        where(self.vm.c.volume_id.in_(self.uuidstrs[4:6]))\
        .values(deleted_at=older)
    make_vol_types_old = self.vol_types.update().\
        where(self.vol_types.c.id.in_(self.uuidstrs[1:3]))\
        .values(deleted_at=old)
    make_vol_types_older = self.vol_types.update().\
        where(self.vol_types.c.id.in_(self.uuidstrs[4:6]))\
        .values(deleted_at=older)
    make_vol_type_proj_old = self.vol_type_proj.update().\
        where(self.vol_type_proj.c.volume_type_id.in_(self.uuidstrs[1:3]))\
        .values(deleted_at=old)
    make_vol_type_proj_older = self.vol_type_proj.update().\
        where(self.vol_type_proj.c.volume_type_id.in_(self.uuidstrs[4:6]))\
        .values(deleted_at=older)
    make_snap_old = self.snapshots.update().\
        where(self.snapshots.c.id.in_(self.uuidstrs[1:3]))\
        .values(deleted_at=old)
    make_snap_older = self.snapshots.update().\
        where(self.snapshots.c.id.in_(self.uuidstrs[4:6]))\
        .values(deleted_at=older)
    make_snap_meta_old = self.sm.update().\
        where(self.sm.c.snapshot_id.in_(self.uuidstrs[1:3]))\
        .values(deleted_at=old)
    make_snap_meta_older = self.sm.update().\
        where(self.sm.c.snapshot_id.in_(self.uuidstrs[4:6]))\
        .values(deleted_at=older)
    make_vol_glance_meta_old = self.vgm.update().\
        where(self.vgm.c.volume_id.in_(self.uuidstrs[1:3]))\
        .values(deleted_at=old)
    make_vol_glance_meta_older = self.vgm.update().\
        where(self.vgm.c.volume_id.in_(self.uuidstrs[4:6]))\
        .values(deleted_at=older)
    make_snap_glance_meta_old = self.vgm.update().\
        where(self.vgm.c.snapshot_id.in_(self.uuidstrs[1:3]))\
        .values(deleted_at=old)
    make_snap_glance_meta_older = self.vgm.update().\
        where(self.vgm.c.snapshot_id.in_(self.uuidstrs[4:6]))\
        .values(deleted_at=older)
    self.conn.execute(make_vol_old)
    self.conn.execute(make_vol_older)
    self.conn.execute(make_vol_meta_old)
    self.conn.execute(make_vol_meta_older)
    self.conn.execute(make_vol_types_old)
    self.conn.execute(make_vol_types_older)
    self.conn.execute(make_vol_type_proj_old)
    self.conn.execute(make_vol_type_proj_older)
    self.conn.execute(make_snap_old)
    self.conn.execute(make_snap_older)
    self.conn.execute(make_snap_meta_old)
    self.conn.execute(make_snap_meta_older)
    self.conn.execute(make_vol_glance_meta_old)
    self.conn.execute(make_vol_glance_meta_older)
    self.conn.execute(make_snap_glance_meta_old)
    self.conn.execute(make_snap_glance_meta_older)
def _check_sync(self, project_id, do_fix):
    """Check the quotas and reservations optionally fixing them.

    :param project_id: limit the check to this project; falsy checks
        every project that has quota usage rows.
    :param do_fix: when True, write corrected in_use/reserved values back.
    :returns: True when any discrepancy was found.
    """
    ctxt = context.get_admin_context()
    # Get the quota usage types and their sync methods
    resources = quota.QUOTAS.resources
    # Get all project ids that have quota usage. Method doesn't lock
    # projects, since newly added projects should not be out of sync and
    # projects removed will just turn nothing on the quota usage.
    projects = self._get_quota_projects(ctxt, project_id)
    session = db_api.get_session()
    action_msg = ' - fixed' if do_fix else ''
    discrepancy = False
    # NOTE: It's important to always get the quota first and then the
    # reservations to prevent deadlocks with quota commit and rollback from
    # running Cinder services.
    for project in projects:
        # One transaction per project so a failure doesn't undo the rest.
        with session.begin():
            print('Processing quota usage for project %s' % project)
            # We only want to sync existing quota usage rows
            usages = self._get_usages(ctxt, session, resources, project)
            # Check for duplicated entries (bug#1484343)
            usages, duplicates_found = self._check_duplicates(
                ctxt, session, usages, do_fix)
            if duplicates_found:
                discrepancy = True
            # Check quota and reservations
            for usage in usages:
                resource_name = usage.resource
                # Get the correct value for this quota usage resource
                updates = db_api._get_sync_updates(ctxt, project, session,
                                                   resources,
                                                   resource_name)
                in_use = updates[resource_name]
                if in_use != usage.in_use:
                    print('\t%s: invalid usage saved=%s actual=%s%s'
                          % (resource_name, usage.in_use, in_use,
                             action_msg))
                    discrepancy = True
                    if do_fix:
                        usage.in_use = in_use
                reservations = self._get_reservations(
                    ctxt, session, project, usage.id)
                # Only positive deltas count towards "reserved".
                num_reservations = sum(r.delta for r in reservations
                                       if r.delta > 0)
                if num_reservations != usage.reserved:
                    print('\t%s: invalid reserved saved=%s actual=%s%s'
                          % (resource_name, usage.reserved,
                             num_reservations, action_msg))
                    discrepancy = True
                    if do_fix:
                        usage.reserved = num_reservations
    print('Action successfully completed')
    return discrepancy
def setUp(self):
    """Build purge-test fixtures including QoS specs: 6 rows per table,
    with row [0:1] soft-deleted just now, rows [1:3] 20 days ago and
    rows [4:6] 60 days ago (row 3 stays live). Each QoS spec also gets a
    child spec row and an extra volume type referencing it."""
    super(PurgeDeletedTest, self).setUp()
    self.context = context.get_admin_context()
    self.engine = db_api.get_engine()
    self.session = db_api.get_session()
    self.conn = self.engine.connect()
    self.volumes = sqlalchemyutils.get_table(
        self.engine, "volumes")
    # The volume_metadata table has a FK of volume_id
    self.vm = sqlalchemyutils.get_table(
        self.engine, "volume_metadata")
    self.vol_types = sqlalchemyutils.get_table(
        self.engine, "volume_types")
    # The volume_type_projects table has a FK of volume_type_id
    self.vol_type_proj = sqlalchemyutils.get_table(
        self.engine, "volume_type_projects")
    self.snapshots = sqlalchemyutils.get_table(
        self.engine, "snapshots")
    self.sm = sqlalchemyutils.get_table(
        self.engine, "snapshot_metadata")
    self.vgm = sqlalchemyutils.get_table(
        self.engine, "volume_glance_metadata")
    self.qos = sqlalchemyutils.get_table(
        self.engine, "quality_of_service_specs")
    self.uuidstrs = []
    for unused in range(6):
        self.uuidstrs.append(uuid.uuid4().hex)
    # Add 6 rows to table
    for uuidstr in self.uuidstrs:
        ins_stmt = self.volumes.insert().values(id=uuidstr,
                                                volume_type_id=uuidstr)
        self.conn.execute(ins_stmt)
        ins_stmt = self.vm.insert().values(volume_id=uuidstr)
        self.conn.execute(ins_stmt)
        ins_stmt = self.vgm.insert().values(
            volume_id=uuidstr, key='image_name', value='test')
        self.conn.execute(ins_stmt)
        ins_stmt = self.vol_types.insert().values(id=uuidstr)
        self.conn.execute(ins_stmt)
        ins_stmt = self.vol_type_proj.insert().\
            values(volume_type_id=uuidstr)
        self.conn.execute(ins_stmt)
        ins_stmt = self.snapshots.insert().values(
            id=uuidstr, volume_id=uuidstr, volume_type_id=uuidstr)
        self.conn.execute(ins_stmt)
        ins_stmt = self.sm.insert().values(snapshot_id=uuidstr)
        self.conn.execute(ins_stmt)
        ins_stmt = self.vgm.insert().values(
            snapshot_id=uuidstr, key='image_name', value='test')
        self.conn.execute(ins_stmt)
        # Top-level QoS spec row for this uuid ...
        ins_stmt = self.qos.insert().values(
            id=uuidstr, key='QoS_Specs_Name', value='test')
        self.conn.execute(ins_stmt)
        # ... plus an extra volume type that references it ...
        ins_stmt = self.vol_types.insert().values(
            id=uuid.uuid4().hex, qos_specs_id=uuidstr)
        self.conn.execute(ins_stmt)
        # ... and a child spec row pointing at it via specs_id.
        ins_stmt = self.qos.insert().values(
            id=uuid.uuid4().hex, specs_id=uuidstr, key='desc',
            value='test')
        self.conn.execute(ins_stmt)
    # Set 5 of them deleted
    # 2 are 60 days ago, 2 are 20 days ago, one is just now.
    now = timeutils.utcnow()
    old = timeutils.utcnow() - datetime.timedelta(days=20)
    older = timeutils.utcnow() - datetime.timedelta(days=60)
    make_vol_now = self.volumes.update().\
        where(self.volumes.c.id.in_(self.uuidstrs[0:1]))\
        .values(deleted_at=now, deleted=True)
    make_vol_old = self.volumes.update().\
        where(self.volumes.c.id.in_(self.uuidstrs[1:3]))\
        .values(deleted_at=old, deleted=True)
    make_vol_older = self.volumes.update().\
        where(self.volumes.c.id.in_(self.uuidstrs[4:6]))\
        .values(deleted_at=older, deleted=True)
    make_vol_meta_now = self.vm.update().\
        where(self.vm.c.volume_id.in_(self.uuidstrs[0:1]))\
        .values(deleted_at=now, deleted=True)
    make_vol_meta_old = self.vm.update().\
        where(self.vm.c.volume_id.in_(self.uuidstrs[1:3]))\
        .values(deleted_at=old, deleted=True)
    make_vol_meta_older = self.vm.update().\
        where(self.vm.c.volume_id.in_(self.uuidstrs[4:6]))\
        .values(deleted_at=older, deleted=True)
    make_vol_types_now = self.vol_types.update().\
        where(self.vol_types.c.id.in_(self.uuidstrs[0:1]))\
        .values(deleted_at=now, deleted=True)
    make_vol_types_old = self.vol_types.update().\
        where(self.vol_types.c.id.in_(self.uuidstrs[1:3]))\
        .values(deleted_at=old, deleted=True)
    make_vol_types_older = self.vol_types.update().\
        where(self.vol_types.c.id.in_(self.uuidstrs[4:6]))\
        .values(deleted_at=older, deleted=True)
    make_vol_type_proj_now = self.vol_type_proj.update().\
        where(self.vol_type_proj.c.volume_type_id.in_(self.uuidstrs[0:1]))\
        .values(deleted_at=now, deleted=True)
    make_vol_type_proj_old = self.vol_type_proj.update().\
        where(self.vol_type_proj.c.volume_type_id.in_(self.uuidstrs[1:3]))\
        .values(deleted_at=old, deleted=True)
    make_vol_type_proj_older = self.vol_type_proj.update().\
        where(self.vol_type_proj.c.volume_type_id.in_(self.uuidstrs[4:6]))\
        .values(deleted_at=older, deleted=True)
    make_snap_now = self.snapshots.update().\
        where(self.snapshots.c.id.in_(self.uuidstrs[0:1]))\
        .values(deleted_at=now, deleted=True)
    make_snap_old = self.snapshots.update().\
        where(self.snapshots.c.id.in_(self.uuidstrs[1:3]))\
        .values(deleted_at=old, deleted=True)
    make_snap_older = self.snapshots.update().\
        where(self.snapshots.c.id.in_(self.uuidstrs[4:6]))\
        .values(deleted_at=older, deleted=True)
    make_snap_meta_now = self.sm.update().\
        where(self.sm.c.snapshot_id.in_(self.uuidstrs[0:1]))\
        .values(deleted_at=now, deleted=True)
    make_snap_meta_old = self.sm.update().\
        where(self.sm.c.snapshot_id.in_(self.uuidstrs[1:3]))\
        .values(deleted_at=old, deleted=True)
    make_snap_meta_older = self.sm.update().\
        where(self.sm.c.snapshot_id.in_(self.uuidstrs[4:6]))\
        .values(deleted_at=older, deleted=True)
    make_vol_glance_meta_now = self.vgm.update().\
        where(self.vgm.c.volume_id.in_(self.uuidstrs[0:1]))\
        .values(deleted_at=now, deleted=True)
    make_vol_glance_meta_old = self.vgm.update().\
        where(self.vgm.c.volume_id.in_(self.uuidstrs[1:3]))\
        .values(deleted_at=old, deleted=True)
    make_vol_glance_meta_older = self.vgm.update().\
        where(self.vgm.c.volume_id.in_(self.uuidstrs[4:6]))\
        .values(deleted_at=older, deleted=True)
    make_snap_glance_meta_now = self.vgm.update().\
        where(self.vgm.c.snapshot_id.in_(self.uuidstrs[0:1]))\
        .values(deleted_at=now, deleted=True)
    make_snap_glance_meta_old = self.vgm.update().\
        where(self.vgm.c.snapshot_id.in_(self.uuidstrs[1:3]))\
        .values(deleted_at=old, deleted=True)
    make_snap_glance_meta_older = self.vgm.update().\
        where(self.vgm.c.snapshot_id.in_(self.uuidstrs[4:6]))\
        .values(deleted_at=older, deleted=True)
    make_qos_now = self.qos.update().where(
        self.qos.c.id.in_(self.uuidstrs[0:1])).values(deleted_at=now,
                                                      deleted=True)
    make_qos_old = self.qos.update().where(
        self.qos.c.id.in_(self.uuidstrs[1:3])).values(deleted_at=old,
                                                      deleted=True)
    make_qos_older = self.qos.update().where(
        self.qos.c.id.in_(self.uuidstrs[4:6])).values(deleted_at=older,
                                                      deleted=True)
    # Child qos rows are matched via specs_id instead of id.
    make_qos_child_record_now = self.qos.update().where(
        self.qos.c.specs_id.in_(self.uuidstrs[0:1])).values(
        deleted_at=now, deleted=True)
    make_qos_child_record_old = self.qos.update().where(
        self.qos.c.specs_id.in_(self.uuidstrs[1:3])).values(
        deleted_at=old, deleted=True)
    make_qos_child_record_older = self.qos.update().where(
        self.qos.c.specs_id.in_(self.uuidstrs[4:6])).values(
        deleted_at=older, deleted=True)
    # The extra volume types are matched via their qos_specs_id FK.
    make_vol_types1_now = self.vol_types.update().where(
        self.vol_types.c.qos_specs_id.in_(self.uuidstrs[0:1])).values(
        deleted_at=now, deleted=True)
    make_vol_types1_old = self.vol_types.update().where(
        self.vol_types.c.qos_specs_id.in_(self.uuidstrs[1:3])).values(
        deleted_at=old, deleted=True)
    make_vol_types1_older = self.vol_types.update().where(
        self.vol_types.c.qos_specs_id.in_(self.uuidstrs[4:6])).values(
        deleted_at=older, deleted=True)
    self.conn.execute(make_vol_now)
    self.conn.execute(make_vol_old)
    self.conn.execute(make_vol_older)
    self.conn.execute(make_vol_meta_now)
    self.conn.execute(make_vol_meta_old)
    self.conn.execute(make_vol_meta_older)
    self.conn.execute(make_vol_types_now)
    self.conn.execute(make_vol_types_old)
    self.conn.execute(make_vol_types_older)
    self.conn.execute(make_vol_type_proj_now)
    self.conn.execute(make_vol_type_proj_old)
    self.conn.execute(make_vol_type_proj_older)
    self.conn.execute(make_snap_now)
    self.conn.execute(make_snap_old)
    self.conn.execute(make_snap_older)
    self.conn.execute(make_snap_meta_now)
    self.conn.execute(make_snap_meta_old)
    self.conn.execute(make_snap_meta_older)
    self.conn.execute(make_vol_glance_meta_now)
    self.conn.execute(make_vol_glance_meta_old)
    self.conn.execute(make_vol_glance_meta_older)
    self.conn.execute(make_snap_glance_meta_now)
    self.conn.execute(make_snap_glance_meta_old)
    self.conn.execute(make_snap_glance_meta_older)
    self.conn.execute(make_qos_now)
    self.conn.execute(make_qos_old)
    self.conn.execute(make_qos_older)
    self.conn.execute(make_qos_child_record_now)
    self.conn.execute(make_qos_child_record_old)
    self.conn.execute(make_qos_child_record_older)
    self.conn.execute(make_vol_types1_now)
    self.conn.execute(make_vol_types1_old)
    self.conn.execute(make_vol_types1_older)