def test_create_delete_snapshot(self):
    """Test snapshot can be created and deleted."""
    volume = self._create_volume()
    self.volume.create_volume(self.context, volume["id"])
    snapshot_id = self._create_snapshot(volume["id"])["id"]
    self.volume.create_snapshot(self.context, volume["id"], snapshot_id)
    self.assertEqual(snapshot_id,
                     db.snapshot_get(context.get_admin_context(),
                                     snapshot_id).id)

    self.volume.delete_snapshot(self.context, snapshot_id)
    # Deletion is soft: the row is only visible with read_deleted="yes".
    snap = db.snapshot_get(context.get_admin_context(read_deleted="yes"),
                           snapshot_id)
    # assertEquals is deprecated; use assertEqual.
    self.assertEqual(snap["status"], "deleted")
    # A normal context must no longer see the snapshot at all.
    self.assertRaises(exception.NotFound,
                      db.snapshot_get,
                      self.context,
                      snapshot_id)
    self.volume.delete_volume(self.context, volume["id"])
def test_transfer_accept_with_snapshots(self):
    """Accepting a transfer re-homes the volume's snapshots too."""
    vol_id = utils.create_volume(self.ctxt)['id']
    snap_ids = [
        utils.create_snapshot(self.ctxt, vol_id,
                              status='available')['id'],
        utils.create_snapshot(self.ctxt, vol_id,
                              status='available')['id'],
    ]
    xfer = self._create_transfer(vol_id)
    new_ctxt = context.RequestContext(user_id=fake.USER2_ID,
                                      project_id=fake.PROJECT2_ID)
    db.transfer_accept(new_ctxt.elevated(), xfer,
                       fake.USER2_ID, fake.PROJECT2_ID)
    # Every snapshot must now belong to the receiving project.
    for snap_id in snap_ids:
        self.assertEqual(fake.PROJECT2_ID,
                         db.snapshot_get(new_ctxt, snap_id)['project_id'])
def test_invalid_status_for_snapshot(self):
    """Resetting a snapshot to a volume-only status must be rejected."""
    # admin context
    ctx = context.RequestContext('admin', 'fake', True)
    # snapshot in 'available'
    volume = db.volume_create(
        ctx, {
            'status': 'available',
            'host': 'test',
            'provider_location': '',
            'size': 1
        })
    snapshot = db.snapshot_create(ctx, {
        'status': 'available',
        'volume_id': volume['id']
    })
    req = webob.Request.blank('/v2/fake/snapshots/%s/action'
                              % snapshot['id'])
    req.method = 'POST'
    req.headers['content-type'] = 'application/json'
    # 'attaching' is not a valid status for snapshots
    req.body = jsonutils.dumps(
        {'os-reset_status': {
            'status': 'attaching'
        }})
    # attach admin context to request
    req.environ['cinder.context'] = ctx
    resp = req.get_response(app())
    # request is rejected with 400 Bad Request
    self.assertEqual(resp.status_int, 400)
    snapshot = db.snapshot_get(ctx, snapshot['id'])
    # status is still 'available'
    self.assertEqual(snapshot['status'], 'available')
def test_update_encryption_key_id(self, mock_barbican_client):
    """Migration replaces the fixed key on the volume and its snapshots."""
    vol = self.create_volume()
    snap_ids = [fake.SNAPSHOT_ID, fake.SNAPSHOT2_ID, fake.SNAPSHOT3_ID]
    for sid in snap_ids:
        tests_utils.create_snapshot(self.context, vol.id, id=sid)

    # Barbican's secret.store() returns a URI that contains the
    # secret's key ID at the end.
    secret_ref = 'http://some/path/' + fake.ENCRYPTION_KEY_ID
    fake_secret = mock.MagicMock()
    fake_secret.store.return_value = secret_ref
    mock_barbican_client.return_value.secrets.create.return_value = \
        fake_secret

    migration.migrate_fixed_key(self.my_vols, conf=self.conf)

    # The volume and each snapshot must carry the new key ID afterwards.
    vol_db = db.volume_get(self.context, vol.id)
    self.assertEqual(fake.ENCRYPTION_KEY_ID, vol_db['encryption_key_id'])
    for sid in snap_ids:
        snap_db = db.snapshot_get(self.context, sid)
        self.assertEqual(fake.ENCRYPTION_KEY_ID,
                         snap_db['encryption_key_id'])
def to_primitive(self, context):
    """Serialize this backup device into a plain dict.

    The 'backup_device' entry is the raw DB record of either the
    snapshot or the volume, depending on ``is_snapshot``.
    """
    if self.is_snapshot:
        backup_device = db.snapshot_get(context, self.snapshot.id)
    else:
        backup_device = db.volume_get(context, self.volume.id)
    return {'backup_device': backup_device,
            'secure_enabled': self.secure_enabled,
            'is_snapshot': self.is_snapshot}
def test_create_delete_snapshot(self):
    """Test snapshot can be created and deleted."""
    volume = self._create_volume()
    self.volume.create_volume(self.context, volume['id'])
    snapshot_id = self._create_snapshot(volume['id'])['id']
    self.volume.create_snapshot(self.context, volume['id'], snapshot_id)
    self.assertEqual(
        snapshot_id,
        db.snapshot_get(context.get_admin_context(), snapshot_id).id)

    self.volume.delete_snapshot(self.context, snapshot_id)
    # Deletion is soft: the row is only visible with read_deleted='yes'.
    snap = db.snapshot_get(context.get_admin_context(read_deleted='yes'),
                           snapshot_id)
    # assertEquals is deprecated; use assertEqual.
    self.assertEqual(snap['status'], 'deleted')
    # A normal context must no longer see the snapshot at all.
    self.assertRaises(exception.NotFound,
                      db.snapshot_get,
                      self.context,
                      snapshot_id)
    self.volume.delete_volume(self.context, volume['id'])
def _update_snapshot_status(self, req, id, body):
    """Update database fields related to status of a snapshot.

    Intended for creation of snapshots, so snapshot state
    must start as 'creating' and be changed to 'available',
    'creating', or 'error'.
    """
    context = req.environ['cinder.context']
    authorize(context, 'update_snapshot_status')

    LOG.debug("body: %s" % body)
    try:
        status = body['os-update_snapshot_status']['status']
    except KeyError:
        # A malformed body is a client error, not a server error.
        msg = _("'status' must be specified.")
        raise webob.exc.HTTPBadRequest(explanation=msg)

    # Allowed state transitions
    status_map = {'creating': ['creating', 'available', 'error'],
                  'deleting': ['deleting', 'error_deleting']}

    current_snapshot = db.snapshot_get(context, id)

    # The snapshot must currently be in a transitional state.
    if current_snapshot['status'] not in status_map:
        msg = _("Snapshot status %(cur)s not allowed for "
                "update_snapshot_status") % {
                    'cur': current_snapshot['status']}
        raise webob.exc.HTTPBadRequest(explanation=msg)

    # And the requested status must be reachable from that state.
    if status not in status_map[current_snapshot['status']]:
        msg = _("Provided snapshot status %(provided)s not allowed for "
                "snapshot with status %(current)s.") % \
            {'provided': status,
             'current': current_snapshot['status']}
        raise webob.exc.HTTPBadRequest(explanation=msg)

    update_dict = {'id': id,
                   'status': status}

    progress = body['os-update_snapshot_status'].get('progress', None)
    if progress:
        # This is expected to be a string like '73%'
        msg = _('progress must be an integer percentage')
        try:
            integer = int(progress[:-1])
        except ValueError:
            raise webob.exc.HTTPBadRequest(explanation=msg)

        if integer < 0 or integer > 100 or progress[-1] != '%':
            raise webob.exc.HTTPBadRequest(explanation=msg)

        update_dict.update({'progress': progress})

    LOG.info("Updating snapshot %(id)s with info %(dict)s"
             % {'id': id, 'dict': update_dict})

    db.snapshot_update(context, id, update_dict)
    return webob.Response(status_int=202)
def test_create_delete_snapshot(self):
    """Test snapshot can be created and deleted."""
    volume = self._create_volume()
    self.volume.create_volume(self.context, volume['id'])
    snapshot_id = self._create_snapshot(volume['id'])['id']
    self.volume.create_snapshot(self.context, volume['id'], snapshot_id)
    self.assertEqual(snapshot_id,
                     db.snapshot_get(context.get_admin_context(),
                                     snapshot_id).id)

    self.volume.delete_snapshot(self.context, snapshot_id)
    # Deletion is soft: the row is only visible with read_deleted='yes'.
    snap = db.snapshot_get(context.get_admin_context(read_deleted='yes'),
                           snapshot_id)
    # assertEquals is deprecated; use assertEqual.
    self.assertEqual(snap['status'], 'deleted')
    # A normal context must no longer see the snapshot at all.
    self.assertRaises(exception.NotFound,
                      db.snapshot_get,
                      self.context,
                      snapshot_id)
    self.volume.delete_volume(self.context, volume['id'])
def test_invalid_status_for_snapshot(self):
    """A snapshot cannot be reset to a volume-only status."""
    ctx = context.RequestContext("admin", "fake", True)
    vol_ref = db.volume_create(ctx, {"status": "available",
                                     "host": "test",
                                     "provider_location": "",
                                     "size": 1})
    snap_ref = db.snapshot_create(ctx, {"status": "available",
                                        "volume_id": vol_ref["id"]})
    # 'attaching' applies to volumes only, so expect a 400.
    resp = self._issue_snapshot_reset(ctx, snap_ref,
                                      {"status": "attaching"})
    self.assertEqual(resp.status_int, 400)
    # A rejected reset must leave the snapshot state untouched.
    refreshed = db.snapshot_get(ctx, snap_ref["id"])
    self.assertEqual(refreshed["status"], "available")
def test_transfer_accept_with_snapshots_invalid_status(self):
    """Transfer accept fails when any snapshot is not 'available'."""
    vol_id = utils.create_volume(self.ctxt)['id']
    snap_ok = utils.create_snapshot(self.ctxt, vol_id,
                                    status='available')['id']
    # Second snapshot keeps its default, non-'available' status.
    snap_bad = utils.create_snapshot(self.ctxt, vol_id)['id']
    xfer = self._create_transfer(vol_id)
    new_ctxt = context.RequestContext(user_id=fake.USER2_ID,
                                      project_id=fake.PROJECT2_ID)
    self.assertRaises(exception.InvalidSnapshot,
                      db.transfer_accept,
                      new_ctxt.elevated(),
                      xfer,
                      fake.USER2_ID,
                      fake.PROJECT2_ID)
    # The failed accept must not have re-homed anything.
    self.assertEqual(fake.PROJECT_ID,
                     db.snapshot_get(self.ctxt, snap_ok)['project_id'])
    self.assertEqual(fake.PROJECT_ID,
                     db.snapshot_get(self.ctxt, snap_bad)['project_id'])
    self.assertEqual('awaiting-transfer',
                     db.volume_get(self.ctxt, vol_id)['status'])
def _update_snapshot_status(self, req, id, body):
    """Update database fields related to status of a snapshot.

    Intended for creation of snapshots, so snapshot state
    must start as 'creating' and be changed to 'available',
    'creating', or 'error'.
    """
    context = req.environ['cinder.context']
    authorize(context, 'update_snapshot_status')

    LOG.debug("body: %s", body)
    try:
        status = body['os-update_snapshot_status']['status']
    except KeyError:
        # A body without 'status' previously escaped as a KeyError
        # (HTTP 500); report it as the client error it is.
        msg = _("'status' must be specified.")
        raise webob.exc.HTTPBadRequest(explanation=msg)

    # Allowed state transitions
    status_map = {'creating': ['creating', 'available', 'error'],
                  'deleting': ['deleting', 'error_deleting']}

    current_snapshot = db.snapshot_get(context, id)

    # The snapshot must currently be in a transitional state.
    if current_snapshot['status'] not in status_map:
        msg = _("Snapshot status %(cur)s not allowed for "
                "update_snapshot_status") % {
                    'cur': current_snapshot['status']}
        raise webob.exc.HTTPBadRequest(explanation=msg)

    # And the requested status must be reachable from that state.
    if status not in status_map[current_snapshot['status']]:
        msg = _("Provided snapshot status %(provided)s not allowed for "
                "snapshot with status %(current)s.") % \
            {'provided': status,
             'current': current_snapshot['status']}
        raise webob.exc.HTTPBadRequest(explanation=msg)

    update_dict = {'id': id,
                   'status': status}

    progress = body['os-update_snapshot_status'].get('progress', None)
    if progress:
        # This is expected to be a string like '73%'
        msg = _('progress must be an integer percentage')
        try:
            integer = int(progress[:-1])
        except ValueError:
            raise webob.exc.HTTPBadRequest(explanation=msg)

        if integer < 0 or integer > 100 or progress[-1] != '%':
            raise webob.exc.HTTPBadRequest(explanation=msg)

        update_dict.update({'progress': progress})

    LOG.info("Updating snapshot %(id)s with info %(dict)s",
             {'id': id, 'dict': update_dict})

    db.snapshot_update(context, id, update_dict)
    return webob.Response(status_int=202)
def test_volume_api_update_snapshot(self):
    """volume.api.update_snapshot() persists changes to the database."""
    # create raw snapshot
    volume = self._create_volume()
    snapshot = self._create_snapshot(volume['id'])
    # A freshly created snapshot has no display name yet.
    self.assertIsNone(snapshot['display_name'])
    # use volume.api to update name
    volume_api = cinder.volume.api.API()
    update_dict = {'display_name': 'test update name'}
    volume_api.update_snapshot(self.context, snapshot, update_dict)
    # read changes from db
    snap = db.snapshot_get(context.get_admin_context(), snapshot['id'])
    # assertEquals is deprecated; use assertEqual.
    self.assertEqual(snap['display_name'], 'test update name')
def test_create_snapshot_online_novafailure(self):
    """Online snapshot: driver must raise when Nova reports 'error'."""
    (mox, drv) = self._mox, self._driver

    # An in-use volume forces the Nova-assisted (online) snapshot path.
    volume = self._simple_volume()
    volume['status'] = 'in-use'

    hashed = drv._get_hash_str(self.TEST_EXPORT1)
    volume_file = 'volume-%s' % self.VOLUME_UUID
    volume_path = '%s/%s/%s' % (self.TEST_MNT_POINT_BASE,
                                hashed,
                                volume_file)
    info_path = '%s.info' % volume_path

    ctxt = context.RequestContext('fake_user', 'fake_project')

    snap_ref = {'name': 'test snap (online)',
                'volume_id': self.VOLUME_UUID,
                'volume': volume,
                'id': self.SNAP_UUID,
                'context': ctxt}

    snap_path = '%s.%s' % (volume_path, self.SNAP_UUID)
    snap_file = '%s.%s' % (volume_file, self.SNAP_UUID)

    mox.StubOutWithMock(drv, '_execute')
    mox.StubOutWithMock(drv, '_create_qcow2_snap_file')
    mox.StubOutWithMock(drv, '_nova')
    # Stubbed so the driver's status-polling loop can be scripted below.
    mox.StubOutWithMock(db, 'snapshot_get')
    mox.StubOutWithMock(drv, '_write_info_file')

    drv._create_qcow2_snap_file(snap_ref, volume_file, snap_path)

    create_info = {'snapshot_id': snap_ref['id'],
                   'type': 'qcow2',
                   'new_file': snap_file}

    drv._nova.create_volume_snapshot(ctxt, self.VOLUME_UUID, create_info)

    snap_ref['status'] = 'creating'
    snap_ref['progress'] = '0%'

    # Scripted poll sequence: 0% -> 50% -> 99% with status 'error'.
    # The terminal 'error' status is what must make the driver raise.
    db.snapshot_get(ctxt, self.SNAP_UUID).AndReturn(snap_ref)

    snap_ref['progress'] = '50%'
    db.snapshot_get(ctxt, self.SNAP_UUID).AndReturn(snap_ref)

    snap_ref['progress'] = '99%'
    snap_ref['status'] = 'error'
    db.snapshot_get(ctxt, self.SNAP_UUID).AndReturn(snap_ref)

    snap_info = {'active': snap_file,
                 self.SNAP_UUID: snap_file}

    drv._write_info_file(info_path, snap_info)

    mox.ReplayAll()

    self.assertRaises(exception.GlusterfsException,
                      drv.create_snapshot,
                      snap_ref)
def test_create_snapshot_online(self):
    """Online snapshot happy path via the Nova-assisted flow."""
    (mox, drv) = self._mox, self._driver

    # An in-use volume forces the Nova-assisted (online) snapshot path.
    volume = self._simple_volume()
    volume['status'] = 'in-use'

    hashed = drv._get_hash_str(self.TEST_EXPORT1)
    volume_file = 'volume-%s' % self.VOLUME_UUID
    volume_path = '%s/%s/%s' % (self.TEST_MNT_POINT_BASE,
                                hashed,
                                volume_file)
    info_path = '%s.info' % volume_path

    ctxt = context.RequestContext('fake_user', 'fake_project')

    # 'status'/'progress' start as placeholders; the scripted polling
    # below overwrites them as Nova "makes progress".
    snap_ref = {
        'name': 'test snap (online)',
        'volume_id': self.VOLUME_UUID,
        'volume': volume,
        'id': self.SNAP_UUID,
        'context': ctxt,
        'status': 'asdf',
        'progress': 'asdf'
    }

    snap_path = '%s.%s' % (volume_path, self.SNAP_UUID)
    snap_file = '%s.%s' % (volume_file, self.SNAP_UUID)

    mox.StubOutWithMock(drv, '_execute')
    mox.StubOutWithMock(drv, '_create_qcow2_snap_file')
    # Stubbed so the driver's status-polling loop can be scripted below.
    mox.StubOutWithMock(db, 'snapshot_get')
    mox.StubOutWithMock(drv, '_write_info_file')
    mox.StubOutWithMock(drv, '_nova')

    drv._create_qcow2_snap_file(snap_ref, volume_file, snap_path)

    create_info = {
        'snapshot_id': snap_ref['id'],
        'type': 'qcow2',
        'new_file': snap_file
    }

    drv._nova.create_volume_snapshot(ctxt, self.VOLUME_UUID, create_info)

    snap_ref['status'] = 'creating'
    snap_ref['progress'] = '0%'

    # Scripted poll sequence: 0% -> 50% -> 90%, still 'creating'.
    db.snapshot_get(ctxt, self.SNAP_UUID).AndReturn(snap_ref)

    snap_ref['progress'] = '50%'
    db.snapshot_get(ctxt, self.SNAP_UUID).AndReturn(snap_ref)

    snap_ref['progress'] = '90%'
    db.snapshot_get(ctxt, self.SNAP_UUID).AndReturn(snap_ref)

    # On success the info file records the new active snapshot file.
    snap_info = {'active': snap_file,
                 self.SNAP_UUID: snap_file}

    drv._write_info_file(info_path, snap_info)

    mox.ReplayAll()

    drv.create_snapshot(snap_ref)
def create_snapshot(self, context, volume_id, snapshot_id):
    """Create a Lunr backup for the given snapshot record.

    :param context: request context; elevated before any cleanup.
    :param volume_id: id of the source volume (the snapshot record
        carries its own ``volume_id``, which is what is actually used).
    :param snapshot_id: id of the snapshot record to back.
    :raises: re-raises any LunrError from the backend after destroying
        the snapshot record.
    """
    snapshot = db.snapshot_get(context, snapshot_id)
    context = context.elevated()
    client = LunrClient(snapshot, logger=LOG)
    params = {'volume': snapshot['volume_id']}
    try:
        client.backups.create(snapshot['id'], **params)
    # Py2-only "except LunrError, e" replaced; the binding was unused.
    except LunrError:
        LOG.debug(_('error creating snapshot %s'), snapshot_id)
        # Don't leave an error'd snapshot around, the raise here
        # will notify the caller of the error (See Github Issue #322)
        db.snapshot_destroy(context, snapshot['id'])
        raise
def _update_snapshot_status(self, req, id, body):
    """Update database fields related to status of a snapshot.

    Intended for creation of snapshots, so snapshot state
    must start as 'creating' and be changed to 'available',
    'creating', or 'error'.
    """
    context = req.environ["cinder.context"]
    authorize(context, "update_snapshot_status")

    LOG.debug("body: %s", body)
    try:
        status = body["os-update_snapshot_status"]["status"]
    except KeyError:
        # A body without 'status' previously escaped as a KeyError
        # (HTTP 500); report it as the client error it is.
        msg = _("'status' must be specified.")
        raise webob.exc.HTTPBadRequest(explanation=msg)

    # Allowed state transitions
    status_map = {"creating": ["creating", "available", "error"],
                  "deleting": ["deleting", "error_deleting"]}

    current_snapshot = db.snapshot_get(context, id)

    # The snapshot must currently be in a transitional state.
    if current_snapshot["status"] not in status_map:
        msg = _("Snapshot status %(cur)s not allowed for "
                "update_snapshot_status") % {
                    "cur": current_snapshot["status"]
                }
        raise webob.exc.HTTPBadRequest(explanation=msg)

    # And the requested status must be reachable from that state.
    if status not in status_map[current_snapshot["status"]]:
        msg = _("Provided snapshot status %(provided)s not allowed for "
                "snapshot with status %(current)s.") % {
                    "provided": status,
                    "current": current_snapshot["status"],
                }
        raise webob.exc.HTTPBadRequest(explanation=msg)

    update_dict = {"id": id,
                   "status": status}

    progress = body["os-update_snapshot_status"].get("progress", None)
    if progress:
        # This is expected to be a string like '73%'
        msg = _("progress must be an integer percentage")
        try:
            integer = int(progress[:-1])
        except ValueError:
            raise webob.exc.HTTPBadRequest(explanation=msg)

        if integer < 0 or integer > 100 or progress[-1] != "%":
            raise webob.exc.HTTPBadRequest(explanation=msg)

        update_dict.update({"progress": progress})

    LOG.info("Updating snapshot %(id)s with info %(dict)s",
             {"id": id, "dict": update_dict})

    db.snapshot_update(context, id, update_dict)
    return webob.Response(status_int=202)
def test_invalid_status_for_snapshot(self):
    """A snapshot cannot be reset to a volume-only status."""
    ctx = context.RequestContext('admin', 'fake', True)
    vol_ref = db.volume_create(ctx, {'status': 'available',
                                     'host': 'test',
                                     'provider_location': '',
                                     'size': 1})
    snap_ref = db.snapshot_create(ctx, {'status': 'available',
                                        'volume_id': vol_ref['id']})
    # 'attaching' applies to volumes only, so expect a 400.
    resp = self._issue_snapshot_reset(ctx, snap_ref,
                                      {'status': 'attaching'})
    self.assertEqual(400, resp.status_int)
    # A rejected reset must leave the snapshot state untouched.
    refreshed = db.snapshot_get(ctx, snap_ref['id'])
    self.assertEqual('available', refreshed['status'])
def test_delete_snapshot_in_error_deleting_status(self):
    """delete_snapshot() accepts snapshots stuck in 'error_deleting'."""
    volume_type = volume_types.get_default_volume_type()
    volume = tests_utils.create_volume(self.context,
                                       volume_type_id=volume_type['id'])
    snapshot = tests_utils.create_snapshot(self.context, volume['id'],
                                           status='error_deleting')
    # explicitly set volume_type_id in stub
    snapshot['volume_type_id'] = volume_type['id']
    api = lunr_api.API()
    api.delete_snapshot(self.context, snapshot)
    post_delete = db.snapshot_get(self.context, snapshot['id'])
    # assertIsNot compares identity, not value; assertNotEqual is the
    # correct check that the status actually changed.
    self.assertNotEqual(snapshot['status'], post_delete['status'])
    self.assertEqual(post_delete['status'], 'deleting')
def test_snapshot_reset_status(self):
    """An admin may reset an 'error_deleting' snapshot to 'error'."""
    ctx = context.RequestContext('admin', 'fake', True)
    vol_ref = db.volume_create(ctx, {'status': 'available',
                                     'host': 'test',
                                     'provider_location': '',
                                     'size': 1})
    snap_ref = db.snapshot_create(ctx, {'status': 'error_deleting',
                                        'volume_id': vol_ref['id']})
    resp = self._issue_snapshot_reset(ctx, snap_ref,
                                      {'status': 'error'})
    self.assertEqual(resp.status_int, 202)
    # The new status must have been persisted.
    refreshed = db.snapshot_get(ctx, snap_ref['id'])
    self.assertEqual(refreshed['status'], 'error')
def test_invalid_status_for_snapshot(self):
    """Resetting a snapshot to a volume-only status returns 400."""
    ctx = context.RequestContext('admin', 'fake', True)
    vol_ref = db.volume_create(ctx, {'status': 'available',
                                     'host': 'test',
                                     'provider_location': '',
                                     'size': 1})
    snap_ref = db.snapshot_create(ctx, {'status': 'available',
                                        'volume_id': vol_ref['id']})
    # 'attaching' is never a valid snapshot status.
    resp = self._issue_snapshot_reset(ctx, snap_ref,
                                      {'status': 'attaching'})
    self.assertEqual(resp.status_int, 400)
    # The snapshot must remain 'available'.
    refreshed = db.snapshot_get(ctx, snap_ref['id'])
    self.assertEqual(refreshed['status'], 'available')
def create_snapshot(self, context, volume_id, snapshot_id):
    """Create a Lunr backup for the given snapshot record.

    :param context: request context; elevated before any cleanup.
    :param volume_id: id of the source volume (the snapshot record
        carries its own ``volume_id``, which is what is actually used).
    :param snapshot_id: id of the snapshot record to back.
    :raises: re-raises any LunrError from the backend after destroying
        the snapshot record.
    """
    snapshot = db.snapshot_get(context, snapshot_id)
    context = context.elevated()
    client = LunrClient(snapshot, logger=LOG)
    params = {
        'volume': snapshot['volume_id']
    }
    try:
        client.backups.create(snapshot['id'], **params)
    # Py2-only "except LunrError, e" replaced; the binding was unused.
    except LunrError:
        LOG.debug(_('error creating snapshot %s'), snapshot_id)
        # Don't leave an error'd snapshot around, the raise here
        # will notify the caller of the error (See Github Issue #322)
        db.snapshot_destroy(context, snapshot['id'])
        raise
def test_delete_snapshot_conflict_with_force(self):
    """force=True deletes a snapshot despite a conflicting sibling."""
    volume_type = volume_types.get_default_volume_type()
    volume = tests_utils.create_volume(self.context,
                                       volume_type_id=volume_type['id'])
    # create conflicting snapshot
    tests_utils.create_snapshot(self.context, volume['id'])
    snapshot = tests_utils.create_snapshot(self.context, volume['id'],
                                           status='available')
    # explicitly set volume_type_id in stub
    snapshot['volume_type_id'] = volume_type['id']
    api = lunr_api.API()
    api.delete_snapshot(self.context, snapshot, force=True)
    post_delete = db.snapshot_get(self.context, snapshot['id'])
    # assertIsNot compares identity, not value; assertNotEqual is the
    # correct check that the status actually changed.
    self.assertNotEqual(snapshot['status'], post_delete['status'])
    self.assertEqual(post_delete['status'], 'deleting')
def test_cant_delete_volume_with_snapshots(self):
    """Test volume can't be deleted with dependent snapshots."""
    volume = self._create_volume()
    vol_id = volume["id"]
    self.volume.create_volume(self.context, vol_id)
    snap_id = self._create_snapshot(vol_id)["id"]
    self.volume.create_snapshot(self.context, vol_id, snap_id)
    self.assertEqual(snap_id,
                     db.snapshot_get(context.get_admin_context(),
                                     snap_id).id)

    # Even a deletable-looking volume must be refused while a
    # snapshot still depends on it.
    volume["status"] = "available"
    volume["host"] = "fakehost"
    volume_api = cinder.volume.api.API()
    self.assertRaises(exception.InvalidVolume,
                      volume_api.delete,
                      self.context,
                      volume)

    # Dropping the snapshot first makes the volume deletable.
    self.volume.delete_snapshot(self.context, snap_id)
    self.volume.delete_volume(self.context, vol_id)
def test_can_delete_errored_snapshot(self):
    """Test snapshot can be created and deleted."""
    volume = self._create_volume()
    vol_id = volume["id"]
    self.volume.create_volume(self.context, vol_id)
    snap_id = self._create_snapshot(vol_id)["id"]
    self.volume.create_snapshot(self.context, vol_id, snap_id)
    snapshot = db.snapshot_get(context.get_admin_context(), snap_id)
    volume_api = cinder.volume.api.API()

    # An unrecognized status must block the API-level delete...
    snapshot["status"] = "badstatus"
    self.assertRaises(exception.InvalidVolume,
                      volume_api.delete_snapshot,
                      self.context,
                      snapshot)

    # ...but a snapshot in 'error' is deletable.
    snapshot["status"] = "error"
    self.volume.delete_snapshot(self.context, snap_id)
    self.volume.delete_volume(self.context, vol_id)
def test_can_delete_errored_snapshot(self):
    """Test snapshot can be created and deleted."""
    volume = self._create_volume()
    vol_id = volume['id']
    self.volume.create_volume(self.context, vol_id)
    snap_id = self._create_snapshot(vol_id)['id']
    self.volume.create_snapshot(self.context, vol_id, snap_id)
    snapshot = db.snapshot_get(context.get_admin_context(), snap_id)
    volume_api = cinder.volume.api.API()

    # An unrecognized status must block the API-level delete...
    snapshot['status'] = 'badstatus'
    self.assertRaises(exception.InvalidVolume,
                      volume_api.delete_snapshot,
                      self.context,
                      snapshot)

    # ...but a snapshot in 'error' is deletable.
    snapshot['status'] = 'error'
    self.volume.delete_snapshot(self.context, snap_id)
    self.volume.delete_volume(self.context, vol_id)
def test_update_volume_encryption_key_id(self, mock_barbican_client,
                                         mock_get_barbican_key_id):
    """Migration stamps the new key onto the volume and its snapshots."""
    vol = self.create_volume()
    snap_ids = [fake.SNAPSHOT_ID, fake.SNAPSHOT2_ID, fake.SNAPSHOT3_ID]
    for sid in snap_ids:
        tests_utils.create_snapshot(self.context, vol.id, id=sid)

    mock_get_barbican_key_id.return_value = fake.ENCRYPTION_KEY_ID
    migration.migrate_fixed_key(self.my_vols, self.my_baks,
                                conf=self.conf)

    # The volume and each snapshot must reference the new key.
    vol_db = db.volume_get(self.context, vol.id)
    self.assertEqual(fake.ENCRYPTION_KEY_ID, vol_db['encryption_key_id'])
    for sid in snap_ids:
        snap_db = db.snapshot_get(self.context, sid)
        self.assertEqual(fake.ENCRYPTION_KEY_ID,
                         snap_db['encryption_key_id'])
def test_delete_busy_snapshot(self):
    """Test snapshot can be created and deleted."""
    volume = self._create_volume()
    volume_id = volume["id"]
    self.volume.create_volume(self.context, volume_id)
    snapshot_id = self._create_snapshot(volume_id)["id"]
    self.volume.create_snapshot(self.context, volume_id, snapshot_id)

    # Script the driver to report the snapshot busy on the first delete.
    self.mox.StubOutWithMock(self.volume.driver, "delete_snapshot")
    self.volume.driver.delete_snapshot(
        mox.IgnoreArg()).AndRaise(exception.SnapshotIsBusy)
    self.mox.ReplayAll()

    # A busy snapshot is not removed; it goes back to 'available'.
    self.volume.delete_snapshot(self.context, snapshot_id)
    snapshot_ref = db.snapshot_get(self.context, snapshot_id)
    self.assertEqual(snapshot_id, snapshot_ref.id)
    self.assertEqual("available", snapshot_ref.status)

    # With the stub removed the delete goes through.
    self.mox.UnsetStubs()
    self.volume.delete_snapshot(self.context, snapshot_id)
    self.volume.delete_volume(self.context, volume_id)
def test_cant_delete_volume_with_snapshots(self):
    """Test volume can't be deleted with dependent snapshots."""
    volume = self._create_volume()
    vol_id = volume['id']
    self.volume.create_volume(self.context, vol_id)
    snap_id = self._create_snapshot(vol_id)['id']
    self.volume.create_snapshot(self.context, vol_id, snap_id)
    self.assertEqual(
        snap_id,
        db.snapshot_get(context.get_admin_context(), snap_id).id)

    # Even a deletable-looking volume must be refused while a
    # snapshot still depends on it.
    volume['status'] = 'available'
    volume['host'] = 'fakehost'
    volume_api = cinder.volume.api.API()
    self.assertRaises(exception.InvalidVolume,
                      volume_api.delete,
                      self.context,
                      volume)

    # Dropping the snapshot first makes the volume deletable.
    self.volume.delete_snapshot(self.context, snap_id)
    self.volume.delete_volume(self.context, vol_id)
def test_snapshot_reset_status(self):
    """os-reset_status moves an 'error_deleting' snapshot to 'error'."""
    # admin context
    ctx = context.RequestContext("admin", "fake", True)
    # snapshot in 'error_deleting'
    volume = db.volume_create(ctx, {})
    snapshot = db.snapshot_create(ctx, {"status": "error_deleting",
                                        "volume_id": volume["id"]})
    req = webob.Request.blank("/v1/fake/snapshots/%s/action"
                              % snapshot["id"])
    req.method = "POST"
    req.headers["content-type"] = "application/json"
    # request status of 'error'
    req.body = jsonutils.dumps({"os-reset_status": {"status": "error"}})
    # attach admin context to request
    req.environ["cinder.context"] = ctx
    resp = req.get_response(app())
    # request is accepted; assertEquals is deprecated -> assertEqual
    self.assertEqual(resp.status_int, 202)
    snapshot = db.snapshot_get(ctx, snapshot["id"])
    # status changed to 'error'
    self.assertEqual(snapshot["status"], "error")
def test_invalid_status_for_snapshot(self):
    """Resetting a snapshot to a volume-only status must be rejected."""
    # admin context
    ctx = context.RequestContext("admin", "fake", True)
    # snapshot in 'available'
    volume = db.volume_create(ctx, {})
    snapshot = db.snapshot_create(ctx, {"status": "available",
                                        "volume_id": volume["id"]})
    req = webob.Request.blank("/v1/fake/snapshots/%s/action"
                              % snapshot["id"])
    req.method = "POST"
    req.headers["content-type"] = "application/json"
    # 'attaching' is not a valid status for snapshots
    req.body = jsonutils.dumps({"os-reset_status":
                                {"status": "attaching"}})
    # attach admin context to request
    req.environ["cinder.context"] = ctx
    resp = req.get_response(app())
    # request is rejected; assertEquals is deprecated -> assertEqual
    self.assertEqual(resp.status_int, 400)
    snapshot = db.snapshot_get(ctx, snapshot["id"])
    # status is still 'available'
    self.assertEqual(snapshot["status"], "available")
def test_update_progress(self):
    """os-update_progress persists the supplied progress string."""
    ctx = context.RequestContext('admin', 'fake', True)
    volume = db.volume_create(ctx, {})
    snapshot = db.snapshot_create(ctx, {'volume_id': volume['id']})
    req = webob.Request.blank('/v2/fake/snapshots/%s/action'
                              % snapshot['id'])
    req.method = 'POST'
    req.headers['content-type'] = 'application/json'
    # request a progress update (comments previously copy-pasted from
    # the reset-status test were wrong)
    req.body = jsonutils.dumps({'os-update_progress': 'progress!'})
    # attach admin context to request
    req.environ['cinder.context'] = ctx
    resp = req.get_response(app())
    # request is accepted; assertEquals is deprecated -> assertEqual
    self.assertEqual(resp.status_int, 202)
    snapshot = db.snapshot_get(ctx, snapshot['id'])
    # progress was stored verbatim
    self.assertEqual(snapshot['progress'], 'progress!')
def test_update_progress(self):
    """os-update_progress persists the supplied progress string."""
    ctx = context.RequestContext('admin', 'fake', True)
    vol_ref = db.volume_create(ctx, {})
    snap_ref = db.snapshot_create(ctx, {'volume_id': vol_ref['id']})

    # Build an admin POST carrying the progress action.
    req = webob.Request.blank('/v2/fake/snapshots/%s/action'
                              % snap_ref['id'])
    req.method = 'POST'
    req.headers['content-type'] = 'application/json'
    req.body = jsonutils.dumps({'os-update_progress': 'progress!'})
    req.environ['cinder.context'] = ctx

    resp = req.get_response(app())
    self.assertEqual(resp.status_int, 202)

    # The progress string must have been stored verbatim.
    refreshed = db.snapshot_get(ctx, snap_ref['id'])
    self.assertEqual(refreshed['progress'], 'progress!')
def delete_snapshot(self, context, snapshot_id):
    """Delete the Lunr backup backing ``snapshot_id``.

    404 from the backend is treated as success (record destroyed);
    409 marks the snapshot busy ('available') and re-raises; any
    other backend error marks it 'error_deleting' and re-raises.
    Quota reservations are rolled back on the re-raise paths.
    """
    snapshot = db.snapshot_get(context, snapshot_id)
    context = context.elevated()
    LOG.debug(_("snapshot %s: deleting"), snapshot['name'])
    reserve_opts = {'snapshots': -1}
    volume = db.volume_get(context, snapshot['volume_id'])
    try:
        QUOTAS.add_volume_type_opts(context,
                                    reserve_opts,
                                    volume.get('volume_type_id'))
        reservations = QUOTAS.reserve(context, **reserve_opts)
    except Exception:
        # Quota bookkeeping failure must not block the delete itself.
        reservations = None
        LOG.exception(_("Failed to update usages deleting snapshot"))
    client = LunrClient(snapshot, logger=LOG)
    try:
        client.backups.delete(snapshot['id'])
    # Py2-only "except LunrError, e" replaced with the portable form.
    except LunrError as e:
        # ignore Not Found on delete_snapshot. Don't wait on status.
        if e.code == 404:
            db.snapshot_destroy(context, snapshot['id'])
            LOG.debug(_("snapshot %s: deleted successfully"),
                      snapshot['name'])
        elif e.code == 409:
            db.snapshot_update(context, snapshot['id'],
                               {'status': 'available'})
            LOG.debug(_("snapshot %s: snapshot is busy"),
                      snapshot['name'])
            if reservations:
                QUOTAS.rollback(context, reservations)
            raise
        else:
            LOG.debug(_('error deleting snapshot %s'), snapshot['id'])
            db.snapshot_update(context, snapshot['id'],
                               {'status': 'error_deleting'})
            if reservations:
                QUOTAS.rollback(context, reservations)
            raise
def test_can_delete_errored_snapshot(self):
    """Test snapshot can be created and deleted."""
    volume = self._create_volume()
    vol_id = volume['id']
    self.volume.create_volume(self.context, vol_id)
    snap_id = self._create_snapshot(vol_id)['id']
    self.volume.create_snapshot(self.context, vol_id, snap_id)
    snapshot = db.snapshot_get(context.get_admin_context(), snap_id)
    volume_api = cinder.volume.api.API()

    # An unrecognized status must block the API-level delete...
    snapshot['status'] = 'badstatus'
    self.assertRaises(exception.InvalidSnapshot,
                      volume_api.delete_snapshot,
                      self.context,
                      snapshot)

    # ...but a snapshot in 'error' is deletable.
    snapshot['status'] = 'error'
    self.volume.delete_snapshot(self.context, snap_id)
    self.volume.delete_volume(self.context, vol_id)
def test_delete_busy_snapshot(self):
    """Test snapshot can be created and deleted."""
    volume = self._create_volume()
    volume_id = volume['id']
    self.volume.create_volume(self.context, volume_id)
    snapshot_id = self._create_snapshot(volume_id)['id']
    self.volume.create_snapshot(self.context, volume_id, snapshot_id)

    # Script the driver to report the snapshot busy on the first delete.
    self.mox.StubOutWithMock(self.volume.driver, 'delete_snapshot')
    self.volume.driver.delete_snapshot(mox.IgnoreArg()).AndRaise(
        exception.SnapshotIsBusy(snapshot_name='fake'))
    self.mox.ReplayAll()

    # A busy snapshot is not removed; it goes back to 'available'.
    self.volume.delete_snapshot(self.context, snapshot_id)
    snapshot_ref = db.snapshot_get(self.context, snapshot_id)
    self.assertEqual(snapshot_id, snapshot_ref.id)
    self.assertEqual("available", snapshot_ref.status)

    # With the stub removed the delete goes through.
    self.mox.UnsetStubs()
    self.volume.delete_snapshot(self.context, snapshot_id)
    self.volume.delete_volume(self.context, volume_id)
def test_cant_delete_volume_with_snapshots(self):
    """Test volume can't be deleted with dependent snapshots."""
    volume = self._create_volume()
    self.volume.create_volume(self.context, volume['id'])
    # _create_snapshot() returns the snapshot record; the manager and
    # db calls below need its id, not the whole dict.
    snapshot_id = self._create_snapshot(volume['id'])['id']
    self.volume.create_snapshot(self.context, volume['id'], snapshot_id)
    self.assertEqual(snapshot_id,
                     db.snapshot_get(context.get_admin_context(),
                                     snapshot_id).id)

    # Even a deletable-looking volume must be refused while a
    # snapshot still depends on it.
    volume['status'] = 'available'
    volume['host'] = 'fakehost'
    volume_api = cinder.volume.api.API()
    self.assertRaises(exception.InvalidVolume,
                      volume_api.delete,
                      self.context,
                      volume)

    self.volume.delete_snapshot(self.context, snapshot_id)
    self.volume.delete_volume(self.context, volume['id'])
def test_snapshot_reset_status(self):
    """os-reset_status moves an 'error_deleting' snapshot to 'error'."""
    # admin context
    ctx = context.RequestContext('admin', 'fake', True)
    # snapshot in 'error_deleting'
    volume = db.volume_create(ctx, {'status': 'available',
                                    'host': 'test',
                                    'provider_location': '',
                                    'size': 1})
    snapshot = db.snapshot_create(ctx, {'status': 'error_deleting',
                                        'volume_id': volume['id']})
    req = webob.Request.blank('/v2/fake/snapshots/%s/action'
                              % snapshot['id'])
    req.method = 'POST'
    req.headers['content-type'] = 'application/json'
    # request status of 'error'
    req.body = jsonutils.dumps({'os-reset_status': {'status': 'error'}})
    # attach admin context to request
    req.environ['cinder.context'] = ctx
    resp = req.get_response(app())
    # request is accepted
    self.assertEqual(202, resp.status_int)
    snapshot = db.snapshot_get(ctx, snapshot['id'])
    # status changed to 'error' (a stray third positional argument,
    # silently treated as the failure message, has been removed)
    self.assertEqual('error', snapshot['status'])
def test_invalid_status_for_snapshot(self):
    """Resetting a snapshot to a volume-only status must be rejected."""
    # admin context
    ctx = context.RequestContext('admin', 'fake', True)
    # snapshot in 'available'
    volume = db.volume_create(ctx, {})
    snapshot = db.snapshot_create(ctx, {'status': 'available',
                                        'volume_id': volume['id']})
    req = webob.Request.blank('/v1/fake/snapshots/%s/action'
                              % snapshot['id'])
    req.method = 'POST'
    req.headers['content-type'] = 'application/json'
    # 'attaching' is not a valid status for snapshots
    req.body = jsonutils.dumps({'os-reset_status':
                                {'status': 'attaching'}})
    # attach admin context to request
    req.environ['cinder.context'] = ctx
    resp = req.get_response(app())
    # request is rejected; assertEquals is deprecated -> assertEqual
    self.assertEqual(resp.status_int, 400)
    snapshot = db.snapshot_get(ctx, snapshot['id'])
    # status is still 'available'
    self.assertEqual(snapshot['status'], 'available')
def test_create_snapshot_during_encryption_key_migration(self):
    """A snapshot taken mid-migration picks up the volume's new key."""
    legacy_key_id = '00000000-0000-0000-0000-000000000000'
    volume = tests_utils.create_volume(self.context, **self.volume_params)
    volume['encryption_key_id'] = legacy_key_id
    volume_id = volume['id']
    self.volume.create_volume(self.context, volume)

    # Snapshot record is created while the fixed key is still in place.
    snapshot = create_snapshot(volume['id'],
                               encryption_key_id=legacy_key_id)
    self.assertEqual(legacy_key_id, snapshot.encryption_key_id)

    # Simulate the key migration landing on the volume.
    db.volume_update(self.context, volume_id,
                     {'encryption_key_id': fake.ENCRYPTION_KEY_ID})

    # The manager must stamp the snapshot with the migrated key.
    self.volume.create_snapshot(self.context, snapshot)
    snap_db = db.snapshot_get(self.context, snapshot.id)
    self.assertEqual(fake.ENCRYPTION_KEY_ID, snap_db.encryption_key_id)

    # cleanup resource
    snapshot.destroy()
    db.volume_destroy(self.context, volume_id)
def _volume_export_snapshot(self, req, id, body):
    """Export a snapshot to a file."""
    context = req.environ['cinder.context']
    LOG.debug("body: %s", body)

    # Only snapshots that are fully available may be exported.
    snap_record = db.snapshot_get(context, id)
    snap_status = snap_record['status']
    if snap_status not in {'available'}:
        msg = _("Snapshot status %(cur)s not allowed for "
                "export_snapshot") % {'cur': snap_status}
        raise webob.exc.HTTPBadRequest(explanation=msg)

    LOG.info("Exporting snapshot %(id)s", {'id': id})

    try:
        snapshot = self.volume_api.get_snapshot(context, id)
    except exception.SnapshotNotFound as error:
        raise webob.exc.HTTPNotFound(explanation=error.msg)

    # Map driver/RPC failures onto the appropriate HTTP errors.
    try:
        response = self.volume_api.export_snapshot(context, snapshot)
    except exception.InvalidSnapshot as error:
        raise webob.exc.HTTPBadRequest(explanation=error.msg)
    except ValueError as error:
        raise webob.exc.HTTPBadRequest(explanation=six.text_type(error))
    except rpc_client.RemoteError as error:
        msg = "%(err_type)s: %(err_msg)s" % {'err_type': error.exc_type,
                                             'err_msg': error.value}
        raise webob.exc.HTTPBadRequest(explanation=msg)

    return {WRS_SNAP_EXPORT: response}
def test_delete_snapshot_online_novafailure(self):
    """Delete the newest snapshot; Nova reports failure.

    The mocked db.snapshot_get sequence walks the snapshot through
    deleting/0% -> deleting/50% -> error_deleting/90%, so the driver
    must raise GlusterfsException instead of completing the delete.
    """
    (mox, drv) = self._mox, self._driver

    volume = self._simple_volume()
    volume['status'] = 'in-use'

    ctxt = context.RequestContext('fake_user', 'fake_project')

    snap_ref = {'name': 'test snap to delete (online)',
                'volume_id': self.VOLUME_UUID,
                'volume': volume,
                'id': self.SNAP_UUID,
                'context': ctxt}

    hashed = drv._get_hash_str(self.TEST_EXPORT1)
    volume_file = 'volume-%s' % self.VOLUME_UUID
    volume_path = '%s/%s/%s' % (self.TEST_MNT_POINT_BASE,
                                hashed,
                                volume_file)
    info_path = '%s.info' % volume_path
    snap_path = '%s.%s' % (volume_path, self.SNAP_UUID)
    snap_file = '%s.%s' % (volume_file, self.SNAP_UUID)

    mox.StubOutWithMock(drv, '_execute')
    mox.StubOutWithMock(drv, '_nova')
    mox.StubOutWithMock(drv, '_read_info_file')
    mox.StubOutWithMock(drv, '_write_info_file')
    mox.StubOutWithMock(os.path, 'exists')
    mox.StubOutWithMock(drv, '_get_backing_file_for_path')
    mox.StubOutWithMock(db, 'snapshot_get')

    snap_info = {'active': snap_file,
                 self.SNAP_UUID: snap_file}

    drv._read_info_file(info_path).AndReturn(snap_info)
    os.path.exists(snap_path).AndReturn(True)
    drv._read_info_file(info_path, empty_if_missing=True).\
        AndReturn(snap_info)

    # NOTE: the original defined an unused local
    # ('asdfqemu_img_info_output', a garbage-prefixed qemu-img sample) —
    # dead code, removed.

    delete_info = {'type': 'qcow2',
                   'merge_target_file': None,
                   'file_to_merge': volume_file,
                   'volume_id': self.VOLUME_UUID}
    drv._nova.delete_volume_snapshot(ctxt, self.SNAP_UUID, delete_info)

    drv._get_backing_file_for_path(snap_path).AndReturn(volume_file)
    drv._read_info_file(info_path).AndReturn(snap_info)
    drv._read_info_file(info_path).AndReturn(snap_info)

    # Simulate Nova's asynchronous progress updates ending in failure.
    snap_ref['status'] = 'deleting'
    snap_ref['progress'] = '0%'
    db.snapshot_get(ctxt, self.SNAP_UUID).AndReturn(snap_ref)

    snap_ref['progress'] = '50%'
    db.snapshot_get(ctxt, self.SNAP_UUID).AndReturn(snap_ref)

    snap_ref['status'] = 'error_deleting'
    snap_ref['progress'] = '90%'
    db.snapshot_get(ctxt, self.SNAP_UUID).AndReturn(snap_ref)

    db.snapshot_get(ctxt, self.SNAP_UUID).AndReturn(snap_ref)

    drv._write_info_file(info_path, snap_info)

    drv._execute('rm', '-f', volume_path, run_as_root=True)

    mox.ReplayAll()

    self.assertRaises(exception.GlusterfsException,
                      drv.delete_snapshot,
                      snap_ref)
def test_create_delete_snapshot(self):
    """Test snapshot can be created and deleted."""

    def _expected_payload(status):
        # The four notification payloads differ only in 'status';
        # build them from one place instead of four copy-pasted dicts.
        return {'created_at': 'DONTCARE',
                'deleted': '',
                'display_name': None,
                'snapshot_id': snapshot_id,
                'status': status,
                'tenant_id': 'fake',
                'user_id': 'fake',
                'volume_id': volume['id'],
                'volume_size': 0,
                'availability_zone': 'nova'}

    volume = self._create_volume()
    # assertEquals is a deprecated alias — use assertEqual throughout.
    self.assertEqual(len(test_notifier.NOTIFICATIONS), 0)
    self.volume.create_volume(self.context, volume['id'])
    self.assertEqual(len(test_notifier.NOTIFICATIONS), 2)

    snapshot_id = self._create_snapshot(volume['id'])['id']
    self.volume.create_snapshot(self.context, volume['id'], snapshot_id)
    self.assertEqual(
        snapshot_id,
        db.snapshot_get(context.get_admin_context(), snapshot_id).id)

    # Snapshot creation emits a start and an end notification.
    self.assertEqual(len(test_notifier.NOTIFICATIONS), 4)
    msg = test_notifier.NOTIFICATIONS[2]
    self.assertEqual(msg['event_type'], 'snapshot.create.start')
    self.assertDictMatch(msg['payload'], _expected_payload('creating'))
    msg = test_notifier.NOTIFICATIONS[3]
    self.assertEqual(msg['event_type'], 'snapshot.create.end')
    self.assertDictMatch(msg['payload'], _expected_payload('creating'))

    self.volume.delete_snapshot(self.context, snapshot_id)

    # Deletion emits its own start/end pair.
    self.assertEqual(len(test_notifier.NOTIFICATIONS), 6)
    msg = test_notifier.NOTIFICATIONS[4]
    self.assertEqual(msg['event_type'], 'snapshot.delete.start')
    self.assertDictMatch(msg['payload'], _expected_payload('available'))
    msg = test_notifier.NOTIFICATIONS[5]
    self.assertEqual(msg['event_type'], 'snapshot.delete.end')
    self.assertDictMatch(msg['payload'], _expected_payload('available'))

    snap = db.snapshot_get(context.get_admin_context(read_deleted='yes'),
                           snapshot_id)
    self.assertEqual(snap['status'], 'deleted')
    self.assertRaises(exception.NotFound,
                      db.snapshot_get,
                      self.context,
                      snapshot_id)
    self.volume.delete_volume(self.context, volume['id'])
def test_create_delete_snapshot(self):
    """Test snapshot can be created and deleted."""

    def _expected_payload(status):
        # The four notification payloads differ only in 'status';
        # build them from one place instead of four copy-pasted dicts.
        return {'created_at': 'DONTCARE',
                'deleted': '',
                'display_name': None,
                'snapshot_id': snapshot_id,
                'status': status,
                'tenant_id': 'fake',
                'user_id': 'fake',
                'volume_id': volume['id'],
                'volume_size': 0,
                'availability_zone': 'nova'}

    volume = self._create_volume()
    # assertEquals is a deprecated alias — use assertEqual throughout.
    self.assertEqual(len(test_notifier.NOTIFICATIONS), 0)
    self.volume.create_volume(self.context, volume['id'])
    self.assertEqual(len(test_notifier.NOTIFICATIONS), 2)

    snapshot_id = self._create_snapshot(volume['id'])['id']
    self.volume.create_snapshot(self.context, volume['id'], snapshot_id)
    self.assertEqual(snapshot_id,
                     db.snapshot_get(context.get_admin_context(),
                                     snapshot_id).id)

    # Snapshot creation emits a start and an end notification.
    self.assertEqual(len(test_notifier.NOTIFICATIONS), 4)
    msg = test_notifier.NOTIFICATIONS[2]
    self.assertEqual(msg['event_type'], 'snapshot.create.start')
    self.assertDictMatch(msg['payload'], _expected_payload('creating'))
    msg = test_notifier.NOTIFICATIONS[3]
    self.assertEqual(msg['event_type'], 'snapshot.create.end')
    self.assertDictMatch(msg['payload'], _expected_payload('creating'))

    self.volume.delete_snapshot(self.context, snapshot_id)

    # Deletion emits its own start/end pair.
    self.assertEqual(len(test_notifier.NOTIFICATIONS), 6)
    msg = test_notifier.NOTIFICATIONS[4]
    self.assertEqual(msg['event_type'], 'snapshot.delete.start')
    self.assertDictMatch(msg['payload'], _expected_payload('available'))
    msg = test_notifier.NOTIFICATIONS[5]
    self.assertEqual(msg['event_type'], 'snapshot.delete.end')
    self.assertDictMatch(msg['payload'], _expected_payload('available'))

    snap = db.snapshot_get(context.get_admin_context(read_deleted='yes'),
                           snapshot_id)
    self.assertEqual(snap['status'], 'deleted')
    self.assertRaises(exception.NotFound,
                      db.snapshot_get,
                      self.context,
                      snapshot_id)
    self.volume.delete_volume(self.context, volume['id'])
def get_by_id(cls, context, id):
    """Load a snapshot from the database and return it as an object.

    Metadata is eagerly loaded via expected_attrs.
    """
    record = db.snapshot_get(context, id)
    snapshot = cls(context)
    return cls._from_db_object(context, snapshot, record,
                               expected_attrs=['metadata'])
def _create_snapshot_online(self, snapshot, backing_filename,
                            new_snap_path):
    """Create a snapshot of an attached (in-use) volume via Nova.

    The qcow2 overlay is created locally, then Nova is asked to switch
    the running instance onto it.  Nova reports completion asynchronously
    by updating the snapshot's status/progress in the database, so this
    polls until progress hits '90%' (Nova done) or the timeout expires.

    :param snapshot: snapshot reference; must carry a 'context' key
    :param backing_filename: the volume's current active image file
    :param new_snap_path: path of the new qcow2 overlay
    :raises RemoteFSException: on Nova-reported error or timeout
    """
    # Perform online snapshot via Nova
    context = snapshot['context']

    self._do_create_snapshot(snapshot,
                             backing_filename,
                             new_snap_path)

    connection_info = {'type': 'qcow2',
                       'new_file': os.path.basename(new_snap_path),
                       'snapshot_id': snapshot['id']}

    try:
        result = self._nova.create_volume_snapshot(
            context,
            snapshot['volume_id'],
            connection_info)
        # Lazy %-args: only formatted when debug logging is enabled.
        LOG.debug('nova call result: %s', result)
    except Exception as e:
        LOG.error(_('Call to Nova to create snapshot failed'))
        LOG.exception(e)
        # Bare raise preserves the original traceback ('raise e' did not).
        raise

    # Loop and wait for result
    # Nova will call Cinderclient to update the status in the database
    # An update of progress = '90%' means that Nova is done
    seconds_elapsed = 0
    increment = 1
    timeout = 600
    while True:
        s = db.snapshot_get(context, snapshot['id'])

        if s['status'] == 'creating':
            if s['progress'] == '90%':
                # Nova tasks completed successfully
                break
            time.sleep(increment)
            seconds_elapsed += increment
        elif s['status'] == 'error':
            msg = _('Nova returned "error" status '
                    'while creating snapshot.')
            raise exception.RemoteFSException(msg)
        else:
            # Unexpected status: keep polling, but sleep so the loop
            # cannot busy-spin and the timeout below can actually fire
            # (previously only the 'creating' branch advanced the clock).
            time.sleep(increment)
            seconds_elapsed += increment

        LOG.debug('Status of snapshot %(id)s is now %(status)s',
                  {'id': snapshot['id'], 'status': s['status']})

        # Back off the polling interval as the wait grows.
        if 10 < seconds_elapsed <= 20:
            increment = 2
        elif 20 < seconds_elapsed <= 60:
            increment = 5
        elif 60 < seconds_elapsed:
            increment = 10

        if seconds_elapsed > timeout:
            msg = _('Timed out while waiting for Nova update '
                    'for creation of snapshot %s.') % snapshot['id']
            raise exception.RemoteFSException(msg)
def _create_snapshot(self, snapshot):
    """Create a snapshot.

    If volume is attached, call to Nova to create snapshot, providing a
    qcow2 file. Otherwise, create locally with qemu-img.

    A file named volume-<uuid>.info is stored with the volume data and
    is a JSON table which contains a mapping between Cinder snapshot
    UUIDs and filenames, as these associations will change as snapshots
    are deleted.

    Basic snapshot operation:

    1. Initial volume file:
        volume-1234

    2. Snapshot created:
        volume-1234  <- volume-1234.aaaa

        volume-1234.aaaa becomes the new "active" disk image.  If the
        volume is not attached, this filename will be used to attach the
        volume to a VM at volume-attach time.  If the volume is attached,
        the VM will switch to this file as part of the snapshot process.

        Note that volume-1234.aaaa represents changes after snapshot
        'aaaa' was created.  So the data for snapshot 'aaaa' is actually
        in the backing file(s) of volume-1234.aaaa.

        This file has a qcow2 header recording the fact that volume-1234
        is its backing file.  Delta changes since the snapshot was created
        are stored in this file, and the backing file (volume-1234) does
        not change.

        info file: { 'active': 'volume-1234.aaaa',
                     'aaaa':   'volume-1234.aaaa' }

    3. Second snapshot created:
        volume-1234 <- volume-1234.aaaa <- volume-1234.bbbb

        volume-1234.bbbb now becomes the "active" disk image, recording
        changes made to the volume.

        info file: { 'active': 'volume-1234.bbbb',
                     'aaaa':   'volume-1234.aaaa',
                     'bbbb':   'volume-1234.bbbb' }

    4. First snapshot deleted:
        volume-1234 <- volume-1234.aaaa (* now with bbbb's data)

        volume-1234.aaaa is removed (logically) from the snapshot chain.
        The data from volume-1234.bbbb is merged into it.

        (*) Since bbbb's data was committed into the aaaa file, we have
            "removed" aaaa's snapshot point but the .aaaa file now
            represents snapshot with id "bbbb".

        info file: { 'active': 'volume-1234.bbbb',
                     'bbbb':   'volume-1234.aaaa'   (* changed!) }

    5. Second snapshot deleted:
        volume-1234

        volume-1234.bbbb is removed from the snapshot chain, as above.
        The base image, volume-1234, becomes the active image for this
        volume again.  If in-use, the VM begins using the volume-1234.bbbb
        file immediately as part of the snapshot delete process.

        info file: { 'active': 'volume-1234' }

    For the above operations, Cinder handles manipulation of qcow2 files
    when the volume is detached.  When attached, Cinder creates and
    deletes qcow2 files, but Nova is responsible for transitioning the VM
    between them and handling live transfers of data between files as
    required.
    """
    status = snapshot['volume']['status']
    if status not in ['available', 'in-use']:
        msg = _('Volume status must be "available" or "in-use"'
                ' for snapshot. (is %s)') % status
        raise exception.InvalidVolume(msg)

    if status == 'in-use':
        # Perform online snapshot via Nova
        context = snapshot['context']

        backing_filename = self.get_active_image_from_info(
            snapshot['volume'])
        new_snap_path = '%s.%s' % (
            self._local_path_volume(snapshot['volume']), snapshot['id'])
        # (an unused 'path_to_disk' assignment was removed here)

        self._create_qcow2_snap_file(snapshot,
                                     backing_filename,
                                     new_snap_path)

        connection_info = {'type': 'qcow2',
                           'new_file': os.path.basename(new_snap_path),
                           'snapshot_id': snapshot['id']}

        try:
            result = self._nova.create_volume_snapshot(
                context, snapshot['volume_id'], connection_info)
            LOG.debug('nova call result: %s', result)
        except Exception as e:
            LOG.error(_('Call to Nova to create snapshot failed'))
            LOG.exception(e)
            # Bare raise preserves the original traceback.
            raise

        # Loop and wait for result
        # Nova will call Cinderclient to update the status in the database
        # An update of progress = '90%' means that Nova is done
        seconds_elapsed = 0
        increment = 1
        timeout = 600
        while True:
            s = db.snapshot_get(context, snapshot['id'])

            if s['status'] == 'creating':
                if s['progress'] == '90%':
                    # Nova tasks completed successfully
                    break
                time.sleep(increment)
                seconds_elapsed += increment
            elif s['status'] == 'error':
                msg = _('Nova returned "error" status '
                        'while creating snapshot.')
                raise exception.GlusterfsException(msg)
            else:
                # Unexpected status: keep polling, but sleep so the loop
                # cannot busy-spin and the timeout can still fire.
                time.sleep(increment)
                seconds_elapsed += increment

            LOG.debug('Status of snapshot %(id)s is now %(status)s',
                      {'id': snapshot['id'], 'status': s['status']})

            if 10 < seconds_elapsed <= 20:
                increment = 2
            elif 20 < seconds_elapsed <= 60:
                increment = 5
            elif 60 < seconds_elapsed:
                increment = 10

            if seconds_elapsed > timeout:
                msg = _('Timed out while waiting for Nova update '
                        'for creation of snapshot %s.') % snapshot['id']
                raise exception.GlusterfsException(msg)

        # Record the new overlay as the active image for this volume.
        info_path = self._local_path_volume(snapshot['volume']) + '.info'
        snap_info = self._read_info_file(info_path, empty_if_missing=True)
        snap_info['active'] = os.path.basename(new_snap_path)
        snap_info[snapshot['id']] = os.path.basename(new_snap_path)
        self._write_info_file(info_path, snap_info)

        return

    # Offline path: volume is detached; snapshot with qemu-img locally.
    LOG.debug('create snapshot: %s', snapshot)
    LOG.debug('volume id: %s', snapshot['volume_id'])

    path_to_disk = self._local_path_volume(snapshot['volume'])
    self._create_snapshot_offline(snapshot, path_to_disk)
def _delete_snapshot_online(self, context, snapshot, info):
    """Delete a snapshot of an attached volume by delegating to Nova.

    Depending on whether the snapshot file is the active image, Nova is
    instructed to either rebase/pull the base into the active file or to
    commit the snapshot file into its base.  Completion is signalled
    asynchronously through status/progress updates in the database.

    :param info: dict with 'active_file', 'snapshot_file', 'base_file',
                 'base_id' and 'new_base_file' keys describing the chain
    :raises GlusterfsException: on unexpected status or timeout
    """
    # Update info over the course of this method
    # active file never changes
    info_path = self._local_path_volume(snapshot['volume']) + '.info'
    snap_info = self._read_info_file(info_path)

    if info['active_file'] == info['snapshot_file']:
        # blockRebase/Pull base into active
        # info['base'] => snapshot_file
        file_to_delete = info['base_file']

        if info['base_id'] is None:
            # Passing base=none to blockRebase ensures that
            # libvirt blanks out the qcow2 backing file pointer
            new_base = None
        else:
            new_base = info['new_base_file']
            snap_info[info['base_id']] = info['snapshot_file']

        delete_info = {'file_to_merge': new_base,
                       'merge_target_file': None,  # current
                       'type': 'qcow2',
                       'volume_id': snapshot['volume']['id']}

        # 'del (x)' is just 'del x'; drop the redundant parentheses.
        del snap_info[snapshot['id']]
    else:
        # blockCommit snapshot into base
        # info['base'] <= snapshot_file
        # delete record of snapshot
        file_to_delete = info['snapshot_file']

        delete_info = {'file_to_merge': info['snapshot_file'],
                       'merge_target_file': info['base_file'],
                       'type': 'qcow2',
                       'volume_id': snapshot['volume']['id']}

        del snap_info[snapshot['id']]

    try:
        self._nova.delete_volume_snapshot(context,
                                          snapshot['id'],
                                          delete_info)
    except Exception as e:
        LOG.error(_('Call to Nova delete snapshot failed'))
        LOG.exception(e)
        # Bare raise preserves the original traceback ('raise e' did not).
        raise

    # Loop and wait for result
    # Nova will call Cinderclient to update the status in the database
    # An update of progress = '90%' means that Nova is done
    seconds_elapsed = 0
    increment = 1
    timeout = 7200
    while True:
        s = db.snapshot_get(context, snapshot['id'])

        if s['status'] == 'deleting':
            if s['progress'] == '90%':
                # Nova tasks completed successfully
                break
            else:
                msg = ('status of snapshot %s is '
                       'still "deleting"... waiting') % snapshot['id']
                LOG.debug(msg)
                time.sleep(increment)
                seconds_elapsed += increment
        else:
            # Any status other than 'deleting' is fatal here.
            msg = _('Unable to delete snapshot %(id)s, '
                    'status: %(status)s.') % {'id': snapshot['id'],
                                              'status': s['status']}
            raise exception.GlusterfsException(msg)

        if 10 < seconds_elapsed <= 20:
            increment = 2
        elif 20 < seconds_elapsed <= 60:
            increment = 5
        elif 60 < seconds_elapsed:
            increment = 10

        if seconds_elapsed > timeout:
            msg = _('Timed out while waiting for Nova update '
                    'for deletion of snapshot %(id)s.') %\
                {'id': snapshot['id']}
            raise exception.GlusterfsException(msg)

    # Write info file updated above
    self._write_info_file(info_path, snap_info)

    # Delete stale file
    path_to_delete = os.path.join(
        self._local_volume_dir(snapshot['volume']), file_to_delete)
    self._execute('rm', '-f', path_to_delete, run_as_root=True)
def create_volume(self, context, volume_id, snapshot_id=None,
                  image_id=None, source_volid=None, **kwargs):
    """Create a volume on the Lunr backend.

    Optionally seeds the volume from a Glance image, a snapshot
    (backup), or another volume (clone), copying bootable/Glance
    metadata where appropriate.  On Lunr failure the DB record is
    destroyed so no error'd volume is left behind.
    """
    context = context.elevated()
    volume = db.volume_get(context, volume_id)
    LOG.info(_("volume %s: creating"), volume['name'])

    model_update = {'host': 'lunr'}
    volume['host'] = 'lunr'

    try:
        # Try to get the volume type name, else use the default volume type
        volume_type_name = volume['volume_type']['name']
    except (KeyError, TypeError):
        volume_type_name = CONF.lunr_default_volume_type

    # Using the default volume type name,
    # ask the db for the volume type id
    vtype_id = self._get_volume_type_id(volume_type_name)
    model_update['volume_type_id'] = vtype_id
    volume['volume_type_id'] = vtype_id
    db.volume_update(context, volume['id'], model_update)

    params = {
        'name': volume['id'],
        'size': volume['size'],
        'volume_type_name': volume_type_name,
    }

    # Copy image to volume!
    if image_id:
        params['image_id'] = image_id
        image_service, image_id = glance.get_remote_image_service(
            context, image_id)
        image_meta = image_service.show(context, image_id)
        if image_meta:
            db.volume_glance_metadata_create(context, volume['id'],
                                             'image_id', image_id)
            name = image_meta.get('name', None)
            if name:
                db.volume_glance_metadata_create(context, volume['id'],
                                                 'image_name', name)
            image_properties = image_meta.get('properties', {})
            for key, value in image_properties.items():
                db.volume_glance_metadata_create(context, volume['id'],
                                                 key, value)

    # If this is a snapshot request, add the backup param
    if snapshot_id:
        params['backup'] = snapshot_id
        snapshot_ref = db.snapshot_get(context, snapshot_id)
        original_vref = db.volume_get(context, snapshot_ref['volume_id'])
        if original_vref['bootable']:
            db.volume_glance_metadata_copy_to_volume(
                context, volume_id, snapshot_id)
            db.volume_update(context, volume_id, {'bootable': True})

    # If this is a clone request, add the source_volume_id param
    if source_volid:
        params['source_volume'] = source_volid
        source_vref = db.volume_get(context, source_volid)
        if source_vref['bootable']:
            db.volume_glance_metadata_copy_from_volume_to_volume(
                context, source_volid, volume_id)
            db.volume_update(context, volume_id, {'bootable': True})

    try:
        # The response is not used; only success/failure matters here.
        LunrClient(volume, logger=LOG).volumes.create(
            volume['id'], **params)
    # 'except LunrError, e:' was Python-2-only syntax; 'as' works on
    # both Python 2.6+ and Python 3.
    except LunrError:
        LOG.debug('error creating volume %s', volume['id'])
        # Don't leave an error'd volume around, the raise here
        # will notify the caller of the error (See Github Issue #343)
        # Also, in Havana, TaskFlow will revert the quota increase.
        db.volume_destroy(context, volume['id'])
        # Bare raise preserves the original traceback.
        raise
def test_delete_snapshot_online_2(self):
    """Delete the middle snapshot."""
    # Driver and mox handles; the volume is marked in-use so the driver
    # takes the online (Nova-assisted) delete path.
    (mox, drv) = self._mox, self._driver
    volume = self._simple_volume()
    volume['status'] = 'in-use'
    ctxt = context.RequestContext('fake_user', 'fake_project')
    # Snapshot reference handed to delete_snapshot; carries the context
    # the driver uses for DB polling.
    snap_ref = {'name': 'test snap to delete (online)',
                'volume_id': self.VOLUME_UUID,
                'volume': volume,
                'id': self.SNAP_UUID,
                'context': ctxt}
    # Expected on-disk layout: base volume file plus two qcow2 overlays.
    hashed = drv._get_hash_str(self.TEST_EXPORT1)
    volume_file = 'volume-%s' % self.VOLUME_UUID
    volume_path = '%s/%s/%s' % (self.TEST_MNT_POINT_BASE, hashed,
                                volume_file)
    info_path = '%s.info' % volume_path
    snap_path = '%s.%s' % (volume_path, self.SNAP_UUID)
    snap_path_2 = '%s.%s' % (volume_path, self.SNAP_UUID_2)
    snap_file = '%s.%s' % (volume_file, self.SNAP_UUID)
    snap_file_2 = '%s.%s' % (volume_file, self.SNAP_UUID_2)
    # Stub out everything the driver touches; the recorded call order
    # below is the mox expectation script.
    mox.StubOutWithMock(drv, '_execute')
    mox.StubOutWithMock(drv, '_nova')
    mox.StubOutWithMock(drv, '_read_info_file')
    mox.StubOutWithMock(drv, '_write_info_file')
    mox.StubOutWithMock(os.path, 'exists')
    mox.StubOutWithMock(drv, '_get_backing_file_for_path')
    mox.StubOutWithMock(db, 'snapshot_get')
    # SNAP_UUID_2 is active; SNAP_UUID (being deleted) sits in the middle
    # of the chain, so its file is committed into the base.
    snap_info = {'active': snap_file_2,
                 self.SNAP_UUID: snap_file,
                 self.SNAP_UUID_2: snap_file_2}
    drv._read_info_file(info_path).AndReturn(snap_info)
    os.path.exists(snap_path).AndReturn(True)
    drv._read_info_file(info_path, empty_if_missing=True).\
        AndReturn(snap_info)
    drv._get_backing_file_for_path(snap_path).AndReturn(volume_file)
    # Middle-of-chain delete => blockCommit of snap_file into the base.
    delete_info = {'type': 'qcow2',
                   'merge_target_file': volume_file,
                   'file_to_merge': snap_file,
                   'volume_id': self.VOLUME_UUID}
    drv._nova.delete_volume_snapshot(ctxt, self.SNAP_UUID, delete_info)
    drv._read_info_file(info_path).AndReturn(snap_info)
    drv._read_info_file(info_path).AndReturn(snap_info)
    # Simulate Nova's asynchronous progress: 0% -> 50% -> 90% (done),
    # then one final read after the polling loop exits.
    snap_ref['status'] = 'deleting'
    snap_ref['progress'] = '0%'
    db.snapshot_get(ctxt, self.SNAP_UUID).AndReturn(snap_ref)
    snap_ref['progress'] = '50%'
    db.snapshot_get(ctxt, self.SNAP_UUID).AndReturn(snap_ref)
    snap_ref['progress'] = '90%'
    db.snapshot_get(ctxt, self.SNAP_UUID).AndReturn(snap_ref)
    db.snapshot_get(ctxt, self.SNAP_UUID).AndReturn(snap_ref)
    # Driver rewrites the info file and removes the now-stale overlay.
    drv._write_info_file(info_path, snap_info)
    drv._execute('rm', '-f', snap_path, run_as_root=True)
    mox.ReplayAll()
    drv.delete_snapshot(snap_ref)
def create_snapshot(self, snapshot):
    """Create a snapshot.

    If volume is attached, call to Nova to create snapshot, providing a
    qcow2 file. Otherwise, create locally with qemu-img.

    A file named volume-<uuid>.info is stored with the volume data and
    is a JSON table which contains a mapping between Cinder snapshot
    UUIDs and filenames, as these associations will change as snapshots
    are deleted.

    Basic snapshot operation:

    1. Initial volume file:
        volume-1234

    2. Snapshot created:
        volume-1234  <- volume-1234.aaaa

        volume-1234.aaaa becomes the new "active" disk image.  If the
        volume is not attached, this filename will be used to attach the
        volume to a VM at volume-attach time.  If the volume is attached,
        the VM will switch to this file as part of the snapshot process.

        Note that volume-1234.aaaa represents changes after snapshot
        'aaaa' was created.  So the data for snapshot 'aaaa' is actually
        in the backing file(s) of volume-1234.aaaa.

        This file has a qcow2 header recording the fact that volume-1234
        is its backing file.  Delta changes since the snapshot was created
        are stored in this file, and the backing file (volume-1234) does
        not change.

        info file: { 'active': 'volume-1234.aaaa',
                     'aaaa':   'volume-1234.aaaa' }

    3. Second snapshot created:
        volume-1234 <- volume-1234.aaaa <- volume-1234.bbbb

        volume-1234.bbbb now becomes the "active" disk image, recording
        changes made to the volume.

        info file: { 'active': 'volume-1234.bbbb',
                     'aaaa':   'volume-1234.aaaa',
                     'bbbb':   'volume-1234.bbbb' }

    4. First snapshot deleted:
        volume-1234 <- volume-1234.aaaa (* now with bbbb's data)

        volume-1234.aaaa is removed (logically) from the snapshot chain.
        The data from volume-1234.bbbb is merged into it.

        (*) Since bbbb's data was committed into the aaaa file, we have
            "removed" aaaa's snapshot point but the .aaaa file now
            represents snapshot with id "bbbb".

        info file: { 'active': 'volume-1234.bbbb',
                     'bbbb':   'volume-1234.aaaa'   (* changed!) }

    5. Second snapshot deleted:
        volume-1234

        volume-1234.bbbb is removed from the snapshot chain, as above.
        The base image, volume-1234, becomes the active image for this
        volume again.  If in-use, the VM begins using the volume-1234.bbbb
        file immediately as part of the snapshot delete process.

        info file: { 'active': 'volume-1234' }

    For the above operations, Cinder handles manipulation of qcow2 files
    when the volume is detached.  When attached, Cinder creates and
    deletes qcow2 files, but Nova is responsible for transitioning the VM
    between them and handling live transfers of data between files as
    required.
    """
    status = snapshot['volume']['status']
    if status not in ['available', 'in-use']:
        msg = _('Volume status must be "available" or "in-use"'
                ' for snapshot. (is %s)') % status
        raise exception.InvalidVolume(msg)

    if status == 'in-use':
        # Perform online snapshot via Nova
        context = snapshot['context']

        backing_filename = self.get_active_image_from_info(
            snapshot['volume'])
        new_snap_path = '%s.%s' % (
            self._local_path_volume(snapshot['volume']), snapshot['id'])
        # (an unused 'path_to_disk' assignment was removed here)

        self._create_qcow2_snap_file(snapshot,
                                     backing_filename,
                                     new_snap_path)

        connection_info = {'type': 'qcow2',
                           'new_file': os.path.basename(new_snap_path),
                           'snapshot_id': snapshot['id']}

        try:
            result = self._nova.create_volume_snapshot(
                context, snapshot['volume_id'], connection_info)
            LOG.debug(_('nova call result: %s'), result)
        except Exception as e:
            LOG.error(_('Call to Nova to create snapshot failed'))
            LOG.exception(e)
            # Bare raise preserves the original traceback.
            raise

        # Loop and wait for result
        # Nova will call Cinderclient to update the status in the database
        # An update of progress = '90%' means that Nova is done
        seconds_elapsed = 0
        increment = 1
        timeout = 600
        while True:
            s = db.snapshot_get(context, snapshot['id'])

            if s['status'] == 'creating':
                if s['progress'] == '90%':
                    # Nova tasks completed successfully
                    break
                time.sleep(increment)
                seconds_elapsed += increment
            elif s['status'] == 'error':
                msg = _('Nova returned "error" status '
                        'while creating snapshot.')
                raise exception.GlusterfsException(msg)
            else:
                # Unexpected status: keep polling, but sleep so the loop
                # cannot busy-spin and the timeout can still fire.
                time.sleep(increment)
                seconds_elapsed += increment

            LOG.debug(_('Status of snapshot %(id)s is now %(status)s'),
                      {'id': snapshot['id'], 'status': s['status']})

            if 10 < seconds_elapsed <= 20:
                increment = 2
            elif 20 < seconds_elapsed <= 60:
                increment = 5
            elif 60 < seconds_elapsed:
                increment = 10

            if seconds_elapsed > timeout:
                msg = _('Timed out while waiting for Nova update '
                        'for creation of snapshot %s.') % snapshot['id']
                raise exception.GlusterfsException(msg)

        # Record the new overlay as the active image for this volume.
        info_path = self._local_path_volume(snapshot['volume']) + '.info'
        snap_info = self._read_info_file(info_path, empty_if_missing=True)
        snap_info['active'] = os.path.basename(new_snap_path)
        snap_info[snapshot['id']] = os.path.basename(new_snap_path)
        self._write_info_file(info_path, snap_info)

        return

    # Offline path: volume is detached; snapshot with qemu-img locally.
    LOG.debug(_('create snapshot: %s'), snapshot)
    LOG.debug(_('volume id: %s'), snapshot['volume_id'])

    path_to_disk = self._local_path_volume(snapshot['volume'])
    snap_id = snapshot['id']
    self._create_snapshot(snapshot, path_to_disk, snap_id)
def _delete_snapshot_online(self, context, snapshot, info):
    """Delete a snapshot of an attached volume by delegating to Nova.

    If the snapshot file is the active image, Nova rebases/pulls the
    base into the active file; otherwise Nova commits the snapshot file
    into its base.  Completion is signalled asynchronously via
    status/progress updates in the database.

    :param info: dict with 'active_file', 'snapshot_file' and
                 'base_file' keys describing the qcow2 chain
    :raises GlusterfsException: on unexpected status or timeout
    """
    # Update info over the course of this method
    # active file never changes
    info_path = self._local_path_volume(snapshot['volume']) + '.info'
    snap_info = self._read_info_file(info_path)

    if info['active_file'] == info['snapshot_file']:
        # blockRebase/Pull base into active
        # info['base'] => snapshot_file
        file_to_delete = info['base_file']

        delete_info = {'file_to_merge': info['base_file'],
                       'merge_target_file': None,  # current
                       'type': 'qcow2',
                       'volume_id': snapshot['volume']['id']}

        # 'del(x)' is just 'del x'; drop the redundant parentheses.
        del snap_info[snapshot['id']]
    else:
        # blockCommit snapshot into base
        # info['base'] <= snapshot_file
        # delete record of snapshot
        file_to_delete = info['snapshot_file']

        delete_info = {'file_to_merge': info['snapshot_file'],
                       'merge_target_file': info['base_file'],
                       'type': 'qcow2',
                       'volume_id': snapshot['volume']['id']}

        del snap_info[snapshot['id']]

    try:
        self._nova.delete_volume_snapshot(
            context,
            snapshot['id'],
            delete_info)
    except Exception as e:
        LOG.error(_('Call to Nova delete snapshot failed'))
        LOG.exception(e)
        # Bare raise preserves the original traceback ('raise e' did not).
        raise

    # Loop and wait for result
    # Nova will call Cinderclient to update the status in the database
    # An update of progress = '90%' means that Nova is done
    seconds_elapsed = 0
    increment = 1
    timeout = 600
    while True:
        s = db.snapshot_get(context, snapshot['id'])

        if s['status'] == 'deleting':
            if s['progress'] == '90%':
                # Nova tasks completed successfully
                break
            else:
                msg = _('status of snapshot %s is '
                        'still "deleting"... waiting') % snapshot['id']
                LOG.debug(msg)
                time.sleep(increment)
                seconds_elapsed += increment
        else:
            # Any status other than 'deleting' is fatal here.
            msg = _('Unable to delete snapshot %(id)s, '
                    'status: %(status)s.') % {'id': snapshot['id'],
                                              'status': s['status']}
            raise exception.GlusterfsException(msg)

        if 10 < seconds_elapsed <= 20:
            increment = 2
        elif 20 < seconds_elapsed <= 60:
            increment = 5
        elif 60 < seconds_elapsed:
            increment = 10

        if seconds_elapsed > timeout:
            msg = _('Timed out while waiting for Nova update '
                    'for deletion of snapshot %(id)s.') %\
                {'id': snapshot['id']}
            raise exception.GlusterfsException(msg)

    # Write info file updated above
    self._write_info_file(info_path, snap_info)

    # Delete stale file
    path_to_delete = os.path.join(
        self._local_volume_dir(snapshot['volume']), file_to_delete)
    self._execute('rm', '-f', path_to_delete, run_as_root=True)