def test_run_attach_detach_volume(self):
    """Make sure volume can be attached and detached from instance."""
    instance_uuid = '12345678-1234-5678-1234-567812345678'
    mountpoint = "/dev/sdf"

    # Create the volume and mark it attached directly in the db.
    volume = self._create_volume()
    volume_id = volume['id']
    self.volume.create_volume(self.context, volume_id)
    db.volume_attached(self.context, volume_id, instance_uuid, mountpoint)

    vol = db.volume_get(context.get_admin_context(), volume_id)
    self.assertEqual(vol['status'], "in-use")
    self.assertEqual(vol['attach_status'], "attached")
    self.assertEqual(vol['mountpoint'], mountpoint)
    self.assertEqual(vol['instance_uuid'], instance_uuid)

    # An attached volume must refuse deletion.
    self.assertRaises(exception.VolumeAttached,
                      self.volume.delete_volume,
                      self.context,
                      volume_id)

    # After detaching, deletion succeeds and the volume disappears.
    db.volume_detached(self.context, volume_id)
    vol = db.volume_get(self.context, volume_id)
    self.assertEqual(vol['status'], "available")

    self.volume.delete_volume(self.context, volume_id)
    self.assertRaises(exception.VolumeNotFound,
                      db.volume_get,
                      self.context,
                      volume_id)
def test_create_delete_volume(self):
    """Test volume can be created and deleted."""
    # Need to stub out reserve, commit, and rollback
    def fake_reserve(context, expire=None, **deltas):
        return ["RESERVATION"]

    def fake_commit(context, reservations):
        pass

    def fake_rollback(context, reservations):
        pass

    self.stubs.Set(QUOTAS, "reserve", fake_reserve)
    self.stubs.Set(QUOTAS, "commit", fake_commit)
    self.stubs.Set(QUOTAS, "rollback", fake_rollback)

    volume = self._create_volume()
    volume_id = volume['id']
    # assertEquals is a deprecated alias; use assertEqual throughout.
    self.assertEqual(len(test_notifier.NOTIFICATIONS), 0)
    self.volume.create_volume(self.context, volume_id)
    # create emits a .start and a .end notification
    self.assertEqual(len(test_notifier.NOTIFICATIONS), 2)
    self.assertEqual(volume_id, db.volume_get(context.get_admin_context(),
                                              volume_id).id)

    self.volume.delete_volume(self.context, volume_id)
    vol = db.volume_get(context.get_admin_context(read_deleted='yes'),
                        volume_id)
    self.assertEqual(vol['status'], 'deleted')
    # delete emits two more notifications
    self.assertEqual(len(test_notifier.NOTIFICATIONS), 4)
    self.assertRaises(exception.NotFound,
                      db.volume_get,
                      self.context,
                      volume_id)
def test_init_host(self):
    """Make sure stuck volumes and backups are reset to correct
    states when backup_manager.init_host() is called
    """
    # Two volumes stuck mid-operation...
    vol1_id = self._create_volume_db_entry()
    self._create_volume_attach(vol1_id)
    db.volume_update(self.ctxt, vol1_id, {"status": "backing-up"})
    vol2_id = self._create_volume_db_entry()
    self._create_volume_attach(vol2_id)
    db.volume_update(self.ctxt, vol2_id, {"status": "restoring-backup"})

    # ...and three backups stuck in transitional states.
    backup1_id = self._create_backup_db_entry(status="creating")
    backup2_id = self._create_backup_db_entry(status="restoring")
    backup3_id = self._create_backup_db_entry(status="deleting")

    self.backup_mgr.init_host()

    vol1 = db.volume_get(self.ctxt, vol1_id)
    self.assertEqual(vol1["status"], "available")
    vol2 = db.volume_get(self.ctxt, vol2_id)
    self.assertEqual(vol2["status"], "error_restoring")

    backup1 = db.backup_get(self.ctxt, backup1_id)
    self.assertEqual(backup1["status"], "error")
    backup2 = db.backup_get(self.ctxt, backup2_id)
    self.assertEqual(backup2["status"], "available")
    # The half-deleted backup is finished off entirely.
    self.assertRaises(exception.BackupNotFound,
                      db.backup_get,
                      self.ctxt,
                      backup3_id)
def _create_backup_db_entry(
    self,
    volume_id=_DEFAULT_VOLUME_ID,
    container="test-container",
    backup_id=fake.BACKUP_ID,
    parent_id=None,
    service_metadata=None,
):
    """Create a backup row (and its volume, if missing) in the db."""
    # Lazily create the referenced volume when it does not exist yet.
    try:
        db.volume_get(self.ctxt, volume_id)
    except exception.NotFound:
        self._create_volume_db_entry(volume_id=volume_id)

    backup = {
        "id": backup_id,
        "size": 1,
        "container": container,
        "volume_id": volume_id,
        "parent_id": parent_id,
        "user_id": fake.USER_ID,
        "project_id": fake.PROJECT_ID,
        "service_metadata": service_metadata,
    }
    return db.backup_create(self.ctxt, backup)["id"]
def test_force_detach_volume(self):
    """Force-detaching an attached volume returns it to 'available'."""
    # admin context
    ctx = context.RequestContext("admin", "fake", True)
    # current status is available
    volume = db.volume_create(ctx, {"status": "available", "host": "test",
                                    "provider_location": ""})
    # start service to handle rpc messages for attach requests
    self.start_service("volume", host="test")
    self.volume_api.reserve_volume(ctx, volume)
    self.volume_api.initialize_connection(ctx, volume, {})
    mountpoint = "/dev/vbd"
    self.volume_api.attach(ctx, volume, fakes.FAKE_UUID, mountpoint)
    # volume is attached
    volume = db.volume_get(ctx, volume["id"])
    # assertEquals is a deprecated alias; use assertEqual / assertIsNone.
    self.assertEqual(volume["status"], "in-use")
    self.assertEqual(volume["instance_uuid"], fakes.FAKE_UUID)
    self.assertEqual(volume["mountpoint"], mountpoint)
    self.assertEqual(volume["attach_status"], "attached")
    # build request to force detach
    req = webob.Request.blank("/v1/fake/volumes/%s/action" % volume["id"])
    req.method = "POST"
    req.headers["content-type"] = "application/json"
    # request status of 'error'
    req.body = jsonutils.dumps({"os-force_detach": None})
    # attach admin context to request
    req.environ["cinder.context"] = ctx
    # make request
    resp = req.get_response(app())
    # request is accepted
    self.assertEqual(resp.status_int, 202)
    volume = db.volume_get(ctx, volume["id"])
    # status changed to 'available'
    self.assertEqual(volume["status"], "available")
    self.assertIsNone(volume["instance_uuid"])
    self.assertIsNone(volume["mountpoint"])
    self.assertEqual(volume["attach_status"], "detached")
def test_run_attach_detach_volume(self):
    """Make sure volume can be attached and detached from instance."""
    instance_uuid = "12345678-1234-5678-1234-567812345678"
    mountpoint = "/dev/sdf"
    volume = self._create_volume()
    volume_id = volume["id"]
    self.volume.create_volume(self.context, volume_id)
    if FLAGS.fake_tests:
        db.volume_attached(self.context, volume_id, instance_uuid,
                           mountpoint)
    else:
        self.compute.attach_volume(self.context, instance_uuid,
                                   volume_id, mountpoint)
    vol = db.volume_get(context.get_admin_context(), volume_id)
    self.assertEqual(vol["status"], "in-use")
    self.assertEqual(vol["attach_status"], "attached")
    self.assertEqual(vol["mountpoint"], mountpoint)
    self.assertEqual(vol["instance_uuid"], instance_uuid)

    self.assertRaises(exception.VolumeAttached,
                      self.volume.delete_volume,
                      self.context,
                      volume_id)
    if FLAGS.fake_tests:
        db.volume_detached(self.context, volume_id)
    else:
        # Removed a dead `pass` here; mirror the attach path and detach
        # through compute when not running in fake mode.
        self.compute.detach_volume(self.context, instance_uuid, volume_id)
    vol = db.volume_get(self.context, volume_id)
    self.assertEqual(vol["status"], "available")

    self.volume.delete_volume(self.context, volume_id)
    self.assertRaises(exception.VolumeNotFound,
                      db.volume_get,
                      self.context,
                      volume_id)
def test_init_host(self):
    """Make sure stuck volumes and backups are reset to correct
    states when backup_manager.init_host() is called
    """
    # Volumes left mid-backup and mid-restore.
    vol1_id = self._create_volume_db_entry()
    self._create_volume_attach(vol1_id)
    db.volume_update(self.ctxt, vol1_id, {'status': 'backing-up'})
    vol2_id = self._create_volume_db_entry()
    self._create_volume_attach(vol2_id)
    db.volume_update(self.ctxt, vol2_id, {'status': 'restoring-backup'})

    # Backups left in transitional states.
    backup1 = self._create_backup_db_entry(status='creating')
    backup2 = self._create_backup_db_entry(status='restoring')
    backup3 = self._create_backup_db_entry(status='deleting')

    self.backup_mgr.init_host()

    vol1 = db.volume_get(self.ctxt, vol1_id)
    self.assertEqual(vol1['status'], 'available')
    vol2 = db.volume_get(self.ctxt, vol2_id)
    self.assertEqual(vol2['status'], 'error_restoring')

    backup1 = db.backup_get(self.ctxt, backup1.id)
    self.assertEqual(backup1['status'], 'error')
    backup2 = db.backup_get(self.ctxt, backup2.id)
    self.assertEqual(backup2['status'], 'available')
    # The 'deleting' backup should have been removed completely.
    self.assertRaises(exception.BackupNotFound,
                      db.backup_get,
                      self.ctxt,
                      backup3.id)
def test_transfer_accept(self, mock_notify):
    """Exercise accept() error paths and the successful ownership change."""
    svc = self.start_service('volume', host='test_host')
    tx_api = transfer_api.API()
    utils.create_volume(self.ctxt, id='1', updated_at=self.updated_at)
    transfer = tx_api.create(self.ctxt, '1', 'Description')
    volume = db.volume_get(self.ctxt, '1')
    self.assertEqual('awaiting-transfer', volume['status'],
                     'Unexpected state')

    # Unknown transfer id and wrong auth key are both rejected.
    self.assertRaises(exception.TransferNotFound,
                      tx_api.accept,
                      self.ctxt, '2', transfer['auth_key'])
    self.assertRaises(exception.InvalidAuthKey,
                      tx_api.accept,
                      self.ctxt, transfer['id'], 'wrong')

    calls = [mock.call(self.ctxt, mock.ANY, "transfer.create.start"),
             mock.call(self.ctxt, mock.ANY, "transfer.create.end")]
    mock_notify.assert_has_calls(calls)
    self.assertEqual(2, mock_notify.call_count)

    # A volume in the wrong state cannot be accepted.
    db.volume_update(self.ctxt, '1', {'status': 'wrong'})
    self.assertRaises(exception.InvalidVolume,
                      tx_api.accept,
                      self.ctxt, transfer['id'], transfer['auth_key'])
    db.volume_update(self.ctxt, '1', {'status': 'awaiting-transfer'})
    # Because the InvalidVolume exception is raised in tx_api, so there is
    # only transfer.accept.start called and missing transfer.accept.end.
    calls = [mock.call(self.ctxt, mock.ANY, "transfer.accept.start")]
    mock_notify.assert_has_calls(calls)
    self.assertEqual(3, mock_notify.call_count)

    # Successful accept transfers ownership to the new user/project.
    self.ctxt.user_id = 'new_user_id'
    self.ctxt.project_id = 'new_project_id'
    response = tx_api.accept(self.ctxt, transfer['id'],
                             transfer['auth_key'])
    volume = db.volume_get(self.ctxt, '1')
    self.assertEqual(volume['project_id'], 'new_project_id',
                     'Unexpected project id')
    self.assertEqual(volume['user_id'], 'new_user_id',
                     'Unexpected user id')
    self.assertEqual(volume['id'], response['volume_id'],
                     'Unexpected volume id in response.')
    self.assertEqual(transfer['id'], response['id'],
                     'Unexpected transfer id in response.')

    calls = [mock.call(self.ctxt, mock.ANY, "transfer.accept.start"),
             mock.call(self.ctxt, mock.ANY, "transfer.accept.end")]
    mock_notify.assert_has_calls(calls)
    self.assertEqual(5, mock_notify.call_count)

    svc.stop()
def test_force_detach_host_attached_volume(self):
    # admin context
    ctx = context.RequestContext('admin', 'fake', True)
    # current status is available
    volume = self._create_volume(ctx, {'provider_location': '', 'size': 1})
    connector = {'initiator': 'iqn.2012-07.org.fake:01'}
    # start service to handle rpc messages for attach requests
    svc = self.start_service('volume', host='test')
    self.volume_api.initialize_connection(ctx, volume, connector)
    mountpoint = '/dev/vbd'
    host_name = 'fake-host'
    attachment = self.volume_api.attach(ctx, volume, None, host_name,
                                        mountpoint, 'ro')
    # volume is attached
    volume = db.volume_get(ctx, volume['id'])
    self.assertEqual('in-use', volume['status'])
    self.assertIsNone(attachment['instance_uuid'])
    self.assertEqual(host_name, attachment['attached_host'])
    self.assertEqual(mountpoint, attachment['mountpoint'])
    self.assertEqual('attached', attachment['attach_status'])
    admin_metadata = volume['volume_admin_metadata']
    self.assertEqual(2, len(admin_metadata))
    self.assertEqual('readonly', admin_metadata[0]['key'])
    self.assertEqual('False', admin_metadata[0]['value'])
    self.assertEqual('attached_mode', admin_metadata[1]['key'])
    self.assertEqual('ro', admin_metadata[1]['value'])
    conn_info = self.volume_api.initialize_connection(ctx, volume,
                                                      connector)
    self.assertEqual('ro', conn_info['data']['access_mode'])
    # build request to force detach
    req = webob.Request.blank('/v2/fake/volumes/%s/action' % volume['id'])
    req.method = 'POST'
    req.headers['content-type'] = 'application/json'
    # request status of 'error'
    body = {'os-force_detach': {'attachment_id': attachment['id'],
                                'connector': connector}}
    req.body = jsonutils.dump_as_bytes(body)
    # attach admin context to request
    req.environ['cinder.context'] = ctx
    # make request
    resp = req.get_response(app())
    # request is accepted
    self.assertEqual(202, resp.status_int)
    volume = db.volume_get(ctx, volume['id'])
    self.assertRaises(exception.VolumeAttachmentNotFound,
                      db.volume_attachment_get,
                      ctx, attachment['id'])
    # status changed to 'available'
    self.assertEqual('available', volume['status'])
    admin_metadata = volume['volume_admin_metadata']
    self.assertEqual(1, len(admin_metadata))
    self.assertEqual('readonly', admin_metadata[0]['key'])
    self.assertEqual('False', admin_metadata[0]['value'])
    # cleanup
    svc.stop()
def test_begin_roll_detaching_volume(self):
    """Test begin_detaching and roll_detaching functions."""
    volume = self._create_volume()
    volume_api = cinder.volume.api.API()

    # begin_detaching moves the volume into 'detaching'...
    volume_api.begin_detaching(self.context, volume)
    volume = db.volume_get(self.context, volume['id'])
    self.assertEqual(volume['status'], "detaching")

    # ...and roll_detaching returns it to 'in-use'.
    volume_api.roll_detaching(self.context, volume)
    volume = db.volume_get(self.context, volume['id'])
    self.assertEqual(volume['status'], "in-use")
def test_force_detach_instance_attached_volume(self):
    # admin context
    ctx = context.RequestContext('admin', 'fake', True)
    # current status is available
    volume = db.volume_create(ctx, {'status': 'available', 'host': 'test',
                                    'provider_location': '', 'size': 1})
    connector = {'initiator': 'iqn.2012-07.org.fake:01'}
    # start service to handle rpc messages for attach requests
    svc = self.start_service('volume', host='test')
    self.volume_api.reserve_volume(ctx, volume)
    mountpoint = '/dev/vbd'
    self.volume_api.attach(ctx, volume, stubs.FAKE_UUID, None,
                           mountpoint, 'rw')
    # volume is attached
    volume = db.volume_get(ctx, volume['id'])
    self.assertEqual(volume['status'], 'in-use')
    self.assertEqual(volume['instance_uuid'], stubs.FAKE_UUID)
    self.assertIsNone(volume['attached_host'])
    self.assertEqual(volume['mountpoint'], mountpoint)
    self.assertEqual(volume['attach_status'], 'attached')
    admin_metadata = volume['volume_admin_metadata']
    self.assertEqual(len(admin_metadata), 2)
    self.assertEqual(admin_metadata[0]['key'], 'readonly')
    self.assertEqual(admin_metadata[0]['value'], 'False')
    self.assertEqual(admin_metadata[1]['key'], 'attached_mode')
    self.assertEqual(admin_metadata[1]['value'], 'rw')
    conn_info = self.volume_api.initialize_connection(ctx, volume,
                                                      connector)
    self.assertEqual(conn_info['data']['access_mode'], 'rw')
    # build request to force detach
    req = webob.Request.blank('/v2/fake/volumes/%s/action' % volume['id'])
    req.method = 'POST'
    req.headers['content-type'] = 'application/json'
    # request status of 'error'
    req.body = jsonutils.dumps({'os-force_detach': None})
    # attach admin context to request
    req.environ['cinder.context'] = ctx
    # make request
    resp = req.get_response(app())
    # request is accepted
    self.assertEqual(resp.status_int, 202)
    volume = db.volume_get(ctx, volume['id'])
    # status changed to 'available'
    self.assertEqual(volume['status'], 'available')
    self.assertIsNone(volume['instance_uuid'])
    self.assertIsNone(volume['attached_host'])
    self.assertIsNone(volume['mountpoint'])
    self.assertEqual(volume['attach_status'], 'detached')
    admin_metadata = volume['volume_admin_metadata']
    self.assertEqual(len(admin_metadata), 1)
    self.assertEqual(admin_metadata[0]['key'], 'readonly')
    self.assertEqual(admin_metadata[0]['value'], 'False')
    # cleanup
    svc.stop()
def test_init_host(self, mock_delete_volume, mock_delete_snapshot):
    """Test stuck volumes and backups.

    Make sure stuck volumes and backups are reset to correct
    states when backup_manager.init_host() is called
    """
    vol1_id = self._create_volume_db_entry()
    self._create_volume_attach(vol1_id)
    db.volume_update(self.ctxt, vol1_id, {'status': 'backing-up'})
    vol2_id = self._create_volume_db_entry()
    self._create_volume_attach(vol2_id)
    db.volume_update(self.ctxt, vol2_id, {'status': 'restoring-backup'})
    vol3_id = self._create_volume_db_entry()
    db.volume_update(self.ctxt, vol3_id, {'status': 'available'})
    vol4_id = self._create_volume_db_entry()
    db.volume_update(self.ctxt, vol4_id, {'status': 'backing-up'})
    temp_vol_id = self._create_volume_db_entry()
    db.volume_update(self.ctxt, temp_vol_id, {'status': 'available'})
    vol5_id = self._create_volume_db_entry()
    # Fixed copy-paste bug: this update previously targeted vol4_id a
    # second time, leaving vol5 without the 'backing-up' status its
    # in-progress backup (below) implies.
    db.volume_update(self.ctxt, vol5_id, {'status': 'backing-up'})
    temp_snap = self._create_snapshot_db_entry()
    temp_snap.status = 'available'
    temp_snap.save()
    backup1 = self._create_backup_db_entry(status='creating',
                                           volume_id=vol1_id)
    backup2 = self._create_backup_db_entry(status='restoring',
                                           volume_id=vol2_id)
    backup3 = self._create_backup_db_entry(status='deleting',
                                           volume_id=vol3_id)
    self._create_backup_db_entry(status='creating',
                                 volume_id=vol4_id,
                                 temp_volume_id=temp_vol_id)
    self._create_backup_db_entry(status='creating',
                                 volume_id=vol5_id,
                                 temp_snapshot_id=temp_snap.id)

    self.backup_mgr.init_host()

    vol1 = db.volume_get(self.ctxt, vol1_id)
    self.assertEqual(vol1['status'], 'available')
    vol2 = db.volume_get(self.ctxt, vol2_id)
    self.assertEqual(vol2['status'], 'error_restoring')

    backup1 = db.backup_get(self.ctxt, backup1.id)
    self.assertEqual(backup1['status'], 'error')
    backup2 = db.backup_get(self.ctxt, backup2.id)
    self.assertEqual(backup2['status'], 'available')
    self.assertRaises(exception.BackupNotFound,
                      db.backup_get,
                      self.ctxt,
                      backup3.id)
    # Leftover temp volume / temp snapshot must be cleaned up.
    self.assertTrue(mock_delete_volume.called)
    self.assertTrue(mock_delete_snapshot.called)
def test_transfer_volume_create_delete(self):
    """Creating a transfer parks the volume; deleting it frees the volume."""
    tx_api = transfer_api.API()
    volume = utils.create_volume(self.ctxt, id='1',
                                 updated_at=self.updated_at)
    response = tx_api.create(self.ctxt, '1', 'Description')
    volume = db.volume_get(self.ctxt, '1')
    self.assertEqual('awaiting-transfer', volume['status'],
                     'Unexpected state')

    tx_api.delete(self.ctxt, response['id'])
    volume = db.volume_get(self.ctxt, '1')
    self.assertEqual('available', volume['status'], 'Unexpected state')
def setUp(self):
    global RAISED_EXCEPTIONS
    RAISED_EXCEPTIONS = []
    super(BackupCephTestCase, self).setUp()
    self.ctxt = context.get_admin_context()

    # Create volume.
    self.volume_size = 1
    self.volume_id = str(uuid.uuid4())
    self._create_volume_db_entry(self.volume_id, self.volume_size)
    self.volume = db.volume_get(self.ctxt, self.volume_id)

    # Create backup of volume.
    self.backup_id = str(uuid.uuid4())
    self._create_backup_db_entry(self.backup_id, self.volume_id,
                                 self.volume_size)
    self.backup = objects.Backup.get_by_id(self.ctxt, self.backup_id)

    # Create alternate volume.
    self.alt_volume_id = str(uuid.uuid4())
    self._create_volume_db_entry(self.alt_volume_id, self.volume_size)
    self.alt_volume = db.volume_get(self.ctxt, self.alt_volume_id)

    self.chunk_size = 1024
    self.num_chunks = 128
    self.data_length = self.num_chunks * self.chunk_size
    self.checksum = hashlib.sha256()

    # Create a file with some data in it.
    self.volume_file = tempfile.NamedTemporaryFile()
    self.addCleanup(self.volume_file.close)
    for _i in range(0, self.num_chunks):
        data = os.urandom(self.chunk_size)
        self.checksum.update(data)
        self.volume_file.write(data)
    self.volume_file.seek(0)

    # Always trigger an exception if a command is executed since it should
    # always be dealt with gracefully. At time of writing on rbd
    # export/import-diff is executed and if they fail we expect to find
    # alternative means of backing up.
    mock_exec = mock.Mock()
    mock_exec.side_effect = processutils.ProcessExecutionError

    self.service = ceph.CephBackupDriver(self.ctxt, execute=mock_exec)

    # Ensure that time.time() always returns more than the last time it
    # was called to avoid div by zero errors.
    self.counter = float(0)
    self.callstack = []
def test_force_detach_host_attached_volume(self):
    # admin context
    ctx = context.RequestContext("admin", "fake", True)
    # current status is available
    volume = db.volume_create(ctx, {"status": "available", "host": "test",
                                    "provider_location": "", "size": 1})
    connector = {"initiator": "iqn.2012-07.org.fake:01"}
    # start service to handle rpc messages for attach requests
    svc = self.start_service("volume", host="test")
    self.volume_api.reserve_volume(ctx, volume)
    mountpoint = "/dev/vbd"
    host_name = "fake-host"
    self.volume_api.attach(ctx, volume, None, host_name, mountpoint, "ro")
    # volume is attached
    volume = db.volume_get(ctx, volume["id"])
    self.assertEqual(volume["status"], "in-use")
    self.assertIsNone(volume["instance_uuid"])
    self.assertEqual(volume["attached_host"], host_name)
    self.assertEqual(volume["mountpoint"], mountpoint)
    self.assertEqual(volume["attach_status"], "attached")
    admin_metadata = volume["volume_admin_metadata"]
    self.assertEqual(len(admin_metadata), 2)
    self.assertEqual(admin_metadata[0]["key"], "readonly")
    self.assertEqual(admin_metadata[0]["value"], "False")
    self.assertEqual(admin_metadata[1]["key"], "attached_mode")
    self.assertEqual(admin_metadata[1]["value"], "ro")
    conn_info = self.volume_api.initialize_connection(ctx, volume,
                                                      connector)
    self.assertEqual(conn_info["data"]["access_mode"], "ro")
    # build request to force detach
    req = webob.Request.blank("/v2/fake/volumes/%s/action" % volume["id"])
    req.method = "POST"
    req.headers["content-type"] = "application/json"
    # request status of 'error'
    req.body = jsonutils.dumps({"os-force_detach": None})
    # attach admin context to request
    req.environ["cinder.context"] = ctx
    # make request
    resp = req.get_response(app())
    # request is accepted
    self.assertEqual(resp.status_int, 202)
    volume = db.volume_get(ctx, volume["id"])
    # status changed to 'available'
    self.assertEqual(volume["status"], "available")
    self.assertIsNone(volume["instance_uuid"])
    self.assertIsNone(volume["attached_host"])
    self.assertIsNone(volume["mountpoint"])
    self.assertEqual(volume["attach_status"], "detached")
    admin_metadata = volume["volume_admin_metadata"]
    self.assertEqual(len(admin_metadata), 1)
    self.assertEqual(admin_metadata[0]["key"], "readonly")
    self.assertEqual(admin_metadata[0]["value"], "False")
    # cleanup
    svc.stop()
def test_force_detach_instance_attached_volume(self):
    # current status is available
    volume = self._create_volume(self.ctx, {'provider_location': '',
                                            'size': 1})
    connector = {'initiator': 'iqn.2012-07.org.fake:01'}
    self.volume_api.reserve_volume(self.ctx, volume)
    mountpoint = '/dev/vbd'
    attachment = self.volume_api.attach(self.ctx, volume, fake.INSTANCE_ID,
                                        None, mountpoint, 'rw')
    # volume is attached
    volume = db.volume_get(self.ctx, volume['id'])
    self.assertEqual('in-use', volume['status'])
    self.assertEqual(fake.INSTANCE_ID, attachment['instance_uuid'])
    self.assertEqual(mountpoint, attachment['mountpoint'])
    self.assertEqual('attached', attachment['attach_status'])
    admin_metadata = volume['volume_admin_metadata']
    self.assertEqual(2, len(admin_metadata))
    self.assertEqual('readonly', admin_metadata[0]['key'])
    self.assertEqual('False', admin_metadata[0]['value'])
    self.assertEqual('attached_mode', admin_metadata[1]['key'])
    self.assertEqual('rw', admin_metadata[1]['value'])
    conn_info = self.volume_api.initialize_connection(self.ctx,
                                                      volume, connector)
    self.assertEqual('rw', conn_info['data']['access_mode'])
    # build request to force detach
    req = webob.Request.blank('/v2/%s/volumes/%s/action' % (
        fake.PROJECT_ID, volume['id']))
    req.method = 'POST'
    req.headers['content-type'] = 'application/json'
    # request status of 'error'
    body = {'os-force_detach': {'attachment_id': attachment['id'],
                                'connector': connector}}
    req.body = jsonutils.dump_as_bytes(body)
    # attach admin context to request
    req.environ['cinder.context'] = self.ctx
    # make request
    resp = req.get_response(app())
    # request is accepted
    self.assertEqual(202, resp.status_int)
    volume = db.volume_get(self.ctx, volume['id'])
    self.assertRaises(exception.VolumeAttachmentNotFound,
                      db.volume_attachment_get,
                      self.ctx, attachment['id'])
    # status changed to 'available'
    self.assertEqual('available', volume['status'])
    admin_metadata = volume['volume_admin_metadata']
    self.assertEqual(1, len(admin_metadata))
    # Dropped the stray third argument 'readonly' that was being passed
    # as a (misleading) assertion failure message.
    self.assertEqual('readonly', admin_metadata[0]['key'])
    self.assertEqual('False', admin_metadata[0]['value'])
def test_cleanup_one_restoring_backup_volume(self):
    """Test cleanup_one_volume for volume status 'restoring-backup'."""
    mock_get_manager = self.mock_object(self.backup_mgr, "_get_manager")
    mock_get_manager.return_value = "fake_manager"
    volume_id = self._create_volume_db_entry(status="restoring-backup")
    volume = db.volume_get(self.ctxt, volume_id)

    self.backup_mgr._cleanup_one_volume(self.ctxt, volume)

    # Interrupted restores are flagged as errors.
    volume = db.volume_get(self.ctxt, volume_id)
    self.assertEqual("error_restoring", volume["status"])
def test_cleanup_one_backing_up_volume(self):
    """Test cleanup_one_volume for volume status 'backing-up'."""
    mock_get_manager = self.mock_object(self.backup_mgr, "_get_manager")
    mock_get_manager.return_value = "fake_manager"
    volume_id = self._create_volume_db_entry(status="backing-up",
                                             previous_status="available")
    volume = db.volume_get(self.ctxt, volume_id)

    self.backup_mgr._cleanup_one_volume(self.ctxt, volume)

    # Interrupted backups roll the volume back to its previous status.
    volume = db.volume_get(self.ctxt, volume_id)
    self.assertEqual("available", volume["status"])
def test_create_volume_from_snapshot(self):
    """Test volume can be created from a snapshot."""
    volume_src = self._create_volume()
    self.volume.create_volume(self.context, volume_src["id"])
    snapshot_id = self._create_snapshot(volume_src["id"])["id"]
    self.volume.create_snapshot(self.context, volume_src["id"],
                                snapshot_id)

    volume_dst = self._create_volume(0, snapshot_id)
    self.volume.create_volume(self.context, volume_dst["id"], snapshot_id)

    admin_ctxt = context.get_admin_context()
    self.assertEqual(volume_dst["id"],
                     db.volume_get(admin_ctxt, volume_dst["id"]).id)
    self.assertEqual(snapshot_id,
                     db.volume_get(admin_ctxt,
                                   volume_dst["id"]).snapshot_id)

    # tear down in reverse creation order
    self.volume.delete_volume(self.context, volume_dst["id"])
    self.volume.delete_snapshot(self.context, snapshot_id)
    self.volume.delete_volume(self.context, volume_src["id"])
def test_restore_encrypted_vol_to_none_type_source_type_available(self):
    """Restoring onto an untyped volume copies the source volume type."""
    fields = ["encryption_key_id"]
    container = {}
    db.volume_type_create(self.ctxt, {"id": "enc_vol_type_id",
                                      "name": "enc_vol_type"})
    enc_vol_id = self._create_encrypted_volume_db_entry(
        str(uuid.uuid4()), "enc_vol_type_id", True)
    undef_vol_id = self._create_encrypted_volume_db_entry(
        str(uuid.uuid4()), None, False)

    self.bak_meta_api._save_vol_base_meta(container, enc_vol_id)
    self.bak_meta_api._restore_vol_base_meta(
        container[self.bak_meta_api.TYPE_TAG_VOL_BASE_META],
        undef_vol_id, fields)

    self.assertEqual(
        db.volume_get(self.ctxt, undef_vol_id)["volume_type_id"],
        db.volume_get(self.ctxt, enc_vol_id)["volume_type_id"])
def test_cleanup_one_restoring_backup_volume(self):
    """Test cleanup_one_volume for volume status 'restoring-backup'."""
    mock_get_manager = self.mock_object(self.backup_mgr, '_get_manager')
    mock_get_manager.return_value = 'fake_manager'
    volume_id = self._create_volume_db_entry(status='restoring-backup')
    volume = db.volume_get(self.ctxt, volume_id)

    self.backup_mgr._cleanup_one_volume(self.ctxt, volume)

    # An interrupted restore leaves the volume in 'error_restoring'.
    volume = db.volume_get(self.ctxt, volume_id)
    self.assertEqual('error_restoring', volume['status'])
def test_force_detach_instance_attached_volume(self):
    # admin context
    ctx = context.RequestContext("admin", "fake", True)
    # current status is available
    volume = db.volume_create(ctx, {"status": "available", "host": "test",
                                    "provider_location": "", "size": 1})
    connector = {"initiator": "iqn.2012-07.org.fake:01"}
    # start service to handle rpc messages for attach requests
    svc = self.start_service("volume", host="test")
    self.volume_api.reserve_volume(ctx, volume)
    mountpoint = "/dev/vbd"
    attachment = self.volume_api.attach(ctx, volume, stubs.FAKE_UUID, None,
                                        mountpoint, "rw")
    # volume is attached
    volume = db.volume_get(ctx, volume["id"])
    self.assertEqual("in-use", volume["status"])
    self.assertEqual(stubs.FAKE_UUID, attachment["instance_uuid"])
    self.assertEqual(mountpoint, attachment["mountpoint"])
    self.assertEqual("attached", attachment["attach_status"])
    admin_metadata = volume["volume_admin_metadata"]
    self.assertEqual(2, len(admin_metadata))
    self.assertEqual("readonly", admin_metadata[0]["key"])
    self.assertEqual("False", admin_metadata[0]["value"])
    self.assertEqual("attached_mode", admin_metadata[1]["key"])
    self.assertEqual("rw", admin_metadata[1]["value"])
    conn_info = self.volume_api.initialize_connection(ctx, volume,
                                                      connector)
    self.assertEqual("rw", conn_info["data"]["access_mode"])
    # build request to force detach
    req = webob.Request.blank("/v2/fake/volumes/%s/action" % volume["id"])
    req.method = "POST"
    req.headers["content-type"] = "application/json"
    # request status of 'error'
    body = {"os-force_detach": {"attachment_id": attachment["id"],
                                "connector": connector}}
    req.body = jsonutils.dump_as_bytes(body)
    # attach admin context to request
    req.environ["cinder.context"] = ctx
    # make request
    resp = req.get_response(app())
    # request is accepted
    self.assertEqual(202, resp.status_int)
    volume = db.volume_get(ctx, volume["id"])
    self.assertRaises(exception.VolumeAttachmentNotFound,
                      db.volume_attachment_get,
                      ctx, attachment["id"])
    # status changed to 'available'
    self.assertEqual("available", volume["status"])
    admin_metadata = volume["volume_admin_metadata"]
    self.assertEqual(1, len(admin_metadata))
    # Dropped the stray third argument "readonly" that was being passed
    # as a (misleading) assertion failure message.
    self.assertEqual("readonly", admin_metadata[0]["key"])
    self.assertEqual("False", admin_metadata[0]["value"])
    # cleanup
    svc.stop()
def test_cleanup_one_backing_up_volume(self):
    """Test cleanup_one_volume for volume status 'backing-up'."""
    mock_get_manager = self.mock_object(self.backup_mgr, '_get_manager')
    mock_get_manager.return_value = 'fake_manager'
    volume_id = self._create_volume_db_entry(status='backing-up',
                                             previous_status='available')
    volume = db.volume_get(self.ctxt, volume_id)

    self.backup_mgr._cleanup_one_volume(self.ctxt, volume)

    # The volume is rolled back to its previous ('available') status.
    volume = db.volume_get(self.ctxt, volume_id)
    self.assertEqual('available', volume['status'])
def test_update_encryption_key_id(self, mock_barbican_client):
    """Migrating the fixed key rewrites volume and snapshot key IDs."""
    vol = self.create_volume()
    snap_ids = [fake.SNAPSHOT_ID, fake.SNAPSHOT2_ID, fake.SNAPSHOT3_ID]
    for snap_id in snap_ids:
        tests_utils.create_snapshot(self.context, vol.id, id=snap_id)

    # Barbican's secret.store() returns a URI that contains the
    # secret's key ID at the end.
    secret_ref = 'http://some/path/' + fake.ENCRYPTION_KEY_ID
    mock_secret = mock.MagicMock()
    mock_secret.store.return_value = secret_ref
    mock_barbican_client.return_value.secrets.create.return_value \
        = mock_secret

    migration.migrate_fixed_key(self.my_vols, conf=self.conf)

    vol_db = db.volume_get(self.context, vol.id)
    self.assertEqual(fake.ENCRYPTION_KEY_ID, vol_db['encryption_key_id'])
    for snap_id in snap_ids:
        snap_db = db.snapshot_get(self.context, snap_id)
        self.assertEqual(fake.ENCRYPTION_KEY_ID,
                         snap_db['encryption_key_id'])
def test_restore_backup_with_bad_service(self):
    """Test error handling when attempting a restore of a backup
    with a different service to that used to create the backup
    """
    vol_id = self._create_volume_db_entry(status='restoring-backup',
                                          size=1)
    backup_id = self._create_backup_db_entry(status='restoring',
                                             volume_id=vol_id)

    def fake_restore_backup(context, backup, volume, backup_service):
        pass

    self.stubs.Set(self.backup_mgr.driver, 'restore_backup',
                   fake_restore_backup)

    # Pretend the backup was produced by a different backup service.
    service = 'cinder.tests.backup.bad_service'
    db.backup_update(self.ctxt, backup_id, {'service': service})
    self.assertRaises(exception.InvalidBackup,
                      self.backup_mgr.restore_backup,
                      self.ctxt,
                      backup_id,
                      vol_id)

    # The volume errors out, but the backup itself stays usable.
    vol = db.volume_get(self.ctxt, vol_id)
    self.assertEqual(vol['status'], 'error')
    backup = db.backup_get(self.ctxt, backup_id)
    self.assertEqual(backup['status'], 'available')
def migrate_volume_to_host(self, context, topic, volume_id, host,
                           force_host_copy, request_spec,
                           filter_properties=None):
    """Ensure that the host exists and can accept the volume."""
    self._wait_for_scheduler()

    def _migrate_volume_set_error(self, context, ex, request_spec):
        # Clear the migration marker and notify about the failure.
        volume_state = {'volume_state': {'migration_status': None}}
        self._set_volume_state_and_notify('migrate_volume_to_host',
                                          volume_state,
                                          context, ex, request_spec)

    try:
        tgt_host = self.driver.host_passes_filters(context, host,
                                                   request_spec,
                                                   filter_properties)
    except exception.NoValidHost as ex:
        _migrate_volume_set_error(self, context, ex, request_spec)
    except Exception as ex:
        with excutils.save_and_reraise_exception():
            _migrate_volume_set_error(self, context, ex, request_spec)
    else:
        # Host accepted: hand the migration off to the volume service.
        volume_ref = db.volume_get(context, volume_id)
        volume_rpcapi.VolumeAPI().migrate_volume(context, volume_ref,
                                                 tgt_host,
                                                 force_host_copy)
def manage_existing(self, context, topic, volume_id, request_spec,
                    filter_properties=None):
    """Ensure that the host exists and can accept the volume."""
    self._wait_for_scheduler()

    def _manage_existing_set_error(self, context, ex, request_spec):
        # Mark the volume as errored and notify about the failure.
        volume_state = {'volume_state': {'status': 'error'}}
        self._set_volume_state_and_notify('manage_existing', volume_state,
                                          context, ex, request_spec)

    volume_ref = db.volume_get(context, volume_id)
    try:
        self.driver.host_passes_filters(context,
                                        volume_ref['host'],
                                        request_spec,
                                        filter_properties)
    except exception.NoValidHost as ex:
        _manage_existing_set_error(self, context, ex, request_spec)
    except Exception as ex:
        with excutils.save_and_reraise_exception():
            _manage_existing_set_error(self, context, ex, request_spec)
    else:
        # Host accepted: hand off to the volume service.
        volume_rpcapi.VolumeAPI().manage_existing(context, volume_ref,
                                                  request_spec.get('ref'))
def test_force_delete_volume(self):
    """Test volume can be forced to delete."""
    # create a volume and assign to host
    volume = self._create_volume()
    self.volume.create_volume(self.context, volume['id'])
    volume['status'] = 'error_deleting'
    volume['host'] = 'fakehost'

    volume_api = cinder.volume.api.API()

    # 'error_deleting' volumes can't be deleted
    self.assertRaises(exception.InvalidVolume,
                      volume_api.delete,
                      self.context,
                      volume)

    # delete with force
    volume_api.delete(self.context, volume, force=True)

    # status is deleting
    volume = db.volume_get(context.get_admin_context(), volume['id'])
    # assertEquals is a deprecated alias; use the canonical assertEqual.
    self.assertEqual(volume['status'], 'deleting')

    # clean up
    self.volume.delete_volume(self.context, volume['id'])
def test_create_volume_from_image_exception(self):
    """Creating a volume from a missing image leaves it in 'error'."""
    tmp_fd, tmp_path = tempfile.mkstemp()
    os.close(tmp_fd)
    self.stubs.Set(self.volume.driver, 'local_path',
                   lambda vol: tmp_path)

    missing_image = 'aaaaaaaa-0000-0000-0000-000000000000'
    vol_id = 1
    # Seed the database with a volume in 'creating' state.
    db.volume_create(self.context,
                     {'id': vol_id,
                      'updated_at': datetime.datetime(1, 1, 1, 1, 1, 1),
                      'display_description': 'Test Desc',
                      'size': 20,
                      'status': 'creating',
                      'host': 'dummy'})

    self.assertRaises(exception.ImageNotFound,
                      self.volume.create_volume,
                      self.context,
                      vol_id, None, None, None,
                      None,
                      missing_image)
    volume = db.volume_get(self.context, vol_id)
    self.assertEqual(volume['status'], "error")

    # cleanup
    db.volume_destroy(self.context, vol_id)
    os.unlink(tmp_path)
def check_already_managed_volume(db, vol_name):
    """Check cinder db for already managed volume.

    :param db: database api parameter
    :param vol_name: volume name parameter
    :returns: bool -- True if a db entry with the extracted volume id
              exists, False otherwise
    """
    candidate_id = _extract_id(vol_name)
    if not candidate_id:
        return False
    try:
        # Raises ValueError for non-UUID strings; VolumeNotFound when
        # the id is well-formed but unknown to cinder.
        uuid.UUID(candidate_id, version=4)
        db.volume_get(context.get_admin_context(), candidate_id)
    except (exception.VolumeNotFound, ValueError):
        return False
    return True
def test_restore_backup_with_bad_backup_status(self):
    """Restoring from a backup not in 'restoring' state must fail."""
    vol_id = self._create_volume_db_entry(status='restoring-backup',
                                          size=1)
    backup_id = self._create_backup_db_entry(status='available',
                                             volume_id=vol_id)

    self.assertRaises(exception.InvalidBackup,
                      self.backup_mgr.restore_backup,
                      self.ctxt, backup_id, vol_id)

    # Both the volume and the backup are flagged as errored.
    self.assertEqual(
        db.volume_get(self.ctxt, vol_id)['status'], 'error')
    self.assertEqual(
        db.backup_get(self.ctxt, backup_id)['status'], 'error')
def test_restore_backup_with_driver_error(self, _mock_volume_restore):
    """Driver failure during restore marks the volume 'error_restoring'."""
    vol_id = self._create_volume_db_entry(status='restoring-backup',
                                          size=1)
    backup_id = self._create_backup_db_entry(status='restoring',
                                             volume_id=vol_id)
    _mock_volume_restore.side_effect = FakeBackupException('fake')

    self.assertRaises(FakeBackupException,
                      self.backup_mgr.restore_backup,
                      self.ctxt, backup_id, vol_id)

    # The backup itself remains usable even though the restore failed.
    self.assertEqual(
        db.volume_get(self.ctxt, vol_id)['status'], 'error_restoring')
    self.assertEqual(
        db.backup_get(self.ctxt, backup_id)['status'], 'available')
    self.assertTrue(_mock_volume_restore.called)
def create_snapshot(ctxt, volume_id, display_name='test_snapshot',
                    display_description='this is a test snapshot',
                    status='creating'):
    """Create and return a snapshot db entry for the given volume."""
    source = db.volume_get(ctxt, volume_id)
    return db.snapshot_create(ctxt, {
        'volume_id': volume_id,
        'user_id': ctxt.user_id,
        'project_id': ctxt.project_id,
        'status': status,
        # Snapshot size mirrors the source volume's size.
        'volume_size': source['size'],
        'display_name': display_name,
        'display_description': display_description,
    })
def _migrate_volume_exec(self, ctx, volume, host, expected_status,
                         force_host_copy=False):
    """POST an os-migrate_volume action and return the refreshed volume."""
    # build request to migrate to host
    url = '/v2/fake/volumes/%s/action' % volume['id']
    req = webob.Request.blank(url)
    req.method = 'POST'
    req.headers['content-type'] = 'application/json'
    req.body = jsonutils.dump_as_bytes(
        {'os-migrate_volume': {'host': host,
                               'force_host_copy': force_host_copy}})
    req.environ['cinder.context'] = ctx
    resp = req.get_response(app())

    # verify status
    self.assertEqual(expected_status, resp.status_int)
    return db.volume_get(self.ctx, volume['id'])
def _create_backup_db_entry(self, volume_id=_DEFAULT_VOLUME_ID,
                            container=google_dr.CONF.backup_gcs_bucket,
                            parent_id=None, status=None,
                            service_metadata=None):
    """Build a Backup object, creating the backing volume if missing."""
    # Ensure the referenced volume exists before creating the backup.
    try:
        db.volume_get(self.ctxt, volume_id)
    except exception.NotFound:
        self._create_volume_db_entry(volume_id=volume_id)

    backup = objects.Backup(context=self.ctxt,
                            size=1,
                            container=container,
                            volume_id=volume_id,
                            parent_id=parent_id,
                            user_id=fake.USER_ID,
                            project_id=fake.PROJECT_ID,
                            status=status,
                            service_metadata=service_metadata)
    backup.create()
    return backup
def test_transfer_accept(self):
    # A running volume service is needed for the transfer RPCs.
    svc = self.start_service('volume', host='test_host')
    tx_api = transfer_api.API()
    utils.create_volume(self.ctxt, id='1',
                        updated_at=self.updated_at)
    transfer = tx_api.create(self.ctxt, '1', 'Description')
    volume = db.volume_get(self.ctxt, '1')
    self.assertEqual('awaiting-transfer', volume['status'],
                     'Unexpected state')

    # Accepting an unknown transfer id fails.
    self.assertRaises(exception.TransferNotFound,
                      tx_api.accept,
                      self.ctxt, '2', transfer['auth_key'])

    # Accepting with a bad auth key fails.
    self.assertRaises(exception.InvalidAuthKey,
                      tx_api.accept,
                      self.ctxt, transfer['id'], 'wrong')

    # The volume must be in 'awaiting-transfer' to be accepted.
    db.volume_update(self.ctxt, '1', {'status': 'wrong'})
    self.assertRaises(exception.InvalidVolume,
                      tx_api.accept,
                      self.ctxt, transfer['id'], transfer['auth_key'])
    db.volume_update(self.ctxt, '1', {'status': 'awaiting-transfer'})

    # Accept the transfer as a different user/project and verify the
    # volume's ownership moved to the accepting context.
    self.ctxt.user_id = 'new_user_id'
    self.ctxt.project_id = 'new_project_id'
    response = tx_api.accept(self.ctxt, transfer['id'],
                             transfer['auth_key'])
    volume = db.volume_get(self.ctxt, '1')
    self.assertEqual(volume['project_id'], 'new_project_id',
                     'Unexpected project id')
    self.assertEqual(volume['user_id'], 'new_user_id',
                     'Unexpected user id')
    self.assertEqual(volume['id'], response['volume_id'],
                     'Unexpected volume id in response.')
    self.assertEqual(transfer['id'], response['id'],
                     'Unexpected transfer id in response.')
    svc.stop()
def test_reset_attached_status(self):
    """Admin reset of attach_status to 'detached' succeeds with 202."""
    vol = db.volume_create(self.ctx,
                           {'status': 'available', 'host': 'test',
                            'provider_location': '', 'size': 1,
                            'attach_status': 'attached'})

    resp = self._issue_volume_reset(self.ctx, vol,
                                    {'status': 'available',
                                     'attach_status': 'detached'})
    self.assertEqual(202, resp.status_int)

    refreshed = db.volume_get(self.ctx, vol['id'])
    self.assertEqual('detached', refreshed['attach_status'])
    self.assertEqual('available', refreshed['status'])
def test_reset_attached_status(self):
    """Admin reset of attach_status to 'detached' succeeds with 202."""
    # admin context
    ctx = context.RequestContext('admin', 'fake', True)
    volume = db.volume_create(ctx, {'status': 'available',
                                    'host': 'test',
                                    'provider_location': '',
                                    'size': 1,
                                    'attach_status': 'attached'})

    resp = self._issue_volume_reset(ctx, volume,
                                    {'status': 'available',
                                     'attach_status': 'detached'})
    # Use (expected, actual) ordering for consistency with the sibling
    # reset-status tests in this file.
    self.assertEqual(202, resp.status_int)
    volume = db.volume_get(ctx, volume['id'])
    self.assertEqual('detached', volume['attach_status'])
    self.assertEqual('available', volume['status'])
def test_create_backup_with_error(self, _mock_volume_backup):
    """A driver failure leaves the volume available, backup errored."""
    vol_id = self._create_volume_db_entry(size=1)
    backup = self._create_backup_db_entry(volume_id=vol_id)
    _mock_volume_backup.side_effect = FakeBackupException('fake')

    self.assertRaises(FakeBackupException,
                      self.backup_mgr.create_backup,
                      self.ctxt, backup)

    self.assertEqual(
        db.volume_get(self.ctxt, vol_id)['status'], 'available')
    self.assertEqual(
        db.backup_get(self.ctxt, backup.id)['status'], 'error')
    self.assertTrue(_mock_volume_backup.called)
def _migrate_volume_set_error(self, context, ex, request_spec):
    """Record a failed migration and notify about the failure."""
    volume = db.volume_get(context, request_spec['volume_id'])
    state = {'migration_status': 'error'}
    if volume.status == 'maintenance':
        # Restore the pre-maintenance status alongside the error marker.
        state['status'] = volume.previous_status or 'maintenance'
    self._set_volume_state_and_notify('migrate_volume_to_host',
                                      {'volume_state': state},
                                      context, ex, request_spec)
def test_create_volume_from_snapshot(self):
    # Create the source volume and snapshot it.
    volume_src = test_utils.create_volume(self.context, host=CONF.host)
    self.volume.create_volume(self.context, volume_src['id'])
    snapshot = self._create_snapshot(volume_src['id'])
    snapshot_id = snapshot['id']
    self.volume.create_snapshot(self.context, volume_src['id'],
                                snapshot_id)
    # The snapshot file must exist on the backend.
    self.assertTrue(
        os.path.exists(os.path.join(self.volumes_path,
                                    snapshot['name'])))
    # Build a new volume from that snapshot and verify the db linkage.
    volume_dst = test_utils.create_volume(self.context, host=CONF.host,
                                          snapshot_id=snapshot_id)
    self.volume.create_volume(self.context, volume_dst['id'],
                              snapshot_id)
    self.assertEqual(
        volume_dst['id'],
        db.volume_get(context.get_admin_context(),
                      volume_dst['id']).id)
    self.assertEqual(
        snapshot_id,
        db.volume_get(context.get_admin_context(),
                      volume_dst['id']).snapshot_id)
    # cleanup
    self.volume.delete_volume(self.context, volume_dst['id'])
    self.volume.delete_snapshot(self.context, snapshot_id)
    self.volume.delete_volume(self.context, volume_src['id'])
def test_volume_force_detach_raises_db_error(self):
    # In case of DB error 500 error code is returned to user
    # current status is available
    volume = self._create_volume(self.ctx, {'provider_location': '',
                                            'size': 1})
    connector = {'initiator': 'iqn.2012-07.org.fake:01'}

    self.volume_api.reserve_volume(self.ctx, volume)
    mountpoint = '/dev/vbd'
    attachment = self.volume_api.attach(self.ctx, volume,
                                        stubs.FAKE_UUID, None,
                                        mountpoint, 'rw')
    # volume is attached
    volume = db.volume_get(self.ctx, volume['id'])
    self.assertEqual('in-use', volume['status'])
    self.assertEqual(stubs.FAKE_UUID, attachment['instance_uuid'])
    self.assertEqual(mountpoint, attachment['mountpoint'])
    self.assertEqual('attached', attachment['attach_status'])
    # Attaching in 'rw' mode records two admin-metadata entries.
    admin_metadata = volume['volume_admin_metadata']
    self.assertEqual(2, len(admin_metadata))
    self.assertEqual('readonly', admin_metadata[0]['key'])
    self.assertEqual('False', admin_metadata[0]['value'])
    self.assertEqual('attached_mode', admin_metadata[1]['key'])
    self.assertEqual('rw', admin_metadata[1]['value'])
    conn_info = self.volume_api.initialize_connection(self.ctx,
                                                      volume,
                                                      connector)
    self.assertEqual('rw', conn_info['data']['access_mode'])
    # build request to force detach
    # Simulate a DB failure inside detach; the API layer is expected
    # to propagate it as a messaging.RemoteError.
    volume_remote_error = messaging.RemoteError(exc_type='DBError')
    with mock.patch.object(volume_api.API, 'detach',
                           side_effect=volume_remote_error):
        req = webob.Request.blank('/v2/fake/volumes/%s/action' %
                                  volume['id'])
        req.method = 'POST'
        req.headers['content-type'] = 'application/json'
        body = {'os-force_detach': {'attachment_id': 'fake',
                                    'connector': connector}}
        req.body = jsonutils.dump_as_bytes(body)
        # attach admin context to request
        req.environ['cinder.context'] = self.ctx
        # make request
        self.assertRaises(messaging.RemoteError,
                          req.get_response,
                          app())
def test_copy_image_to_encrypted_volume_failed_fetch(
        self, mock_detach_encryptor, mock_attach_encryptor,
        mock_detach_volume, mock_attach_volume, mock_fetch_to_raw,
        mock_get_connector_properties):
    # A failed image fetch must still detach both the encryptor and
    # the volume on the way out.
    properties = {}
    volume = tests_utils.create_volume(
        self.context, status='available',
        size=2,
        encryption_key_id=fake.ENCRYPTION_KEY_ID)
    volume_id = volume['id']
    volume = db.volume_get(context.get_admin_context(), volume_id)
    image_service = fake_image.FakeImageService()
    local_path = 'dev/sda'
    attach_info = {'device': {'path': local_path},
                   'conn': {'driver_volume_type': 'iscsi',
                            'data': {}, }}

    mock_get_connector_properties.return_value = properties
    mock_attach_volume.return_value = [attach_info, volume]
    raised_exception = exception.ImageUnacceptable(
        reason='fake', image_id=fake.IMAGE_ID)
    mock_fetch_to_raw.side_effect = raised_exception

    encryption = {'encryption_key_id': fake.ENCRYPTION_KEY_ID}
    self.assertRaises(exception.ImageUnacceptable,
                      self.volume.driver.copy_image_to_encrypted_volume,
                      self.context, volume, image_service,
                      fake.IMAGE_ID)

    # Attach and encryptor setup happened before the failing fetch...
    mock_attach_volume.assert_called_once_with(
        self.context, volume, properties)
    mock_attach_encryptor.assert_called_once_with(
        self.context, attach_info, encryption)
    mock_fetch_to_raw.assert_called_once_with(
        self.context, image_service, fake.IMAGE_ID,
        local_path, '1M', size=2)
    # ...and both were cleaned up after the failure.
    mock_detach_encryptor.assert_called_once_with(
        attach_info, encryption)
    mock_detach_volume.assert_called_once_with(
        self.context, attach_info, volume, properties)
def test_create_cloned_volume(self):
    """Cloning a volume creates the backing file and keeps the db row."""
    src = test_utils.create_volume(self.context, host=CONF.host)
    self.driver.create_volume(src)

    dst = test_utils.create_volume(self.context, host=CONF.host)
    dst_path = os.path.join(self.volumes_path, dst['name'])
    self.assertFalse(os.path.exists(dst_path))

    self.driver.create_cloned_volume(dst, src)
    self.assertEqual(
        dst['id'],
        db.volume_get(context.get_admin_context(), dst['id']).id)
    self.assertTrue(os.path.exists(dst_path))

    self.driver.delete_volume(src)
    self.driver.delete_volume(dst)
def test_copy_volume_to_image_driver_not_initialized(self):
    """Upload fails fast when the driver is not initialized."""
    # creating volume testdata
    db.volume_create(self.context, self.volume_attrs)

    # Force the driver into the uninitialized state.
    self.volume.driver._initialized = False

    self.assertRaises(exception.DriverNotInitialized,
                      self.volume.copy_volume_to_image,
                      self.context,
                      self.volume_id,
                      self.image_meta)

    # The volume must not be left stuck in an uploading state.
    volume = db.volume_get(self.context, self.volume_id)
    self.assertEqual('available', volume.status)
def _create_volume_from_image(self, expected_status, raw=False,
                              clone_error=False):
    """Try to clone a volume from an image, and check the status
    afterwards.

    NOTE: if clone_error is True we force the image type to raw
    otherwise clone_image is not called
    """
    volume_id = 1

    # See tests.image.fake for image types.
    if raw:
        image_id = '155d900f-4e14-4e4c-a73d-069cbf4541e6'
    else:
        image_id = 'c905cedb-7281-47e4-8a62-f26bc5fc4c77'

    # creating volume testdata
    db.volume_create(self.context,
                     {'id': volume_id,
                      'updated_at': timeutils.utcnow(),
                      'display_description': 'Test Desc',
                      'size': 20,
                      'status': 'creating',
                      'instance_uuid': None,
                      'host': 'dummy'})
    try:
        if not clone_error:
            self.volume.create_volume(self.context,
                                      volume_id,
                                      image_id=image_id)
        else:
            self.assertRaises(exception.CinderException,
                              self.volume.create_volume,
                              self.context,
                              volume_id,
                              image_id=image_id)

        # The caller decides the expected terminal status.
        volume = db.volume_get(self.context, volume_id)
        self.assertEqual(volume['status'], expected_status)
    finally:
        # cleanup
        db.volume_destroy(self.context, volume_id)
def test_volume_force_detach_raises_remote_error(self):
    """Force detach returns 400 for a known RemoteError from detach."""
    # admin context
    ctx = context.RequestContext('admin', 'fake', True)
    # current status is available
    volume = db.volume_create(ctx, {'status': 'available',
                                    'host': 'test',
                                    'provider_location': '',
                                    'size': 1})
    connector = {'initiator': 'iqn.2012-07.org.fake:01'}

    # start service to handle rpc messages for attach requests
    svc = self.start_service('volume', host='test')
    self.volume_api.reserve_volume(ctx, volume)
    mountpoint = '/dev/vbd'
    attachment = self.volume_api.attach(ctx, volume, stubs.FAKE_UUID,
                                        None, mountpoint, 'rw')
    # volume is attached
    volume = db.volume_get(ctx, volume['id'])
    self.assertEqual('in-use', volume['status'])
    self.assertEqual(stubs.FAKE_UUID, attachment['instance_uuid'])
    self.assertEqual(mountpoint, attachment['mountpoint'])
    self.assertEqual('attached', attachment['attach_status'])
    admin_metadata = volume['volume_admin_metadata']
    self.assertEqual(2, len(admin_metadata))
    self.assertEqual('readonly', admin_metadata[0]['key'])
    self.assertEqual('False', admin_metadata[0]['value'])
    self.assertEqual('attached_mode', admin_metadata[1]['key'])
    self.assertEqual('rw', admin_metadata[1]['value'])
    conn_info = self.volume_api.initialize_connection(ctx, volume,
                                                      connector)
    self.assertEqual('rw', conn_info['data']['access_mode'])
    # build request to force detach
    volume_remote_error = \
        messaging.RemoteError(exc_type='VolumeAttachmentNotFound')
    with mock.patch.object(volume_api.API, 'detach',
                           side_effect=volume_remote_error):
        req = webob.Request.blank('/v2/fake/volumes/%s/action' %
                                  volume['id'])
        req.method = 'POST'
        req.headers['content-type'] = 'application/json'
        # webob request bodies must be bytes on Python 3; use
        # dump_as_bytes like the other action tests in this file.
        req.body = jsonutils.dump_as_bytes(
            {'os-force_detach': {'attachment_id': 'fake'}})
        # attach admin context to request
        req.environ['cinder.context'] = ctx
        # make request
        resp = req.get_response(app())
        self.assertEqual(400, resp.status_int)

    # cleanup
    svc.stop()
def _create_volume_from_image(self, expected_status,
                              fakeout_copy_image_to_volume=False):
    """Call copy image to volume, Test the status of volume
    after calling copying image to volume."""
    def fake_local_path(volume):
        # Closes over dst_path, which is created below before any
        # of these stubs can be invoked.
        return dst_path

    def fake_copy_image_to_volume(context, volume, image_service,
                                  image_id):
        pass

    def fake_fetch_to_raw(context, image_service, image_id, vol_path):
        pass

    dst_fd, dst_path = tempfile.mkstemp()
    os.close(dst_fd)
    self.stubs.Set(self.volume.driver, 'local_path', fake_local_path)
    self.stubs.Set(image_utils, 'fetch_to_raw', fake_fetch_to_raw)
    if fakeout_copy_image_to_volume:
        self.stubs.Set(self.volume, '_copy_image_to_volume',
                       fake_copy_image_to_volume)

    image_id = 'c905cedb-7281-47e4-8a62-f26bc5fc4c77'
    volume_id = 1
    # creating volume testdata
    db.volume_create(self.context,
                     {'id': volume_id,
                      'updated_at': datetime.datetime(1, 1, 1, 1, 1, 1),
                      'display_description': 'Test Desc',
                      'size': 20,
                      'status': 'creating',
                      'instance_uuid': None,
                      'host': 'dummy'})
    try:
        self.volume.create_volume(self.context,
                                  volume_id,
                                  image_id=image_id)

        # The caller decides the expected terminal status.
        volume = db.volume_get(self.context, volume_id)
        self.assertEqual(volume['status'], expected_status)
    finally:
        # cleanup
        db.volume_destroy(self.context, volume_id)
        os.unlink(dst_path)
def test_migrate_volume_generic_migrate_volume_completion_error(self):
    def fake_migrate_volume_completion(ctxt, volume, new_volume,
                                       error=False):
        # Leave the source volume in 'completing' before failing so
        # the error path has intermediate state to roll back.
        db.volume_update(ctxt, volume['id'],
                         {'migration_status': 'completing'})
        raise processutils.ProcessExecutionError

    with mock.patch.object(self.volume.driver, 'migrate_volume'),\
            mock.patch.object(volume_rpcapi.VolumeAPI, 'create_volume')\
            as mock_create_volume,\
            mock.patch.object(volume_rpcapi.VolumeAPI, 'delete_volume'),\
            mock.patch.object(self.volume, 'migrate_volume_completion')\
            as mock_migrate_compl,\
            mock.patch.object(self.volume.driver, 'create_export'), \
            mock.patch.object(self.volume, '_attach_volume') \
            as mock_attach, \
            mock.patch.object(self.volume, '_detach_volume'), \
            mock.patch.object(os_brick.initiator.connector,
                              'get_connector_properties') \
            as mock_get_connector_properties, \
            mock.patch.object(volutils, 'copy_volume') as mock_copy, \
            mock.patch.object(volume_rpcapi.VolumeAPI,
                              'get_capabilities') \
            as mock_get_capabilities:

        # Exception case at delete_volume
        # source_volume['migration_status'] is 'completing'
        mock_create_volume.side_effect = self._fake_create_volume
        mock_migrate_compl.side_effect = fake_migrate_volume_completion
        mock_get_connector_properties.return_value = {}
        # First attach is the new (dest) volume, second is the source.
        mock_attach.side_effect = [{'device': {'path': 'bar'}},
                                   {'device': {'path': 'foo'}}]
        mock_get_capabilities.return_value = {'sparse_copy_volume': True}
        volume = tests_utils.create_volume(self.context, size=0,
                                           host=CONF.host)
        host_obj = {'host': 'newhost', 'capabilities': {}}
        self.assertRaises(processutils.ProcessExecutionError,
                          self.volume.migrate_volume,
                          self.context,
                          volume,
                          host_obj,
                          True)
        # Migration failed: status rolled back, volume usable again.
        volume = db.volume_get(context.get_admin_context(),
                               volume['id'])
        self.assertEqual('error', volume['migration_status'])
        self.assertEqual('available', volume['status'])
        # The generic data copy still ran with sparse support enabled.
        mock_copy.assert_called_once_with('foo', 'bar', 0, '1M',
                                          sparse=True)
def test_copy_volume_to_image_status_use(self):
    """Upload of an in-use volume completes and leaves it 'available'."""
    self.image_meta['id'] = 'a440c04b-79fa-479c-bed1-0b816eaec379'
    # creating volume testdata
    type_ref = db.volume_type_create(
        self.context,
        {'name': 'test',
         'extra_specs': {'image_service:store_id': 'fake_store'}})
    self.volume_attrs['volume_type_id'] = type_ref.get('id')
    db.volume_create(self.context, self.volume_attrs)

    # start test
    self.volume.copy_volume_to_image(self.context,
                                     self.volume_id,
                                     self.image_meta)

    volume = db.volume_get(self.context, self.volume_id)
    self.assertEqual('available', volume['status'])
def test_create_backup_with_error(self):
    """Test error handling when error occurs during backup creation."""
    vol_id = self._create_volume_db_entry(size=1)
    backup_id = self._create_backup_db_entry(volume_id=vol_id)

    def fake_backup_volume(context, backup, backup_service):
        raise FakeBackupException('fake')

    # Force the driver to fail so the error path is exercised.
    self.stubs.Set(self.backup_mgr.driver, 'backup_volume',
                   fake_backup_volume)

    self.assertRaises(FakeBackupException,
                      self.backup_mgr.create_backup,
                      self.ctxt, backup_id)
    self.assertEqual(
        db.volume_get(self.ctxt, vol_id)['status'], 'available')
    self.assertEqual(
        db.backup_get(self.ctxt, backup_id)['status'], 'error')
def _clone_volume_from_image(self, expected_status,
                             clone_works=True):
    """Try to clone a volume from an image, and check the status
    afterwards.
    """
    def fake_clone_image(volume, image_location, image_id):
        return {'provider_location': None}, True

    def fake_clone_error(volume, image_location, image_id):
        raise exception.CinderException()

    # Always report the image as cloneable; success/failure is then
    # controlled solely by which clone_image stub is installed.
    self.stubs.Set(self.volume.driver, '_is_cloneable', lambda x: True)
    if clone_works:
        self.stubs.Set(self.volume.driver, 'clone_image',
                       fake_clone_image)
    else:
        self.stubs.Set(self.volume.driver, 'clone_image',
                       fake_clone_error)

    image_id = 'c905cedb-7281-47e4-8a62-f26bc5fc4c77'
    volume_id = 1
    # creating volume testdata
    db.volume_create(self.context,
                     {'id': volume_id,
                      'updated_at': timeutils.utcnow(),
                      'display_description': 'Test Desc',
                      'size': 20,
                      'status': 'creating',
                      'instance_uuid': None,
                      'host': 'dummy'})
    try:
        if clone_works:
            self.volume.create_volume(self.context,
                                      volume_id,
                                      image_id=image_id)
        else:
            self.assertRaises(exception.CinderException,
                              self.volume.create_volume,
                              self.context,
                              volume_id,
                              image_id=image_id)

        # The caller decides the expected terminal status.
        volume = db.volume_get(self.context, volume_id)
        self.assertEqual(volume['status'], expected_status)
    finally:
        # cleanup
        db.volume_destroy(self.context, volume_id)
def _test_copy_volume_to_image_with_image_volume(
        self, mock_copy, mock_create, mock_quota_commit,
        mock_quota_reserve):
    # Upload via an intermediate image-volume on the cinder backend.
    self.flags(glance_api_version=2)
    self.volume.driver.configuration.image_upload_use_cinder_backend = True
    self.addCleanup(fake_image.FakeImageService_reset)
    image_service = fake_image.FakeImageService()

    def add_location_wrapper(ctx, id, uri, metadata):
        # Verify the image-volume is tagged with the owning project
        # before its location is registered with glance.
        try:
            volume = db.volume_get(ctx, id)
            self.assertEqual(ctx.project_id,
                             volume['metadata']['image_owner'])
        except exception.VolumeNotFound:
            pass
        return image_service.add_location_orig(ctx, id, uri, metadata)

    image_service.add_location_orig = image_service.add_location
    image_service.add_location = add_location_wrapper

    image_id = '5c6eec33-bab4-4e7d-b2c9-88e2d0a5f6f2'
    self.image_meta['id'] = image_id
    self.image_meta['status'] = 'queued'
    image_service.create(self.context, self.image_meta)

    # creating volume testdata
    self.volume_attrs['instance_uuid'] = None
    db.volume_create(self.context, self.volume_attrs)

    def fake_create(context, volume, **kwargs):
        # Pretend the image-volume was created successfully.
        db.volume_update(context, volume.id, {'status': 'available'})

    mock_create.side_effect = fake_create

    # start test
    self.volume.copy_volume_to_image(self.context,
                                     self.volume_id,
                                     self.image_meta)

    volume = db.volume_get(self.context, self.volume_id)
    self.assertEqual('available', volume['status'])

    # return create image
    image = image_service.show(self.context, image_id)
    image_service.delete(self.context, image_id)
    return image
def test_finish_volume_migration(self):
    """finish_volume_migration folds the dest row into the source."""
    ctxt = context.RequestContext(user_id='user_id',
                                  project_id='project_id',
                                  is_admin=True)
    src_volume = testutils.create_volume(ctxt, host='src',
                                         status='migrating')
    dest_volume = testutils.create_volume(ctxt, host='dest',
                                          status='migration_target')
    db.finish_volume_migration(ctxt, src_volume['id'],
                               dest_volume['id'])

    # The destination row is gone; the source took over its host.
    self.assertRaises(exception.VolumeNotFound, db.volume_get, ctxt,
                      dest_volume['id'])
    src_volume = db.volume_get(ctxt, src_volume['id'])
    self.assertEqual(src_volume['host'], 'dest')
    self.assertEqual(src_volume['status'], 'migrating')
def test_update_volume_encryption_key_id(self, mock_barbican_client,
                                         mock_get_barbican_key_id):
    """Key migration rewrites the key id on volume and snapshots."""
    vol = self.create_volume()
    snap_ids = [fake.SNAPSHOT_ID, fake.SNAPSHOT2_ID, fake.SNAPSHOT3_ID]
    for snap_id in snap_ids:
        tests_utils.create_snapshot(self.context, vol.id, id=snap_id)
    mock_get_barbican_key_id.return_value = fake.ENCRYPTION_KEY_ID

    migration.migrate_fixed_key(self.my_vols, self.my_baks,
                                conf=self.conf)

    self.assertEqual(
        fake.ENCRYPTION_KEY_ID,
        db.volume_get(self.context, vol.id)['encryption_key_id'])
    for snap_id in snap_ids:
        self.assertEqual(
            fake.ENCRYPTION_KEY_ID,
            db.snapshot_get(self.context,
                            snap_id)['encryption_key_id'])
def test_copy_volume_to_image_status_available(self):
    """Upload of a detached volume leaves it 'available'."""
    # creating volume testdata
    self.volume_attrs['instance_uuid'] = None
    type_ref = db.volume_type_create(
        self.context,
        {'name': 'test',
         'extra_specs': {'image_service:store_id': 'fake_store'}})
    self.volume_attrs['volume_type_id'] = type_ref.get('id')
    db.volume_create(self.context, self.volume_attrs)

    # start test
    self.volume.copy_volume_to_image(self.context,
                                     self.volume_id,
                                     self.image_meta)

    volume = db.volume_get(self.context, self.volume_id)
    self.assertEqual('available', volume['status'])
def test_preattach_status_volume(self):
    """Ensure volume goes into pre-attaching state"""
    instance_uuid = '12345678-1234-5678-1234-567812345678'
    mountpoint = "/dev/sdf"
    volume = db.volume_create(self.context, {'size': 1,
                                             'status': 'available'})
    volume_id = volume['id']

    volume_api = cinder.volume.api.API()
    volume_api.attach(self.context, volume, instance_uuid, mountpoint)

    vol = db.volume_get(self.context, volume_id)
    self.assertEqual(vol['status'], "available")
    # assertIsNone is the canonical unittest idiom and gives a clearer
    # failure message than assertEqual(..., None).
    self.assertIsNone(vol['attach_status'])
    self.assertIsNone(vol['instance_uuid'])
def test_copy_image_to_encrypted_volume_failed_attach_encryptor(
        self, mock_detach_encryptor, mock_attach_encryptor,
        mock_detach_volume, mock_attach_volume, mock_fetch_to_raw,
        mock_get_connector_properties):
    # When attaching the encryptor fails, no image data is fetched and
    # the volume is force-detached.
    properties = {}
    volume = tests_utils.create_volume(
        self.context, status='available',
        size=2,
        encryption_key_id=fake.ENCRYPTION_KEY_ID)
    volume_id = volume['id']
    volume = db.volume_get(context.get_admin_context(), volume_id)
    image_service = fake_image.FakeImageService()
    attach_info = {'device': {'path': 'dev/sda'},
                   'conn': {'driver_volume_type': 'iscsi',
                            'data': {}, }}

    mock_get_connector_properties.return_value = properties
    mock_attach_volume.return_value = [attach_info, volume]
    raised_exception = os_brick.exception.VolumeEncryptionNotSupported(
        volume_id="123",
        volume_type="abc")
    mock_attach_encryptor.side_effect = raised_exception

    self.assertRaises(os_brick.exception.VolumeEncryptionNotSupported,
                      self.volume.driver.copy_image_to_encrypted_volume,
                      self.context, volume, image_service,
                      fake.IMAGE_ID)

    encryption = {'encryption_key_id': fake.ENCRYPTION_KEY_ID}
    mock_attach_volume.assert_called_once_with(
        self.context, volume, properties)
    mock_attach_encryptor.assert_called_once_with(
        self.context, attach_info, encryption)
    # The failure happened before any data transfer started...
    self.assertFalse(mock_fetch_to_raw.called)
    self.assertFalse(mock_detach_encryptor.called)
    # ...and detach is forced since the encryptor state is unknown.
    mock_detach_volume.assert_called_once_with(
        self.context, attach_info, volume, properties,
        force=True)
def test_malformed_reset_status_body(self):
    """A reset-status body without a 'status' key returns 400."""
    # admin context
    ctx = context.RequestContext('admin', 'fake', True)
    # current status is available
    volume = db.volume_create(ctx, {'status': 'available', 'size': 1})
    req = webob.Request.blank('/v2/fake/volumes/%s/action' %
                              volume['id'])
    req.method = 'POST'
    req.headers['content-type'] = 'application/json'
    # malformed request body; webob request bodies must be bytes on
    # Python 3, so use dump_as_bytes like the other action tests.
    req.body = jsonutils.dump_as_bytes({'os-reset_status':
                                        {'x-status': 'bad'}})
    # attach admin context to request
    req.environ['cinder.context'] = ctx
    resp = req.get_response(app())
    # bad request
    self.assertEqual(resp.status_int, 400)
    volume = db.volume_get(ctx, volume['id'])
    # status is still 'available'
    self.assertEqual(volume['status'], 'available')
def test_create_backup(self):
    """Test normal backup creation"""
    vol_size = 1
    vol_id = self._create_volume_db_entry(size=vol_size)
    backup_id = self._create_backup_db_entry(volume_id=vol_id)

    def fake_backup_volume(context, backup, backup_service):
        pass

    # Stub out the driver so no real data transfer occurs.
    self.stubs.Set(self.backup_mgr.driver, 'backup_volume',
                   fake_backup_volume)

    self.backup_mgr.create_backup(self.ctxt, backup_id)

    self.assertEqual(
        db.volume_get(self.ctxt, vol_id)['status'], 'available')
    backup = db.backup_get(self.ctxt, backup_id)
    self.assertEqual(backup['status'], 'available')
    self.assertEqual(backup['size'], vol_size)