def test_attachment_update_duplicate(self, mock_va_update, mock_db_upd):
    """Updating an attachment whose connector collides must fail.

    Two attachments exist on the volume; the first is given a connector
    matching the one passed to attachment_update for the second, so the
    API must raise InvalidVolume and issue no RPC or DB updates.
    """
    vref = tests_utils.create_volume(self.context, deleted=0,
                                     status='available')
    tests_utils.attach_volume(self.context, vref.id, fake.UUID1,
                              'somehost', 'somemountpoint')
    # Add a second attachment on another host.
    tests_utils.attach_volume(self.context, vref.id, fake.UUID2,
                              'somehost2', 'somemountpoint2')
    vref.refresh()

    # Give the first attachment a connector that collides with the one
    # used for the update below.
    vref.volume_attachment[0]['connector'] = {'host': 'somehost'}
    vref.volume_attachment[0]['connection_info'] = {'c': 'd'}
    connector = {'host': 'somehost'}

    with mock.patch('cinder.objects.Volume.get_by_id',
                    return_value=vref), \
            mock.patch.object(self.volume_api.volume_rpcapi,
                              'attachment_update') as mock_rpc_update:
        self.assertRaises(exception.InvalidVolume,
                          self.volume_api.attachment_update,
                          self.context,
                          vref.volume_attachment[1],
                          connector)
        # The collision is detected before any RPC is sent.
        mock_rpc_update.assert_not_called()
    mock_va_update.assert_not_called()
    mock_db_upd.assert_not_called()
def test_retype_volume_exception_returns_volume_state(
        self, _mock_vol_attachment_get, _mock_vol_update):
    """NoValidHost during retype restores the volume's previous state.

    The manager's retype must swallow the NoValidHost raised by the
    driver's find_retype_host and put the volume back to 'in-use'
    rather than leaving it stuck in 'retyping'.
    """
    volume = tests_utils.create_volume(self.context, status='retyping',
                                       previous_status='in-use')
    instance_uuid = '12345678-1234-5678-1234-567812345678'
    volume_attach = tests_utils.attach_volume(self.context, volume.id,
                                              instance_uuid, None,
                                              '/dev/fake')
    _mock_vol_attachment_get.return_value = [volume_attach]
    topic = 'fake_topic'
    request_spec = {'volume_id': volume.id,
                    'volume_type': {'id': 3},
                    'migration_policy': 'on-demand'}
    _mock_vol_update.return_value = {'status': 'in-use'}
    _mock_find_retype_host = mock.Mock(
        side_effect=exception.NoValidHost(reason=""))
    orig_retype = self.manager.driver.find_retype_host
    self.manager.driver.find_retype_host = _mock_find_retype_host
    try:
        # NoValidHost must be eaten by retype, not propagated.
        self.manager.retype(self.context, topic, volume.id,
                            request_spec=request_spec,
                            filter_properties={}, volume=volume)
        _mock_find_retype_host.assert_called_once_with(self.context,
                                                       request_spec, {},
                                                       'on-demand')
        _mock_vol_update.assert_called_once_with(self.context, volume.id,
                                                 {'status': 'in-use'})
    finally:
        # Restore the real driver method even if retype or an assertion
        # above fails, so the mock cannot leak into subsequent tests.
        self.manager.driver.find_retype_host = orig_retype
def test_migrate_volume_generic_attached_volume(self, volume_get,
                                                migrate_volume_completion,
                                                nova_api):
    """Generic migration of an attached volume defers completion to Nova.

    For an in-use volume, _migrate_volume_generic must not call
    migrate_volume_completion itself; it asks Nova to swap the server's
    volume via update_server_volume instead.
    """
    new_vol_id = fake.VOLUME_ID
    volume_get.return_value = fake_volume.fake_db_volume(
        status='available', id=new_vol_id)
    update_server_volume = nova_api.return_value.update_server_volume
    dest = {'host': 'newhost', 'capabilities': {}}
    instance_uuid = fakes.get_fake_uuid()

    volume = tests_utils.create_volume(self.context, size=1,
                                       host=CONF.host)
    attached = tests_utils.attach_volume(self.context, volume['id'],
                                         instance_uuid, 'some-host',
                                         '/dev/vda')
    attachment = attached['volume_attachment'][0]
    self.assertIsNotNone(attachment['id'])
    self.assertEqual(instance_uuid, attachment['instance_uuid'])
    self.assertEqual('in-use', attached['status'])

    self.volume._migrate_volume_generic(self.context, volume, dest, None)

    # Completion is handed off to Nova, not finished locally.
    self.assertFalse(migrate_volume_completion.called)
    update_server_volume.assert_called_with(self.context, instance_uuid,
                                            volume['id'], new_vol_id)
def test_retype_volume_exception_returns_volume_state(
        self, quota_rollback, _mock_vol_attachment_get, _mock_vol_update
):
    """NoValidHost during retype rolls back quota and volume state.

    The manager must eat the NoValidHost raised by find_retype_host,
    roll back the quota reservations carried in the request spec, and
    return the volume to its previous 'in-use' status.
    """
    volume = tests_utils.create_volume(self.context, status="retyping",
                                       previous_status="in-use")
    instance_uuid = "12345678-1234-5678-1234-567812345678"
    volume_attach = tests_utils.attach_volume(self.context, volume.id,
                                              instance_uuid, None,
                                              "/dev/fake")
    _mock_vol_attachment_get.return_value = [volume_attach]
    reservations = mock.sentinel.reservations
    request_spec = {
        "volume_id": volume.id,
        "volume_type": {"id": 3},
        "migration_policy": "on-demand",
        "quota_reservations": reservations,
    }
    _mock_vol_update.return_value = {"status": "in-use"}
    _mock_find_retype_host = mock.Mock(
        side_effect=exception.NoValidHost(reason=""))
    orig_retype = self.manager.driver.find_retype_host
    self.manager.driver.find_retype_host = _mock_find_retype_host
    try:
        # NoValidHost must be swallowed by retype, not propagated.
        self.manager.retype(self.context, volume,
                            request_spec=request_spec,
                            filter_properties={})
        _mock_find_retype_host.assert_called_once_with(
            self.context, request_spec, {}, "on-demand")
        quota_rollback.assert_called_once_with(self.context, reservations)
        _mock_vol_update.assert_called_once_with(self.context, volume.id,
                                                 {"status": "in-use"})
    finally:
        # Restore the real driver method even when an assertion fails,
        # so the mock cannot leak into subsequent tests.
        self.manager.driver.find_retype_host = orig_retype
def _test_migrate_volume_completion(self, status='available',
                                    instance_uuid=None,
                                    attached_host=None,
                                    retyping=False,
                                    previous_status='available'):
    """Common driver for migrate_volume_completion tests.

    Creates a source volume in 'migrating' state (attached to
    ``instance_uuid``/``attached_host`` when ``status`` is 'in-use') and
    a destination volume, runs ``migrate_volume_completion`` with the
    RPC layer mocked out, then checks the detach/delete calls and that
    the two volume records swapped hosts.

    :param status: desired source volume status ('available'/'in-use')
    :param instance_uuid: instance to attach to when status is 'in-use'
    :param attached_host: host to attach to when status is 'in-use'
    :param retyping: if True the source volume starts as 'retyping'
    :param previous_status: source volume's previous_status field
    """
    # 'retyping' takes precedence over the requested status.
    initial_status = retyping and 'retyping' or status
    old_volume = tests_utils.create_volume(self.context, size=0,
                                           host=CONF.host,
                                           status=initial_status,
                                           migration_status='migrating',
                                           previous_status=previous_status)
    attachment_id = None
    if status == 'in-use':
        vol = tests_utils.attach_volume(self.context, old_volume.id,
                                        instance_uuid, attached_host,
                                        '/dev/vda')
        self.assertEqual('in-use', vol['status'])
        attachment_id = vol['volume_attachment'][0]['id']
    target_status = 'target:%s' % old_volume.id
    new_host = CONF.host + 'new'
    new_volume = tests_utils.create_volume(self.context, size=0,
                                           host=new_host,
                                           migration_status=target_status)
    # Mock out every RPC the completion path would fan out to.
    with mock.patch.object(self.volume, 'detach_volume') as \
            mock_detach_volume,\
            mock.patch.object(volume_rpcapi.VolumeAPI,
                              'delete_volume') as mock_delete_volume,\
            mock.patch.object(volume_rpcapi.VolumeAPI,
                              'attach_volume') as mock_attach_volume,\
            mock.patch.object(volume_rpcapi.VolumeAPI,
                              'update_migrated_volume'),\
            mock.patch.object(self.volume.driver, 'attach_volume'):
        mock_attach_volume.side_effect = self.fake_attach_volume
        # Remember the pre-completion hosts; completion swaps them.
        old_volume_host = old_volume.host
        new_volume_host = new_volume.host
        self.volume.migrate_volume_completion(self.context, old_volume,
                                              new_volume)
        after_new_volume = objects.Volume.get_by_id(self.context,
                                                    new_volume.id)
        after_old_volume = objects.Volume.get_by_id(self.context,
                                                    old_volume.id)
        if status == 'in-use':
            mock_detach_volume.assert_called_with(self.context,
                                                  old_volume.id,
                                                  attachment_id)
            attachments = db.volume_attachment_get_all_by_instance_uuid(
                self.context, instance_uuid)
            self.assertIsNotNone(attachments)
            self.assertEqual(attached_host,
                             attachments[0]['attached_host'])
            self.assertEqual(instance_uuid,
                             attachments[0]['instance_uuid'])
        else:
            self.assertFalse(mock_detach_volume.called)
        self.assertTrue(mock_delete_volume.called)
        # NOTE(sborkows): the migrate_volume_completion method alters
        # old and new volume objects, so we need to check the equality
        # between the former host value and the actual one.
        self.assertEqual(old_volume_host, after_new_volume.host)
        self.assertEqual(new_volume_host, after_old_volume.host)
def test_init_host_clears_uploads_in_use_volume(self, init_host_mock):
    """An in-use volume stuck in 'uploading' is reset by init_host."""
    volume = tests_utils.create_volume(self.context, status='uploading',
                                       size=0, host=CONF.host)
    db.worker_create(self.context, resource_type='Volume',
                     resource_id=volume.id, status=volume.status,
                     service_id=self.service_id)
    instance_uuid = fakes.get_fake_uuid()
    tests_utils.attach_volume(self.context, volume.id, instance_uuid,
                              'fake_host', '/dev/vda')

    self.volume.init_host(service_id=mock.sentinel.service_id)
    init_host_mock.assert_called_once_with(
        service_id=mock.sentinel.service_id, added_to_cluster=None)

    # The stale 'uploading' status falls back to 'in-use' because the
    # volume still has an attachment, and its worker row is cleaned up.
    volume.refresh()
    self.assertEqual('in-use', volume.status)
    self._assert_workers_are_removed()
def _create_attachment(self):
    """Create and attach a read-only volume; return the attachment id."""
    vol_type = test_utils.create_volume_type(self.project_admin_context,
                                             name='fake_vol_type',
                                             testcase_instance=self)
    admin_meta = {'attached_mode': 'ro'}
    volume = test_utils.create_volume(self.project_member_context,
                                      volume_type_id=vol_type.id,
                                      admin_metadata=admin_meta,
                                      testcase_instance=self)
    attached = test_utils.attach_volume(self.project_member_context,
                                        volume.id, fake.INSTANCE_ID,
                                        'fake_host', 'fake_mountpoint')
    return attached.volume_attachment[0].id
def test_api_migrate_volume_completion_from_swap_with_no_migration(
        self, swap_error):
    """Swap-volume completion finishes status updates w/o a migration."""
    instance_uuid = '83c969d5-065e-4c9c-907d-5394bc2e98e2'
    attached_host = 'attached-host'
    src_volume = tests_utils.create_volume(self.context, size=0)
    src_volume = tests_utils.attach_volume(self.context,
                                           src_volume['id'],
                                           instance_uuid, attached_host,
                                           '/dev/vda')
    new_volume = tests_utils.create_volume(self.context, size=0)

    # Decorators apply bottom-up: attach_volume is the first mock
    # argument, detach_volume the second.
    @mock.patch.object(volume_rpcapi.VolumeAPI, 'detach_volume')
    @mock.patch.object(volume_rpcapi.VolumeAPI, 'attach_volume')
    def _check_completion(rpc_attach_volume, rpc_detach_volume):
        attachment = src_volume['volume_attachment'][0]
        rpc_attach_volume.side_effect = self.fake_attach_volume
        vol_id = volume_api.API().migrate_volume_completion(
            self.context, src_volume, new_volume, swap_error)
        if swap_error:
            # A failed swap must leave the attachment untouched.
            self.assertFalse(rpc_detach_volume.called)
            self.assertFalse(rpc_attach_volume.called)
        else:
            # A clean swap re-homes the attachment onto the new volume.
            rpc_detach_volume.assert_called_with(self.context,
                                                 src_volume,
                                                 attachment['id'])
            rpc_attach_volume.assert_called_with(
                self.context, new_volume, attachment['instance_uuid'],
                attachment['attached_host'], attachment['mountpoint'],
                'rw')
        self.assertEqual(new_volume['id'], vol_id)

    _check_completion()
def test_api_migrate_volume_completion_from_swap_with_no_migration(
        self, swap_error):
    """Verify swap-volume completion when no migration took place."""
    instance_uuid = '83c969d5-065e-4c9c-907d-5394bc2e98e2'
    attached_host = 'attached-host'
    old_vol = tests_utils.create_volume(self.context, size=0)
    old_vol = tests_utils.attach_volume(self.context, old_vol['id'],
                                        instance_uuid, attached_host,
                                        '/dev/vda')
    replacement = tests_utils.create_volume(self.context, size=0)

    @mock.patch.object(volume_rpcapi.VolumeAPI, 'detach_volume')
    @mock.patch.object(volume_rpcapi.VolumeAPI, 'attach_volume')
    def _complete_swap(rpc_attach, rpc_detach):
        attachment = old_vol['volume_attachment'][0]
        attachment_id = attachment['id']
        rpc_attach.side_effect = self.fake_attach_volume
        result_id = volume_api.API().migrate_volume_completion(
            self.context, old_vol, replacement, swap_error)
        if not swap_error:
            # On success the attachment moves over to the new volume.
            rpc_detach.assert_called_with(self.context, old_vol,
                                          attachment_id)
            rpc_attach.assert_called_with(self.context, replacement,
                                          attachment['instance_uuid'],
                                          attachment['attached_host'],
                                          attachment['mountpoint'], 'rw')
        else:
            # On a failed swap no attach/detach RPCs may be issued.
            self.assertFalse(rpc_detach.called)
            self.assertFalse(rpc_attach.called)
        self.assertEqual(replacement['id'], result_id)

    _complete_swap()
def test_migrate_volume_generic_attached_volume(self, volume_get,
                                                migrate_volume_completion,
                                                nova_api):
    """In-use generic migration lets Nova swap the server's volume."""
    attached_host = 'some-host'
    dest_volume_id = fake.VOLUME_ID
    volume_get.return_value = fake_volume.fake_db_volume(
        **{'status': 'available', 'id': dest_volume_id})
    host_obj = {'host': 'newhost', 'capabilities': {}}
    server_uuid = fakes.get_fake_uuid()
    update_server_volume = nova_api.return_value.update_server_volume

    src_volume = tests_utils.create_volume(self.context, size=1,
                                           host=CONF.host)
    attached_vol = tests_utils.attach_volume(self.context,
                                             src_volume['id'],
                                             server_uuid, attached_host,
                                             '/dev/vda')
    self.assertIsNotNone(attached_vol['volume_attachment'][0]['id'])
    self.assertEqual(server_uuid,
                     attached_vol['volume_attachment'][0]['instance_uuid'])
    self.assertEqual('in-use', attached_vol['status'])

    self.volume._migrate_volume_generic(self.context, src_volume,
                                        host_obj, None)

    # Completion must be delegated to Nova rather than run locally.
    self.assertFalse(migrate_volume_completion.called)
    update_server_volume.assert_called_with(self.context, server_uuid,
                                            src_volume['id'],
                                            dest_volume_id)
def fake_attach_volume(self, ctxt, volume, instance_uuid, host_name,
                       mountpoint, mode):
    """Stand-in for the attach_volume RPC, used as a mock side_effect.

    Records the attachment directly via the test helpers.  Note that
    the ``mountpoint`` and ``mode`` arguments are accepted (to match
    the RPC signature) but ignored: the attachment is always created
    at '/dev/vda'.
    """
    tests_utils.attach_volume(ctxt, volume.id, instance_uuid,
                              host_name, '/dev/vda')
def _test_migrate_volume_completion(self, status='available',
                                    instance_uuid=None,
                                    attached_host=None,
                                    retyping=False,
                                    previous_status='available'):
    """Common driver for migrate_volume_completion tests.

    Creates a source volume in 'migrating' state (attached when
    ``status`` is 'in-use') plus a destination volume, runs
    ``migrate_volume_completion`` with the RPC layer mocked out, and
    verifies the detach/attach/delete calls and the host swap between
    the two volume records.

    :param status: desired source volume status ('available'/'in-use')
    :param instance_uuid: instance to attach to when status is 'in-use'
    :param attached_host: host to attach to when status is 'in-use'
    :param retyping: if True the source volume starts as 'retyping'
    :param previous_status: source volume's previous_status field
    """
    # 'retyping' takes precedence over the requested status.
    initial_status = retyping and 'retyping' or status
    old_volume = tests_utils.create_volume(self.context, size=0,
                                           host=CONF.host,
                                           status=initial_status,
                                           migration_status='migrating',
                                           previous_status=previous_status)
    attachment = None
    if status == 'in-use':
        vol = tests_utils.attach_volume(self.context, old_volume.id,
                                        instance_uuid, attached_host,
                                        '/dev/vda')
        self.assertEqual('in-use', vol['status'])
        attachment = vol['volume_attachment'][0]
    target_status = 'target:%s' % old_volume.id
    new_host = CONF.host + 'new'
    new_volume = tests_utils.create_volume(self.context, size=0,
                                           host=new_host,
                                           migration_status=target_status)
    # Mock out every RPC the completion path would fan out to.
    with mock.patch.object(self.volume, 'detach_volume') as \
            mock_detach_volume,\
            mock.patch.object(volume_rpcapi.VolumeAPI,
                              'delete_volume') as mock_delete_volume,\
            mock.patch.object(volume_rpcapi.VolumeAPI,
                              'attach_volume') as mock_attach_volume,\
            mock.patch.object(volume_rpcapi.VolumeAPI,
                              'update_migrated_volume'),\
            mock.patch.object(self.volume.driver, 'attach_volume'):
        mock_attach_volume.side_effect = self.fake_attach_volume
        # Remember the pre-completion hosts; completion swaps them.
        old_volume_host = old_volume.host
        new_volume_host = new_volume.host
        self.volume.migrate_volume_completion(self.context, old_volume,
                                              new_volume)
        after_new_volume = objects.Volume.get_by_id(self.context,
                                                    new_volume.id)
        after_old_volume = objects.Volume.get_by_id(self.context,
                                                    old_volume.id)
        if status == 'in-use':
            mock_detach_volume.assert_called_with(self.context,
                                                  old_volume.id,
                                                  attachment['id'])
            attachments = db.volume_attachment_get_all_by_instance_uuid(
                self.context, instance_uuid)
            # NOTE(review): the re-attach is asserted against old_volume;
            # presumably completion swaps the two records so old_volume
            # ends up as the live one (the host asserts below show the
            # swap) — confirm against migrate_volume_completion.
            mock_attach_volume.assert_called_once_with(
                self.context,
                old_volume,
                attachment['instance_uuid'],
                attachment['attached_host'],
                attachment['mountpoint'],
                'rw'
            )
            self.assertIsNotNone(attachments)
            self.assertEqual(attached_host,
                             attachments[0]['attached_host'])
            self.assertEqual(instance_uuid,
                             attachments[0]['instance_uuid'])
        else:
            self.assertFalse(mock_detach_volume.called)
        self.assertTrue(mock_delete_volume.called)
        # NOTE(sborkows): the migrate_volume_completion method alters
        # old and new volume objects, so we need to check the equality
        # between the former host value and the actual one.
        self.assertEqual(old_volume_host, after_new_volume.host)
        self.assertEqual(new_volume_host, after_old_volume.host)