def test_copy_volume_to_image_status_available(self):
    dst_fd, dst_path = tempfile.mkstemp()
    os.close(dst_fd)

    def fake_local_path(volume):
        return dst_path

    self.stubs.Set(self.volume.driver, "local_path", fake_local_path)
    image_id = "70a599e0-31e7-49b7-b260-868f441e862b"
    # creating volume testdata
    volume_id = 1
    db.volume_create(
        self.context,
        {
            "id": volume_id,
            "updated_at": datetime.datetime(1, 1, 1, 1, 1, 1),
            "display_description": "Test Desc",
            "size": 20,
            "status": "uploading",
            "instance_uuid": None,
            "host": "dummy",
        },
    )
    try:
        # start test
        self.volume.copy_volume_to_image(self.context, volume_id, image_id)

        volume = db.volume_get(self.context, volume_id)
        self.assertEqual(volume["status"], "available")
    finally:
        # cleanup
        db.volume_destroy(self.context, volume_id)
        os.unlink(dst_path)
def test_create_volume_from_image_exception(self):
    """Verify that a failed create-volume-from-image leaves the volume in
    'error' status."""
    dst_fd, dst_path = tempfile.mkstemp()
    os.close(dst_fd)

    self.stubs.Set(self.volume.driver, "local_path", lambda x: dst_path)

    image_id = "aaaaaaaa-0000-0000-0000-000000000000"
    # creating volume testdata
    volume_id = 1
    db.volume_create(
        self.context,
        {
            "id": volume_id,
            "updated_at": datetime.datetime(1, 1, 1, 1, 1, 1),
            "display_description": "Test Desc",
            "size": 20,
            "status": "creating",
            "host": "dummy",
        },
    )

    self.assertRaises(
        exception.ImageNotFound,
        self.volume.create_volume,
        self.context,
        volume_id,
        None,
        image_id,
    )
    volume = db.volume_get(self.context, volume_id)
    self.assertEqual(volume["status"], "error")
    # cleanup
    db.volume_destroy(self.context, volume_id)
    os.unlink(dst_path)
def test_least_busy_host_gets_volume(self):
    """Ensures the host with fewer gigabytes gets the next one"""
    volume1 = service.Service('host1',
                              'nova-volume',
                              'volume',
                              FLAGS.volume_manager)
    volume1.start()
    volume2 = service.Service('host2',
                              'nova-volume',
                              'volume',
                              FLAGS.volume_manager)
    volume2.start()
    volume_id1 = self._create_volume()
    volume1.create_volume(self.context, volume_id1)
    volume_id2 = self._create_volume()
    host = self.scheduler.driver.schedule_create_volume(self.context,
                                                        volume_id2)
    self.assertEqual(host, 'host2')
    volume1.delete_volume(self.context, volume_id1)
    db.volume_destroy(self.context, volume_id2)

    dic = {'service_id': s_ref['id'],
           'vcpus': 16, 'memory_mb': 32, 'local_gb': 100,
           'vcpus_used': 16, 'memory_mb_used': 12, 'local_gb_used': 10,
           'hypervisor_type': 'qemu', 'hypervisor_version': 12003,
           'cpu_info': ''}
def test_least_busy_host_gets_volume(self):
    """Ensures the host with fewer gigabytes gets the next one"""
    volume1 = service.Service("host1", "nova-volume", "volume", FLAGS.volume_manager)
    volume1.start()
    volume2 = service.Service("host2", "nova-volume", "volume", FLAGS.volume_manager)
    volume2.start()
    volume_id1 = self._create_volume()
    volume1.create_volume(self.context, volume_id1)
    volume_id2 = self._create_volume()
    host = self.scheduler.driver.schedule_create_volume(self.context, volume_id2)
    self.assertEqual(host, "host2")
    volume1.delete_volume(self.context, volume_id1)
    db.volume_destroy(self.context, volume_id2)

    dic = {
        "service_id": s_ref["id"],
        "vcpus": 16,
        "memory_mb": 32,
        "local_gb": 100,
        "vcpus_used": 16,
        "memory_mb_used": 12,
        "local_gb_used": 10,
        "hypervisor_type": "qemu",
        "hypervisor_version": 12003,
        "cpu_info": "",
    }
def test_copy_volume_to_image_status_use(self):
    dst_fd, dst_path = tempfile.mkstemp()
    os.close(dst_fd)

    def fake_local_path(volume):
        return dst_path

    self.stubs.Set(self.volume.driver, 'local_path', fake_local_path)
    # image_id = '70a599e0-31e7-49b7-b260-868f441e862b'
    image_id = 'a440c04b-79fa-479c-bed1-0b816eaec379'
    # creating volume testdata
    volume_id = 1
    db.volume_create(self.context,
                     {'id': volume_id,
                      'updated_at': datetime.datetime(1, 1, 1, 1, 1, 1),
                      'display_description': 'Test Desc',
                      'size': 20,
                      'status': 'uploading',
                      'instance_uuid':
                          'b21f957d-a72f-4b93-b5a5-45b1161abb02',
                      'host': 'dummy'})
    try:
        # start test
        self.volume.copy_volume_to_image(self.context,
                                         volume_id,
                                         image_id)

        volume = db.volume_get(self.context, volume_id)
        self.assertEqual(volume['status'], 'in-use')
    finally:
        # cleanup
        db.volume_destroy(self.context, volume_id)
        os.unlink(dst_path)
def test_copy_volume_to_image_status_available(self):
    dst_fd, dst_path = tempfile.mkstemp()
    os.close(dst_fd)

    def fake_local_path(volume):
        return dst_path

    self.stubs.Set(self.volume.driver, 'local_path', fake_local_path)
    image_id = '70a599e0-31e7-49b7-b260-868f441e862b'
    # creating volume testdata
    volume_id = 1
    db.volume_create(self.context,
                     {'id': volume_id,
                      'updated_at': datetime.datetime(1, 1, 1, 1, 1, 1),
                      'display_description': 'Test Desc',
                      'size': 20,
                      'status': 'uploading',
                      'instance_uuid': None,
                      'host': 'dummy'})
    try:
        # start test
        self.volume.copy_volume_to_image(self.context,
                                         volume_id,
                                         image_id)

        volume = db.volume_get(self.context, volume_id)
        self.assertEqual(volume['status'], 'available')
    finally:
        # cleanup
        db.volume_destroy(self.context, volume_id)
        os.unlink(dst_path)
def test_copy_volume_to_image_exception(self):
    dst_fd, dst_path = tempfile.mkstemp()
    os.close(dst_fd)

    def fake_local_path(volume):
        return dst_path

    self.stubs.Set(self.volume.driver, 'local_path', fake_local_path)
    image_id = 'aaaaaaaa-0000-0000-0000-000000000000'
    # creating volume testdata
    volume_id = 1
    db.volume_create(self.context,
                     {'id': volume_id,
                      'updated_at': datetime.datetime(1, 1, 1, 1, 1, 1),
                      'display_description': 'Test Desc',
                      'size': 20,
                      'status': 'in-use',
                      'host': 'dummy'})
    try:
        # start test
        self.assertRaises(exception.ImageNotFound,
                          self.volume.copy_volume_to_image,
                          self.context,
                          volume_id,
                          image_id)

        volume = db.volume_get(self.context, volume_id)
        self.assertEqual(volume['status'], 'available')
    finally:
        # cleanup
        db.volume_destroy(self.context, volume_id)
        os.unlink(dst_path)
def test_too_many_gigabytes(self):
    volume_ids = []
    volume_id = self._create_volume(size=20)
    volume_ids.append(volume_id)
    self.assertRaises(
        exception.QuotaError, volume.API().create, self.context, 10, "", "", None
    )
    for volume_id in volume_ids:
        db.volume_destroy(self.context, volume_id)
def test_create_volume_from_image_exception(self):
    """Verify that a failed create-volume-from-image leaves the volume in
    'error' status."""
    dst_fd, dst_path = tempfile.mkstemp()
    os.close(dst_fd)

    self.stubs.Set(self.volume.driver, 'local_path', lambda x: dst_path)

    image_id = 'aaaaaaaa-0000-0000-0000-000000000000'
    # creating volume testdata
    volume_id = 1
    db.volume_create(self.context,
                     {'id': volume_id,
                      'updated_at': datetime.datetime(1, 1, 1, 1, 1, 1),
                      'display_description': 'Test Desc',
                      'size': 20,
                      'status': 'creating',
                      'host': 'dummy'})

    self.assertRaises(exception.ImageNotFound,
                      self.volume.create_volume,
                      self.context,
                      volume_id,
                      None,
                      image_id)
    volume = db.volume_get(self.context, volume_id)
    self.assertEqual(volume['status'], "error")
    # cleanup
    db.volume_destroy(self.context, volume_id)
    os.unlink(dst_path)
def test_create_volume_from_exact_sized_image(self):
    """Verify that an image which is exactly the same size as the volume
    will work correctly."""
    class _FakeImageService:
        def __init__(self, db_driver=None, image_service=None):
            pass

        def show(self, context, image_id):
            return {'size': 2 * 1024 * 1024 * 1024}

    image_id = '70a599e0-31e7-49b7-b260-868f441e862b'

    try:
        volume_id = None
        volume_api = nova.volume.api.API(
            image_service=_FakeImageService())
        volume = volume_api.create(self.context, 2, 'name', 'description',
                                   image_id=1)
        volume_id = volume['id']
        self.assertEqual(volume['status'], 'creating')
    finally:
        # cleanup
        db.volume_destroy(self.context, volume_id)
def test_notify_usage_exists(self):
    """Ensure 'exists' notification generates appropriate usage data."""
    volume_id = self._create_volume()
    volume = db.volume_get(self.context, volume_id)
    volume_utils.notify_usage_exists(self.context, volume)
    self.assertEquals(len(test_notifier.NOTIFICATIONS), 1)
    msg = test_notifier.NOTIFICATIONS[0]
    self.assertEquals(msg["priority"], "INFO")
    self.assertEquals(msg["event_type"], "volume.exists")
    payload = msg["payload"]
    self.assertEquals(payload["tenant_id"], self.project_id)
    self.assertEquals(payload["user_id"], self.user_id)
    self.assertEquals(payload["snapshot_id"], self.snapshot_id)
    self.assertEquals(payload["volume_id"], volume.id)
    self.assertEquals(payload["size"], self.volume_size)
    for attr in (
        "display_name",
        "created_at",
        "launched_at",
        "status",
        "audit_period_beginning",
        "audit_period_ending",
    ):
        self.assertTrue(attr in payload, msg="Key %s not in payload" % attr)
    db.volume_destroy(context.get_admin_context(), volume["id"])
def test_live_migration_src_check_volume_node_not_alive(self):
    """Raise exception when volume node is not alive."""
    instance_id = self._create_instance()
    i_ref = db.instance_get(self.context, instance_id)
    dic = {'instance_id': instance_id, 'size': 1}
    v_ref = db.volume_create(self.context, {'instance_id': instance_id,
                                            'size': 1})
    t1 = utils.utcnow() - datetime.timedelta(1)
    dic = {'created_at': t1, 'updated_at': t1, 'binary': 'nova-volume',
           'topic': 'volume', 'report_count': 0}
    s_ref = db.service_create(self.context, dic)

    self.assertRaises(exception.VolumeServiceUnavailable,
                      self.scheduler.driver.schedule_live_migration,
                      self.context, instance_id, i_ref['host'])

    db.instance_destroy(self.context, instance_id)
    db.service_destroy(self.context, s_ref['id'])
    db.volume_destroy(self.context, v_ref['id'])
def test_too_many_gigabytes(self):
    volume_ids = []
    volume_id = self._create_volume(size=20)
    volume_ids.append(volume_id)
    self.assertRaises(
        quota.QuotaError,
        volume.API().create,
        self.context,
        size=10,
        name="",
        description="",
    )
    for volume_id in volume_ids:
        db.volume_destroy(self.context, volume_id)
def test_update_of_volume_display_fields(self):
    vol = db.volume_create(self.context, {})
    self.cloud.update_volume(self.context,
                             ec2utils.id_to_ec2_id(vol['id'], 'vol-%08x'),
                             display_name='c00l v0lum3')
    vol = db.volume_get(self.context, vol['id'])
    self.assertEqual('c00l v0lum3', vol['display_name'])
    db.volume_destroy(self.context, vol['id'])
def test_too_many_gigabytes(self):
    volume_ids = []
    volume_id = self._create_volume(size=20)
    volume_ids.append(volume_id)
    self.assertRaises(exception.QuotaError,
                      volume.API().create,
                      self.context, 10, '', '', None)
    for volume_id in volume_ids:
        db.volume_destroy(self.context, volume_id)
def test_too_many_volumes(self):
    volume_ids = []
    for i in range(FLAGS.quota_volumes):
        volume_id = self._create_volume()
        volume_ids.append(volume_id)
    self.assertRaises(
        quota.QuotaError,
        volume.API().create,
        self.context,
        size=10,
        name="",
        description="",
    )
    for volume_id in volume_ids:
        db.volume_destroy(self.context, volume_id)
def test_update_of_volume_wont_update_private_fields(self):
    vol = db.volume_create(self.context, {})
    self.cloud.update_volume(self.context,
                             ec2utils.id_to_ec2_id(vol['id'], 'vol-%08x'),
                             mountpoint='/not/here')
    vol = db.volume_get(self.context, vol['id'])
    self.assertEqual(None, vol['mountpoint'])
    db.volume_destroy(self.context, vol['id'])
def test_update_of_volume_wont_update_private_fields(self):
    vol = db.volume_create(self.context, {})
    self.cloud.update_volume(self.context,
                             cloud.id_to_ec2_id(vol['id'], 'vol-%08x'),
                             mountpoint='/not/here')
    vol = db.volume_get(self.context, vol['id'])
    self.assertEqual(None, vol['mountpoint'])
    db.volume_destroy(self.context, vol['id'])
def test_too_many_volumes(self):
    volume_ids = []
    for i in range(FLAGS.quota_volumes):
        volume_id = self._create_volume()
        volume_ids.append(volume_id)
    self.assertRaises(
        exception.QuotaError, volume.API().create, self.context, 10, "", "", None
    )
    for volume_id in volume_ids:
        db.volume_destroy(self.context, volume_id)
def test_update_of_volume_display_fields(self):
    vol = db.volume_create(self.context, {})
    self.cloud.update_volume(self.context,
                             cloud.id_to_ec2_id(vol['id'], 'vol-%08x'),
                             display_name='c00l v0lum3')
    vol = db.volume_get(self.context, vol['id'])
    self.assertEqual('c00l v0lum3', vol['display_name'])
    db.volume_destroy(self.context, vol['id'])
def test_too_many_volumes(self):
    volume_ids = []
    for i in range(FLAGS.quota_volumes):
        volume_id = self._create_volume()
        volume_ids.append(volume_id)
    self.assertRaises(exception.QuotaError,
                      volume.API().create,
                      self.context, 10, '', '', None)
    for volume_id in volume_ids:
        db.volume_destroy(self.context, volume_id)
def test_too_many_gigabytes(self):
    volume_ids = []
    volume_id = self._create_volume(size=20)
    volume_ids.append(volume_id)
    self.assertRaises(quota.QuotaError,
                      self.cloud.create_volume,
                      self.context,
                      size=10)
    for volume_id in volume_ids:
        db.volume_destroy(self.context, volume_id)
def test_too_many_volumes(self):
    volume_ids = []
    for i in range(FLAGS.quota_volumes):
        volume_id = self._create_volume()
        volume_ids.append(volume_id)
    self.assertRaises(quota.QuotaError,
                      self.cloud.create_volume,
                      self.context,
                      size=10)
    for volume_id in volume_ids:
        db.volume_destroy(self.context, volume_id)
def test_parse_volume_info_raise_exception(self):
    """This shows how to test helper classes' methods."""
    stubs.stubout_session(self.stubs, stubs.FakeSessionForVolumeTests)
    session = xenapi_conn.XenAPISession("test_url", "root", "test_pass")
    helper = volume_utils.VolumeHelper
    helper.XenAPI = session.get_imported_xenapi()
    vol = self._create_volume()
    # oops, wrong mount point!
    self.assertRaises(
        volume_utils.StorageError, helper.parse_volume_info, vol["id"], "/dev/sd"
    )
    db.volume_destroy(context.get_admin_context(), vol["id"])
def test_parse_volume_info_raise_exception(self):
    """This shows how to test helper classes' methods."""
    stubs.stubout_session(self.stubs, stubs.FakeSessionForVolumeTests)
    session = xenapi_conn.XenAPISession('test_url', 'root', 'test_pass')
    helper = volume_utils.VolumeHelper
    helper.XenAPI = session.get_imported_xenapi()
    vol = self._create_volume()
    # oops, wrong mount point!
    self.assertRaises(volume_utils.StorageError,
                      helper.parse_volume_info,
                      vol['id'],
                      '/dev/sd')
    db.volume_destroy(context.get_admin_context(), vol['id'])
def test_too_many_gigabytes(self):
    volume_ids = []
    volume_id = self._create_volume(size=20)
    volume_ids.append(volume_id)
    self.assertRaises(quota.QuotaError,
                      volume.API().create,
                      self.context,
                      size=10,
                      name='',
                      description='')
    for volume_id in volume_ids:
        db.volume_destroy(self.context, volume_id)
def test_too_many_volumes(self):
    """Ensure that NoMoreTargets is raised when we run out of volumes."""
    vols = []
    total_slots = FLAGS.iscsi_num_targets
    for _index in xrange(total_slots):
        volume = self._create_volume()
        self.volume.create_volume(self.context, volume["id"])
        vols.append(volume["id"])
    volume = self._create_volume()
    self.assertRaises(
        db.NoMoreTargets, self.volume.create_volume, self.context, volume["id"]
    )
    db.volume_destroy(context.get_admin_context(), volume["id"])
    for volume_id in vols:
        self.volume.delete_volume(self.context, volume_id)
def test_too_many_gigabytes(self):
    volume_ids = []
    volume_id = self._create_volume(size=20)
    volume_ids.append(volume_id)
    self.assertRaises(exception.QuotaError,
                      volume.API().create,
                      self.context,
                      size=10,
                      snapshot_id=None,
                      name='',
                      description='')
    for volume_id in volume_ids:
        db.volume_destroy(self.context, volume_id)
def test_too_many_volumes(self):
    volume_ids = []
    for i in range(FLAGS.quota_volumes):
        volume_id = self._create_volume()
        volume_ids.append(volume_id)
    self.assertRaises(quota.QuotaError,
                      volume.API().create,
                      self.context,
                      size=10,
                      name='',
                      description='')
    for volume_id in volume_ids:
        db.volume_destroy(self.context, volume_id)
def test_least_busy_host_gets_volume(self):
    """Ensures the host with fewer gigabytes gets the next one"""
    volume1 = self.start_service("volume", host="host1")
    volume2 = self.start_service("volume", host="host2")
    volume_id1 = self._create_volume()
    volume1.create_volume(self.context, volume_id1)
    volume_id2 = self._create_volume()
    host = self.scheduler.driver.schedule_create_volume(self.context, volume_id2)
    self.assertEqual(host, "host2")
    volume1.delete_volume(self.context, volume_id1)
    db.volume_destroy(self.context, volume_id2)
    volume1.kill()
    volume2.kill()
def test_too_many_volumes(self):
    """Ensure that NoMoreTargets is raised when we run out of volumes."""
    vols = []
    total_slots = FLAGS.iscsi_num_targets
    for _index in xrange(total_slots):
        volume_id = self._create_volume()
        self.volume.create_volume(self.context, volume_id)
        vols.append(volume_id)
    volume_id = self._create_volume()
    self.assertRaises(db.NoMoreTargets,
                      self.volume.create_volume,
                      self.context,
                      volume_id)
    db.volume_destroy(context.get_admin_context(), volume_id)
    for volume_id in vols:
        self.volume.delete_volume(self.context, volume_id)
def test_least_busy_host_gets_volume(self):
    """Ensures the host with fewer gigabytes gets the next one"""
    volume1 = self.start_service('volume', host='host1')
    volume2 = self.start_service('volume', host='host2')
    volume_id1 = self._create_volume()
    volume1.create_volume(self.context, volume_id1)
    volume_id2 = self._create_volume()
    host = self.scheduler.driver.schedule_create_volume(
        self.context, volume_id2)
    self.assertEqual(host, 'host2')
    volume1.delete_volume(self.context, volume_id1)
    db.volume_destroy(self.context, volume_id2)
    volume1.kill()
    volume2.kill()
def test_live_migration_raises_exception(self):
    """Confirms recover method is called when exceptions are raised."""
    # Skip if non-libvirt environment
    if not self.lazy_load_library_exists():
        return

    # Preparing data
    self.compute = utils.import_object(FLAGS.compute_manager)
    instance_dict = {'host': 'fake',
                     'state': power_state.RUNNING,
                     'state_description': 'running'}
    instance_ref = db.instance_create(self.context, self.test_instance)
    instance_ref = db.instance_update(self.context, instance_ref['id'],
                                      instance_dict)
    vol_dict = {'status': 'migrating', 'size': 1}
    volume_ref = db.volume_create(self.context, vol_dict)
    db.volume_attached(self.context, volume_ref['id'], instance_ref['id'],
                       '/dev/fake')

    # Preparing mocks
    vdmock = self.mox.CreateMock(libvirt.virDomain)
    self.mox.StubOutWithMock(vdmock, "migrateToURI")
    vdmock.migrateToURI(FLAGS.live_migration_uri % 'dest',
                        mox.IgnoreArg(),
                        None,
                        FLAGS.live_migration_bandwidth).\
                            AndRaise(libvirt.libvirtError('ERR'))

    def fake_lookup(instance_name):
        if instance_name == instance_ref.name:
            return vdmock

    self.create_fake_libvirt_mock(lookupByName=fake_lookup)

    # Start test
    self.mox.ReplayAll()
    conn = libvirt_conn.LibvirtConnection(False)
    self.assertRaises(libvirt.libvirtError,
                      conn._live_migration,
                      self.context, instance_ref, 'dest', '',
                      self.compute.recover_live_migration)

    instance_ref = db.instance_get(self.context, instance_ref['id'])
    self.assertTrue(instance_ref['state_description'] == 'running')
    self.assertTrue(instance_ref['state'] == power_state.RUNNING)
    volume_ref = db.volume_get(self.context, volume_ref['id'])
    self.assertTrue(volume_ref['status'] == 'in-use')

    db.volume_destroy(self.context, volume_ref['id'])
    db.instance_destroy(self.context, instance_ref['id'])
def test_create_iscsi_storage(self):
    """This shows how to test helper classes' methods."""
    stubs.stubout_session(self.stubs, stubs.FakeSessionForVolumeTests)
    session = xenapi_conn.XenAPISession("test_url", "root", "test_pass")
    helper = volume_utils.VolumeHelper
    helper.XenAPI = session.get_imported_xenapi()
    vol = self._create_volume()
    info = helper.parse_volume_info(vol["id"], "/dev/sdc")
    label = "SR-%s" % vol["id"]
    description = "Test-SR"
    sr_ref = helper.create_iscsi_storage(session, info, label, description)
    srs = xenapi_fake.get_all("SR")
    self.assertEqual(sr_ref, srs[0])
    db.volume_destroy(context.get_admin_context(), vol["id"])
def test_create_iscsi_storage(self):
    """This shows how to test helper classes' methods."""
    stubs.stubout_session(self.stubs, stubs.FakeSessionForVolumeTests)
    session = xenapi_conn.XenAPISession('test_url', 'root', 'test_pass')
    helper = volume_utils.VolumeHelper
    helper.XenAPI = session.get_imported_xenapi()
    vol = self._create_volume()
    info = helper.parse_volume_info(vol['id'], '/dev/sdc')
    label = 'SR-%s' % vol['id']
    description = 'Test-SR'
    sr_ref = helper.create_iscsi_storage(session, info, label, description)
    srs = xenapi_fake.get_all('SR')
    self.assertEqual(sr_ref, srs[0])
    db.volume_destroy(context.get_admin_context(), vol['id'])
def test_post_live_migration_working_correctly(self):
    """Confirm post_live_migration() works as expected."""
    dest = 'desthost'
    flo_addr = '1.2.1.2'

    # Preparing data
    c = context.get_admin_context()
    instance_id = self._create_instance()
    i_ref = db.instance_get(c, instance_id)
    db.instance_update(c, i_ref['id'], {'state_description': 'migrating',
                                        'state': power_state.PAUSED})
    v_ref = db.volume_create(c, {'size': 1, 'instance_id': instance_id})
    fix_addr = db.fixed_ip_create(c, {'address': '1.1.1.1',
                                      'instance_id': instance_id})
    fix_ref = db.fixed_ip_get_by_address(c, fix_addr)
    flo_ref = db.floating_ip_create(c, {'address': flo_addr,
                                        'fixed_ip_id': fix_ref['id']})
    # reload is necessary before setting mocks
    i_ref = db.instance_get(c, instance_id)

    # Preparing mocks
    self.mox.StubOutWithMock(self.compute.volume_manager,
                             'remove_compute_volume')
    for v in i_ref['volumes']:
        self.compute.volume_manager.remove_compute_volume(c, v['id'])
    self.mox.StubOutWithMock(self.compute.driver, 'unfilter_instance')
    self.compute.driver.unfilter_instance(i_ref, [])

    # executing
    self.mox.ReplayAll()
    ret = self.compute.post_live_migration(c, i_ref, dest)

    # make sure all data is rewritten to dest
    i_ref = db.instance_get(c, i_ref['id'])
    c1 = (i_ref['host'] == dest)
    flo_refs = db.floating_ip_get_all_by_host(c, dest)
    c2 = (len(flo_refs) != 0 and flo_refs[0]['address'] == flo_addr)

    # post operation
    self.assertTrue(c1 and c2)
    db.instance_destroy(c, instance_id)
    db.volume_destroy(c, v_ref['id'])
    db.floating_ip_destroy(c, flo_addr)
def test_describe_volumes(self):
    """Makes sure describe_volumes works and filters results."""
    vol1 = db.volume_create(self.context, {})
    vol2 = db.volume_create(self.context, {})
    result = self.cloud.describe_volumes(self.context)
    self.assertEqual(len(result['volumeSet']), 2)
    volume_id = ec2utils.id_to_ec2_id(vol2['id'], 'vol-%08x')
    result = self.cloud.describe_volumes(self.context,
                                         volume_id=[volume_id])
    self.assertEqual(len(result['volumeSet']), 1)
    self.assertEqual(
        ec2utils.ec2_id_to_id(result['volumeSet'][0]['volumeId']),
        vol2['id'])
    db.volume_destroy(self.context, vol1['id'])
    db.volume_destroy(self.context, vol2['id'])
def test_describe_volumes(self):
    """Makes sure describe_volumes works and filters results."""
    vol1 = db.volume_create(self.context, {})
    vol2 = db.volume_create(self.context, {})
    result = self.cloud.describe_volumes(self.context)
    self.assertEqual(len(result['volumeSet']), 2)
    volume_id = cloud.id_to_ec2_id(vol2['id'], 'vol-%08x')
    result = self.cloud.describe_volumes(self.context,
                                         volume_id=[volume_id])
    self.assertEqual(len(result['volumeSet']), 1)
    self.assertEqual(
        cloud.ec2_id_to_id(result['volumeSet'][0]['volumeId']),
        vol2['id'])
    db.volume_destroy(self.context, vol1['id'])
    db.volume_destroy(self.context, vol2['id'])
def test_live_migration_raises_exception(self):
    """Confirms recover method is called when exceptions are raised."""
    # Skip if non-libvirt environment
    if not self.lazy_load_library_exists():
        return

    # Preparing data
    self.compute = utils.import_object(FLAGS.compute_manager)
    instance_dict = {'host': 'fake',
                     'state': power_state.RUNNING,
                     'state_description': 'running'}
    instance_ref = db.instance_create(self.context, self.test_instance)
    instance_ref = db.instance_update(self.context, instance_ref['id'],
                                      instance_dict)
    vol_dict = {'status': 'migrating', 'size': 1}
    volume_ref = db.volume_create(self.context, vol_dict)
    db.volume_attached(self.context, volume_ref['id'], instance_ref['id'],
                       '/dev/fake')

    # Preparing mocks
    vdmock = self.mox.CreateMock(libvirt.virDomain)
    self.mox.StubOutWithMock(vdmock, "migrateToURI")
    vdmock.migrateToURI(FLAGS.live_migration_uri % 'dest',
                        mox.IgnoreArg(),
                        None,
                        FLAGS.live_migration_bandwidth).\
                            AndRaise(libvirt.libvirtError('ERR'))

    def fake_lookup(instance_name):
        if instance_name == instance_ref.name:
            return vdmock

    self.create_fake_libvirt_mock(lookupByName=fake_lookup)

    # Start test
    self.mox.ReplayAll()
    conn = libvirt_conn.LibvirtConnection(False)
    self.assertRaises(libvirt.libvirtError,
                      conn._live_migration,
                      self.context, instance_ref, 'dest', '',
                      self.compute.recover_live_migration)

    instance_ref = db.instance_get(self.context, instance_ref['id'])
    self.assertTrue(instance_ref['state_description'] == 'running')
    self.assertTrue(instance_ref['state'] == power_state.RUNNING)
    volume_ref = db.volume_get(self.context, volume_ref['id'])
    self.assertTrue(volume_ref['status'] == 'in-use')

    db.volume_destroy(self.context, volume_ref['id'])
    db.instance_destroy(self.context, instance_ref['id'])
def test_least_busy_host_gets_volume(self):
    """Ensures the host with fewer gigabytes gets the next one"""
    volume1 = service.Service('host1',
                              'nova-volume',
                              'volume',
                              FLAGS.volume_manager)
    volume1.start()
    volume2 = service.Service('host2',
                              'nova-volume',
                              'volume',
                              FLAGS.volume_manager)
    volume2.start()
    volume_id1 = self._create_volume()
    volume1.create_volume(self.context, volume_id1)
    volume_id2 = self._create_volume()
    host = self.scheduler.driver.schedule_create_volume(
        self.context, volume_id2)
    self.assertEqual(host, 'host2')
    volume1.delete_volume(self.context, volume_id1)
    db.volume_destroy(self.context, volume_id2)
def test_post_live_migration_working_correctly(self):
    """Confirm post_live_migration() works as expected."""
    dest = 'desthost'
    flo_addr = '1.2.1.2'

    # Preparing data
    c = context.get_admin_context()
    instance_id = self._create_instance()
    i_ref = db.instance_get(c, instance_id)
    db.instance_update(c, i_ref['id'],
                       {'state_description': 'migrating',
                        'state': power_state.PAUSED})
    v_ref = db.volume_create(c, {'size': 1, 'instance_id': instance_id})
    fix_addr = db.fixed_ip_create(c, {'address': '1.1.1.1',
                                      'instance_id': instance_id})
    fix_ref = db.fixed_ip_get_by_address(c, fix_addr)
    flo_ref = db.floating_ip_create(c, {'address': flo_addr,
                                        'fixed_ip_id': fix_ref['id']})
    # reload is necessary before setting mocks
    i_ref = db.instance_get(c, instance_id)

    # Preparing mocks
    self.mox.StubOutWithMock(self.compute.volume_manager,
                             'remove_compute_volume')
    for v in i_ref['volumes']:
        self.compute.volume_manager.remove_compute_volume(c, v['id'])
    self.mox.StubOutWithMock(self.compute.driver, 'unfilter_instance')
    self.compute.driver.unfilter_instance(i_ref, [])

    # executing
    self.mox.ReplayAll()
    ret = self.compute.post_live_migration(c, i_ref, dest)

    # make sure all data is rewritten to dest
    i_ref = db.instance_get(c, i_ref['id'])
    c1 = (i_ref['host'] == dest)
    flo_refs = db.floating_ip_get_all_by_host(c, dest)
    c2 = (len(flo_refs) != 0 and flo_refs[0]['address'] == flo_addr)

    # post operation
    self.assertTrue(c1 and c2)
    db.instance_destroy(c, instance_id)
    db.volume_destroy(c, v_ref['id'])
    db.floating_ip_destroy(c, flo_addr)
def _clone_volume_from_image(self, expected_status, clone_works=True):
    """Try to clone a volume from an image, and check the status
    afterwards."""
    def fake_clone_image(volume, image_location):
        pass

    def fake_clone_error(volume, image_location):
        raise exception.NovaException()

    self.stubs.Set(self.volume.driver, '_is_cloneable', lambda x: True)
    if clone_works:
        self.stubs.Set(self.volume.driver, 'clone_image', fake_clone_image)
    else:
        self.stubs.Set(self.volume.driver, 'clone_image', fake_clone_error)

    image_id = 'c905cedb-7281-47e4-8a62-f26bc5fc4c77'
    volume_id = 1
    # creating volume testdata
    db.volume_create(self.context,
                     {'id': volume_id,
                      'updated_at': timeutils.utcnow(),
                      'display_description': 'Test Desc',
                      'size': 20,
                      'status': 'creating',
                      'instance_uuid': None,
                      'host': 'dummy'})
    try:
        if clone_works:
            self.volume.create_volume(self.context,
                                      volume_id,
                                      image_id=image_id)
        else:
            self.assertRaises(exception.NovaException,
                              self.volume.create_volume,
                              self.context,
                              volume_id,
                              image_id=image_id)

        volume = db.volume_get(self.context, volume_id)
        self.assertEqual(volume['status'], expected_status)
    finally:
        # cleanup
        db.volume_destroy(self.context, volume_id)
def _create_volume_from_image(self, expected_status,
                              fakeout_copy_image_to_volume=False):
    """Call copy image to volume and test the status of the volume
    after copying the image to the volume."""
    def fake_local_path(volume):
        return dst_path

    def fake_copy_image_to_volume(context, volume, image_id):
        pass

    dst_fd, dst_path = tempfile.mkstemp()
    os.close(dst_fd)
    self.stubs.Set(self.volume.driver, 'local_path', fake_local_path)
    if fakeout_copy_image_to_volume:
        self.stubs.Set(self.volume, '_copy_image_to_volume',
                       fake_copy_image_to_volume)

    image_id = 'c905cedb-7281-47e4-8a62-f26bc5fc4c77'
    volume_id = 1
    # creating volume testdata
    db.volume_create(self.context,
                     {'id': volume_id,
                      'updated_at': datetime.datetime(1, 1, 1, 1, 1, 1),
                      'display_description': 'Test Desc',
                      'size': 20,
                      'status': 'creating',
                      'instance_uuid': None,
                      'host': 'dummy'})
    try:
        self.volume.create_volume(self.context,
                                  volume_id,
                                  image_id=image_id)

        volume = db.volume_get(self.context, volume_id)
        self.assertEqual(volume['status'], expected_status)
    finally:
        # cleanup
        db.volume_destroy(self.context, volume_id)
        os.unlink(dst_path)
def test_create_snapshot_force(self):
    """Test snapshot in use can be created forcibly."""

    def fake_cast(ctxt, topic, msg):
        pass
    self.stubs.Set(rpc, 'cast', fake_cast)

    volume_id = self._create_volume()
    self.volume.create_volume(self.context, volume_id)
    db.volume_attached(self.context, volume_id, self.instance_id,
                       '/dev/sda1')

    volume_api = volume.api.API()
    self.assertRaises(exception.ApiError,
                      volume_api.create_snapshot,
                      self.context, volume_id,
                      'fake_name', 'fake_description')
    snapshot_ref = volume_api.create_snapshot_force(self.context,
                                                    volume_id,
                                                    'fake_name',
                                                    'fake_description')
    db.snapshot_destroy(self.context, snapshot_ref['id'])
    db.volume_destroy(self.context, volume_id)
def test_create_snapshot_force(self):
    """Test snapshot in use can be created forcibly."""

    def fake_cast(ctxt, topic, msg):
        pass

    self.stubs.Set(rpc, "cast", fake_cast)

    volume = self._create_volume()
    self.volume.create_volume(self.context, volume["id"])
    db.volume_attached(self.context, volume["id"], self.instance_uuid, "/dev/sda1")

    volume_api = nova.volume.api.API()
    volume = volume_api.get(self.context, volume["id"])
    self.assertRaises(
        exception.InvalidVolume,
        volume_api.create_snapshot,
        self.context,
        volume,
        "fake_name",
        "fake_description",
    )
    snapshot_ref = volume_api.create_snapshot_force(
        self.context, volume, "fake_name", "fake_description"
    )
    db.snapshot_destroy(self.context, snapshot_ref["id"])
    db.volume_destroy(self.context, volume["id"])
def test_least_busy_host_gets_volume(self):
    """Ensures the host with fewer gigabytes gets the next one"""
    volume1 = service.Service('host1',
                              'nova-volume',
                              'volume',
                              FLAGS.volume_manager)
    volume1.start()
    volume2 = service.Service('host2',
                              'nova-volume',
                              'volume',
                              FLAGS.volume_manager)
    volume2.start()
    volume_id1 = self._create_volume()
    volume1.create_volume(self.context, volume_id1)
    volume_id2 = self._create_volume()
    host = self.scheduler.driver.schedule_create_volume(self.context,
                                                        volume_id2)
    self.assertEqual(host, 'host2')
    volume1.delete_volume(self.context, volume_id1)
    db.volume_destroy(self.context, volume_id2)
def test_live_migration_src_check_volume_node_not_alive(self):
    """Raise exception when volume node is not alive."""
    instance_id = self._create_instance()
    i_ref = db.instance_get(self.context, instance_id)
    dic = {'instance_id': instance_id, 'size': 1}
    v_ref = db.volume_create(self.context, {'instance_id': instance_id,
                                            'size': 1})
    t1 = utils.utcnow() - datetime.timedelta(1)
    dic = {'created_at': t1, 'updated_at': t1, 'binary': 'nova-volume',
           'topic': 'volume', 'report_count': 0}
    s_ref = db.service_create(self.context, dic)

    self.assertRaises(exception.VolumeServiceUnavailable,
                      self.scheduler.driver.schedule_live_migration,
                      self.context, instance_id, i_ref['host'])

    db.instance_destroy(self.context, instance_id)
    db.service_destroy(self.context, s_ref['id'])
    db.volume_destroy(self.context, v_ref['id'])
def test_notify_usage_exists(self):
    """Ensure 'exists' notification generates appropriate usage data."""
    volume_id = self._create_volume()
    volume = db.volume_get(self.context, volume_id)
    volume_utils.notify_usage_exists(self.context, volume)
    self.assertEquals(len(test_notifier.NOTIFICATIONS), 1)
    msg = test_notifier.NOTIFICATIONS[0]
    self.assertEquals(msg['priority'], 'INFO')
    self.assertEquals(msg['event_type'], 'volume.exists')
    payload = msg['payload']
    self.assertEquals(payload['tenant_id'], self.project_id)
    self.assertEquals(payload['user_id'], self.user_id)
    self.assertEquals(payload['snapshot_id'], self.snapshot_id)
    self.assertEquals(payload['volume_id'], volume.id)
    self.assertEquals(payload['size'], self.volume_size)
    for attr in ('display_name', 'created_at', 'launched_at',
                 'status', 'audit_period_beginning',
                 'audit_period_ending'):
        self.assertTrue(attr in payload,
                        msg="Key %s not in payload" % attr)
    db.volume_destroy(context.get_admin_context(), volume['id'])
def test_scheduler_live_migration_with_volume(self):
    """scheduler_live_migration() works as expected.

    Also, checks instance state is changed from 'running' -> 'migrating'.
    """
    instance_id = self._create_instance()
    i_ref = db.instance_get(self.context, instance_id)
    dic = {'instance_id': instance_id, 'size': 1}
    v_ref = db.volume_create(self.context, dic)

    # cannot check 2nd argument b/c the address of the instance object
    # differs.
    driver_i = self.scheduler.driver
    nocare = mox.IgnoreArg()
    self.mox.StubOutWithMock(driver_i, '_live_migration_src_check')
    self.mox.StubOutWithMock(driver_i, '_live_migration_dest_check')
    self.mox.StubOutWithMock(driver_i, '_live_migration_common_check')
    driver_i._live_migration_src_check(nocare, nocare)
    driver_i._live_migration_dest_check(nocare, nocare, i_ref['host'])
    driver_i._live_migration_common_check(nocare, nocare, i_ref['host'])
    self.mox.StubOutWithMock(rpc, 'cast', use_mock_anything=True)
    kwargs = {'instance_id': instance_id, 'dest': i_ref['host']}
    rpc.cast(self.context,
             db.queue_get_for(nocare, FLAGS.compute_topic, i_ref['host']),
             {"method": 'live_migration', "args": kwargs})

    self.mox.ReplayAll()
    self.scheduler.live_migration(self.context, FLAGS.compute_topic,
                                  instance_id=instance_id,
                                  dest=i_ref['host'])

    i_ref = db.instance_get(self.context, instance_id)
    self.assertTrue(i_ref['state_description'] == 'migrating')
    db.instance_destroy(self.context, instance_id)
    db.volume_destroy(self.context, v_ref['id'])
    t1 = datetime.datetime.utcnow() - datetime.timedelta(1)
    dic = {'created_at': t1, 'updated_at': t1, 'binary': 'nova-volume',
           'topic': 'volume', 'report_count': 0}
    s_ref = db.service_create(self.context, dic)

    try:
        self.scheduler.driver.schedule_live_migration(self.context,
                                                      instance_id,
                                                      i_ref['host'])
    except exception.Invalid, e:
        c = (e.message.find('volume node is not alive') >= 0)
        self.assertTrue(c)

    db.instance_destroy(self.context, instance_id)
    db.service_destroy(self.context, s_ref['id'])
    db.volume_destroy(self.context, v_ref['id'])

def test_live_migration_src_check_compute_node_not_alive(self):
    """Confirms src-compute node is alive."""
    instance_id = self._create_instance()
    i_ref = db.instance_get(self.context, instance_id)
    t = datetime.datetime.utcnow() - datetime.timedelta(10)
    s_ref = self._create_compute_service(created_at=t, updated_at=t,
                                         host=i_ref['host'])

    try:
        self.scheduler.driver._live_migration_src_check(self.context,
                                                        i_ref)
    except exception.Invalid, e:
        c = (e.message.find('is not alive') >= 0)