def _check_system_vms(self, system_vms, primary_storage_id):
    """Verify the SolidFire root volume backing each system VM.

    For every system VM: confirm its root volume is 'Ready' in CloudStack,
    present among the active SolidFire volumes, sized/IOPS-checked against
    the CloudStack volume, matched against the system offering's IOPS, in
    the cluster's volume access group, and exposed as a XenServer SR.
    """
    active_volumes = sf_util.get_active_sf_volumes(self.sfe)
    vag_id = sf_util.get_vag_id(self.cs_api, self.cluster.id, primary_storage_id, self)

    for vm in system_vms:
        root_volume = self._get_root_volume_for_system_vm(vm.id, 'Ready')
        sf_root = sf_util.check_and_get_sf_volume(active_volumes, root_volume.name, self)

        expected_size = sf_util.get_volume_size_with_hsr(self.cs_api, root_volume, self)
        sf_util.check_size_and_iops(sf_root, root_volume, expected_size, self)

        self._check_iops_against_iops_of_system_offering(root_volume, self.testdata[TestData.systemOffering])

        sf_util.check_vag(sf_root, vag_id, self)

        # The XenServer SR name is derived from the volume's IQN.
        sf_util.check_xen_sr(sf_util.format_iqn(sf_root.iqn), self.xen_session, self)
def _migrate_and_verify(self, virtual_machine, dest_host, cs_root_volume, cs_data_volume, sf_account_id, src_sf_root_volume, src_sf_data_volume, src_xen_session, dest_xen_session):
    """Migrate the VM (with storage) to dest_host and verify both volumes.

    Returns the post-migration SolidFire (root, data) volume pair.
    """
    # Pre-migration sanity: CloudStack and SolidFire views must agree.
    self._verifyFields(cs_root_volume, src_sf_root_volume)
    self._verifyFields(cs_data_volume, src_sf_data_volume)

    virtual_machine.migrate_vm_with_volume(self.apiClient, dest_host.id)

    # Refresh the CloudStack view of both volumes after the migration.
    cs_root_volume = self._get_updated_cs_volume(cs_root_volume.id)
    cs_data_volume = self._get_updated_cs_volume(cs_data_volume.id)

    account_volumes = sf_util.get_active_sf_volumes(self.sfe, sf_account_id)

    migrated_root = sf_util.check_and_get_sf_volume(account_volumes, cs_root_volume.name, self)
    migrated_data = sf_util.check_and_get_sf_volume(account_volumes, cs_data_volume.name, self)

    self._verifyFields(cs_root_volume, migrated_root)
    self._verifyFields(cs_data_volume, migrated_data)

    self._verify_no_basic_volume_details()

    # Compare the source copy with the migrated copy of each disk; run each
    # verification for the root pair first, then the data pair, matching the
    # original check ordering exactly.
    pairs = ((src_sf_root_volume, migrated_root), (src_sf_data_volume, migrated_data))

    for src_vol, dest_vol in pairs:
        self._verify_different_volume_access_groups(src_vol, dest_vol)

    for src_vol, dest_vol in pairs:
        self._verify_same_account(src_vol, dest_vol)

    for src_vol, dest_vol in pairs:
        self._verifySfVolumeIds(src_vol, dest_vol)

    for src_vol, dest_vol in pairs:
        self._verify_xenserver_state(src_xen_session, src_vol, dest_xen_session, dest_vol)

    return migrated_root, migrated_data
def _fail_migrate_and_verify(self, virtual_machine, dest_host, cs_root_volume, cs_data_volume, sf_account_id, src_sf_root_volume, src_sf_data_volume, src_xen_session, dest_xen_session):
    """Attempt a VM-with-storage migration that is expected to fail.

    Verifies that, after the failed migration, the CloudStack volumes still
    match the original SolidFire volumes, that the destination-side copies
    are inactive, and that XenServer state is consistent. Raises
    MigrationException if the migration unexpectedly succeeds.
    """
    # Pre-migration sanity: CloudStack and SolidFire views of each volume agree.
    self._verifyFields(cs_root_volume, src_sf_root_volume)
    self._verifyFields(cs_data_volume, src_sf_data_volume)

    # Local marker exception so an unexpected migration success can be told
    # apart from the (expected) migration error caught below.
    class MigrationException(Exception):
        def __init__(self, *args, **kwargs):
            Exception.__init__(self, *args, **kwargs)

    try:
        virtual_machine.migrate_vm_with_volume(self.apiClient, dest_host.id)

        raise MigrationException("The migration did not fail (as expected).")
    except MigrationException:
        raise
    except Exception:
        # The migration failing is the expected outcome; swallow the error.
        pass

    self._verify_no_basic_volume_details()

    # Refresh the CloudStack volume state; it must still match the *source*
    # SolidFire volumes since the migration did not take effect.
    cs_root_volume_refreshed = self._get_updated_cs_volume(cs_root_volume.id)
    cs_data_volume_refreshed = self._get_updated_cs_volume(cs_data_volume.id)

    self._verifyFields(cs_root_volume_refreshed, src_sf_root_volume)
    self._verifyFields(cs_data_volume_refreshed, src_sf_data_volume)

    # Any destination-side copies should be among the *inactive* volumes.
    sf_volumes = sf_util.get_not_active_sf_volumes(self.sfe, sf_account_id)

    dest_sf_root_volume = sf_util.check_and_get_sf_volume(sf_volumes, cs_root_volume.name, self)
    dest_sf_data_volume = sf_util.check_and_get_sf_volume(sf_volumes, cs_data_volume.name, self)

    self._verify_xenserver_state(dest_xen_session, dest_sf_root_volume, src_xen_session, src_sf_root_volume)
    self._verify_xenserver_state(dest_xen_session, dest_sf_data_volume, src_xen_session, src_sf_data_volume)
def test_01_storage_migrate_root_and_data_disks(self):
    """Migrate a VM with root and data disks to another host, then back again."""
    src_host, dest_host = self._get_source_and_dest_hosts()

    virtual_machine = VirtualMachine.create(
        self.apiClient,
        self.testdata[TestData.virtualMachine],
        accountid=self.account.name,
        zoneid=self.zone.id,
        serviceofferingid=self.compute_offering_1.id,
        templateid=self.template.id,
        domainid=self.domain.id,
        hostid=src_host.id,
        startvm=True)

    self.cleanup.append(virtual_machine)

    cs_root_volume = list_volumes(self.apiClient, listall=True, virtualmachineid=virtual_machine.id)[0]

    sf_account_id = sf_util.get_sf_account_id(
        self.cs_api,
        self.account.id,
        self.primary_storage.id,
        self,
        TestVMMigrationWithStorage._sf_account_id_should_be_non_zero_int_err_msg)

    # Locate the SolidFire volume backing the root disk.
    sf_volumes = sf_util.get_active_sf_volumes(self.sfe, sf_account_id)
    sf_root_volume = sf_util.check_and_get_sf_volume(sf_volumes, cs_root_volume.name, self)

    cs_data_volume = Volume.create(
        self.apiClient,
        self.testdata[TestData.volume_1],
        account=self.account.name,
        domainid=self.domain.id,
        zoneid=self.zone.id,
        diskofferingid=self.disk_offering_1.id)

    self.cleanup.append(cs_data_volume)

    cs_data_volume = virtual_machine.attach_volume(self.apiClient, cs_data_volume)

    # Re-query after the attach so the data disk's SolidFire volume is visible.
    sf_volumes = sf_util.get_active_sf_volumes(self.sfe, sf_account_id)
    sf_data_volume = sf_util.check_and_get_sf_volume(sf_volumes, cs_data_volume.name, self)

    # First hop: src -> dest; the helper returns the migrated SolidFire volumes.
    sf_root_volume, sf_data_volume = self._migrate_and_verify(
        virtual_machine, dest_host, cs_root_volume, cs_data_volume, sf_account_id,
        sf_root_volume, sf_data_volume, self.xen_session_1, self.xen_session_2)

    # Second hop: swap roles and migrate back.
    src_host, dest_host = dest_host, src_host

    self._migrate_and_verify(virtual_machine, dest_host, cs_root_volume, cs_data_volume, sf_account_id,
                             sf_root_volume, sf_data_volume, self.xen_session_2, self.xen_session_1)
def test_01_storage_migrate_root_and_data_disks(self):
    """Migrate a VM with root and data disks to another host, then back again."""
    src_host, dest_host = self._get_source_and_dest_hosts()

    virtual_machine = VirtualMachine.create(
        self.apiClient,
        self.testdata[TestData.virtualMachine],
        accountid=self.account.name,
        zoneid=self.zone.id,
        serviceofferingid=self.compute_offering_1.id,
        templateid=self.template.id,
        domainid=self.domain.id,
        hostid=src_host.id,
        startvm=True
    )

    self.cleanup.append(virtual_machine)

    cs_root_volume = list_volumes(self.apiClient, listall=True, virtualmachineid=virtual_machine.id)[0]

    sf_account_id = sf_util.get_sf_account_id(self.cs_api, self.account.id, self.primary_storage.id, self,
                                              TestVMMigrationWithStorage._sf_account_id_should_be_non_zero_int_err_msg)

    # Locate the SolidFire volume backing the root disk.
    sf_volumes = sf_util.get_active_sf_volumes(self.sfe, sf_account_id)

    sf_root_volume = sf_util.check_and_get_sf_volume(sf_volumes, cs_root_volume.name, self)

    cs_data_volume = Volume.create(
        self.apiClient,
        self.testdata[TestData.volume_1],
        account=self.account.name,
        domainid=self.domain.id,
        zoneid=self.zone.id,
        diskofferingid=self.disk_offering_1.id
    )

    self.cleanup.append(cs_data_volume)

    cs_data_volume = virtual_machine.attach_volume(
        self.apiClient,
        cs_data_volume
    )

    # Re-query after the attach so the data disk's SolidFire volume is visible.
    sf_volumes = sf_util.get_active_sf_volumes(self.sfe, sf_account_id)

    sf_data_volume = sf_util.check_and_get_sf_volume(sf_volumes, cs_data_volume.name, self)

    # First hop: src -> dest; the helper returns the migrated SolidFire volumes.
    sf_root_volume, sf_data_volume = self._migrate_and_verify(virtual_machine, dest_host, cs_root_volume, cs_data_volume,
                                                              sf_account_id, sf_root_volume, sf_data_volume,
                                                              self.xen_session_1, self.xen_session_2)

    # Second hop: swap roles and migrate back.
    src_host, dest_host = dest_host, src_host

    self._migrate_and_verify(virtual_machine, dest_host, cs_root_volume, cs_data_volume, sf_account_id,
                             sf_root_volume, sf_data_volume, self.xen_session_2, self.xen_session_1)
def test_00_check_template_cache(self):
    """Verify the SolidFire template-cache volume: not in any VAG and owned
    by an account whose name ends with '_1'.

    Skipped (returns early) when resigning is not supported, since the
    template cache only exists in that configuration.
    """
    # Idiom fix: use a truthiness guard instead of comparing '== False'.
    if not self.supports_resign:
        return

    sf_volumes = self._get_active_sf_volumes()

    sf_volume = sf_util.check_and_get_sf_volume(sf_volumes, TestData.templateCacheName, self)

    self.assertEqual(
        len(sf_volume.volume_access_groups),
        0,
        "The volume should not be in a VAG."
    )

    sf_account_id = sf_volume.account_id

    sf_account = self.sfe.get_account_by_id(sf_account_id).account

    sf_account_name = sf_account.username

    # Idiom fix: assertTrue instead of assertEqual(<bool>, True).
    self.assertTrue(
        sf_account_name.endswith("_1"),
        "The template cache volume's account does not end with '_1'."
    )
def _check_system_vms(self, system_vms, primary_storage_id):
    """Verify the SolidFire root volume backing each system VM.

    Checks volume presence, size/IOPS, system-offering IOPS, VAG membership,
    and hypervisor-side attachment (XenServer SR or KVM access).
    """
    sf_active_volumes = sf_util.get_active_sf_volumes(self.sfe)

    sf_vag_id = sf_util.get_vag_id(self.cs_api, self.cluster.id, primary_storage_id, self)

    for system_vm in system_vms:
        cs_root_volume = self._get_root_volume_for_system_vm(system_vm.id, 'Ready')
        sf_root_volume = sf_util.check_and_get_sf_volume(sf_active_volumes, cs_root_volume.name, self)
        sf_volume_size = sf_util.get_volume_size_with_hsr(self.cs_api, cs_root_volume, self)

        sf_util.check_size_and_iops(sf_root_volume, cs_root_volume, sf_volume_size, self)

        self._check_iops_against_iops_of_system_offering(cs_root_volume, self.testdata[TestData.systemOffering])

        sf_util.check_vag(sf_root_volume, sf_vag_id, self)

        if TestData.hypervisor_type == TestData.xenServer:
            sr_name = sf_util.format_iqn(sf_root_volume.iqn)

            sf_util.check_xen_sr(sr_name, self.xen_session, self)
        elif TestData.hypervisor_type == TestData.kvm:
            list_hosts_response = list_hosts(
                self.apiClient,
                type="Routing"
            )

            # Bug fix: check_kvm_access_to_volume takes the username and
            # password as separate arguments (as in the deletion-verification
            # path of this suite), not the whole KVM test-data dict.
            kvm_login = self.testdata[TestData.kvm]

            sf_util.check_kvm_access_to_volume(sf_root_volume.iqn, list_hosts_response,
                                               kvm_login[TestData.username], kvm_login[TestData.password], self)
        else:
            self.assertTrue(False, "Invalid hypervisor type")
def _verify_managed_system_vm_deleted(self, cs_root_volume_name):
    """Verify storage cleanup after a managed system VM is deleted."""
    # The root volume should now be among the *inactive* SolidFire volumes.
    sf_not_active_volumes = sf_util.get_not_active_sf_volumes(self.sfe)

    sf_root_volume = sf_util.check_and_get_sf_volume(
        sf_not_active_volumes, cs_root_volume_name, self)

    self.assertEqual(len(sf_root_volume.volume_access_groups), 0,
                     "The volume should not be in a volume access group.")

    # Hypervisor-specific check that the volume is no longer attached; the
    # trailing False presumably means "expect absent" -- confirm in sf_util.
    if TestData.hypervisor_type == TestData.xenServer:
        sr_name = sf_util.format_iqn(sf_root_volume.iqn)

        sf_util.check_xen_sr(sr_name, self.xen_session, self, False)
    elif TestData.hypervisor_type == TestData.kvm:
        list_hosts_response = list_hosts(self.apiClient, type="Routing")

        kvm_login = self.testdata[TestData.kvm]

        sf_util.check_kvm_access_to_volume(sf_root_volume.iqn, list_hosts_response,
                                           kvm_login[TestData.username], kvm_login[TestData.password], self, False)
    else:
        self.assertTrue(False, "Invalid hypervisor type")
def _verify_managed_system_vm_deleted(self, cs_root_volume_name):
    """Confirm a deleted managed system VM's root volume is fully cleaned up.

    The volume must be inactive on the SolidFire cluster, belong to no
    volume access group, and no longer be visible on the hypervisor.
    """
    inactive_volumes = sf_util.get_not_active_sf_volumes(self.sfe)
    root_volume = sf_util.check_and_get_sf_volume(inactive_volumes, cs_root_volume_name, self)

    self.assertEqual(
        len(root_volume.volume_access_groups),
        0,
        "The volume should not be in a volume access group."
    )

    hypervisor = TestData.hypervisor_type

    if hypervisor == TestData.xenServer:
        # SR name is derived from the IQN; trailing False presumably means
        # "expect the SR to be absent" -- confirm in sf_util.
        sf_util.check_xen_sr(sf_util.format_iqn(root_volume.iqn), self.xen_session, self, False)
    elif hypervisor == TestData.kvm:
        routing_hosts = list_hosts(
            self.apiClient,
            type="Routing"
        )

        credentials = self.testdata[TestData.kvm]

        sf_util.check_kvm_access_to_volume(root_volume.iqn, routing_hosts,
                                           credentials[TestData.username], credentials[TestData.password], self, False)
    else:
        self.assertTrue(False, "Invalid hypervisor type")
def _migrate_and_verify_one_disk_only(self, virtual_machine, dest_host, cs_volume, sf_account_id, src_sf_volume, src_xen_session, dest_xen_session):
    """Migrate the VM to dest_host and verify a single volume's state.

    Returns the post-migration SolidFire volume.
    """
    # CloudStack and SolidFire views must agree before migrating.
    self._verifyFields(cs_volume, src_sf_volume)

    virtual_machine.migrate_vm_with_volume(self.apiClient, dest_host.id)

    refreshed_cs_volume = self._get_updated_cs_volume(cs_volume.id)

    active_volumes = sf_util.get_active_sf_volumes(self.sfe, sf_account_id)
    migrated_sf_volume = sf_util.check_and_get_sf_volume(active_volumes, refreshed_cs_volume.name, self)

    self._verifyFields(refreshed_cs_volume, migrated_sf_volume)

    self._verify_no_basic_volume_details()

    # Compare the source copy against the migrated copy.
    self._verify_different_volume_access_groups(src_sf_volume, migrated_sf_volume)
    self._verify_same_account(src_sf_volume, migrated_sf_volume)
    self._verifySfVolumeIds(src_sf_volume, migrated_sf_volume)
    self._verify_xenserver_state(src_xen_session, src_sf_volume, dest_xen_session, migrated_sf_volume)

    return migrated_sf_volume
def test_01_attach_new_volume_to_stopped_VM(self):
    """Attach a volume to a stopped virtual machine, then start VM"""
    self.virtual_machine.stop(self.apiClient)

    new_volume = Volume.create(
        self.apiClient,
        self.testdata[TestData.volume_2],
        account=self.account.name,
        domainid=self.domain.id,
        zoneid=self.zone.id,
        diskofferingid=self.disk_offering.id,
    )

    self.cleanup.append(new_volume)

    # The volume should exist in CloudStack before the attach.
    self._check_and_get_cs_volume(new_volume.id, self.testdata[TestData.volume_2][TestData.diskName])

    new_volume = self.virtual_machine.attach_volume(self.apiClient, new_volume)

    newvolume = self._check_and_get_cs_volume(new_volume.id, self.testdata[TestData.volume_2][TestData.diskName])

    self.virtual_machine.start(self.apiClient)

    vm = self._get_vm(self.virtual_machine.id)

    # The attach done while stopped must survive the VM start.
    self.assertEqual(newvolume.virtualmachineid, vm.id, TestVolumes._volume_vm_id_and_vm_id_do_not_match_err_msg)
    self.assertEqual(vm.state.lower(), "running", TestVolumes._vm_not_in_running_state_err_msg)

    sf_account_id = sf_util.get_sf_account_id(
        self.cs_api,
        self.account.id,
        self.primary_storage.id,
        self,
        TestVolumes._sf_account_id_should_be_non_zero_int_err_msg,
    )

    # Verify size with hypervisor snapshot reserve applied.
    sf_volume_size = sf_util.get_volume_size_with_hsr(self.cs_api, new_volume, self)

    self._verify_hsr(self.disk_offering.disksize, self.disk_offering.hypervisorsnapshotreserve, sf_volume_size)

    sf_vag_id = sf_util.get_vag_id(self.cs_api, self.cluster.id, self.primary_storage.id, self)

    sf_iscsi_name = sf_util.get_iqn(self.cs_api, new_volume, self)

    sf_volumes = self._get_active_sf_volumes(sf_account_id)

    sf_volume = sf_util.check_and_get_sf_volume(sf_volumes, newvolume.name, self)

    sf_util.check_size_and_iops(sf_volume, newvolume, sf_volume_size, self)

    sf_util.check_vag(sf_volume, sf_vag_id, self)

    self._check_xen_sr(sf_iscsi_name)

    # Detach volume
    new_volume = self.virtual_machine.detach_volume(self.apiClient, new_volume)
def _fail_migrate_and_verify(self, virtual_machine, dest_host, cs_root_volume, cs_data_volume, sf_account_id, src_sf_root_volume, src_sf_data_volume, src_xen_session, dest_xen_session):
    """Attempt a VM-with-storage migration that is expected to fail.

    Verifies that, after the failed migration, the CloudStack volumes still
    match the original SolidFire volumes, that the destination-side copies
    are inactive, and that XenServer state is consistent. Raises
    MigrationException if the migration unexpectedly succeeds.
    """
    # Pre-migration sanity: CloudStack and SolidFire views of each volume agree.
    self._verifyFields(cs_root_volume, src_sf_root_volume)
    self._verifyFields(cs_data_volume, src_sf_data_volume)

    # Local marker exception so an unexpected migration success can be told
    # apart from the (expected) migration error caught below.
    class MigrationException(Exception):
        def __init__(self, *args, **kwargs):
            Exception.__init__(self, *args, **kwargs)

    try:
        virtual_machine.migrate_vm_with_volume(self.apiClient, dest_host.id)

        raise MigrationException(
            "The migration did not fail (as expected).")
    except MigrationException:
        raise
    except Exception:
        # The migration failing is the expected outcome; swallow the error.
        pass

    self._verify_no_basic_volume_details()

    # Refresh the CloudStack volume state; it must still match the *source*
    # SolidFire volumes since the migration did not take effect.
    cs_root_volume_refreshed = self._get_updated_cs_volume(
        cs_root_volume.id)
    cs_data_volume_refreshed = self._get_updated_cs_volume(
        cs_data_volume.id)

    self._verifyFields(cs_root_volume_refreshed, src_sf_root_volume)
    self._verifyFields(cs_data_volume_refreshed, src_sf_data_volume)

    # Any destination-side copies should be among the *inactive* volumes.
    sf_volumes = sf_util.get_not_active_sf_volumes(self.sfe, sf_account_id)

    dest_sf_root_volume = sf_util.check_and_get_sf_volume(
        sf_volumes, cs_root_volume.name, self)
    dest_sf_data_volume = sf_util.check_and_get_sf_volume(
        sf_volumes, cs_data_volume.name, self)

    self._verify_xenserver_state(dest_xen_session, dest_sf_root_volume, src_xen_session, src_sf_root_volume)
    self._verify_xenserver_state(dest_xen_session, dest_sf_data_volume, src_xen_session, src_sf_data_volume)
def _verify_managed_system_vm_deleted(self, cs_root_volume_name):
    """Confirm a deleted managed system VM's root volume is fully cleaned up.

    The volume must be inactive on the SolidFire cluster, belong to no
    volume access group, and its XenServer SR must be checked with the
    trailing False flag (presumably "expect absent" -- confirm in sf_util).
    """
    inactive_volumes = sf_util.get_not_active_sf_volumes(self.sfe)
    root_volume = sf_util.check_and_get_sf_volume(inactive_volumes, cs_root_volume_name, self)

    self.assertEqual(
        len(root_volume.volume_access_groups),
        0,
        "The volume should not be in a volume access group."
    )

    sf_util.check_xen_sr(sf_util.format_iqn(root_volume.iqn), self.xen_session, self, False)
def test_01_upload_and_download_snapshot(self):
    """Upload a QCOW2 volume to managed storage, then test volume extraction/download."""
    list_volumes_response = list_volumes(
        self.apiClient,
        virtualmachineid=self.virtual_machine.id,
        listall=True
    )

    sf_util.check_list(list_volumes_response, 1, self, "There should only be one volume in this list.")

    vm_root_volume = list_volumes_response[0]

    ### Perform tests related to uploading a QCOW2 file to secondary storage and then moving it to managed storage

    volume_name = "Volume-A"
    services = {"format": TestData.file_type, "diskname": volume_name}

    uploaded_volume = Volume.upload(self.apiClient, services, self.zone.id,
                                    account=self.account.name, domainid=self.account.domainid,
                                    url=TestData.volume_url, diskofferingid=self.disk_offering.id)

    self._wait_for_volume_state(uploaded_volume.id, "Uploaded")

    uploaded_volume_id = sf_util.get_cs_volume_db_id(self.dbConnection, uploaded_volume)

    # While only uploaded, the volume has exactly one volume_store_ref row.
    result = self._get_volume_store_ref_row(uploaded_volume_id)

    self.assertEqual(
        len(result),
        1,
        TestUploadDownload.assertText
    )

    install_path = self._get_install_path(result[0][TestData.install_path_index])

    self._verify_uploaded_volume_present(install_path)

    # Attaching moves the uploaded volume onto managed (SolidFire) storage.
    uploaded_volume = self.virtual_machine.attach_volume(
        self.apiClient,
        uploaded_volume
    )

    uploaded_volume = sf_util.check_and_get_cs_volume(self, uploaded_volume.id, volume_name, self)

    sf_account_id = sf_util.get_sf_account_id(self.cs_api, self.account.id, self.primary_storage.id, self,
                                              "The SolidFire account ID should be a non-zero integer.")

    sf_volumes = sf_util.get_active_sf_volumes(self.sfe, sf_account_id)

    self.assertNotEqual(
        len(sf_volumes),
        0,
        "The length of the response for the SolidFire-volume query should not be zero."
    )

    sf_volume = sf_util.check_and_get_sf_volume(sf_volumes, uploaded_volume.name, self)

    sf_volume_size = sf_util.get_volume_size_with_hsr(self.cs_api, uploaded_volume, self)

    sf_util.check_size_and_iops(sf_volume, uploaded_volume, sf_volume_size, self)

    sf_vag_id = sf_util.get_vag_id(self.cs_api, self.cluster.id, self.primary_storage.id, self)

    sf_util.check_vag(sf_volume, sf_vag_id, self)

    # After the move to managed storage the volume_store_ref row is gone
    # and the file is removed from secondary storage.
    result = self._get_volume_store_ref_row(uploaded_volume_id)

    self.assertEqual(
        len(result),
        0,
        TestUploadDownload.assertText2
    )

    self._verify_uploaded_volume_not_present(install_path)

    ### Perform tests related to extracting the contents of a volume on managed storage to a QCOW2 file
    ### and downloading the file

    # Extraction must fail while the VM is running; only the expected error
    # text is tolerated, anything else is re-raised.
    try:
        # for data disk
        Volume.extract(self.apiClient, uploaded_volume.id, self.zone.id, TestData.download_mode)

        raise Exception("The volume extraction (for the data disk) did not fail (as expected).")
    except Exception as e:
        if TestUploadDownload.errorText in str(e):
            pass
        else:
            raise

    vm_root_volume_id = sf_util.get_cs_volume_db_id(self.dbConnection, vm_root_volume)

    try:
        # for root disk
        Volume.extract(self.apiClient, vm_root_volume.id, self.zone.id, TestData.download_mode)

        raise Exception("The volume extraction (for the root disk) did not fail (as expected).")
    except Exception as e:
        if TestUploadDownload.errorText in str(e):
            pass
        else:
            raise

    # With the VM stopped, extraction should succeed for all three cases.
    self.virtual_machine.stop(self.apiClient)

    self._extract_volume_and_verify(uploaded_volume_id,
                                    "Unable to locate the extracted file for the data disk (attached)")

    result = self._get_volume_store_ref_row(vm_root_volume_id)

    self.assertEqual(
        len(result),
        0,
        TestUploadDownload.assertText2
    )

    self._extract_volume_and_verify(vm_root_volume_id,
                                    "Unable to locate the extracted file for the root disk")

    uploaded_volume = self.virtual_machine.detach_volume(
        self.apiClient,
        uploaded_volume
    )

    self._extract_volume_and_verify(uploaded_volume_id,
                                    "Unable to locate the extracted file for the data disk (detached)")

    # Re-wrap the response object as a Volume so delete() is available.
    uploaded_volume = Volume(uploaded_volume.__dict__)

    uploaded_volume.delete(self.apiClient)
def test_01_upload_and_download_snapshot(self):
    """Upload a QCOW2 volume to managed storage, then test volume extraction/download."""
    list_volumes_response = list_volumes(
        self.apiClient,
        virtualmachineid=self.virtual_machine.id,
        listall=True)

    sf_util.check_list(list_volumes_response, 1, self, "There should only be one volume in this list.")

    vm_root_volume = list_volumes_response[0]

    ### Perform tests related to uploading a QCOW2 file to secondary storage and then moving it to managed storage

    volume_name = "Volume-A"
    services = {"format": TestData.file_type, "diskname": volume_name}

    uploaded_volume = Volume.upload(self.apiClient, services, self.zone.id,
                                    account=self.account.name, domainid=self.account.domainid,
                                    url=TestData.volume_url, diskofferingid=self.disk_offering.id)

    self._wait_for_volume_state(uploaded_volume.id, "Uploaded")

    uploaded_volume_id = sf_util.get_cs_volume_db_id(
        self.dbConnection, uploaded_volume)

    # While only uploaded, the volume has exactly one volume_store_ref row.
    result = self._get_volume_store_ref_row(uploaded_volume_id)

    self.assertEqual(len(result), 1, TestUploadDownload.assertText)

    install_path = self._get_install_path(
        result[0][TestData.install_path_index])

    self._verify_uploaded_volume_present(install_path)

    # Attaching moves the uploaded volume onto managed (SolidFire) storage.
    uploaded_volume = self.virtual_machine.attach_volume(
        self.apiClient, uploaded_volume)

    uploaded_volume = sf_util.check_and_get_cs_volume(
        self, uploaded_volume.id, volume_name, self)

    sf_account_id = sf_util.get_sf_account_id(
        self.cs_api,
        self.account.id,
        self.primary_storage.id,
        self,
        "The SolidFire account ID should be a non-zero integer.")

    sf_volumes = sf_util.get_active_sf_volumes(self.sfe, sf_account_id)

    self.assertNotEqual(
        len(sf_volumes),
        0,
        "The length of the response for the SolidFire-volume query should not be zero."
    )

    sf_volume = sf_util.check_and_get_sf_volume(sf_volumes, uploaded_volume.name, self)

    sf_volume_size = sf_util.get_volume_size_with_hsr(
        self.cs_api, uploaded_volume, self)

    sf_util.check_size_and_iops(sf_volume, uploaded_volume, sf_volume_size, self)

    sf_vag_id = sf_util.get_vag_id(self.cs_api, self.cluster.id, self.primary_storage.id, self)

    sf_util.check_vag(sf_volume, sf_vag_id, self)

    # After the move to managed storage the volume_store_ref row is gone
    # and the file is removed from secondary storage.
    result = self._get_volume_store_ref_row(uploaded_volume_id)

    self.assertEqual(len(result), 0, TestUploadDownload.assertText2)

    self._verify_uploaded_volume_not_present(install_path)

    ### Perform tests related to extracting the contents of a volume on managed storage to a QCOW2 file
    ### and downloading the file

    # Extraction must fail while the VM is running; only the expected error
    # text is tolerated, anything else is re-raised.
    try:
        # for data disk
        Volume.extract(self.apiClient, uploaded_volume.id, self.zone.id, TestData.download_mode)

        raise Exception(
            "The volume extraction (for the data disk) did not fail (as expected)."
        )
    except Exception as e:
        if TestUploadDownload.errorText in str(e):
            pass
        else:
            raise

    vm_root_volume_id = sf_util.get_cs_volume_db_id(
        self.dbConnection, vm_root_volume)

    try:
        # for root disk
        Volume.extract(self.apiClient, vm_root_volume.id, self.zone.id, TestData.download_mode)

        raise Exception(
            "The volume extraction (for the root disk) did not fail (as expected)."
        )
    except Exception as e:
        if TestUploadDownload.errorText in str(e):
            pass
        else:
            raise

    # With the VM stopped, extraction should succeed for all three cases.
    self.virtual_machine.stop(self.apiClient)

    self._extract_volume_and_verify(
        uploaded_volume_id,
        "Unable to locate the extracted file for the data disk (attached)")

    result = self._get_volume_store_ref_row(vm_root_volume_id)

    self.assertEqual(len(result), 0, TestUploadDownload.assertText2)

    self._extract_volume_and_verify(
        vm_root_volume_id,
        "Unable to locate the extracted file for the root disk")

    uploaded_volume = self.virtual_machine.detach_volume(
        self.apiClient, uploaded_volume)

    self._extract_volume_and_verify(
        uploaded_volume_id,
        "Unable to locate the extracted file for the data disk (detached)")

    # Re-wrap the response object as a Volume so delete() is available.
    uploaded_volume = Volume(uploaded_volume.__dict__)

    uploaded_volume.delete(self.apiClient)
def test_08_delete_volume_was_attached(self):
    """Delete volume that was attached to a VM and is detached now"""
    self.virtual_machine.start(self.apiClient)

    #######################################
    #######################################
    # STEP 1: Create vol and attach to VM #
    #######################################
    #######################################

    new_volume = Volume.create(
        self.apiClient,
        self.testdata[TestData.volume_2],
        account=self.account.name,
        domainid=self.domain.id,
        zoneid=self.zone.id,
        diskofferingid=self.disk_offering.id,
    )

    # Keep a handle so the volume can be deleted after it is detached.
    volume_to_delete_later = new_volume

    self._check_and_get_cs_volume(new_volume.id, self.testdata[TestData.volume_2][TestData.diskName])

    new_volume = self.virtual_machine.attach_volume(self.apiClient, new_volume)

    vol = self._check_and_get_cs_volume(new_volume.id, self.testdata[TestData.volume_2][TestData.diskName])

    vm = self._get_vm(self.virtual_machine.id)

    self.assertEqual(vol.virtualmachineid, vm.id, "Check if attached to virtual machine")

    self.assertEqual(vm.state.lower(), "running", str(vm.state))

    sf_account_id = sf_util.get_sf_account_id(
        self.cs_api,
        self.account.id,
        self.primary_storage.id,
        self,
        TestVolumes._sf_account_id_should_be_non_zero_int_err_msg,
    )

    # Verify size with hypervisor snapshot reserve applied.
    sf_volume_size = sf_util.get_volume_size_with_hsr(self.cs_api, new_volume, self)

    self._verify_hsr(self.disk_offering.disksize, self.disk_offering.hypervisorsnapshotreserve, sf_volume_size)

    sf_vag_id = sf_util.get_vag_id(self.cs_api, self.cluster.id, self.primary_storage.id, self)

    sf_iscsi_name = sf_util.get_iqn(self.cs_api, new_volume, self)

    sf_volumes = self._get_active_sf_volumes(sf_account_id)

    sf_volume = sf_util.check_and_get_sf_volume(sf_volumes, vol.name, self)

    sf_util.check_size_and_iops(sf_volume, vol, sf_volume_size, self)

    sf_util.check_vag(sf_volume, sf_vag_id, self)

    self._check_xen_sr(sf_iscsi_name)

    #######################################
    #######################################
    # STEP 2: Detach and delete volume    #
    #######################################
    #######################################

    new_volume = self.virtual_machine.detach_volume(self.apiClient, new_volume)

    vol = self._check_and_get_cs_volume(new_volume.id, self.testdata[TestData.volume_2][TestData.diskName])

    vm = self._get_vm(self.virtual_machine.id)

    self.assertEqual(vol.virtualmachineid, None, "Check if attached to virtual machine")

    self.assertEqual(vm.state.lower(), "running", str(vm.state))

    # After the detach the SolidFire volume stays active with the same
    # size/IOPS but leaves its volume access group.
    sf_volumes = self._get_active_sf_volumes(sf_account_id)

    sf_volume = sf_util.check_and_get_sf_volume(sf_volumes, vol.name, self)

    sf_util.check_size_and_iops(sf_volume, vol, sf_volume_size, self)

    self.assertEqual(len(sf_volume["volumeAccessGroups"]), 0, TestVolumes._volume_should_not_be_in_a_vag)

    # Trailing False presumably means "expect the SR to be absent".
    self._check_xen_sr(sf_iscsi_name, False)

    volume_to_delete_later.delete(self.apiClient)

    # The CloudStack volume is gone after the delete ...
    list_volumes_response = list_volumes(self.apiClient, id=new_volume.id)

    self.assertEqual(list_volumes_response, None, "Check volume was deleted")

    # ... and the SolidFire volume must no longer be found among the active ones.
    sf_volumes = self._get_active_sf_volumes(sf_account_id)

    sf_util.check_and_get_sf_volume(sf_volumes, vol.name, self, False)
def test_06_attach_volume_to_stopped_VM(self):
    """Attach a volume to a stopped virtual machine, then start VM"""
    self.virtual_machine.stop(self.apiClient)

    sf_account_id = sf_util.get_sf_account_id(
        self.cs_api,
        self.account.id,
        self.primary_storage.id,
        self,
        TestVolumes._sf_account_id_should_be_non_zero_int_err_msg,
    )

    sf_vag_id = sf_util.get_vag_id(self.cs_api, self.cluster.id, self.primary_storage.id, self)

    self._check_and_get_cs_volume(self.volume.id, self.testdata[TestData.volume_1][TestData.diskName])

    #######################################
    #######################################
    # STEP 1: Attach volume to stopped VM #
    #######################################
    #######################################

    self.volume = self.virtual_machine.attach_volume(self.apiClient, self.volume)

    # Flag used by teardown logic to know the volume needs detaching.
    self.attached = True

    vol = self._check_and_get_cs_volume(self.volume.id, self.testdata[TestData.volume_1][TestData.diskName])

    vm = self._get_vm(self.virtual_machine.id)

    self.assertEqual(vol.virtualmachineid, vm.id, TestVolumes._volume_vm_id_and_vm_id_do_not_match_err_msg)

    self.assertEqual(vm.state.lower(), "stopped", TestVolumes._vm_not_in_stopped_state_err_msg)

    sf_iscsi_name = sf_util.get_iqn(self.cs_api, self.volume, self)

    # Verify size with hypervisor snapshot reserve applied.
    sf_volume_size = sf_util.get_volume_size_with_hsr(self.cs_api, self.volume, self)

    self._verify_hsr(self.disk_offering.disksize, self.disk_offering.hypervisorsnapshotreserve, sf_volume_size)

    sf_volumes = self._get_active_sf_volumes(sf_account_id)

    sf_volume = sf_util.check_and_get_sf_volume(sf_volumes, vol.name, self)

    sf_util.check_size_and_iops(sf_volume, vol, sf_volume_size, self)

    sf_util.check_vag(sf_volume, sf_vag_id, self)

    self._check_xen_sr(sf_iscsi_name)

    # Start the VM and repeat the whole verification while it is running.
    self.virtual_machine.start(self.apiClient)

    vol = self._check_and_get_cs_volume(self.volume.id, self.testdata[TestData.volume_1][TestData.diskName])

    vm = self._get_vm(self.virtual_machine.id)

    self.assertEqual(vol.virtualmachineid, vm.id, TestVolumes._volume_vm_id_and_vm_id_do_not_match_err_msg)

    self.assertEqual(vm.state.lower(), "running", TestVolumes._vm_not_in_running_state_err_msg)

    sf_iscsi_name = sf_util.get_iqn(self.cs_api, self.volume, self)

    sf_volume_size = sf_util.get_volume_size_with_hsr(self.cs_api, self.volume, self)

    self._verify_hsr(self.disk_offering.disksize, self.disk_offering.hypervisorsnapshotreserve, sf_volume_size)

    sf_volumes = self._get_active_sf_volumes(sf_account_id)

    sf_volume = sf_util.check_and_get_sf_volume(sf_volumes, vol.name, self)

    sf_util.check_size_and_iops(sf_volume, vol, sf_volume_size, self)

    sf_util.check_vag(sf_volume, sf_vag_id, self)

    self._check_xen_sr(sf_iscsi_name)
def test_07_destroy_expunge_VM_with_volume(self):
    """Destroy and expunge VM with attached volume"""
    #######################################
    #######################################
    # STEP 1: Create VM and attach volume #
    #######################################
    #######################################

    test_virtual_machine = VirtualMachine.create(
        self.apiClient,
        self.testdata[TestData.virtualMachine2],
        accountid=self.account.name,
        zoneid=self.zone.id,
        serviceofferingid=self.compute_offering.id,
        templateid=self.template.id,
        domainid=self.domain.id,
        startvm=True,
    )

    self.volume = test_virtual_machine.attach_volume(self.apiClient, self.volume)

    # Flag used by teardown logic to know the volume needs detaching.
    self.attached = True

    vol = self._check_and_get_cs_volume(self.volume.id, self.testdata[TestData.volume_1][TestData.diskName])

    vm = self._get_vm(test_virtual_machine.id)

    self.assertEqual(vol.virtualmachineid, vm.id, TestVolumes._volume_vm_id_and_vm_id_do_not_match_err_msg)

    self.assertEqual(vm.state.lower(), "running", TestVolumes._vm_not_in_running_state_err_msg)

    sf_account_id = sf_util.get_sf_account_id(
        self.cs_api,
        self.account.id,
        self.primary_storage.id,
        self,
        TestVolumes._sf_account_id_should_be_non_zero_int_err_msg,
    )

    # Verify size with hypervisor snapshot reserve applied.
    sf_volume_size = sf_util.get_volume_size_with_hsr(self.cs_api, self.volume, self)

    self._verify_hsr(self.disk_offering.disksize, self.disk_offering.hypervisorsnapshotreserve, sf_volume_size)

    sf_vag_id = sf_util.get_vag_id(self.cs_api, self.cluster.id, self.primary_storage.id, self)

    sf_iscsi_name = sf_util.get_iqn(self.cs_api, self.volume, self)

    sf_volumes = self._get_active_sf_volumes(sf_account_id)

    sf_volume = sf_util.check_and_get_sf_volume(sf_volumes, vol.name, self)

    sf_util.check_size_and_iops(sf_volume, vol, sf_volume_size, self)

    sf_util.check_vag(sf_volume, sf_vag_id, self)

    self._check_xen_sr(sf_iscsi_name)

    #######################################
    #######################################
    # STEP 2: Destroy and Expunge VM      #
    #######################################
    #######################################

    # The True flag requests expunge (verified by the assertions below).
    test_virtual_machine.delete(self.apiClient, True)

    self.attached = False

    vol = self._check_and_get_cs_volume(self.volume.id, self.testdata[TestData.volume_1][TestData.diskName])

    self.assertEqual(vol.virtualmachineid, None, "Check if attached to virtual machine")

    self.assertEqual(vol.vmname, None, "Check if VM was expunged")

    list_virtual_machine_response = list_virtual_machines(self.apiClient, id=test_virtual_machine.id)

    self.assertEqual(list_virtual_machine_response, None, "Check if VM was actually expunged")

    # The data volume survives the expunge: still active with unchanged
    # size/IOPS, but out of its VAG and detached on the hypervisor.
    sf_volumes = self._get_active_sf_volumes(sf_account_id)

    sf_volume = sf_util.check_and_get_sf_volume(sf_volumes, vol.name, self)

    sf_util.check_size_and_iops(sf_volume, vol, sf_volume_size, self)

    self.assertEqual(len(sf_volume["volumeAccessGroups"]), 0, TestVolumes._volume_should_not_be_in_a_vag)

    self._check_xen_sr(sf_iscsi_name, False)
def test_02_storage_migrate_root_and_data_disks(self):
    """Create one extra storage pool per cluster, attach a data disk to a
    new VM, and migrate only the data disk between clusters and back.
    """
    primarystorage2 = self.testdata[TestData.primaryStorage2]

    # One additional primary storage pool for each cluster.
    primary_storage_2 = StoragePool.create(
        self.apiClient,
        primarystorage2,
        clusterid=self.cluster_1.id
    )

    primary_storage_3 = StoragePool.create(
        self.apiClient,
        primarystorage2,
        clusterid=self.cluster_2.id
    )

    src_host, dest_host = self._get_source_and_dest_hosts()

    virtual_machine = VirtualMachine.create(
        self.apiClient,
        self.testdata[TestData.virtualMachine],
        accountid=self.account.name,
        zoneid=self.zone.id,
        serviceofferingid=self.compute_offering_3.id,
        templateid=self.template.id,
        domainid=self.domain.id,
        hostid=src_host.id,
        startvm=True
    )

    cs_data_volume = Volume.create(
        self.apiClient,
        self.testdata[TestData.volume_1],
        account=self.account.name,
        domainid=self.domain.id,
        zoneid=self.zone.id,
        diskofferingid=self.disk_offering_1.id
    )

    self.cleanup = [
        virtual_machine,
        cs_data_volume,
        primary_storage_2,
        primary_storage_3
    ]

    cs_data_volume = virtual_machine.attach_volume(
        self.apiClient,
        cs_data_volume
    )

    sf_account_id = sf_util.get_sf_account_id(self.cs_api, self.account.id, self.primary_storage.id, self,
                                              TestVMMigrationWithStorage._sf_account_id_should_be_non_zero_int_err_msg)

    # Bug fix: query the SolidFire connection through self.sfe, the attribute
    # used by every other test in this class (e.g. test_01); self.sf_client
    # was inconsistent with the rest of the suite.
    sf_volumes = sf_util.get_active_sf_volumes(self.sfe, sf_account_id)

    sf_data_volume = sf_util.check_and_get_sf_volume(sf_volumes, cs_data_volume.name, self)

    # Migrate just the data disk to the destination host, then swap and
    # migrate it back.
    sf_data_volume = self._migrate_and_verify_one_disk_only(virtual_machine, dest_host, cs_data_volume, sf_account_id,
                                                            sf_data_volume, self.xen_session_1, self.xen_session_2)

    src_host, dest_host = dest_host, src_host

    self._migrate_and_verify_one_disk_only(virtual_machine, dest_host, cs_data_volume, sf_account_id, sf_data_volume,
                                           self.xen_session_2, self.xen_session_1)
def test_05_detach_vol_stopped_VM_start(self):
    """Detach volume from a stopped VM, then start.

    Attaches the volume to the running VM, stops the VM and detaches the
    volume, then restarts the VM and confirms the volume stays out of any
    SolidFire volume access group and has no XenServer SR.
    """
    self.virtual_machine.start(self.apiClient)

    sf_account_id = sf_util.get_sf_account_id(
        self.cs_api, self.account.id, self.primary_storage.id, self,
        TestVolumes._sf_account_id_should_be_non_zero_int_err_msg)

    sf_vag_id = sf_util.get_vag_id(self.cs_api, self.cluster.id, self.primary_storage.id, self)

    self._check_and_get_cs_volume(self.volume.id, self.testdata[TestData.volume_1][TestData.diskName])

    # Step 1: attach the volume to the running VM.
    self.volume = self.virtual_machine.attach_volume(self.apiClient, self.volume)

    self.attached = True

    cs_volume = self._check_and_get_cs_volume(self.volume.id, self.testdata[TestData.volume_1][TestData.diskName])
    cs_vm = self._get_vm(self.virtual_machine.id)

    self.assertEqual(cs_volume.virtualmachineid, cs_vm.id, TestVolumes._volume_vm_id_and_vm_id_do_not_match_err_msg)
    self.assertEqual(cs_vm.state.lower(), "running", TestVolumes._vm_not_in_running_state_err_msg)

    sf_iscsi_name = sf_util.get_iqn(self.cs_api, self.volume, self)

    sf_volume_size = sf_util.get_volume_size_with_hsr(self.cs_api, self.volume, self)

    self._verify_hsr(self.disk_offering.disksize, self.disk_offering.hypervisorsnapshotreserve, sf_volume_size)

    sf_volumes = self._get_active_sf_volumes(sf_account_id)

    sf_volume = sf_util.check_and_get_sf_volume(sf_volumes, cs_volume.name, self)

    sf_util.check_size_and_iops(sf_volume, cs_volume, sf_volume_size, self)
    sf_util.check_vag(sf_volume, sf_vag_id, self)

    self._check_xen_sr(sf_iscsi_name)

    # Step 2: stop the VM, then detach the volume.
    self.virtual_machine.stop(self.apiClient)

    self.volume = self.virtual_machine.detach_volume(self.apiClient, self.volume)

    self.attached = False

    cs_volume = self._check_and_get_cs_volume(self.volume.id, self.testdata[TestData.volume_1][TestData.diskName])
    cs_vm = self._get_vm(self.virtual_machine.id)

    self.assertEqual(cs_volume.virtualmachineid, None, "The volume should not be attached to a VM.")
    self.assertEqual(cs_vm.state.lower(), "stopped", TestVolumes._vm_not_in_stopped_state_err_msg)

    sf_volumes = self._get_active_sf_volumes(sf_account_id)

    sf_volume = sf_util.check_and_get_sf_volume(sf_volumes, cs_volume.name, self)

    self.assertEqual(len(sf_volume["volumeAccessGroups"]), 0, TestVolumes._volume_should_not_be_in_a_vag)

    self._check_xen_sr(sf_iscsi_name, False)

    # Step 3: start the VM again; the detached volume must remain detached.
    self.virtual_machine.start(self.apiClient)

    cs_volume = self._check_and_get_cs_volume(self.volume.id, self.testdata[TestData.volume_1][TestData.diskName])
    cs_vm = self._get_vm(self.virtual_machine.id)

    sf_volumes = self._get_active_sf_volumes(sf_account_id)

    sf_volume = sf_util.check_and_get_sf_volume(sf_volumes, cs_volume.name, self)

    self.assertEqual(len(sf_volume["volumeAccessGroups"]), 0, TestVolumes._volume_should_not_be_in_a_vag)

    self._check_xen_sr(sf_iscsi_name, False)
def test_10_attach_more_than_one_disk_to_VM(self):
    """Attach more than one disk to a VM.

    Attaches the pre-existing volume plus a freshly created second volume,
    then verifies size/IOPS, IQN-based SR presence, and VAG membership for
    both on the SolidFire side. The second volume is detached at the end.
    """
    self.virtual_machine.start(self.apiClient)

    volume_2 = Volume.create(
        self.apiClient,
        self.testdata[TestData.volume_2],
        zoneid=self.zone.id,
        account=self.account.name,
        domainid=self.domain.id,
        diskofferingid=self.disk_offering.id
    )

    self.cleanup.append(volume_2)

    self._check_and_get_cs_volume(volume_2.id, self.testdata[TestData.volume_2][TestData.diskName])

    # Step 1: attach both volumes to the VM.
    self.virtual_machine.attach_volume(self.apiClient, self.volume)

    self.attached = True

    first_cs_volume = self._check_and_get_cs_volume(self.volume.id, self.testdata[TestData.volume_1][TestData.diskName])

    self.virtual_machine.attach_volume(self.apiClient, volume_2)

    second_cs_volume = self._check_and_get_cs_volume(volume_2.id, self.testdata[TestData.volume_2][TestData.diskName])

    sf_account_id = sf_util.get_sf_account_id(
        self.cs_api, self.account.id, self.primary_storage.id, self,
        TestVolumes._sf_account_id_should_be_non_zero_int_err_msg)

    sf_volume_size = sf_util.get_volume_size_with_hsr(self.cs_api, self.volume, self)
    self._verify_hsr(self.disk_offering.disksize, self.disk_offering.hypervisorsnapshotreserve, sf_volume_size)

    sf_volume_2_size = sf_util.get_volume_size_with_hsr(self.cs_api, volume_2, self)
    self._verify_hsr(self.disk_offering.disksize, self.disk_offering.hypervisorsnapshotreserve, sf_volume_2_size)

    sf_vag_id = sf_util.get_vag_id(self.cs_api, self.cluster.id, self.primary_storage.id, self)

    sf_volumes = self._get_active_sf_volumes(sf_account_id)

    # Verify the first volume on the SolidFire cluster.
    sf_volume = sf_util.check_and_get_sf_volume(sf_volumes, first_cs_volume.name, self)
    sf_util.check_size_and_iops(sf_volume, first_cs_volume, sf_volume_size, self)
    sf_iscsi_name = sf_util.get_iqn(self.cs_api, self.volume, self)
    self._check_xen_sr(sf_iscsi_name)
    sf_util.check_vag(sf_volume, sf_vag_id, self)

    # Verify the second volume on the SolidFire cluster.
    sf_volume_2 = sf_util.check_and_get_sf_volume(sf_volumes, second_cs_volume.name, self)
    sf_util.check_size_and_iops(sf_volume_2, second_cs_volume, sf_volume_2_size, self)
    sf_iscsi_name_2 = sf_util.get_iqn(self.cs_api, volume_2, self)
    self._check_xen_sr(sf_iscsi_name_2)
    sf_util.check_vag(sf_volume_2, sf_vag_id, self)

    self.virtual_machine.detach_volume(self.apiClient, volume_2)
def test_09_attach_volumes_multiple_accounts(self):
    """Attach a data disk to a VM in one account and attach another data disk to a VM in another account"""
    self.virtual_machine.start(self.apiClient)

    # Step 1: create a second account with its own VM and data volume.
    test_account = Account.create(self.apiClient, self.testdata[TestData.testAccount], admin=1)

    self.cleanup.append(test_account)

    test_virtual_machine = VirtualMachine.create(
        self.apiClient,
        self.testdata[TestData.virtualMachine2],
        accountid=test_account.name,
        zoneid=self.zone.id,
        serviceofferingid=self.compute_offering.id,
        templateid=self.template.id,
        domainid=self.domain.id,
        startvm=True
    )

    test_volume = Volume.create(
        self.apiClient,
        self.testdata[TestData.volume_2],
        zoneid=self.zone.id,
        account=test_account.name,
        domainid=self.domain.id,
        diskofferingid=self.disk_offering.id
    )

    self._check_and_get_cs_volume(test_volume.id, self.testdata[TestData.volume_2][TestData.diskName])

    # Step 2: attach each volume to the VM of its own account.
    self.volume = self.virtual_machine.attach_volume(self.apiClient, self.volume)

    self.attached = True

    cs_volume = self._check_and_get_cs_volume(self.volume.id, self.testdata[TestData.volume_1][TestData.diskName])
    cs_vm = self._get_vm(self.virtual_machine.id)

    self.assertEqual(cs_volume.virtualmachineid, cs_vm.id, "Check if attached to virtual machine")
    self.assertEqual(cs_vm.state.lower(), "running", str(cs_vm.state))

    test_volume = test_virtual_machine.attach_volume(self.apiClient, test_volume)

    other_cs_volume = self._check_and_get_cs_volume(test_volume.id, self.testdata[TestData.volume_2][TestData.diskName])
    other_cs_vm = self._get_vm(test_virtual_machine.id)

    self.assertEqual(other_cs_volume.virtualmachineid, other_cs_vm.id, "Check if attached to virtual machine of other acct")
    self.assertEqual(other_cs_vm.state.lower(), "running", str(other_cs_vm.state))

    sf_vag_id = sf_util.get_vag_id(self.cs_api, self.cluster.id, self.primary_storage.id, self)

    # Verify the first account's volume on the SolidFire cluster.
    sf_account_id = sf_util.get_sf_account_id(
        self.cs_api, self.account.id, self.primary_storage.id, self,
        TestVolumes._sf_account_id_should_be_non_zero_int_err_msg)

    sf_volumes = self._get_active_sf_volumes(sf_account_id)

    sf_volume = sf_util.check_and_get_sf_volume(sf_volumes, cs_volume.name, self)

    sf_volume_size = sf_util.get_volume_size_with_hsr(self.cs_api, cs_volume, self)

    self._verify_hsr(self.disk_offering.disksize, self.disk_offering.hypervisorsnapshotreserve, sf_volume_size)

    sf_util.check_size_and_iops(sf_volume, cs_volume, sf_volume_size, self)

    sf_iscsi_name = sf_util.get_iqn(self.cs_api, self.volume, self)

    self._check_xen_sr(sf_iscsi_name)

    sf_util.check_vag(sf_volume, sf_vag_id, self)

    # Verify the second account's volume on the SolidFire cluster.
    sf_test_account_id = sf_util.get_sf_account_id(
        self.cs_api, test_account.id, self.primary_storage.id, self,
        TestVolumes._sf_account_id_should_be_non_zero_int_err_msg)

    sf_test_volumes = self._get_active_sf_volumes(sf_test_account_id)

    sf_test_volume = sf_util.check_and_get_sf_volume(sf_test_volumes, other_cs_volume.name, self)

    sf_test_volume_size = sf_util.get_volume_size_with_hsr(self.cs_api, other_cs_volume, self)

    self._verify_hsr(self.disk_offering.disksize, self.disk_offering.hypervisorsnapshotreserve, sf_test_volume_size)

    sf_util.check_size_and_iops(sf_test_volume, other_cs_volume, sf_test_volume_size, self)

    sf_test_iscsi_name = sf_util.get_iqn(self.cs_api, test_volume, self)

    self._check_xen_sr(sf_test_iscsi_name)

    sf_util.check_vag(sf_test_volume, sf_vag_id, self)
def test_02_attach_detach_attach_volume(self):
    """Attach, detach, and attach volume to a running VM.

    After the detach, the SolidFire volume must not belong to any volume
    access group and its XenServer SR must be gone; after the re-attach,
    both must be present again.
    """
    self.virtual_machine.start(self.apiClient)

    sf_account_id = sf_util.get_sf_account_id(
        self.cs_api, self.account.id, self.primary_storage.id, self,
        TestVolumes._sf_account_id_should_be_non_zero_int_err_msg)

    sf_vag_id = sf_util.get_vag_id(self.cs_api, self.cluster.id, self.primary_storage.id, self)

    self._check_and_get_cs_volume(self.volume.id, self.testdata[TestData.volume_1][TestData.diskName])

    # STEP 1: Attach volume to running VM.
    self.volume = self.virtual_machine.attach_volume(
        self.apiClient,
        self.volume
    )

    self.attached = True

    vol = self._check_and_get_cs_volume(self.volume.id, self.testdata[TestData.volume_1][TestData.diskName])

    vm = self._get_vm(self.virtual_machine.id)

    self.assertEqual(
        vol.virtualmachineid,
        vm.id,
        TestVolumes._volume_vm_id_and_vm_id_do_not_match_err_msg
    )

    self.assertEqual(
        vm.state.lower(),
        'running',
        TestVolumes._vm_not_in_running_state_err_msg
    )

    sf_iscsi_name = sf_util.get_iqn(self.cs_api, self.volume, self)

    sf_volume_size = sf_util.get_volume_size_with_hsr(self.cs_api, self.volume, self)

    self._verify_hsr(self.disk_offering.disksize, self.disk_offering.hypervisorsnapshotreserve, sf_volume_size)

    sf_volumes = self._get_active_sf_volumes(sf_account_id)

    sf_volume = sf_util.check_and_get_sf_volume(sf_volumes, vol.name, self)

    sf_util.check_size_and_iops(sf_volume, vol, sf_volume_size, self)

    sf_util.check_vag(sf_volume, sf_vag_id, self)

    self._check_xen_sr(sf_iscsi_name)

    # STEP 2: Detach volume from running VM.
    self.volume = self.virtual_machine.detach_volume(
        self.apiClient,
        self.volume
    )

    self.attached = False

    vol = self._check_and_get_cs_volume(self.volume.id, self.testdata[TestData.volume_1][TestData.diskName])

    vm = self._get_vm(self.virtual_machine.id)

    self.assertEqual(
        vol.virtualmachineid,
        None,
        "The volume should not be attached to a VM."
    )

    self.assertEqual(
        vm.state.lower(),
        'running',
        str(vm.state)
    )

    sf_volumes = self._get_active_sf_volumes(sf_account_id)

    sf_volume = sf_util.check_and_get_sf_volume(sf_volumes, vol.name, self)

    # BUG FIX: check_and_get_sf_volume's result is accessed as a dict
    # ("volumeAccessGroups") everywhere else in this suite; the previous
    # attribute access 'sf_volume.volume_access_groups' would raise
    # AttributeError. Also use the shared assertion-message constant.
    self.assertEqual(
        len(sf_volume["volumeAccessGroups"]),
        0,
        TestVolumes._volume_should_not_be_in_a_vag
    )

    self._check_xen_sr(sf_iscsi_name, False)

    # STEP 3: Attach volume to running VM again.
    self.volume = self.virtual_machine.attach_volume(
        self.apiClient,
        self.volume
    )

    self.attached = True

    vol = self._check_and_get_cs_volume(self.volume.id, self.testdata[TestData.volume_1][TestData.diskName])

    vm = self._get_vm(self.virtual_machine.id)

    self.assertEqual(
        vol.virtualmachineid,
        vm.id,
        TestVolumes._volume_vm_id_and_vm_id_do_not_match_err_msg
    )

    self.assertEqual(
        vm.state.lower(),
        'running',
        TestVolumes._vm_not_in_running_state_err_msg
    )

    sf_volumes = self._get_active_sf_volumes(sf_account_id)

    sf_volume = sf_util.check_and_get_sf_volume(sf_volumes, vol.name, self)

    sf_util.check_vag(sf_volume, sf_vag_id, self)

    self._check_xen_sr(sf_iscsi_name)
def test_vag_per_host_5(self):
    """Verify VAG bookkeeping when a host is removed from and re-added to the cluster.

    After deleting a host, its IQN must no longer appear in any SolidFire
    volume access group; after re-creating the host, the cluster ends up
    with one more host than unique VAGs.
    """
    cluster_hosts = list_hosts(self.apiClient, clusterid=self.cluster.id)

    self.assertTrue(
        len(cluster_hosts) >= 2,
        "There needs to be at least two hosts.")

    distinct_vag_ids = self._get_unique_vag_ids(cluster_hosts)

    self.assertTrue(
        len(cluster_hosts) == len(distinct_vag_ids),
        "To run this test, each host should be in its own VAG.")

    primarystorage = self.testdata[TestData.primaryStorage]

    primary_storage = StoragePool.create(
        self.apiClient,
        primarystorage,
        scope=primarystorage[TestData.scope],
        zoneid=self.zone.id,
        provider=primarystorage[TestData.provider],
        tags=primarystorage[TestData.tags],
        capacityiops=primarystorage[TestData.capacityIops],
        capacitybytes=primarystorage[TestData.capacityBytes],
        hypervisor=primarystorage[TestData.hypervisor])

    self.cleanup.append(primary_storage)

    self.virtual_machine = VirtualMachine.create(
        self.apiClient,
        self.testdata[TestData.virtualMachine],
        accountid=self.account.name,
        zoneid=self.zone.id,
        serviceofferingid=self.compute_offering.id,
        templateid=self.template.id,
        domainid=self.domain.id,
        startvm=True)

    root_volume = self._get_root_volume(self.virtual_machine)

    sf_account_id = sf_util.get_sf_account_id(
        self.cs_api,
        self.account.id,
        primary_storage.id,
        self,
        TestAddRemoveHosts._sf_account_id_should_be_non_zero_int_err_msg)

    sf_volumes = sf_util.get_active_sf_volumes(self.sfe, sf_account_id)

    sf_volume = sf_util.check_and_get_sf_volume(sf_volumes, root_volume.name, self)

    sf_vag_ids = sf_util.get_vag_ids(self.cs_api, self.cluster.id, primary_storage.id, self)

    sf_util.check_vags(sf_volume, sf_vag_ids, self)

    host = Host(cluster_hosts[0].__dict__)

    host_iqn = self._get_host_iqn(host)

    current_vags = sf_util.get_all_vags(self.sfe)

    vag_for_host = self._get_host_vag(host_iqn, current_vags)

    self.assertTrue(vag_for_host != None, "The host should be in a VAG.")

    # Remove the host; its IQN must disappear from every VAG.
    host.delete(self.apiClient)

    sf_volumes = sf_util.get_active_sf_volumes(self.sfe, sf_account_id)

    sf_volume = sf_util.check_and_get_sf_volume(sf_volumes, root_volume.name, self)

    sf_util.check_vags(sf_volume, sf_vag_ids, self)

    current_vags = sf_util.get_all_vags(self.sfe)

    vag_for_host = self._get_host_vag(host_iqn, current_vags)

    self.assertTrue(vag_for_host == None, "The host should not be in a VAG.")

    # Re-create the deleted host from its saved connection details.
    details = {
        TestData.username: "******",
        TestData.password: "******",
        TestData.url: "http://" + host.ipaddress,
        TestData.podId: host.podid,
        TestData.zoneId: host.zoneid
    }

    host = Host.create(self.apiClient, self.cluster, details, hypervisor=host.hypervisor)

    self.assertTrue(isinstance(host, Host), "'host' is not a 'Host'.")

    cluster_hosts = list_hosts(self.apiClient, clusterid=self.cluster.id)

    distinct_vag_ids = self._get_unique_vag_ids(cluster_hosts)

    self.assertTrue(
        len(cluster_hosts) == len(distinct_vag_ids) + 1,
        "There should be one more host than unique VAG.")
def test_vag_per_host_5(self):
    """Delete and re-add a cluster host, checking SolidFire VAG membership each step.

    Precondition: every host sits in its own VAG. The deleted host's IQN
    must leave all VAGs, and after re-creation the cluster has one more
    host than unique VAGs.
    """
    host_list = list_hosts(self.apiClient, clusterid=self.cluster.id)

    self.assertTrue(
        len(host_list) >= 2,
        "There needs to be at least two hosts."
    )

    vag_id_set = self._get_unique_vag_ids(host_list)

    self.assertTrue(len(host_list) == len(vag_id_set),
                    "To run this test, each host should be in its own VAG.")

    primarystorage = self.testdata[TestData.primaryStorage]

    primary_storage = StoragePool.create(
        self.apiClient,
        primarystorage,
        scope=primarystorage[TestData.scope],
        zoneid=self.zone.id,
        provider=primarystorage[TestData.provider],
        tags=primarystorage[TestData.tags],
        capacityiops=primarystorage[TestData.capacityIops],
        capacitybytes=primarystorage[TestData.capacityBytes],
        hypervisor=primarystorage[TestData.hypervisor]
    )

    self.cleanup.append(primary_storage)

    self.virtual_machine = VirtualMachine.create(
        self.apiClient,
        self.testdata[TestData.virtualMachine],
        accountid=self.account.name,
        zoneid=self.zone.id,
        serviceofferingid=self.compute_offering.id,
        templateid=self.template.id,
        domainid=self.domain.id,
        startvm=True
    )

    root_volume = self._get_root_volume(self.virtual_machine)

    sf_account_id = sf_util.get_sf_account_id(
        self.cs_api, self.account.id, primary_storage.id, self,
        TestAddRemoveHosts._sf_account_id_should_be_non_zero_int_err_msg)

    sf_volumes = sf_util.get_active_sf_volumes(self.sfe, sf_account_id)

    sf_volume = sf_util.check_and_get_sf_volume(sf_volumes, root_volume.name, self)

    sf_vag_ids = sf_util.get_vag_ids(self.cs_api, self.cluster.id, primary_storage.id, self)

    sf_util.check_vags(sf_volume, sf_vag_ids, self)

    host = Host(host_list[0].__dict__)

    host_iqn = self._get_host_iqn(host)

    vags = sf_util.get_all_vags(self.sfe)

    matching_vag = self._get_host_vag(host_iqn, vags)

    self.assertTrue(matching_vag != None, "The host should be in a VAG.")

    # Drop the host from the cluster; its IQN must vanish from all VAGs.
    host.delete(self.apiClient)

    sf_volumes = sf_util.get_active_sf_volumes(self.sfe, sf_account_id)

    sf_volume = sf_util.check_and_get_sf_volume(sf_volumes, root_volume.name, self)

    sf_util.check_vags(sf_volume, sf_vag_ids, self)

    vags = sf_util.get_all_vags(self.sfe)

    matching_vag = self._get_host_vag(host_iqn, vags)

    self.assertTrue(matching_vag == None, "The host should not be in a VAG.")

    # Bring the host back using its previous connection details.
    details = {
        TestData.username: "******",
        TestData.password: "******",
        TestData.url: "http://" + host.ipaddress,
        TestData.podId : host.podid,
        TestData.zoneId: host.zoneid
    }

    host = Host.create(
        self.apiClient,
        self.cluster,
        details,
        hypervisor=host.hypervisor
    )

    self.assertTrue(
        isinstance(host, Host),
        "'host' is not a 'Host'."
    )

    host_list = list_hosts(self.apiClient, clusterid=self.cluster.id)

    vag_id_set = self._get_unique_vag_ids(host_list)

    self.assertTrue(len(host_list) == len(vag_id_set) + 1,
                    "There should be one more host than unique VAG.")