def test_06_volumes_per_project(self): """Test Volumes limit per project """ # Validate the following # 1. set max no of volume per project to 1. # 2. Create 1 VM in this project # 4. Try to Create another VM in the project. It should give the user # an appropriate error that Volume limit is exhausted and an alert # should be generated. if self.hypervisor.lower() == 'lxc': if not find_storage_pool_type(self.apiclient, storagetype='rbd'): self.skipTest("RBD storage type is required for data volumes for LXC") self.project_1 = Project.create( self.api_client, self.services["project"], account=self.account.name, domainid=self.account.domainid ) self.cleanup.append(self.project_1) self.debug( "Updating volume resource limits for project: %s" % self.project_1.id) # Set usage_vm=1 for Account 1 update_resource_limit( self.apiclient, 2, # Volume max=1, projectid=self.project_1.id ) self.debug("Deploying VM for project: %s" % self.project_1.id) virtual_machine_1 = VirtualMachine.create( self.apiclient, self.services["server"], templateid=self.template.id, serviceofferingid=self.service_offering.id, projectid=self.project_1.id ) # Verify VM state self.assertEqual( virtual_machine_1.state, 'Running', "Check VM state is Running or not" ) # Exception should be raised for second volume with self.assertRaises(Exception): Volume.create( self.apiclient, self.services["volume"], zoneid=self.zone.id, diskofferingid=self.disk_offering.id, projectid=self.project_1.id ) return
def test_03_deploy_vm_project_limit_reached(self):
    """Try to deploy a VM with an admin account where the account has not
    used the resources but at the project level they are not available.

    # Validate the following
    # 1. Try to deploy VM with admin account where account has not used
    #    the resources but @ project they are not available
    # 2. Deploy VM should error out saying ResourceAllocationException
    #    with "resource limit exceeds"
    """
    self.virtualMachine = VirtualMachine.create(
        self.api_client,
        self.services["virtual_machine"],
        projectid=self.project.id,
        serviceofferingid=self.service_offering.id)
    try:
        projects = Project.list(self.apiclient, id=self.project.id,
                                listall=True)
    except Exception as e:
        self.fail("failed to get projects list: %s" % e)
    self.assertEqual(
        validateList(projects)[0], PASS,
        "projects list validation failed")
    # Raise the project primary storage limit to current usage + 3 GB
    self.initialResourceCount = int(projects[0].primarystoragetotal)
    projectLimit = self.initialResourceCount + 3
    self.debug("Setting up account and domain hierarchy")
    response = self.updatePrimaryStorageLimits(projectLimit=projectLimit)
    self.assertEqual(response[0], PASS, response[1])
    # First 2 GB volume still fits inside the raised limit
    self.services["volume"]["size"] = self.services["disk_offering"][
        "disksize"] = 2
    try:
        disk_offering = DiskOffering.create(
            self.apiclient, services=self.services["disk_offering"])
        self.cleanup.append(disk_offering)
        Volume.create(self.apiclient, self.services["volume"],
                      zoneid=self.zone.id, projectid=self.project.id,
                      diskofferingid=disk_offering.id)
    except Exception as e:
        self.fail("Exception occurred: %s" % e)
    # Second volume exceeds the project limit and must fail
    with self.assertRaises(Exception):
        Volume.create(self.apiclient, self.services["volume"],
                      zoneid=self.zone.id, projectid=self.project.id,
                      diskofferingid=disk_offering.id)
    return
def setUpClass(cls):
    """Set up zone/domain/template data and create the account, offerings,
    VM and four data volumes shared by the tests in this class."""
    test_client = super(TestMultipleVolumeAttach, cls).getClsTestClient()
    cls.apiclient = test_client.getApiClient()
    cls.services = test_client.getParsedTestDataConfig()
    cls._cleanup = []

    # Resolve zone, domain and hypervisor for this deployment
    cls.domain = get_domain(cls.apiclient)
    cls.zone = get_zone(cls.apiclient, test_client.getZoneForTests())
    cls.services["mode"] = cls.zone.networktype
    cls.hypervisor = test_client.getHypervisorInfo()
    cls.invalidStoragePoolType = False

    cls.disk_offering = DiskOffering.create(cls.apiclient,
                                            cls.services["disk_offering"])

    template = get_template(cls.apiclient, cls.zone.id,
                            cls.services["ostype"])
    if template == FAILED:
        assert False, "get_template() failed to return template with description %s" % cls.services["ostype"]

    cls.services["domainid"] = cls.domain.id
    cls.services["zoneid"] = cls.zone.id
    cls.services["template"] = template.id
    cls.services["diskofferingid"] = cls.disk_offering.id

    # Account, compute offering and the VM the volumes will attach to
    cls.account = Account.create(cls.apiclient,
                                 cls.services["account"],
                                 domainid=cls.domain.id)
    cls.service_offering = ServiceOffering.create(
        cls.apiclient, cls.services["service_offering"])
    cls.virtual_machine = VirtualMachine.create(
        cls.apiclient,
        cls.services,
        accountid=cls.account.name,
        domainid=cls.account.domainid,
        serviceofferingid=cls.service_offering.id,
        mode=cls.services["mode"],
    )

    # Four data disks used by the multiple-attach tests
    cls.volume1, cls.volume2, cls.volume3, cls.volume4 = [
        Volume.create(cls.apiclient,
                      cls.services,
                      account=cls.account.name,
                      domainid=cls.account.domainid)
        for _ in range(4)
    ]

    cls._cleanup = [cls.service_offering, cls.disk_offering, cls.account]
def test_03_deploy_vm_project_limit_reached(self):
    """Try to deploy a VM with an admin account where the account has not
    used the resources but at the project level they are not available.

    # Validate the following
    # 1. Try to deploy VM with admin account where account has not used
    #    the resources but @ project they are not available
    # 2. Deploy VM should error out saying ResourceAllocationException
    #    with "resource limit exceeds"

    Fixes: docstring typo ("TTry") and error-message typo
    ("occured" -> "occurred").
    """
    self.virtualMachine = VirtualMachine.create(
        self.api_client,
        self.services["virtual_machine"],
        projectid=self.project.id,
        diskofferingid=self.disk_offering.id,
        serviceofferingid=self.service_offering.id)
    try:
        projects = Project.list(self.apiclient, id=self.project.id,
                                listall=True)
    except Exception as e:
        self.fail("failed to get projects list: %s" % e)
    self.assertEqual(validateList(projects)[0], PASS,
                     "projects list validation failed")

    # Raise the project primary storage limit to current usage + 3 GB
    self.initialResourceCount = int(projects[0].primarystoragetotal)
    projectLimit = self.initialResourceCount + 3
    self.debug("Setting up account and domain hierarchy")
    response = self.updatePrimaryStorageLimits(projectLimit=projectLimit)
    self.assertEqual(response[0], PASS, response[1])

    # First 2 GB volume still fits inside the raised limit
    self.services["volume"]["size"] = \
        self.services["disk_offering"]["disksize"] = 2
    try:
        disk_offering = DiskOffering.create(
            self.apiclient, services=self.services["disk_offering"])
        self.cleanup.append(disk_offering)
        Volume.create(self.apiclient, self.services["volume"],
                      zoneid=self.zone.id, projectid=self.project.id,
                      diskofferingid=disk_offering.id)
    except Exception as e:
        self.fail("Exception occurred: %s" % e)

    # Second volume exceeds the project limit and must fail
    with self.assertRaises(Exception):
        Volume.create(self.apiclient, self.services["volume"],
                      zoneid=self.zone.id, projectid=self.project.id,
                      diskofferingid=disk_offering.id)
    return
def test_10_attach_detach_instances_with_glId(self):
    """Attach a data volume to a new VM, verify both data and root disks
    resolve to StorPool global ids, then detach and clean up.

    Fix: local variable ``list`` shadowed the ``list`` builtin; renamed
    to ``attached_volumes`` / ``root_volumes``.
    """
    volume = Volume.create(
        self.apiclient,
        {"diskname": "StorPoolDisk-GlId-%d" % random.randint(0, 100)},
        zoneid=self.zone.id,
        diskofferingid=self.disk_offering.id,
    )
    vm = VirtualMachine.create(
        self.apiclient,
        {"name": "StorPool-%s" % uuid.uuid4()},
        zoneid=self.zone.id,
        templateid=self.template.id,
        serviceofferingid=self.service_offering.id,
        hypervisor=self.hypervisor,
        rootdisksize=10
    )
    vm.attach_volume(self.apiclient, volume)

    attached_volumes = list_volumes(self.apiclient,
                                    virtualmachineid=vm.id,
                                    id=volume.id)
    root_volumes = list_volumes(self.apiclient,
                                virtualmachineid=vm.id,
                                type="ROOT")
    self.assertIsNotNone(attached_volumes, "Volume was not attached")
    self.assertIsNotNone(root_volumes, "ROOT volume is missing")
    # Both the data disk and the root disk must have StorPool global ids
    self.helper.storpool_volume_globalid(attached_volumes[0])
    self.helper.storpool_volume_globalid(root_volumes[0])

    vm.stop(self.apiclient, forced=True)
    detached = vm.detach_volume(self.apiclient, attached_volumes[0])
    self.assertIsNone(detached.virtualmachineid,
                      "Volume was not detached from vm")
    Volume.delete(volume, self.apiclient)
    vm.delete(self.apiclient, expunge=True)
def test_25_vc_policy_attach_vol_global_id_vm_uuid(self):
    """Tag a VM with a vc-policy, verify the tag propagates to its
    existing volume and to a newly attached data volume."""
    Tag.create(self.apiclient,
               resourceIds=self.virtual_machine4.id,
               resourceType='UserVm',
               tags={'vc-policy': 'testing_vc-policy'})

    vm_list = list_virtual_machines(self.apiclient,
                                    id=self.virtual_machine4.id)
    vm_tags = vm_list[0].tags
    attached = list_volumes(
        self.apiclient,
        virtualmachineid=self.virtual_machine4.id,
    )
    self.assertTrue(len(attached) == 1, "Volume length should be == 1")
    # Every volume of the tagged VM must carry the vc-policy tag
    for vol in attached:
        self.helper.vc_policy_tags_global_id(vol, vm_tags, False)

    # Attach a fresh data volume and check the tag on it as well
    new_volume = Volume.create(
        self.apiclient,
        {"diskname": "StorPoolDisk-GlId-%d" % random.randint(0, 100)},
        zoneid=self.zone.id,
        diskofferingid=self.disk_offering.id,
    )
    self.virtual_machine4.attach_volume(self.apiclient, new_volume)

    vm_list = list_virtual_machines(self.apiclient,
                                    id=self.virtual_machine4.id)
    vm_tags = vm_list[0].tags
    attached = list_volumes(self.apiclient,
                            virtualmachineid=self.virtual_machine4.id,
                            id=new_volume.id)
    self.assertTrue(len(attached) == 1, "Volume length should be == 1")
    self.helper.vc_policy_tags_global_id(attached[0], vm_tags, False)
    self._cleanup.append(new_volume)
def test_02_storage_migrate_root_and_data_disks(self):
    """Migrate a VM's data disk between hosts (there and back) and verify
    the SolidFire-side volume after each migration."""
    # Two extra primary storage pools, one per cluster, to migrate across
    primarystorage2 = self.testdata[TestData.primaryStorage2]
    primary_storage_2 = StoragePool.create(self.apiClient,
                                           primarystorage2,
                                           clusterid=self.cluster_1.id)
    primary_storage_3 = StoragePool.create(self.apiClient,
                                           primarystorage2,
                                           clusterid=self.cluster_2.id)
    src_host, dest_host = self._get_source_and_dest_hosts()
    virtual_machine = VirtualMachine.create(
        self.apiClient,
        self.testdata[TestData.virtualMachine],
        accountid=self.account.name,
        zoneid=self.zone.id,
        serviceofferingid=self.compute_offering_3.id,
        templateid=self.template.id,
        domainid=self.domain.id,
        hostid=src_host.id,
        startvm=True)
    cs_data_volume = Volume.create(self.apiClient,
                                   self.testdata[TestData.volume_1],
                                   account=self.account.name,
                                   domainid=self.domain.id,
                                   zoneid=self.zone.id,
                                   diskofferingid=self.disk_offering_1.id)
    # NOTE(review): this replaces (not extends) any existing self.cleanup
    # list -- confirm no earlier cleanup entries are dropped.
    self.cleanup = [
        virtual_machine,
        cs_data_volume,
        primary_storage_2,
        primary_storage_3
    ]
    cs_data_volume = virtual_machine.attach_volume(self.apiClient,
                                                   cs_data_volume)
    # Resolve the SolidFire account and the backing SF volume to verify
    sf_account_id = sf_util.get_sf_account_id(
        self.cs_api,
        self.account.id,
        self.primary_storage.id,
        self,
        TestVMMigrationWithStorage.
        _sf_account_id_should_be_non_zero_int_err_msg)
    sf_volumes = sf_util.get_active_sf_volumes(self.sfe, sf_account_id)
    sf_data_volume = sf_util.check_and_get_sf_volume(
        sf_volumes, cs_data_volume.name, self)
    # Migrate to the destination host, then swap roles and migrate back
    sf_data_volume = self._migrate_and_verify_one_disk_only(
        virtual_machine, dest_host, cs_data_volume, sf_account_id,
        sf_data_volume, self.xen_session_1, self.xen_session_2)
    src_host, dest_host = dest_host, src_host
    self._migrate_and_verify_one_disk_only(virtual_machine, dest_host,
                                           cs_data_volume, sf_account_id,
                                           sf_data_volume,
                                           self.xen_session_2,
                                           self.xen_session_1)
def setUpClass(cls):
    """Create the account, offerings, VM and data volume shared by the
    volume lifecycle tests in this class."""
    cls.testClient = super(TestVolumes, cls).getClsTestClient()
    cls.api_client = cls.testClient.getApiClient()
    cls.services = Services().services

    # Zone, domain and hypervisor lookups
    cls.domain = get_domain(cls.api_client)
    cls.zone = get_zone(cls.api_client, cls.testClient.getZoneForTests())
    cls.services['mode'] = cls.zone.networktype
    cls.hypervisor = cls.testClient.getHypervisorInfo()
    if cls.hypervisor.lower() == 'lxc':
        if not find_storage_pool_type(cls.api_client, storagetype='rbd'):
            raise unittest.SkipTest("RBD storage type is required for data volumes for LXC")

    cls.disk_offering = DiskOffering.create(cls.api_client,
                                            cls.services["disk_offering"])
    template = get_template(cls.api_client, cls.zone.id,
                            cls.services["ostype"])

    # Wire zone/template/offering ids into the VM service definition
    cls.services["zoneid"] = cls.zone.id
    vm_services = cls.services["virtual_machine"]
    vm_services["zoneid"] = cls.zone.id
    vm_services["template"] = template.id
    vm_services["diskofferingid"] = cls.disk_offering.id

    # Account, compute offering, VM and the data volume under test
    cls.account = Account.create(cls.api_client,
                                 cls.services["account"],
                                 domainid=cls.domain.id)
    cls.service_offering = ServiceOffering.create(
        cls.api_client, cls.services["service_offering"])
    cls.virtual_machine = VirtualMachine.create(
        cls.api_client,
        vm_services,
        accountid=cls.account.name,
        domainid=cls.account.domainid,
        serviceofferingid=cls.service_offering.id,
    )
    cls.volume = Volume.create(cls.api_client,
                               cls.services["volume"],
                               zoneid=cls.zone.id,
                               account=cls.account.name,
                               domainid=cls.account.domainid,
                               diskofferingid=cls.disk_offering.id)

    cls._cleanup = [cls.service_offering, cls.disk_offering, cls.account]
def test_03_increase_volume_size_above_domain_limit(self):
    """Test increasing volume size above the domain limit

    # Validate the following
    # 1. Create a domain and its admin account
    # 2. Set domain primary storage limit more than (5 GB volume +
    #    template size of VM) and less than (20 GB volume + template size)
    # 3. Deploy a VM without any disk offering (only root disk)
    # 4. Create a volume of 5 GB in the account and attach it to the VM
    # 5. Try to (resize) the volume to 20 GB
    # 6. Resize operation should fail
    """
    # Setting up account and domain hierarchy
    result = self.setupAccounts()
    self.assertEqual(result[0], PASS, result[1])
    templateSize = (self.template.size / (1024**3))
    # Limit is 1 GB below what the 20 GB disk would require
    domainLimit = ((templateSize + self.disk_offering_20_GB.disksize) - 1)
    response = self.updateResourceLimits(domainLimit=domainLimit)
    self.assertEqual(response[0], PASS, response[1])
    apiclient = self.testClient.getUserApiClient(
        UserName=self.parentd_admin.name,
        DomainName=self.parentd_admin.domain)
    self.assertNotEqual(
        apiclient, FAILED, "Failed to get api client\
        of account: %s" % self.parentd_admin.name)
    try:
        virtualMachine = VirtualMachine.create(
            apiclient,
            self.services["virtual_machine"],
            accountid=self.parentd_admin.name,
            domainid=self.parent_domain.id,
            serviceofferingid=self.service_offering.id)
        volume = Volume.create(apiclient,
                               self.services["volume"],
                               zoneid=self.zone.id,
                               account=self.parentd_admin.name,
                               domainid=self.parent_domain.id,
                               diskofferingid=self.disk_offering_5_GB.id)
        virtualMachine.attach_volume(apiclient, volume=volume)
        expectedCount = (templateSize + self.disk_offering_5_GB.disksize)
        result = isDomainResourceCountEqualToExpectedCount(
            self.apiclient, self.parent_domain.id,
            expectedCount, RESOURCE_PRIMARY_STORAGE)
        self.assertFalse(result[0], result[1])
        self.assertTrue(result[2], "Resource count does not match")
    except Exception as e:
        self.fail("Failed with exception: %s" % e)
    # NOTE(review): stop uses self.apiclient (admin) while resize uses the
    # user apiclient above -- confirm this mix is intentional.
    if self.hypervisor == str(XEN_SERVER).lower():
        virtualMachine.stop(self.apiclient)
    # Resizing to 20 GB would exceed the domain limit, so it must raise
    with self.assertRaises(Exception):
        volume.resize(apiclient,
                      diskofferingid=self.disk_offering_20_GB.id)
    return
def test_02_attach_new_volume_to_stopped_vm(self):
    '''Attach a volume to a stopped virtual machine, then start VM'''
    self.virtual_machine.stop(self.apiClient)

    data_volume = Volume.create(self.apiClient,
                                self.testdata[TestData.volume_2],
                                account=self.account.name,
                                domainid=self.domain.id,
                                zoneid=self.zone.id,
                                diskofferingid=self.disk_offering.id)
    self.cleanup.append(data_volume)

    # Attach while the VM is stopped, then power it back on
    data_volume = self.virtual_machine.attach_volume(self.apiClient,
                                                     data_volume)
    TestScaleIOVolumes._start_vm(self.virtual_machine)

    vm = self._get_vm(self.virtual_machine.id)
    self.assertEqual(vm.state.lower(), "running",
                     TestScaleIOVolumes._vm_not_in_running_state_err_msg)

    # Detach and confirm the volume is no longer bound to the VM
    data_volume = self.virtual_machine.detach_volume(self.apiClient,
                                                     data_volume)
    self.assertEqual(data_volume.virtualmachineid, None,
                     "The volume should not be attached to a VM.")
def test_02_destroy_allocated_volume(self):
    """Create a volume, then destroy it with expunge=false and expunge=true.

    Steps:
    # 1. Create a volume; the primary storage resource count increases.
    # 2. destroy(expunge=False) raises; resource count is unchanged.
    # 3. destroy(expunge=True) succeeds; resource count drops by the
    #    size of the volume.
    """
    vol = Volume.create(self.apiclient,
                        self.services["volume"],
                        zoneid=self.zone.id,
                        account=self.account.name,
                        domainid=self.account.domainid,
                        diskofferingid=self.disk_offering.id)
    self.cleanup.append(vol)
    self.expectedCount = self.expectedCount + self.disk_offering.disksize
    self.volumeTotal = self.volumeTotal + 1
    self.verify_resource_count_primary_storage(self.expectedCount,
                                               self.volumeTotal)

    # Destroying without expunge must fail for an allocated volume
    with self.assertRaises(Exception):
        vol.destroy(self.apiclient)

    # Destroying with expunge actually removes the volume
    vol.destroy(self.apiclient, expunge=True)
    self.cleanup.remove(vol)
    self.expectedCount = self.expectedCount - self.disk_offering.disksize
    self.volumeTotal = self.volumeTotal - 1
    self.verify_resource_count_primary_storage(self.expectedCount,
                                               self.volumeTotal)
def test_02_volume_attach_max(self):
    """Attaching one more data volume beyond the per-VM maximum must fail.

    # 1. Attach one more data volume to VM (already 5 attached)
    # 2. Attach volume should fail
    """
    # Create one more data disk for the account
    extra_volume = Volume.create(
        self.apiclient,
        self.services["volume"],
        zoneid=self.zone.id,
        account=self.account.name,
        domainid=self.account.domainid,
        diskofferingid=self.disk_offering.id,
    )
    self.debug("Created volume: %s for account: %s" %
               (extra_volume.id, self.account.name))

    # The new volume must be visible via listVolumes
    listed = Volume.list(self.apiclient, id=extra_volume.id)
    self.assertNotEqual(listed, None,
                        "Check if volume exists in ListVolumes")
    self.assertEqual(isinstance(listed, list), True,
                     "Check list volumes response for valid list")

    # Attaching beyond the limit must raise
    with self.assertRaises(Exception):
        self.debug("Trying to Attach volume: %s to VM: %s" %
                   (extra_volume.id, self.virtual_machine.id))
        self.virtual_machine.attach_volume(self.apiclient, extra_volume)
    return
def test_09_delete_detached_volume(self): """Delete a Volume unattached to an VM """ # Validate the following # 1. volume should be deleted successfully and listVolume should not # contain the deleted volume details. # 2. "Delete Volume" menu item not shown under "Actions" menu. # (UI should not allow to delete the volume when it is attached # to instance by hiding the menu Item) self.debug("Delete Volume ID: %s" % self.volume.id) self.volume_1 = Volume.create( self.apiclient, self.services, account=self.account.name, domainid=self.account.domainid ) self.virtual_machine.attach_volume(self.apiClient, self.volume_1) self.virtual_machine.detach_volume(self.apiClient, self.volume_1) cmd = deleteVolume.deleteVolumeCmd() cmd.id = self.volume_1.id self.apiClient.deleteVolume(cmd) list_volume_response = Volume.list(self.apiClient, id=self.volume_1.id, type="DATADISK") self.assertEqual(list_volume_response, None, "Check if volume exists in ListVolumes") return
def test_09_delete_detached_volume(self):
    """Delete a volume that has been detached from its VM.

    Validates that the volume deletes successfully and that listVolumes
    no longer returns it afterwards.
    """
    self.debug("Delete Volume ID: %s" % self.volume.id)

    self.volume_1 = Volume.create(self.apiclient,
                                  self.services,
                                  account=self.account.name,
                                  domainid=self.account.domainid)

    # Attach and immediately detach so the volume ends up unattached
    self.virtual_machine.attach_volume(self.apiClient, self.volume_1)
    self.virtual_machine.detach_volume(self.apiClient, self.volume_1)

    cmd = deleteVolume.deleteVolumeCmd()
    cmd.id = self.volume_1.id
    self.apiClient.deleteVolume(cmd)

    # The deleted volume must be gone from listVolumes
    listed = Volume.list(self.apiClient,
                         id=self.volume_1.id,
                         type='DATADISK')
    self.assertEqual(listed, None, "Check if volume exists in ListVolumes")
    return
def create_domain_account_user(parentDomain=None):
    """Create a domain (optionally under *parentDomain*) plus an account,
    user, project and data volume inside it; register everything for class
    cleanup and return the created objects.

    NOTE(review): references ``cls`` as a free variable -- presumably this
    function is defined inside a classmethod (e.g. setUpClass) that
    provides ``cls``; confirm the enclosing scope.

    :param parentDomain: optional parent domain object; when given, the new
        domain is created under it.
    :returns: dict with keys 'domain', 'account', 'user', 'project',
        'volume'.
    """
    domain = Domain.create(cls.api_client,
                           cls.services["domain"],
                           parentdomainid=parentDomain.id if parentDomain else None)
    cls._cleanup.append(domain)
    # Create an Account associated with domain
    account = Account.create(cls.api_client,
                             cls.services["account"],
                             domainid=domain.id)
    cls._cleanup.append(account)
    # Create an User, Project, Volume associated with account
    user = User.create(cls.api_client,
                       cls.services["user"],
                       account=account.name,
                       domainid=account.domainid)
    cls._cleanup.append(user)
    project = Project.create(cls.api_client,
                             cls.services["project"],
                             account=account.name,
                             domainid=account.domainid)
    cls._cleanup.append(project)
    volume = Volume.create(cls.api_client,
                           cls.services["volume"],
                           zoneid=cls.zone.id,
                           account=account.name,
                           domainid=account.domainid,
                           diskofferingid=cls.disk_offering.id)
    cls._cleanup.append(volume)
    return {'domain': domain, 'account': account, 'user': user,
            'project': project, 'volume': volume}
def test_04_delete_snapshot(self):
    """Delete a volume snapshot and verify it is fully removed.

    # 1. Snapshot the Volume
    # 2. Delete the snapshot
    # 3. Verify snapshot is removed by calling List Snapshots API
    # 4. Verify snapshot was removed from image store
    """
    self.debug("Creating volume under account: %s" % self.account.name)
    data_volume = Volume.create(
        self.apiclient,
        self.services["volume"],
        zoneid=self.zone.id,
        account=self.account.name,
        domainid=self.account.domainid,
        diskofferingid=self.disk_offering.id,
    )
    self.debug("Created volume: %s" % data_volume.id)

    self.debug("Attaching volume to vm: %s" % self.virtual_machine.id)
    self.virtual_machine.attach_volume(self.apiclient, data_volume)
    self.debug("Volume attached to vm")

    attached = list_volumes(self.apiclient,
                            virtualmachineid=self.virtual_machine.id,
                            type="DATADISK",
                            id=data_volume.id)
    self.assertEqual(isinstance(attached, list), True,
                     "Check list response returns a valid list")

    # Snapshot the attached volume, then delete the snapshot
    snapshot = Snapshot.create(self.apiclient,
                               attached[0].id,
                               account=self.account.name,
                               domainid=self.account.domainid)
    snapshot.delete(self.apiclient)

    # The snapshot must be gone from both the API and the image store
    self.assertEqual(list_snapshots(self.apiclient, id=snapshot.id), None,
                     "Check if result exists in list item call")
    self.assertFalse(is_snapshot_on_nfs(self.apiclient, self.dbclient,
                                        self.config, self.zone.id,
                                        snapshot.id))
    return
def test_13_snapshot_detached_vol_with_glid(self):
    """Snapshot a data volume of a stopped VM and verify the snapshot has
    a StorPool global id."""
    data_volume = Volume.create(
        self.apiclient,
        {"diskname": "StorPoolDisk-GlId-%d" % random.randint(0, 100)},
        zoneid=self.zone.id,
        diskofferingid=self.disk_offering.id,
    )
    self.virtual_machine3.start(self.apiclient)
    self.virtual_machine3.attach_volume(self.apiclient, data_volume)

    attached = list_volumes(self.apiclient,
                            virtualmachineid=self.virtual_machine3.id,
                            id=data_volume.id)
    self.assertIsNotNone(attached, "Volume was not attached")
    self.helper.storpool_volume_globalid(attached[0])

    # Take the snapshot while the VM is stopped
    self.virtual_machine3.stop(self.apiclient, forced=True)
    snapshot = Snapshot.create(
        self.apiclient,
        volume_id=data_volume.id,
    )
    self.assertIsNotNone(snapshot, "Could not create snapshot")
    self.helper.storpool_snapshot_globalid(snapshot)

    self._cleanup.append(data_volume)
    self._cleanup.append(snapshot)
def test_07_migrate_vm_live_attach_disk_on_remote(self):
    """Attach data disks to the remote-host VM and live-migrate it after
    each attach."""
    global vm2
    global data_disk_2
    data_disk_2 = Volume.create(
        self.apiclient,
        {"diskname": "StorPoolDisk-4"},
        zoneid=self.zone.id,
        diskofferingid=self.disk_offering.id,
    )
    cfg.logger.info("Created volume with ID: %s" % data_disk_2.id)

    # First migration: with the freshly created data disk attached
    self.virtual_machine_remote.attach_volume(self.apiclient, data_disk_2)
    destinationHost, _ = self.helper.get_destination_pools_hosts(
        vm2, self.host_remote)
    vm2 = self.helper.migrateVm(self.virtual_machine_remote,
                                destinationHost)

    # Second migration: with the pre-existing remote volume attached too
    self.virtual_machine_remote.attach_volume(self.apiclient,
                                              self.volume_remote)
    destinationHost, _ = self.helper.get_destination_pools_hosts(
        vm2, self.host_remote)
    vm2 = self.helper.migrateVm(self.virtual_machine_remote,
                                destinationHost)
def test_07_migrate_vm_live_attach_disk_on_remote(self):
    """ Add a data disk and migrate vm, data disk and root disk """
    global vm2
    global data_disk_2
    data_disk_2 = Volume.create(self.apiclient,
                                self.services,
                                account=self.account.name,
                                domainid=self.account.domainid)
    self.debug("Created volume with ID: %s" % data_disk_2.id)

    # Attach the new data disk, then live-migrate the VM
    self.virtual_machine_on_remote.attach_volume(self.apiclient,
                                                 data_disk_2)
    destinationHost, _ = self.helper.get_destination_pools_hosts(
        self.apiclient, vm2, self.host_remote)
    vm2 = self.helper.migrateVm(self.apiclient,
                                self.virtual_machine_on_remote,
                                destinationHost)

    # Attach the pre-existing remote volume and migrate once more
    self.virtual_machine_on_remote.attach_volume(self.apiclient,
                                                 self.volume_on_remote)
    destinationHost, _ = self.helper.get_destination_pools_hosts(
        self.apiclient, vm2, self.host_remote)
    vm2 = self.helper.migrateVm(self.apiclient,
                                self.virtual_machine_on_remote,
                                destinationHost)
def test_02_migrate_vm_live_attach_disk_on_local(self):
    """Attach data disks to the local-host VM and live-migrate it after
    each attach.

    Cleanup: removed commented-out list_virtual_machines dead code.
    """
    global vm
    global data_disk_1
    data_disk_1 = Volume.create(
        self.apiclient,
        {"diskname": "StorPoolDisk-4"},
        zoneid=self.zone.id,
        diskofferingid=self.disk_offering.id,
    )
    cfg.logger.info("Created volume with ID: %s" % data_disk_1.id)

    # Migrate once with the new data disk attached
    self.virtual_machine.attach_volume(self.apiclient, data_disk_1)
    destinationHost, vol_list = self.helper.get_destination_pools_hosts(
        vm, self.host)
    vm = self.helper.migrateVm(self.virtual_machine, destinationHost)

    # Migrate again with the pre-existing volume attached too
    self.virtual_machine.attach_volume(self.apiclient, self.volume)
    destinationHost, vol_list = self.helper.get_destination_pools_hosts(
        vm, self.host)
    vm = self.helper.migrateVm(self.virtual_machine, destinationHost)
def test_02_volume_attach_max(self): """Test attach volumes (more than max) to an instance """ # Validate the following # 1. Attach one more data volume to VM (Already 5 attached) # 2. Attach volume should fail # Create a volume and attach to VM volume = Volume.create(self.apiclient, self.services["volume"], zoneid=self.zone.id, account=self.account.name, domainid=self.account.domainid, diskofferingid=self.disk_offering.id) self.debug("Created volume: %s for account: %s" % (volume.id, self.account.name)) # Check List Volume response for newly created volume list_volume_response = Volume.list(self.apiclient, id=volume.id) self.assertNotEqual(list_volume_response, None, "Check if volume exists in ListVolumes") self.assertEqual(isinstance(list_volume_response, list), True, "Check list volumes response for valid list") # Attach volume to VM with self.assertRaises(Exception): self.debug("Trying to Attach volume: %s to VM: %s" % (volume.id, self.virtual_machine.id)) self.virtual_machine.attach_volume(self.apiclient, volume) return
def test_01_attach_datadisk_to_vm_on_zwps(self):
    """ Attach Data Disk To VM on ZWPS
    1. Check if zwps storage pool exists.
    2. Adding tag to zone wide primary storage
    3. Launch a VM on ZWPS
    4. Attach data disk to vm which is on zwps.
    5. Verify disk is attached.

    Idiom fix: replaced ``len(list(genexp)) < 1`` with ``not any(...)``
    (no throwaway list just to count it).
    """
    # Step 1: need at least one ZONE-scope pool
    if not any(pool.scope == "ZONE" for pool in self.pools):
        self.skipTest("There must be at least one zone wide \
                storage pools available in the setup")

    # Tag each zone-wide storage pool with a distinct zone tag
    zone_no = 1
    for storagePool in self.pools:
        if storagePool.scope == "ZONE":
            StoragePool.update(self.apiclient,
                               id=storagePool.id,
                               tags=[ZONETAG1[:-1] + repr(zone_no)])
            zone_no += 1

    self.vm = VirtualMachine.create(
        self.apiclient,
        self.testdata["small"],
        templateid=self.template.id,
        accountid=self.account.name,
        domainid=self.account.domainid,
        serviceofferingid=self.service_offering_zone1.id,
        zoneid=self.zone.id)

    self.data_volume_created = Volume.create(
        self.userapiclient,
        self.testdata["volume"],
        zoneid=self.zone.id,
        account=self.account.name,
        domainid=self.account.domainid,
        diskofferingid=self.disk_offering.id)
    self.cleanup.append(self.data_volume_created)

    # Step 2: attach the data disk to the VM on ZWPS
    self.vm.attach_volume(self.userapiclient, self.data_volume_created)

    data_volumes_list = Volume.list(self.userapiclient,
                                    id=self.data_volume_created.id,
                                    virtualmachineid=self.vm.id)
    data_volume = data_volumes_list[0]
    status = validateList(data_volume)

    # Step 3: the attached disk must list successfully
    self.assertEqual(status[0], PASS,
                     "Check: Data if Disk is attached to VM")
    return
def setUpClass(cls):
    """Create the shared account, offerings, VM and data volume for the
    volume resize/attach tests.

    Cleanup: removed a commented-out validation block on the storage pool
    listing.
    """
    testClient = super(TestVolumes, cls).getClsTestClient()
    cls.apiclient = testClient.getApiClient()
    cls.services = testClient.getParsedTestDataConfig()
    cls._cleanup = []

    # Get Zone, Domain and templates
    cls.domain = get_domain(cls.apiclient)
    cls.zone = get_zone(cls.apiclient, testClient.getZoneForTests())
    cls.services['mode'] = cls.zone.networktype
    cls.hypervisor = testClient.getHypervisorInfo()
    cls.invalidStoragePoolType = False

    # Disk offerings: default, resized, and custom-size variants
    cls.disk_offering = DiskOffering.create(cls.apiclient,
                                            cls.services["disk_offering"])
    cls.resized_disk_offering = DiskOffering.create(
        cls.apiclient, cls.services["resized_disk_offering"])
    cls.custom_resized_disk_offering = DiskOffering.create(
        cls.apiclient,
        cls.services["resized_disk_offering"],
        custom=True)

    template = get_template(cls.apiclient, cls.zone.id,
                            cls.services["ostype"])
    if template == FAILED:
        assert False, "get_template() failed to return template with description %s" % cls.services[
            "ostype"]

    cls.services["domainid"] = cls.domain.id
    cls.services["zoneid"] = cls.zone.id
    cls.services["template"] = template.id
    cls.services["diskofferingid"] = cls.disk_offering.id
    cls.services['resizeddiskofferingid'] = cls.resized_disk_offering.id
    cls.services[
        'customresizeddiskofferingid'] = cls.custom_resized_disk_offering.id

    # Account, compute offering, VM and the volume under test
    cls.account = Account.create(cls.apiclient,
                                 cls.services["account"],
                                 domainid=cls.domain.id)
    cls.service_offering = ServiceOffering.create(
        cls.apiclient, cls.services["service_offerings"]["tiny"])
    cls.virtual_machine = VirtualMachine.create(
        cls.apiclient,
        cls.services,
        accountid=cls.account.name,
        domainid=cls.account.domainid,
        serviceofferingid=cls.service_offering.id,
        mode=cls.services["mode"])

    # NOTE(review): the result of this listing is unused; kept because the
    # call itself exercises the API -- confirm it can be dropped.
    pools = StoragePool.list(cls.apiclient)

    cls.volume = Volume.create(cls.apiclient,
                               cls.services,
                               account=cls.account.name,
                               domainid=cls.account.domainid)
    cls._cleanup = [
        cls.resized_disk_offering,
        cls.custom_resized_disk_offering,
        cls.service_offering,
        cls.disk_offering,
        cls.volume,
        cls.account
    ]
def test_02_deploy_vm_account_limit_reached(self):
    """Deploy a VM with an admin account where the account has used the
    resources but at the domain level they are available; verify one more
    volume fits inside the raised account limit and the next one fails."""
    self.virtualMachine = VirtualMachine.create(
        self.api_client,
        self.services["virtual_machine"],
        accountid=self.child_do_admin.name,
        domainid=self.child_do_admin.domainid,
        serviceofferingid=self.service_offering.id)

    accounts = Account.list(self.apiclient, id=self.child_do_admin.id)
    self.assertEqual(validateList(accounts)[0], PASS,
                     "accounts list validation failed")

    # Raise the account primary storage limit to current usage + 3 GB
    self.initialResourceCount = int(accounts[0].primarystoragetotal)
    accountLimit = self.initialResourceCount + 3
    self.debug("Setting up account and domain hierarchy")
    response = self.updatePrimaryStorageLimits(accountLimit=accountLimit)
    self.assertEqual(response[0], PASS, response[1])

    # A 2 GB volume still fits inside the new limit
    self.services["volume"]["size"] = 2
    self.services["disk_offering"]["disksize"] = 2
    try:
        disk_offering = DiskOffering.create(
            self.apiclient, services=self.services["disk_offering"])
        self.cleanup.append(disk_offering)
        Volume.create(self.apiclient,
                      self.services["volume"],
                      zoneid=self.zone.id,
                      account=self.child_do_admin.name,
                      domainid=self.child_do_admin.domainid,
                      diskofferingid=disk_offering.id)
    except Exception as e:
        self.fail("failed to create volume: %s" % e)

    # A second 2 GB volume exceeds the limit and must fail
    with self.assertRaises(Exception):
        Volume.create(self.apiclient,
                      self.services["volume"],
                      zoneid=self.zone.id,
                      account=self.child_do_admin.name,
                      domainid=self.child_do_admin.domainid,
                      diskofferingid=disk_offering.id)
    return
def test_01_attach_new_volume_to_stopped_VM(self):
    """Attach a volume to a stopped virtual machine, then start VM"""
    self.virtual_machine.stop(self.apiClient)
    new_volume = Volume.create(
        self.apiClient,
        self.testdata[TestData.volume_2],
        account=self.account.name,
        domainid=self.domain.id,
        zoneid=self.zone.id,
        diskofferingid=self.disk_offering.id,
    )
    self.cleanup.append(new_volume)
    # The volume must exist on the CloudStack side before the attach
    self._check_and_get_cs_volume(new_volume.id, self.testdata[TestData.volume_2][TestData.diskName])
    new_volume = self.virtual_machine.attach_volume(self.apiClient, new_volume)
    newvolume = self._check_and_get_cs_volume(new_volume.id, self.testdata[TestData.volume_2][TestData.diskName])
    self.virtual_machine.start(self.apiClient)
    vm = self._get_vm(self.virtual_machine.id)
    # The attached volume must reference the now-running VM
    self.assertEqual(newvolume.virtualmachineid, vm.id,
                     TestVolumes._volume_vm_id_and_vm_id_do_not_match_err_msg)
    self.assertEqual(vm.state.lower(), "running",
                     TestVolumes._vm_not_in_running_state_err_msg)
    # SolidFire-side verification: account id, size with HSR, VAG and IQN
    sf_account_id = sf_util.get_sf_account_id(
        self.cs_api,
        self.account.id,
        self.primary_storage.id,
        self,
        TestVolumes._sf_account_id_should_be_non_zero_int_err_msg,
    )
    sf_volume_size = sf_util.get_volume_size_with_hsr(self.cs_api, new_volume, self)
    self._verify_hsr(self.disk_offering.disksize,
                     self.disk_offering.hypervisorsnapshotreserve,
                     sf_volume_size)
    sf_vag_id = sf_util.get_vag_id(self.cs_api, self.cluster.id,
                                   self.primary_storage.id, self)
    sf_iscsi_name = sf_util.get_iqn(self.cs_api, new_volume, self)
    sf_volumes = self._get_active_sf_volumes(sf_account_id)
    sf_volume = sf_util.check_and_get_sf_volume(sf_volumes, newvolume.name, self)
    sf_util.check_size_and_iops(sf_volume, newvolume, sf_volume_size, self)
    sf_util.check_vag(sf_volume, sf_vag_id, self)
    # The SR backing this volume must be visible on the XenServer host
    self._check_xen_sr(sf_iscsi_name)
    # Detach volume
    new_volume = self.virtual_machine.detach_volume(self.apiClient, new_volume)
def test_11_migrate_volume_and_change_offering(self):
    # Validates the following
    # 1. Creates a new Volume with a small disk offering
    # 2. Migrates the Volume to another primary storage and changes the offering
    # 3. Verifies the Volume has new offering when migrated to the new storage.
    small_offering = list_disk_offering(self.apiclient, name="Small")[0]
    large_offering = list_disk_offering(self.apiclient, name="Large")[0]
    # NOTE(review): this call uses self.apiClient while the rest of the
    # method uses self.apiclient -- presumably both clients exist on this
    # test class; confirm.
    volume = Volume.create(
        self.apiClient, self.services,
        zoneid=self.zone.id,
        account=self.account.name,
        domainid=self.account.domainid,
        diskofferingid=small_offering.id)
    self.debug("Created a small volume: %s" % volume.id)
    self.virtual_machine.attach_volume(self.apiclient, volume=volume)
    # KVM path stops the VM before migrating the volume.
    if self.virtual_machine.hypervisor == "KVM":
        self.virtual_machine.stop(self.apiclient)
    pools = StoragePool.listForMigration(self.apiclient, id=volume.id)
    pool = None
    if pools and len(pools) > 0:
        pool = pools[0]
    else:
        raise self.skipTest("Not enough storage pools found, skipping test")
    # Clear the pool's storage tags so the target offering is usable there.
    if hasattr(pool, 'tags'):
        StoragePool.update(self.apiclient, id=pool.id, tags="")
    self.debug("Migrating Volume-ID: %s to Pool: %s" % (volume.id, pool.id))
    # VMware and XenServer can live-migrate the attached volume.
    livemigrate = False
    if self.virtual_machine.hypervisor.lower() == "vmware" or self.virtual_machine.hypervisor.lower() == 'xenserver':
        livemigrate = True
    Volume.migrate(self.apiclient, volumeid=volume.id, storageid=pool.id,
                   newdiskofferingid=large_offering.id,
                   livemigrate=livemigrate)
    if self.virtual_machine.hypervisor == "KVM":
        self.virtual_machine.start(self.apiclient)
    migrated_vol = Volume.list(self.apiclient, id=volume.id)[0]
    # The migrated volume must carry the new (large) offering.
    self.assertEqual(migrated_vol.diskofferingname, large_offering.name,
                     "Offering name did not match with the new one ")
    return
def test10_add_vm_with_datera_storage_and_volume(self):
    """Create a Datera-backed primary storage pool, deploy a VM on it,
    attach a data volume, and verify CloudStack's reported used disk
    size differs from the XenServer SR's physical utilisation.

    Fixes: the attach call referenced an undefined local name
    ``virtual_machine``; the deployed VM is stored on ``self``. Also
    fails with a clear message (instead of a NameError) when no
    matching XenServer SR record is found.
    """
    primarystorage = self.testdata[TestData.primaryStorage]

    # Register the Datera-backed primary storage with the cluster.
    primary_storage = StoragePool.create(
        self.apiClient,
        primarystorage,
        scope=primarystorage[TestData.scope],
        zoneid=self.zone.id,
        clusterid=self.cluster.id,
        provider=primarystorage[TestData.provider],
        tags=primarystorage[TestData.tags],
        capacityiops=primarystorage[TestData.capacityIops],
        capacitybytes=primarystorage[TestData.capacityBytes],
        hypervisor=primarystorage[TestData.hypervisor]
    )

    primary_storage_url = primarystorage[TestData.url]
    self._verify_attributes(primary_storage.id, primary_storage_url)

    self.cleanup.append(primary_storage)

    self.virtual_machine = VirtualMachine.create(
        self.apiClient,
        self.testdata[TestData.virtualMachine],
        accountid=self.account.name,
        zoneid=self.zone.id,
        serviceofferingid=self.compute_offering.id,
        templateid=self.template.id,
        domainid=self.domain.id,
        startvm=True
    )

    self._validate_storage(primary_storage, self.virtual_machine)

    volume = Volume.create(
        self.apiClient,
        self.testdata[TestData.volume_1],
        account=self.account.name,
        domainid=self.domain.id,
        zoneid=self.zone.id,
        diskofferingid=self.disk_offering.id
    )

    # BUG FIX: was `virtual_machine.attach_volume(...)` -- an undefined
    # local; the VM deployed above lives in `self.virtual_machine`.
    self.virtual_machine.attach_volume(
        self.apiClient,
        volume
    )

    storage_pools_response = list_storage_pools(
        self.apiClient, id=primary_storage.id)

    # Locate the XenServer SR whose description matches the pool id;
    # fail explicitly if absent instead of raising a NameError below.
    xen_server_response = None
    for key, value in self.xen_session.xenapi.SR.get_all_records().items():
        if value['name_description'] == primary_storage.id:
            xen_server_response = value
    self.assertIsNotNone(
        xen_server_response,
        "No XenServer SR found for storage pool %s" % primary_storage.id)

    self.assertNotEqual(
        int(storage_pools_response[0].disksizeused),
        int(xen_server_response['physical_utilisation']))
def setUpClass(cls):
    """One-time class setup: look up zone/domain/template, create disk
    and service offerings, a test account, one VM and one data volume."""
    cls.testClient = super(TestVolumes, cls).getClsTestClient()
    cls.api_client = cls.testClient.getApiClient()
    cls.services = Services().services
    # Get Zone, Domain and templates
    cls.domain = get_domain(cls.api_client)
    cls.zone = get_zone(cls.api_client, cls.testClient.getZoneForTests())
    cls.services['mode'] = cls.zone.networktype
    cls._cleanup = []
    cls.unsupportedStorageType = False
    cls.hypervisor = cls.testClient.getHypervisorInfo()
    cls.disk_offering = DiskOffering.create(
        cls.api_client,
        cls.services["disk_offering"]
    )
    cls._cleanup.append(cls.disk_offering)
    template = get_template(
        cls.api_client,
        cls.zone.id,
        cls.services["ostype"]
    )
    # Propagate environment ids into the service definitions used below.
    cls.services["zoneid"] = cls.zone.id
    cls.services["virtual_machine"]["zoneid"] = cls.zone.id
    cls.services["virtual_machine"]["template"] = template.id
    cls.services["virtual_machine"][
        "diskofferingid"] = cls.disk_offering.id
    # Create VMs, VMs etc
    cls.account = Account.create(
        cls.api_client,
        cls.services["account"],
        domainid=cls.domain.id
    )
    cls._cleanup.append(cls.account)
    cls.service_offering = ServiceOffering.create(
        cls.api_client,
        cls.services["service_offering"]
    )
    cls._cleanup.append(cls.service_offering)
    # VM and volume are not added to _cleanup -- presumably removed
    # implicitly when the owning account is deleted; confirm.
    cls.virtual_machine = VirtualMachine.create(
        cls.api_client,
        cls.services["virtual_machine"],
        accountid=cls.account.name,
        domainid=cls.account.domainid,
        serviceofferingid=cls.service_offering.id,
    )
    cls.volume = Volume.create(
        cls.api_client,
        cls.services["volume"],
        zoneid=cls.zone.id,
        account=cls.account.name,
        domainid=cls.account.domainid,
        diskofferingid=cls.disk_offering.id
    )
def test_02_increase_volume_size_above_account_limit(self):
    """Test increasing volume size above the account limit

    # Validate the following
    # 1. Create a domain and its admin account
    # 2. Set account primary storage limit more than (5 GB volume + template size of VM)
    #    and less than (20 GB volume + template size of VM)
    # 3. Deploy a VM without any disk offering (only root disk)
    # 4. Create a volume of 5 GB in the account and attach it to the VM
    # 5. Try to (resize) the volume to 20 GB
    # 6. Resize operation should fail"""
    # Setting up account and domain hierarchy
    result = self.setupAccounts()
    self.assertEqual(result[0], PASS, result[1])
    # Template size in GB.
    templateSize = (self.template.size / (1024**3))
    # Limit is 1 GB short of what a 20 GB resize would require.
    accountLimit = ((templateSize + self.disk_offering_20_GB.disksize) - 1)
    response = self.updateResourceLimits(accountLimit=accountLimit)
    self.assertEqual(response[0], PASS, response[1])
    apiclient = self.testClient.getUserApiClient(
        UserName=self.parentd_admin.name,
        DomainName=self.parentd_admin.domain)
    self.assertNotEqual(apiclient, FAILED, "Failed to get api client\
                        of account: %s" % self.parentd_admin.name)
    try:
        virtualMachine = VirtualMachine.create(
            apiclient, self.services["virtual_machine"],
            accountid=self.parentd_admin.name,
            domainid=self.parent_domain.id,
            serviceofferingid=self.service_offering.id
        )
        # 5 GB data disk fits inside the account limit.
        volume = Volume.create(
            apiclient, self.services["volume"], zoneid=self.zone.id,
            account=self.parentd_admin.name, domainid=self.parent_domain.id,
            diskofferingid=self.disk_offering_5_GB.id)
        virtualMachine.attach_volume(apiclient, volume=volume)
        expectedCount = (templateSize + self.disk_offering_5_GB.disksize)
        response = matchResourceCount(
            self.apiclient, expectedCount,
            RESOURCE_PRIMARY_STORAGE,
            accountid=self.parentd_admin.id)
        if response[0] == FAIL:
            raise Exception(response[1])
    except Exception as e:
        self.fail("Failed with exception: %s" % e)
    # XenServer needs the VM stopped before a volume resize.
    if self.hypervisor == str(XEN_SERVER).lower():
        virtualMachine.stop(self.apiclient)
    # Resizing to 20 GB exceeds the account limit and must raise.
    with self.assertRaises(Exception):
        volume.resize(apiclient,
                      diskofferingid=self.disk_offering_20_GB.id)
    return
def test_11_resize_renamed_volume(self):
    '''Resize a volume that was created with a StorPool global id and
    then renamed on the StorPool backend to its CloudStack uuid; both
    lookups must keep resolving to the same backend volume.'''
    volume_on_sp_1 = Volume.create(
        self.apiclient,
        {"diskname": "StorPoolDisk-3"},
        zoneid=self.zone.id,
        diskofferingid=self.disk_offering.id,
    )
    virtual_machine = VirtualMachine.create(
        self.apiclient,
        {"name": "StorPool-%s" % uuid.uuid4()},
        zoneid=self.zone.id,
        templateid=self.template.id,
        serviceofferingid=self.service_offering.id,
        hypervisor=self.hypervisor,
        hostid=self.host_on_local_1.id,
        rootdisksize=10
    )
    virtual_machine.attach_volume(self.apiclient, volume_on_sp_1)
    self.assertEqual(VirtualMachine.RUNNING, virtual_machine.state, "Running")
    listvol = Volume.list(
        self.apiclient,
        virtualmachineid=virtual_machine.id,
        id=volume_on_sp_1.id
    )
    volume = listvol[0]
    # Rename the backend volume to the CloudStack uuid.
    vol = sptypes.VolumeUpdateDesc(rename=volume.id)
    # The StorPool global id is the last component of the volume path.
    name = volume.path.split("/")[3]
    rename = self.spapi.volumeUpdate(volumeName="~" + name, json=vol)
    # resize volume with it's global id
    self.helper.resizing_volume(volume, globalid=True)
    volume_with_global_id = self.spapi.volumeList(volumeName="~" + name)
    # resize volume that was renamed with uuid
    self.helper.resizing_volume(volume, globalid=False)
    volume_with_uuid = self.spapi.volumeList(volumeName=volume.id)
    self.debug("volume_with_global_id %s" % volume_with_global_id)
    self.debug("volume_with_uuid %s" % volume_with_uuid)
    # Both lookups must point at the same backend volume.
    self.assertEqual(volume_with_global_id[0].name, volume_with_uuid[0].name, "Are not the same")
    # Tear down: expunge the VM and delete the data volume.
    cmd = destroyVirtualMachine.destroyVirtualMachineCmd()
    cmd.id = virtual_machine.id
    cmd.expunge = True
    self.apiclient.destroyVirtualMachine(cmd)
    volume_on_sp_1.delete(self.apiclient)
def create_volume(self):
    """Create and return a data volume in the test account, using the
    deployment's 'Small' disk offering."""
    offering = DiskOffering.list(self.apiclient, name='Small')[0]
    return Volume.create(
        self.apiclient,
        self.services,
        zoneid=self.zone.id,
        account=self.account.name,
        domainid=self.account.domainid,
        diskofferingid=offering.id,
    )
def test_02_deploy_vm_account_limit_reached(self):
    """Deploy a VM with a data disk, raise the account primary-storage
    limit to just above current usage, then verify one more 2 GB volume
    succeeds and a second one is rejected."""
    self.virtualMachine = VirtualMachine.create(self.api_client, self.services["virtual_machine"],
                                                accountid=self.child_do_admin.name,
                                                domainid=self.child_do_admin.domainid,
                                                diskofferingid=self.disk_offering.id,
                                                serviceofferingid=self.service_offering.id)
    accounts = Account.list(self.apiclient, id=self.child_do_admin.id)
    self.assertEqual(validateList(accounts)[0], PASS, "accounts list validation failed")
    # Current primary-storage consumption (GB) of the account.
    self.initialResourceCount = int(accounts[0].primarystoragetotal)
    # Head-room for exactly one more 2 GB volume (+1 GB spare).
    accountLimit = self.initialResourceCount + 3
    self.debug("Setting up account and domain hierarchy")
    response = self.updatePrimaryStorageLimits(accountLimit=accountLimit)
    self.assertEqual(response[0], PASS, response[1])
    self.services["volume"]["size"] = self.services["disk_offering"]["disksize"] = 2
    try:
        disk_offering = DiskOffering.create(self.apiclient, services=self.services["disk_offering"])
        self.cleanup.append(disk_offering)
        # First volume fits within the raised limit.
        Volume.create(self.apiclient, self.services["volume"], zoneid=self.zone.id,
                      account=self.child_do_admin.name,
                      domainid=self.child_do_admin.domainid,
                      diskofferingid=disk_offering.id)
    except Exception as e:
        self.fail("failed to create volume: %s" % e)
    # A second volume exceeds the limit and must fail.
    with self.assertRaises(Exception):
        Volume.create(self.apiclient, self.services["volume"], zoneid=self.zone.id,
                      account=self.child_do_admin.name,
                      domainid=self.child_do_admin.domainid,
                      diskofferingid=disk_offering.id)
    return
def test_01_migrateVolume(self):
    """
    @Desc:Volume is not retaining same uuid when migrating from one
          storage to another.
    Step1:Create a volume/data disk
    Step2:Verify UUID of the volume
    Step3:Migrate the volume to another primary storage within
          the cluster
    Step4:Migrating volume to new primary storage should succeed
    Step5:volume UUID should not change even after migration
    """
    vol = Volume.create(
        self.apiclient,
        self.services["volume"],
        diskofferingid=self.disk_offering.id,
        zoneid=self.zone.id,
        account=self.account.name,
        domainid=self.account.domainid,
    )
    self.assertIsNotNone(vol, "Failed to create volume")
    vol_res = Volume.list(self.apiclient, id=vol.id)
    self.assertEqual(validateList(vol_res)[0], PASS, "Invalid response returned for list volumes")
    # Remember the uuid before migration for later comparison.
    vol_uuid = vol_res[0].id
    try:
        self.virtual_machine.attach_volume(self.apiclient, vol)
    except Exception as e:
        self.fail("Attaching data disk to vm failed with error %s" % e)
    pools = StoragePool.listForMigration(self.apiclient, id=vol.id)
    if not pools:
        self.skipTest(
            "No suitable storage pools found for volume migration.\
            Skipping"
        )
    self.assertEqual(validateList(pools)[0], PASS, "invalid pool response from findStoragePoolsForMigration")
    pool = pools[0]
    self.debug("Migrating Volume-ID: %s to Pool: %s" % (vol.id, pool.id))
    try:
        Volume.migrate(self.apiclient, volumeid=vol.id, storageid=pool.id, livemigrate="true")
    except Exception as e:
        self.fail("Volume migration failed with error %s" % e)
    migrated_vols = Volume.list(
        self.apiclient,
        virtualmachineid=self.virtual_machine.id,
        listall="true",
        type="DATADISK"
    )
    self.assertEqual(validateList(migrated_vols)[0], PASS, "invalid volumes response after migration")
    migrated_vol_uuid = migrated_vols[0].id
    # The uuid must survive the migration unchanged.
    self.assertEqual(
        vol_uuid,
        migrated_vol_uuid,
        "Volume is not retaining same uuid when migrating from one\
        storage to another",
    )
    self.virtual_machine.detach_volume(self.apiclient, vol)
    self.cleanup.append(vol)
    return
def setUpClass(cls):
    """One-time class setup: environment lookup, three disk offerings,
    account, service offering, one VM with a data volume; registers all
    created resources for class-level cleanup."""
    testClient = super(TestVolumes, cls).getClsTestClient()
    cls.apiclient = testClient.getApiClient()
    cls.services = testClient.getParsedTestDataConfig()
    cls._cleanup = []
    # Get Zone, Domain and templates
    cls.domain = get_domain(cls.apiclient)
    cls.zone = get_zone(cls.apiclient, testClient.getZoneForTests())
    cls.services["mode"] = cls.zone.networktype
    cls.hypervisor = testClient.getHypervisorInfo()
    cls.invalidStoragePoolType = False
    cls.disk_offering = DiskOffering.create(cls.apiclient, cls.services["disk_offering"])
    cls.resized_disk_offering = DiskOffering.create(cls.apiclient, cls.services["resized_disk_offering"])
    cls.custom_resized_disk_offering = DiskOffering.create(
        cls.apiclient,
        cls.services["resized_disk_offering"],
        custom=True
    )
    template = get_template(cls.apiclient, cls.zone.id, cls.services["ostype"])
    if template == FAILED:
        assert False, "get_template() failed to return template with description %s" % cls.services["ostype"]
    # Propagate environment ids into the shared services dict.
    cls.services["domainid"] = cls.domain.id
    cls.services["zoneid"] = cls.zone.id
    cls.services["template"] = template.id
    cls.services["diskofferingid"] = cls.disk_offering.id
    cls.services["resizeddiskofferingid"] = cls.resized_disk_offering.id
    cls.services["customresizeddiskofferingid"] = cls.custom_resized_disk_offering.id
    # Create VMs, VMs etc
    cls.account = Account.create(cls.apiclient, cls.services["account"], domainid=cls.domain.id)
    cls.service_offering = ServiceOffering.create(cls.apiclient, cls.services["service_offerings"]["tiny"])
    cls.virtual_machine = VirtualMachine.create(
        cls.apiclient,
        cls.services,
        accountid=cls.account.name,
        domainid=cls.account.domainid,
        serviceofferingid=cls.service_offering.id,
        mode=cls.services["mode"],
    )
    pools = StoragePool.list(cls.apiclient)
    # cls.assertEqual(
    #     validateList(pools)[0],
    #     PASS,
    #     "storage pool list validation failed")
    cls.volume = Volume.create(cls.apiclient, cls.services, account=cls.account.name, domainid=cls.account.domainid)
    # NOTE(review): replaces the empty _cleanup list assigned above;
    # everything created by this method is registered here.
    cls._cleanup = [
        cls.resized_disk_offering,
        cls.custom_resized_disk_offering,
        cls.service_offering,
        cls.disk_offering,
        cls.volume,
        cls.account,
    ]
def test_01_storage_migrate_root_and_data_disks(self):
    """Deploy a VM with a data disk and migrate its storage between two
    hosts in both directions, verifying root and data volumes on the
    SolidFire backend after each migration."""
    src_host, dest_host = self._get_source_and_dest_hosts()
    virtual_machine = VirtualMachine.create(
        self.apiClient,
        self.testdata[TestData.virtualMachine],
        accountid=self.account.name,
        zoneid=self.zone.id,
        serviceofferingid=self.compute_offering_1.id,
        templateid=self.template.id,
        domainid=self.domain.id,
        hostid=src_host.id,
        startvm=True
    )
    self.cleanup.append(virtual_machine)
    # The only volume right after deploy is the root disk.
    cs_root_volume = list_volumes(self.apiClient, listall=True, virtualmachineid=virtual_machine.id)[0]
    sf_account_id = sf_util.get_sf_account_id(self.cs_api, self.account.id, self.primary_storage.id, self,
                                              TestVMMigrationWithStorage._sf_account_id_should_be_non_zero_int_err_msg)
    sf_volumes = sf_util.get_active_sf_volumes(self.sfe, sf_account_id)
    sf_root_volume = sf_util.check_and_get_sf_volume(sf_volumes, cs_root_volume.name, self)
    cs_data_volume = Volume.create(
        self.apiClient,
        self.testdata[TestData.volume_1],
        account=self.account.name,
        domainid=self.domain.id,
        zoneid=self.zone.id,
        diskofferingid=self.disk_offering_1.id
    )
    self.cleanup.append(cs_data_volume)
    cs_data_volume = virtual_machine.attach_volume(
        self.apiClient,
        cs_data_volume
    )
    sf_volumes = sf_util.get_active_sf_volumes(self.sfe, sf_account_id)
    sf_data_volume = sf_util.check_and_get_sf_volume(sf_volumes, cs_data_volume.name, self)
    # Migrate to the destination host and verify, then swap the hosts
    # and migrate back the other way.
    sf_root_volume, sf_data_volume = self._migrate_and_verify(virtual_machine, dest_host, cs_root_volume, cs_data_volume,
                                                              sf_account_id, sf_root_volume, sf_data_volume,
                                                              self.xen_session_1, self.xen_session_2)
    src_host, dest_host = dest_host, src_host
    self._migrate_and_verify(virtual_machine, dest_host, cs_root_volume, cs_data_volume, sf_account_id,
                             sf_root_volume, sf_data_volume, self.xen_session_2, self.xen_session_1)
def _execute_migration_failure(self, compute_offering_id, disk_offering_id):
    """Helper: deploy a VM with the given compute offering, attach a
    data volume with the given disk offering, then run a storage
    migration that is expected to fail and verify via
    _fail_migrate_and_verify."""
    src_host, dest_host = self._get_source_and_dest_hosts()
    virtual_machine = VirtualMachine.create(
        self.apiClient,
        self.testdata[TestData.virtualMachine],
        accountid=self.account.name,
        zoneid=self.zone.id,
        serviceofferingid=compute_offering_id,
        templateid=self.template.id,
        domainid=self.domain.id,
        hostid=src_host.id,
        startvm=True)
    self.cleanup.append(virtual_machine)
    # The only volume right after deploy is the root disk.
    cs_root_volume = list_volumes(self.apiClient, listall=True,
                                  virtualmachineid=virtual_machine.id)[0]
    sf_account_id = sf_util.get_sf_account_id(
        self.cs_api, self.account.id, self.primary_storage.id, self,
        TestVMMigrationWithStorage.
        _sf_account_id_should_be_non_zero_int_err_msg)
    sf_volumes = sf_util.get_active_sf_volumes(self.sfe, sf_account_id)
    sf_root_volume = sf_util.check_and_get_sf_volume(
        sf_volumes, cs_root_volume.name, self)
    cs_data_volume = Volume.create(self.apiClient,
                                   self.testdata[TestData.volume_1],
                                   account=self.account.name,
                                   domainid=self.domain.id,
                                   zoneid=self.zone.id,
                                   diskofferingid=disk_offering_id)
    self.cleanup.append(cs_data_volume)
    cs_data_volume = virtual_machine.attach_volume(self.apiClient,
                                                   cs_data_volume)
    sf_volumes = sf_util.get_active_sf_volumes(self.sfe, sf_account_id)
    sf_data_volume = sf_util.check_and_get_sf_volume(
        sf_volumes, cs_data_volume.name, self)
    # The migration attempt must fail and leave both volumes unchanged.
    self._fail_migrate_and_verify(virtual_machine, dest_host,
                                  cs_root_volume, cs_data_volume,
                                  sf_account_id, sf_root_volume,
                                  sf_data_volume, self.xen_session_1,
                                  self.xen_session_2)
def test_create_volume_under_domain(self):
    """Create a volume under a non-root domain as non-root-domain user

    1. Create a domain under ROOT
    2. Create a user within this domain
    3. As user in step 2. create a volume with standard disk offering
    4. Ensure the volume is created in the domain and available to the
       user in his listVolumes call
    """
    dom = Domain.create(self.apiclient, services={}, name="NROOT", parentdomainid=self.domain.id)
    self.cleanup.append(dom)
    self.assertTrue(dom is not None, msg="Domain creation failed")
    domuser = Account.create(apiclient=self.apiclient, services=self.services["account"], admin=False, domainid=dom.id)
    # Insert before the domain entry so the account is removed first
    # (presumably cleanup is processed in reverse order -- confirm).
    self.cleanup.insert(-2, domuser)
    self.assertTrue(domuser is not None)
    # Act as the newly created domain user from here on.
    domapiclient = self.testClient.getUserApiClient(UserName=domuser.name, DomainName=dom.name)
    diskoffering = DiskOffering.list(self.apiclient)
    self.assertTrue(isinstance(diskoffering, list), msg="DiskOffering list is not a list?")
    self.assertTrue(
        len(diskoffering) > 0, "no disk offerings in the deployment")
    vol = Volume.create(domapiclient,
                        services=self.services["volume"],
                        zoneid=self.zone.id,
                        account=domuser.name,
                        domainid=dom.id,
                        diskofferingid=diskoffering[0].id)
    self.assertTrue(
        vol is not None, "volume creation fails in domain %s as user %s" % (dom.name, domuser.name))
    listed_vol = Volume.list(domapiclient, id=vol.id)
    self.assertTrue(
        listed_vol is not None and isinstance(listed_vol, list),
        "invalid response from listVolumes for volume %s" % vol.id)
    self.assertTrue(
        listed_vol[0].id == vol.id,
        "Volume returned by list volumes %s not matching with queried\
        volume %s in domain %s" % (listed_vol[0].id, vol.id, dom.name))
def test_06_attachvolume_to_a_stopped_vm(self):
    """
    Test Attach Volume To A Stopped VM
    """
    # Precondition: the shared VM must already be in Stopped state.
    list_vm_response = VirtualMachine.list(self.user_api_client, id=self.virtual_machine.id)
    self.assertEqual(list_vm_response[0].state, 'Stopped', msg="Check if VM is in Stopped state")
    # Custom offering allows passing an explicit size (1 GB).
    custom_disk_offering = DiskOffering.list(self.user_api_client, name="custom")
    # Stored on the class so subsequent tests can reuse the volume.
    self.__class__.volume = Volume.create(
        self.user_api_client,
        self.services["volume"],
        zoneid=self.zone.id,
        account=self.account.name,
        domainid=self.account.domainid,
        diskofferingid=custom_disk_offering[0].id,
        size=1)
    # Check List Volume response for newly created volume
    list_volume_response = Volume.list(self.user_api_client, id=self.volume.id)
    self.assertNotEqual(list_volume_response, None, "Check if volume exists in ListVolumes")
    # Attach volume to VM
    cmd = attachVolume.attachVolumeCmd()
    cmd.id = self.volume.id
    cmd.virtualmachineid = self.virtual_machine.id
    cmd.deviceid = 1
    vol1 = self.user_api_client.attachVolume(cmd)
    # Check all volumes attached to same VM
    list_volume_response = Volume.list(
        self.user_api_client,
        virtualmachineid=self.virtual_machine.id,
        type='DATADISK',
        listall=True)
    self.assertNotEqual(list_volume_response, None, "Check if volume exists in ListVolumes")
    self.assertEqual(isinstance(list_volume_response, list), True, "Check list volumes response for valid list")
    # The device id requested above must be reflected in the listing.
    self.assertEqual(list_volume_response[0].deviceid, 1, "Check listed volume device id is 1")
    return
def test_06_deploy_startvm_attach_detach(self): """Test Deploy Virtual Machine with startVM=false and attach detach volumes """ # Validate the following: # 1. deploy Vm with the startvm=false. Attach volume to the instance # 2. listVM command should return the deployed VM.State of this VM # should be "Stopped". # 3. Attach volume should be successful # 4. Detach volume from instance. Detach should be successful self.debug("Deploying instance in the account: %s" % self.account.name) self.virtual_machine = VirtualMachine.create( self.apiclient, self.services["virtual_machine"], accountid=self.account.name, domainid=self.account.domainid, serviceofferingid=self.service_offering.id, startvm=False, diskofferingid=self.disk_offering.id, ) response = self.virtual_machine.getState(self.apiclient, VirtualMachine.STOPPED) self.assertEqual(response[0], PASS, response[1]) self.debug("Creating a volume in account: %s" % self.account.name) volume = Volume.create( self.apiclient, self.services["volume"], zoneid=self.zone.id, account=self.account.name, domainid=self.account.domainid, diskofferingid=self.disk_offering.id, ) self.debug("Created volume in account: %s" % self.account.name) self.debug("Attaching volume to instance: %s" % self.virtual_machine.name) try: self.virtual_machine.attach_volume(self.apiclient, volume) except Exception as e: self.fail("Attach volume failed with Exception: %s" % e) self.debug("Detaching the disk: %s" % volume.name) self.virtual_machine.detach_volume(self.apiclient, volume) self.debug("Datadisk %s detached!" % volume.name) volumes = Volume.list( self.apiclient, virtualmachineid=self.virtual_machine.id, type="DATADISK", id=volume.id, listall=True ) self.assertEqual(volumes, None, "List Volumes should not list any volume for instance") return
def test_06_resize_detach_vol_globalid(self):
    """Create a data disk, cycle it through attach/detach on
    virtual_machine2, then resize the detached volume via its StorPool
    global id and delete it."""
    disk = Volume.create(
        self.apiclient,
        {"diskname": "StorPoolDisk-GlId-%d" % random.randint(0, 100)},
        zoneid=self.zone.id,
        diskofferingid=self.disk_offering.id,
    )
    # Attach and immediately detach so the disk has been on a VM once.
    self.virtual_machine2.attach_volume(self.apiclient, disk)
    self.virtual_machine2.detach_volume(self.apiclient, disk)
    # Re-fetch the volume record and resize it by global id.
    refreshed = Volume.list(self.apiclient, id=disk.id)[0]
    self.helper.resizing_volume(refreshed, globalid=True)
    Volume.delete(disk, self.apiclient)
def test_08_delete_volume_was_attached(self):
    '''Delete volume that was attached to a VM and is detached now'''
    TestScaleIOVolumes._start_vm(self.virtual_machine)

    #######################################
    # STEP 1: Create vol and attach to VM #
    #######################################

    new_volume = Volume.create(self.apiClient,
                               self.testdata[TestData.volume_2],
                               account=self.account.name,
                               domainid=self.domain.id,
                               zoneid=self.zone.id,
                               diskofferingid=self.disk_offering.id)
    # Keep a handle on the original object for deletion after detach
    # (attach/detach below rebind new_volume to refreshed copies).
    volume_to_delete_later = new_volume
    new_volume = self.virtual_machine.attach_volume(
        self.apiClient,
        new_volume)
    vm = self._get_vm(self.virtual_machine.id)
    self.assertEqual(new_volume.virtualmachineid, vm.id, "Check if attached to virtual machine")
    self.assertEqual(vm.state.lower(), 'running', str(vm.state))

    #######################################
    # STEP 2: Detach and delete volume    #
    #######################################

    new_volume = self.virtual_machine.detach_volume(
        self.apiClient,
        new_volume)
    vm = self._get_vm(self.virtual_machine.id)
    # After detach the volume must no longer reference the VM.
    self.assertEqual(new_volume.virtualmachineid, None, "Check if attached to virtual machine")
    self.assertEqual(vm.state.lower(), 'running', str(vm.state))
    volume_to_delete_later.delete(self.apiClient)
    # A deleted volume must not appear in listVolumes.
    list_volumes_response = list_volumes(self.apiClient, id=new_volume.id)
    self.assertEqual(list_volumes_response, None, "Check volume was deleted")
def test10_add_vm_with_datera_storage_and_volume(self):
    """Create a Datera-backed primary storage pool, deploy a VM on it,
    attach a data volume, and verify CloudStack's reported used disk
    size differs from the XenServer SR's physical utilisation.

    Fixes: the attach call referenced an undefined local name
    ``virtual_machine``; the deployed VM is stored on ``self``. Also
    fails with a clear message (instead of a NameError) when no
    matching XenServer SR record is found.
    """
    primarystorage = self.testdata[TestData.primaryStorage]

    # Register the Datera-backed primary storage with the cluster.
    primary_storage = StoragePool.create(
        self.apiClient,
        primarystorage,
        scope=primarystorage[TestData.scope],
        zoneid=self.zone.id,
        clusterid=self.cluster.id,
        provider=primarystorage[TestData.provider],
        tags=primarystorage[TestData.tags],
        capacityiops=primarystorage[TestData.capacityIops],
        capacitybytes=primarystorage[TestData.capacityBytes],
        hypervisor=primarystorage[TestData.hypervisor])

    primary_storage_url = primarystorage[TestData.url]
    self._verify_attributes(primary_storage.id, primary_storage_url)

    self.cleanup.append(primary_storage)

    self.virtual_machine = VirtualMachine.create(
        self.apiClient,
        self.testdata[TestData.virtualMachine],
        accountid=self.account.name,
        zoneid=self.zone.id,
        serviceofferingid=self.compute_offering.id,
        templateid=self.template.id,
        domainid=self.domain.id,
        startvm=True)

    self._validate_storage(primary_storage, self.virtual_machine)

    volume = Volume.create(self.apiClient,
                           self.testdata[TestData.volume_1],
                           account=self.account.name,
                           domainid=self.domain.id,
                           zoneid=self.zone.id,
                           diskofferingid=self.disk_offering.id)

    # BUG FIX: was `virtual_machine.attach_volume(...)` -- an undefined
    # local; the VM deployed above lives in `self.virtual_machine`.
    self.virtual_machine.attach_volume(self.apiClient, volume)

    storage_pools_response = list_storage_pools(self.apiClient,
                                                id=primary_storage.id)

    # Locate the XenServer SR whose description matches the pool id;
    # fail explicitly if absent instead of raising a NameError below.
    xen_server_response = None
    for key, value in self.xen_session.xenapi.SR.get_all_records().items():
        if value['name_description'] == primary_storage.id:
            xen_server_response = value
    self.assertIsNotNone(
        xen_server_response,
        "No XenServer SR found for storage pool %s" % primary_storage.id)

    self.assertNotEqual(int(storage_pools_response[0].disksizeused),
                        int(xen_server_response['physical_utilisation']))
def test_03_verify_libvirt_attach_disk(self):
    """ Verify that libvirt settings are expected after a disk add """
    # Skip outright on hypervisors this check does not apply to.
    if self.hypervisorNotSupported:
        self.skipTest("Hypervisor not supported")
    # Create a data disk backed by the sparse disk offering.
    data_disk = Volume.create(
        self.apiclient,
        self.services,
        zoneid=self.zone.id,
        account=self.account.name,
        domainid=self.account.domainid,
        diskofferingid=self.sparse_disk_offering.id)
    self.volume = data_disk
    self.virtual_machine.attach_volume(self.apiclient, self.volume)
    # The domain should now expose three block devices in virsh.
    self.verifyVirshState(3)
def test_09_attach_detach_vol_glId(self):
    """Attach a StorPool-backed data disk to virtual_machine3, verify it
    on the StorPool backend by global id, then force-stop the VM, detach
    the disk and delete it.

    Fixes: the list_volumes() result was bound to a local named ``list``,
    shadowing the builtin; renamed to ``vols``.
    """
    volume = Volume.create(self.apiclient,
                           {"diskname": "StorPoolDisk-GlId-%d" % random.randint(0, 100)},
                           zoneid=self.zone.id,
                           diskofferingid=self.disk_offering.id,
                           )
    self.virtual_machine3.attach_volume(self.apiclient, volume)
    # Confirm CloudStack reports the volume attached to the VM.
    vols = list_volumes(self.apiclient,
                        virtualmachineid=self.virtual_machine3.id,
                        id=volume.id)
    self.assertIsNotNone(vols, "Volume was not attached")
    # Verify the volume exists on the StorPool backend by global id.
    self.helper.storpool_volume_globalid(vols[0])
    self.virtual_machine3.stop(self.apiclient, forced=True)
    detached = self.virtual_machine3.detach_volume(self.apiclient, vols[0])
    self.assertIsNone(detached.virtualmachineid,
                      "Volume was not detached from vm")
    Volume.delete(volume, self.apiclient)
def test_create_volume_under_domain(self):
    """Create a volume under a non-root domain as non-root-domain user

    1. Create a domain under ROOT
    2. Create a user within this domain
    3. As user in step 2. create a volume with standard disk offering
    4. Ensure the volume is created in the domain and available to the
       user in his listVolumes call
    """
    dom = Domain.create(self.apiclient, services={}, name="NROOT", parentdomainid=self.domain.id)
    self.cleanup.append(dom)
    self.assertTrue(dom is not None, msg="Domain creation failed")
    domuser = Account.create(
        apiclient=self.apiclient,
        services=self.services["account"],
        admin=False,
        domainid=dom.id
    )
    # Insert before the domain entry so the account is removed first
    # (presumably cleanup is processed in reverse order -- confirm).
    self.cleanup.insert(-2, domuser)
    self.assertTrue(domuser is not None)
    # Act as the newly created domain user from here on.
    domapiclient = self.testClient.getUserApiClient(UserName=domuser.name, DomainName=dom.name)
    diskoffering = DiskOffering.list(self.apiclient)
    self.assertTrue(isinstance(diskoffering, list), msg="DiskOffering list is not a list?")
    self.assertTrue(len(diskoffering) > 0, "no disk offerings in the deployment")
    vol = Volume.create(
        domapiclient,
        services=self.services["volume"],
        zoneid=self.zone.id,
        account=domuser.name,
        domainid=dom.id,
        diskofferingid=diskoffering[0].id,
    )
    self.assertTrue(vol is not None, "volume creation fails in domain %s as user %s" % (dom.name, domuser.name))
    listed_vol = Volume.list(domapiclient, id=vol.id)
    self.assertTrue(
        listed_vol is not None and isinstance(listed_vol, list),
        "invalid response from listVolumes for volume %s" % vol.id,
    )
    self.assertTrue(
        listed_vol[0].id == vol.id,
        "Volume returned by list volumes %s not matching with queried\
        volume %s in domain %s" % (listed_vol[0].id, vol.id, dom.name),
    )
def test_03_verify_libvirt_attach_disk(self):
    """ Verify that libvirt settings are expected after a disk add """
    if self.hypervisorNotSupported:
        self.skipTest("Hypervisor not supported")

    # Build the sparse-offering data disk, keep it on self for later
    # tests/teardown, and attach it to the shared VM.
    self.volume = Volume.create(self.apiclient,
                                self.services,
                                zoneid=self.zone.id,
                                account=self.account.name,
                                domainid=self.account.domainid,
                                diskofferingid=self.sparse_disk_offering.id)
    self.virtual_machine.attach_volume(self.apiclient, self.volume)

    # Expect three block devices visible through virsh after the attach.
    self.verifyVirshState(3)
def test_04_deploy_startvm_false_attach_volume(self): """Test Deploy Virtual Machine with startVM=false and attach volume """ # Validate the following: # 1. deploy Vm with the startvm=false. Attach volume to the instance # 2. listVM command should return the deployed VM.State of this VM # should be "Stopped". # 3. Attach volume should be successful self.debug("Deploying instance in the account: %s" % self.account.name) self.virtual_machine = VirtualMachine.create( self.apiclient, self.services["virtual_machine"], accountid=self.account.name, domainid=self.account.domainid, serviceofferingid=self.service_offering.id, startvm=False, diskofferingid=self.disk_offering.id, ) response = self.virtual_machine.getState(self.apiclient, VirtualMachine.STOPPED) self.assertEqual(response[0], PASS, response[1]) self.debug("Creating a volume in account: %s" % self.account.name) volume = Volume.create( self.apiclient, self.services["volume"], zoneid=self.zone.id, account=self.account.name, domainid=self.account.domainid, diskofferingid=self.disk_offering.id, ) self.debug("Created volume in account: %s" % self.account.name) self.debug("Attaching volume to instance: %s" % self.virtual_machine.name) try: self.virtual_machine.attach_volume(self.apiclient, volume) except Exception as e: self.fail("Attach volume failed with Exception: %s" % e) return
def test_02_project_disk_offerings(self): """ Test project disk offerings """ # Validate the following # 1. Create a project. # 2. List service offerings for the project. All disk offerings # available in the domain can be used for project resource creation # Create project as a domain admin project = Project.create( self.apiclient, self.services["project"], account=self.account.name, domainid=self.account.domainid ) # Cleanup created project at end of test self.cleanup.append(project) self.debug("Created project with domain admin with ID: %s" % project.id) list_projects_reponse = Project.list(self.apiclient, id=project.id, listall=True) self.assertEqual(isinstance(list_projects_reponse, list), True, "Check for a valid list projects response") list_project = list_projects_reponse[0] self.assertNotEqual(len(list_projects_reponse), 0, "Check list project response returns a valid project") self.assertEqual(project.name, list_project.name, "Check project name from list response") self.debug("Create a data volume for project: %s" % project.id) # Create a volume for project volume = Volume.create( self.apiclient, self.services["volume"], zoneid=self.zone.id, diskofferingid=self.disk_offering.id, projectid=project.id, ) self.cleanup.append(volume) # Verify Volume state self.assertEqual(volume.state in ["Allocated", "Ready"], True, "Check Volume state is Ready or not") return
def test_attach_detach_volume(self, value):
    """Verify primary storage count across volume attach and detach.

    # Validate the following
    # 1. Create a VM with custom disk offering and check the primary
    #    storage count of account
    # 2. Create custom volume in account
    # 3. Verify that primary storage count increases by same amount
    # 4. Attach volume to VM and verify resource count remains the same
    # 5. Detach volume and verify resource count remains the same
    """
    response = self.setupAccount(value)
    self.assertEqual(response[0], PASS, response[1])

    apiclient = self.apiclient
    if value == CHILD_DOMAIN_ADMIN:
        apiclient = self.testClient.getUserApiClient(
            UserName=self.account.name,
            DomainName=self.account.domain
        )
        self.assertNotEqual(apiclient, FAIL, "Failure while getting\
                api client of account: %s" % self.account.name)

    try:
        self.services["disk_offering"]["disksize"] = 4
        expectedCount = self.initialResourceCount + int(
            self.services["disk_offering"]["disksize"])
        disk_offering = DiskOffering.create(
            self.apiclient,
            services=self.services["disk_offering"])
        self.cleanup.append(disk_offering)
        volume = Volume.create(
            apiclient, self.services["volume"], zoneid=self.zone.id,
            account=self.account.name, domainid=self.account.domainid,
            diskofferingid=disk_offering.id)
    except Exception as e:
        self.fail("Failure: %s" % e)

    def verify_storage_count():
        # Primary storage is accounted against the owning account, so the
        # check is always done with the root-admin client.
        result = matchResourceCount(
            self.apiclient, expectedCount,
            RESOURCE_PRIMARY_STORAGE,
            accountid=self.account.id)
        self.assertEqual(result[0], PASS, result[1])

    # Creating the volume bumps the count by its size
    verify_storage_count()

    # Attaching must not change the count
    try:
        self.virtualMachine.attach_volume(apiclient, volume=volume)
    except Exception as e:
        self.fail("Failed while attaching volume to VM: %s" % e)
    verify_storage_count()

    # Detaching must not change the count either
    try:
        self.virtualMachine.detach_volume(apiclient, volume=volume)
    except Exception as e:
        self.fail("Failure while detaching volume: %s" % e)
    verify_storage_count()
    return
def test_01_volume_iso_attach(self):
    """Test Volumes and ISO attach
    """
    # Validate the following
    # 1. Create and attach 5 data volumes to VM
    # 2. Create an ISO. Attach it to VM instance
    # 3. Verify that attach ISO is successful

    # Create max_data_volumes volumes, validating and attaching each one
    # to the shared VM as it is created
    for i in range(self.max_data_volumes):
        volume = Volume.create(
            self.apiclient,
            self.services["volume"],
            zoneid=self.zone.id,
            account=self.account.name,
            domainid=self.account.domainid,
            diskofferingid=self.disk_offering.id
        )
        self.debug("Created volume: %s for account: %s" % (
            volume.id,
            self.account.name
        ))
        # Check List Volume response for newly created volume
        list_volume_response = Volume.list(
            self.apiclient,
            id=volume.id
        )
        self.assertNotEqual(
            list_volume_response,
            None,
            "Check if volume exists in ListVolumes"
        )
        self.assertEqual(
            isinstance(list_volume_response, list),
            True,
            "Check list volumes response for valid list"
        )
        # Attach volume to VM
        self.virtual_machine.attach_volume(
            self.apiclient,
            volume
        )

    # Check all volumes attached to same VM: exactly max_data_volumes
    # DATADISK entries are expected on the instance
    list_volume_response = Volume.list(
        self.apiclient,
        virtualmachineid=self.virtual_machine.id,
        type='DATADISK',
        listall=True
    )
    self.assertNotEqual(
        list_volume_response,
        None,
        "Check if volume exists in ListVolumes"
    )
    self.assertEqual(
        isinstance(list_volume_response, list),
        True,
        "Check list volumes response for valid list"
    )
    self.assertEqual(
        len(list_volume_response),
        self.max_data_volumes,
        "Volumes attached to the VM %s. Expected %s" % (
            len(list_volume_response), self.max_data_volumes))

    # Create an ISO and attach it to VM
    iso = Iso.create(
        self.apiclient,
        self.services["iso"],
        account=self.account.name,
        domainid=self.account.domainid,
    )
    self.debug("Created ISO with ID: %s for account: %s" % (
        iso.id,
        self.account.name
    ))
    # The ISO must be fully downloaded before it can be attached
    try:
        self.debug("Downloading ISO with ID: %s" % iso.id)
        iso.download(self.apiclient)
    except Exception as e:
        self.fail("Exception while downloading ISO %s: %s" % (iso.id, e))

    # Attach ISO to virtual machine via the raw attachIso API command
    self.debug("Attach ISO ID: %s to VM: %s" % (
        iso.id,
        self.virtual_machine.id
    ))
    cmd = attachIso.attachIsoCmd()
    cmd.id = iso.id
    cmd.virtualmachineid = self.virtual_machine.id
    self.apiclient.attachIso(cmd)

    # Verify ISO is attached to VM
    vm_response = VirtualMachine.list(
        self.apiclient,
        id=self.virtual_machine.id,
    )
    # Verify VM response to check whether VM deployment was successful
    self.assertEqual(
        isinstance(vm_response, list),
        True,
        "Check list VM response for valid list"
    )
    self.assertNotEqual(
        len(vm_response),
        0,
        "Check VMs available in List VMs response"
    )
    vm = vm_response[0]
    # isoid on the VM record confirms the attach took effect
    self.assertEqual(
        vm.isoid,
        iso.id,
        "Check ISO is attached to VM or not"
    )
    return
def test_01_volume_attach_detach(self):
    """Test Volume attach/detach to VM (5 data volumes)

    # Validate the following
    # 1. Deploy a vm and create 5 data disk
    # 2. Attach all the created Volume to the vm.
    # 3. Detach all the volumes attached.
    # 4. Reboot the VM. VM should be successfully rebooted
    # 5. Stop the VM. Stop VM should be successful
    # 6. Start The VM. Start VM should be successful
    """
    try:
        volumes = []
        # Create max_data_volumes volumes, attaching each to the VM
        for i in range(self.max_data_volumes):
            volume = Volume.create(
                self.apiclient,
                self.services["volume"],
                zoneid=self.zone.id,
                account=self.account.name,
                domainid=self.account.domainid,
                diskofferingid=self.disk_offering.id
            )
            self.cleanup.append(volume)
            volumes.append(volume)

            # Check List Volume response for newly created volume
            list_volume_response = Volume.list(
                self.apiclient,
                id=volume.id
            )
            self.assertNotEqual(
                list_volume_response,
                None,
                "Check if volume exists in ListVolumes")
            self.assertEqual(
                isinstance(list_volume_response, list),
                True,
                "Check list volumes response for valid list")

            # Attach volume to VM
            self.virtual_machine.attach_volume(
                self.apiclient,
                volume
            )

        # Check all volumes attached to same VM
        list_volume_response = Volume.list(
            self.apiclient,
            virtualmachineid=self.virtual_machine.id,
            type='DATADISK',
            listall=True
        )
        self.assertNotEqual(
            list_volume_response,
            None,
            "Check if volume exists in ListVolumes"
        )
        self.assertEqual(
            isinstance(list_volume_response, list),
            True,
            "Check list volumes response for valid list"
        )
        self.assertEqual(
            len(list_volume_response),
            self.max_data_volumes,
            "Volumes attached to the VM %s. Expected %s" % (
                len(list_volume_response), self.max_data_volumes))

        # Detach all volumes from VM
        for volume in volumes:
            self.virtual_machine.detach_volume(
                self.apiclient,
                volume
            )

        # Reboot VM
        self.debug("Rebooting the VM: %s" % self.virtual_machine.id)
        self.virtual_machine.reboot(self.apiclient)
        # Sleep to ensure that VM is in ready state
        time.sleep(self.services["sleep"])

        vm_response = VirtualMachine.list(
            self.apiclient,
            id=self.virtual_machine.id,
        )
        # Verify VM response to check whether VM deployment was successful
        self.assertEqual(
            isinstance(vm_response, list),
            True,
            "Check list VM response for valid list"
        )
        self.assertNotEqual(
            len(vm_response),
            0,
            "Check VMs available in List VMs response"
        )
        vm = vm_response[0]
        self.assertEqual(
            vm.state,
            'Running',
            "Check the state of VM"
        )

        # Stop VM
        self.virtual_machine.stop(self.apiclient)

        # Start VM
        self.virtual_machine.start(self.apiclient)
        # Sleep to ensure that VM is in ready state
        time.sleep(self.services["sleep"])

        vm_response = VirtualMachine.list(
            self.apiclient,
            id=self.virtual_machine.id,
        )
        # Verify VM response to check whether VM deployment was successful
        self.assertEqual(
            isinstance(vm_response, list),
            True,
            "Check list VM response for valid list"
        )
        self.assertNotEqual(
            len(vm_response),
            0,
            "Check VMs available in List VMs response"
        )
        vm = vm_response[0]
        self.assertEqual(
            vm.state,
            'Running',
            "Check the state of VM"
        )
    except Exception as e:
        # Fixed typo in failure message: "occuered" -> "occurred"
        self.fail("Exception occurred: %s" % e)
    return
def setUpClass(cls):
    # Class-level fixture: resolves the zone/domain/template context,
    # creates disk and service offerings, a test account, one VM and one
    # data volume that are shared by every test in this class.
    testClient = super(TestVolumes, cls).getClsTestClient()
    cls.apiclient = testClient.getApiClient()
    cls.services = testClient.getParsedTestDataConfig()

    # Get Zone, Domain and templates
    cls.domain = get_domain(cls.apiclient)
    cls.zone = get_zone(cls.apiclient, testClient.getZoneForTests())
    cls.services['mode'] = cls.zone.networktype
    cls.disk_offering = DiskOffering.create(
        cls.apiclient,
        cls.services["disk_offering"]
    )
    cls.resized_disk_offering = DiskOffering.create(
        cls.apiclient,
        cls.services["resized_disk_offering"]
    )
    # Same resized offering services, but marked custom so the size is
    # supplied at volume-creation time
    cls.custom_resized_disk_offering = DiskOffering.create(
        cls.apiclient,
        cls.services["resized_disk_offering"],
        custom=True
    )
    template = get_template(
        cls.apiclient,
        cls.zone.id,
        cls.services["ostype"]
    )
    if template == FAILED:
        assert False, "get_template() failed to return template with description %s" % cls.services["ostype"]

    # Propagate resolved ids into the shared services dict so later
    # create() calls pick them up implicitly
    cls.services["domainid"] = cls.domain.id
    cls.services["zoneid"] = cls.zone.id
    cls.services["template"] = template.id
    cls.services["diskofferingid"] = cls.disk_offering.id
    cls.services['resizeddiskofferingid'] = cls.resized_disk_offering.id
    cls.services['customresizeddiskofferingid'] = cls.custom_resized_disk_offering.id

    # Create VMs, VMs etc
    cls.account = Account.create(
        cls.apiclient,
        cls.services["account"],
        domainid=cls.domain.id
    )
    cls.service_offering = ServiceOffering.create(
        cls.apiclient,
        cls.services["service_offerings"]
    )
    cls.virtual_machine = VirtualMachine.create(
        cls.apiclient,
        cls.services,
        accountid=cls.account.name,
        domainid=cls.account.domainid,
        serviceofferingid=cls.service_offering.id,
        mode=cls.services["mode"]
    )
    cls.volume = Volume.create(
        cls.apiclient,
        cls.services,
        account=cls.account.name,
        domainid=cls.account.domainid
    )
    # Teardown order: account last so resources it owns are removed
    # first. NOTE(review): the VM is not listed here — presumably it is
    # cleaned up along with its owning account; confirm.
    cls._cleanup = [
        cls.resized_disk_offering,
        cls.custom_resized_disk_offering,
        cls.service_offering,
        cls.disk_offering,
        cls.volume,
        cls.account
    ]
def test_01_create_volume(self): """Test Volume creation for all Disk Offerings (incl. custom) """ # Validate the following # 1. Create volumes from the different sizes # 2. Verify the size of volume with actual size allocated self.volumes = [] for k, v in self.services["volume_offerings"].items(): volume = Volume.create( self.apiClient, v, zoneid=self.zone.id, account=self.account.name, domainid=self.account.domainid, diskofferingid=self.disk_offering.id ) self.debug("Created a volume with ID: %s" % volume.id) self.volumes.append(volume) if self.virtual_machine.hypervisor == "KVM": sparse_volume = Volume.create( self.apiClient, self.services, zoneid=self.zone.id, account=self.account.name, domainid=self.account.domainid, diskofferingid=self.sparse_disk_offering.id ) self.debug("Created a sparse volume: %s" % sparse_volume.id) self.volumes.append(sparse_volume) volume = Volume.create_custom_disk( self.apiClient, self.services, account=self.account.name, domainid=self.account.domainid, ) self.debug("Created a volume with custom offering: %s" % volume.id) self.volumes.append(volume) #Attach a volume with different disk offerings #and check the memory allocated to each of them for volume in self.volumes: list_volume_response = Volume.list( self.apiClient, id=volume.id) self.assertEqual( isinstance(list_volume_response, list), True, "Check list response returns a valid list" ) self.assertNotEqual( list_volume_response, None, "Check if volume exists in ListVolumes" ) self.debug( "Attaching volume (ID: %s) to VM (ID: %s)" % ( volume.id, self.virtual_machine.id )) self.virtual_machine.attach_volume( self.apiClient, volume ) try: ssh = self.virtual_machine.get_ssh_client() self.debug("Rebooting VM %s" % self.virtual_machine.id) ssh.execute("reboot") except Exception as e: self.fail("SSH access failed for VM %s - %s" % (self.virtual_machine.ipaddress, e)) # Poll listVM to ensure VM is started properly timeout = self.services["timeout"] while True: 
time.sleep(self.services["sleep"]) # Ensure that VM is in running state list_vm_response = VirtualMachine.list( self.apiClient, id=self.virtual_machine.id ) if isinstance(list_vm_response, list): vm = list_vm_response[0] if vm.state == 'Running': self.debug("VM state: %s" % vm.state) break if timeout == 0: raise Exception( "Failed to start VM (ID: %s) " % vm.id) timeout = timeout - 1 vol_sz = str(list_volume_response[0].size) ssh = self.virtual_machine.get_ssh_client( reconnect=True ) # Get the updated volume information list_volume_response = Volume.list( self.apiClient, id=volume.id) if list_volume_response[0].hypervisor.lower() == XEN_SERVER.lower(): volume_name = "/dev/xvd" + chr(ord('a') + int(list_volume_response[0].deviceid)) self.debug(" Using XenServer volume_name: %s" % (volume_name)) ret = checkVolumeSize(ssh_handle=ssh,volume_name=volume_name,size_to_verify=vol_sz) else: ret = checkVolumeSize(ssh_handle=ssh,size_to_verify=vol_sz) self.debug(" Volume Size Expected %s Actual :%s" %(vol_sz,ret[1])) self.virtual_machine.detach_volume(self.apiClient, volume) self.assertEqual(ret[0],SUCCESS,"Check if promised disk size actually available") time.sleep(self.services["sleep"])
def test_06_attachvolume_to_a_stopped_vm(self):
    """ Test Attach Volume To A Stopped VM

    Verifies that a data volume created from the "custom" disk offering
    can be attached at device id 1 while the VM is stopped.
    """
    # The VM must already be stopped for this scenario
    vm_list = VirtualMachine.list(
        self.user_api_client,
        id=self.virtual_machine.id
    )
    self.assertEqual(
        vm_list[0].state,
        'Stopped',
        msg="Check if VM is in Stopped state"
    )

    custom_disk_offering = DiskOffering.list(
        self.user_api_client,
        name="custom"
    )
    # Stored on the class so later tests can reuse the same volume
    self.__class__.volume = Volume.create(
        self.user_api_client,
        self.services["volume"],
        zoneid=self.zone.id,
        account=self.account.name,
        domainid=self.account.domainid,
        diskofferingid=custom_disk_offering[0].id,
        size=1
    )

    # Check List Volume response for newly created volume
    created_volumes = Volume.list(
        self.user_api_client,
        id=self.volume.id
    )
    self.assertNotEqual(
        created_volumes,
        None,
        "Check if volume exists in ListVolumes"
    )

    # Attach the volume to the stopped VM at an explicit device id
    cmd = attachVolume.attachVolumeCmd()
    cmd.id = self.volume.id
    cmd.virtualmachineid = self.virtual_machine.id
    cmd.deviceid = 1
    self.user_api_client.attachVolume(cmd)

    # Check all volumes attached to same VM
    attached_volumes = Volume.list(
        self.user_api_client,
        virtualmachineid=self.virtual_machine.id,
        type='DATADISK',
        listall=True
    )
    self.assertNotEqual(
        attached_volumes,
        None,
        "Check if volume exists in ListVolumes")
    self.assertEqual(
        isinstance(attached_volumes, list),
        True,
        "Check list volumes response for valid list")
    self.assertEqual(
        attached_volumes[0].deviceid,
        1,
        "Check listed volume device id is 1")
    return
def test_15_restore_vm_with_template_id(self):
    """ Test restoring Virtual Machine with template id

    # Validate the following
    # 1. Create an isolated network and deploy a VM in it
    # 2. Attach a data volume to the running VM
    # 3. Restore the VM with an explicit template id
    # 4. Verify the VM returns to the "Running" state
    """
    noffering = NetworkOffering.list(
        self.user_api_client,
        name="DefaultIsolatedNetworkOfferingWithSourceNatService"
    )
    vm1network = Network.create(
        self.user_api_client,
        self.services["network"],
        accountid=self.account.name,
        domainid=self.account.domainid,
        networkofferingid=noffering[0].id,
        zoneid=self.zone.id
    )
    list_nw_response = Network.list(
        self.user_api_client,
        id=vm1network.id
    )
    self.assertEqual(
        isinstance(list_nw_response, list),
        True,
        "Check list response returns a valid networks list"
    )

    restorevm = VirtualMachine.create(
        self.user_api_client,
        self.services["small"],
        networkids=vm1network.id,
        accountid=self.account.name,
        domainid=self.account.domainid,
        serviceofferingid=self.service_offering.id,
        mode=self.services['mode'],
        startvm="true"
    )
    # Allow the deployment to settle fully before checking state
    time.sleep(600)
    list_vm_response = VirtualMachine.list(
        self.user_api_client,
        id=restorevm.id
    )
    self.assertEqual(
        list_vm_response[0].state,
        "Running",
        "Check virtual machine is in running state"
    )

    custom_disk_offering = DiskOffering.list(
        self.user_api_client,
        name="custom"
    )
    newvolume = Volume.create(
        self.user_api_client,
        self.services["volume"],
        zoneid=self.zone.id,
        account=self.account.name,
        domainid=self.domain.id,
        diskofferingid=custom_disk_offering[0].id,
        size=1
    )
    # Attach volume to VM (removed unused binding of the API response)
    cmd = attachVolume.attachVolumeCmd()
    cmd.id = newvolume.id
    cmd.virtualmachineid = restorevm.id
    cmd.deviceid = 1
    self.user_api_client.attachVolume(cmd)

    # Restore the VM to the given template
    cmd = restoreVirtualMachine.restoreVirtualMachineCmd()
    cmd.virtualmachineid = restorevm.id
    cmd.templateid = self.xtemplate.id
    self.user_api_client.restoreVirtualMachine(cmd)

    # Allow the restore to complete before verifying the state
    time.sleep(600)
    list_vm_response = VirtualMachine.list(
        self.user_api_client,
        id=restorevm.id
    )
    self.assertEqual(
        isinstance(list_vm_response, list),
        True,
        "Check list response returns a valid list"
    )
    self.assertNotEqual(
        len(list_vm_response),
        0,
        "Check VM available in List Virtual Machines"
    )
    # BUG FIX: this assertion checks for "Running" but the original
    # message claimed "Stopped", which made failures misleading
    self.assertEqual(
        list_vm_response[0].state,
        "Running",
        "Check virtual machine is in Running state"
    )

    restorevm.delete(self.apiclient)
    vm1network.delete(self.user_api_client)
    return
def test_create_multiple_volumes(self, value):
    """Verify primary storage count while creating multiple volumes.

    # Validate the following
    # 1. Create a VM with custom disk offering and check the primary
    #    storage count of account
    # 2. Create multiple volumes in account
    # 3. Verify that primary storage count increases by same amount
    # 4. Attach volumes to VM and verify resource count remains the same
    # 5. Detach and delete both volumes one by one and verify resource
    #    count decreases proportionately
    """
    # Creating service offering with 10 GB volume
    response = self.setupAccount(value)
    self.assertEqual(response[0], PASS, response[1])

    apiclient = self.apiclient
    if value == CHILD_DOMAIN_ADMIN:
        apiclient = self.testClient.getUserApiClient(
            UserName=self.account.name,
            DomainName=self.account.domain
        )
        self.assertNotEqual(apiclient, FAIL, "Failure while getting\
                api client of account %s" % self.account.name)

    try:
        self.services["disk_offering"]["disksize"] = 5
        offering_5_gb = DiskOffering.create(
            self.apiclient, services=self.services["disk_offering"])
        self.cleanup.append(offering_5_gb)

        self.services["disk_offering"]["disksize"] = 10
        offering_10_gb = DiskOffering.create(
            self.apiclient, services=self.services["disk_offering"])
        self.cleanup.append(offering_10_gb)

        volume_1 = Volume.create(
            apiclient, self.services["volume"], zoneid=self.zone.id,
            account=self.account.name, domainid=self.account.domainid,
            diskofferingid=offering_5_gb.id)
        volume_2 = Volume.create(
            apiclient, self.services["volume"], zoneid=self.zone.id,
            account=self.account.name, domainid=self.account.domainid,
            diskofferingid=offering_10_gb.id)

        self.debug("Attaching volume %s to vm %s" % (
            volume_1.name, self.virtualMachine.name))
        self.virtualMachine.attach_volume(apiclient, volume=volume_1)

        self.debug("Attaching volume %s to vm %s" % (
            volume_2.name, self.virtualMachine.name))
        self.virtualMachine.attach_volume(apiclient, volume=volume_2)
    except Exception as e:
        self.fail("Failure: %s" % e)

    # Both volumes together add 5 + 10 GB to the primary storage count
    expectedCount = self.initialResourceCount + 15  # (5 + 10)
    response = matchResourceCount(
        self.apiclient, expectedCount,
        RESOURCE_PRIMARY_STORAGE,
        accountid=self.account.id)
    self.assertEqual(response[0], PASS, response[1])

    try:
        # Detaching and deleting volume 1
        self.virtualMachine.detach_volume(apiclient, volume=volume_1)
        volume_1.delete(apiclient)
    except Exception as e:
        self.fail("Failure while volume operation: %s" % e)

    expectedCount -= 5  # After deleting first volume
    response = matchResourceCount(
        self.apiclient, expectedCount,
        RESOURCE_PRIMARY_STORAGE,
        accountid=self.account.id)
    self.assertEqual(response[0], PASS, response[1])

    try:
        # Detaching and deleting volume 2
        self.virtualMachine.detach_volume(apiclient, volume=volume_2)
        volume_2.delete(apiclient)
    except Exception as e:
        self.fail("Failure while volume operation: %s" % e)

    expectedCount -= 10
    response = matchResourceCount(
        self.apiclient, expectedCount,
        RESOURCE_PRIMARY_STORAGE,
        accountid=self.account.id)
    self.assertEqual(response[0], PASS, response[1])
    return
def test_01_storage_snapshots_limits(self):
    """ Storage and Snapshot Limit
        1. Create Snapshot of ROOT disk.
        2. Verify the Secondary Storage value is increased
           by the size of snapshot.
        3. Delete Snapshot.
        4. Verify the Secondary Storage value is decreased
           by the size of snapshot.
        5. Set the Snapshot limit of Account.
        6. Create Snapshots till limit is reached.
        7. Create Snapshot of ROOT Volume. Creation should fail.
        8. Delete few Snapshots.
        9. Create Snapshot again. Creation should succeed.
    """
    # Get ROOT Volume
    root_volumes_list = Volume.list(
        self.userapiclient,
        virtualmachineid=self.vm.id,
        type='ROOT'
    )
    status = validateList(root_volumes_list)
    self.assertEqual(status[0], PASS, "ROOT Volume List Validation Failed")
    root_volume = root_volumes_list[0]

    # Create and attach an extra DATA volume; its snapshot is used later
    # to hit the snapshot limit (step 7)
    self.data_volume_created = Volume.create(
        self.userapiclient,
        self.testdata["volume"],
        zoneid=self.zone.id,
        account=self.account.name,
        domainid=self.account.domainid,
        diskofferingid=self.disk_offering.id
    )
    self.cleanup.append(self.data_volume_created)

    data_volumes_list = Volume.list(
        self.userapiclient,
        id=self.data_volume_created.id
    )
    status = validateList(data_volumes_list)
    self.assertEqual(status[0], PASS, "DATA Volume List Validation Failed")
    self.data_volume = data_volumes_list[0]

    self.vm.attach_volume(
        self.userapiclient,
        self.data_volume
    )

    # Get Secondary Storage Value from Database (account_view holds the
    # aggregated secondaryStorageTotal per account)
    qryresult_before_snapshot = self.dbclient.execute(
        " select id, account_name, secondaryStorageTotal\
                from account_view where account_name = '%s';" %
        self.account.name)
    status = validateList(qryresult_before_snapshot)
    self.assertEqual(
        status[0],
        PASS,
        "Check sql query to return SecondaryStorageTotal of account")
    secStorageBeforeSnapshot = qryresult_before_snapshot[0][2]

    # Step 1
    snapshot = Snapshot.create(
        self.userapiclient,
        root_volume.id)
    snapshots_list = Snapshot.list(self.userapiclient,
                                   id=snapshot.id)
    status = validateList(snapshots_list)
    self.assertEqual(status[0], PASS, "Snapshots List Validation Failed")

    # Verify Snapshot state
    self.assertEqual(
        snapshots_list[0].state.lower() in [
            BACKED_UP,
        ],
        True,
        "Snapshot state is not as expected. It is %s" %
        snapshots_list[0].state
    )

    # Step 2
    qryresult_after_snapshot = self.dbclient.execute(
        " select id, account_name, secondaryStorageTotal\
                from account_view where account_name = '%s';" %
        self.account.name)
    status = validateList(qryresult_after_snapshot)
    self.assertEqual(
        status[0],
        PASS,
        "Check sql query to return SecondaryStorageTotal of account")
    secStorageAfterSnapshotCreated = qryresult_after_snapshot[0][2]

    # The usage delta must equal the snapshot's physical size
    snapshot_size = snapshots_list[0].physicalsize
    secStorageIncreased = secStorageBeforeSnapshot + \
        snapshot_size
    self.assertEqual(
        secStorageIncreased,
        secStorageAfterSnapshotCreated,
        "Secondary storage Total after Snapshot\
                should be incremented by size of snapshot.")

    # Step 3
    snapshot.delete(self.apiclient)
    # validateList returning FAIL here proves the snapshot is gone
    snapshots_list = Snapshot.list(self.userapiclient,
                                   id=snapshot.id)
    status = validateList(snapshots_list)
    self.assertEqual(status[0], FAIL, "Snapshots Not Deleted.")

    # Step 4
    qryresult_after_snapshot_deleted = self.dbclient.execute(
        " select id, account_name, secondaryStorageTotal\
                from account_view where account_name = '%s';" %
        self.account.name)
    status = validateList(qryresult_after_snapshot_deleted)
    self.assertEqual(
        status[0],
        PASS,
        "Check sql query to return SecondaryStorageTotal of account")
    secStorageAfterSnapshotDeleted = qryresult_after_snapshot_deleted[0][2]

    secStorageDecreased = secStorageAfterSnapshotCreated - \
        snapshot_size
    self.assertEqual(
        secStorageDecreased,
        secStorageAfterSnapshotDeleted,
        "Secondary storage Total after Snapshot\
                should be incremented by size of snapshot.")

    # Step 5
    # Set Snapshot Limit for account (resourcetype=3 is the snapshot
    # resource type)
    Resources.updateLimit(self.apiclient, resourcetype=3,
                          max=1,
                          account=self.account.name,
                          domainid=self.account.domainid)

    # Step 6
    # With max=1, this single snapshot exhausts the account's limit
    snapshot = Snapshot.create(
        self.userapiclient,
        root_volume.id)
    snapshots_list = Snapshot.list(self.userapiclient,
                                   id=snapshot.id)
    status = validateList(snapshots_list)
    self.assertEqual(status[0], PASS, "Snapshots List Validation Failed")

    # Verify Snapshot state
    self.assertEqual(
        snapshots_list[0].state.lower() in [
            BACKED_UP,
        ],
        True,
        "Snapshot state is not as expected. It is %s" %
        snapshots_list[0].state
    )

    # Step 7
    # Limit is exhausted, so snapshotting the DATA volume must fail
    with self.assertRaises(Exception):
        Snapshot.create(
            self.userapiclient,
            self.data_volume.id)

    # Step 8
    snapshot.delete(self.userapiclient)
    snapshots_list = Snapshot.list(self.userapiclient,
                                   id=snapshot.id)
    status = validateList(snapshots_list)
    self.assertEqual(status[0], FAIL, "Snapshots Not Deleted.")

    # Step 9
    # With the limit freed up, snapshot creation succeeds again
    snapshot = Snapshot.create(
        self.userapiclient,
        root_volume.id)
    snapshots_list = Snapshot.list(self.userapiclient,
                                   id=snapshot.id)
    status = validateList(snapshots_list)
    self.assertEqual(status[0], PASS, "Snapshots List Validation Failed")

    # Verify Snapshot state
    self.assertEqual(
        snapshots_list[0].state.lower() in [
            BACKED_UP,
        ],
        True,
        "Snapshot state is not as expected. It is %s" %
        snapshots_list[0].state
    )
    return