def test_11_delete_detached_volume(self):
    """
    Delete a Volume unattached to an VM
    """
    # Pre-condition: the volume must already be detached, otherwise
    # deletion is not allowed — skip rather than fail in that case.
    pre_check = Volume.list(
        self.user_api_client,
        id=self.volume.id
    )
    if pre_check[0].virtualmachineid is not None:
        self.skipTest("Check if volume is detached before deleting")

    delete_cmd = deleteVolume.deleteVolumeCmd()
    delete_cmd.id = self.volume.id
    self.user_api_client.deleteVolume(delete_cmd)

    # Sleep to ensure the current state will reflected in other calls
    time.sleep(self.services["sleep"])

    # A deleted volume must no longer appear in listVolumes output.
    post_check = Volume.list(
        self.user_api_client,
        id=self.volume.id,
    )
    self.assertEqual(
        post_check,
        None,
        "Volume %s was not deleted" % self.volume.id
    )
    return
def test_01_migrateVolume(self):
    """
    @Desc:Volume is not retaining same uuid when migrating from one
          storage to another.
    Step1:Create a volume/data disk
    Step2:Verify UUID of the volume
    Step3:Migrate the volume to another primary storage within the cluster
    Step4:Migrating volume to new primary storage should succeed
    Step5:volume UUID should not change even after migration
    """
    data_disk = Volume.create(
        self.apiclient,
        self.services["volume"],
        diskofferingid=self.disk_offering.id,
        zoneid=self.zone.id,
        account=self.account.name,
        domainid=self.account.domainid,
    )
    self.assertIsNotNone(data_disk, "Failed to create volume")

    # Record the UUID before migration so we can compare afterwards.
    listed = Volume.list(self.apiclient, id=data_disk.id)
    self.assertEqual(validateList(listed)[0], PASS,
                     "Invalid response returned for list volumes")
    uuid_before = listed[0].id

    try:
        self.virtual_machine.attach_volume(self.apiclient, data_disk)
    except Exception as e:
        self.fail("Attaching data disk to vm failed with error %s" % e)

    # Find a storage pool the attached volume can be live-migrated to.
    candidate_pools = StoragePool.listForMigration(self.apiclient, id=data_disk.id)
    if not candidate_pools:
        self.skipTest(
            "No suitable storage pools found for volume migration.\
            Skipping")
    self.assertEqual(validateList(candidate_pools)[0], PASS,
                     "invalid pool response from findStoragePoolsForMigration")
    target_pool = candidate_pools[0]
    self.debug("Migrating Volume-ID: %s to Pool: %s" % (data_disk.id, target_pool.id))
    try:
        Volume.migrate(self.apiclient,
                       volumeid=data_disk.id,
                       storageid=target_pool.id,
                       livemigrate="true")
    except Exception as e:
        self.fail("Volume migration failed with error %s" % e)

    # Re-list the VM's data disks after migration and compare UUIDs.
    post_migration = Volume.list(
        self.apiclient,
        virtualmachineid=self.virtual_machine.id,
        listall="true",
        type="DATADISK"
    )
    self.assertEqual(validateList(post_migration)[0], PASS,
                     "invalid volumes response after migration")
    uuid_after = post_migration[0].id
    self.assertEqual(
        uuid_before,
        uuid_after,
        "Volume is not retaining same uuid when migrating from one\
        storage to another",
    )
    self.virtual_machine.detach_volume(self.apiclient, data_disk)
    self.cleanup.append(data_disk)
    return
def setUpClass(cls):
    cls.testClient = super(TestTemplates, cls).getClsTestClient()
    cls.api_client = cls.testClient.getApiClient()
    cls.services = Services().services

    # Get Zone, Domain and templates
    cls.zone = get_zone(cls.api_client, cls.testClient.getZoneForTests())
    cls.domain = get_domain(cls.api_client)
    cls.services['mode'] = cls.zone.networktype
    template = get_template(
        cls.api_client,
        cls.zone.id,
        cls.services["ostype"]
    )
    cls.services["virtual_machine"]["zoneid"] = cls.zone.id

    # Account that owns every resource created by this test class.
    cls.account = Account.create(
        cls.api_client,
        cls.services["account"],
        domainid=cls.domain.id
    )
    cls.services["account"] = cls.account.name
    cls.service_offering = ServiceOffering.create(
        cls.api_client,
        cls.services["service_offering"],
    )

    # create virtual machine
    cls.virtual_machine = VirtualMachine.create(
        cls.api_client,
        cls.services["virtual_machine"],
        templateid=template.id,
        accountid=cls.account.name,
        domainid=cls.account.domainid,
        serviceofferingid=cls.service_offering.id,
    )
    #Stop virtual machine
    cls.virtual_machine.stop(cls.api_client)

    #Wait before server has be successfully stopped
    time.sleep(30)

    # Grab the ROOT volume of the stopped VM for the template tests.
    list_volume = Volume.list(
        cls.api_client,
        virtualmachineid=cls.virtual_machine.id,
        type='ROOT',
        listall=True
    )
    try:
        if isinstance(list_volume, list):
            cls.volume = list_volume[0]
    except Exception as e:
        raise Exception("Warning: Exception during setup : %s" % e)

    cls._cleanup = [
        cls.service_offering,
        cls.account,
    ]
def test_attach_volume_exceeding_primary_limits(self):
    """
    # do
    # 1. create a normal user account and update primary store limits to the current resource count
    # 2. Upload a volume of any size
    # 3. Verify that upload volume succeeds
    # 4. Verify that primary storage count doesnt change
    # 6. Try attaching volume to VM and verify that the attach fails (as the resource limits exceed)
    # 7. Verify that primary storage count doesnt change
    # done
    """
    # create an account, launch a vm with default template and custom
    # disk offering, update the primary store limits to the current
    # primary store resource count
    response = self.setupNormalAccount()
    self.assertEqual(response[0], PASS, response[1])

    # upload volume and verify that the volume is uploaded
    volume = Volume.upload(
        self.apiclient,
        self.services["configurableData"]["upload_volume"],
        zoneid=self.zone.id,
        account=self.account.name,
        domainid=self.account.domainid,
        url="http://people.apache.org/~sanjeev/rajani-thin-volume.vhd",
    )
    volume.wait_for_upload(self.apiclient)

    volumes = Volume.list(self.apiclient, id=volume.id,
                          zoneid=self.zone.id, listall=True)
    validationresult = validateList(volumes)
    assert validationresult[0] == PASS, \
        "volumes list validation failed: %s" % validationresult[2]
    assert str(volumes[0].state).lower() == "uploaded", (
        "Volume state should be 'uploaded' but it is %s" % volumes[0].state
    )

    # verify that the resource count didnt change due to upload volume
    response = matchResourceCount(
        self.apiclient,
        self.initialResourceCount,
        RESOURCE_PRIMARY_STORAGE,
        accountid=self.account.id
    )
    self.assertEqual(response[0], PASS, response[1])

    # attach the above volume to the vm; this must fail because the
    # account has already reached its primary storage limit
    try:
        self.virtualMachine.attach_volume(self.apiclient, volume=volume)
    except Exception as e:
        # BUG FIX: Exception objects have no "message" attribute on
        # Python 3 (it was removed after Python 2.6); use str(e) to
        # inspect the exception text portably.
        if (
            "Maximum number of resources of type 'primary_storage' for account name="
            + self.account.name in str(e)
        ):
            self.assertTrue(True, "there should be primary store resource limit reached exception")
        else:
            self.fail(
                "only resource limit reached exception is expected. some other exception occurred. Failing the test case."
            )

    # resource count should match as the attach should fail due to
    # reaching resource limits
    response = matchResourceCount(
        self.apiclient,
        self.initialResourceCount,
        RESOURCE_PRIMARY_STORAGE,
        accountid=self.account.id
    )
    self.assertEqual(response[0], PASS, response[1])
    return
def test_09_delete_detached_volume(self):
    """Delete a Volume unattached to an VM
    """
    # Validate the following
    # 1. volume should be deleted successfully and listVolume should not
    #    contain the deleted volume details.
    # 2. "Delete Volume" menu item not shown under "Actions" menu.
    #    (UI should not allow to delete the volume when it is attached
    #    to instance by hiding the menu Item)
    self.debug("Delete Volume ID: %s" % self.volume.id)

    # Create a fresh data disk, cycle it through attach/detach, then
    # delete it while detached.
    self.volume_1 = Volume.create(
        self.apiclient,
        self.services,
        account=self.account.name,
        domainid=self.account.domainid
    )
    self.virtual_machine.attach_volume(self.apiClient, self.volume_1)
    self.virtual_machine.detach_volume(self.apiClient, self.volume_1)

    delete_cmd = deleteVolume.deleteVolumeCmd()
    delete_cmd.id = self.volume_1.id
    self.apiClient.deleteVolume(delete_cmd)

    remaining = Volume.list(self.apiClient,
                            id=self.volume_1.id,
                            type="DATADISK")
    self.assertEqual(remaining, None, "Check if volume exists in ListVolumes")
    return
def test_attach_multiple_volumes(self):
    """Attach multiple Volumes simultaneously to a Running VM
    """
    # Validate the following
    # 1. All data disks attached successfully without any exception

    # Fire off all four attach jobs first, then wait on each async job,
    # so the attaches run concurrently.
    attach_jobs = []
    for disk in (self.volume1, self.volume2, self.volume3, self.volume4):
        self.debug(
            "Attaching volume (ID: %s) to VM (ID: %s)" % (
                disk.id,
                self.virtual_machine.id
            ))
        attach_jobs.append(
            self.attach_volume(self.apiClient, self.virtual_machine.id, disk))

    for job in attach_jobs:
        self.query_async_job(self.apiClient, job.jobid)

    # List all the volumes attached to the instance. Includes even the Root disk.
    list_volume_response = Volume.list(
        self.apiClient,
        virtualmachineid=self.virtual_machine.id,
        type="DATADISK",
        account=self.account.name,
        domainid=self.account.domainid
    )
    self.assertEqual(
        validateList(list_volume_response)[0],
        PASS,
        "Check list response returns a valid list"
    )
    self.assertEqual(
        len(list_volume_response),
        4,
        "All 4 data disks are not attached to VM Successfully"
    )
    return
def tearDown(self):
    try:
        # Clear the storage tags set on the pools during the test.
        for pool in self.pools:
            StoragePool.update(self.apiclient, id=pool.id, tags="")

        # If a data volume was created, detach it from the VM before
        # running the generic cleanup.
        if hasattr(self, "data_volume_created"):
            data_volumes_list = Volume.list(
                self.userapiclient,
                id=self.data_volume_created.id,
                virtualmachineid=self.vm.id
            )
            if data_volumes_list:
                self.vm.detach_volume(
                    self.userapiclient,
                    data_volumes_list[0]
                )
            status = validateList(data_volumes_list)
            self.assertEqual(
                status[0],
                PASS,
                "DATA Volume List Validation Failed")

        cleanup_resources(self.apiclient, self.cleanup)
    except Exception as e:
        raise Exception("Warning: Exception during cleanup : %s" % e)
    return
def test_02_volume_attach_max(self):
    """Test attach volumes (more than max) to an instance
    """
    # Validate the following
    # 1. Attach one more data volume to VM (Already 5 attached)
    # 2. Attach volume should fail

    # Create a volume and attach to VM
    extra_volume = Volume.create(
        self.apiclient,
        self.services["volume"],
        zoneid=self.zone.id,
        account=self.account.name,
        domainid=self.account.domainid,
        diskofferingid=self.disk_offering.id,
    )
    self.debug("Created volume: %s for account: %s" % (extra_volume.id, self.account.name))

    # Check List Volume response for newly created volume
    listed = Volume.list(self.apiclient, id=extra_volume.id)
    self.assertNotEqual(listed, None, "Check if volume exists in ListVolumes")
    self.assertEqual(isinstance(listed, list), True,
                     "Check list volumes response for valid list")

    # Attach volume to VM — must raise, since the VM already carries
    # the maximum number of data disks.
    with self.assertRaises(Exception):
        self.debug("Trying to Attach volume: %s to VM: %s" % (extra_volume.id, self.virtual_machine.id))
        self.virtual_machine.attach_volume(self.apiclient, extra_volume)
    return
def test_03_delete_detached_volume(self):
    """Delete a Volume unattached to an VM
    """
    # Validate the following
    # 1. volume should be deleted successfully and listVolume should not
    #    contain the deleted volume details.
    self.debug("Deleting volume: %s" % self.volume.id)

    delete_cmd = deleteVolume.deleteVolumeCmd()
    delete_cmd.id = self.volume.id
    self.apiclient.deleteVolume(delete_cmd)

    # Sleep to ensure the current state will reflected in other calls
    time.sleep(self.services["sleep"])

    # The deleted volume must not show up in listVolumes any more.
    remaining = Volume.list(
        self.apiclient,
        id=self.volume.id,
    )
    self.assertEqual(
        remaining,
        None,
        "Volume %s was not deleted" % self.volume.id
    )
    return
def test_03_delete_vm(self):
    """Test delete VM belonging to project

    # Validate the following
    # 1. Create VM with custom disk offering in a project and check
    #    initial primary storage count
    # 2. Delete VM and verify that it's expunged
    # 3. Verify that primary storage count of project equals 0"""

    try:
        self.vm.delete(self.apiclient)
    except Exception as e:
        self.fail("Failed to detroy VM: %s" % e)

    self.assertTrue(isVmExpunged(self.apiclient, self.vm.id, self.project.id),\
        "VM not expunged")

    # Poll (up to 10 minutes, once per minute) until the VM's volumes
    # disappear from the project.
    totalallottedtime = timeout = 600
    while timeout >= 0:
        volumes = Volume.list(self.apiclient,
                              projectid=self.project.id,
                              listall=True)
        if volumes is None:
            break
        if timeout == 0:
            self.fail("Volume attached to VM not cleaned up even after %s seconds" % totalallottedtime)
        timeout -= 60
        time.sleep(60)

    # With every volume gone the project's primary storage count must
    # be back to zero.
    expectedCount = 0
    response = matchResourceCount(
        self.apiclient, expectedCount,
        RESOURCE_PRIMARY_STORAGE,
        projectid=self.project.id)
    self.assertEqual(response[0], PASS, response[1])
    return
def setUpClass(cls):
    cls.testClient = super(TestTemplates, cls).getClsTestClient()
    cls.api_client = cls.testClient.getApiClient()
    cls.hypervisor = cls.testClient.getHypervisorInfo()
    cls.services = Services().services

    # Get Zone, Domain and templates
    cls.zone = get_zone(cls.api_client, cls.testClient.getZoneForTests())
    cls.domain = get_domain(cls.api_client)
    cls.services['mode'] = cls.zone.networktype
    template = get_template(
        cls.api_client,
        cls.zone.id,
        cls.services["ostype"]
    )
    if cls.hypervisor.lower() in ['lxc']:
        raise unittest.SkipTest("Template creation from root volume is not supported in LXC")

    cls.services["virtual_machine"]["zoneid"] = cls.zone.id
    cls._cleanup = []
    try:
        cls.account = Account.create(
            cls.api_client,
            cls.services["account"],
            domainid=cls.domain.id
        )
        cls._cleanup.append(cls.account)
        cls.services["account"] = cls.account.name
        cls.service_offering = ServiceOffering.create(
            cls.api_client,
            cls.services["service_offering"],
        )
        cls._cleanup.append(cls.service_offering)

        # create virtual machine
        cls.virtual_machine = VirtualMachine.create(
            cls.api_client,
            cls.services["virtual_machine"],
            templateid=template.id,
            accountid=cls.account.name,
            domainid=cls.account.domainid,
            serviceofferingid=cls.service_offering.id,
        )
        #Stop virtual machine
        cls.virtual_machine.stop(cls.api_client)

        # Locate the VM's ROOT volume; template tests operate on it.
        listvolumes = Volume.list(
            cls.api_client,
            virtualmachineid=cls.virtual_machine.id,
            type='ROOT',
            listall=True
        )
        assert validateList(listvolumes)[0] == PASS, "volumes list is empty"
        cls.volume = listvolumes[0]
    except Exception as e:
        # Tear down anything already created, then skip the whole class.
        cls.tearDownClass()
        raise unittest.SkipTest("Exception in setUpClass: %s" % e)
def verify_detach_volume(self, vmid, volid):
    # A detached volume still exists but must have no VM association.
    listed = Volume.list(self.userapiclient, id=volid)
    self.assertEqual(validateList(listed)[0],
                     PASS,
                     "Check List volume response for volume %s" % volid)
    self.assertEqual(len(listed),
                     1,
                     "Detach data disk id: %s for vm id :%s was not successful" % (volid, vmid))
    self.assertEqual(listed[0].virtualmachineid,
                     None,
                     "Check if volume state (attached) is reflected")
    self.debug("volume id: %s successfully detached from vm id:%s" % (volid, vmid))
def test_10_detach_volume(self):
    """
    Test Detach Volume
    """
    # Pre-condition: the volume must currently be attached to the VM.
    pre_check = Volume.list(
        self.user_api_client,
        id=self.volume.id
    )
    if pre_check[0].virtualmachineid is None:
        self.skipTest("Check if volume is attached to the VM before detach")

    self.virtual_machine.detach_volume(self.user_api_client, self.volume)

    # Sleep to ensure the current state will reflected in other calls
    time.sleep(self.services["sleep"])

    post_check = Volume.list(
        self.user_api_client,
        id=self.volume.id
    )
    self.assertNotEqual(
        post_check,
        None,
        "Check if volume exists in ListVolumes"
    )
    self.assertEqual(
        isinstance(post_check, list),
        True,
        "Check list volumes response for valid list"
    )
    # Both the VM id and the VM name must be cleared once detached.
    detached = post_check[0]
    self.assertEqual(
        detached.virtualmachineid,
        None,
        "Check if volume state (detached) is reflected"
    )
    self.assertEqual(
        detached.vmname,
        None,
        "Check if volume state (detached) is reflected"
    )
    return
def verify_attach_volume(self, vmid, volid):
    # An attached volume's record must reference the owning VM.
    listed = Volume.list(self.userapiclient, id=volid)
    self.assertEqual(validateList(listed)[0],
                     PASS,
                     "Check List volume response for volume %s" % volid)
    self.assertEqual(len(listed),
                     1,
                     "There is no data disk attached to vm id:%s" % vmid)
    self.assertEqual(listed[0].virtualmachineid,
                     vmid,
                     "Check if volume state (attached) is reflected")
    self.debug("volume id:%s successfully attached to vm id%s" % (volid, vmid))
    return
def test_01_test_vm_volume_snapshot(self):
    """
    @Desc: Test that Volume snapshot for root volume is allowed
    when VM snapshot is present for the VM
    @Steps:
    1: Deploy a VM and create a VM snapshot for VM
    2: Try to create snapshot for the root volume of the VM,
    It should not fail
    """

    # Creating Virtual Machine
    virtual_machine = VirtualMachine.create(
        self.apiclient,
        self.services["virtual_machine"],
        accountid=self.account.name,
        domainid=self.account.domainid,
        serviceofferingid=self.service_offering.id,
    )
    # Take a VM snapshot first; the volume snapshot below must still work.
    VmSnapshot.create(
        self.apiclient,
        virtual_machine.id,
    )

    root_volumes = Volume.list(self.apiclient,
                               virtualmachineid=virtual_machine.id,
                               type="ROOT",
                               listall=True)
    self.assertEqual(validateList(root_volumes)[0],
                     PASS,
                     "Failed to get root volume of the VM")

    snapshot = Snapshot.create(
        self.apiclient,
        root_volumes[0].id,
        account=self.account.name,
        domainid=self.account.domainid
    )
    self.debug("Snapshot created: ID - %s" % snapshot.id)

    # Confirm the snapshot actually exists server-side.
    snapshots = list_snapshots(
        self.apiclient,
        id=snapshot.id
    )
    self.assertEqual(
        validateList(snapshots)[0],
        PASS,
        "Invalid snapshot list"
    )
    self.assertEqual(
        snapshots[0].id,
        snapshot.id,
        "Check resource id in list resources call"
    )
    return
def checkVolumeResponse():
    # Poll helper: returns (True, root_volume) once the ROOT volume of
    # the VM reports a virtualsize, else (False, None).
    response = Volume.list(
        self.apiClient,
        virtualmachineid=self.virtual_machine.id,
        type='ROOT',
        listall=True
    )
    if isinstance(response, list) and response[0].virtualsize is not None:
        return True, response[0]
    return False, None
def test_04_concurrent_snapshots_create_volume(self):
    """Test while parent concurrent snapshot job in progress,create volume from completed snapshot

    1.Configure the concurrent.snapshots.threshold.perhost=3
    2.Deploy a Linux VM using default CentOS template, use small
      service offering, disk offering.
    3.Perform snapshot on root disk of this newly created VM
    4.while parent concurrent snapshot job in progress,create volume
      from completed snapshot"""

    # Validate the following
    # a.Able to create Volume from snapshots
    # b.check all snapshots jobs are running concurrently on back grounds
    # c.listSnapshots should list this newly created snapshot.

    self.debug("Create virtual machine and snapshot on ROOT disk thread")
    self.create_Snapshot_VM()

    self.debug("Verify whether snapshots were created properly or not?")
    self.verify_Snapshots()

    self.debug("Fetch the list of snapshots belong to account: %s" % self.account.name)
    snapshots = self.get_Snapshots_For_Account(
        self.account.name,
        self.account.domainid)

    # Build one createVolume job per snapshot and submit them together
    # so they run concurrently on the management server.
    jobs = [self.create_Volume_from_Snapshot(snap) for snap in snapshots]
    for snap in snapshots:
        self.debug("Create a volume from snapshot: %s" % snap.name)

    # Verify IO usage by submitting the concurrent jobs
    self.testClient.submitCmdsAndWait(jobs)

    self.debug("Verifying if volume created properly or not?")
    volumes = Volume.list(self.apiclient,
                          account=self.account.name,
                          domainid=self.account.domainid,
                          listall=True,
                          type='ROOT')
    self.assertNotEqual(volumes,
                        None,
                        "Check if result exists in list item call")
    for volume in volumes:
        self.debug("Volume: %s, state: %s" % (volume.name, volume.state))
        self.assertEqual(volume.state,
                         "Ready",
                         "Check new volume state in list volumes call")

    self.debug("Test completed successfully.")
    return
def test_04_template_from_snapshot(self):
    """Create Template from snapshot
    """

    # Validate the following
    # 2. Snapshot the Root disk
    # 3. Create Template from snapshot
    # 4. Deploy Virtual machine using this template
    # 5. VM should be in running state

    userapiclient = self.testClient.getUserApiClient(
        UserName=self.account.name,
        DomainName=self.account.domain)

    root_volumes = Volume.list(userapiclient,
                               virtualmachineid=self.virtual_machine.id,
                               type="ROOT",
                               listall=True)
    root_volume = root_volumes[0]
    self.debug("Creating a snapshot from volume: %s" % root_volume.id)

    # Create a snapshot of volume
    snapshot = Snapshot.create(userapiclient,
                               root_volume.id,
                               account=self.account.name,
                               domainid=self.account.domainid)
    self.debug("Creating a template from snapshot: %s" % snapshot.id)

    # Generate template from the snapshot
    template = Template.create_from_snapshot(userapiclient,
                                             snapshot,
                                             self.services["template"])
    self.cleanup.append(template)

    # Verify created template
    templates = Template.list(
        userapiclient,
        templatefilter=self.services["template"]["templatefilter"],
        id=template.id
    )
    self.assertNotEqual(templates,
                        None,
                        "Check if result exists in list item call")
    self.assertEqual(templates[0].id,
                     template.id,
                     "Check new template id in list resources call")

    self.debug("Deploying a VM from template: %s" % template.id)
    # Deploy new virtual machine using template
    virtual_machine = VirtualMachine.create(
        userapiclient,
        self.services["virtual_machine"],
        templateid=template.id,
        accountid=self.account.name,
        domainid=self.account.domainid,
        serviceofferingid=self.service_offering.id,
    )
    self.cleanup.append(virtual_machine)

    vm_response = VirtualMachine.list(
        userapiclient,
        id=virtual_machine.id,
        account=self.account.name,
        domainid=self.account.domainid
    )
    self.assertEqual(isinstance(vm_response, list),
                     True,
                     "Check for list VM response return valid list")

    # Verify VM response to check whether VM deployment was successful
    self.assertNotEqual(len(vm_response),
                        0,
                        "Check VMs available in List VMs response")
    deployed_vm = vm_response[0]
    self.assertEqual(deployed_vm.state,
                     "Running",
                     "Check the state of VM created from Template")
    return
def test_01_attach_volume(self):
    """Attach a created Volume to a Running VM
    """
    # Validate the following
    # 1. Create a data volume.
    # 2. List Volumes should not have vmname and virtualmachineid fields in
    #    response before volume attach (to VM)
    # 3. Attch volume to VM. Attach volume should be successful.
    # 4. List Volumes should have vmname and virtualmachineid fields in
    #    response before volume attach (to VM)

    # Check the list volumes response for vmname and virtualmachineid
    listed = Volume.list(self.apiclient, id=self.volume.id)
    self.assertNotEqual(listed, None, "Check if volume exists in ListVolumes")
    self.assertEqual(isinstance(listed, list), True,
                     "Check list volumes response for valid list")
    unattached = listed[0]
    self.assertEqual(unattached.type, "DATADISK",
                     "Check volume type from list volume response")
    self.assertEqual(hasattr(unattached, "vmname"), True,
                     "Check whether volume has vmname field")
    self.assertEqual(hasattr(unattached, "virtualmachineid"), True,
                     "Check whether volume has virtualmachineid field")

    # Attach volume to VM
    self.debug("Attach volume: %s to VM: %s" % (self.volume.id, self.virtual_machine.id))
    self.virtual_machine.attach_volume(self.apiclient, self.volume)

    # Check all volumes attached to same VM
    listed = Volume.list(
        self.apiclient,
        virtualmachineid=self.virtual_machine.id,
        type="DATADISK",
        listall=True
    )
    self.assertNotEqual(listed, None, "Check if volume exists in ListVolumes")
    self.assertEqual(isinstance(listed, list), True,
                     "Check list volumes response for valid list")
    attached = listed[0]
    self.assertEqual(
        attached.vmname,
        self.virtual_machine.name,
        "Check virtual machine name in list volumes response"
    )
    self.assertEqual(attached.virtualmachineid, self.virtual_machine.id,
                     "Check VM ID in list Volume response")
    return
def test_06_deploy_startvm_attach_detach(self):
    """Test Deploy Virtual Machine with startVM=false and
    attach detach volumes
    """
    # Validate the following:
    # 1. deploy Vm with the startvm=false. Attach volume to the instance
    # 2. listVM command should return the deployed VM.State of this VM
    #    should be "Stopped".
    # 3. Attach volume should be successful
    # 4. Detach volume from instance. Detach should be successful

    self.debug("Deploying instance in the account: %s" % self.account.name)
    self.virtual_machine = VirtualMachine.create(
        self.apiclient,
        self.services["virtual_machine"],
        accountid=self.account.name,
        domainid=self.account.domainid,
        serviceofferingid=self.service_offering.id,
        startvm=False,
        diskofferingid=self.disk_offering.id,
    )

    # VM was deployed with startvm=False, so it must be Stopped.
    response = self.virtual_machine.getState(self.apiclient,
                                             VirtualMachine.STOPPED)
    self.assertEqual(response[0], PASS, response[1])

    self.debug("Creating a volume in account: %s" % self.account.name)
    data_disk = Volume.create(
        self.apiclient,
        self.services["volume"],
        zoneid=self.zone.id,
        account=self.account.name,
        domainid=self.account.domainid,
        diskofferingid=self.disk_offering.id,
    )
    self.debug("Created volume in account: %s" % self.account.name)

    self.debug("Attaching volume to instance: %s" % self.virtual_machine.name)
    try:
        self.virtual_machine.attach_volume(self.apiclient, data_disk)
    except Exception as e:
        self.fail("Attach volume failed with Exception: %s" % e)

    self.debug("Detaching the disk: %s" % data_disk.name)
    self.virtual_machine.detach_volume(self.apiclient, data_disk)
    self.debug("Datadisk %s detached!" % data_disk.name)

    # After detach the volume must not be listed against the VM.
    volumes = Volume.list(
        self.apiclient,
        virtualmachineid=self.virtual_machine.id,
        type="DATADISK",
        id=data_disk.id,
        listall=True
    )
    self.assertEqual(volumes,
                     None,
                     "List Volumes should not list any volume for instance")
    return
def createSnapshotFromVirtualMachineVolume(apiclient, account, vmid):
    """Create snapshot from the first volume of the given VM.

    Returns [PASS, snapshot] on success or [FAIL, exception] on error,
    matching the (status, payload) convention used by the other
    helpers in this module.
    """
    try:
        volumes = Volume.list(apiclient, account=account.name,
                              domainid=account.domainid,
                              virtualmachineid=vmid)
        # Reuse the stored validation result instead of calling
        # validateList(volumes) a second time (the original computed
        # it twice and discarded the first result).
        validationresult = validateList(volumes)
        assert validationresult[0] == PASS, \
            "List volumes should return a valid response"

        snapshot = Snapshot.create(apiclient, volume_id=volumes[0].id,
                                   account=account.name,
                                   domainid=account.domainid)

        # Confirm the snapshot is visible through the list API.
        snapshots = Snapshot.list(apiclient, id=snapshot.id, listall=True)
        validationresult = validateList(snapshots)
        assert validationresult[0] == PASS, \
            "List snapshot should return a valid list"
    except Exception as e:
        return [FAIL, e]
    return [PASS, snapshot]
def test_create_volume_under_domain(self):
    """Create a volume under a non-root domain as non-root-domain user

    1. Create a domain under ROOT
    2. Create a user within this domain
    3. As user in step 2. create a volume with standard disk offering
    4. Ensure the volume is created in the domain and available to the
       user in his listVolumes call
    """
    dom = Domain.create(self.apiclient,
                        services={},
                        name="NROOT",
                        parentdomainid=self.domain.id)
    self.cleanup.append(dom)
    self.assertTrue(dom is not None, msg="Domain creation failed")

    domuser = Account.create(
        apiclient=self.apiclient,
        services=self.services["account"],
        admin=False,
        domainid=dom.id
    )
    # Insert before the domain so the account is removed first during
    # cleanup (a domain cannot be deleted while it has accounts).
    self.cleanup.insert(-2, domuser)
    self.assertTrue(domuser is not None)

    domapiclient = self.testClient.getUserApiClient(UserName=domuser.name,
                                                    DomainName=dom.name)

    diskoffering = DiskOffering.list(self.apiclient)
    self.assertTrue(isinstance(diskoffering, list),
                    msg="DiskOffering list is not a list?")
    self.assertTrue(len(diskoffering) > 0, "no disk offerings in the deployment")

    vol = Volume.create(
        domapiclient,
        services=self.services["volume"],
        zoneid=self.zone.id,
        account=domuser.name,
        domainid=dom.id,
        diskofferingid=diskoffering[0].id,
    )
    self.assertTrue(vol is not None,
                    "volume creation fails in domain %s as user %s" % (dom.name, domuser.name))

    listed_vol = Volume.list(domapiclient, id=vol.id)
    self.assertTrue(
        listed_vol is not None and isinstance(listed_vol, list),
        "invalid response from listVolumes for volume %s" % vol.id,
    )
    self.assertTrue(
        listed_vol[0].id == vol.id,
        "Volume returned by list volumes %s not matching with queried\
        volume %s in domain %s" % (listed_vol[0].id, vol.id, dom.name),
    )
def create_template(self, vm):
    self.debug("Creating guest VM template")
    # Template is built from the VM's ROOT volume.
    list_volume = Volume.list(self.api_client,
                              virtualmachineid=vm.id,
                              type="ROOT",
                              listall=True)
    if not isinstance(list_volume, list):
        raise Exception("Exception: Unable to find root volume for VM with ID - %s" % vm.id)
    self.volume = list_volume[0]

    self.pw_enabled_template = Template.create(
        self.api_client,
        self.test_data["template"],
        self.volume.id,
        account=self.account.name,
        domainid=self.account.domainid,
    )
    self.assertEqual(self.pw_enabled_template.passwordenabled,
                     True,
                     "template is not passwordenabled")
    self.cleanup.append(self.pw_enabled_template)
    self.debug("Created guest VM template")
def test_08_resize_volume(self):
    """Test resize a volume"""
    # Verify the size is the new size is what we wanted it to be.
    self.debug(
        "Attaching volume (ID: %s) to VM (ID: %s)" % (
            self.volume.id,
            self.virtual_machine.id
        ))
    self.virtual_machine.attach_volume(self.apiClient, self.volume)
    self.attached = True

    hosts = Host.list(self.apiClient, id=self.virtual_machine.hostid)
    self.assertTrue(isinstance(hosts, list))
    self.assertTrue(len(hosts) > 0)
    self.debug("Found %s host" % hosts[0].hypervisor)

    # XenServer needs the VM stopped before resize; VMware does not
    # support volume resize at all.
    if hosts[0].hypervisor == "XenServer":
        self.virtual_machine.stop(self.apiClient)
    elif hosts[0].hypervisor.lower() == "vmware":
        self.skipTest("Resize Volume is unsupported on VmWare")

    # resize the data disk
    self.debug("Resize Volume ID: %s" % self.volume.id)
    cmd = resizeVolume.resizeVolumeCmd()
    cmd.id = self.volume.id
    cmd.diskofferingid = self.services['resizeddiskofferingid']
    self.apiClient.resizeVolume(cmd)

    # Poll up to 3 times (10s apart) for the volume to report the new
    # size (3 GiB) and return to the Ready state.
    count = 0
    success = False
    while count < 3:
        list_volume_response = Volume.list(
            self.apiClient,
            id=self.volume.id,
            type='DATADISK'
        )
        for vol in list_volume_response:
            # BUG FIX: 3221225472L is a Python 2 long literal and a
            # SyntaxError on Python 3; plain ints have arbitrary
            # precision, so the bare literal is equivalent.
            if vol.id == self.volume.id and vol.size == 3221225472 and vol.state == 'Ready':
                success = True
        if success:
            break
        else:
            time.sleep(10)
            count += 1
def test_02_detach_volume(self):
    """Detach a Volume attached to a VM
    """
    # Validate the following
    # 1. Data disk should be detached from instance
    # 2. Listvolumes should not have vmname and virtualmachineid fields for
    #    that volume.
    self.debug("Detach volume: %s to VM: %s" % (
        self.volume.id,
        self.virtual_machine.id
    ))
    self.virtual_machine.detach_volume(self.apiclient, self.volume)

    # Sleep to ensure the current state will reflected in other calls
    time.sleep(self.services["sleep"])

    listed = Volume.list(
        self.apiclient,
        id=self.volume.id
    )
    self.assertNotEqual(
        listed,
        None,
        "Check if volume exists in ListVolumes"
    )
    self.assertEqual(
        isinstance(listed, list),
        True,
        "Check list volumes response for valid list"
    )

    # Both VM id and VM name must be cleared on the detached volume.
    detached = listed[0]
    self.assertEqual(
        detached.virtualmachineid,
        None,
        "Check if volume state (detached) is reflected"
    )
    self.assertEqual(
        detached.vmname,
        None,
        "Check if volume state (detached) is reflected"
    )
    return
def test_02_attach_volume(self):
    """Attach a created Volume to a Running VM
    """
    # Validate the following
    # 1. shows list of volumes
    # 2. "Attach Disk" pop-up box will display with list of instances
    # 3. disk should be attached to instance successfully
    self.debug(
        "Attaching volume (ID: %s) to VM (ID: %s)" % (
            self.volume.id,
            self.virtual_machine.id
        ))
    self.virtual_machine.attach_volume(self.apiClient, self.volume)
    self.attached = True

    listed = Volume.list(
        self.apiClient,
        id=self.volume.id
    )
    self.assertEqual(
        isinstance(listed, list),
        True,
        "Check list response returns a valid list"
    )
    self.assertNotEqual(
        listed,
        None,
        "Check if volume exists in ListVolumes"
    )
    attached = listed[0]
    self.assertNotEqual(
        attached.virtualmachineid,
        None,
        "Check if volume state (attached) is reflected"
    )
    try:
        #Format the attached volume to a known fs
        format_volume_to_ext3(self.virtual_machine.get_ssh_client())
    except Exception as e:
        self.fail("SSH failed for VM: %s - %s" % (self.virtual_machine.ipaddress, e))
    return
def test_01_list_volumes(self):
    """Test listing Volumes using 'ids' parameter
    """
    # Query exactly the three known ROOT volumes by id.
    wanted_ids = [self.vm1_root_volume.id,
                  self.vm2_root_volume.id,
                  self.vm3_root_volume.id]
    list_volume_response = Volume.list(
        self.apiclient,
        ids=wanted_ids,
        type='ROOT',
        listAll=True
    )
    self.assertEqual(
        isinstance(list_volume_response, list),
        True,
        "List Volume response was not a valid list"
    )
    self.assertEqual(
        len(list_volume_response),
        3,
        "ListVolumes response expected 3 Volumes, received %s"
        % len(list_volume_response)
    )
def uploadVolume(apiclient, zoneid, account, services):
    """Upload a volume and wait for it to reach the Uploaded state.

    Returns [PASS, volume] on success or [FAIL, exception] on error.
    """
    try:
        # Upload the volume
        volume = Volume.upload(apiclient, services["volume"],
                               zoneid=zoneid,
                               account=account.name,
                               domainid=account.domainid,
                               url=services["url"])
        volume.wait_for_upload(apiclient)

        # Check List Volume response for newly created volume
        volumes = Volume.list(apiclient, id=volume.id,
                              zoneid=zoneid, listall=True)
        validationresult = validateList(volumes)
        assert validationresult[0] == PASS,\
            "volumes list validation failed: %s" % validationresult[2]
        assert str(volumes[0].state).lower() == "uploaded",\
            "Volume state should be 'uploaded' but it is %s" % volumes[0].state
    except Exception as e:
        return [FAIL, e]
    return [PASS, volume]
def test_05_detach_volume(self):
    """Detach a Volume attached to a VM.

    After detaching, the volume listing must no longer carry a
    virtualmachineid, proving the detach was recorded.
    """
    self.debug("Detaching volume (ID: %s) from VM (ID: %s)" %
               (self.volume.id, self.virtual_machine.id))
    # Attach first so there is a known attached state to detach from.
    self.virtual_machine.attach_volume(self.apiClient, self.volume)
    self.virtual_machine.detach_volume(self.apiClient, self.volume)
    self.attached = False
    # Sleep to ensure the current state will reflected in other calls
    time.sleep(self.services["sleep"])

    listed = Volume.list(self.apiClient, id=self.volume.id)
    self.assertEqual(isinstance(listed, list),
                     True,
                     "Check list response returns a valid list")
    self.assertNotEqual(listed,
                        None,
                        "Check if volume exists in ListVolumes")
    detached_volume = listed[0]
    self.assertEqual(detached_volume.virtualmachineid,
                     None,
                     "Check if volume state (detached) is reflected")
    return
def create_template(self, vm):
    """Create a password-enabled template from *vm*'s ROOT volume.

    Stores the volume in self.volume and the template in
    self.pw_enabled_template; registers the template for cleanup.
    """
    self.debug("CREATE TEMPLATE")
    root_volumes = Volume.list(self.apiclient,
                               virtualmachineid=vm.id,
                               type='ROOT',
                               listall=True)
    # Guard clause: a missing ROOT volume is a hard failure.
    if not isinstance(root_volumes, list):
        raise Exception(
            "Exception: Unable to find root volume for VM: %s" % vm.id)
    self.volume = root_volumes[0]

    self.test_data["template_pr"]["ostype"] = self.test_data["ostype_pr"]
    self.pw_enabled_template = Template.create(
        self.apiclient,
        self.test_data["template_pr"],
        self.volume.id,
        account=self.account.name,
        domainid=self.account.domainid
    )
    self.assertEqual(self.pw_enabled_template.passwordenabled, True,
                     "template is not passwordenabled")
    self.cleanup.append(self.pw_enabled_template)
def test_04_vmreset_after_migrate_vm__rootvolume_resized(self):
    """Test migrate vm after root volume resize.

    # Validate the following
    # 1. Deploy a VM without any disk offering (only root disk)
    # 2. Perform(resize) of the root volume
    # 3. migrate vm from host to another
    # 4. perform vm reset after vm migration
    # 5. root volume size must be unchanged by the reset (CLOUDSTACK-10079)
    """
    try:
        # Pick the VMware full-clone offering when the setup enabled it.
        if self.updateclone:
            self.virtual_machine = VirtualMachine.create(
                self.apiclient,
                self.services["virtual_machine"],
                accountid=self.parentd_admin.name,
                domainid=self.parent_domain.id,
                serviceofferingid=self.services_offering_vmware.id,
                mode=self.zone.networktype)
        else:
            self.virtual_machine = VirtualMachine.create(
                self.apiclient,
                self.services["virtual_machine"],
                accountid=self.parentd_admin.name,
                domainid=self.parent_domain.id,
                serviceofferingid=self.service_offering.id,
                mode=self.zone.networktype)
        # listVirtual macine
        list_vms = VirtualMachine.list(self.apiclient,
                                       id=self.virtual_machine.id)
        self.debug(
            "Verify listVirtualMachines response for virtual machine: %s"
            % self.virtual_machine.id
        )
        res = validateList(list_vms)
        self.assertNotEqual(res[2], INVALID_INPUT, "Invalid list response")
        self.cleanup.append(self.virtual_machine)
        vm = list_vms[0]
        self.assertEqual(vm.id, self.virtual_machine.id,
                         "Virtual Machine ids do not match")
        # get root vol from created vm, verify it is correct size
        list_volume_response = Volume.list(
            self.apiclient,
            virtualmachineid=self.virtual_machine.id,
            type='ROOT',
            listall='True')
        res = validateList(list_volume_response)
        self.assertNotEqual(
            res[2], INVALID_INPUT,
            "listVolumes returned invalid object in response")
        # Capture the pre-reset root volume so its size can be compared later.
        rootvolume = list_volume_response[0]
        # chk_volume_resize grows the ROOT disk and verifies it in-guest;
        # it returns False for unsupported hypervisors.
        result = self.chk_volume_resize(self.apiclient, vm)
        if result:
            try:
                list_host_response = list_hosts(
                    self.apiclient,
                    id=self.virtual_machine.hostid)
                res = validateList(list_host_response)
                self.assertNotEqual(
                    res[2], INVALID_INPUT,
                    "listHosts returned invalid object in response")
                sourcehost = list_host_response[0]
                # Find a destination host the VM can migrate to.
                try:
                    self.list_hosts_suitable = Host.listForMigration(
                        self.apiclient,
                        virtualmachineid=self.virtual_machine.id)
                except Exception as e:
                    self.debug("Not found suitable host")
                    raise Exception("Exception while getting hosts"
                                    " list suitable for migration: %s" % e)
                self.virtualmachine_migrate_response = \
                    self.virtual_machine.migrate(
                        self.apiclient,
                        self.list_hosts_suitable[0].id)
                # Confirm the VM is now reported on the destination host.
                list_vms = VirtualMachine.list(
                    self.apiclient,
                    id=self.virtual_machine.id,
                    hostid=self.list_hosts_suitable[0].id)
                res = validateList(list_vms)
                self.assertNotEqual(
                    res[2], INVALID_INPUT,
                    "listVirtualMachines returned "
                    "invalid object in response")
                # "VM reset" = restore the VM back to its template.
                self.virtual_machine_reset = self.virtual_machine.restore(
                    self.apiclient,
                    self.services["virtual_machine"]["template"])
                list_restorevolume_response = Volume.list(
                    self.apiclient,
                    virtualmachineid=self.virtual_machine.id,
                    type='ROOT',
                    listall='True')
                restorerootvolume = list_restorevolume_response[0]
                # The restored root volume must keep the resized size.
                self.assertEqual(
                    rootvolume.size, restorerootvolume.size,
                    "root volume and restore root"
                    " volume size differs - CLOUDSTACK-10079")
            except Exception as e:
                raise Exception("Warning: Exception "
                                "during VM migration: %s" % e)
    except Exception as e:
        raise Exception(
            "Warning: Exception during executing"
            " the test-migrate_vm_after_rootvolume_resize: %s" % e)
    return
def test_06_resized_rootvolume_with_lessvalue(self):
    """Test resize root volume with less than original volume size.

    # Validate the following
    # 1. Deploy a VM without any disk offering (only root disk)
    # 2. Perform(resize) of the root volume with less than current root
    #    volume size
    # 3. Check for proper error message (shrinking ROOT volumes must be
    #    rejected by the management server)
    """
    # deploy a vm
    try:
        if self.updateclone:
            self.virtual_machine = VirtualMachine.create(
                self.apiclient,
                self.services["virtual_machine"],
                accountid=self.parentd_admin.name,
                domainid=self.parent_domain.id,
                serviceofferingid=self.services_offering_vmware.id,
                mode=self.zone.networktype)
        else:
            self.virtual_machine = VirtualMachine.create(
                self.apiclient,
                self.services["virtual_machine"],
                accountid=self.parentd_admin.name,
                domainid=self.parent_domain.id,
                serviceofferingid=self.service_offering.id,
                mode=self.zone.networktype)
        # Let the deployment settle, then verify the VM is listed.
        time.sleep(self.services["sleep"])
        list_vms = VirtualMachine.list(self.apiclient,
                                       id=self.virtual_machine.id)
        self.debug(
            "Verify listVirtualMachines response for virtual machine: %s"
            % self.virtual_machine.id
        )
        res = validateList(list_vms)
        self.assertNotEqual(res[2], INVALID_INPUT, "Invalid list response")
        self.cleanup.append(self.virtual_machine)
        vm = list_vms[0]
        self.assertEqual(vm.id, self.virtual_machine.id,
                         "Virtual Machine ids do not match")
        # get root vol from created vm, verify it is correct size
        list_volume_response = Volume.list(
            self.apiclient,
            virtualmachineid=self.virtual_machine.id,
            type='ROOT',
            listall='True')
        res = validateList(list_volume_response)
        self.assertNotEqual(
            res[2], INVALID_INPUT,
            "listVolumes returned invalid object in response")
        # XenServer only supports offline ROOT volume resize.
        if vm.state == "Running" and vm.hypervisor.lower() == "xenserver":
            self.virtual_machine.stop(self.apiclient)
            time.sleep(self.services["sleep"])
        rootvolume = list_volume_response[0]
        # converting json response to Volume Object
        rootvol = Volume(rootvolume.__dict__)
        # One GiB smaller than the current size -> a shrink request.
        newsize = (rootvolume.size >> 30) - 1
        if rootvolume is not None and 'vmware' in vm.hypervisor.lower():
            try:
                rootvol.resize(self.apiclient, size=newsize)
            except Exception as e:
                # BUGFIX: Exception.message was removed in Python 3;
                # str(e) works on both Python 2 and 3.
                assert "Shrink operation on ROOT volume not supported" \
                    in str(e), \
                    "TestCase Failed,able to resize root volume or error message is not matched"
    except Exception as e:
        raise Exception("Warning: Exception "
                        "during executing test resize"
                        " volume with less value : %s" % e)
    # BUGFIX: the original condition was
    #     rootvol is not None and 'kvm' or 'xenserver' in vm.hypervisor.lower()
    # which, because of `and`/`or` precedence, parses as
    #     (rootvol is not None and 'kvm') or (...)
    # and is therefore always truthy; the resize then also ran on VMware.
    if rootvol is not None and ('kvm' in vm.hypervisor.lower()
                                or 'xenserver' in vm.hypervisor.lower()):
        rootvol.resize(self.apiclient, size=newsize)
def test_08_increase_volume_size_within_account_limit(self):
    """Test increasing volume size within the account limit and verify
    primary storage usage.

    # Validate the following
    # 1. Create a domain and its admin account
    # 2. Set account primary storage limit well beyond (20 GB volume +
    #    template size of VM)
    # 3. Deploy a VM without any disk offering (only root disk)
    # 4. Increase (resize) the volume to 20 GB
    # 5. Resize operation should be successful and primary storage count
    #    for account should be updated successfully
    """
    # Setting up account and domain hierarchy
    result = self.setupAccounts()
    self.assertEqual(result[0], PASS, result[1])
    # All further calls run as the domain-admin user, not as root admin.
    apiclient = self.testClient.getUserApiClient(
        UserName=self.parentd_admin.name,
        DomainName=self.parentd_admin.domain)
    self.assertNotEqual(
        apiclient,
        FAILED,
        "Failed to get api client\
        of account: %s" % self.parentd_admin.name)
    # Template size in GiB; the account limit must cover template + 20 GB.
    # NOTE(review): on Python 3 `/` yields a float here — presumably `//`
    # (integer GiB) was intended; confirm against updateResourceLimits.
    templateSize = (self.template.size / (1024**3))
    accountLimit = (templateSize + 20)
    response = self.updateResourceLimits(accountLimit=accountLimit)
    self.assertEqual(response[0], PASS, response[1])
    try:
        if self.updateclone:
            self.virtual_machine = VirtualMachine.create(
                apiclient,
                self.services["virtual_machine"],
                accountid=self.parentd_admin.name,
                domainid=self.parent_domain.id,
                serviceofferingid=self.services_offering_vmware.id)
        else:
            self.virtual_machine = VirtualMachine.create(
                apiclient,
                self.services["virtual_machine"],
                accountid=self.parentd_admin.name,
                domainid=self.parent_domain.id,
                serviceofferingid=self.service_offering.id)
        list_vms = VirtualMachine.list(apiclient,
                                       id=self.virtual_machine.id)
        self.debug(
            "Verify listVirtualMachines response for virtual machine: %s"
            % self.virtual_machine.id
        )
        self.assertEqual(isinstance(list_vms, list), True,
                         "List VM response was not a valid list")
        self.cleanup.append(self.virtual_machine)
        self.cleanup.reverse()
        vm = list_vms[0]
        # Fetch the VM's ROOT volume — the disk that will be resized.
        list_volume_response = Volume.list(
            apiclient,
            virtualmachineid=self.virtual_machine.id,
            type='ROOT',
            listall='True')
        res = validateList(list_volume_response)
        self.assertNotEqual(
            res[2], INVALID_INPUT,
            "listVolumes returned invalid object in response")
        # XenServer only supports offline ROOT volume resize.
        # NOTE(review): this uses self.apiclient (root admin) while the
        # rest of the test uses the user apiclient — confirm intended.
        if vm.state == "Running" and vm.hypervisor.lower() == "xenserver":
            self.virtual_machine.stop(self.apiclient)
            time.sleep(self.services["sleep"])
        rootvolume = list_volume_response[0]
        # converting json response to Volume Object
        rootvol = Volume(rootvolume.__dict__)
        # Current size in GiB plus the 20 GB growth from the test plan.
        newsize = (rootvolume.size >> 30) + 20
        if rootvolume is not None:
            try:
                rootvol.resize(apiclient, size=newsize)
                # Primary storage resource count must reflect the new size.
                response = matchResourceCount(
                    self.apiclient, newsize,
                    RESOURCE_PRIMARY_STORAGE,
                    accountid=self.parentd_admin.id)
                if response[0] == FAIL:
                    raise Exception(response[1])
            except Exception as e:
                self.fail("Failed with exception: %s" % e)
    except Exception as e:
        raise Exception("Warning: Exception while checking primary"
                        " storage capacity after root "
                        "volume resize : %s" % e)
    return
def test_01_concurrent_snapshot_global_limit(self):
    """Test if global value concurrent.snapshots.threshold.perhost value
    is respected.

    This is positive test cases and tests if we are able to create as
    many snapshots mentioned in global value.

    # 1. Create an account and a VM in it
    # 2. Read the global value for concurrent.snapshots.threshold.perhost
    # 3. If the value is Null, create at least 10 concurrent snapshots
    #    and verify they are created successfully
    # 4. Else, create as many snapshots specified in the global value,
    #    and verify they are created successfully
    """
    # Create an account
    account = Account.create(self.apiclient,
                             self.testdata["account"],
                             domainid=self.domain.id)
    self.cleanup.append(account)
    # Create user api client of the account
    userapiclient = self.testClient.getUserApiClient(
        UserName=account.name,
        DomainName=account.domain)
    # Create VM
    virtual_machine = VirtualMachine.create(
        userapiclient,
        self.testdata["small"],
        templateid=self.template.id,
        accountid=account.name,
        domainid=account.domainid,
        serviceofferingid=self.service_offering.id,
        zoneid=self.zone.id)
    # Create 10 concurrent snapshots by default
    # We can have any value, so keeping it 10 as it
    # seems good enough to test
    concurrentSnapshots = 10
    # Step 1
    # Get ROOT Volume Id — snapshots are taken against the root disk.
    volumes = Volume.list(self.apiclient,
                          virtualmachineid=virtual_machine.id,
                          type='ROOT',
                          listall=True)
    self.assertEqual(
        validateList(volumes)[0],
        PASS,
        "Volumes list validation failed")
    root_volume = volumes[0]
    config = Configurations.list(
        self.apiclient,
        name="concurrent.snapshots.threshold.perhost")
    # Use the configured threshold when one is set; otherwise keep 10.
    if config[0].value:
        self.assertEqual(
            isinstance(config, list),
            True,
            "concurrent.snapshots.threshold.perhost should be present\
            in global config")
        concurrentSnapshots = int(config[0].value)
    self.debug("concurrent Snapshots: %s" % concurrentSnapshots)
    # Fire all snapshot requests in parallel, one thread per snapshot.
    threads = []
    for i in range(0, (concurrentSnapshots)):
        thread = Thread(target=Snapshot.create,
                        args=(self.apiclient, root_volume.id))
        threads.append(thread)
        thread.start()
    # Wait for every snapshot request to finish before verifying.
    for thread in threads:
        thread.join()
    snapshots = Snapshot.list(self.apiclient,
                              volumeid=root_volume.id,
                              listall=True)
    self.assertEqual(
        validateList(snapshots)[0],
        PASS,
        "Snapshots list validation failed")
    self.assertEqual(
        len(snapshots),
        concurrentSnapshots,
        "There should be exactly %s snapshots present" %
        concurrentSnapshots)
    # Every snapshot must have completed its backup to secondary storage.
    for snapshot in snapshots:
        self.assertEqual(
            str(snapshot.state).lower(),
            BACKED_UP,
            "Snapshot state should be backedUp but it is\
            %s" % snapshot.state)
    return
def test_01_volume_attach_detach(self):
    """Test Volume attach/detach to VM (5 data volumes).

    # Validate the following
    # 1. Deploy a vm and create 5 data disk
    # 2. Attach all the created Volume to the vm.
    # 3. Detach all the volumes attached.
    # 4. Reboot the VM. VM should be successfully rebooted
    # 5. Stop the VM. Stop VM should be successful
    # 6. Start The VM. Start VM should be successful
    """
    try:
        volumes = []
        # Create 5 volumes and attach to VM
        for i in range(self.max_data_volumes):
            volume = Volume.create(self.apiclient,
                                   self.services["volume"],
                                   zoneid=self.zone.id,
                                   account=self.account.name,
                                   domainid=self.account.domainid,
                                   diskofferingid=self.disk_offering.id)
            self.cleanup.append(volume)
            volumes.append(volume)
            # Check List Volume response for newly created volume
            list_volume_response = Volume.list(self.apiclient,
                                               id=volume.id)
            self.assertNotEqual(list_volume_response, None,
                                "Check if volume exists in ListVolumes")
            self.assertEqual(isinstance(list_volume_response, list), True,
                             "Check list volumes response for valid list")
            # Attach volume to VM
            self.virtual_machine.attach_volume(self.apiclient, volume)
        # Check all volumes attached to same VM
        list_volume_response = Volume.list(
            self.apiclient,
            virtualmachineid=self.virtual_machine.id,
            type='DATADISK',
            listall=True)
        self.assertNotEqual(list_volume_response, None,
                            "Check if volume exists in ListVolumes")
        self.assertEqual(isinstance(list_volume_response, list), True,
                         "Check list volumes response for valid list")
        self.assertEqual(
            len(list_volume_response),
            self.max_data_volumes,
            "Volumes attached to the VM %s. Expected %s" %
            (len(list_volume_response), self.max_data_volumes))
        # Detach all volumes from VM
        for volume in volumes:
            self.virtual_machine.detach_volume(self.apiclient, volume)
        # Reboot VM
        self.debug("Rebooting the VM: %s" % self.virtual_machine.id)
        self.virtual_machine.reboot(self.apiclient)
        # Sleep to ensure that VM is in ready state
        time.sleep(self.services["sleep"])
        vm_response = VirtualMachine.list(
            self.apiclient,
            id=self.virtual_machine.id,
        )
        # Verify VM response to check whether VM deployment was successful
        self.assertEqual(isinstance(vm_response, list), True,
                         "Check list VM response for valid list")
        self.assertNotEqual(len(vm_response), 0,
                            "Check VMs available in List VMs response")
        vm = vm_response[0]
        self.assertEqual(vm.state, 'Running',
                         "Check the state of VM")
        # Stop VM
        self.virtual_machine.stop(self.apiclient)
        # Start VM
        self.virtual_machine.start(self.apiclient)
        # Sleep to ensure that VM is in ready state
        time.sleep(self.services["sleep"])
        vm_response = VirtualMachine.list(
            self.apiclient,
            id=self.virtual_machine.id,
        )
        # Verify VM came back up Running after the stop/start cycle.
        self.assertEqual(isinstance(vm_response, list), True,
                         "Check list VM response for valid list")
        self.assertNotEqual(len(vm_response), 0,
                            "Check VMs available in List VMs response")
        vm = vm_response[0]
        self.assertEqual(vm.state, 'Running',
                         "Check the state of VM")
    except Exception as e:
        self.fail("Exception occuered: %s" % e)
    return
def test_04_template_from_snapshot(self):
    """Create Template from snapshot.

    # Validate the following
    # 2. Snapshot the Root disk
    # 3. Create Template from snapshot
    # 4. Deploy Virtual machine using this template
    # 5. VM should be in running state
    """
    if self.hypervisor.lower() in ['hyperv', 'lxc']:
        self.skipTest("Snapshots feature is not supported on %s" %
                      self.hypervisor.lower())
    # Operate as the regular (non-admin) account user.
    userapiclient = self.testClient.getUserApiClient(
        UserName=self.account.name,
        DomainName=self.account.domain)
    volumes = Volume.list(userapiclient,
                          virtualmachineid=self.virtual_machine.id,
                          type='ROOT',
                          listall=True)
    volume = volumes[0]
    self.debug("Creating a snapshot from volume: %s" % volume.id)
    # Create a snapshot of volume
    snapshot = Snapshot.create(userapiclient,
                               volume.id,
                               account=self.account.name,
                               domainid=self.account.domainid)
    self.debug("Creating a template from snapshot: %s" % snapshot.id)
    # Generate template from the snapshot
    template = Template.create_from_snapshot(userapiclient,
                                             snapshot,
                                             self.services["template"])
    self.cleanup.append(template)
    # Verify created template
    templates = Template.list(
        userapiclient,
        templatefilter=self.services["template"]["templatefilter"],
        id=template.id)
    self.assertNotEqual(templates, None,
                        "Check if result exists in list item call")
    self.assertEqual(templates[0].id, template.id,
                     "Check new template id in list resources call")
    self.debug("Deploying a VM from template: %s" % template.id)
    # Deploy new virtual machine using template
    virtual_machine = VirtualMachine.create(
        userapiclient,
        self.services["virtual_machine"],
        templateid=template.id,
        accountid=self.account.name,
        domainid=self.account.domainid,
        serviceofferingid=self.service_offering.id,
    )
    self.cleanup.append(virtual_machine)
    vm_response = VirtualMachine.list(userapiclient,
                                      id=virtual_machine.id,
                                      account=self.account.name,
                                      domainid=self.account.domainid)
    self.assertEqual(isinstance(vm_response, list), True,
                     "Check for list VM response return valid list")
    # Verify VM response to check whether VM deployment was successful
    self.assertNotEqual(len(vm_response), 0,
                        "Check VMs available in List VMs response")
    vm = vm_response[0]
    self.assertEqual(vm.state, 'Running',
                     "Check the state of VM created from Template")
    return
def test_01_volume_from_snapshot(self):
    """Test Creating snapshot from volume having spaces in name(KVM).

    # Validate the following
    # 1. Create a virtual machine and data volume
    # 2. Attach data volume to VM
    # 3. Login to machine; create temp/test directories on data volume
    #    and write some random data
    # 4. Snapshot the Volume
    # 5. Create another Volume from snapshot
    # 6. Mount/Attach volume to another virtual machine
    # 7. Compare data, data should match
    """
    if self.hypervisor.lower() in ['hyperv']:
        self.skipTest("Snapshots feature is not supported on Hyper-V")
    # Two random payloads written to two different sub-directories; they
    # are compared after the snapshot-derived volume is re-attached.
    random_data_0 = random_gen(size=100)
    random_data_1 = random_gen(size=100)
    self.debug("random_data_0 : %s" % random_data_0)
    self.debug("random_data_1: %s" % random_data_1)
    try:
        ssh_client = self.virtual_machine.get_ssh_client()
    except Exception as e:
        self.fail("SSH failed for VM: %s" %
                  self.virtual_machine.ipaddress)
    volume = Volume.create(
        self.apiclient,
        self.services["volume"],
        zoneid=self.zone.id,
        account=self.account.name,
        domainid=self.account.domainid,
        diskofferingid=self.disk_offering.id
    )
    self.debug("Created volume with ID: %s" % volume.id)
    self.virtual_machine.attach_volume(
        self.apiclient,
        volume
    )
    self.debug("Attach volume: %s to VM: %s" %
               (volume.id, self.virtual_machine.id))
    self.debug("Formatting volume: %s to ext3" % volume.id)
    # Format partition using ext3
    # Note that this is the second data disk partition of virtual machine
    # as it was already containing data disk before attaching the new
    # volume, Hence datadiskdevice_2
    format_volume_to_ext3(
        ssh_client,
        self.services["volume"][self.hypervisor]["datadiskdevice_2"]
    )
    # Mount the disk, create nested test dirs, write the payloads, and
    # read one back as a sanity check.
    cmds = [
        "fdisk -l",
        "mkdir -p %s" % self.services["paths"]["mount_dir"],
        "mount -t ext3 %s1 %s" % (
            self.services["volume"][self.hypervisor]["datadiskdevice_2"],
            self.services["paths"]["mount_dir"]),
        "mkdir -p %s/%s/{%s,%s} " % (
            self.services["paths"]["mount_dir"],
            self.services["paths"]["sub_dir"],
            self.services["paths"]["sub_lvl_dir1"],
            self.services["paths"]["sub_lvl_dir2"]),
        "echo %s > %s/%s/%s/%s" % (
            random_data_0,
            self.services["paths"]["mount_dir"],
            self.services["paths"]["sub_dir"],
            self.services["paths"]["sub_lvl_dir1"],
            self.services["paths"]["random_data"]),
        "echo %s > %s/%s/%s/%s" % (
            random_data_1,
            self.services["paths"]["mount_dir"],
            self.services["paths"]["sub_dir"],
            self.services["paths"]["sub_lvl_dir2"],
            self.services["paths"]["random_data"]),
        "cat %s/%s/%s/%s" % (
            self.services["paths"]["mount_dir"],
            self.services["paths"]["sub_dir"],
            self.services["paths"]["sub_lvl_dir1"],
            self.services["paths"]["random_data"])]
    for c in cmds:
        self.debug("Command: %s" % c)
        result = ssh_client.execute(c)
        self.debug(result)
    # Unmount the Sec Storage
    cmds = [
        "umount %s" % (self.services["paths"]["mount_dir"]),
    ]
    for c in cmds:
        self.debug("Command: %s" % c)
        ssh_client.execute(c)
    list_volume_response = Volume.list(
        self.apiclient,
        virtualmachineid=self.virtual_machine.id,
        type='DATADISK',
        id=volume.id
    )
    self.assertEqual(
        isinstance(list_volume_response, list),
        True,
        "Check list volume response for valid data"
    )
    volume_response = list_volume_response[0]
    # Create snapshot from attached volume
    snapshot = Snapshot.create(
        self.apiclient,
        volume_response.id,
        account=self.account.name,
        domainid=self.account.domainid
    )
    self.debug("Created snapshot: %s" % snapshot.id)
    # Create volume from snapshot
    volume_from_snapshot = Volume.create_from_snapshot(
        self.apiclient,
        snapshot.id,
        self.services["volume"],
        account=self.account.name,
        domainid=self.account.domainid
    )
    # Detach the volume from virtual machine
    self.virtual_machine.detach_volume(
        self.apiclient,
        volume
    )
    self.debug("Detached volume: %s from VM: %s" %
               (volume.id, self.virtual_machine.id))
    self.debug("Created Volume: %s from Snapshot: %s" % (
        volume_from_snapshot.id,
        snapshot.id))
    volumes = Volume.list(
        self.apiclient,
        id=volume_from_snapshot.id
    )
    self.assertEqual(
        isinstance(volumes, list),
        True,
        "Check list response returns a valid list"
    )
    self.assertNotEqual(
        len(volumes),
        None,
        "Check Volume list Length"
    )
    self.assertEqual(
        volumes[0].id,
        volume_from_snapshot.id,
        "Check Volume in the List Volumes"
    )
    # Attaching volume to new VM
    new_virtual_machine = VirtualMachine.create(
        self.apiclient,
        self.services["server_without_disk"],
        templateid=self.template.id,
        accountid=self.account.name,
        domainid=self.account.domainid,
        serviceofferingid=self.service_offering.id,
        mode=self.services["mode"]
    )
    self.debug("Deployed new VM for account: %s" % self.account.name)
    # self.cleanup.append(new_virtual_machine)
    self.debug("Attaching volume: %s to VM: %s" % (
        volume_from_snapshot.id,
        new_virtual_machine.id
    ))
    new_virtual_machine.attach_volume(
        self.apiclient,
        volume_from_snapshot
    )
    # Rebooting is required so that newly attached disks are detected
    self.debug("Rebooting : %s" % new_virtual_machine.id)
    new_virtual_machine.reboot(self.apiclient)
    try:
        # Login to VM to verify test directories and files
        ssh = new_virtual_machine.get_ssh_client()
        # Mount datadiskdevice_1 because this is the first data disk of the
        # new virtual machine
        cmds = [
            "fdisk -l",
            "mkdir -p %s" % self.services["paths"]["mount_dir"],
            "mount -t ext3 %s1 %s" % (
                self.services["volume"][
                    self.hypervisor]["datadiskdevice_1"],
                self.services["paths"]["mount_dir"]),
        ]
        for c in cmds:
            self.debug("Command: %s" % c)
            result = ssh.execute(c)
            self.debug(result)
        returned_data_0 = ssh.execute(
            "cat %s/%s/%s/%s" % (
                self.services["paths"]["mount_dir"],
                self.services["paths"]["sub_dir"],
                self.services["paths"]["sub_lvl_dir1"],
                self.services["paths"]["random_data"]
            ))
        returned_data_1 = ssh.execute(
            "cat %s/%s/%s/%s" % (
                self.services["paths"]["mount_dir"],
                self.services["paths"]["sub_dir"],
                self.services["paths"]["sub_lvl_dir2"],
                self.services["paths"]["random_data"]
            ))
    except Exception as e:
        self.fail("SSH access failed for VM: %s, Exception: %s" %
                  (new_virtual_machine.ipaddress, e))
    self.debug("returned_data_0: %s" % returned_data_0[0])
    self.debug("returned_data_1: %s" % returned_data_1[0])
    # Verify returned data
    self.assertEqual(
        random_data_0,
        returned_data_0[0],
        "Verify newly attached volume contents with existing one"
    )
    self.assertEqual(
        random_data_1,
        returned_data_1[0],
        "Verify newly attached volume contents with existing one"
    )
    # Unmount the Sec Storage
    cmds = [
        "umount %s" % (self.services["paths"]["mount_dir"]),
    ]
    for c in cmds:
        self.debug("Command: %s" % c)
        # NOTE(review): this runs on ssh_client (the FIRST VM's session),
        # but the mount above was made via ssh (the NEW VM). It looks like
        # `ssh.execute(c)` was intended — confirm before changing.
        ssh_client.execute(c)
    return
def setUpClass(cls):
    # Class-level fixture for TestCreateTemplate: resolves zone/domain,
    # creates disk offering, account, service offering and one stopped VM
    # whose ROOT volume (cls.volume) is the template source for the tests.
    testClient = super(TestCreateTemplate, cls).getClsTestClient()
    cls.apiclient = testClient.getApiClient()
    cls._cleanup = []
    cls.services = testClient.getParsedTestDataConfig()
    cls.unsupportedHypervisor = False
    cls.hypervisor = testClient.getHypervisorInfo()
    if cls.hypervisor.lower() in ['lxc']:
        # Template creation from root volume is not supported in LXC
        cls.unsupportedHypervisor = True
        return
    # Get Zone, Domain and templates
    cls.domain = get_domain(cls.apiclient)
    cls.zone = get_zone(cls.apiclient, testClient.getZoneForTests())
    cls.services['mode'] = cls.zone.networktype
    try:
        cls.disk_offering = DiskOffering.create(
            cls.apiclient,
            cls.services["disk_offering"])
        cls._cleanup.append(cls.disk_offering)
        template = get_template(cls.apiclient,
                                cls.zone.id,
                                cls.services["ostype"])
        if template == FAILED:
            assert False, "get_template() failed to return template with description %s" % cls.services[
                "ostype"]
        # Propagate the template's OS type and zone ids into the test
        # data so later create calls are consistent.
        cls.services["template"]["ostypeid"] = template.ostypeid
        cls.services["template_2"]["ostypeid"] = template.ostypeid
        cls.services["ostypeid"] = template.ostypeid
        cls.services["virtual_machine"]["zoneid"] = cls.zone.id
        cls.services["volume"]["diskoffering"] = cls.disk_offering.id
        cls.services["volume"]["zoneid"] = cls.zone.id
        cls.services["sourcezoneid"] = cls.zone.id
        cls.account = Account.create(cls.apiclient,
                                     cls.services["account"],
                                     domainid=cls.domain.id)
        cls._cleanup.append(cls.account)
        cls.service_offering = ServiceOffering.create(
            cls.apiclient,
            cls.services["service_offerings"]["tiny"])
        cls._cleanup.append(cls.service_offering)
        # create virtual machine
        cls.virtual_machine = VirtualMachine.create(
            cls.apiclient,
            cls.services["virtual_machine"],
            templateid=template.id,
            accountid=cls.account.name,
            domainid=cls.account.domainid,
            serviceofferingid=cls.service_offering.id,
            mode=cls.services["mode"])
        # Stop virtual machine — templates are built from a stopped VM's
        # ROOT volume.
        cls.virtual_machine.stop(cls.apiclient)
        list_volume = Volume.list(cls.apiclient,
                                  virtualmachineid=cls.virtual_machine.id,
                                  type='ROOT',
                                  listall=True)
        cls.volume = list_volume[0]
    except Exception as e:
        # Any setup failure tears down what was built and skips the class.
        cls.tearDownClass()
        raise unittest.SkipTest("Exception in setUpClass: %s" % e)
    return
def setUpClass(cls):
    # Class-level fixture for TestTemplates: resolves zone/domain, creates
    # admin + user accounts, a stopped VM, and two templates built from the
    # VM's ROOT volume for the edit/delete/permission test cases.
    testClient = super(TestTemplates, cls).getClsTestClient()
    cls.apiclient = testClient.getApiClient()
    cls._cleanup = []
    cls.services = testClient.getParsedTestDataConfig()
    cls.unsupportedHypervisor = False
    cls.hypervisor = testClient.getHypervisorInfo()
    if cls.hypervisor.lower() in ['lxc']:
        # Template creation from root volume is not supported in LXC
        cls.unsupportedHypervisor = True
        return
    # Get Zone, Domain and templates
    cls.domain = get_domain(cls.apiclient)
    cls.zone = get_zone(cls.apiclient, cls.testClient.getZoneForTests())
    cls.services['mode'] = cls.zone.networktype
    # populate second zone id for iso copy
    cls.zones = Zone.list(cls.apiclient)
    if not isinstance(cls.zones, list):
        raise Exception("Failed to find zones.")
    cls.disk_offering = DiskOffering.create(cls.apiclient,
                                            cls.services["disk_offering"])
    template = get_template(cls.apiclient,
                            cls.zone.id,
                            cls.services["ostype"])
    if template == FAILED:
        assert False, "get_template() failed to return template with description %s" % cls.services[
            "ostype"]
    # Propagate zone and OS type ids into the test data for later calls.
    cls.services["virtual_machine"]["zoneid"] = cls.zone.id
    cls.services["volume"]["diskoffering"] = cls.disk_offering.id
    cls.services["volume"]["zoneid"] = cls.zone.id
    cls.services["template_2"]["zoneid"] = cls.zone.id
    cls.services["sourcezoneid"] = cls.zone.id
    cls.services["template"]["ostypeid"] = template.ostypeid
    cls.services["template_2"]["ostypeid"] = template.ostypeid
    cls.services["ostypeid"] = template.ostypeid
    cls.account = Account.create(cls.apiclient,
                                 cls.services["account"],
                                 admin=True,
                                 domainid=cls.domain.id)
    cls.user = Account.create(cls.apiclient,
                              cls.services["account"],
                              domainid=cls.domain.id)
    cls.service_offering = ServiceOffering.create(
        cls.apiclient,
        cls.services["service_offerings"]["tiny"])
    # create virtual machine
    cls.virtual_machine = VirtualMachine.create(
        cls.apiclient,
        cls.services["virtual_machine"],
        templateid=template.id,
        accountid=cls.account.name,
        domainid=cls.account.domainid,
        serviceofferingid=cls.service_offering.id,
        mode=cls.services["mode"])
    # Stop virtual machine — templates are built from a stopped VM's
    # ROOT volume.
    cls.virtual_machine.stop(cls.apiclient)
    list_volume = Volume.list(cls.apiclient,
                              virtualmachineid=cls.virtual_machine.id,
                              type='ROOT',
                              listall=True)
    try:
        cls.volume = list_volume[0]
    except Exception as e:
        # BUGFIX: corrected error-message typo ("foe VM" -> "for VM").
        raise Exception(
            "Exception: Unable to find root volume for VM: %s - %s" %
            (cls.virtual_machine.id, e))
    # Create templates for Edit, Delete & update permissions testcases
    cls.template_1 = Template.create(cls.apiclient,
                                     cls.services["template"],
                                     cls.volume.id,
                                     account=cls.account.name,
                                     domainid=cls.account.domainid)
    cls.template_2 = Template.create(cls.apiclient,
                                     cls.services["template_2"],
                                     cls.volume.id,
                                     account=cls.account.name,
                                     domainid=cls.account.domainid)
    cls._cleanup = [
        cls.service_offering,
        cls.disk_offering,
        cls.account,
        cls.user
    ]
def chk_volume_resize(self, apiclient, vm):
    """Grow *vm*'s ROOT volume by 2 GiB and verify the size in-guest.

    Returns True when the resize was issued and verified for the VM's
    hypervisor, False otherwise. Raises on unexpected resize failures.
    """
    self.assertEqual(vm.state, "Running",
                     msg="VM is not in Running state")
    # get root vol from created vm, verify its size
    list_volume_response = Volume.list(apiclient,
                                       virtualmachineid=vm.id,
                                       type='ROOT',
                                       listall='True')
    rootvolume = list_volume_response[0]
    # XenServer only supports offline ROOT volume resize.
    if vm.state == "Running" and vm.hypervisor.lower() == "xenserver":
        self.virtual_machine.stop(apiclient)
        time.sleep(self.services["sleep"])
    if vm.hypervisor.lower() == "vmware":
        # VMware ROOT resize is only supported with a SCSI controller.
        rootdiskcontroller = self.getDiskController(vm)
        if rootdiskcontroller != "scsi":
            raise Exception(
                "root volume resize only supported on scsi disk ,"
                "please check rootdiskcontroller type")
    rootvolobj = Volume(rootvolume.__dict__)
    # New size: current size in GiB plus 2.
    newsize = (rootvolume.size >> 30) + 2
    success = False
    if rootvolume is not None:
        try:
            rootvolobj.resize(apiclient, size=newsize)
            if vm.hypervisor.lower() == "xenserver":
                # Restart the VM that was stopped for the offline resize.
                self.virtual_machine.start(apiclient)
                time.sleep(self.services["sleep"])
            ssh = SshClient(self.virtual_machine.ssh_ip, 22,
                            "root", "password")
            newsizeinbytes = newsize * 1024 * 1024 * 1024
            # Device naming differs per hypervisor: /dev/xvdX (Xen),
            # /dev/vdX (KVM), /dev/sdb (VMware).
            if vm.hypervisor.lower() == "xenserver":
                volume_name = "/dev/xvd" + \
                    chr(ord('a') + int(
                        list_volume_response[0].deviceid))
                self.debug(" Using XenServer"
                           " volume_name: %s" % volume_name)
                ret = checkVolumeSize(ssh_handle=ssh,
                                      volume_name=volume_name,
                                      size_to_verify=newsizeinbytes)
                success = True
            elif vm.hypervisor.lower() == "kvm":
                volume_name = "/dev/vd" + chr(
                    ord('a') + int(list_volume_response[0].deviceid))
                self.debug(" Using KVM volume_name:"
                           " %s" % volume_name)
                ret = checkVolumeSize(ssh_handle=ssh,
                                      volume_name=volume_name,
                                      size_to_verify=newsizeinbytes)
                success = True
            elif vm.hypervisor.lower() == "vmware":
                ret = checkVolumeSize(ssh_handle=ssh,
                                      volume_name="/dev/sdb",
                                      size_to_verify=newsizeinbytes)
                success = True
            self.debug(" Volume Size Expected %s "
                       " Actual :%s" % (newsizeinbytes, ret[1]))
        except Exception as e:
            # need to write the rootdisk controller code.
            if vm.hypervisor == "vmware" and rootdiskcontroller == "ide":
                # BUGFIX: Exception.message was removed in Python 3;
                # str(e) works on both Python 2 and 3.
                assert "Found unsupported root disk " \
                    "controller :ide" in str(e), \
                    "able to resize ide root volume Testcase failed"
            else:
                raise Exception("fail to resize the volume: %s" % e)
    else:
        self.debug("hypervisor %s unsupported for test "
                   ", verifying it errors properly" % self.hypervisor)
        success = False
    return success
def test_04_resize_detached_volume(self):
    '''Test Resize Volume Detached To Virtual Machine.

    Resizes the detached volume up to the 20 GB offering, then to the
    100 GB offering, and finally back down to the original small
    offering, verifying the reported size after each step.
    '''
    list_vm_volumes = Volume.list(
        self.apiclient,
        virtualmachineid=self.virtual_machine.id,
        id=self.volume_2.id)
    # check that the volume is not attached to VM
    self.assertIsNone(list_vm_volumes, "List volumes is not None")

    def _resize_to(offering, failure_msg):
        """Resize self.volume_2 to *offering*, assert the new size,
        and return the refreshed volume object."""
        target = int((offering.disksize) * (1024 ** 3))
        cmd = resizeVolume.resizeVolumeCmd()
        cmd.id = self.volume_2.id
        cmd.diskofferingid = offering.id
        # shrinkok must be set when the target is smaller than current.
        cmd.shrinkok = self.volume_2.size > target
        self.apiclient.resizeVolume(cmd)
        new_size = Volume.list(self.apiclient, id=self.volume_2.id)
        self.assertTrue(new_size[0].size == target, failure_msg)
        return new_size[0]

    # Grow to the 20 GB offering.
    self.volume_2 = _resize_to(
        self.disk_offering_20,
        "New size is not int((self.disk_offering_20.disksize) * (1024**3))")
    # Grow to the 100 GB offering.
    # BUGFIX: the original assertion message referenced disk_offering_20
    # here although the 100 GB offering was being checked.
    self.volume_2 = _resize_to(
        self.disk_offering_100,
        "New size is not int((self.disk_offering_100.disksize) * (1024**3))")
    # return to small disk
    _resize_to(self.disk_offerings, "Could not return to Small disk")
def test_01_volume_iso_attach(self):
    """Test Volumes and ISO attach
    """
    # Validate the following
    # 1. Create and attach 5 data volumes to VM
    # 2. Create an ISO. Attach it to VM instance
    # 3. Verify that attach ISO is successful

    # Create max_data_volumes volumes and attach each one to the VM
    for i in range(self.max_data_volumes):
        volume = Volume.create(self.apiclient,
                               self.services["volume"],
                               zoneid=self.zone.id,
                               account=self.account.name,
                               domainid=self.account.domainid,
                               diskofferingid=self.disk_offering.id)
        self.debug("Created volume: %s for account: %s" %
                   (volume.id, self.account.name))
        # Check List Volume response for newly created volume
        list_volume_response = Volume.list(self.apiclient, id=volume.id)
        self.assertNotEqual(list_volume_response,
                            None,
                            "Check if volume exists in ListVolumes")
        self.assertEqual(isinstance(list_volume_response, list),
                         True,
                         "Check list volumes response for valid list")
        # Attach volume to VM
        self.virtual_machine.attach_volume(self.apiclient, volume)
    # Check all volumes attached to same VM
    list_volume_response = Volume.list(
        self.apiclient,
        virtualmachineid=self.virtual_machine.id,
        type='DATADISK',
        listall=True)
    self.assertNotEqual(list_volume_response,
                        None,
                        "Check if volume exists in ListVolumes")
    self.assertEqual(isinstance(list_volume_response, list),
                     True,
                     "Check list volumes response for valid list")
    self.assertEqual(
        len(list_volume_response),
        self.max_data_volumes,
        "Volumes attached to the VM %s. Expected %s" %
        (len(list_volume_response), self.max_data_volumes))
    # Create an ISO and attach it to VM
    iso = Iso.create(
        self.apiclient,
        self.services["iso"],
        account=self.account.name,
        domainid=self.account.domainid,
    )
    self.debug("Created ISO with ID: %s for account: %s" %
               (iso.id, self.account.name))
    try:
        self.debug("Downloading ISO with ID: %s" % iso.id)
        iso.download(self.apiclient)
    except Exception as e:
        self.fail("Exception while downloading ISO %s: %s" % (iso.id, e))
    # Attach ISO to virtual machine
    self.debug("Attach ISO ID: %s to VM: %s" %
               (iso.id, self.virtual_machine.id))
    cmd = attachIso.attachIsoCmd()
    cmd.id = iso.id
    cmd.virtualmachineid = self.virtual_machine.id
    self.apiclient.attachIso(cmd)
    # Verify ISO is attached to VM
    vm_response = VirtualMachine.list(
        self.apiclient,
        id=self.virtual_machine.id,
    )
    # Verify VM response to check whether VM deployment was successful
    self.assertEqual(isinstance(vm_response, list),
                     True,
                     "Check list VM response for valid list")
    self.assertNotEqual(len(vm_response),
                        0,
                        "Check VMs available in List VMs response")
    vm = vm_response[0]
    self.assertEqual(vm.isoid,
                     iso.id,
                     "Check ISO is attached to VM or not")
    return
def test_02_negative_path(self):
    """
    Negative tests for the volume life cycle.

    # 1. Deploy a vm [vm1] with shared storage and data disk
    #v1. Create VM2 with local storage offering disk offerings
    # 2.TBD
    # 3. Detach the data disk from VM1 and Download the volume
    # 4.TBD
    # 5. Attach volume with deviceid = 0
    # 6. Attach volume, specify a VM which is destroyed
    # 7.TBD
    # 8.TBD
    # 9.TBD
    # 10.TBD
    # 11.Upload the volume from T3 by providing the URL of the downloaded
    #    volume, but specify a wrong format (not supported by the
    #    hypervisor)
    # 12.Upload the same volume from T4 by providing a wrong URL
    # 13.Upload volume, provide wrong checksum
    # 14.Upload a volume when maximum limit for the account is reached
    # 15.TBD
    # 16.Upload volume with all correct parameters
    #    (covered in positive test path)
    # 17.TBD
    # 18.TBD
    # 19.Now attach the volume with all correct parameters
    #    (covered in positive test path)
    # 20.Destroy and expunge all VMs
    """
    # 1. Deploy a vm [vm1] with shared storage and data disk
    self.virtual_machine_1 = VirtualMachine.create(
        self.userapiclient,
        self.testdata["small"],
        templateid=self.template.id,
        accountid=self.account.name,
        domainid=self.account.domainid,
        serviceofferingid=self.service_offering_1.id,
        zoneid=self.zone.id,
        diskofferingid=self.disk_offering_1.id,
        mode=self.testdata["mode"])
    verify_vm(self, self.virtual_machine_1.id)
    # List data volume for vm1
    list_volume = Volume.list(self.userapiclient,
                              virtualmachineid=self.virtual_machine_1.id,
                              type='DATADISK')
    self.assertEqual(
        validateList(list_volume)[0],
        PASS,
        "Check List volume response for vm id %s" %
        self.virtual_machine_1.id)
    list_data_volume_for_vm1 = list_volume[0]
    self.assertEqual(
        len(list_volume),
        1,
        "There is no data disk attached to vm id:%s" %
        self.virtual_machine_1.id)
    self.assertEqual(list_data_volume_for_vm1.virtualmachineid,
                     str(self.virtual_machine_1.id),
                     "Check if volume state (attached) is reflected")
    # Variance
    if self.zone.localstorageenabled:
        # V1.Create vm3 with local storage offering
        self.virtual_machine_local_2 = VirtualMachine.create(
            self.userapiclient,
            self.testdata["small"],
            templateid=self.template.id,
            accountid=self.account.name,
            domainid=self.account.domainid,
            serviceofferingid=self.service_offering_2.id,
            zoneid=self.zone.id,
            mode=self.testdata["mode"])
        verify_vm(self, self.virtual_machine_local_2.id)
    # 3. Detach the data disk from VM1 and Download the volume
    self.virtual_machine_1.detach_volume(self.userapiclient,
                                         volume=list_data_volume_for_vm1)
    verify_detach_volume(self,
                         self.virtual_machine_1.id,
                         list_data_volume_for_vm1.id)
    # download detached volume
    self.extract_volume = Volume.extract(
        self.userapiclient,
        volume_id=list_data_volume_for_vm1.id,
        zoneid=self.zone.id,
        mode='HTTP_DOWNLOAD')
    self.debug("extracted url is%s :" % self.extract_volume.url)
    try:
        # NOTE: Python 2 urllib API (unquote_plus / urlopen)
        formatted_url = urllib.unquote_plus(self.extract_volume.url)
        self.debug("Attempting to download volume at url %s" %
                   formatted_url)
        response = urllib.urlopen(formatted_url)
        self.debug("response from volume url %s" % response.getcode())
        fd, path = tempfile.mkstemp()
        self.debug("Saving volume %s to path %s" %
                   (list_data_volume_for_vm1.id, path))
        os.close(fd)
        with open(path, 'wb') as fd:
            fd.write(response.read())
        self.debug("Saved volume successfully")
    except Exception:
        self.fail(
            "Extract Volume Failed with invalid URL %s (vol id: %s)" %
            (self.extract_volume, list_data_volume_for_vm1.id))
    # 6. Attach volume, specify a VM which is destroyed
    self.virtual_machine_2 = VirtualMachine.create(
        self.userapiclient,
        self.testdata["small"],
        templateid=self.template.id,
        accountid=self.account.name,
        domainid=self.account.domainid,
        serviceofferingid=self.service_offering_1.id,
        zoneid=self.zone.id,
        mode=self.testdata["mode"])
    verify_vm(self, self.virtual_machine_2.id)
    try:
        self.virtual_machine_2.delete(self.apiclient)
    except Exception as e:
        raise Exception("Vm deletion failed with error %s" % e)
    # Create a new volume
    self.volume = Volume.create(self.userapiclient,
                                services=self.testdata["volume"],
                                diskofferingid=self.disk_offering_1.id,
                                zoneid=self.zone.id)
    list_data_volume = Volume.list(self.userapiclient, id=self.volume.id)
    self.assertEqual(
        validateList(list_data_volume)[0],
        PASS,
        "Check List volume response for volume %s" % self.volume.id)
    self.assertEqual(
        list_data_volume[0].id,
        self.volume.id,
        "check list volume response for volume id: %s" % self.volume.id)
    self.debug("volume id %s got created successfully" %
               list_data_volume[0].id)
    # try Attach volume to vm2 — expected to fail, the VM was destroyed
    try:
        self.virtual_machine_2.attach_volume(self.userapiclient,
                                             self.volume)
        self.fail("Volume got attached to a destroyed vm ")
    except Exception:
        self.debug("Volume cant not be attached to a destroyed vm ")
    # 11.Upload the volume by providing the URL of the downloaded
    # volume, but specify a wrong format (not supported by the hypervisor)
    if "OVA" in self.extract_volume.url.upper():
        self.testdata["configurableData"]["upload_volume"][
            "format"] = "VHD"
    else:
        self.testdata["configurableData"]["upload_volume"][
            "format"] = "OVA"
    try:
        self.upload_response = Volume.upload(
            self.userapiclient,
            zoneid=self.zone.id,
            url=self.extract_volume.url,
            services=self.testdata["configurableData"]["upload_volume"])
        self.fail("Volume got uploaded with invalid format")
    except Exception as e:
        self.debug("upload volume failed due %s" % e)
    # 12. Upload the same volume from T4 by providing a wrong URL
    self.testdata["configurableData"]["upload_volume"]["format"] = "VHD"
    if "OVA" in self.extract_volume.url.upper():
        self.testdata["configurableData"]["upload_volume"][
            "format"] = "OVA"
    if "QCOW2" in self.extract_volume.url.upper():
        self.testdata["configurableData"]["upload_volume"][
            "format"] = "QCOW2"
    # Corrupt the file-name portion of the URL to make it invalid
    u1 = self.extract_volume.url.split('.')
    u1[-2] = "wrong"
    wrong_url = ".".join(u1)
    try:
        self.upload_response = Volume.upload(
            self.userapiclient,
            zoneid=self.zone.id,
            url=wrong_url,
            services=self.testdata["configurableData"]["upload_volume"])
        self.upload_response.wait_for_upload(self.userapiclient)
        self.fail("volume got uploaded with wrong url")
    except Exception as e:
        self.debug("upload volume failed due to %s" % e)
    # 13.Upload volume, provide wrong checksum
    try:
        self.upload_response = Volume.upload(
            self.userapiclient,
            zoneid=self.zone.id,
            url=self.extract_volume.url,
            services=self.testdata["configurableData"]["upload_volume"],
            checksome="123456")
        self.upload_response.wait_for_upload(self.userapiclient)
        self.fail("volume got uploaded with wrong checksome")
    except Exception as e:
        self.debug("upload volume failed due to %s" % e)
    # 14.Upload a volume when maximum limit for the account is reached
    # (resourcetype=2 is the volume resource limit)
    account_update = Resources.updateLimit(self.apiclient,
                                           resourcetype=2,
                                           account=self.account.name,
                                           domainid=self.account.domainid,
                                           max=1)
    list_resource = Resources.list(self.apiclient,
                                   account=self.account.name,
                                   domainid=self.account.domainid,
                                   resourcetype=2)
    self.assertEqual(
        validateList(list_resource)[0],
        PASS,
        "Check List resource response for volume %s" % self.account.name)
    self.assertEqual(
        str(list_resource[0].max),
        '1',
        "check list List resource response for account id: %s" %
        self.account.name)
    self.debug("Max resources got updated successfully for account %s" %
               self.account.name)
    try:
        self.upload_response = Volume.upload(
            self.userapiclient,
            zoneid=self.zone.id,
            url=self.extract_volume.url,
            services=self.testdata["configurableData"]["upload_volume"])
        self.upload_response.wait_for_upload(self.userapiclient)
        self.fail("volume got uploaded after account reached max limit for\
                  volumes ")
    except Exception as e:
        self.debug("upload volume failed due to %s" % e)
def test_01_migrateVolume(self):
    """
    Verify that a volume keeps the same UUID across a storage migration.

    Step1: Create a volume/data disk.
    Step2: Record the UUID of the volume.
    Step3: Migrate the volume to another primary storage within the
           cluster.
    Step4: The migration to the new primary storage must succeed.
    Step5: The volume UUID must be unchanged after the migration.
    """
    data_disk = Volume.create(self.apiclient,
                              self.services["volume"],
                              diskofferingid=self.disk_offering.id,
                              zoneid=self.zone.id,
                              account=self.account.name,
                              domainid=self.account.domainid)
    self.assertIsNotNone(data_disk, "Failed to create volume")
    listed_before = Volume.list(self.apiclient, id=data_disk.id)
    self.assertEqual(validateList(listed_before)[0], PASS,
                     "Invalid response returned for list volumes")
    # Remember the UUID so it can be compared after the migration.
    uuid_before_migration = listed_before[0].id
    try:
        self.virtual_machine.attach_volume(self.apiclient, data_disk)
    except Exception as e:
        self.fail("Attaching data disk to vm failed with error %s" % e)
    candidate_pools = StoragePool.listForMigration(self.apiclient,
                                                   id=data_disk.id)
    if not candidate_pools:
        self.skipTest("No suitable storage pools found for volume migration.\
                      Skipping")
    self.assertEqual(validateList(candidate_pools)[0], PASS,
                     "invalid pool response from findStoragePoolsForMigration")
    target_pool = candidate_pools[0]
    self.debug("Migrating Volume-ID: %s to Pool: %s" %
               (data_disk.id, target_pool.id))
    try:
        Volume.migrate(self.apiclient,
                       volumeid=data_disk.id,
                       storageid=target_pool.id,
                       livemigrate='true')
    except Exception as e:
        self.fail("Volume migration failed with error %s" % e)
    listed_after = Volume.list(self.apiclient,
                               virtualmachineid=self.virtual_machine.id,
                               listall='true',
                               type='DATADISK')
    self.assertEqual(validateList(listed_after)[0], PASS,
                     "invalid volumes response after migration")
    uuid_after_migration = listed_after[0].id
    self.assertEqual(
        uuid_before_migration,
        uuid_after_migration,
        "Volume is not retaining same uuid when migrating from one\
        storage to another")
    self.virtual_machine.detach_volume(self.apiclient, data_disk)
    self.cleanup.append(data_disk)
    return
def test_01_positive_path(self):
    """
    Positive tests for the volume life cycle.

    # 1. Deploy a vm [vm1] with shared storage and data disk
    # 2. Deploy a vm [vm2]with shared storage without data disk
    # 3. TBD
    # 4. Create a new volume and attache to vm2
    # 5. Detach data disk from vm1 and download it
    #    Variance(1-9)
    # 6. Upload volume by providing url of downloaded volume in step 5
    # 7. Attach the volume to a different vm - vm2
    # 8. Try to delete an attached volume
    # 9. Create template from root volume of VM1
    # 10. Create new VM using the template created in step 9
    # 11. Delete the template
    # 12. Detach the disk from VM2 and re-attach the disk to VM1
    # 13.TBD
    # 14.TBD
    # 15.Migrate volume(detached) and then attach to a vm and
    #    live-migrate
    # 16.Upload volume of size smaller than
    #    storage.max.volume.upload.size(leaving the negative case)
    # 17.TBD
    # 18.TBD
    # 19.TBD
    # 20.Detach data disks from VM2 and delete volume
    """
    # 1. Deploy a vm [vm1] with shared storage and data disk
    self.virtual_machine_1 = VirtualMachine.create(
        self.userapiclient,
        self.testdata["small"],
        templateid=self.template.id,
        accountid=self.account.name,
        domainid=self.account.domainid,
        serviceofferingid=self.service_offering_1.id,
        zoneid=self.zone.id,
        diskofferingid=self.disk_offering_1.id,
        mode=self.testdata["mode"])
    verify_vm(self, self.virtual_machine_1.id)
    # List data volume for vm1
    list_volume = Volume.list(self.userapiclient,
                              virtualmachineid=self.virtual_machine_1.id,
                              type='DATADISK')
    self.assertEqual(
        validateList(list_volume)[0],
        PASS,
        "Check List volume response for vm id %s" %
        self.virtual_machine_1.id)
    list_data_volume_for_vm1 = list_volume[0]
    self.assertEqual(
        len(list_volume),
        1,
        "There is no data disk attached to vm id:%s" %
        self.virtual_machine_1.id)
    self.assertEqual(list_data_volume_for_vm1.virtualmachineid,
                     str(self.virtual_machine_1.id),
                     "Check if volume state (attached) is reflected")
    # 2. Deploy a vm [vm2]with shared storage without data disk
    self.virtual_machine_2 = VirtualMachine.create(
        self.userapiclient,
        self.testdata["small"],
        templateid=self.template.id,
        accountid=self.account.name,
        domainid=self.account.domainid,
        serviceofferingid=self.service_offering_1.id,
        zoneid=self.zone.id,
        mode=self.testdata["mode"])
    verify_vm(self, self.virtual_machine_2.id)
    # 4. Create a new volume and attache to vm2
    self.volume = Volume.create(self.userapiclient,
                                services=self.testdata["volume"],
                                diskofferingid=self.disk_offering_1.id,
                                zoneid=self.zone.id)
    list_data_volume = Volume.list(self.userapiclient, id=self.volume.id)
    self.assertEqual(
        validateList(list_data_volume)[0],
        PASS,
        "Check List volume response for volume %s" % self.volume.id)
    self.assertEqual(
        list_data_volume[0].id,
        self.volume.id,
        "check list volume response for volume id: %s" % self.volume.id)
    self.debug("volume id %s got created successfully" %
               list_data_volume[0].id)
    # Attach volume to vm2
    self.virtual_machine_2.attach_volume(self.userapiclient, self.volume)
    verify_attach_volume(self, self.virtual_machine_2.id, self.volume.id)
    # Variance
    if self.zone.localstorageenabled:
        # V1.Create vm3 with local storage offering
        self.virtual_machine_local_3 = VirtualMachine.create(
            self.userapiclient,
            self.testdata["small"],
            templateid=self.template.id,
            accountid=self.account.name,
            domainid=self.account.domainid,
            serviceofferingid=self.service_offering_2.id,
            zoneid=self.zone.id,
            mode=self.testdata["mode"])
        verify_vm(self, self.virtual_machine_local_3.id)
        # V2.create two data disk on local storage
        self.local_volumes = []
        for i in range(2):
            local_volume = Volume.create(
                self.userapiclient,
                services=self.testdata["volume"],
                diskofferingid=self.disk_offering_local.id,
                zoneid=self.zone.id)
            list_local_data_volume = Volume.list(self.userapiclient,
                                                 id=local_volume.id)
            self.assertEqual(
                validateList(list_local_data_volume)[0],
                PASS,
                "Check List volume response for volume %s" %
                local_volume.id)
            self.assertEqual(
                list_local_data_volume[0].id,
                local_volume.id,
                "check list volume response for volume id: %s" %
                local_volume.id)
            self.debug("volume id %s got created successfully" %
                       list_local_data_volume[0].id)
            self.local_volumes.append(local_volume)
        # V3.Attach local disk to vm1
        self.virtual_machine_1.attach_volume(self.userapiclient,
                                             self.local_volumes[0])
        verify_attach_volume(self,
                             self.virtual_machine_1.id,
                             self.local_volumes[0].id)
    if self.list_storage:
        # V4.create vm4 with zone wide storage
        self.virtual_machine_zone_4 = VirtualMachine.create(
            self.userapiclient,
            self.testdata["small"],
            templateid=self.template.id,
            accountid=self.account.name,
            domainid=self.account.domainid,
            serviceofferingid=self.tagged_so.id,
            zoneid=self.zone.id,
            mode=self.testdata["mode"])
        verify_vm(self, self.virtual_machine_zone_4.id)
        # V5.Create two data disk on zone wide storage
        self.zone_volumes = []
        for i in range(2):
            zone_volume = Volume.create(
                self.userapiclient,
                services=self.testdata["volume"],
                diskofferingid=self.disk_offering_tagged.id,
                zoneid=self.zone.id)
            list_zone_data_volume = Volume.list(self.userapiclient,
                                                id=zone_volume.id)
            self.assertEqual(
                validateList(list_zone_data_volume)[0],
                PASS,
                "Check List volume response for volume %s" %
                zone_volume.id)
            self.assertEqual(
                list_zone_data_volume[0].id,
                zone_volume.id,
                "check list volume response for volume id: %s" %
                zone_volume.id)
            self.debug("volume id:%s got created successfully" %
                       list_zone_data_volume[0].id)
            self.zone_volumes.append(zone_volume)
        # V6.Attach data disk running on ZWPS to VM1 (root disk on shared)
        self.virtual_machine_1.attach_volume(self.userapiclient,
                                             self.zone_volumes[0])
        verify_attach_volume(self,
                             self.virtual_machine_1.id,
                             self.zone_volumes[0].id)
        # V7. Create a cluster wide volume and attach to vm running on
        # zone wide storage
        self.cluster_volume = Volume.create(
            self.userapiclient,
            services=self.testdata["volume"],
            diskofferingid=self.disk_offering_1.id,
            zoneid=self.zone.id)
        list_cluster_volume = Volume.list(self.userapiclient,
                                          id=self.cluster_volume.id)
        self.assertEqual(
            validateList(list_cluster_volume)[0],
            PASS,
            "Check List volume response for volume %s" %
            self.cluster_volume.id)
        self.assertEqual(
            list_cluster_volume[0].id,
            str(self.cluster_volume.id),
            "volume does not exist %s" % self.cluster_volume.id)
        self.debug("volume id %s got created successfuly" %
                   list_cluster_volume[0].id)
        self.virtual_machine_zone_4.attach_volume(self.userapiclient,
                                                  self.cluster_volume)
        verify_attach_volume(self,
                             self.virtual_machine_zone_4.id,
                             self.cluster_volume.id)
    if self.list_storage and self.zone.localstorageenabled:
        # V8.Attach zone wide volume to vm running on local storage
        self.virtual_machine_local_3.attach_volume(self.userapiclient,
                                                   self.zone_volumes[1])
        verify_attach_volume(self,
                             self.virtual_machine_local_3.id,
                             self.zone_volumes[1].id)
        # V9.Attach local volume to a vm running on zone wide storage
        self.virtual_machine_zone_4.attach_volume(self.userapiclient,
                                                  self.local_volumes[1])
        verify_attach_volume(self,
                             self.virtual_machine_zone_4.id,
                             self.local_volumes[1].id)
    # 5. Detach data disk from vm1 and download it
    self.virtual_machine_1.detach_volume(self.userapiclient,
                                         volume=list_data_volume_for_vm1)
    verify_detach_volume(self,
                         self.virtual_machine_1.id,
                         list_data_volume_for_vm1.id)
    # download detached volume
    self.extract_volume = Volume.extract(
        self.userapiclient,
        volume_id=list_data_volume_for_vm1.id,
        zoneid=self.zone.id,
        mode='HTTP_DOWNLOAD')
    self.debug("extracted url is%s :" % self.extract_volume.url)
    try:
        # NOTE: Python 2 urllib API (unquote_plus / urlopen)
        formatted_url = urllib.unquote_plus(self.extract_volume.url)
        self.debug("Attempting to download volume at url %s" %
                   formatted_url)
        response = urllib.urlopen(formatted_url)
        self.debug("response from volume url %s" % response.getcode())
        fd, path = tempfile.mkstemp()
        self.debug("Saving volume %s to path %s" %
                   (list_data_volume_for_vm1.id, path))
        os.close(fd)
        with open(path, 'wb') as fd:
            fd.write(response.read())
        self.debug("Saved volume successfully")
    except Exception:
        self.fail(
            "Extract Volume Failed with invalid URL %s (vol id: %s)" %
            (self.extract_volume, list_data_volume_for_vm1.id))
    # checking format of downloaded volume and assigning to
    # testdata["volume_upload"]
    if "OVA" in self.extract_volume.url.upper():
        self.testdata["configurableData"]["upload_volume"][
            "format"] = "OVA"
    if "QCOW2" in self.extract_volume.url.upper():
        self.testdata["configurableData"]["upload_volume"][
            "format"] = "QCOW2"
    # 6. Upload volume by providing url of downloaded volume in step 5
    self.upload_response = Volume.upload(
        self.userapiclient,
        zoneid=self.zone.id,
        url=self.extract_volume.url,
        services=self.testdata["configurableData"]["upload_volume"])
    self.upload_response.wait_for_upload(self.userapiclient)
    self.debug("uploaded volume id is %s" % self.upload_response.id)
    # 7. Attach the volume to a different vm - vm2
    self.virtual_machine_2.attach_volume(self.userapiclient,
                                         volume=self.upload_response)
    verify_attach_volume(self,
                         self.virtual_machine_2.id,
                         self.upload_response.id)
    # 8. Try to delete an attached volume — must fail while attached
    try:
        self.volume.delete(self.userapiclient)
        self.fail("Volume got deleted in attached state %s " %
                  self.volume.id)
    except Exception as e:
        self.debug("Attached volume deletion failed because %s" % e)
    # 9. Create template from root volume of VM1(stop VM->create template
    # -> start vm)
    self.virtual_machine_1.stop(self.userapiclient)
    self.list_root_disk_for_vm1 = Volume.list(
        self.userapiclient,
        virtualmachineid=self.virtual_machine_1.id,
        type='ROOT')
    self.assertEqual(
        validateList(self.list_root_disk_for_vm1)[0],
        PASS,
        "Check List volume response for vm %s" %
        self.virtual_machine_1.id)
    self.assertEqual(
        len(self.list_root_disk_for_vm1),
        1,
        "list root disk for vm1 is empty : %s" %
        self.virtual_machine_1.id)
    self.template_from_vm1_root_disk = Template.create(
        self.userapiclient,
        self.testdata["template"],
        self.list_root_disk_for_vm1[0].id,
        account=self.account.name,
        domainid=self.account.domainid)
    list_template = Template.list(
        self.userapiclient,
        templatefilter=self.testdata["templatefilter"],
        id=self.template_from_vm1_root_disk.id)
    self.assertEqual(
        validateList(list_template)[0],
        PASS,
        "Check List template response for template id %s" %
        self.template_from_vm1_root_disk.id)
    self.assertEqual(
        len(list_template),
        1,
        "list template response is empty for template id : %s" %
        list_template[0].id)
    self.assertEqual(list_template[0].id,
                     self.template_from_vm1_root_disk.id,
                     "list template id is not same as created template")
    self.debug("Template id:%s got created successfully" %
               self.template_from_vm1_root_disk.id)
    self.virtual_machine_1.start(self.userapiclient)
    # 10. Deploy a vm using template ,created from vm1's root disk
    self.virtual_machine_3 = VirtualMachine.create(
        self.userapiclient,
        self.testdata["small"],
        templateid=self.template_from_vm1_root_disk.id,
        accountid=self.account.name,
        domainid=self.account.domainid,
        serviceofferingid=self.service_offering_1.id,
        zoneid=self.zone.id,
        mode=self.testdata["mode"])
    verify_vm(self, self.virtual_machine_3.id)
    # 11.delete the template created from root disk of vm1
    try:
        self.template_from_vm1_root_disk.delete(self.userapiclient)
        self.debug("Template id: %s got deleted successfuly" %
                   self.template_from_vm1_root_disk.id)
    except Exception as e:
        raise Exception("Template deletion failed with error %s" % e)
    list_template = Template.list(
        self.userapiclient,
        templatefilter=self.testdata["templatefilter"],
        id=self.template_from_vm1_root_disk.id)
    self.assertEqual(
        list_template,
        None,
        "Template is not deleted, id %s:" %
        self.template_from_vm1_root_disk.id)
    self.debug("Template id%s got deleted successfully" %
               self.template_from_vm1_root_disk.id)
    # List vm and check the state of vm
    verify_vm(self, self.virtual_machine_3.id)
    # 12.Detach the disk from VM2 and re-attach the disk to VM1
    self.virtual_machine_2.detach_volume(self.userapiclient,
                                         volume=self.upload_response)
    verify_detach_volume(self,
                         self.virtual_machine_2.id,
                         self.upload_response.id)
    self.virtual_machine_1.attach_volume(self.userapiclient,
                                         volume=self.upload_response)
    verify_attach_volume(self,
                         self.virtual_machine_1.id,
                         self.upload_response.id)
    # 15.Migrate volume(detached) and then attach to a vm and
    # live-migrate
    self.migrate_volume = Volume.create(
        self.userapiclient,
        services=self.testdata["volume"],
        diskofferingid=self.disk_offering_1.id,
        zoneid=self.zone.id)
    list_volume = Volume.list(self.apiclient, id=self.migrate_volume.id)
    self.assertEqual(
        validateList(list_volume)[0],
        PASS,
        "Check List volume response for volume %s" %
        self.migrate_volume.id)
    self.assertEqual(
        list_volume[0].id,
        str(self.migrate_volume.id),
        "volume does not exist %s" % self.migrate_volume.id)
    self.debug("volume id %s got created successfuly" % list_volume[0].id)
    self.virtual_machine_1.attach_volume(self.userapiclient,
                                         self.migrate_volume)
    verify_attach_volume(self,
                         self.virtual_machine_1.id,
                         self.migrate_volume.id)
    self.virtual_machine_1.detach_volume(self.userapiclient,
                                         volume=self.migrate_volume)
    verify_detach_volume(self,
                         self.virtual_machine_1.id,
                         self.migrate_volume.id)
    list_volume = Volume.list(self.apiclient, id=self.migrate_volume.id)
    self.assertEqual(
        validateList(list_volume)[0],
        PASS,
        "Check List volume response for volume %s" %
        self.migrate_volume.id)
    self.assertEqual(
        list_volume[0].id,
        str(self.migrate_volume.id),
        "volume does not exist %s" % self.migrate_volume.id)
    self.debug("volume id %s got created successfuly" % list_volume[0].id)
    list_pool = StoragePool.list(self.apiclient,
                                 id=list_volume[0].storageid)
    self.assertEqual(
        validateList(list_pool)[0],
        PASS,
        "Check List pool response for storage id %s" %
        list_volume[0].storageid)
    self.assertGreater(
        len(list_pool),
        0,
        "Check the list list storagepoolresponse for vm id: %s" %
        list_volume[0].storageid)
    list_pools = StoragePool.list(self.apiclient,
                                  scope=list_pool[0].scope)
    self.assertEqual(
        validateList(list_pools)[0],
        PASS,
        "Check List pool response for scope %s" % list_pool[0].scope)
    self.assertGreater(
        len(list_pools),
        0,
        "Check the list vm response for scope :%s" % list_volume[0].scope)
    storagepoolid = None
    # Pick any same-scope pool other than the one currently hosting
    # the volume.
    for i in range(len(list_pools)):
        if list_volume[0].storageid != list_pools[i].id:
            storagepoolid = list_pools[i].id
            break
        else:
            self.debug("No pool available for volume migration ")
    if storagepoolid is not None:
        try:
            volume_migrate = Volume.migrate(
                self.apiclient,
                storageid=storagepoolid,
                volumeid=self.migrate_volume.id)
        except Exception as e:
            raise Exception("Volume migration failed with error %s" % e)
        self.virtual_machine_2.attach_volume(self.userapiclient,
                                             self.migrate_volume)
        verify_attach_volume(self,
                             self.virtual_machine_2.id,
                             self.migrate_volume.id)
        pool_for_migration = StoragePool.listForMigration(
            self.apiclient,
            id=self.migrate_volume.id)
        self.assertEqual(
            validateList(pool_for_migration)[0],
            PASS,
            "Check list pool For Migration response for volume %s" %
            self.migrate_volume.id)
        self.assertGreater(
            len(pool_for_migration),
            0,
            "Check the listForMigration response for volume :%s" %
            self.migrate_volume.id)
        try:
            volume_migrate = Volume.migrate(
                self.apiclient,
                storageid=pool_for_migration[0].id,
                volumeid=self.migrate_volume.id,
                livemigrate=True)
        except Exception as e:
            raise Exception("Volume migration failed with error %s" % e)
    else:
        try:
            self.migrate_volume.delete(self.userapiclient)
            self.debug("volume id:%s got deleted successfully " %
                       self.migrate_volume.id)
        except Exception as e:
            raise Exception("Volume deletion failed with error %s" % e)
    # 16.Upload volume of size smaller than
    # storage.max.volume.upload.size(leaving the negative case)
    self.testdata["configurableData"]["upload_volume"]["format"] = "VHD"
    volume_upload = Volume.upload(
        self.userapiclient,
        self.testdata["configurableData"]["upload_volume"],
        zoneid=self.zone.id)
    volume_upload.wait_for_upload(self.userapiclient)
    self.debug("volume id :%s got uploaded successfully is " %
               volume_upload.id)
    # 20.Detach data disk from vm 2 and delete the volume
    self.virtual_machine_2.detach_volume(self.userapiclient,
                                         volume=self.volume)
    verify_detach_volume(self, self.virtual_machine_2.id, self.volume.id)
    try:
        self.volume.delete(self.userapiclient)
        self.debug("volume id:%s got deleted successfully " %
                   self.volume.id)
    except Exception as e:
        raise Exception("Volume deletion failed with error %s" % e)
def test_04_pt_startvm_false_attach_disk(self):
    """
    Positive test for stopped VM test path - T3 and variant, T9

    # 1. Deploy VM in the network with specifying startvm parameter
    #    as False
    # 2. List VMs and verify that VM is in stopped state
    # 3. Create a data disk and attach it to VM
    # 4. Now detach the disk from the VM
    # 5. Verify that attach and detach operations are successful
    # 6. Deploy another VM in the account with startVM = False
    # 7. Attach the same volume to this VM
    # 8. Detach the volume
    # 9. Both attach and detach operations should be successful
    """
    disk_offering = DiskOffering.create(self.apiclient,
                                        self.testdata["disk_offering"])
    self.cleanup.append(disk_offering)
    volume = Volume.create(self.apiclient,
                           self.testdata["volume"],
                           zoneid=self.zone.id,
                           account=self.account.name,
                           domainid=self.account.domainid,
                           diskofferingid=disk_offering.id)
    self.cleanup.append(volume)

    def deploy_stopped_vm():
        # Deploy a VM in the account with startvm=False and assert
        # that it stays in the Stopped state.
        vm = VirtualMachine.create(
            self.userapiclient,
            self.testdata["small"],
            templateid=self.defaultTemplateId,
            accountid=self.account.name,
            domainid=self.account.domainid,
            serviceofferingid=self.service_offering.id,
            networkids=[self.networkid, ] if self.networkid else None,
            zoneid=self.zone.id,
            startvm=False,
            mode=self.zone.networktype)
        self.cleanup.append(vm)
        state_check = vm.getState(self.apiclient, VirtualMachine.STOPPED)
        self.assertEqual(state_check[0], PASS, state_check[1])
        return vm

    def attach_then_detach(vm):
        # Attach the shared data disk, confirm it is listed for the VM,
        # then detach it and confirm it is no longer listed.
        vm.attach_volume(self.userapiclient, volume=volume)
        attached = Volume.list(self.userapiclient,
                               virtualmachineid=vm.id,
                               type="DATADISK",
                               listall=True)
        self.assertEqual(validateList(attached)[0],
                         PASS,
                         "Volumes list validation failed")
        self.assertEqual(
            attached[0].id,
            volume.id,
            "Listed Volume id not matching with attached volume id")
        vm.detach_volume(self.userapiclient, volume)
        detached = Volume.list(self.userapiclient,
                               virtualmachineid=vm.id,
                               type="DATADISK",
                               listall=True)
        self.assertEqual(validateList(detached)[0],
                         FAIL,
                         "Detached volume should not be listed")

    # First stopped VM: attach/detach cycle must succeed.
    attach_then_detach(deploy_stopped_vm())
    # Second stopped VM: the same volume must attach and detach again.
    attach_then_detach(deploy_stopped_vm())
    return
def test_07_usage_events_after_rootvolume_resized_(self):
    """Test check usage events after root volume resize

    # Validate the following
    # 1. Deploy a VM without any disk offering (only root disk)
    # 2. Perform(resize) of the root volume
    # 3. Check the corresponding usage events
    """
    # deploy a vm
    try:
        if self.updateclone:
            self.virtual_machine = VirtualMachine.create(
                self.apiclient, self.services["virtual_machine"],
                accountid=self.parentd_admin.name,
                domainid=self.parent_domain.id,
                serviceofferingid=self.services_offering_vmware.id,
                mode=self.zone.networktype)
        else:
            self.virtual_machine = VirtualMachine.create(
                self.apiclient, self.services["virtual_machine"],
                accountid=self.parentd_admin.name,
                domainid=self.parent_domain.id,
                serviceofferingid=self.service_offering.id,
                mode=self.zone.networktype)

        # listVirtual macine — give the deployment a moment to settle
        # before querying.
        time.sleep(self.services["sleep"])
        list_vms = VirtualMachine.list(self.apiclient,
                                       id=self.virtual_machine.id)
        self.debug(
            "Verify listVirtualMachines response for virtual machine: %s"
            % self.virtual_machine.id)
        res = validateList(list_vms)
        # BUGFIX: previously this asserted res[2] != INVALID_INPUT, which
        # also passes when the list is empty; require an explicit PASS.
        self.assertEqual(res[0], PASS, "Invalid list response")
        self.cleanup.append(self.virtual_machine)
        vm = list_vms[0]
        self.assertEqual(vm.id, self.virtual_machine.id,
                         "Virtual Machine ids do not match")

        # get root vol from created vm, verify it is correct size
        list_volume_response = Volume.list(
            self.apiclient,
            virtualmachineid=self.virtual_machine.id,
            type='ROOT',
            listall='True')
        res = validateList(list_volume_response)
        # BUGFIX: same res[2] weakness fixed here.
        self.assertEqual(res[0], PASS,
                         "listVolumes returned invalid object in response")

        # XenServer requires the VM to be stopped before a root volume
        # resize.
        if vm.state == "Running" and vm.hypervisor.lower() == "xenserver":
            self.virtual_machine.stop(self.apiclient)
            time.sleep(self.services["sleep"])

        rootvolume = list_volume_response[0]
        # converting json response to Volume Object
        rootvol = Volume(rootvolume.__dict__)
        # New size in GiB: current size (bytes) floored to GiB, plus 2.
        newsize = (rootvolume.size >> 30) + 2
        if rootvolume is not None:
            try:
                rootvol.resize(self.apiclient, size=newsize)
                # Resolve the internal account id from its uuid so the
                # usage_event table can be queried.
                qresultset = self.dbclient.execute(
                    "select id from account where uuid = '%s';"
                    % self.parentd_admin.id)
                res = validateList(qresultset)
                self.assertEqual(res[0], PASS, "Check DB Query result set")
                account_id = qresultset[0][0]
                self.debug("select type,size from usage_event"
                           " where account_id = '%s';" % account_id)
                # Latest VOLUME.RESIZE event for this account.
                qresultsize = self.dbclient.execute(
                    "select size from usage_event where account_id = '%s' "
                    "and type='VOLUME.RESIZE' ORDER BY ID DESC LIMIT 1;"
                    % account_id)
                res = validateList(qresultsize)
                self.assertEqual(res[0], PASS, "Check DB Query result set")
                qresult = int(qresultsize[0][0])
                self.debug("Query result: %s" % qresult)
                # The usage event must record the new size in bytes.
                self.assertEqual(
                    qresult,
                    (newsize * 1024 * 1024 * 1024),
                    "Usage event not logged properly with right volume"
                    " size please check ")
            except Exception as e:
                # Chain the cause so the original traceback survives.
                raise Exception("Warning: Exception while checking usage "
                                "event for the root volume resize : %s"
                                % e) from e
    except Exception as e:
        raise Exception(
            "Warning: Exception performing "
            "usage_events_after_rootvolume_resized Test : %s" % e) from e
def test_05_pt_startvm_false_attach_disk_change_SO(self):
    """ Positive test for stopped VM test path - T4

    # 1. Deploy VM in the network with specifying startvm parameter
    # as False
    # 2. List VMs and verify that VM is in stopped state
    # 3. Create a data disk and attach it to VM
    # 4. Change the service offering of VM from small to medium,
    # verify that the operation is successful
    # 5. Start the VM
    # 6. Now detach the disk from the VM
    # 7. Verify that attach and detach operations are successful
    """
    # NOTE(review): despite step 5 in the docstring, this method never
    # starts the VM — every operation runs against the stopped VM.
    # Confirm whether a start step was intended.
    disk_offering = DiskOffering.create(
        self.apiclient,
        self.testdata["disk_offering"]
    )
    self.cleanup.append(disk_offering)
    volume = Volume.create(
        self.apiclient,
        self.testdata["volume"],
        zoneid=self.zone.id,
        account=self.account.name,
        domainid=self.account.domainid,
        diskofferingid=disk_offering.id
    )
    self.cleanup.append(volume)
    # Create VM in account; startvm=False keeps it Stopped.
    virtual_machine = VirtualMachine.create(
        self.userapiclient,
        self.testdata["small"],
        templateid=self.defaultTemplateId,
        accountid=self.account.name,
        domainid=self.account.domainid,
        serviceofferingid=self.service_offering.id,
        networkids=[self.networkid, ] if self.networkid else None,
        zoneid=self.zone.id,
        startvm=False,
        mode=self.zone.networktype
    )
    self.cleanup.append(virtual_machine)
    response = virtual_machine.getState(
        self.apiclient,
        VirtualMachine.STOPPED)
    self.assertEqual(response[0], PASS, response[1])
    # Attach the data disk and verify it is listed for the VM.
    virtual_machine.attach_volume(self.userapiclient, volume=volume)
    volumes = Volume.list(
        self.userapiclient,
        virtualmachineid=virtual_machine.id,
        type="DATADISK",
        listall=True
    )
    self.assertEqual(
        validateList(volumes)[0],
        PASS,
        "Volumes list validation failed"
    )
    self.assertEqual(
        volumes[0].id,
        volume.id,
        "Listed Volume id not matching with attached volume id"
    )
    # Change service offering of VM and verify that it is changed
    virtual_machine.change_service_offering(
        self.userapiclient,
        serviceOfferingId=self.service_offering_2.id
    )
    # VerifyChangeInServiceOffering returns an (exception-occurred,
    # message) pair rather than raising directly.
    response = VerifyChangeInServiceOffering(
        self,
        virtual_machine,
        self.service_offering_2
    )
    exceptionOccurred, exceptionMessage = response[0], response[1]
    self.assertFalse(exceptionOccurred, exceptionMessage)
    # Detach; an empty DATADISK listing is expected to make
    # validateList report FAIL.
    virtual_machine.detach_volume(
        self.userapiclient,
        volume)
    volumes = Volume.list(
        self.userapiclient,
        virtualmachineid=virtual_machine.id,
        type="DATADISK",
        listall=True
    )
    self.assertEqual(
        validateList(volumes)[0],
        FAIL,
        "Detached volume should not be listed"
    )
    return
def test_05_vmdeployment_with_size(self):
    """Test vm deployment with new rootdisk size parameter

    # Validate the following
    # 1. Deploy a VM without any disk offering (only root disk)
    # 2. Verify the root disksize after deployment
    """
    # BUGFIX: use integer (floor) division. Under Python 3, "/" yields a
    # float, so newsize — and the byte count derived from it below —
    # would be a float instead of an exact integer.
    templateSize = (self.template.size // (1024 ** 3))
    # NOTE(review): newsize is only used for verification below; the
    # deployment presumably picks the root disk size up from
    # self.services["virtual_machine"] — confirm against test setup.
    newsize = templateSize + 2
    # deploy a vm
    try:
        if self.updateclone:
            self.virtual_machine = VirtualMachine.create(
                self.apiclient, self.services["virtual_machine"],
                accountid=self.parentd_admin.name,
                domainid=self.parent_domain.id,
                serviceofferingid=self.services_offering_vmware.id,
                mode=self.zone.networktype)
        else:
            self.virtual_machine = VirtualMachine.create(
                self.apiclient, self.services["virtual_machine"],
                accountid=self.parentd_admin.name,
                domainid=self.parent_domain.id,
                serviceofferingid=self.service_offering.id,
                mode=self.zone.networktype)

        # listVirtual macine
        list_vms = VirtualMachine.list(self.apiclient,
                                       id=self.virtual_machine.id)
        self.debug(
            "Verify listVirtualMachines response for virtual machine: %s"
            % self.virtual_machine.id)
        res = validateList(list_vms)
        # BUGFIX: require PASS; res[2] != INVALID_INPUT also passes for
        # an empty list.
        self.assertEqual(res[0], PASS, "Invalid list response")
        self.cleanup.append(self.virtual_machine)
        vm = list_vms[0]

        ssh = SshClient(self.virtual_machine.ssh_ip, 22, "root",
                        "password")
        # Expected root disk size in bytes.
        newsize = newsize * 1024 * 1024 * 1024
        list_volume_response = Volume.list(
            self.apiclient,
            virtualmachineid=self.virtual_machine.id,
            type='ROOT',
            listall='True')
        res = validateList(list_volume_response)
        self.assertEqual(res[0], PASS,
                         "listVolumes returned invalid object in response")

        # BUGFIX: default keeps ret bound even for an unexpected
        # hypervisor (previously a NameError at the debug line below).
        ret = (FAIL, None)
        if vm.hypervisor.lower() == "xenserver":
            volume_name = "/dev/xvd" + chr(
                ord('a') + int(list_volume_response[0].deviceid))
            self.debug(" Using XenServer volume_name: %s" % volume_name)
            ret = checkVolumeSize(ssh_handle=ssh,
                                  volume_name=volume_name,
                                  size_to_verify=newsize)
        elif vm.hypervisor.lower() == "kvm":
            volume_name = "/dev/vd" + chr(
                ord('a') + int(list_volume_response[0].deviceid))
            self.debug(" Using KVM volume_name: %s" % volume_name)
            ret = checkVolumeSize(ssh_handle=ssh,
                                  volume_name=volume_name,
                                  size_to_verify=newsize)
        elif vm.hypervisor.lower() == "vmware":
            ret = checkVolumeSize(ssh_handle=ssh,
                                  volume_name="/dev/sdb",
                                  size_to_verify=newsize)
        self.debug(" Volume Size Expected %s Actual :%s"
                   % (newsize, ret[1]))
        # BUGFIX: assert the verification result — previously it was
        # only logged, so the test could never fail on a size mismatch.
        self.assertEqual(ret[0], SUCCESS,
                         "Check if promised disk size actually available")
    except Exception as e:
        raise Exception("Warning: Exception during"
                        " VM deployment with new"
                        " rootdisk parameter : %s" % e) from e
def test_08_pt_startvm_false_password_enabled_template(self):
    """ Positive test for stopped VM test path - T10

    # 1 Create a password enabled template
    # 2. Deploy a new VM with password enabled template
    # 3. Verify that VM is in stopped state
    # 4. Start the VM, verify that it is in running state
    # 5. Verify that new password is generated for the VM
    """
    if self.hypervisor.lower() in ['lxc']:
        self.skipTest(
            "feature is not supported in %s" % self.hypervisor)
    # Source VM whose root disk will become the password-enabled
    # template.
    vm_for_template = VirtualMachine.create(
        self.userapiclient,
        self.testdata["small"],
        templateid=self.defaultTemplateId,
        accountid=self.account.name,
        domainid=self.account.domainid,
        serviceofferingid=self.service_offering.id,
        zoneid=self.zone.id,
        mode=self.zone.networktype,
        networkids=[self.networkid, ] if self.networkid else None)

    # Seed the known password so get_ssh_client() can authenticate.
    vm_for_template.password = self.testdata["virtual_machine"]["password"]
    ssh = vm_for_template.get_ssh_client()

    # below steps are required to get the new password from
    # VR(reset password)
    # http://cloudstack.org/dl/cloud-set-guest-password
    # Copy this file to /etc/init.d
    # chmod +x /etc/init.d/cloud-set-guest-password
    # chkconfig --add cloud-set-guest-password
    # similar steps to get SSH key from web so as to make it ssh enabled
    cmds = [
        "cd /etc/init.d;wget http://people.apache.org/~tsp/\
cloud-set-guest-password",
        "chmod +x /etc/init.d/cloud-set-guest-password",
        "chkconfig --add cloud-set-guest-password"]
    for c in cmds:
        ssh.execute(c)

    # Stop virtual machine — the template must be taken from a stopped
    # VM's root volume.
    vm_for_template.stop(self.userapiclient)

    list_volume = Volume.list(
        self.userapiclient,
        virtualmachineid=vm_for_template.id,
        type='ROOT',
        listall=True)
    if isinstance(list_volume, list):
        self.volume = list_volume[0]
    else:
        raise Exception(
            "Exception: Unable to find root volume for VM: %s" %
            vm_for_template.id)

    self.testdata["template"]["ostype"] = self.testdata["ostype"]
    # Create templates for Edit, Delete & update permissions testcases
    pw_ssh_enabled_template = Template.create(
        self.userapiclient,
        self.testdata["template"],
        self.volume.id,
        account=self.account.name,
        domainid=self.account.domainid
    )
    self.cleanup.append(pw_ssh_enabled_template)
    # Delete the VM - No longer needed
    vm_for_template.delete(self.apiclient)

    # Create VM in account from the password-enabled template, stopped.
    # NOTE(review): templateid here is self.defaultTemplateId, not the
    # freshly created pw_ssh_enabled_template — confirm intent.
    virtual_machine = VirtualMachine.create(
        self.userapiclient,
        self.testdata["small"],
        templateid=self.defaultTemplateId,
        accountid=self.account.name,
        domainid=self.account.domainid,
        serviceofferingid=self.service_offering.id,
        zoneid=self.zone.id,
        startvm=False,
        mode=self.zone.networktype,
        networkids=[self.networkid, ] if self.networkid else None
    )
    self.cleanup.append(virtual_machine)
    response = virtual_machine.getState(
        self.apiclient,
        VirtualMachine.STOPPED)
    self.assertEqual(response[0], PASS, response[1])

    virtual_machine.start(self.userapiclient)

    vms = virtual_machine.list(
        self.userapiclient,
        id=virtual_machine.id,
        listall=True)
    self.assertEqual(
        validateList(vms)[0],
        PASS,
        "vms list validation failed"
    )
    # A password-enabled start should have produced a fresh password
    # different from the one cached on the local object.
    self.assertNotEqual(
        str(vms[0].password),
        str(virtual_machine.password),
        "New password should be generated for the VM"
    )
    return
def test_03_vmsnapshot__on_resized_rootvolume_vm(self):
    """Test vmsnapshot on resized root volume

    # Validate the following
    # 1. Deploy a VM without any disk offering (only root disk)
    # 2. Perform(resize) of the root volume
    # 3. Perform VM snapshot on VM
    """
    # deploy a vm
    try:
        if self.updateclone:
            self.virtual_machine = VirtualMachine.create(
                self.apiclient, self.services["virtual_machine"],
                accountid=self.parentd_admin.name,
                domainid=self.parent_domain.id,
                serviceofferingid=self.services_offering_vmware.id,
                mode=self.zone.networktype)
        else:
            self.virtual_machine = VirtualMachine.create(
                self.apiclient, self.services["virtual_machine"],
                accountid=self.parentd_admin.name,
                domainid=self.parent_domain.id,
                serviceofferingid=self.service_offering.id,
                mode=self.zone.networktype)

        # listVirtual macine
        list_vms = VirtualMachine.list(self.apiclient,
                                       id=self.virtual_machine.id)
        self.debug(
            "Verify listVirtualMachines response for virtual machine: %s"
            % self.virtual_machine.id)
        res = validateList(list_vms)
        # BUGFIX: require PASS explicitly; the previous
        # res[2] != INVALID_INPUT check also passes for an empty list.
        self.assertEqual(res[0], PASS, "Invalid list response")
        self.cleanup.append(self.virtual_machine)
        vm = list_vms[0]
        self.assertEqual(vm.id, self.virtual_machine.id,
                         "Virtual Machine ids do not match")

        # get root vol from created vm, verify it is correct size
        list_volume_response = Volume.list(
            self.apiclient,
            virtualmachineid=self.virtual_machine.id,
            type='ROOT',
            listall='True')
        res = validateList(list_volume_response)
        self.assertEqual(res[0], PASS,
                         "listVolumes returned invalid object in response")

        # chk_volume_resize performs the resize and verification; the
        # previously computed (and unused) rootvolume/newsize locals
        # were removed.
        result = self.chk_volume_resize(self.apiclient, vm)
        if result:
            try:
                # KVM requires the VM to be stopped before taking a VM
                # snapshot.
                if 'kvm' in self.hypervisor.lower():
                    self.virtual_machine.stop(self.apiclient)
                vm_snapshot = VmSnapshot.create(
                    self.apiclient, self.virtual_machine.id)
                vm_snapshot_list = VmSnapshot.list(
                    self.apiclient, vmsnapshotid=vm_snapshot.id)
                status = validateList(vm_snapshot_list)
                self.assertEqual(PASS, status[0],
                                 "Listing of configuration failed")
                self.assertEqual(
                    vm_snapshot.id,
                    vm_snapshot_list[0].id,
                    "Virtual Machine Snapshot id do not match")
            except Exception as e:
                # Chain the cause so the original traceback survives.
                raise Exception(
                    "Issue CLOUDSTACK-10080: Exception while performing"
                    " vmsnapshot: %s" % e) from e
        else:
            self.debug("volume resize failed for root volume")
    except Exception as e:
        raise Exception("Exception while performing"
                        " vmsnapshot on resized volume Test: %s"
                        % e) from e
def test_16_create_template_volume(self):
    """Test Create template from volume
    """
    # Isolated network with source NAT for the first VM.
    noffering = NetworkOffering.list(
        self.user_api_client,
        name="DefaultIsolatedNetworkOfferingWithSourceNatService")
    vm2network = Network.create(self.user_api_client,
                                self.services["network"],
                                accountid=self.account.name,
                                domainid=self.account.domainid,
                                networkofferingid=noffering[0].id,
                                zoneid=self.zone.id)

    list_nw_response = Network.list(self.user_api_client,
                                    id=vm2network.id)
    self.assertEqual(
        isinstance(list_nw_response, list),
        True,
        "Check list response returns a valid networks list")

    templatevm = VirtualMachine.create(
        self.user_api_client,
        self.services["small"],
        templateid=self.template.id,
        accountid=self.account.name,
        domainid=self.account.domainid,
        networkids=vm2network.id,
        serviceofferingid=self.service_offering.id,
        mode=self.services['mode'],
        startvm="true")
    # NOTE(review): fixed 10-minute waits throughout this test; a
    # polling loop would likely be both faster and more reliable.
    time.sleep(600)
    vm_response = VirtualMachine.list(self.user_api_client,
                                      id=templatevm.id)
    self.assertNotEqual(len(vm_response), 0,
                        "Check VMs available in List VMs response")
    vm = vm_response[0]
    self.assertEqual(vm.state, 'Running',
                     "Check the state of VM created from Template")

    # Template creation requires the source VM to be stopped.
    templatevm.stop(self.user_api_client, forced="false")

    vm_response = VirtualMachine.list(self.user_api_client,
                                      id=templatevm.id)
    vm = vm_response[0]
    self.assertEqual(
        vm.state,
        'Stopped',
        "Check the state of VM is in Stopped state before creating "
        "the Template"
    )

    list_volume_response = Volume.list(self.user_api_client,
                                       virtualmachineid=vm.id,
                                       type="ROOT",
                                       listall=True)

    # Create template from Virtual machine and Volume ID
    roottemplate = Template.create(
        self.user_api_client,
        self.services["interop"]["template"],
        volumeid=list_volume_response[0].id,
        account=self.account.name,
        domainid=self.domain.id,
    )
    time.sleep(600)

    # NOTE(review): this lookup uses self.services["templatefilter"],
    # while the one near the end of the test uses
    # self.services["template"]["templatefilter"] — confirm which key
    # the services dict actually defines.
    list_template_response = Template.list(
        self.user_api_client,
        templatefilter=self.services["templatefilter"],
        id=roottemplate.id)
    self.assertEqual(isinstance(list_template_response, list),
                     True,
                     "Check list response returns a valid list")
    # Verify template response to check whether template added
    # successfully
    self.assertNotEqual(len(list_template_response), 0,
                        "Check template available in List Templates")
    template_response = list_template_response[0]
    self.assertEqual(
        template_response.displaytext,
        self.services["interop"]["template"]["displaytext"],
        "Check display text of newly created template")
    name = template_response.name
    self.assertEqual(
        name.count(self.services["interop"]["template"]["name"]),
        1,
        "Check name of newly created template")

    # The source VM and its network are no longer needed.
    templatevm.delete(self.apiclient)
    vm2network.delete(self.user_api_client)

    # Fresh network for the VM deployed from the new template.
    vm3network = Network.create(self.user_api_client,
                                self.services["network"],
                                accountid=self.account.name,
                                domainid=self.account.domainid,
                                networkofferingid=noffering[0].id,
                                zoneid=self.zone.id)

    list_nw_response = Network.list(self.user_api_client,
                                    id=vm3network.id)
    self.assertEqual(
        isinstance(list_nw_response, list),
        True,
        "Check list response returns a valid networks list")

    # Deploy a VM from the template created above and confirm it runs.
    templatevm = VirtualMachine.create(
        self.user_api_client,
        self.services["small"],
        templateid=roottemplate.id,
        networkids=vm3network.id,
        serviceofferingid=self.service_offering.id,
        accountid=self.account.name,
        domainid=self.account.domainid,
        mode=self.services['mode'],
        startvm="true")
    time.sleep(600)
    vm_response = VirtualMachine.list(self.user_api_client,
                                      id=templatevm.id)
    self.assertNotEqual(len(vm_response), 0,
                        "Check VMs available in List VMs response")
    vm = vm_response[0]
    self.assertEqual(vm.state, 'Running',
                     "Check the state of VM created from Template")

    # Delete the template
    roottemplate.delete(self.user_api_client)

    list_template_response = Template.list(
        self.user_api_client,
        templatefilter=self.services["template"]["templatefilter"],
        id=roottemplate.id,
        zoneid=self.zone.id)
    # A deleted template must no longer appear in the listing.
    self.assertEqual(list_template_response,
                     None,
                     "Check template available in List Templates")

    templatevm.delete(self.apiclient)
    vm3network.delete(self.user_api_client)
    return
def test_02_create__template_new_resized_rootvolume_size(self):
    """Test create Template resized root volume

    # Validate the following
    # 1. Deploy a VM without any disk offering (only root disk)
    # 2. Perform(resize) of the root volume
    # 3. Stop the vm
    # 4. Create a template from resized root volume
    """
    result = self.setupAccounts()
    self.assertEqual(result[0], PASS, result[1])
    apiclient = self.testClient.getUserApiClient(
        UserName=self.parentd_admin.name,
        DomainName=self.parentd_admin.domain)
    self.assertNotEqual(
        apiclient,
        FAILED,
        "Failed to get api client\
        of account: %s" % self.parentd_admin.name)
    # deploy a vm
    try:
        if self.updateclone:
            self.virtual_machine = VirtualMachine.create(
                apiclient, self.services["virtual_machine"],
                accountid=self.parentd_admin.name,
                domainid=self.parent_domain.id,
                serviceofferingid=self.services_offering_vmware.id,
                mode=self.zone.networktype)
        else:
            self.virtual_machine = VirtualMachine.create(
                apiclient, self.services["virtual_machine"],
                accountid=self.parentd_admin.name,
                domainid=self.parent_domain.id,
                serviceofferingid=self.service_offering.id,
                mode=self.zone.networktype)

        # listVirtual macine
        list_vms = VirtualMachine.list(apiclient,
                                       id=self.virtual_machine.id)
        self.debug("Verify listVirtualMachines response"
                   " for virtual machine: %s" % self.virtual_machine.id)
        res = validateList(list_vms)
        # BUGFIX: the previous res[2] != INVALID_INPUT checks (here and
        # below) also pass for an empty list; require PASS explicitly.
        self.assertEqual(res[0], PASS, "Invalid list response")
        self.cleanup.append(self.virtual_machine)
        vm = list_vms[0]
        self.assertEqual(vm.id, self.virtual_machine.id,
                         "Virtual Machine ids do not match")
        self.assertEqual(vm.name, self.virtual_machine.name,
                         "Virtual Machine names do not match")
        self.assertEqual(vm.state, "Running",
                         msg="VM is not in Running state")

        # get root vol from created vm, verify it is correct size
        list_volume_response = Volume.list(
            apiclient,
            virtualmachineid=self.virtual_machine.id,
            type='ROOT',
            listall='True')
        res = validateList(list_volume_response)
        self.assertEqual(res[0], PASS,
                         "listVolumes returned invalid object in response")
        rootvolume = list_volume_response[0]
        # Expected post-resize size in GiB: current size floored to GiB
        # plus 2 (must match what chk_volume_resize applies).
        newsize = (rootvolume.size >> 30) + 2
        result = self.chk_volume_resize(apiclient, vm)
        if result:
            try:
                # create a template from stopped VM instances root volume
                if vm.state == "Running":
                    self.virtual_machine.stop(apiclient)
                template_from_root = Template.create(
                    apiclient,
                    self.services["template"],
                    volumeid=rootvolume.id,
                    account=self.parentd_admin.name,
                    domainid=self.parent_domain.id)
                list_template_response = Template.list(
                    apiclient,
                    id=template_from_root.id,
                    templatefilter="all")
                res = validateList(list_template_response)
                self.assertEqual(res[0], PASS,
                                 "Check if template exists in "
                                 "ListTemplates")
                # Deploy new virtual machine using template
                self.virtual_machine2 = VirtualMachine.create(
                    apiclient,
                    self.services["virtual_machine"],
                    templateid=template_from_root.id,
                    accountid=self.parentd_admin.name,
                    domainid=self.parent_domain.id,
                    serviceofferingid=self.service_offering.id,
                )
                vm_response = VirtualMachine.list(
                    apiclient,
                    id=self.virtual_machine2.id,
                    account=self.parentd_admin.name,
                    domainid=self.parent_domain.id)
                res = validateList(vm_response)
                self.assertEqual(res[0], PASS,
                                 "Check for list VM response return "
                                 "valid list")
                self.cleanup.append(self.virtual_machine2)
                # Tear down the second VM before the first.
                self.cleanup.reverse()
                vm2 = vm_response[0]
                self.assertEqual(
                    vm2.state,
                    'Running',
                    "Check the state of VM created from Template")
                # The new VM's root volume must carry the resized size.
                list_volume_response = Volume.list(
                    apiclient,
                    virtualmachineid=vm2.id,
                    type='ROOT',
                    listall='True')
                self.assertEqual(
                    list_volume_response[0].size,
                    (newsize * 1024 * 1024 * 1024),
                    "Check for root volume size not matched with "
                    "template size")
            except Exception as e:
                # Chain the cause so the original traceback survives.
                raise Exception("Exception while resizing the "
                                "root volume: %s" % e) from e
        else:
            self.debug(" volume resize failed for root volume")
    except Exception as e:
        raise Exception("Exception while performing"
                        " template creation from "
                        "resized_root_volume : %s" % e) from e
    return
def test_19_template_tag(self):
    """ Test creation, listing and deletion tag on templates
    """
    if self.hypervisor.lower() in ['lxc']:
        self.skipTest(
            "template creation from volume feature is "
            "not supported on %s" % self.hypervisor.lower())
    try:
        # Isolated source-NAT network for the source VM.
        noffering = NetworkOffering.list(
            self.user_api_client,
            name="DefaultIsolatedNetworkOfferingWithSourceNatService")
        vm4network = Network.create(self.user_api_client,
                                    self.services["network"],
                                    accountid=self.account.name,
                                    domainid=self.account.domainid,
                                    networkofferingid=noffering[0].id,
                                    zoneid=self.zone.id)

        list_nw_response = Network.list(self.user_api_client,
                                        id=vm4network.id)
        self.assertEqual(
            isinstance(list_nw_response, list),
            True,
            "Check list response returns a valid networks list")

        vm_1 = VirtualMachine.create(
            self.user_api_client,
            self.services["small"],
            templateid=self.template.id,
            networkids=vm4network.id,
            serviceofferingid=self.service_offering.id,
            accountid=self.account.name,
            domainid=self.account.domainid,
            mode=self.services['mode'],
            startvm="true")
        # NOTE(review): fixed 10-minute wait for the VM to come up —
        # polling would be faster and less brittle.
        time.sleep(600)
        self.debug("Stopping the virtual machine: %s" % vm_1.name)
        # Stop virtual machine
        vm_1.stop(self.user_api_client)
    except Exception as e:
        self.fail("Failed to stop VM: %s" % e)

    # Poll for the ROOT volume of the stopped VM.
    timeout = self.services["timeout"]
    while True:
        list_volume = Volume.list(self.user_api_client,
                                  virtualmachineid=vm_1.id,
                                  type='ROOT',
                                  listall=True)
        if isinstance(list_volume, list):
            break
        elif timeout == 0:
            raise Exception("List volumes failed.")
        time.sleep(5)
        timeout = timeout - 1

    self.volume = list_volume[0]

    self.debug("Creating template from ROOT disk of virtual machine: %s"
               % vm_1.name)
    # Create template from volume
    template = Template.create(self.user_api_client,
                               self.services["template"],
                               self.volume.id)
    self.cleanup.append(template)
    self.debug("Created the template(%s). Now restarting the userVm: %s"
               % (template.name, vm_1.name))
    vm_1.start(self.user_api_client)

    self.debug("Creating a tag for the template")
    tag = Tag.create(self.user_api_client,
                     resourceIds=template.id,
                     resourceType='Template',
                     tags={'OS': 'windows8'})
    self.debug("Tag created: %s" % tag.__dict__)

    # The listing filtered by key/value must return the tag just set.
    tags = Tag.list(self.user_api_client,
                    listall=True,
                    resourceType='Template',
                    key='OS',
                    value='windows8')
    self.assertEqual(isinstance(tags, list),
                     True,
                     "List tags should not return empty response")
    self.assertEqual(tags[0].value,
                     'windows8',
                     'The tag should have original value')

    Template.list(
        self.user_api_client,
        templatefilter=self.services["template"]["templatefilter"],
        listall=True,
        key='OS',
        value='windows8')

    self.debug("Deleting the created tag..")
    try:
        tag.delete(self.user_api_client,
                   resourceIds=template.id,
                   resourceType='Template',
                   tags={'OS': 'windows8'})
    except Exception as e:
        self.fail("Failed to delete the tag - %s" % e)

    self.debug("Verifying if tag is actually deleted!")
    # After deletion the same filtered listing must be empty (None).
    tags = Tag.list(self.user_api_client,
                    listall=True,
                    resourceType='Template',
                    key='OS',
                    value='windows8')
    self.assertEqual(tags,
                     None,
                     "List tags should return empty response")
    return
def test_01_create__snapshot_new_resized_rootvolume_size(self):
    """Test create snapshot on resized root volume

    # Validate the following
    # 1. Deploy a VM without any disk offering (only root disk)
    # 2. Perform(resize) of the root volume
    # 3. Perform snapshot on resized volume
    """
    # deploy a vm
    try:
        if self.updateclone:
            self.virtual_machine = VirtualMachine.create(
                self.apiclient, self.services["virtual_machine"],
                accountid=self.parentd_admin.name,
                domainid=self.parent_domain.id,
                serviceofferingid=self.services_offering_vmware.id,
                mode=self.zone.networktype)
        else:
            self.virtual_machine = VirtualMachine.create(
                self.apiclient, self.services["virtual_machine"],
                accountid=self.parentd_admin.name,
                domainid=self.parent_domain.id,
                serviceofferingid=self.service_offering.id,
                mode=self.zone.networktype)

        # listVirtual machine
        list_vms = VirtualMachine.list(self.apiclient,
                                       id=self.virtual_machine.id)
        self.debug(
            "Verify listVirtualMachines response for virtual machine: %s"
            % self.virtual_machine.id)
        res = validateList(list_vms)
        # BUGFIX: require PASS explicitly; the previous
        # res[2] != INVALID_INPUT check also passes for an empty list.
        self.assertEqual(res[0], PASS, "Invalid list response")
        # BUGFIX: register the VM for cleanup immediately. Previously it
        # was appended only after the try/except, which is unreachable
        # when the except re-raises — leaking the VM on failure.
        self.cleanup.append(self.virtual_machine)
        vm = list_vms[0]
        self.assertEqual(vm.id, self.virtual_machine.id,
                         "Virtual Machine ids do not match")
        self.assertEqual(vm.name, self.virtual_machine.name,
                         "Virtual Machine names do not match")
        self.assertEqual(vm.state, "Running",
                         msg="VM is not in Running state")

        result = self.chk_volume_resize(self.apiclient, vm)
        if result:
            # get root vol from created vm, verify it is correct size
            list_volume_response = Volume.list(
                self.apiclient,
                virtualmachineid=self.virtual_machine.id,
                type='ROOT',
                listall='True')
            res = validateList(list_volume_response)
            self.assertEqual(res[0], PASS,
                             "listVolumes returned invalid object in "
                             "response")
            rootvolume = list_volume_response[0]
            self.debug("Creating a Snapshot from root volume: "
                       "%s" % rootvolume.id)
            snapshot = Snapshot.create(self.apiclient,
                                       rootvolume.id,
                                       account=self.parentd_admin.name,
                                       domainid=self.parent_domain.id)
            # BUGFIX: append right after creation. Previously the append
            # happened after the try block, raising NameError whenever
            # the resize failed (snapshot never bound) and never running
            # at all on exceptions.
            self.cleanup.append(snapshot)
            snapshots = list_snapshots(self.apiclient, id=snapshot.id)
            res = validateList(snapshots)
            self.assertEqual(res[0], PASS, "Invalid list response")
            self.assertEqual(snapshots[0].id, snapshot.id,
                             "Check resource id in list resources call")
        else:
            self.debug("Volume resize is failed")
    except Exception as e:
        # Chain the cause so the original traceback survives.
        raise Exception("Exception while performing"
                        " the snapshot on resized root volume"
                        " test case: %s" % e) from e
    return
def test_01_create_volume(self):
    """Test Volume creation for all Disk Offerings (incl. custom)
    """
    # Validate the following
    # 1. Create volumes from the different sizes
    # 2. Verify the size of volume with actual size allocated
    self.volumes = []
    # One volume per configured offering size.
    for k, v in self.services["volume_offerings"].items():
        volume = Volume.create(self.apiClient,
                               v,
                               zoneid=self.zone.id,
                               account=self.account.name,
                               domainid=self.account.domainid,
                               diskofferingid=self.disk_offering.id)
        self.debug("Created a volume with ID: %s" % volume.id)
        self.volumes.append(volume)

    # Sparse provisioning is only exercised on KVM.
    if self.virtual_machine.hypervisor == "KVM":
        sparse_volume = Volume.create(
            self.apiClient,
            self.services,
            zoneid=self.zone.id,
            account=self.account.name,
            domainid=self.account.domainid,
            diskofferingid=self.sparse_disk_offering.id)
        self.debug("Created a sparse volume: %s" % sparse_volume.id)
        self.volumes.append(sparse_volume)

    # Custom-sized disk offering.
    volume = Volume.create_custom_disk(
        self.apiClient,
        self.services,
        account=self.account.name,
        domainid=self.account.domainid,
    )
    self.debug("Created a volume with custom offering: %s" % volume.id)
    self.volumes.append(volume)

    # Attach a volume with different disk offerings
    # and check the memory allocated to each of them
    for volume in self.volumes:
        list_volume_response = Volume.list(self.apiClient,
                                           id=volume.id)
        self.assertEqual(
            isinstance(list_volume_response, list),
            True,
            "Check list response returns a valid list")
        self.assertNotEqual(
            list_volume_response,
            None,
            "Check if volume exists in ListVolumes")
        self.debug(
            "Attaching volume (ID: %s) to VM (ID: %s)" % (
                volume.id,
                self.virtual_machine.id))
        self.virtual_machine.attach_volume(self.apiClient, volume)
        # Reboot so the guest picks up the newly attached device.
        try:
            ssh = self.virtual_machine.get_ssh_client()
            self.debug("Rebooting VM %s" % self.virtual_machine.id)
            ssh.execute("reboot")
        except Exception as e:
            self.fail("SSH access failed for VM %s - %s" %
                      (self.virtual_machine.ipaddress, e))

        # Poll listVM to ensure VM is started properly
        timeout = self.services["timeout"]
        while True:
            time.sleep(self.services["sleep"])
            # Ensure that VM is in running state
            list_vm_response = VirtualMachine.list(
                self.apiClient,
                id=self.virtual_machine.id)
            if isinstance(list_vm_response, list):
                vm = list_vm_response[0]
                if vm.state == 'Running':
                    self.debug("VM state: %s" % vm.state)
                    break
            if timeout == 0:
                raise Exception(
                    "Failed to start VM (ID: %s) " % vm.id)
            timeout = timeout - 1

        # Expected size (bytes, as a string) from the pre-attach listing.
        vol_sz = str(list_volume_response[0].size)
        ssh = self.virtual_machine.get_ssh_client(reconnect=True)
        # Get the updated volume information
        list_volume_response = Volume.list(self.apiClient,
                                           id=volume.id)
        # Device naming differs per hypervisor; deviceid selects the
        # letter suffix.
        if list_volume_response[0].hypervisor.lower() == \
                XEN_SERVER.lower():
            volume_name = "/dev/xvd" + chr(
                ord('a') + int(list_volume_response[0].deviceid))
            self.debug(" Using XenServer volume_name: %s" % (volume_name))
            ret = checkVolumeSize(ssh_handle=ssh,
                                  volume_name=volume_name,
                                  size_to_verify=vol_sz)
        elif list_volume_response[0].hypervisor.lower() == "kvm":
            volume_name = "/dev/vd" + chr(
                ord('a') + int(list_volume_response[0].deviceid))
            self.debug(" Using KVM volume_name: %s" % (volume_name))
            ret = checkVolumeSize(ssh_handle=ssh,
                                  volume_name=volume_name,
                                  size_to_verify=vol_sz)
        else:
            ret = checkVolumeSize(ssh_handle=ssh,
                                  size_to_verify=vol_sz)
        self.debug(" Volume Size Expected %s Actual :%s" %
                   (vol_sz, ret[1]))
        # Detach before asserting so the volume is freed either way.
        self.virtual_machine.detach_volume(self.apiClient, volume)
        self.assertEqual(
            ret[0],
            SUCCESS,
            "Check if promised disk size actually available")
        time.sleep(self.services["sleep"])
def test_01_snapshot_root_disk(self):
    """Test Snapshot Root Disk

    Validates the following:
    1. Account List should list the accounts that existed
    2. List ROOT volumes of the test VM
    3. Create a snapshot from the first listed volume
    4. List snapshots
    5. Create volumes V1, V2, V3 from the first listed snapshot
    6. Verify the async job status for each volume creation
    7. List all the created DATADISK volumes
    8. Check list response returns a valid list BEFORE indexing it
    9. Add volumes V1, V2, V3 to cleanup
    10. Verify snapshot zone id matches the test zone
    """
    # 1. Account List should list the accounts that existed.
    account_list = Account.list(
        self.apiclient,
        listAll=True,
        roleType='Admin'
    )

    # 2. List the ROOT volume(s) of the VM under test.
    volumes = list_volumes(
        self.apiclient,
        virtualmachineid=self.virtual_machine_with_disk.id,
        type='ROOT',
        listall=True
    )

    # 3. Create Snapshot from the first volume in the list.
    snapshot = Snapshot.create(
        self.apiclient,
        volumes[0].id,
        account=self.account.name,
        domainid=self.account.domainid
    )
    self.debug("Snapshot created: ID - %s" % snapshot.id)

    # 4. List Snapshots
    snapshots = list_snapshots(
        self.apiclient,
        listall=True
    )

    # 5. Create Volumes V1, V2, V3 from the first snapshot in the list.
    services = {"diskname": "Vol",
                "zoneid": self.zone.id,
                "size": 10,
                "ispublic": True}
    vol1_jobId = self.create_from_snapshot(
        self.apiclient,
        snapshots[0].id,
        services,
        account_list[0].name,
        account_list[0].domainid
    )
    vol2_jobId = self.create_from_snapshot(
        self.apiclient,
        snapshots[0].id,
        services,
        account_list[0].name,
        account_list[0].domainid
    )
    vol3_jobId = self.create_from_snapshot(
        self.apiclient,
        snapshots[0].id,
        services,
        account_list[0].name,
        account_list[0].domainid
    )

    # 6. Verify that the async job ids completed successfully.
    self.query_async_job(self.apiclient, vol1_jobId.jobid)
    self.query_async_job(self.apiclient, vol2_jobId.jobid)
    self.query_async_job(self.apiclient, vol3_jobId.jobid)

    # 7. List all the volumes created from the snapshot.
    list_volume_response = Volume.list(
        self.apiclient,
        type="DATADISK",
        account=account_list[0].name,
        domainid=account_list[0].domainid
    )

    # 8. Validate the list response BEFORE indexing into it.
    # (Previously the cleanup appends indexed list_volume_response[0..2]
    # first, so a short/None response crashed with IndexError/TypeError
    # instead of failing with a clear assertion message.)
    self.assertEqual(
        isinstance(list_volume_response, list),
        True,
        "Check list response returns a valid list"
    )
    self.assertNotEqual(
        list_volume_response,
        None,
        "Check if result exists in list item call"
    )

    # 9. Add Volumes V1, V2, V3 to cleanup.
    self.cleanup.append(list_volume_response[0])
    self.cleanup.append(list_volume_response[1])
    self.cleanup.append(list_volume_response[2])

    # 10. The snapshot must report the zone it was taken in.
    self.assertIsNotNone(snapshots[0].zoneid,
                         "Zone id is not none in listSnapshots")
    self.assertEqual(
        snapshots[0].zoneid,
        self.zone.id,
        "Check zone id in the list snapshots"
    )
    return
def get_root_device_uuid_for_vm(cls, vm_id, root_device_id):
    """Return the UUID of the volume at index *root_device_id* in the
    VM's volume listing.

    NOTE(review): this indexes the listVolumes response positionally;
    it assumes the response order matches device ids — confirm against
    the API's ordering guarantees.
    """
    vm_volumes = Volume.list(
        cls.api_client,
        virtualmachineid=vm_id,
        listall=True
    )
    target_volume = vm_volumes[root_device_id]
    return target_volume.id
def setUpClass(cls):
    """Class fixture: build zone/account/VM state and a template
    created from the stopped VM's ROOT volume.

    Skips the whole class on LXC (unsupported hypervisor). Any
    failure during resource creation triggers tearDownClass() so
    already-created resources are cleaned up, then the class is
    skipped rather than erroring out.
    """
    cls.testClient = super(TestTemplates, cls).getClsTestClient()
    cls.api_client = cls.testClient.getApiClient()
    cls.services = Services().services
    # Get Zone, Domain and templates
    cls.domain = get_domain(cls.api_client)
    cls.zone = get_zone(cls.api_client, cls.testClient.getZoneForTests())
    cls.services['mode'] = cls.zone.networktype
    cls._cleanup = []
    cls.unsupportedHypervisor = False
    cls.hypervisor = cls.testClient.getHypervisorInfo()
    # Bail out early on LXC; individual tests check
    # unsupportedHypervisor and skip themselves.
    if cls.hypervisor.lower() in ['lxc']:
        cls.unsupportedHypervisor = True
        return
    # populate second zone id for iso copy
    cmd = listZones.listZonesCmd()
    zones = cls.api_client.listZones(cmd)
    if not isinstance(zones, list):
        raise Exception("Failed to find zones.")
    # NOTE(review): with fewer than 2 zones, services["destzoneid"]
    # is left unset — copy tests presumably handle or skip that case;
    # confirm.
    if len(zones) >= 2:
        cls.services["destzoneid"] = zones[1].id

    template = get_template(
        cls.api_client,
        cls.zone.id,
        cls.services["ostype"])
    cls.services["virtual_machine"]["zoneid"] = cls.zone.id
    try:
        cls.account = Account.create(
            cls.api_client,
            cls.services["account"],
            domainid=cls.domain.id)
        cls._cleanup.append(cls.account)
        cls.services["account"] = cls.account.name
        cls.service_offering = ServiceOffering.create(
            cls.api_client,
            cls.services["service_offering"])
        cls._cleanup.append(cls.service_offering)
        # create virtual machine
        cls.virtual_machine = VirtualMachine.create(
            cls.api_client,
            cls.services["virtual_machine"],
            templateid=template.id,
            accountid=cls.account.name,
            domainid=cls.account.domainid,
            serviceofferingid=cls.service_offering.id,
        )
        # Stop virtual machine: a template can only be created from
        # the ROOT volume of a stopped VM.
        cls.virtual_machine.stop(cls.api_client)

        # Poll until the ROOT volume shows up in listVolumes, or the
        # configured timeout (in 5-second steps) is exhausted.
        timeout = cls.services["timeout"]
        while True:
            list_volume = Volume.list(
                cls.api_client,
                virtualmachineid=cls.virtual_machine.id,
                type='ROOT',
                listall=True)
            if isinstance(list_volume, list):
                break
            elif timeout == 0:
                raise Exception("List volumes failed.")
            time.sleep(5)
            timeout = timeout - 1

        cls.volume = list_volume[0]
        # Create template from volume
        cls.template = Template.create(
            cls.api_client,
            cls.services["template"],
            cls.volume.id)
    except Exception as e:
        # Clean up whatever was created before re-raising as a skip,
        # so partial fixtures don't leak between test classes.
        cls.tearDownClass()
        raise unittest.SkipTest("Failure in setUpClass: %s" % e)
def test_02_concurrent_snapshot_global_limit(self):
    """ Test if global value concurrent.snapshots.threshold.perhost
    value is respected

    This is a negative test case: it verifies that attempting more
    concurrent snapshots than the configured limit raises an exception.

    # 1. Read the global value for concurrent.snapshots.threshold.perhost
    # 2. If the value is Null, skip the test case
    # 3. Create an account and a VM in it
    # 4. Create more concurrent snapshots than the global allowed limit
    # 5. Verify that an exception is raised while creating snapshots
    """
    config = Configurations.list(
        self.apiclient,
        name="concurrent.snapshots.threshold.perhost")
    # Validate the response BEFORE indexing into it. (Previously
    # config[0].value was evaluated first, so a missing config entry
    # crashed with a TypeError instead of a clear assertion failure.)
    self.assertEqual(
        validateList(config)[0],
        PASS,
        "concurrent.snapshots.threshold.perhost should be present in global config")

    if config[0].value:
        concurrentSnapshots = int(config[0].value)
    else:
        self.skipTest("Skipping tests as the config value concurrent.snapshots.threshold.perhost is Null")

    # Create an account
    account = Account.create(
        self.apiclient,
        self.testdata["account"],
        domainid=self.domain.id)
    self.cleanup.append(account)

    # Create user api client of the account
    userapiclient = self.testClient.getUserApiClient(
        UserName=account.name,
        DomainName=account.domain)

    # Create VM
    virtual_machine = VirtualMachine.create(
        userapiclient,
        self.testdata["small"],
        templateid=self.template.id,
        accountid=account.name,
        domainid=account.domainid,
        serviceofferingid=self.service_offering.id,
        zoneid=self.zone.id)

    # Step 1
    # Get ROOT Volume Id
    volumes = Volume.list(
        self.apiclient,
        virtualmachineid=virtual_machine.id,
        type='ROOT',
        listall=True)
    self.assertEqual(
        validateList(volumes)[0],
        PASS,
        "Volumes list validation failed")
    root_volume = volumes[0]

    # Fire one more snapshot request than the configured limit;
    # createSnapshot records any failure in self.exceptionOccured.
    threads = []
    for _ in range(concurrentSnapshots + 1):
        thread = Thread(
            target=self.createSnapshot,
            args=(self.apiclient, root_volume.id))
        threads.append(thread)
        thread.start()

    # Wait for all snapshot attempts to finish before asserting.
    for thread in threads:
        thread.join()

    self.assertTrue(
        self.exceptionOccured,
        "Concurrent snapshots more than concurrent.snapshots.threshold.perhost value successfully created")
    return