def test_02_list_multiple_aff_grps_for_vm(self):
    """
    List multiple affinity groups associated with a vm for projects
    """
    # Create two affinity groups under the project account and deploy one
    # VM that is a member of both.
    aff_grp_01 = self.create_aff_grp(self.account_api_client)
    aff_grp_02 = self.create_aff_grp(self.account_api_client)
    aff_grps_names = [aff_grp_01.name, aff_grp_02.name]
    vm, hostid = self.create_vm_in_aff_grps(ag_list=aff_grps_names)
    list_aff_grps = AffinityGroup.list(self.api_client,
                                       virtualmachineid=vm.id)
    # Sort both name lists so the comparison is order-independent.
    list_aff_grps_names = [list_aff_grps[0].name, list_aff_grps[1].name]
    aff_grps_names.sort()
    list_aff_grps_names.sort()
    self.assertEqual(
        aff_grps_names, list_aff_grps_names,
        "One of the Affinity Groups is missing %s" % list_aff_grps_names)
    vm.delete(self.api_client)
    # Wait for expunge interval to cleanup VM
    # NOTE(review): this call uses self.apiclient while the rest of the
    # test uses self.api_client -- confirm both attributes are valid here.
    wait_for_cleanup(self.apiclient, ["expunge.delay", "expunge.interval"])
    # Groups can only be deleted after the VM is expunged; defer to cleanup.
    self.cleanup.append(aff_grp_01)
    self.cleanup.append(aff_grp_02)
def test_07_list_all_vms_in_aff_grp(self):
    """
    List affinity group should list all for a vms associated with that group for projects
    """
    aff_grp = self.create_aff_grp(self.account_api_client)
    # Deploy two VMs into the same affinity group.
    vm1, hostid1 = self.create_vm_in_aff_grps(ag_list=[aff_grp.name])
    vm2, hostid2 = self.create_vm_in_aff_grps(ag_list=[aff_grp.name])
    list_aff_grps = AffinityGroup.list(self.api_client, id=aff_grp.id,
                                       projectid=self.project.id)
    self.assertEqual(list_aff_grps[0].name, aff_grp.name,
                     "Listing Affinity Group by id failed")
    # NOTE(review): asserting positions [0] and [1] assumes the API returns
    # virtualmachineIds in deployment order -- confirm that guarantee.
    self.assertEqual(
        list_aff_grps[0].virtualmachineIds[0], vm1.id,
        "List affinity group response.virtualmachineIds for group: %s doesn't contain vmid : %s"
        % (aff_grp.name, vm1.id))
    self.assertEqual(
        list_aff_grps[0].virtualmachineIds[1], vm2.id,
        "List affinity group response.virtualmachineIds for group: %s doesn't contain vmid : %s"
        % (aff_grp.name, vm2.id))
    vm1.delete(self.api_client)
    vm2.delete(self.api_client)
    # Wait for expunge interval to cleanup VM
    wait_for_cleanup(self.apiclient, ["expunge.delay", "expunge.interval"])
    self.cleanup.append(aff_grp)
def test_02_deploy_vm_anti_affinity_group_fail_on_not_enough_hosts(self):
    """
    test DeployVM in anti-affinity groups with more vms than hosts.
    """
    hosts = list_hosts(self.api_client, type="routing")
    aff_grp = self.create_aff_grp(self.account_api_client)
    vms = []
    # Fill every routing host with one VM from the anti-affinity group.
    for host in hosts:
        vms.append(
            self.create_vm_in_aff_grps(self.account_api_client,
                                       ag_list=[aff_grp.name]))
    vm_failed = None
    # One deployment beyond the host count must violate anti-affinity.
    with self.assertRaises(Exception):
        vm_failed = self.create_vm_in_aff_grps(self.account_api_client,
                                               ag_list=[aff_grp.name])
    self.assertEqual(len(hosts), len(vms),
                     "Received %s and %s " % (hosts, vms))
    # NOTE(review): when the deploy raises as expected, vm_failed stays
    # None, so this branch is effectively dead; when the deploy
    # unexpectedly succeeds, assertRaises fails the test before reaching
    # this cleanup -- the stray VM would then leak.
    if vm_failed:
        vm_failed.expunge(self.api_client)
    wait_for_cleanup(self.api_client, ["expunge.delay", "expunge.interval"])
    self.cleanup.append(aff_grp)
def test_03_project_vmlifecycle_delete_instance(self):
    """Verify the project memory resource count drops to 0 after the
    project's VM is destroyed and expunged."""
    # Validate the following
    # 1. Assign account to projects and verify the resource updates
    # 2. Deploy VM with the accounts added to the project
    # 3. Destroy VM of an accounts added to the project
    # 4. Resource count should list as 0 after destroying the instance
    self.debug("Checking memory resource count for project: %s" %
               self.project.name)
    project_list = Project.list(self.apiclient, id=self.project.id,
                                listall=True)
    self.debug(project_list)
    self.assertIsInstance(project_list, list,
                          "List Projects should return a valid response")
    # Memory usage before the destroy; only logged, never asserted.
    resource_count = project_list[0].memorytotal
    self.debug(resource_count)
    self.debug("Destroying instance: %s" % self.vm.name)
    try:
        self.vm.delete(self.apiclient)
    except Exception as e:
        self.fail("Failed to delete instance: %s" % e)
    # Wait for expunge interval to cleanup Memory
    wait_for_cleanup(self.apiclient, ["expunge.delay", "expunge.interval"])
    self.debug("Checking memory resource count for project: %s" %
               self.project.name)
    project_list = Project.list(self.apiclient, id=self.project.id,
                                listall=True)
    self.assertIsInstance(project_list, list,
                          "List Projects should return a valid response")
    resource_count_after_delete = project_list[0].memorytotal
    self.assertEqual(
        resource_count_after_delete, 0,
        "Resource count for %s should be 0" %
        get_resource_type(resource_id=9))  # RAM
    return
def test_01_deploy_vm_anti_affinity_group(self):
    """
    test DeployVM in anti-affinity groups

    deploy VM1 and VM2 in the same host-anti-affinity groups
    Verify that the vms are deployed on separate hosts
    """
    aff_grp = self.create_aff_grp(self.account_api_client)
    vm1, hostid1 = self.create_vm_in_aff_grps(self.account_api_client,
                                              ag_list=[aff_grp.name])
    vm2, hostid2 = self.create_vm_in_aff_grps(self.account_api_client,
                                              ag_list=[aff_grp.name])
    # Anti-affinity: the two VMs must land on different hosts.
    self.assertNotEqual(
        hostid1, hostid2,
        msg="Both VMs of affinity group %s are on the same host: %s , %s, %s, %s"
        % (aff_grp.name, vm1, hostid1, vm2, hostid2))
    vm1.delete(self.api_client)
    vm2.delete(self.api_client)
    # Wait for the expunge interval so the group can be removed in cleanup.
    wait_for_cleanup(self.api_client, ["expunge.delay", "expunge.interval"])
    self.cleanup.append(aff_grp)
def test_03_delete_instance(self): """Test Deploy VM with specified GB RAM & verify the usage""" # Validate the following # 1. Create compute offering with specified RAM & Deploy VM as root admin # 2. List Resource count for the root admin Memory usage # 3. Delete instance, resource count should be 0 after delete operation. account_list = Account.list(self.apiclient, id=self.account.id) self.assertIsInstance(account_list, list, "List Accounts should return a valid response") resource_count = account_list[0].memorytotal expected_resource_count = int(self.services["service_offering"]["memory"]) self.assertEqual( resource_count, expected_resource_count, "Resource count should match with the expected resource count" ) self.debug("Destroying instance: %s" % self.vm.name) try: self.vm.delete(self.apiclient) except Exception as e: self.fail("Failed to delete instance: %s" % e) # Wait for expunge interval to cleanup Memory wait_for_cleanup(self.apiclient, ["expunge.delay", "expunge.interval"]) account_list = Account.list(self.apiclient, id=self.account.id) self.assertIsInstance(account_list, list, "List Accounts should return a valid response") resource_count_after_delete = account_list[0].memorytotal self.assertEqual( resource_count_after_delete, 0, "Resource count for %s should be 0" % get_resource_type(resource_id=9) ) # RAM return
def test_02_pt_deploy_vm_with_startvm_true(self): """ Positive test for stopped VM test path - T1 variant # 1. Deploy VM in the network specifying startvm parameter as True # 2. List VMs and verify that VM is in running state # 3. Verify that router is in running state (Advanced zone) # 4. Add network rules for VM (done in base.py itself) to make # it accessible # 5. Verify that VM is accessible # 6. Destroy and expunge the VM # 7. Wait for network gc time interval and check that router is # in stopped state """ # Create VM in account virtual_machine = VirtualMachine.create( self.userapiclient, self.testdata["small"], templateid=self.defaultTemplateId, accountid=self.account.name, domainid=self.account.domainid, serviceofferingid=self.service_offering.id, networkids=[ self.networkid, ] if self.networkid else None, zoneid=self.zone.id, startvm=True, mode=self.zone.networktype) response = virtual_machine.getState(self.userapiclient, VirtualMachine.RUNNING) self.assertEqual(response[0], PASS, response[1]) if str(self.zone.networktype).lower() == "advanced": response = VerifyRouterState(self.apiclient, self.account.name, self.account.domainid, RUNNING) self.assertTrue(response[0], response[1]) # Check VM accessibility try: SshClient(host=virtual_machine.ssh_ip, port=self.testdata["natrule"]["publicport"], user=virtual_machine.username, passwd=virtual_machine.password) except Exception as e: self.fail("Exception while SSHing to VM: %s" % e) virtual_machine.delete(self.apiclient) if str(self.zone.networktype).lower() == "advanced": # Wait for router to get router in stopped state wait_for_cleanup(self.apiclient, ["network.gc.interval", "network.gc.wait"]) response = VerifyRouterState(self.apiclient, self.account.name, self.account.domainid, STOPPED, retries=10) self.assertTrue(response[0], response[1]) return
def tearDown(self):
    """Release every resource registered during the test, then wait for
    router garbage collection so subsequent tests start from a clean state."""
    try:
        cleanup_resources(self.apiclient, self.cleanup)
        # Router cleanup must finish before further test cases run.
        wait_for_cleanup(
            self.apiclient,
            ["network.gc.interval", "network.gc.wait"],
        )
    except Exception as e:
        raise Exception("Warning: Exception during cleanup : %s" % e)
def test_03_delete_vm(self): """Test Deploy VM with specified RAM & verify the usage""" # Validate the following # 1. Create compute offering with specified RAM & Deploy VM in the created domain # 2. List Resource count for the root admin Memory usage # 3. Delete vm, resource count should list as 0 after delete operation. # Resetting the memory count of service offering self.services["service_offering"]["memory"] = 2048 self.debug("Setting up account and domain hierarchy") self.setupAccounts() users = {self.child_domain_1: self.child_do_admin_1, self.child_domain_2: self.child_do_admin_2 } for domain, admin in users.items(): self.account = admin self.domain = domain self.debug("Creating an instance with service offering: %s" % self.service_offering.name) api_client = self.testClient.getUserApiClient( UserName=self.account.name, DomainName=self.account.domain) vm = self.createInstance(service_off=self.service_offering, api_client=api_client) account_list = Account.list(self.apiclient, id=self.account.id) self.assertIsInstance(account_list, list, "List Accounts should return a valid response" ) resource_count = account_list[0].memorytotal expected_resource_count = int(self.services["service_offering"]["memory"]) self.assertEqual(resource_count, expected_resource_count, "Resource count should match with the expected resource count") self.debug("Destroying instance: %s" % vm.name) try: vm.delete(self.apiclient) except Exception as e: self.fail("Failed to delete instance: %s" % e) # Wait for expunge interval to cleanup Memory wait_for_cleanup(self.apiclient, ["expunge.delay", "expunge.interval"]) account_list = Account.list(self.apiclient, id=self.account.id) self.assertIsInstance(account_list, list, "List Accounts should return a valid response" ) resource_count_after_delete = account_list[0].memorytotal self.assertEqual(resource_count_after_delete, 0, "Resource count for %s should be 0" % get_resource_type(resource_id=9)) # RAM return
def delete_VM(self, vm):
    """Expunge *vm* and drop it from the teardown cleanup list."""
    self.debug('Deleting VM - %s' % vm.name)
    vm.delete(self.api_client)
    # Block until the expunge thread has actually reclaimed the VM.
    wait_for_cleanup(self.api_client,
                     ["expunge.delay", "expunge.interval"])
    try:
        self.cleanup.remove(vm)
    except ValueError:
        pass  # VM was never registered for teardown cleanup
    self.debug('Deleted VM - %s' % vm.name)
def test_03_delete_vm(self): """Test Deploy VM with specified RAM & verify the usage""" # Validate the following # 1. Create compute offering with specified RAM & Deploy VM in the created domain # 2. List Resource count for the root admin Memory usage # 3. Delete vm, resource count should list as 0 after delete operation. # Resetting the memory count of service offering self.services["service_offering"]["memory"] = 2048 self.debug("Setting up account and domain hierarchy") self.setupAccounts() users = { self.child_domain_1: self.child_do_admin_1, self.child_domain_2: self.child_do_admin_2 } for domain, admin in users.items(): self.account = admin self.domain = domain self.debug("Creating an instance with service offering: %s" % self.service_offering.name) api_client = self.testClient.getUserApiClient( UserName=self.account.name, DomainName=self.account.domain) vm = self.createInstance(service_off=self.service_offering, api_client=api_client) account_list = Account.list(self.apiclient, id=self.account.id) self.assertIsInstance(account_list, list, "List Accounts should return a valid response" ) resource_count = account_list[0].memorytotal expected_resource_count = int(self.services["service_offering"]["memory"]) self.assertEqual(resource_count, expected_resource_count, "Resource count should match with the expected resource count") self.debug("Destroying instance: %s" % vm.name) try: vm.delete(self.apiclient) except Exception as e: self.fail("Failed to delete instance: %s" % e) # Wait for expunge interval to cleanup Memory wait_for_cleanup(self.apiclient, ["expunge.delay", "expunge.interval"]) account_list = Account.list(self.apiclient, id=self.account.id) self.assertIsInstance(account_list, list, "List Accounts should return a valid response" ) resource_count_after_delete = account_list[0].memorytotal self.assertEqual(resource_count_after_delete, 0 , "Resource count for %s should be 0" % get_resource_type(resource_id=9))#RAM return
def delete_Network(self, network):
    """Delete *network* once network GC has run, and drop it from the
    teardown cleanup list."""
    self.debug('Deleting Network - %s' % network.name)
    # Let the network garbage collector release resources before deleting.
    wait_for_cleanup(self.api_client,
                     ["network.gc.interval", "network.gc.wait"])
    network.delete(self.api_client)
    try:
        self.cleanup.remove(network)
    except ValueError:
        pass  # network was never registered for teardown cleanup
    self.debug('Deleted Network - %s' % network.name)
def delete_Network(self, network):
    """Remove *network*, giving the garbage collector time to run first."""
    self.debug('Deleting Network - %s' % network.name)
    # Network GC must complete before the delete call can succeed cleanly.
    gc_configs = ["network.gc.interval", "network.gc.wait"]
    wait_for_cleanup(self.api_client, gc_configs)
    network.delete(self.api_client)
    if network in self.cleanup:
        # Already deleted here; keep tearDown from deleting it again.
        self.cleanup.remove(network)
    self.debug('Deleted Network - %s' % network.name)
def delete_VM(self, vm):
    """Destroy *vm*, wait until it is expunged, and unregister it from
    the teardown cleanup list."""
    self.debug('Deleting VM - %s' % vm.name)
    vm.delete(self.api_client)
    # Expunge must finish before dependent resources can be reclaimed.
    expunge_configs = ["expunge.delay", "expunge.interval"]
    wait_for_cleanup(self.api_client, expunge_configs)
    if vm in self.cleanup:
        # Already removed here; keep tearDown from deleting it again.
        self.cleanup.remove(vm)
    self.debug('Deleted VM - %s' % vm.name)
def test_02_pt_deploy_vm_with_startvm_true(self): """ Positive test for stopped VM test path - T1 variant # 1. Deploy VM in the network specifying startvm parameter as True # 2. List VMs and verify that VM is in running state # 3. Verify that router is in running state (Advanced zone) # 4. Add network rules for VM (done in base.py itself) to make # it accessible # 5. Verify that VM is accessible # 6. Destroy and expunge the VM # 7. Wait for network gc time interval and check that router is # in stopped state """ # Create VM in account virtual_machine = VirtualMachine.create( self.userapiclient, self.testdata["small"], templateid=self.defaultTemplateId, accountid=self.account.name, domainid=self.account.domainid, serviceofferingid=self.service_offering.id, networkids=[self.networkid] if self.networkid else None, zoneid=self.zone.id, startvm=True, mode=self.zone.networktype, ) response = virtual_machine.getState(self.userapiclient, VirtualMachine.RUNNING) self.assertEqual(response[0], PASS, response[1]) if str(self.zone.networktype).lower() == "advanced": response = VerifyRouterState(self.apiclient, self.account.name, self.account.domainid, RUNNING) self.assertTrue(response[0], response[1]) # Check VM accessibility try: SshClient( host=virtual_machine.ssh_ip, port=self.testdata["natrule"]["publicport"], user=virtual_machine.username, passwd=virtual_machine.password, ) except Exception as e: self.fail("Exception while SSHing to VM: %s" % e) virtual_machine.delete(self.apiclient) if str(self.zone.networktype).lower() == "advanced": # Wait for router to get router in stopped state wait_for_cleanup(self.apiclient, ["network.gc.interval", "network.gc.wait"]) response = VerifyRouterState(self.apiclient, self.account.name, self.account.domainid, STOPPED, retries=10) self.assertTrue(response[0], response[1]) return
def test_02_accountSnapshotClean(self): """Test snapshot cleanup after account deletion """ # Validate the following # 1. listAccounts API should list out the newly created account # 2. listVirtualMachines() command should return the deployed VM. # State of this VM should be "Running" # 3. a)listSnapshots should list the snapshot that was created. # b)verify that secondary storage NFS share contains the reqd volume # under /secondary/snapshots/$accountid/$volumeid/$snapshot_id # 4. a)listAccounts should not list account that is deleted # b) snapshot image($snapshot_id) should be deleted from the # /secondary/snapshots/$accountid/$volumeid/ try: accounts = list_accounts(self.apiclient, id=self.account.id) self.assertEqual(isinstance(accounts, list), True, "Check list response returns a valid list") self.assertNotEqual(len(accounts), 0, "Check list Accounts response") # Verify the snapshot was created or not snapshots = list_snapshots(self.apiclient, id=self.snapshot.id) self.assertEqual(isinstance(snapshots, list), True, "Check list response returns a valid list") self.assertNotEqual(snapshots, None, "No such snapshot %s found" % self.snapshot.id) self.assertEqual(snapshots[0].id, self.snapshot.id, "Check snapshot id in list resources call") self.assertTrue( is_snapshot_on_nfs(self.apiclient, self.dbclient, self.config, self.zone.id, self.snapshot.id), "Snapshot was not found on NFS") except Exception as e: self._cleanup.append(self.account) self.fail("Exception occured: %s" % e) self.debug("Deleting account: %s" % self.account.name) # Delete account self.account.delete(self.apiclient) # Wait for account cleanup interval wait_for_cleanup(self.apiclient, configs=["account.cleanup.interval"]) with self.assertRaises(Exception): accounts = list_accounts(self.apiclient, id=self.account.id) self.assertFalse( is_snapshot_on_nfs(self.apiclient, self.dbclient, self.config, self.zone.id, self.snapshot.id), "Snapshot was still found on NFS after account gc") return
def tearDownClass(cls):
    """Class-level cleanup: clear host tags, delete the account, wait for
    account GC, then purge remaining resources and the VPC offering."""
    try:
        # Clear the host tags that were set for this test class.
        for tagged_host in cls.hosts[:2]:
            Host.update(cls.api_client, id=tagged_host.id, hosttags="")
        cls.account.delete(cls.api_client)
        wait_for_cleanup(cls.api_client, ["account.cleanup.interval"])
        # Cleanup remaining registered resources.
        cleanup_resources(cls.api_client, cls._cleanup)
        cls.vpc_off.delete(cls.api_client)
    except Exception as e:
        raise Exception("Warning: Exception during cleanup : %s" % e)
def tearDownClass(cls):
    """Tear down class fixtures: host tags, account, pooled resources,
    and finally the VPC offering."""
    try:
        # Reset the tags on the two hosts used by this class.
        for idx in (0, 1):
            Host.update(cls.api_client, id=cls.hosts[idx].id, hosttags="")
        cls.account.delete(cls.api_client)
        wait_for_cleanup(cls.api_client, ["account.cleanup.interval"])
        # Cleanup remaining registered resources.
        cleanup_resources(cls.api_client, cls._cleanup)
        cls.vpc_off.delete(cls.api_client)
    except Exception as e:
        raise Exception("Warning: Exception during cleanup : %s" % e)
def test_01_update_aff_grp_by_ids(self):
    """
    Update the list of affinityGroups by using affinity groupids
    """
    aff_grp1 = self.create_aff_grp(self.account_api_client)
    aff_grp2 = self.create_aff_grp(self.account_api_client)
    # Both VMs start out in the first group only.
    vm1, hostid1 = self.create_vm_in_aff_grps(ag_list=[aff_grp1.name])
    vm2, hostid2 = self.create_vm_in_aff_grps(ag_list=[aff_grp1.name])
    # Affinity groups can only be updated while the VM is stopped.
    vm1.stop(self.api_client)
    list_aff_grps = AffinityGroup.list(self.api_client,
                                       projectid=self.project.id)
    self.assertEqual(len(list_aff_grps), 2,
                     "2 affinity groups should be present")
    vm1.update_affinity_group(
        self.api_client,
        affinitygroupids=[list_aff_grps[0].id, list_aff_grps[1].id])
    list_aff_grps = AffinityGroup.list(self.api_client,
                                       virtualmachineid=vm1.id)
    # Compare sorted name lists so ordering does not matter.
    list_aff_grps_names = [list_aff_grps[0].name, list_aff_grps[1].name]
    aff_grps_names = [aff_grp1.name, aff_grp2.name]
    aff_grps_names.sort()
    list_aff_grps_names.sort()
    self.assertEqual(
        aff_grps_names, list_aff_grps_names,
        "One of the Affinity Groups is missing %s" % list_aff_grps_names)
    vm1.start(self.api_client)
    vm_status = VirtualMachine.list(self.api_client, id=vm1.id)
    # The restarted VM must honour anti-affinity against vm2's host.
    self.assertNotEqual(
        vm_status[0].hostid, hostid2,
        "The virtual machine started on host %s violating the host anti-affinity rule"
        % vm_status[0].hostid)
    vm1.delete(self.api_client)
    vm2.delete(self.api_client)
    # Wait for expunge interval to cleanup VM
    wait_for_cleanup(self.apiclient, ["expunge.delay", "expunge.interval"])
    aff_grp1.delete(self.api_client)
    aff_grp2.delete(self.api_client)
def tearDown(self):
    """Ordered cleanup: egress rule first, then VMs (with expunge wait),
    then networks, then everything else registered in self.cleanup."""
    try:
        if self.egressruleid:
            self.debug('remove egress rule id=%s' % self.egressruleid)
            self.deleteEgressRule()
        self.debug("Cleaning up the resources")
        # The components below are cleaned up here rather than via
        # self.cleanup to enforce ordering: VMs must go before the
        # networks that host them.
        try:
            for vm in self.cleanup_vms:
                if str(vm.state).lower() != "error":
                    vm.delete(self.api_client)
        except Exception as e:
            self.fail(
                "Warning: Exception during virtual machines cleanup : %s" % e)
        # Wait for VMs to expunge
        wait_for_cleanup(self.api_client,
                         ["expunge.delay", "expunge.interval"])
        # Poll up to ~10 minutes for the VM to disappear entirely.
        if len(self.cleanup_vms) > 0:
            retriesCount = 10
            while True:
                vms = list_virtual_machines(self.api_client,
                                            id=self.virtual_machine.id)
                if vms is None:
                    break
                elif retriesCount == 0:
                    self.fail("Failed to expunge vm even after 10 minutes")
                time.sleep(60)
                retriesCount -= 1
        try:
            for network in self.cleanup_networks:
                network.delete(self.api_client)
        except Exception as e:
            self.fail("Warning: Exception during networks cleanup : %s" % e)
        self.debug("Sleep for Network cleanup to complete.")
        # NOTE(review): switches from self.api_client to self.apiclient
        # here -- confirm both attributes are valid on this test class.
        wait_for_cleanup(self.apiclient,
                         ["network.gc.wait", "network.gc.interval"])
        cleanup_resources(self.apiclient, reversed(self.cleanup))
        self.debug("Cleanup complete!")
    except Exception as e:
        self.fail("Warning! Cleanup failed: %s" % e)
def test_01_deploy_vm_anti_affinity_group(self):
    """
    test DeployVM in anti-affinity groups

    deploy VM1 and VM2 in the same host-anti-affinity groups
    Verify that the vms are deployed on separate hosts
    """
    aff_grp = self.create_aff_grp(self.account_api_client)
    vm1, hostid1 = self.create_vm_in_aff_grps(self.account_api_client,
                                              ag_list=[aff_grp.name])
    vm2, hostid2 = self.create_vm_in_aff_grps(self.account_api_client,
                                              ag_list=[aff_grp.name])
    # Anti-affinity: the two VMs must land on different hosts.
    self.assertNotEqual(
        hostid1, hostid2,
        msg="Both VMs of affinity group %s are on the same host: %s , %s, %s, %s"
        % (aff_grp.name, vm1, hostid1, vm2, hostid2))
    vm1.delete(self.api_client)
    vm2.delete(self.api_client)
    # Wait for the expunge interval so the group can be removed in cleanup.
    wait_for_cleanup(self.api_client, ["expunge.delay", "expunge.interval"])
    self.cleanup.append(aff_grp)
def test_01_list_aff_grps_for_vm(self):
    """
    List affinity group for a vm for projects
    """
    aff_grps = []
    # The group is created inside the project via the domain client.
    aff_grps.append(
        self.create_aff_grp(self.domain_api_client,
                            projectid=self.project.id))
    vm, hostid = self.create_vm_in_aff_grps(self.account_api_client,
                                            ag_list=[aff_grps[0].name])
    list_aff_grps = AffinityGroup.list(self.api_client,
                                       virtualmachineid=vm.id)
    self.assertEqual(list_aff_grps[0].name, aff_grps[0].name,
                     "Listing Affinity Group by VM id failed")
    self.assertEqual(
        list_aff_grps[0].projectid, self.project.id,
        "Listing Affinity Group by VM id failed, vm was not in project")
    vm.delete(self.api_client)
    # Wait for expunge interval to cleanup VM
    wait_for_cleanup(self.apiclient, ["expunge.delay", "expunge.interval"])
    self.cleanup.append(aff_grps[0])
def tearDown(self):
    """Ordered cleanup: egress rule first, then VMs (with expunge wait),
    then networks, then everything else registered in self.cleanup."""
    try:
        if self.egressruleid:
            self.debug('remove egress rule id=%s' % self.egressruleid)
            self.deleteEgressRule()
        self.debug("Cleaning up the resources")
        # The components below are cleaned up here rather than via
        # self.cleanup to enforce ordering: VMs must go before the
        # networks that host them.
        try:
            for vm in self.cleanup_vms:
                if str(vm.state).lower() != "error":
                    vm.delete(self.api_client)
        except Exception as e:
            self.fail(
                "Warning: Exception during virtual machines cleanup : %s" % e)
        # Wait for VMs to expunge
        wait_for_cleanup(self.api_client,
                         ["expunge.delay", "expunge.interval"])
        # Poll up to ~10 minutes for the VM to disappear entirely.
        if len(self.cleanup_vms) > 0:
            retriesCount = 10
            while True:
                vms = list_virtual_machines(self.api_client,
                                            id=self.virtual_machine.id)
                if vms is None:
                    break
                elif retriesCount == 0:
                    self.fail("Failed to expunge vm even after 10 minutes")
                time.sleep(60)
                retriesCount -= 1
        try:
            for network in self.cleanup_networks:
                network.delete(self.api_client)
        except Exception as e:
            self.fail("Warning: Exception during networks cleanup : %s" % e)
        self.debug("Sleep for Network cleanup to complete.")
        # NOTE(review): switches from self.api_client to self.apiclient
        # here -- confirm both attributes are valid on this test class.
        wait_for_cleanup(self.apiclient,
                         ["network.gc.wait", "network.gc.interval"])
        cleanup_resources(self.apiclient, reversed(self.cleanup))
        self.debug("Cleanup complete!")
    except Exception as e:
        self.fail("Warning! Cleanup failed: %s" % e)
def test_02_deploy_vm_anti_affinity_group_fail_on_not_enough_hosts(self):
    """
    test DeployVM in anti-affinity groups with more vms than hosts.
    """
    hosts = list_hosts(self.api_client, type="routing")
    aff_grp = self.create_aff_grp(self.account_api_client)
    vms = []
    # Fill every routing host with one VM from the anti-affinity group.
    for host in hosts:
        vms.append(
            self.create_vm_in_aff_grps(self.account_api_client,
                                       ag_list=[aff_grp.name]))
    vm_failed = None
    # One deployment beyond the host count must violate anti-affinity.
    with self.assertRaises(Exception):
        vm_failed = self.create_vm_in_aff_grps(self.account_api_client,
                                               ag_list=[aff_grp.name])
    self.assertEqual(len(hosts), len(vms),
                     "Received %s and %s " % (hosts, vms))
    # NOTE(review): when the deploy raises as expected, vm_failed stays
    # None, so this branch is effectively dead; when the deploy
    # unexpectedly succeeds, assertRaises fails the test before reaching
    # this cleanup -- the stray VM would then leak.
    if vm_failed:
        vm_failed.expunge(self.api_client)
    wait_for_cleanup(self.api_client, ["expunge.delay", "expunge.interval"])
    self.cleanup.append(aff_grp)
def test_03_project_vmlifecycle_delete_instance(self):
    """Verify the project memory resource count drops to 0 after the
    project's VM is destroyed and expunged."""
    # Validate the following
    # 1. Assign account to projects and verify the resource updates
    # 2. Deploy VM with the accounts added to the project
    # 3. Destroy VM of an accounts added to the project
    # 4. Resource count should list as 0 after destroying the instance
    self.debug("Checking memory resource count for project: %s" %
               self.project.name)
    project_list = Project.list(self.apiclient, id=self.project.id,
                                listall=True)
    self.debug(project_list)
    self.assertIsInstance(project_list, list,
                          "List Projects should return a valid response")
    # Memory usage before the destroy; only logged, never asserted.
    resource_count = project_list[0].memorytotal
    self.debug(resource_count)
    self.debug("Destroying instance: %s" % self.vm.name)
    try:
        self.vm.delete(self.apiclient)
    except Exception as e:
        self.fail("Failed to delete instance: %s" % e)
    # Wait for expunge interval to cleanup Memory
    wait_for_cleanup(self.apiclient, ["expunge.delay", "expunge.interval"])
    self.debug("Checking memory resource count for project: %s" %
               self.project.name)
    project_list = Project.list(self.apiclient, id=self.project.id,
                                listall=True)
    self.assertIsInstance(project_list, list,
                          "List Projects should return a valid response")
    resource_count_after_delete = project_list[0].memorytotal
    self.assertEqual(resource_count_after_delete, 0,
                     "Resource count for %s should be 0" %
                     get_resource_type(resource_id=9))  # RAM
    return
def test_01_update_aff_grp_by_ids(self):
    """
    Update the list of affinityGroups by using affinity groupids
    """
    aff_grp1 = self.create_aff_grp(self.account_api_client)
    aff_grp2 = self.create_aff_grp(self.account_api_client)
    # Both VMs start out in the first group only.
    vm1, hostid1 = self.create_vm_in_aff_grps(ag_list=[aff_grp1.name])
    vm2, hostid2 = self.create_vm_in_aff_grps(ag_list=[aff_grp1.name])
    # Affinity groups can only be updated while the VM is stopped.
    vm1.stop(self.api_client)
    list_aff_grps = AffinityGroup.list(self.api_client,
                                       projectid=self.project.id)
    self.assertEqual(len(list_aff_grps), 2,
                     "2 affinity groups should be present")
    vm1.update_affinity_group(
        self.api_client,
        affinitygroupids=[list_aff_grps[0].id, list_aff_grps[1].id])
    list_aff_grps = AffinityGroup.list(self.api_client,
                                       virtualmachineid=vm1.id)
    # Compare sorted name lists so ordering does not matter.
    list_aff_grps_names = [list_aff_grps[0].name, list_aff_grps[1].name]
    aff_grps_names = [aff_grp1.name, aff_grp2.name]
    aff_grps_names.sort()
    list_aff_grps_names.sort()
    self.assertEqual(aff_grps_names, list_aff_grps_names,
                     "One of the Affinity Groups is missing %s" %
                     list_aff_grps_names)
    vm1.start(self.api_client)
    vm_status = VirtualMachine.list(self.api_client, id=vm1.id)
    # The restarted VM must honour anti-affinity against vm2's host.
    self.assertNotEqual(vm_status[0].hostid, hostid2,
                        "The virtual machine started on host %s violating the host anti-affinity rule"
                        % vm_status[0].hostid)
    vm1.delete(self.api_client)
    vm2.delete(self.api_client)
    # Wait for expunge interval to cleanup VM
    wait_for_cleanup(self.apiclient, ["expunge.delay", "expunge.interval"])
    aff_grp1.delete(self.api_client)
    aff_grp2.delete(self.api_client)
def test_07_list_all_vms_in_aff_grp(self):
    """
    List affinity group should list all for a vms associated with that group for projects
    """
    aff_grp = self.create_aff_grp(self.account_api_client)
    # Deploy two VMs into the same affinity group.
    vm1, hostid1 = self.create_vm_in_aff_grps(ag_list=[aff_grp.name])
    vm2, hostid2 = self.create_vm_in_aff_grps(ag_list=[aff_grp.name])
    list_aff_grps = AffinityGroup.list(self.api_client, id=aff_grp.id,
                                       projectid=self.project.id)
    self.assertEqual(list_aff_grps[0].name, aff_grp.name,
                     "Listing Affinity Group by id failed")
    # NOTE(review): asserting positions [0] and [1] assumes the API returns
    # virtualmachineIds in deployment order -- confirm that guarantee.
    self.assertEqual(list_aff_grps[0].virtualmachineIds[0], vm1.id,
                     "List affinity group response.virtualmachineIds for group: %s doesn't contain vmid : %s"
                     % (aff_grp.name, vm1.id))
    self.assertEqual(list_aff_grps[0].virtualmachineIds[1], vm2.id,
                     "List affinity group response.virtualmachineIds for group: %s doesn't contain vmid : %s"
                     % (aff_grp.name, vm2.id))
    vm1.delete(self.api_client)
    vm2.delete(self.api_client)
    # Wait for expunge interval to cleanup VM
    wait_for_cleanup(self.apiclient, ["expunge.delay", "expunge.interval"])
    self.cleanup.append(aff_grp)
def test_03_delete_instance(self): """Test Deploy VM with specified GB RAM & verify the usage""" # Validate the following # 1. Create compute offering with specified RAM & Deploy VM as root admin # 2. List Resource count for the root admin Memory usage # 3. Delete instance, resource count should be 0 after delete operation. account_list = Account.list(self.apiclient, id=self.account.id) self.assertIsInstance(account_list, list, "List Accounts should return a valid response") resource_count = account_list[0].memorytotal expected_resource_count = int( self.services["service_offering"]["memory"]) self.assertEqual( resource_count, expected_resource_count, "Resource count should match with the expected resource count") self.debug("Destroying instance: %s" % self.vm.name) try: self.vm.delete(self.apiclient) except Exception as e: self.fail("Failed to delete instance: %s" % e) # Wait for expunge interval to cleanup Memory wait_for_cleanup(self.apiclient, ["expunge.delay", "expunge.interval"]) account_list = Account.list(self.apiclient, id=self.account.id) self.assertIsInstance(account_list, list, "List Accounts should return a valid response") resource_count_after_delete = account_list[0].memorytotal self.assertEqual(resource_count_after_delete, 0, "Resource count for %s should be 0" % get_resource_type(resource_id=9)) #RAM return
def test_01_list_aff_grps_for_vm(self):
    """
    List affinity group for a vm for projects
    """
    aff_grps = []
    # The group is created inside the project via the domain client.
    aff_grps.append(
        self.create_aff_grp(self.domain_api_client,
                            projectid=self.project.id))
    vm, hostid = self.create_vm_in_aff_grps(self.account_api_client,
                                            ag_list=[aff_grps[0].name])
    list_aff_grps = AffinityGroup.list(self.api_client,
                                       virtualmachineid=vm.id)
    self.assertEqual(list_aff_grps[0].name, aff_grps[0].name,
                     "Listing Affinity Group by VM id failed")
    self.assertEqual(
        list_aff_grps[0].projectid, self.project.id,
        "Listing Affinity Group by VM id failed, vm was not in project")
    vm.delete(self.api_client)
    # Wait for expunge interval to cleanup VM
    wait_for_cleanup(self.apiclient, ["expunge.delay", "expunge.interval"])
    self.cleanup.append(aff_grps[0])
def test_02_list_multiple_aff_grps_for_vm(self):
    """
    List multiple affinity groups associated with a vm for projects
    """
    # Create two affinity groups under the project account and deploy one
    # VM that is a member of both.
    aff_grp_01 = self.create_aff_grp(self.account_api_client)
    aff_grp_02 = self.create_aff_grp(self.account_api_client)
    aff_grps_names = [aff_grp_01.name, aff_grp_02.name]
    vm, hostid = self.create_vm_in_aff_grps(ag_list=aff_grps_names)
    list_aff_grps = AffinityGroup.list(self.api_client,
                                       virtualmachineid=vm.id)
    # Sort both name lists so the comparison is order-independent.
    list_aff_grps_names = [list_aff_grps[0].name, list_aff_grps[1].name]
    aff_grps_names.sort()
    list_aff_grps_names.sort()
    self.assertEqual(aff_grps_names, list_aff_grps_names,
                     "One of the Affinity Groups is missing %s" %
                     list_aff_grps_names)
    vm.delete(self.api_client)
    # Wait for expunge interval to cleanup VM
    # NOTE(review): this call uses self.apiclient while the rest of the
    # test uses self.api_client -- confirm both attributes are valid here.
    wait_for_cleanup(self.apiclient, ["expunge.delay", "expunge.interval"])
    # Groups can only be deleted after the VM is expunged; defer to cleanup.
    self.cleanup.append(aff_grp_01)
    self.cleanup.append(aff_grp_02)
def test_02_accountSnapshotClean(self):
    """Test snapshot cleanup after account deletion """
    # Validate the following
    # 1. listAccounts API should list out the newly created account
    # 2. listVirtualMachines() command should return the deployed VM.
    #    State of this VM should be "Running"
    # 3. a) listSnapshots should list the snapshot that was created.
    #    b) verify that secondary storage NFS share contains the reqd volume
    #       under /secondary/snapshots/$accountid/$volumeid/$snapshot_id
    # 4. a) listAccounts should not list account that is deleted
    #    b) snapshot image($snapshot_id) should be deleted from the
    #       /secondary/snapshots/$accountid/$volumeid/
    try:
        # Precondition: the account created in setup is visible.
        accounts = list_accounts(
            self.apiclient,
            id=self.account.id
        )
        self.assertEqual(
            isinstance(accounts, list),
            True,
            "Check list response returns a valid list"
        )
        self.assertNotEqual(
            len(accounts),
            0,
            "Check list Accounts response"
        )

        # Verify the snapshot was created or not
        snapshots = list_snapshots(
            self.apiclient,
            id=self.snapshot.id
        )
        self.assertEqual(
            isinstance(snapshots, list),
            True,
            "Check list response returns a valid list"
        )
        self.assertNotEqual(
            snapshots,
            None,
            "No such snapshot %s found" % self.snapshot.id
        )
        self.assertEqual(
            snapshots[0].id,
            self.snapshot.id,
            "Check snapshot id in list resources call"
        )
        # The snapshot file must physically exist on secondary (NFS)
        # storage before the account is deleted.
        self.assertTrue(is_snapshot_on_nfs(self.apiclient, self.dbclient,
                                           self.config, self.zone.id,
                                           self.snapshot.id),
                        "Snapshot was not found on NFS")
    except Exception as e:
        # On any precondition failure, hand the account to the class
        # teardown list so it still gets removed, then fail the test.
        self._cleanup.append(self.account)
        self.fail("Exception occured: %s" % e)

    self.debug("Deleting account: %s" % self.account.name)
    # Delete account
    self.account.delete(self.apiclient)

    # Wait for account cleanup interval
    wait_for_cleanup(self.apiclient, configs=["account.cleanup.interval"])

    # After garbage collection the account lookup must fail entirely.
    with self.assertRaises(Exception):
        accounts = list_accounts(
            self.apiclient,
            id=self.account.id
        )

    # Account GC must also have removed the snapshot file from NFS.
    self.assertFalse(is_snapshot_on_nfs(self.apiclient, self.dbclient,
                                        self.config, self.zone.id,
                                        self.snapshot.id),
                     "Snapshot was still found on NFS after account gc")
    return
def test_forceDeleteDomain(self):
    """ Test delete domain with force option"""
    # Steps for validations
    # 1. create a domain DOM
    # 2. create 2 users under this domain
    # 3. deploy 1 VM into each of these user accounts
    # 4. create PF / FW rules for port 22 on these VMs for their
    #    respective accounts
    # 5. delete the domain with force=true option
    # Validate the following
    # 1. listDomains should list the created domain
    # 2. listAccounts should list the created accounts
    # 3. listvirtualmachines should show the Running VMs
    # 4. PF and FW rules should be shown in listFirewallRules
    # 5. domain should delete successfully and above three list calls
    #    should show all the resources now deleted. listRouters should
    #    not return any routers in the deleted accounts/domains

    self.debug("Creating a domain for login with API domain test")
    domain = Domain.create(
        self.apiclient,
        self.services["domain"],
        parentdomainid=self.domain.id
    )
    self.debug("Domain is created succesfully.")
    self.debug(
        "Checking if the created domain is listed in list domains API")
    domains = Domain.list(self.apiclient, id=domain.id, listall=True)
    self.assertEqual(
        isinstance(domains, list),
        True,
        "List domains shall return a valid response"
    )

    self.debug("Creating 2 user accounts in domain: %s" % domain.name)
    self.account_1 = Account.create(
        self.apiclient,
        self.services["account"],
        domainid=domain.id
    )
    self.account_2 = Account.create(
        self.apiclient,
        self.services["account"],
        domainid=domain.id
    )

    try:
        self.debug("Creating a tiny service offering for VM deployment")
        self.service_offering = ServiceOffering.create(
            self.apiclient,
            self.services["service_offering"],
            domainid=self.domain.id
        )

        self.debug("Deploying virtual machine in account 1: %s" %
                   self.account_1.name)
        vm_1 = VirtualMachine.create(
            self.apiclient,
            self.services["virtual_machine"],
            templateid=self.template.id,
            accountid=self.account_1.name,
            domainid=self.account_1.domainid,
            serviceofferingid=self.service_offering.id
        )

        self.debug("Deploying virtual machine in account 2: %s" %
                   self.account_2.name)
        # Return value intentionally unused: this VM only needs to exist
        # so the forced domain delete has a resource to clean up here too.
        VirtualMachine.create(
            self.apiclient,
            self.services["virtual_machine"],
            templateid=self.template.id,
            accountid=self.account_2.name,
            domainid=self.account_2.domainid,
            serviceofferingid=self.service_offering.id
        )

        # Locate account 1's default network to find its source NAT IP.
        networks = Network.list(
            self.apiclient,
            account=self.account_1.name,
            domainid=self.account_1.domainid,
            listall=True
        )
        self.assertEqual(
            isinstance(networks, list),
            True,
            "List networks should return a valid response"
        )
        network_1 = networks[0]
        self.debug("Default network in account 1: %s is %s" % (
            self.account_1.name,
            network_1.name))

        src_nat_list = PublicIPAddress.list(
            self.apiclient,
            associatednetworkid=network_1.id,
            account=self.account_1.name,
            domainid=self.account_1.domainid,
            listall=True,
            issourcenat=True,
        )
        self.assertEqual(
            isinstance(src_nat_list, list),
            True,
            "List Public IP should return a valid source NAT"
        )
        self.assertNotEqual(
            len(src_nat_list),
            0,
            "Length of response from listPublicIp should not be 0"
        )
        src_nat = src_nat_list[0]

        self.debug(
            "Trying to create a port forwarding rule in source NAT: %s" %
            src_nat.ipaddress)
        #Create NAT rule
        nat_rule = NATRule.create(
            self.apiclient,
            vm_1,
            self.services["natrule"],
            ipaddressid=src_nat.id
        )
        self.debug("Created PF rule on source NAT: %s" % src_nat.ipaddress)

        nat_rules = NATRule.list(self.apiclient, id=nat_rule.id)
        self.assertEqual(
            isinstance(nat_rules, list),
            True,
            "List NAT should return a valid port forwarding rules"
        )
        self.assertNotEqual(
            len(nat_rules),
            0,
            "Length of response from listLbRules should not be 0"
        )
    except Exception as e:
        # Setup failed: queue both accounts for teardown cleanup, then fail.
        self._cleanup.append(self.account_1)
        self._cleanup.append(self.account_2)
        self.fail(e)

    self.debug("Deleting domain with force option")
    try:
        domain.delete(self.apiclient, cleanup=True)
    except Exception as e:
        # The forced delete may return before/while the GC is still
        # removing resources; the exception is deliberately swallowed
        # and we wait for cleanup instead of failing immediately.
        self.debug("Waiting for account.cleanup.interval" +
                   " to cleanup any remaining resouces")
        # Sleep 3*account.gc to ensure that all resources are deleted
        wait_for_cleanup(self.apiclient, ["account.cleanup.interval"]*3)

    # The domain and everything under it must now be gone: both lookups
    # are expected to raise rather than return empty results.
    with self.assertRaises(CloudstackAPIException):
        Domain.list(
            self.apiclient,
            id=domain.id,
            listall=True
        )

    self.debug("Checking if the resources in domain are deleted")
    with self.assertRaises(CloudstackAPIException):
        Account.list(
            self.apiclient,
            name=self.account_1.name,
            domainid=self.account_1.domainid,
            listall=True
        )
    return