def test_08_vcpolicy_tag_to_reverted_disk(self):
    Tag.create(
        self.apiclient,
        resourceIds=self.virtual_machine2.id,
        resourceType='UserVm',
        tags={'vc-policy': 'testing_vc-policy'})
    vm = list_virtual_machines(
        self.apiclient,
        id=self.virtual_machine2.id,
        listall=True)
    vm_tags = vm[0].tags
    volume = Volume.list(
        self.apiclient,
        virtualmachineid=self.virtual_machine2.id,
        listall=True,
        type="ROOT")
    self.vc_policy_tags(volume, vm_tags, vm)

    snapshot = Snapshot.create(
        self.apiclient,
        volume[0].id,
        account=self.account.name,
        domainid=self.account.domainid)

    self.virtual_machine2.stop(self.apiclient, forced=True)
    cmd = revertSnapshot.revertSnapshotCmd()
    cmd.id = snapshot.id
    self.apiclient.revertSnapshot(cmd)

    vm = list_virtual_machines(self.apiclient, id=self.virtual_machine2.id)
    vm_tags = vm[0].tags
    vol = list_volumes(self.apiclient, id=snapshot.volumeid, listall=True)
    self.vc_policy_tags(vol, vm_tags, vm)
def test_25_vc_policy_attach_vol_global_id_vm_uuid(self):
    Tag.create(
        self.apiclient,
        resourceIds=self.virtual_machine4.id,
        resourceType='UserVm',
        tags={'vc-policy': 'testing_vc-policy'})
    vm = list_virtual_machines(self.apiclient, id=self.virtual_machine4.id)
    vm_tags = vm[0].tags
    volumes = list_volumes(
        self.apiclient,
        virtualmachineid=self.virtual_machine4.id)
    self.assertTrue(len(volumes) == 1, "Volume length should be == 1")
    for v in volumes:
        self.helper.vc_policy_tags_global_id(v, vm_tags, False)

    volume = Volume.create(
        self.apiclient,
        {"diskname": "StorPoolDisk-GlId-%d" % random.randint(0, 100)},
        zoneid=self.zone.id,
        diskofferingid=self.disk_offering.id)
    self.virtual_machine4.attach_volume(self.apiclient, volume)

    vm = list_virtual_machines(self.apiclient, id=self.virtual_machine4.id)
    vm_tags = vm[0].tags
    volumes = list_volumes(
        self.apiclient,
        virtualmachineid=self.virtual_machine4.id,
        id=volume.id)
    self.assertTrue(len(volumes) == 1, "Volume length should be == 1")
    self.helper.vc_policy_tags_global_id(volumes[0], vm_tags, False)
    self._cleanup.append(volume)
def test_01_add_vm_to_subdomain(self):
    """Test that a sub-domain is allowed to launch a VM when a domain-level zone is created"""
    # Validate the following
    # 1. Verify VM created by Account_1 is in Running state
    # 2. Verify VM created by Account_2 is in Running state
    vm_response = list_virtual_machines(self.apiclient, id=self.vm_1.id)
    self.assertEqual(
        isinstance(vm_response, list),
        True,
        "Check List VM for a valid response")
    self.assertNotEqual(
        len(vm_response),
        0,
        "Check List VM response")
    for vm in vm_response:
        self.debug("VM ID: %s and state: %s" % (vm.id, vm.state))
        self.assertEqual(
            vm.state,
            'Running',
            "Check State of Virtual machine")

    vm_response = list_virtual_machines(self.apiclient, id=self.vm_2.id)
    self.assertNotEqual(
        len(vm_response),
        0,
        "Check List VM response")
    for vm in vm_response:
        self.debug("VM ID: %s and state: %s" % (vm.id, vm.state))
        self.assertEqual(
            vm.state,
            'Running',
            "Check State of Virtual machine")
    return
def check_vm_is_moved_in_account_domainid(self, account):
    list_vm_response = list_virtual_machines(
        self.api_client,
        id=self.virtual_machine.id,
        account=account.name,
        domainid=account.domainid)
    self.debug('VM=%s moved to account=%s and domainid=%s' %
               (list_vm_response, account.name, account.domainid))
    self.assertNotEqual(
        len(list_vm_response),
        0,
        'Unable to move VM to account=%s domainid=%s' %
        (account.name, account.domainid))
def _get_vm(self, vm_id):
    list_vms_response = list_virtual_machines(self.apiClient, id=vm_id)
    self._check_list(
        list_vms_response,
        1,
        TestVolumes._should_only_be_one_vm_in_list_err_msg)
    return list_vms_response[0]
def test_02_set_vcpolicy_tag_to_attached_disk(self):
    """Test set vc-policy tag to new disk attached to VM"""
    volume_attached = self.virtual_machine.attach_volume(
        self.apiclient, self.volume_2)
    volume = list_volumes(self.apiclient, id=volume_attached.id, listall=True)
    name = volume[0].path.split("/")[3]
    sp_volume = self.spapi.volumeList(volumeName="~" + name)

    vm = list_virtual_machines(
        self.apiclient,
        id=self.virtual_machine.id,
        listall=True)
    vm_tags = vm[0].tags
    for vm_tag in vm_tags:
        for sp_tag in sp_volume[0].tags:
            if sp_tag == vm_tag.key:
                self.assertEqual(
                    sp_tag,
                    vm_tag.key,
                    "StorPool tag is not the same as the Virtual Machine tag")
                self.assertEqual(
                    sp_volume[0].tags[sp_tag],
                    vm_tag.value,
                    "StorPool tag value is not the same as the Virtual Machine tag value")
            if sp_tag == 'cvm':
                self.assertEqual(
                    sp_volume[0].tags[sp_tag],
                    vm[0].id,
                    "cvm tag is not the expected value")
def test_delete_network_while_vm_on_it(self):
    """Verifies the user is not able to delete a network which has running VMs"""
    # Validate the following:
    # 1. Deploys a VM
    # 2. Tries to delete the network and expects an exception to appear
    self.virtual_machine = VirtualMachine.create(
        self.apiclient,
        self.services["virtual_machine"],
        templateid=self.template.id,
        serviceofferingid=self.service_offering.id,
        networkids=self.l2_network.id,
        zoneid=self.zone.id)
    self.cleanup.insert(0, self.virtual_machine)

    list_vm = list_virtual_machines(self.apiclient, id=self.virtual_machine.id)
    self.assertEqual(
        isinstance(list_vm, list),
        True,
        "Check if virtual machine is present")

    try:
        self.l2_network.delete(self.apiclient)
    except Exception:
        pass
    else:
        self.fail("Expected an exception to be thrown, failing")
    return
def test_deploy_vm_l2network(self):
    """Creates an L2 network and verifies the user is able to deploy a VM in it"""
    # Validate the following:
    # 1. Deploys a VM
    # 2. There are no network services available since this is an L2 network
    self.virtual_machine = VirtualMachine.create(
        self.apiclient,
        self.services["virtual_machine"],
        templateid=self.template.id,
        serviceofferingid=self.service_offering.id,
        networkids=self.l2_network.id,
        zoneid=self.zone.id)
    self.cleanup.insert(0, self.virtual_machine)

    list_vm = list_virtual_machines(self.apiclient, id=self.virtual_machine.id)
    self.assertEqual(
        isinstance(list_vm, list),
        True,
        "Check if virtual machine is present")
    self.assertEqual(
        list_vm[0].nic[0].type,
        'L2',
        "Check Correct Network type is available")
    self.assertFalse('gateway' in str(list_vm[0].nic[0]))
    self.assertFalse('ipaddress' in str(list_vm[0].nic[0]))
    return
def test_05_set_vcpolicy_tag_with_admin_and_try_delete_with_user(self):
    """Test that a vc-policy tag set by the admin cannot be deleted by a regular user"""
    Tag.create(
        self.apiclient,
        resourceIds=self.virtual_machine.id,
        resourceType='UserVm',
        tags={'vc-policy': 'testing_vc-policy'})

    vm = list_virtual_machines(self.userapiclient, id=self.virtual_machine.id)
    vm_tags = vm[0].tags
    volumes = list_volumes(
        self.apiclient,
        virtualmachineid=self.virtual_machine.id,
        listall=True)
    self.vc_policy_tags(volumes, vm_tags, vm)

    # Deleting the tag with the user API client is expected to fail
    try:
        Tag.delete(
            self.userapiclient,
            resourceIds=self.virtual_machine.id,
            resourceType='UserVm',
            tags={'vc-policy': 'testing_vc-policy'})
    except Exception as e:
        self.debug(
            "test_05_set_vcpolicy_tag_with_admin_and_try_delete_with_user: %s" % e)
def create_vm_in_aff_grps(self, api_client=None, ag_list=[], projectid=None):
    self.debug('Creating VM in AffinityGroups=%s' % ag_list)
    if api_client is None:
        api_client = self.api_client
    if projectid is None:
        projectid = self.project.id

    vm = VirtualMachine.create(
        api_client,
        self.services["virtual_machine"],
        projectid=projectid,
        templateid=self.template.id,
        serviceofferingid=self.service_offering.id,
        affinitygroupnames=ag_list)
    self.debug('Created VM=%s in Affinity Group=%s' % (vm.id, tuple(ag_list)))

    list_vm = list_virtual_machines(self.api_client, id=vm.id, projectid=projectid)
    self.assertEqual(
        isinstance(list_vm, list),
        True,
        "Check list response returns a valid list %s" % list_vm)
    self.assertNotEqual(
        len(list_vm),
        0,
        "Check VM available in TestDeployVMAffinityGroups")
    self.assertEqual(
        list_vm[0].id,
        vm.id,
        "Listed vm does not have the same id")
    vm_response = list_vm[0]
    self.assertEqual(vm_response.state, 'Running', msg="VM is not in Running state")
    self.assertEqual(vm_response.projectid, projectid, msg="VM is not in project")
    self.assertNotEqual(
        vm_response.hostid,
        None,
        "Host id was null for vm %s" % vm_response)
    return vm, vm_response.hostid
def create_vm(self, pfrule=False, egress_policy=True, RR=False):
    self.create_network_offering(egress_policy, RR)
    # Create a network using the network offering created above
    self.debug("Creating network with network offering: %s" % self.network_offering.id)
    self.network = Network.create(
        self.apiclient,
        self.services["network"],
        accountid=self.account.name,
        domainid=self.account.domainid,
        networkofferingid=self.network_offering.id,
        zoneid=self.zone.id)
    self.debug("Created network with ID: %s" % self.network.id)
    self.debug("Deploying instance in the account: %s" % self.account.name)

    project = None
    self.virtual_machine = VirtualMachine.create(
        self.apiclient,
        self.services["virtual_machine"],
        accountid=self.account.name,
        domainid=self.domain.id,
        serviceofferingid=self.service_offering.id,
        mode=self.zone.networktype if pfrule else "basic",
        networkids=[str(self.network.id)],
        projectid=project.id if project else None)
    self.debug("Deployed instance %s in account: %s" %
               (self.virtual_machine.id, self.account.name))

    # If the VM is deployed in the Error state, the test case fails
    self.vm_list = list_virtual_machines(self.apiclient, id=self.virtual_machine.id)
    self.assertEqual(
        validateList(self.vm_list)[0],
        PASS,
        "vm list validation failed, vm list is %s" % self.vm_list)
    self.assertEqual(
        str(self.vm_list[0].state).lower(),
        "running",
        "VM state should be running, it is %s" % self.vm_list[0].state)

    self.public_ip = PublicIPAddress.create(
        self.apiclient,
        accountid=self.account.name,
        zoneid=self.zone.id,
        domainid=self.account.domainid,
        networkid=self.network.id)

    # Open up the firewall port for SSH
    FireWallRule.create(
        self.apiclient,
        ipaddressid=self.public_ip.ipaddress.id,
        protocol=self.services["natrule"]["protocol"],
        cidrlist=["0.0.0.0/0"],
        startport=self.services["natrule"]["publicport"],
        endport=self.services["natrule"]["publicport"])

    self.debug("Creating NAT rule for VM ID: %s" % self.virtual_machine.id)
    # Create a NAT rule
    NATRule.create(
        self.apiclient,
        self.virtual_machine,
        self.services["natrule"],
        self.public_ip.ipaddress.id)
    return
def create_vm_in_aff_grps(self, api_client=None, ag_list=[], projectid=None):
    self.debug('Creating VM in AffinityGroups=%s' % ag_list)
    if api_client is None:
        api_client = self.api_client
    if projectid is None:
        projectid = self.project.id

    vm = VirtualMachine.create(
        api_client,
        self.services["virtual_machine"],
        projectid=projectid,
        templateid=self.template.id,
        serviceofferingid=self.service_offering.id,
        affinitygroupnames=ag_list)
    self.debug('Created VM=%s in Affinity Group=%s' % (vm.id, tuple(ag_list)))

    list_vm = list_virtual_machines(api_client, id=vm.id, projectid=projectid)
    self.assertEqual(
        isinstance(list_vm, list),
        True,
        "Check list response returns a valid list")
    self.assertNotEqual(
        len(list_vm),
        0,
        "Check VM available in List Virtual Machines")
    vm_response = list_vm[0]
    self.assertEqual(vm_response.state, 'Running', msg="VM is not in Running state")
    self.assertEqual(vm_response.projectid, projectid, msg="VM is not in project")
    return vm, vm_response.hostid
def test_08_removeNic_in_sharedNetwork_scope_all_as_domain_parentAdmin(self):
    """Validate that the parent domain admin is able to remove a NIC
    which was added by a child domain user
    """
    self.api_client.connection.apiKey = self.user_d1_apikey
    self.api_client.connection.securityKey = self.user_d1_secretkey
    self.debug("Removing NIC of shared Network as user d1")

    vm_list = list_virtual_machines(self.api_client, id=self.vmvpc1.id)
    vm_list_validation_result = validateList(vm_list)
    self.assertEqual(
        vm_list_validation_result[0],
        PASS,
        "vm list validation failed due to %s" % vm_list_validation_result[2])
    self.debug("virtual machine nics: %s" % vm_list[0].nic)

    for nic in vm_list[0].nic:
        if nic.networkid == self.shared_network_all.id:
            reqNic = nic

    self.vmvpc1.remove_nic(self.api_client, reqNic.id)
    if not self.verify_nic(self.shared_network_all, self.vmvpc1):
        self.debug("virtual machine has no NIC on SharedNetwork: %s" %
                   self.shared_network_all.name)
    else:
        self.fail("network %s NIC is present in the virtual Machine %s" %
                  (self.shared_network_all.name, self.vmvpc1.id))
def create_another_vm(self):
    self.debug("Deploying instance in the account: %s and network: %s" %
               (self.account.name, self.network.id))

    project = None
    self.virtual_machine1 = VirtualMachine.create(
        self.apiclient,
        self.services["virtual_machine"],
        accountid=self.account.name,
        domainid=self.domain.id,
        serviceofferingid=self.service_offering.id,
        mode=self.zone.networktype,
        networkids=[str(self.network.id)],
        projectid=project.id if project else None)
    self.debug("Deployed instance %s in account: %s" %
               (self.virtual_machine1.id, self.account.name))

    # If the VM is deployed in the Error state, the test case fails
    self.vm_list = list_virtual_machines(self.apiclient, id=self.virtual_machine1.id)
    self.assertEqual(
        validateList(self.vm_list)[0],
        PASS,
        "vm list validation failed, vm list is %s" % self.vm_list)
    self.assertEqual(
        str(self.vm_list[0].state).lower(),
        'running',
        "VM state should be running, it is %s" % self.vm_list[0].state)
def MigrateRootVolume(self, vm, destinationHost, expectexception=False):
    """Migrate the given VM's root volume to the destination host.

    Inputs:
    1. vm: VM to be migrated
    2. destinationHost: destination host to which the VM should get migrated
    3. expectexception: whether an exception is expected during migration
    """
    if expectexception:
        with self.assertRaises(Exception):
            VirtualMachine.migrate(
                vm,
                self.apiclient,
                hostid=destinationHost.id)
    else:
        VirtualMachine.migrate(
            vm,
            self.apiclient,
            hostid=destinationHost.id)

        migrated_vm_response = list_virtual_machines(self.apiclient, id=vm.id)
        self.assertEqual(
            isinstance(migrated_vm_response, list),
            True,
            "Check list virtual machines response for valid list")
        self.assertNotEqual(
            migrated_vm_response,
            None,
            "Check if virtual machine exists in ListVirtualMachines")

        migrated_vm = migrated_vm_response[0]
        vm_list = VirtualMachine.list(self.apiclient, id=migrated_vm.id)
        self.assertEqual(
            vm_list[0].hostid,
            destinationHost.id,
            "Check VM is on the destination host")
    return
def test_01_native_to_native_network_migration(self):
    """Verify migration for an isolated network, native only
    1. create native non-persistent isolated network
    2. migrate to other non-persistent isolated network
    3. migrate back to first native non-persistent network
    4. deploy VM in non-persistent isolated network
    5. migrate to native persistent isolated network
    6. migrate back to native non-persistent network
    """
    isolated_network = Network.create(
        self.apiclient,
        self.test_data["isolated_network"],
        accountid=self.account.name,
        domainid=self.account.domainid,
        networkofferingid=self.network_offering_all.id,
        zoneid=self.zone.id)

    self.migrate_network(
        self.network_offering_nouserdata,
        isolated_network, resume=False)
    self.migrate_network(
        self.network_offering_all,
        isolated_network, resume=False)

    deployVmResponse = VirtualMachine.create(
        self.apiclient,
        services=self.test_data["virtual_machine_userdata"],
        accountid=self.account.name,
        domainid=self.account.domainid,
        serviceofferingid=self.service_offering.id,
        networkids=[str(isolated_network.id)],
        templateid=self.template.id,
        zoneid=self.zone.id)
    vms = list_virtual_machines(
        self.apiclient,
        account=self.account.name,
        domainid=self.account.domainid,
        id=deployVmResponse.id)
    self.assertTrue(
        len(vms) > 0,
        "There are no Vms deployed in the account %s" % self.account.name)
    vm = vms[0]
    self.assertTrue(vm.id == str(deployVmResponse.id), "Vm deployed is different from the test")
    self.assertTrue(vm.state == "Running", "VM is not in Running state")

    self.migrate_network(
        self.network_offering_nouserdata,
        isolated_network, resume=False)
    self.migrate_network(
        self.network_offering_all,
        isolated_network, resume=False)
def _start_vm(cls, vm):
    vm_for_check = list_virtual_machines(cls.apiClient, id=vm.id)[0]

    if vm_for_check.state == VirtualMachine.STOPPED:
        vm.start(cls.apiClient)

        # For KVM, just give it 90 seconds to boot up.
        if TestData.hypervisor_type == TestData.kvm:
            time.sleep(90)
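# The fixed 90-second sleep above could instead poll the VM state. Below is a
# minimal sketch of such a wait helper, assuming marvin's
# list_virtual_machines (used throughout this section); the helper name and
# the timeout/interval defaults are illustrative, not part of the original
# suite.
import time

from marvin.lib.common import list_virtual_machines


def _wait_for_vm_state(apiclient, vm_id, state="Running", timeout=300, interval=10):
    """Poll listVirtualMachines until the VM reaches the given state."""
    while timeout > 0:
        vms = list_virtual_machines(apiclient, id=vm_id)
        if vms and vms[0].state == state:
            return vms[0]
        time.sleep(interval)
        timeout -= interval
    raise Exception("VM %s did not reach state %s in time" % (vm_id, state))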
def test_deployvm_multinic(self):
    """Test userdata update when the non-default NIC is without userdata for deploy and update"""
    self.userdata = base64.b64encode(self.userdata)
    network1 = Network.create(
        self.apiclient,
        self.test_data["isolated_network"],
        accountid=self.account.name,
        domainid=self.account.domainid,
        networkofferingid=self.network_offering_all.id,
        zoneid=self.zone.id)
    self.test_data["network_without_acl"]["netmask"] = "255.255.255.128"
    network2 = Network.create(
        self.apiclient,
        self.test_data["network_without_acl"],
        accountid=self.account.name,
        domainid=self.account.domainid,
        networkofferingid=self.network_offering_nouserdata.id,
        gateway="10.2.1.1",
        zoneid=self.zone.id)
    deployVmResponse = VirtualMachine.create(
        self.apiclient,
        services=self.test_data["virtual_machine_userdata"],
        accountid=self.account.name,
        domainid=self.account.domainid,
        serviceofferingid=self.service_offering.id,
        networkids=[str(network1.id), str(network2.id)],
        templateid=self.template.id,
        zoneid=self.zone.id)
    vms = list_virtual_machines(
        self.apiclient,
        account=self.account.name,
        domainid=self.account.domainid,
        id=deployVmResponse.id)
    self.assertTrue(
        len(vms) > 0,
        "There are no Vms deployed in the account %s" % self.account.name)
    vm = vms[0]
    self.assertTrue(vm.id == str(deployVmResponse.id), "Vm deployed is different from the test")
    self.assertTrue(vm.state == "Running", "VM is not in Running state")

    try:
        updateresponse = deployVmResponse.update(self.apiclient, userdata=self.userdata)
    except Exception as e:
        self.fail("Failed to update userdata: %s" % e)
    self.debug("virtual machine update response is: %s" % updateresponse)
def test_24_attach_volume_to_vm_with_vc_policy_uuid(self):
    self.virtual_machine.attach_volume(self.apiclient, self.volume4)
    vm = list_virtual_machines(self.apiclient, id=self.virtual_machine.id)
    vm_tags = vm[0].tags
    volumes = list_volumes(
        self.apiclient,
        virtualmachineid=self.virtual_machine.id)
    for v in volumes:
        self.helper.vc_policy_tags_global_id(v, vm_tags, False)
def verify_vm(self, vmid):
    list_vm = list_virtual_machines(
        self.userapiclient,
        account=self.account.name,
        domainid=self.account.domainid,
        id=vmid)
    self.assertEqual(
        validateList(list_vm)[0],
        PASS,
        "Check List vm response for vmid: %s" % vmid)
    self.assertGreater(
        len(list_vm),
        0,
        "Check the list vm response for vm id: %s" % vmid)
    vm = list_vm[0]
    self.assertEqual(
        vm.id,
        str(vmid),
        "Vm deployed is different from the test")
    self.assertEqual(vm.state, "Running", "VM is not in Running state")
    self.debug("VM got created successfully %s" % vmid)
def test_03_create_vm_snapshot_vc_policy_tag(self):
    """Test to create VM snapshots"""
    volume_attached = self.virtual_machine.attach_volume(
        self.apiclient, self.volume)

    volumes = list_volumes(
        self.apiclient,
        virtualmachineid=self.virtual_machine.id,
        listall=True)
    vm = list_virtual_machines(
        self.apiclient,
        id=self.virtual_machine.id,
        listall=True)
    vm_tags = vm[0].tags
    self.vc_policy_tags(volumes, vm_tags, vm)

    self.assertEqual(volume_attached.id, self.volume.id, "Is not the same volume")
    try:
        # Log in to the VM and write data to the file system
        ssh_client = self.virtual_machine.get_ssh_client()

        cmds = [
            "echo %s > %s/%s" % (self.random_data_0, self.test_dir, self.random_data),
            "sync",
            "sleep 1",
            "sync",
            "sleep 1",
            "cat %s/%s" % (self.test_dir, self.random_data)
        ]

        for c in cmds:
            self.debug(c)
            result = ssh_client.execute(c)
            self.debug(result)

    except Exception:
        self.fail("SSH failed for Virtual machine: %s" %
                  self.virtual_machine.ipaddress)
    self.assertEqual(
        self.random_data_0,
        result[0],
        "Check the random data has been written to the temp file!")

    time.sleep(30)
    MemorySnapshot = False
    vm_snapshot = VmSnapshot.create(
        self.apiclient,
        self.virtual_machine.id,
        MemorySnapshot,
        "TestSnapshot",
        "Display Text")
    self.assertEqual(
        vm_snapshot.state,
        "Ready",
        "Check the snapshot of vm is ready!")
    return
def test_01_delete_all_virtual_machines(self):
    """Test to delete VMs"""
    virtual_machines = list_virtual_machines(self.apiclient)
    for v in virtual_machines:
        try:
            cmd = destroyVirtualMachine.destroyVirtualMachineCmd()
            cmd.id = v.id
            cmd.expunge = True
            self.apiclient.destroyVirtualMachine(cmd)
        except Exception:
            continue
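# The destroy loop above issues expunge requests but does not wait for them to
# finish. A minimal sketch of a follow-up wait, mirroring the retry loop used
# by the tearDown later in this section (listVirtualMachines returns None once
# the VM is expunged); the retry count and interval are illustrative.
import time

from marvin.lib.common import list_virtual_machines


def _wait_for_expunge(apiclient, vm_id, retries=10, interval=60):
    """Return True once listVirtualMachines no longer reports the VM."""
    for _ in range(retries):
        if list_virtual_machines(apiclient, id=vm_id) is None:
            return True
        time.sleep(interval)
    return False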
def test_03_attach_volume_to_vm_with_vc_policy_uuid(self):
    self.virtual_machine.attach_volume(self.apiclient, self.volume4)
    vm = list_virtual_machines(self.apiclient, id=self.virtual_machine.id)
    vm_tags = vm[0].tags
    volumes = list_volumes(
        self.apiclient,
        virtualmachineid=self.virtual_machine.id)
    self.assertTrue(len(volumes) == 3, "Volume length should be == 3")
    for v in volumes:
        self.helper.vc_policy_tags_global_id(v, vm_tags, False)
def test_01_create_vm_with_volume(self):
    """Create VM with attached volume and expunge VM"""

    #######################################
    # STEP 1: Create VM and attach volume #
    #######################################
    test_virtual_machine = VirtualMachine.create(
        self.apiClient,
        self.testdata[TestData.virtualMachine2],
        accountid=self.account.name,
        zoneid=self.zone.id,
        serviceofferingid=self.compute_offering.id,
        templateid=self.template.id,
        domainid=self.domain.id,
        startvm=False)

    TestScaleIOVolumes._start_vm(test_virtual_machine)

    self.volume = test_virtual_machine.attach_volume(
        self.apiClient,
        self.volume)
    self.attached = True
    vm = self._get_vm(test_virtual_machine.id)

    self.assertEqual(
        self.volume.virtualmachineid,
        vm.id,
        TestScaleIOVolumes._volume_vm_id_and_vm_id_do_not_match_err_msg)
    self.assertEqual(
        vm.state.lower(),
        'running',
        TestScaleIOVolumes._vm_not_in_running_state_err_msg)

    #######################################
    # STEP 2: Destroy and Expunge VM      #
    #######################################
    test_virtual_machine.delete(self.apiClient, True)
    self.attached = False

    vol = self._get_volume(self.volume.id)
    self.assertEqual(vol.virtualmachineid, None, "Check if attached to virtual machine")
    self.assertEqual(vol.vmname, None, "Check if VM was expunged")

    list_virtual_machine_response = list_virtual_machines(
        self.apiClient,
        id=test_virtual_machine.id)
    self.assertEqual(list_virtual_machine_response, None, "Check if VM was actually expunged")
def create_vm(self, account, domain, isRunning=False, project=None,
              limit=None, pfrule=False, lbrule=None, natrule=None,
              volume=None, snapshot=False):
    # TODO: Implement pfrule/lbrule/natrule
    self.debug("Deploying instance in the account: %s" % account.name)
    self.virtual_machine = VirtualMachine.create(
        self.apiclient,
        self.services["virtual_machine"],
        accountid=account.name,
        domainid=domain.id,
        serviceofferingid=self.service_offering.id,
        mode=self.zone.networktype if pfrule else 'basic',
        projectid=project.id if project else None)
    self.debug("Deployed instance in account: %s" % account.name)
    list_virtual_machines(self.apiclient, id=self.virtual_machine.id)

    if snapshot:
        volumes = list_volumes(
            self.apiclient,
            virtualmachineid=self.virtual_machine.id,
            type='ROOT',
            listall=True)
        self.snapshot = Snapshot.create(
            self.apiclient,
            volumes[0].id,
            account=account.name,
            domainid=account.domainid)

    if volume:
        self.virtual_machine.attach_volume(self.apiclient, volume)

    if not isRunning:
        self.virtual_machine.stop(self.apiclient)

    self.cleanup.append(self.virtual_machine)
def test_06_remove_vcpolicy_tag_when_disk_detached(self):
    """Test remove vc-policy tag from disk detached from VM"""
    time.sleep(60)
    self.virtual_machine.detach_volume(
        self.apiclient, self.volume_2)

    vm = list_virtual_machines(
        self.apiclient,
        id=self.virtual_machine.id,
        listall=True)
    vm_tags = vm[0].tags
    volumes = list_volumes(
        self.apiclient,
        virtualmachineid=self.virtual_machine.id,
        listall=True)
    self.vc_policy_tags(volumes, vm_tags, vm)
def test_l2network_restart(self):
    """This test covers a few scenarios around restarting a network"""
    # Validate the following:
    # 1. Creates an L2 network
    # 2. Tries to restart a network with no VMs, which throws a 'not in the right state' error
    # 3. Deploys a VM
    # 4. Restarts the network without cleanup
    # 5. Restarts the network with cleanup
    try:
        self.l2_network.restart(self.apiclient, cleanup=True)
    except Exception:
        pass
    else:
        self.fail("Expected an exception to be thrown, failing")

    li_net = self.l2_network.list(self.apiclient)[0]
    self.assertEqual(li_net.state, 'Allocated', "Not the correct state")

    self.virtual_machine = VirtualMachine.create(
        self.apiclient,
        self.services["virtual_machine"],
        templateid=self.template.id,
        serviceofferingid=self.service_offering.id,
        networkids=self.l2_network.id,
        zoneid=self.zone.id)
    self.cleanup.insert(0, self.virtual_machine)

    list_vm = list_virtual_machines(self.apiclient, id=self.virtual_machine.id)
    self.assertEqual(
        isinstance(list_vm, list),
        True,
        "Check if virtual machine is present")

    self.l2_network.restart(self.apiclient, cleanup=False)
    li_net = self.l2_network.list(self.apiclient)[0]
    self.assertEqual(li_net.state, 'Implemented', "Not the correct state")

    self.l2_network.restart(self.apiclient, cleanup=True)
    li_net = self.l2_network.list(self.apiclient)[0]
    self.assertEqual(li_net.state, 'Implemented', "Not the correct state")
    return
def tearDown(self):
    try:
        if self.egressruleid:
            self.debug('remove egress rule id=%s' % self.egressruleid)
            self.deleteEgressRule()

        self.debug("Cleaning up the resources")
        # The components below are not part of the generic cleanup list, to
        # enforce ordering and to clean up the network
        try:
            for vm in self.cleanup_vms:
                if str(vm.state).lower() != "error":
                    vm.delete(self.api_client)
        except Exception as e:
            self.fail("Warning: Exception during virtual machines cleanup : %s" % e)

        # Wait for VMs to expunge
        wait_for_cleanup(self.api_client, ["expunge.delay", "expunge.interval"])

        if len(self.cleanup_vms) > 0:
            retriesCount = 10
            while True:
                vms = list_virtual_machines(self.api_client, id=self.virtual_machine.id)
                if vms is None:
                    break
                elif retriesCount == 0:
                    self.fail("Failed to expunge vm even after 10 minutes")
                time.sleep(60)
                retriesCount -= 1

        try:
            for network in self.cleanup_networks:
                network.delete(self.api_client)
        except Exception as e:
            self.fail("Warning: Exception during networks cleanup : %s" % e)

        self.debug("Sleep for Network cleanup to complete.")
        wait_for_cleanup(self.apiclient, ["network.gc.wait", "network.gc.interval"])

        cleanup_resources(self.apiclient, reversed(self.cleanup))
        self.debug("Cleanup complete!")
    except Exception as e:
        self.fail("Warning! Cleanup failed: %s" % e)
def migrateVmWithVolumes(self, apiclient, vm, destinationHost, volumes, pool):
    """Migrate a VM and its volumes using the migrate virtual machine with volume API.

    INPUTS:
    1. vm -> virtual machine object
    2. destinationHost -> the host to which the VM will be migrated
    3. volumes -> list of volumes which are to be migrated
    4. pool -> destination storage pool
    """
    vol_pool_map = {vol.id: pool.id for vol in volumes}

    cmd = migrateVirtualMachineWithVolume.migrateVirtualMachineWithVolumeCmd()
    cmd.hostid = destinationHost.id
    cmd.migrateto = []
    cmd.virtualmachineid = vm.id
    for volume, pool1 in vol_pool_map.items():
        cmd.migrateto.append({'volume': volume, 'pool': pool1})
    apiclient.migrateVirtualMachineWithVolume(cmd)

    vm.getState(apiclient, "Running")

    # Check the VM's host and the volumes' storage post migration
    migrated_vm_response = list_virtual_machines(apiclient, id=vm.id)
    assert isinstance(migrated_vm_response, list), \
        "Check list virtual machines response for valid list"
    assert migrated_vm_response[0].hostid == destinationHost.id, \
        "VM did not migrate to the specified host"

    for vol in volumes:
        migrated_volume_response = list_volumes(
            apiclient,
            virtualmachineid=migrated_vm_response[0].id,
            name=vol.name,
            listall=True)
        assert isinstance(migrated_volume_response, list), \
            "Check list volumes response for valid list"
        assert migrated_volume_response[0].storageid == pool.id, \
            "Volume did not migrate to the specified pool"
        assert str(migrated_volume_response[0].state).lower() == 'ready', \
            "Check migrated volume is in Ready state"

    return migrated_vm_response[0]
def test_27_vc_policy_to_volume_and_vm_with_glid(self):
    vm = VirtualMachine.create(
        self.apiclient,
        {"name": "StorPool-%s" % uuid.uuid4()},
        zoneid=self.zone.id,
        templateid=self.template.id,
        serviceofferingid=self.service_offering.id,
        hypervisor=self.hypervisor,
        rootdisksize=10)
    Tag.create(
        self.apiclient,
        resourceIds=vm.id,
        resourceType='UserVm',
        tags={'vc-policy': 'testing_vc-policy'})
    vm_list = list_virtual_machines(self.apiclient, id=vm.id)
    vm_tags = vm_list[0].tags
    self._cleanup.append(vm)
def test_01_set_vcpolicy_tag_to_vm_with_attached_disks(self):
    """Test set vc-policy tag to VM with one attached disk"""
    self.virtual_machine.attach_volume(
        self.apiclient, self.volume_1)
    Tag.create(
        self.apiclient,
        resourceIds=self.virtual_machine.id,
        resourceType='UserVm',
        tags={'vc-policy': 'testing_vc-policy'})

    vm = list_virtual_machines(
        self.apiclient,
        id=self.virtual_machine.id,
        listall=True)
    vm_tags = vm[0].tags
    volumes = list_volumes(
        self.apiclient,
        virtualmachineid=self.virtual_machine.id,
        listall=True)
    self.vc_policy_tags(volumes, vm_tags, vm)
def verify_nic(self, network, vm):
    """Verify whether the required NIC is present in the VM"""
    self.debug(
        "Going to verify if %s Network nic is present in virtual machine "
        "%s" % (network.name, vm.id))
    vm_list = list_virtual_machines(self.api_client, id=vm.id)
    vm_list_validation_result = validateList(vm_list)
    self.assertEqual(
        vm_list_validation_result[0],
        PASS,
        "vm list validation failed due to %s" % vm_list_validation_result[2])
    self.debug("virtual machine nics: %s" % vm_list[0].nic)
    # Filter the virtual machine's NICs by network
    nics = [x for x in vm_list[0].nic if x.networkid == network.id]
    self.debug("Filtered nics list: %s:" % nics)
    return len(nics) == 1
def test_deployvm_userdata(self):
    """Test userdata as GET, size > 2k"""
    deployVmResponse = VirtualMachine.create(
        self.apiClient,
        services=self.services["virtual_machine"],
        accountid=self.account.name,
        domainid=self.account.domainid,
        serviceofferingid=self.service_offering.id,
        templateid=self.template.id,
        zoneid=self.zone.id)
    vms = list_virtual_machines(
        self.apiClient,
        account=self.account.name,
        domainid=self.account.domainid,
        id=deployVmResponse.id)
    self.assertTrue(
        len(vms) > 0,
        "There are no Vms deployed in the account %s" % self.account.name)
    vm = vms[0]
    self.assertTrue(vm.id == str(deployVmResponse.id), "Vm deployed is different from the test")
    self.assertTrue(vm.state == "Running", "VM is not in Running state")
def test_02_NetworkGarbageCollection(self):
    """Test network garbage collection"""
    # Validate the following
    # 1. wait for router to start and guest network to be created
    #    a. listRouters account=user, domainid=1 (router state=Running)
    #    b. listNetworks account=user domainid=1 (network state=Implemented)
    #    c. listVirtualMachines account=user domainid=1 (VM states=Running)
    # 4. stopVirtualMachines (stop all VMs in this account)
    # 5. wait for VMs to stop-listVirtualMachines account=user, domainid=1
    #    (Both VM states = Stopped)
    # 6. wait for network.gc.interval*2 seconds (600s)
    # 7. listRouters account=user, domainid=1

    routers = list_routers(
        self.apiclient,
        account=self.account.name,
        domainid=self.account.domainid)
    self.assertEqual(
        isinstance(routers, list),
        True,
        "Check for list routers response return valid data")
    self.assertNotEqual(
        len(routers),
        0,
        "Check list router response")

    # Router associated with account should be in running state
    timeout = 180
    router = routers[0]
    self.debug("Router ID: %s & Router state: %s" % (router.id, router.state))
    self.debug("Wait for %s secs max for router to reach Running state" % timeout)
    while timeout:
        time.sleep(60)
        routers = list_routers(
            self.apiclient,
            account=self.account.name,
            domainid=self.account.domainid,
            id=router.id)
        router = routers[0]
        if router.state == 'Running':
            break
        timeout = timeout - 60
        if timeout == 0:
            self.assertEqual(router.state, 'Running', "Router not in Running state")

    # Network state associated with account should be 'Implemented'
    networks = list_networks(
        self.apiclient,
        account=self.account.name,
        domainid=self.account.domainid,
        type='Isolated')
    self.assertEqual(
        isinstance(networks, list),
        True,
        "Check for list networks response return valid data")
    self.assertNotEqual(
        len(networks),
        0,
        "Check list networks response")
    # Check if network is in 'Implemented' state
    for network in networks:
        self.debug("Network ID: %s & Network state: %s" % (network.id, network.state))
        self.assertIn(
            network.state,
            ['Implemented', 'Allocated'],
            "Check list network response for network state")

    # VM state associated with account should be 'Running'
    virtual_machines = list_virtual_machines(
        self.apiclient,
        account=self.account.name,
        domainid=self.account.domainid)
    self.assertEqual(
        isinstance(virtual_machines, list),
        True,
        "Check for list virtual machines response return valid data")
    self.assertNotEqual(
        len(virtual_machines),
        0,
        "Check list virtual machines response")
    for virtual_machine in virtual_machines:
        self.debug("VM ID: %s & VM state: %s" % (virtual_machine.id, virtual_machine.state))
        self.assertEqual(
            virtual_machine.state,
            'Running',
            "Check list VM response for Running state")
        # Stop virtual machine
        cmd = stopVirtualMachine.stopVirtualMachineCmd()
        cmd.id = virtual_machine.id
        self.apiclient.stopVirtualMachine(cmd)

    gcinterval = list_configurations(
        self.apiclient,
        name='network.gc.interval')
    self.assertEqual(
        isinstance(gcinterval, list),
        True,
        "Check for list intervals response return valid data")
    self.debug("network.gc.interval: %s" % gcinterval[0].value)

    gcwait = list_configurations(
        self.apiclient,
        name='network.gc.wait')
    self.assertEqual(
        isinstance(gcwait, list),
        True,
        "Check for list intervals response return valid data")
    self.debug("network.gc.wait: %s" % gcwait[0].value)

    total_wait = int(gcinterval[0].value) + int(gcwait[0].value)
    # Router is stopped after (network.gc.interval * 2) time. Wait for
    # (network.gc.interval + network.gc.wait) * 2 for the router to move
    # to 'Stopped'
    time.sleep(total_wait * 2)

    routers = list_routers(
        self.apiclient,
        account=self.account.name,
        domainid=self.account.domainid)
    self.assertEqual(
        isinstance(routers, list),
        True,
        "Check for list routers response return valid data")
    self.assertNotEqual(
        len(routers),
        0,
        "Check list router response")
    for router in routers:
        self.debug("Router ID: %s & Router state: %s" % (router.id, router.state))
        self.assertEqual(
            router.state,
            'Stopped',
            "Check list router response for router state")

    # Cleanup Vm_2 - not required for further tests
    self.cleanup.append(self.vm_2)
    return
def test_03_RouterStartOnVmDeploy(self):
    """Test router start on VM deploy"""
    # Validate the following
    # 1. deployVirtualMachine in the account
    # 2. listVirtualMachines account=user, domainid=1
    # 3. when listVirtualMachines reports the userVM to be in state=Running
    # 4. listRouters should report router to have come back to the "Running" state
    # 5. All other VMs in the account should remain in "Stopped" state

    # Stop all pre-existing virtual machines if they are in 'Running' state
    virtual_machines = list_virtual_machines(
        self.apiclient,
        account=self.account.name,
        domainid=self.account.domainid)
    self.assertEqual(
        isinstance(virtual_machines, list),
        True,
        "Check for list virtual machines response return valid data")
    self.assertNotEqual(
        len(virtual_machines),
        0,
        "Check list virtual machines response")
    for virtual_machine in virtual_machines:
        self.debug("VM ID: %s & VM state: %s" % (virtual_machine.id, virtual_machine.state))
        if virtual_machine.state == 'Running':
            # Stop virtual machine
            cmd = stopVirtualMachine.stopVirtualMachineCmd()
            cmd.id = virtual_machine.id
            self.apiclient.stopVirtualMachine(cmd)

    vm = VirtualMachine.create(
        self.apiclient,
        self.services["virtual_machine"],
        templateid=self.template.id,
        accountid=self.account.name,
        domainid=self.account.domainid,
        serviceofferingid=self.service_offering.id)
    self.debug("Deployed a VM with ID: %s" % vm.id)

    virtual_machines = list_virtual_machines(
        self.apiclient,
        id=vm.id,
        account=self.account.name,
        domainid=self.account.domainid)
    self.assertEqual(
        isinstance(virtual_machines, list),
        True,
        "Check for list virtual machines response return valid data")
    self.assertNotEqual(
        len(virtual_machines),
        0,
        "Check list virtual machines response")
    # VM state should be 'Running'
    for virtual_machine in virtual_machines:
        self.assertEqual(
            virtual_machine.state,
            'Running',
            "Check list VM response for Running state")

    routers = list_routers(
        self.apiclient,
        account=self.account.name,
        domainid=self.account.domainid)
    self.assertEqual(
        isinstance(routers, list),
        True,
        "Check for list routers response return valid data")
    self.assertNotEqual(
        len(routers),
        0,
        "Check list router response")
    # Routers associated with account should be 'Running' after deployment of VM
    for router in routers:
        self.debug("Router ID: %s & Router state: %s" % (router.id, router.state))
        self.assertEqual(
            router.state,
            'Running',
            "Check list router response for router state")

    # All other VMs (VM_1) should be in 'Stopped' state
    virtual_machines = list_virtual_machines(
        self.apiclient,
        id=self.vm_1.id,
        account=self.account.name,
        domainid=self.account.domainid)
    self.assertEqual(
        isinstance(virtual_machines, list),
        True,
        "Check for list VMs response return valid data")
    self.assertNotEqual(
        len(virtual_machines),
        0,
        "Check list virtual machines response")
    for virtual_machine in virtual_machines:
        self.debug("VM ID: %s & VM state: %s" % (virtual_machine.id, virtual_machine.state))
        self.assertEqual(
            virtual_machine.state,
            'Stopped',
            "Check list VM response for Stopped state")
    return
def test_reboot_router(self):
    """Test for reboot router"""
    # Validate the following
    # 1. Post restart, PF and LB rules should still function
    # 2. Verify that ssh into the virtual machine
    #    still works through the sourceNAT IP

    # Retrieve router for the user account
    self.debug("Public IP: %s" % self.vm_1.ssh_ip)
    self.debug("Public IP: %s" % self.public_ip.ipaddress.ipaddress)
    routers = list_routers(
        self.apiclient,
        account=self.account.name,
        domainid=self.account.domainid)
    self.assertEqual(
        isinstance(routers, list),
        True,
        "Check list routers returns a valid list")

    router = routers[0]
    self.debug("Rebooting the router (ID: %s)" % router.id)
    cmd = rebootRouter.rebootRouterCmd()
    cmd.id = router.id
    self.apiclient.rebootRouter(cmd)

    # Poll listVirtualMachines until the VM is back in the Running state
    timeout = self.services["timeout"]
    while True:
        time.sleep(self.services["sleep"])
        list_vm_response = list_virtual_machines(
            self.apiclient,
            id=self.vm_1.id)
        if isinstance(list_vm_response, list):
            vm = list_vm_response[0]
            if vm.state == 'Running':
                self.debug("VM state: %s" % vm.state)
                break
        if timeout == 0:
            raise Exception(
                "Failed to find VM (ID: %s) in Running state after router reboot" % vm.id)
        timeout = timeout - 1

    # We should be able to SSH after a successful reboot
    try:
        self.debug("SSH into VM (ID : %s ) after reboot" % self.vm_1.id)
        SshClient(
            self.public_ip.ipaddress.ipaddress,
            self.services["natrule"]["publicport"],
            self.vm_1.username,
            self.vm_1.password)
    except Exception as e:
        self.fail(
            "SSH Access failed for %s: %s" %
            (self.public_ip.ipaddress.ipaddress, e))
    return
def _get_vm(self, vm_id):
    list_vms_response = list_virtual_machines(self.apiClient, id=vm_id)
    sf_util.check_list(
        list_vms_response,
        1,
        self,
        TestVolumes._should_only_be_one_vm_in_list_err_msg)
    return list_vms_response[0]
def test_07_destroy_expunge_VM_with_volume(self):
    """Destroy and expunge VM with attached volume"""

    #######################################
    # STEP 1: Create VM and attach volume #
    #######################################
    test_virtual_machine = VirtualMachine.create(
        self.apiClient,
        self.testdata[TestData.virtualMachine2],
        accountid=self.account.name,
        zoneid=self.zone.id,
        serviceofferingid=self.compute_offering.id,
        templateid=self.template.id,
        domainid=self.domain.id,
        startvm=True)

    self.volume = test_virtual_machine.attach_volume(self.apiClient, self.volume)
    self.attached = True

    vol = self._check_and_get_cs_volume(
        self.volume.id,
        self.testdata[TestData.volume_1][TestData.diskName])
    vm = self._get_vm(test_virtual_machine.id)

    self.assertEqual(
        vol.virtualmachineid,
        vm.id,
        TestVolumes._volume_vm_id_and_vm_id_do_not_match_err_msg)
    self.assertEqual(
        vm.state.lower(),
        "running",
        TestVolumes._vm_not_in_running_state_err_msg)

    sf_account_id = sf_util.get_sf_account_id(
        self.cs_api,
        self.account.id,
        self.primary_storage.id,
        self,
        TestVolumes._sf_account_id_should_be_non_zero_int_err_msg)

    sf_volume_size = sf_util.get_volume_size_with_hsr(self.cs_api, self.volume, self)
    self._verify_hsr(
        self.disk_offering.disksize,
        self.disk_offering.hypervisorsnapshotreserve,
        sf_volume_size)

    sf_vag_id = sf_util.get_vag_id(self.cs_api, self.cluster.id, self.primary_storage.id, self)
    sf_iscsi_name = sf_util.get_iqn(self.cs_api, self.volume, self)

    sf_volumes = self._get_active_sf_volumes(sf_account_id)
    sf_volume = sf_util.check_and_get_sf_volume(sf_volumes, vol.name, self)

    sf_util.check_size_and_iops(sf_volume, vol, sf_volume_size, self)
    sf_util.check_vag(sf_volume, sf_vag_id, self)

    self._check_xen_sr(sf_iscsi_name)

    #######################################
    # STEP 2: Destroy and Expunge VM      #
    #######################################
    test_virtual_machine.delete(self.apiClient, True)
    self.attached = False

    vol = self._check_and_get_cs_volume(
        self.volume.id,
        self.testdata[TestData.volume_1][TestData.diskName])
    self.assertEqual(vol.virtualmachineid, None, "Check if attached to virtual machine")
    self.assertEqual(vol.vmname, None, "Check if VM was expunged")

    list_virtual_machine_response = list_virtual_machines(
        self.apiClient,
        id=test_virtual_machine.id)
    self.assertEqual(list_virtual_machine_response, None, "Check if VM was actually expunged")

    sf_volumes = self._get_active_sf_volumes(sf_account_id)
    sf_volume = sf_util.check_and_get_sf_volume(sf_volumes, vol.name, self)

    sf_util.check_size_and_iops(sf_volume, vol, sf_volume_size, self)
    self.assertEqual(
        len(sf_volume["volumeAccessGroups"]),
        0,
        TestVolumes._volume_should_not_be_in_a_vag)

    self._check_xen_sr(sf_iscsi_name, False)
def test_05_change_ip_from_different_Subnet_public_shared(self):
    """Validate that the IP of a VM can be changed to a different subnet IP
    for shared_network_scope_all
    """
    # Add a subnet with the same cidr
    self.debug("Adding subnet of same cidr to shared Network scope as all")
    subnet1 = self.add_subnet_verify(
        self.shared_network_all,
        self.nuagenetworkdata["publiciprange1"])
    self.test_data["virtual_machine"]["ipaddress"] = \
        self.nuagenetworkdata["publiciprange1"]["startip"]
    vm_1 = self.create_VM(
        self.shared_network_all, account=self.account_d11a)

    # Verify shared Network and VM in VSD
    self.verify_vsd_shared_network(
        self.account_d11a.domainid,
        self.shared_network_all,
        gateway=self.nuagenetworkdata["publiciprange1"]["gateway"])
    subnet_id = self.get_subnet_id(
        self.shared_network_all.id,
        self.nuagenetworkdata["publiciprange1"]["gateway"])
    self.verify_vsd_enterprise_vm(
        self.account_d11a.domainid,
        self.shared_network_all,
        vm_1,
        sharedsubnetid=subnet_id)

    # Add a subnet with a different cidr
    self.debug("Adding subnet of different cidr to shared Network scope as all")
    subnet2 = self.add_subnet_verify(
        self.shared_network_all,
        self.nuagenetworkdata["publiciprange2"])

    # Stop the VM to update the ipaddress
    try:
        vm_1.stop(self.api_client)
    except Exception as e:
        self.fail("Failed to stop the virtual instances, %s" % e)

    vm_list = list_virtual_machines(self.api_client, id=vm_1.id)
    nics = [x for x in vm_list[0].nic
            if x.networkid == self.shared_network_all.id]
    self.debug("Filtered nics list: %s:" % nics)

    cmd = updateVmNicIp.updateVmNicIpCmd()
    for x in vm_list[0].nic:
        cmd.nicid = x.id
    cmd.ipaddress = self.nuagenetworkdata["publiciprange2"]["startip"]
    self.api_client.updateVmNicIp(cmd)

    try:
        vm_1.start(self.api_client)
    except Exception as e:
        self.fail("Failed to start the virtual instances, %s" % e)

    vm_list = list_virtual_machines(self.api_client, id=vm_1.id)
    vm_list_validation_result = validateList(vm_list)
    self.assertEqual(
        vm_list_validation_result[0],
        PASS,
        "vm list validation failed due to %s" % vm_list_validation_result[2])

    # Verify shared Network and VM in VSD
    self.verify_vsd_shared_network(
        self.account_d11a.domainid,
        self.shared_network_all,
        gateway=self.nuagenetworkdata["publiciprange2"]["gateway"])
    subnet_id = self.get_subnet_id(
        self.shared_network_all.id,
        self.nuagenetworkdata["publiciprange2"]["gateway"])
    self.verify_vsd_enterprise_vm(
        self.account_d11a.domainid,
        self.shared_network_all,
        vm_list[0],
        sharedsubnetid=subnet_id)

    self.delete_VM(vm_1)
    self.delete_subnet_verify(self.shared_network_all, subnet1)
    self.delete_subnet_verify(self.shared_network_all, subnet2)
def test_DeployVmAntiAffinityGroup(self): """ Test DeployVM with a host anti-affinity group: deploy VM1 and VM2 in the same group and verify that the VMs are placed on separate hosts """ #deploy VM1 in affinity group created in setUp vm1 = VirtualMachine.create( self.apiclient, self.services["virtual_machine"], templateid=self.template.id, accountid=self.account.name, domainid=self.account.domainid, serviceofferingid=self.service_offering.id, affinitygroupnames=[self.ag.name] ) list_vm1 = list_virtual_machines( self.apiclient, id=vm1.id ) self.assertEqual( isinstance(list_vm1, list), True, "Check list response returns a valid list" ) self.assertNotEqual( len(list_vm1), 0, "Check VM available in List Virtual Machines" ) vm1_response = list_vm1[0] self.assertEqual( vm1_response.state, 'Running', msg="VM is not in Running state" ) host_of_vm1 = vm1_response.hostid #deploy VM2 in affinity group created in setUp vm2 = VirtualMachine.create( self.apiclient, self.services["virtual_machine"], templateid=self.template.id, accountid=self.account.name, domainid=self.account.domainid, serviceofferingid=self.service_offering.id, affinitygroupnames=[self.ag.name] ) list_vm2 = list_virtual_machines( self.apiclient, id=vm2.id ) self.assertEqual( isinstance(list_vm2, list), True, "Check list response returns a valid list" ) self.assertNotEqual( len(list_vm2), 0, "Check VM available in List Virtual Machines" ) vm2_response = list_vm2[0] self.assertEqual( vm2_response.state, 'Running', msg="VM is not in Running state" ) host_of_vm2 = vm2_response.hostid self.assertNotEqual(host_of_vm1, host_of_vm2, msg="Both VMs of affinity group %s are on the same host" % self.ag.name)
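# The hostid comparison above covers two VMs; for a host anti-affinity
# group of any size the invariant is that all host IDs are pairwise
# distinct. A hedged sketch of that generalized check (the helper name and
# failure message are illustrative):

def assert_vms_on_distinct_hosts(testcase, apiclient, vm_ids):
    """Fail the test if any two of the given running VMs share a host."""
    host_ids = [list_virtual_machines(apiclient, id=vm_id)[0].hostid
                for vm_id in vm_ids]
    testcase.assertEqual(len(host_ids), len(set(host_ids)),
                         "Anti-affinity violated: host ids %s" % host_ids)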
def test_01_AdvancedZoneRouterServices(self): """Test advanced zone router services """ # Validate the following: # 1. Verify that the services provided by this network are running # a. DNS # b. DHCP # c. Gateway # d. Firewall # e. LB # f. VPN # g. userdata # 2. wait for router to start and guest network to be created # a. listRouters account=user, domainid=1 (router state=Running) # b. listNetworks account=user domainid=1 # (network state=Implemented) # c. listVirtualMachines account=user domainid=1 (VM state=Running) # 3. listNetwork routers = list_routers( self.apiclient, account=self.account.name, domainid=self.account.domainid, ) self.assertEqual( isinstance(routers, list), True, "Check for list routers response return valid data" ) self.assertNotEqual( len(routers), 0, "Check list router response" ) for router in routers: self.debug("Router ID: %s & Router state: %s" % ( router.id, router.state )) self.assertEqual( router.state, 'Running', "Router state is not running but is %s" % router.state ) # Network state associated with account should be 'Implemented' networks = list_networks( self.apiclient, account=self.account.name, domainid=self.account.domainid, type='Isolated' ) self.assertEqual( isinstance(networks, list), True, "Check for list networks response return valid data" ) self.assertNotEqual( len(networks), 0, "Check list networks response" ) for network in networks: self.debug("Network ID: %s & Network state: %s" % ( network.id, network.state )) self.assertIn( network.state, ['Implemented', 'Allocated'], "Check list network response for network state" ) # VM state associated with account should be 'Running' virtual_machines = list_virtual_machines( self.apiclient, account=self.account.name, domainid=self.account.domainid ) self.assertEqual( isinstance(virtual_machines, list), True, "Check for list virtual machines response return valid data" ) self.assertNotEqual( len(virtual_machines), 0, "Check list virtual machines response" ) for virtual_machine in virtual_machines: self.assertEqual( virtual_machine.state, 'Running', "Check list VM response for Running state" ) self.debug("VM ID: %s & VM state: %s" % ( virtual_machine.id, virtual_machine.state )) # Check status of DNS, DHCP, Firewall, LB, VPN processes networks = list_networks( self.apiclient, account=self.account.name, domainid=self.account.domainid, type='Isolated' ) self.assertEqual( isinstance(networks, list), True, "Check for list networks response return valid data" ) self.assertNotEqual( len(networks), 0, "Check list networks response" ) # Load Balancer, Userdata, VPN, Firewall, Gateway, DNS processes should # be running for network in networks: self.assertEqual( 'Lb' in str(network.service), True, "Check Load balancing process in list networks" ) self.assertEqual( 'UserData' in str(network.service), True, "Check UserData service in list networks" ) self.assertEqual( 'Vpn' in str(network.service), True, "Check Vpn service in list networks" ) self.assertEqual( 'Firewall' in str(network.service), True, "Check Firewall service in list networks" ) self.assertEqual( 'Dns' in str(network.service), True, "Check Dns service in list networks" ) return
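# The six 'service in str(network.service)' assertions above repeat one
# pattern; looping over the expected names keeps the checks uniform and
# reports exactly which service is missing. A hedged sketch (the helper
# name and default service tuple are illustrative):

def assert_network_services(testcase, network,
                            expected=("Lb", "UserData", "Vpn", "Firewall", "Dns")):
    """Assert that every expected service name appears in network.service."""
    provided = str(network.service)
    for service in expected:
        # assertIn on strings performs a substring check, matching the
        # 'service in str(network.service)' idiom used by the test
        testcase.assertIn(service, provided,
                          "Service %s not offered by network %s" % (service, network.id))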
def setUpClass(cls): cls.testClient = super(TestVMPasswordEnabled, cls).getClsTestClient() cls.api_client = cls.testClient.getApiClient() cls.services = Services().services # Get Zone, Domain and templates domain = get_domain(cls.api_client) zone = get_zone(cls.api_client, cls.testClient.getZoneForTests()) cls.services['mode'] = zone.networktype template = get_template( cls.api_client, zone.id, cls.services["ostype"] ) # Set Zones and disk offerings cls.services["small"]["zoneid"] = zone.id cls.services["small"]["template"] = template.id # Create VMs, NAT Rules etc cls.account = Account.create( cls.api_client, cls.services["account"], domainid=domain.id ) cls.small_offering = ServiceOffering.create( cls.api_client, cls.services["service_offerings"]["small"] ) cls.virtual_machine = VirtualMachine.create( cls.api_client, cls.services["small"], accountid=cls.account.name, domainid=cls.account.domainid, serviceofferingid=cls.small_offering.id, mode=cls.services["mode"] ) networkid = cls.virtual_machine.nic[0].networkid cls.hypervisor = cls.testClient.getHypervisorInfo() # create egress rule to allow wget of my cloud-set-guest-password # script if zone.networktype.lower() == 'advanced': EgressFireWallRule.create( cls.api_client, networkid=networkid, protocol=cls.services["egress"]["protocol"], startport=cls.services["egress"]["startport"], endport=cls.services["egress"]["endport"], cidrlist=cls.services["egress"]["cidrlist"]) cls.virtual_machine.password = cls.services["small"]["password"] ssh = cls.virtual_machine.get_ssh_client() # below steps are required to get the new password from VR # (reset password) # http://cloudstack.org/dl/cloud-set-guest-password # Copy this file to /etc/init.d # chmod +x /etc/init.d/cloud-set-guest-password # chkconfig --add cloud-set-guest-password cmds = [ "cd /etc/init.d;wget http://people.apache.org/~tsp/cloud-set-guest-password", "chmod +x /etc/init.d/cloud-set-guest-password", "chkconfig --add cloud-set-guest-password", ] for c in cmds: ssh.execute(c) # Adding delay of 120 sec to avoid data loss due to timing issue time.sleep(120) # Stop virtual machine cls.virtual_machine.stop(cls.api_client) # Poll listVM to ensure VM is stopped properly timeout = cls.services["timeout"] while True: time.sleep(cls.services["sleep"]) # Ensure that VM is in stopped state list_vm_response = list_virtual_machines( cls.api_client, id=cls.virtual_machine.id ) if isinstance(list_vm_response, list): vm = list_vm_response[0] if vm.state == 'Stopped': break if timeout == 0: raise Exception( "Failed to stop VM (ID: %s)" % cls.virtual_machine.id) timeout = timeout - 1 list_volume = list_volumes( cls.api_client, virtualmachineid=cls.virtual_machine.id, type='ROOT', listall=True ) if isinstance(list_volume, list): cls.volume = list_volume[0] else: raise Exception( "Exception: Unable to find root volume for VM: %s" % cls.virtual_machine.id) cls.services["template"]["ostype"] = cls.services["ostype"] cls.services["template"]["ispublic"] = True # Create templates for Edit, Delete & update permissions testcases cls.pw_enabled_template = Template.create( cls.api_client, cls.services["template"], cls.volume.id, ) # Delete the VM - No longer needed cls.virtual_machine.delete(cls.api_client, expunge=True) cls.services["small"]["template"] = cls.pw_enabled_template.id cls.vm = VirtualMachine.create( cls.api_client, cls.services["small"], accountid=cls.account.name, domainid=cls.account.domainid, serviceofferingid=cls.small_offering.id, mode=cls.services["mode"] ) cls._cleanup = [ cls.small_offering, cls.pw_enabled_template, cls.account ]
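# The setUpClass above hand-rolls a poll-until-Stopped loop; the same
# pattern recurs whenever a VM changes state in these suites. A hedged,
# reusable sketch (the function name, retry count, and interval are
# illustrative; list_virtual_machines is the Marvin helper already used
# throughout):
import time

def wait_for_vm_state(apiclient, vm_id, target_state, retries=30, interval=10):
    """Poll listVirtualMachines until the VM reaches target_state."""
    for _ in range(retries):
        vm_list = list_virtual_machines(apiclient, id=vm_id)
        if isinstance(vm_list, list) and vm_list[0].state == target_state:
            return vm_list[0]
        time.sleep(interval)
    raise Exception("VM %s did not reach state %s in time" % (vm_id, target_state))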
def test_04_change_offering_small(self): """Test to change service to a small capacity """ # Validate the following # 1. Log in to the VM. We should see that the CPU and memory Info of # this Vm matches the one specified for "Small" service offering. # 2. Using listVM command verify that this Vm # has Small service offering Id. if self.hypervisor.lower() == "lxc": self.skipTest("Skipping this test for {} due to bug CS-38153".format(self.hypervisor)) try: self.medium_virtual_machine.stop(self.apiclient) except Exception as e: self.fail("Failed to stop VM: %s" % e) cmd = changeServiceForVirtualMachine.changeServiceForVirtualMachineCmd() cmd.id = self.medium_virtual_machine.id cmd.serviceofferingid = self.small_offering.id self.apiclient.changeServiceForVirtualMachine(cmd) self.debug("Starting VM - ID: %s" % self.medium_virtual_machine.id) self.medium_virtual_machine.start(self.apiclient) # Ensure that VM is in running state list_vm_response = list_virtual_machines( self.apiclient, id=self.medium_virtual_machine.id ) if isinstance(list_vm_response, list): vm = list_vm_response[0] if vm.state == 'Running': self.debug("VM state: %s" % vm.state) else: raise Exception( "Failed to start VM (ID: %s) after changing service offering" % vm.id) try: ssh = self.medium_virtual_machine.get_ssh_client() except Exception as e: self.fail( "SSH Access failed for %s: %s" % (self.medium_virtual_machine.ipaddress, e) ) cpuinfo = ssh.execute("cat /proc/cpuinfo") cpu_cnt = len([i for i in cpuinfo if "processor" in i]) # 'cpu MHz\t\t: 2660.499' cpu_speed = [i for i in cpuinfo if "cpu MHz" in i][0].split()[3] meminfo = ssh.execute("cat /proc/meminfo") # MemTotal: 1017464 kB total_mem = [i for i in meminfo if "MemTotal" in i][0].split()[1] self.debug( "CPU count: %s, CPU Speed: %s, Mem Info: %s" % ( cpu_cnt, cpu_speed, total_mem )) self.assertAlmostEqual( int(cpu_cnt), self.small_offering.cpunumber, msg="Check CPU Count for small offering" ) self.assertAlmostEqual( list_vm_response[0].cpuspeed, self.small_offering.cpuspeed, msg="Check CPU Speed for small offering" ) allowed_range = 20 if self.hypervisor.lower() == "hyperv": allowed_range = 200 # TODO: Find the memory allocated to VM on hyperv hypervisor using # powershell commands and use that value to equate instead of # manipulating range, currently we get the memory count much less # because of the UI component self.assertTrue( isAlmostEqual(int(int(total_mem) / 1024), int(self.small_offering.memory), range=allowed_range ), "Check Memory (MB) for small offering" ) return
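# The /proc/cpuinfo and /proc/meminfo parsing above can be factored into a
# single probe that returns the three values the assertions compare. A
# hedged sketch assuming the same get_ssh_client() interface used by the
# test (the helper name and return shape are illustrative):

def read_guest_resources(vm):
    """Return (cpu_count, cpu_mhz, total_mem_kb) as reported by the guest OS."""
    ssh = vm.get_ssh_client()
    cpuinfo = ssh.execute("cat /proc/cpuinfo")
    cpu_count = len([line for line in cpuinfo if "processor" in line])
    # e.g. 'cpu MHz\t\t: 2660.499' -> the numeric field is at index 3
    cpu_mhz = float([line for line in cpuinfo if "cpu MHz" in line][0].split()[3])
    meminfo = ssh.execute("cat /proc/meminfo")
    # e.g. 'MemTotal: 1017464 kB' -> the value is reported in kB
    total_mem_kb = int([line for line in meminfo if "MemTotal" in line][0].split()[1])
    return cpu_count, cpu_mhz, total_mem_kb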
def test_01_createVM_snapshotTemplate(self): """Test create VM, Snapshot and Template """ # Validate the following # 1. Deploy VM using default template, small service offering # and small data disk offering. # 2. Perform snapshot on the root disk of this VM. # 3. Create a template from snapshot. # 4. Create an instance from the above created template. # 5. listSnapshots should list the snapshot that was created. # 6. verify that secondary storage NFS share contains the reqd # volume under /secondary/snapshots/$accountid/ # $volumeid/$snapshot_uuid # 7. verify backup_snap_id was non null in the `snapshots` table # 8. listTemplates() should return the newly created Template, # and check for template state as READY # 9. listVirtualMachines() command should return the deployed VM. # State of this VM should be Running. # Create Virtual Machine if self.hypervisor.lower() in ['hyperv']: self.skipTest("Snapshots feature is not supported on Hyper-V") userapiclient = self.testClient.getUserApiClient( UserName=self.account.name, DomainName=self.account.domain) self.virtual_machine = VirtualMachine.create( userapiclient, self.services["server"], templateid=self.template.id, accountid=self.account.name, domainid=self.account.domainid, serviceofferingid=self.service_offering.id ) self.debug("Created VM with ID: %s" % self.virtual_machine.id) # Get the Root disk of VM volumes = list_volumes( userapiclient, virtualmachineid=self.virtual_machine.id, type='ROOT', listall=True ) volume = volumes[0] # Create a snapshot from the ROOTDISK snapshot = Snapshot.create(userapiclient, volume.id) self.debug("Snapshot created: ID - %s" % snapshot.id) self.cleanup.append(snapshot) snapshots = list_snapshots( userapiclient, id=snapshot.id ) self.assertEqual( isinstance(snapshots, list), True, "Check list response returns a valid list" ) self.assertNotEqual( snapshots, None, "Check if result exists in list snapshots call" ) self.assertEqual( snapshots[0].id, snapshot.id, "Check snapshot id in list resources call" ) self.debug( "select backup_snap_id, account_id, volume_id from snapshots where uuid = '%s';" % snapshot.id) snapshot_uuid = snapshot.id # Generate template from the snapshot template = Template.create_from_snapshot( userapiclient, snapshot, self.services["templates"] ) self.debug("Created template from snapshot: %s" % template.id) self.cleanup.append(template) templates = list_templates( userapiclient, templatefilter=self.services["templates"]["templatefilter"], id=template.id ) self.assertNotEqual( templates, None, "Check if result exists in list item call" ) self.assertEqual( templates[0].isready, True, "Check new template state in list templates call" ) # Deploy new virtual machine using template new_virtual_machine = VirtualMachine.create( userapiclient, self.services["server"], templateid=template.id, accountid=self.account.name, domainid=self.account.domainid, serviceofferingid=self.service_offering.id ) self.debug("Created VM with ID: %s from template: %s" % ( new_virtual_machine.id, template.id )) self.cleanup.append(new_virtual_machine) # Newly deployed VM should be 'Running' virtual_machines = list_virtual_machines( userapiclient, id=new_virtual_machine.id, account=self.account.name, domainid=self.account.domainid ) self.assertEqual( isinstance(virtual_machines, list), True, "Check list response returns a valid list" ) self.assertNotEqual( len(virtual_machines), 0, "Check list virtual machines response" ) for virtual_machine in virtual_machines: self.assertEqual( virtual_machine.state, 'Running', "Check list VM response for Running state" ) self.assertTrue( is_snapshot_on_nfs( self.apiclient, self.dbclient, self.config, self.zone.id, snapshot_uuid)) return
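# Step 8 of the scenario checks templates[0].isready on the first call;
# template creation from a snapshot can take a while on slow secondary
# storage, so a bounded wait avoids flaky failures. A hedged sketch that
# reuses the list_templates helper from the test (the function name,
# retry count, and interval are illustrative):
import time

def wait_for_template_ready(apiclient, template_id, templatefilter, retries=30, interval=10):
    """Poll listTemplates until the template reports isready=True."""
    for _ in range(retries):
        templates = list_templates(apiclient, templatefilter=templatefilter, id=template_id)
        if templates and templates[0].isready:
            return templates[0]
        time.sleep(interval)
    raise Exception("Template %s never reached the READY state" % template_id)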
def test_01_recover_VM(self): """ Test Restore VM on VMWare 1. Deploy a VM without datadisk 2. Restore the VM 3. Verify that VM comes up in Running state """ try: self.pools = StoragePool.list( self.apiclient, zoneid=self.zone.id, scope="CLUSTER") status = validateList(self.pools) # Step 3 self.assertEqual( status[0], PASS, "Check: Failed to list cluster wide storage pools") if len(self.pools) < 2: self.skipTest("There must be at least two cluster-wide storage pools available in the setup") except Exception as e: self.skipTest(e) # Adding tags to Storage Pools cluster_no = 1 StoragePool.update( self.apiclient, id=self.pools[0].id, tags=[CLUSTERTAG1[:-1] + repr(cluster_no)]) self.vm = VirtualMachine.create( self.apiclient, self.testdata["small"], accountid=self.account.name, templateid=self.template.id, domainid=self.account.domainid, serviceofferingid=self.service_offering_cwps.id, zoneid=self.zone.id, ) # Step 2 volumes_root_list = list_volumes( self.apiclient, virtualmachineid=self.vm.id, type=ROOT, listall=True ) root_volume = volumes_root_list[0] # Restore VM till its ROOT disk is recreated on another Primary Storage while True: self.vm.restore(self.apiclient) volumes_root_list = list_volumes( self.apiclient, virtualmachineid=self.vm.id, type=ROOT, listall=True ) root_volume = volumes_root_list[0] if root_volume.storage != self.pools[0].name: break # Step 3 vm_list = list_virtual_machines( self.apiclient, id=self.vm.id) state = vm_list[0].state i = 0 while(state != "Running"): vm_list = list_virtual_machines( self.apiclient, id=self.vm.id) time.sleep(10) i = i + 1 state = vm_list[0].state if i >= 10: self.fail("Restore VM Failed") return
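# The restore loop above retries until the ROOT volume lands on a pool
# other than pools[0], but an unbounded 'while True' can spin forever if
# placement never changes. A bounded variant, sketched under the same
# assumptions as the test (the attempt cap is illustrative; ROOT is the
# same constant the test already uses):

def restore_until_moved(testcase, apiclient, vm, avoid_pool_name, max_attempts=10):
    """Restore the VM until its ROOT volume leaves avoid_pool_name."""
    for _ in range(max_attempts):
        vm.restore(apiclient)
        root_volume = list_volumes(apiclient, virtualmachineid=vm.id,
                                   type=ROOT, listall=True)[0]
        if root_volume.storage != avoid_pool_name:
            return root_volume
    testcase.fail("ROOT volume stayed on %s after %d restores" % (avoid_pool_name, max_attempts))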
def test_02_revert_vm_snapshots(self): """Test to revert VM snapshots """ try: ssh_client = self.virtual_machine.get_ssh_client() cmds = [ "rm -rf %s/%s" % (self.test_dir, self.random_data), "ls %s/%s" % (self.test_dir, self.random_data) ] for c in cmds: self.debug(c) result = ssh_client.execute(c) self.debug(result) except Exception: self.fail("SSH failed for Virtual machine: %s" % self.virtual_machine.ipaddress) if str(result[0]).find("No such file or directory") == -1: self.fail("Check that the random data was deleted from the temp file!") time.sleep(self.services["sleep"]) list_snapshot_response = VmSnapshot.list(self.apiclient, vmid=self.virtual_machine.id, listall=True) self.assertEqual( isinstance(list_snapshot_response, list), True, "Check list response returns a valid list" ) self.assertNotEqual( list_snapshot_response, None, "Check if snapshot exists in ListSnapshot" ) self.assertEqual( list_snapshot_response[0].state, "Ready", "Check the snapshot of the VM is ready!" ) VmSnapshot.revertToSnapshot(self.apiclient, list_snapshot_response[0].id) list_vm_response = list_virtual_machines( self.apiclient, id=self.virtual_machine.id ) self.assertEqual( list_vm_response[0].state, "Stopped", "Check the state of the VM is Stopped!" ) cmd = startVirtualMachine.startVirtualMachineCmd() cmd.id = list_vm_response[0].id self.apiclient.startVirtualMachine(cmd) time.sleep(self.services["sleep"]) try: ssh_client = self.virtual_machine.get_ssh_client(reconnect=True) cmds = [ "cat %s/%s" % (self.test_dir, self.random_data) ] for c in cmds: self.debug(c) result = ssh_client.execute(c) self.debug(result) except Exception: self.fail("SSH failed for Virtual machine: %s" % self.virtual_machine.ipaddress) self.assertEqual( self.random_data_0, result[0], "Check the restored random data matches the original random data!" )
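# The revert verification works because the test writes a known random
# payload before the VM snapshot, deletes it, reverts, and expects the
# payload back. The read-back half of that contract fits in one helper; a
# hedged sketch assuming the same get_ssh_client() interface used above
# (the helper name and signature are illustrative):

def read_marker_file(vm, path):
    """Return the first line of a marker file on the guest via SSH."""
    ssh = vm.get_ssh_client(reconnect=True)
    result = ssh.execute("cat %s" % path)
    return result[0] if result else None

# With it, the final assertion reduces to comparing
# read_marker_file(self.virtual_machine, "%s/%s" % (self.test_dir, self.random_data))
# against self.random_data_0.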