def tearDown(self):
    """Per-test cleanup: clear host tags set by the test, then release resources.

    Failures are logged as warnings rather than raised, so teardown never
    masks the actual test result.
    """
    try:
        # Strip any host tags the test applied before tearing resources down.
        for tagged_host in self.hosts:
            Host.update(self.apiclient, id=tagged_host.id, hosttags="")
        cleanup_resources(self.apiclient, self.cleanup)
    except Exception as e:
        self.debug("Warning! Exception in tearDown: %s" % e)
def setUpClass(cls):
    """One-time setup: resolve zone/domain/template, create the test account,
    and record the hosts/clusters available in the environment."""
    cls.apiclient = super(TestDeployVmWithVariedPlanners, cls).getClsTestClient().getApiClient()
    cls.services = Services().services
    # Get Zone, Domain and templates
    cls.domain = get_domain(cls.apiclient, cls.services)
    cls.zone = get_zone(cls.apiclient, cls.services)
    cls.template = get_template(cls.apiclient,
                                cls.zone.id,
                                cls.services["ostype"])
    # Wire the resolved ids into the shared services dict for later calls.
    cls.services["virtual_machine"]["zoneid"] = cls.zone.id
    cls.services["template"] = cls.template.id
    cls.services["zoneid"] = cls.zone.id
    cls.account = Account.create(cls.apiclient,
                                 cls.services["account"],
                                 domainid=cls.domain.id)
    cls.services["account"] = cls.account.name
    cls.hosts = Host.list(cls.apiclient, type='Routing')
    cls.clusters = Cluster.list(cls.apiclient)
    # Account is removed in tearDownClass via this cleanup list.
    cls.cleanup = [cls.account]
def find_suitable_host(apiclient, vm):
    """Returns a suitable host for VM migration"""
    # Ask the API for hosts capable of receiving this VM.
    candidates = Host.list(apiclient, virtualmachineid=vm.id, listall=True)
    if not isinstance(candidates, list):
        raise Exception("Exception: List host should return valid response")
    assert len(candidates) > 0, "List host should return valid response"
    # First entry is taken as the migration target.
    return candidates[0]
def AddCluster(self, cluster_size):
    """Create one cluster per configured hypervisor entry and populate it
    with simulator hosts plus an NFS primary storage pool.

    NOTE(review): the loop adds cluster_size - 1 hosts per cluster (it starts
    counting from 1) — presumably intentional, but confirm against callers.
    """
    # Create clusters with Hypervisor type XEN/KVM/VWare
    for k, v in self.services["clusters"].items():
        v["clustername"] = str(uuid.uuid1())
        cluster = Cluster.create(self.apiclient,
                                 v,
                                 zoneid=self.zone.id,
                                 podid=self.pod.id)
        self.debug("Created Cluster for hypervisor type %s & ID: %s" % (
            v["hypervisor"], cluster.id))
        self.assertEqual(cluster.allocationstate,
                         'Enabled',
                         "Check whether allocation state of cluster is enabled")
        hypervisor_type = str(cluster.hypervisortype.lower())
        # Add simulator hosts to the freshly created cluster.
        for _ in range(1, cluster_size):
            host_services = self.services["hosts"][hypervisor_type]
            host_services["url"] = "http://sim/" + str(uuid.uuid1()) + "/"
            new_host = Host.create(self.apiclient,
                                   cluster,
                                   host_services,
                                   zoneid=self.zone.id,
                                   podid=self.pod.id)
            self.debug("Created host (ID: %s) in cluster ID %s" % (
                new_host.id, cluster.id))
        # Attach an NFS primary storage pool to the cluster.
        storage_services = {
            "url": "nfs://nfsstor:/export/home/sandbox/" + str(uuid.uuid1()) + "/",
            "name": str(uuid.uuid1())
        }
        storage = StoragePool.create(self.apiclient,
                                     storage_services,
                                     clusterid=cluster.id,
                                     zoneid=self.zone.id,
                                     podid=self.pod.id)
    return
def findSuitableHostForMigration(apiclient, vmid):
    """Returns a suitable host for VM migration"""
    try:
        candidate_hosts = Host.listForMigration(apiclient,
                                                virtualmachineid=vmid,
                                                )
    except Exception as e:
        raise Exception("Exception while getting hosts list suitable for migration: %s" % e)
    # Pick the first host that is both enabled and up; None if there is none.
    if isinstance(candidate_hosts, list) and len(candidate_hosts) > 0:
        for candidate in candidate_hosts:
            if (str(candidate.resourcestate).lower() == "enabled"
                    and str(candidate.state).lower() == "up"):
                return candidate
    return None
def GetHostStats(self):
    """Collect CPU and memory statistics for every host that reports
    'cpuallocated', returning a list of per-host stat dicts."""
    stat_keys = ("cpuwithoverprovisioning", "cpunumber", "cpuallocated",
                 "cpuused", "cpuspeed", "memorytotal", "memoryused",
                 "memoryallocated")
    stats = []
    for current_host in Host.list(self.apiclient):
        attrs = current_host.__dict__
        # Hosts without 'cpuallocated' (e.g. non-routing) are skipped.
        if "cpuallocated" not in attrs:
            continue
        entry = {"name": attrs["name"]}
        for key in stat_keys:
            # Missing metrics default to zero via the helper.
            entry[key] = self.CheckForKeyElseReturnZero(attrs, key)
        stats.append(entry)
    return stats
def setUpClass(cls):
    """Class-level fixture: look up zone, domain and template, create the
    account used by the tests, and cache host/cluster inventories."""
    cls.apiclient = super(TestDeployVmWithVariedPlanners, cls).getClsTestClient().getApiClient()
    cls.services = Services().services
    # Get Zone, Domain and templates
    cls.domain = get_domain(cls.apiclient, cls.services)
    cls.zone = get_zone(cls.apiclient, cls.services)
    cls.template = get_template(
        cls.apiclient,
        cls.zone.id,
        cls.services["ostype"]
    )
    # Propagate resolved ids into the shared services dictionary.
    cls.services["virtual_machine"]["zoneid"] = cls.zone.id
    cls.services["template"] = cls.template.id
    cls.services["zoneid"] = cls.zone.id
    cls.account = Account.create(
        cls.apiclient,
        cls.services["account"],
        domainid=cls.domain.id
    )
    cls.services["account"] = cls.account.name
    cls.hosts = Host.list(cls.apiclient, type='Routing')
    cls.clusters = Cluster.list(cls.apiclient)
    cls.cleanup = [
        cls.account
    ]
def test_vmware_affinity(self):
    """ Test Set up affinity rules

        The test requires following pre-requisites
        - VMWare cluster configured in fully automated mode
    """
    # Validate the following
    # 1. Deploy 2 VMs on same hosts
    # 2. Migrate one VM from one host to another
    # 3. The second VM should also get migrated
    hosts = Host.list(self.apiclient, zoneid=self.zone.id,
                      resourcestate='Enabled', type='Routing')
    self.assertEqual(isinstance(hosts, list), True,
                     "List hosts should return valid host response")
    self.assertGreaterEqual(
        len(hosts),
        2,
        "There must be two hosts present in a cluster")
    host_1 = hosts[0].id
    host_2 = hosts[1].id
    aff_grp = self.create_aff_grp(aff_grp=self.services["host_affinity"],
                                  acc=self.account.name,
                                  domainid=self.domain.id)
    # vm_1 is pinned to host_1; vm_2 joins the same affinity group so the
    # planner should co-locate it.
    vm_1 = VirtualMachine.create(
        self.apiclient,
        self.services["virtual_machine"],
        accountid=self.account.name,
        domainid=self.domain.id,
        serviceofferingid=self.service_offering.id,
        affinitygroupnames=[aff_grp.name],
        hostid=host_1)
    vm_2 = VirtualMachine.create(
        self.apiclient,
        self.services["virtual_machine"],
        accountid=self.account.name,
        domainid=self.domain.id,
        serviceofferingid=self.service_offering.id,
        affinitygroupnames=[aff_grp.name])
    vms = VirtualMachine.list(self.apiclient, id=vm_1.id, listall=True)
    vm_list_validation_result = validateList(vms)
    self.assertEqual(
        vm_list_validation_result[0],
        PASS,
        "vm list validation failed due to %s" % vm_list_validation_result[2])
    virtual_machine_1 = vm_list_validation_result[1]
    # Fixed typo in assertion message: "RUnning" -> "Running"
    self.assertEqual(virtual_machine_1.state, "Running",
                     "Deployed VM should be in Running state")
    self.debug("Deploying VM on account: %s" % self.account.name)
    vms = VirtualMachine.list(self.apiclient, id=vm_2.id, listall=True)
    vm_list_validation_result = validateList(vms)
    self.assertEqual(
        vm_list_validation_result[0],
        PASS,
        "vm list validation failed due to %s" % vm_list_validation_result[2])
    virtual_machine_2 = vm_list_validation_result[1]
    self.assertEqual(virtual_machine_2.state, "Running",
                     "Deployed VM should be in Running state")
    self.debug("Migrate VM from host_1 to host_2")
    cmd = migrateVirtualMachine.migrateVirtualMachineCmd()
    cmd.virtualmachineid = virtual_machine_2.id
    cmd.hostid = host_2
    self.apiclient.migrateVirtualMachine(cmd)
    self.debug("Migrated VM from host_1 to host_2")
    vms = VirtualMachine.list(self.apiclient, hostid=host_2, listall=True)
    vm_list_validation_result = validateList(vms)
    self.assertEqual(
        vm_list_validation_result[0],
        PASS,
        "vm list validation failed due to %s" % vm_list_validation_result[2])
    vmids = [vm.id for vm in vms]
    # Fixed swapped messages: VM 2 was migrated explicitly above; VM 1 must
    # follow automatically because of the host affinity group.
    self.assertIn(virtual_machine_1.id, vmids,
                  "VM 1 should be automatically migrated to host 2")
    self.assertIn(virtual_machine_2.id, vmids,
                  "VM 2 should be successfully migrated to host 2")
    return
def test_vmware_anti_affinity(self):
    """ Test Set up anti-affinity rules

        The test requires following pre-requisites
        - VMWare cluster configured in fully automated mode
    """
    # Validate the following
    # 1. Deploy VMs on host 1 and 2
    # 2. Enable maintenance mode for host 1
    # 3. VM should be migrated to 3rd host
    hosts = Host.list(self.apiclient, zoneid=self.zone.id,
                      resourcestate='Enabled', type='Routing')
    self.assertEqual(isinstance(hosts, list), True,
                     "List hosts should return valid host response")
    self.debug(len(hosts))
    self.assertGreaterEqual(
        len(hosts),
        3,
        "There must be at least 3 hosts present in a cluster")
    aff_grp = self.create_aff_grp(
        aff_grp=self.services["host_anti_affinity"],
        acc=self.account.name,
        domainid=self.domain.id)
    vm_1 = VirtualMachine.create(
        self.apiclient,
        self.services["virtual_machine"],
        accountid=self.account.name,
        domainid=self.domain.id,
        serviceofferingid=self.service_offering.id,
        affinitygroupnames=[aff_grp.name])
    vm_2 = VirtualMachine.create(
        self.apiclient,
        self.services["virtual_machine"],
        accountid=self.account.name,
        domainid=self.domain.id,
        serviceofferingid=self.service_offering.id,
        affinitygroupnames=[aff_grp.name])
    # NOTE: these are plain host id strings, not Host objects.
    host_1 = vm_1.hostid
    host_2 = vm_2.hostid
    vms = VirtualMachine.list(self.apiclient, id=vm_1.id, listall=True)
    vm_list_validation_result = validateList(vms)
    # Fixed: validateList puts the failure reason at index [2] ([1] is the
    # validated object) — the original interpolated [1] into the message.
    self.assertEqual(
        vm_list_validation_result[0],
        PASS,
        "vm list validation failed due to %s" % vm_list_validation_result[2])
    virtual_machine_1 = vm_list_validation_result[1]
    self.debug("VM State: %s" % virtual_machine_1.state)
    self.assertEqual(virtual_machine_1.state, "Running",
                     "Deployed VM should be in Running state")
    vms = VirtualMachine.list(self.apiclient, id=vm_2.id, listall=True)
    vm_list_validation_result = validateList(vms)
    self.assertEqual(
        vm_list_validation_result[0],
        PASS,
        "vm list validation failed due to %s" % vm_list_validation_result[2])
    virtual_machine_2 = vm_list_validation_result[1]
    self.debug("VM %s State: %s" % (virtual_machine_2.name,
                                    virtual_machine_2.state))
    self.assertEqual(virtual_machine_2.state, "Running",
                     "Deployed VM should be in Running state")
    self.debug("Enabling maintenance mode on host_1: %s" % host_1)
    cmd = prepareHostForMaintenance.prepareHostForMaintenanceCmd()
    cmd.id = host_1
    self.apiclient.prepareHostForMaintenance(cmd)
    timeout = self.services["timeout"]
    # Poll until the host actually enters Maintenance or the timeout expires.
    while True:
        hosts = Host.list(self.apiclient, zoneid=self.zone.id,
                          type='Routing', id=host_1)
        host_list_validation_result = validateList(hosts)
        self.assertEqual(
            host_list_validation_result[0],
            PASS,
            "host list validation failed due to %s" % host_list_validation_result[2])
        host = host_list_validation_result[1]
        if host.resourcestate == 'Maintenance':
            break
        elif timeout == 0:
            self.fail("Failed to put host: %s in maintenance mode" % host.name)
        time.sleep(self.services["sleep"])
        timeout = timeout - 1
    vms = VirtualMachine.list(self.apiclient, id=virtual_machine_1.id,
                              listall=True)
    vm_list_validation_result = validateList(vms)
    self.assertEqual(
        vm_list_validation_result[0],
        PASS,
        "vm list validation failed due to %s" % vm_list_validation_result[2])
    # Fixed: the validated VM is at index [1]; [0] holds the PASS/FAIL status
    # and has no .state/.hostid attributes.
    vm = vm_list_validation_result[1]
    self.assertEqual(vm.state, "Running",
                     "Deployed VM should be in Running state")
    # Anti-affinity: the evacuated VM must not land on vm_2's host.
    self.assertNotEqual(
        vm.hostid,
        host_2,
        "The host name should not match with second host name")
    # Fixed: host_1 is already an id string — host_1.id raised AttributeError.
    self.debug("Canceling host maintenance for ID: %s" % host_1)
    cmd = cancelHostMaintenance.cancelHostMaintenanceCmd()
    cmd.id = host_1
    self.apiclient.cancelHostMaintenance(cmd)
    self.debug("Maintenance mode canceled for host: %s" % host_1)
    return
def test_vm_creation_in_fully_automated_mode(self):
    """ Test VM Creation in automation mode = Fully automated

        This test requires following preconditions:
        - DRS Cluster is configured in "Fully automated" mode
    """
    # Validate the following
    # 1. Create a new VM in a host which is almost fully utilized
    # 2 Automatically places VM on the other host
    # 3. VM state is running after deployment
    hosts = Host.list(self.apiclient, zoneid=self.zone.id,
                      resourcestate='Enabled', type='Routing')
    self.assertEqual(isinstance(hosts, list), True,
                     "List hosts should return valid host response")
    self.assertGreaterEqual(len(hosts), 2,
                            "There must be two hosts present in a cluster")
    host_1 = hosts[0]
    #Convert available memory( Keep some margin) into MBs and assign to service offering
    free_memory_mb = int(
        (int(host_1.memorytotal) - int(host_1.memoryused)) / 1048576 - 1024)
    self.services["service_offering_max_memory"]["memory"] = free_memory_mb
    self.debug("max memory: %s" %
               self.services["service_offering_max_memory"]["memory"])
    service_offering_max_memory = ServiceOffering.create(
        self.apiclient,
        self.services["service_offering_max_memory"])
    # Nearly fill host_1 with a VM sized to its remaining capacity.
    VirtualMachine.create(self.apiclient,
                          self.services["virtual_machine"],
                          accountid=self.account.name,
                          domainid=self.account.domainid,
                          serviceofferingid=service_offering_max_memory.id,
                          hostid=host_1.id)
    # Host 1 has only 1024 MB memory available now after deploying the instance
    # We are trying to deploy an instance with 2048 MB memory, this should automatically
    # get deployed on other host which has the enough capacity
    self.debug(
        "Trying to deploy instance with memory requirement more than that is available on\
            the first host")
    self.debug("Deploying VM in account: %s" % self.account.name)
    # Spawn an instance in that network
    virtual_machine = VirtualMachine.create(
        self.apiclient,
        self.services["virtual_machine"],
        accountid=self.account.name,
        domainid=self.account.domainid,
        serviceofferingid=self.service_offering.id)
    vms = VirtualMachine.list(self.apiclient,
                              id=virtual_machine.id,
                              listall=True)
    self.assertEqual(isinstance(vms, list), True,
                     "List VMs should return valid response for deployed VM")
    self.assertNotEqual(len(vms), 0,
                        "List VMs should return valid response for deployed VM")
    vm = vms[0]
    self.assertEqual(vm.state, "Running",
                     "Deployed VM should be in RUnning state")
    # DRS must have placed the second VM on a different host.
    self.assertNotEqual(
        vm.hostid,
        host_1.id,
        "Host Ids of two should not match as one host is full")
    self.debug(
        "The host ids of two virtual machines are different as expected\
            they are %s and %s" % (vm.hostid, host_1.id))
    return
def test_03_restore_vm_with_new_template(self):
    """ Test restoring a vm with different template than the one it was created with
    """
    hosts = Host.list(self.apiclient, type="Routing", listall=True)
    host_list_validation_result = validateList(hosts)
    self.assertEqual(
        host_list_validation_result[0],
        PASS,
        "host list validation failed due to %s" % host_list_validation_result[2])
    hypervisor = host_list_validation_result[1].hypervisor
    for k, v in self.services["templates"].items():
        if k == hypervisor:
            # Register new template
            template = Template.register(self.apiclient,
                                         v,
                                         zoneid=self.zone.id,
                                         account=self.account.name,
                                         domainid=self.account.domainid)
            self.debug("Registered a template of format: %s with ID: %s" %
                       (v["format"], template.id))
            self.debug("Downloading template with ID: %s" % (template.id))
            template.download(self.apiclient)
            self.cleanup.append(template)
            # Wait for template status to be changed across
            time.sleep(self.services["sleep"])
            self.verify_template_listing(template)
            # Restore a vm with the new template.
            self.vm_with_reset.restore(self.apiclient, templateid=template.id)
            self.vm_without_reset.restore(self.apiclient,
                                          templateid=template.id)
            # Make sure the VMs now have the new template ID
            # Make sure the Ip address of the VMs haven't changed
            self.debug("Checking template id of VM with isVolatile=True")
            vms = VirtualMachine.list(self.apiclient,
                                      id=self.vm_with_reset.id,
                                      listall=True)
            vm_list_validation_result = validateList(vms)
            self.assertEqual(
                vm_list_validation_result[0],
                PASS,
                "VM list validation failed due to %s" % vm_list_validation_result[2])
            vm_with_reset = vm_list_validation_result[1]
            self.assertNotEqual(
                self.vm_with_reset.templateid,
                vm_with_reset.templateid,
                "VM created with IsVolatile=True has same templateid : %s after restore" % vm_with_reset.templateid)
            # Fixed: the restored VM's templateid must EQUAL the new template
            # id; the original assertNotEqual compared the OLD templateid
            # against template.id, which never verified the restore.
            self.assertEqual(
                vm_with_reset.templateid,
                template.id,
                "VM created with IsVolatile=True has wrong templateid after restore Got:%s Expected: %s" % (vm_with_reset.templateid, template.id))
            # Make sure it has the same IP after reboot
            self.assertEqual(
                self.vm_with_reset.nic[0].ipaddress,
                vm_with_reset.nic[0].ipaddress,
                "VM created with IsVolatile=True doesn't have same ip after restore. Got : %s Expected : %s" % (vm_with_reset.nic[0].ipaddress, self.vm_with_reset.nic[0].ipaddress))
            # Check if the the root disk was not destroyed for isVolatile=False
            self.debug("Checking template id of VM with isVolatile=False")
            vms = VirtualMachine.list(self.apiclient,
                                      id=self.vm_without_reset.id,
                                      listall=True)
            vm_list_validation_result = validateList(vms)
            self.assertEqual(
                vm_list_validation_result[0],
                PASS,
                "VM list validation failed due to %s" % vm_list_validation_result[2])
            vm_without_reset = vm_list_validation_result[1]
            # Fixed: message interpolated vm_with_reset's templateid here.
            self.assertNotEqual(
                self.vm_without_reset.templateid,
                vm_without_reset.templateid,
                "VM created with IsVolatile=False has same templateid : %s after restore" % vm_without_reset.templateid)
            # Fixed: same inverted assertion as above for the isVolatile=False VM.
            self.assertEqual(
                vm_without_reset.templateid,
                template.id,
                "VM created with IsVolatile=False has wrong templateid after restore Got:%s Expected: %s" % (vm_without_reset.templateid, template.id))
            # Make sure it has the same IP after reboot
            self.assertEqual(
                self.vm_without_reset.nic[0].ipaddress,
                vm_without_reset.nic[0].ipaddress,
                "VM created with IsVolatile=False doesn't have same ip after restore. Got : %s Expected : %s" % (vm_without_reset.nic[0].ipaddress, self.vm_without_reset.nic[0].ipaddress))
    return
def test_08_migrate_vm(self):
    """Test migrate VM
    """
    # Validate the following
    # 1. Environment has enough hosts for migration
    # 2. DeployVM on suitable host (with another host in the cluster)
    # 3. Migrate the VM and assert migration successful
    suitable_hosts = None
    hosts = Host.list(self.apiclient, zoneid=self.zone.id, type='Routing')
    self.assertEqual(validateList(hosts)[0], PASS,
                     "hosts list validation failed")
    if len(hosts) < 2:
        self.skipTest(
            "At least two hosts should be present in the zone for migration")
    hypervisor = str(get_hypervisor_type(self.apiclient)).lower()
    # For KVM, two hosts used for migration should be present in same cluster
    # For XenServer and VMware, migration is possible between hosts belonging to different clusters
    # with the help of XenMotion and Vmotion respectively.
    if hypervisor == "kvm":
        #identify suitable host
        cluster_ids = [h.clusterid for h in hosts]
        #find hosts withe same clusterid
        duplicated_clusters = [c for idx, c in enumerate(cluster_ids)
                               if cluster_ids.count(c) > 1]
        if len(duplicated_clusters) <= 1:
            self.skipTest(
                "In KVM, Live Migration needs two hosts within same cluster")
        suitable_hosts = [h for h in hosts
                          if h.clusterid == duplicated_clusters[0]]
    else:
        suitable_hosts = hosts
    target_host = suitable_hosts[0]
    migrate_host = suitable_hosts[1]
    #deploy VM on target host
    self.vm_to_migrate = VirtualMachine.create(
        self.api_client,
        self.services["small"],
        accountid=self.account.name,
        domainid=self.account.domainid,
        serviceofferingid=self.small_offering.id,
        mode=self.services["mode"],
        hostid=target_host.id)
    self.debug("Migrating VM-ID: %s to Host: %s" %
               (self.vm_to_migrate.id, migrate_host.id))
    self.vm_to_migrate.migrate(self.api_client, migrate_host.id)
    list_vm_response = list_virtual_machines(self.apiclient,
                                             id=self.vm_to_migrate.id)
    self.assertNotEqual(list_vm_response, None,
                        "Check virtual machine is listed")
    vm_response = list_vm_response[0]
    self.assertEqual(vm_response.id, self.vm_to_migrate.id,
                     "Check virtual machine ID of migrated VM")
    self.assertEqual(vm_response.hostid, migrate_host.id,
                     "Check destination hostID of migrated VM")
    return
def test_03_restore_vm_with_new_template(self):
    """ Test restoring a vm with different template than the one it was created with
    """
    hosts = Host.list(self.apiclient, type="Routing", listall=True)
    host_list_validation_result = validateList(hosts)
    self.assertEqual(
        host_list_validation_result[0],
        PASS,
        "host list validation failed due to %s" % host_list_validation_result[2],
    )
    hypervisor = host_list_validation_result[1].hypervisor
    for k, v in self.services["templates"].items():
        if k.lower() == hypervisor.lower():
            # Register new template
            template = Template.register(
                self.apiclient,
                v,
                zoneid=self.zone.id,
                account=self.account.name,
                domainid=self.account.domainid
            )
            self.debug("Registered a template of format: %s with ID: %s" % (v["format"], template.id))
            self.debug("Downloading template with ID: %s" % (template.id))
            template.download(self.apiclient)
            self.cleanup.append(template)
            # Wait for template status to be changed across
            time.sleep(self.services["sleep"])
            self.verify_template_listing(template)
            # Restore a vm with the new template.
            self.vm_with_reset.restore(self.apiclient, templateid=template.id)
            self.vm_without_reset.restore(self.apiclient, templateid=template.id)
            # Make sure the VMs now have the new template ID
            # Make sure the Ip address of the VMs haven't changed
            self.debug("Checking template id of VM with isVolatile=True")
            vms = VirtualMachine.list(self.apiclient, id=self.vm_with_reset.id, listall=True)
            vm_list_validation_result = validateList(vms)
            self.assertEqual(
                vm_list_validation_result[0],
                PASS,
                "VM list validation failed due to %s" % vm_list_validation_result[2],
            )
            vm_with_reset = vm_list_validation_result[1]
            self.assertNotEqual(
                self.vm_with_reset.templateid,
                vm_with_reset.templateid,
                "VM created with IsVolatile=True has same templateid : %s after restore" % vm_with_reset.templateid,
            )
            # Fixed: assert the RESTORED VM's templateid equals the new
            # template; the original assertNotEqual compared the old
            # templateid with template.id and never verified the restore.
            self.assertEqual(
                vm_with_reset.templateid,
                template.id,
                "VM created with IsVolatile=True has wrong templateid after restore Got:%s Expected: %s" % (vm_with_reset.templateid, template.id),
            )
            # Make sure it has the same IP after reboot
            self.assertEqual(
                self.vm_with_reset.nic[0].ipaddress,
                vm_with_reset.nic[0].ipaddress,
                "VM created with IsVolatile=True doesn't have same ip after restore. Got : %s Expected : %s" % (vm_with_reset.nic[0].ipaddress, self.vm_with_reset.nic[0].ipaddress),
            )
            # Check if the the root disk was not destroyed for isVolatile=False
            self.debug("Checking template id of VM with isVolatile=False")
            vms = VirtualMachine.list(self.apiclient, id=self.vm_without_reset.id, listall=True)
            vm_list_validation_result = validateList(vms)
            self.assertEqual(
                vm_list_validation_result[0],
                PASS,
                "VM list validation failed due to %s" % vm_list_validation_result[2],
            )
            vm_without_reset = vm_list_validation_result[1]
            # Fixed: message previously interpolated vm_with_reset here.
            self.assertNotEqual(
                self.vm_without_reset.templateid,
                vm_without_reset.templateid,
                "VM created with IsVolatile=False has same templateid : %s after restore" % vm_without_reset.templateid,
            )
            # Fixed: same inverted assertion for the isVolatile=False VM.
            self.assertEqual(
                vm_without_reset.templateid,
                template.id,
                "VM created with IsVolatile=False has wrong templateid after restore Got:%s Expected: %s" % (vm_without_reset.templateid, template.id),
            )
            # Make sure it has the same IP after reboot
            self.assertEqual(
                self.vm_without_reset.nic[0].ipaddress,
                vm_without_reset.nic[0].ipaddress,
                "VM created with IsVolatile=False doesn't have same ip after restore. Got : %s Expected : %s" % (vm_without_reset.nic[0].ipaddress, self.vm_without_reset.nic[0].ipaddress),
            )
    return
def test_vmware_affinity(self):
    """ Test Set up affinity rules

        The test requires following pre-requisites
        - VMWare cluster configured in fully automated mode
    """
    # Validate the following
    # 1. Deploy 2 VMs on same hosts
    # 2. Migrate one VM from one host to another
    # 3. The second VM should also get migrated
    hosts = Host.list(
        self.apiclient,
        zoneid=self.zone.id,
        resourcestate='Enabled',
        type='Routing'
    )
    self.assertEqual(
        isinstance(hosts, list),
        True,
        "List hosts should return valid host response"
    )
    self.assertGreaterEqual(
        len(hosts),
        2,
        "There must be two hosts present in a cluster"
    )
    host_1 = hosts[0].id
    host_2 = hosts[1].id
    aff_grp = self.create_aff_grp(aff_grp=self.services["host_affinity"],
                                  acc=self.account.name,
                                  domainid=self.domain.id)
    # vm_1 is pinned to host_1; vm_2 shares the affinity group so the planner
    # should co-locate it on the same host.
    vm_1 = VirtualMachine.create(
        self.apiclient,
        self.services["virtual_machine"],
        accountid=self.account.name,
        domainid=self.domain.id,
        serviceofferingid=self.service_offering.id,
        affinitygroupnames=[aff_grp.name],
        hostid=host_1
    )
    vm_2 = VirtualMachine.create(
        self.apiclient,
        self.services["virtual_machine"],
        accountid=self.account.name,
        domainid=self.domain.id,
        serviceofferingid=self.service_offering.id,
        affinitygroupnames=[aff_grp.name]
    )
    vms = VirtualMachine.list(
        self.apiclient,
        id=vm_1.id,
        listall=True
    )
    vm_list_validation_result = validateList(vms)
    self.assertEqual(vm_list_validation_result[0], PASS,
                     "vm list validation failed due to %s" % vm_list_validation_result[2])
    virtual_machine_1 = vm_list_validation_result[1]
    # Fixed typo in assertion message: "RUnning" -> "Running"
    self.assertEqual(
        virtual_machine_1.state,
        "Running",
        "Deployed VM should be in Running state"
    )
    self.debug("Deploying VM on account: %s" % self.account.name)
    vms = VirtualMachine.list(
        self.apiclient,
        id=vm_2.id,
        listall=True
    )
    vm_list_validation_result = validateList(vms)
    self.assertEqual(vm_list_validation_result[0], PASS,
                     "vm list validation failed due to %s" % vm_list_validation_result[2])
    virtual_machine_2 = vm_list_validation_result[1]
    self.assertEqual(
        virtual_machine_2.state,
        "Running",
        "Deployed VM should be in Running state"
    )
    self.debug("Migrate VM from host_1 to host_2")
    cmd = migrateVirtualMachine.migrateVirtualMachineCmd()
    cmd.virtualmachineid = virtual_machine_2.id
    cmd.hostid = host_2
    self.apiclient.migrateVirtualMachine(cmd)
    self.debug("Migrated VM from host_1 to host_2")
    vms = VirtualMachine.list(
        self.apiclient,
        hostid=host_2,
        listall=True
    )
    vm_list_validation_result = validateList(vms)
    self.assertEqual(vm_list_validation_result[0], PASS,
                     "vm list validation failed due to %s" % vm_list_validation_result[2])
    vmids = [vm.id for vm in vms]
    # Fixed swapped messages: VM 2 was migrated explicitly above; VM 1 must
    # follow automatically via the host affinity group.
    self.assertIn(
        virtual_machine_1.id,
        vmids,
        "VM 1 should be automatically migrated to host 2"
    )
    self.assertIn(
        virtual_machine_2.id,
        vmids,
        "VM 2 should be successfully migrated to host 2"
    )
    return
def test_vmware_anti_affinity(self):
    """ Test Set up anti-affinity rules

        The test requires following pre-requisites
        - VMWare cluster configured in fully automated mode
    """
    # Validate the following
    # 1. Deploy VMs on host 1 and 2
    # 2. Enable maintenance mode for host 1
    # 3. VM should be migrated to 3rd host
    hosts = Host.list(
        self.apiclient,
        zoneid=self.zone.id,
        resourcestate='Enabled',
        type='Routing'
    )
    self.assertEqual(
        isinstance(hosts, list),
        True,
        "List hosts should return valid host response"
    )
    self.debug(len(hosts))
    self.assertGreaterEqual(
        len(hosts),
        3,
        "There must be at least 3 hosts present in a cluster"
    )
    aff_grp = self.create_aff_grp(aff_grp=self.services["host_anti_affinity"],
                                  acc=self.account.name,
                                  domainid=self.domain.id)
    vm_1 = VirtualMachine.create(
        self.apiclient,
        self.services["virtual_machine"],
        accountid=self.account.name,
        domainid=self.domain.id,
        serviceofferingid=self.service_offering.id,
        affinitygroupnames=[aff_grp.name]
    )
    vm_2 = VirtualMachine.create(
        self.apiclient,
        self.services["virtual_machine"],
        accountid=self.account.name,
        domainid=self.domain.id,
        serviceofferingid=self.service_offering.id,
        affinitygroupnames=[aff_grp.name]
    )
    # NOTE: these are plain host id strings, not Host objects.
    host_1 = vm_1.hostid
    host_2 = vm_2.hostid
    vms = VirtualMachine.list(
        self.apiclient,
        id=vm_1.id,
        listall=True
    )
    vm_list_validation_result = validateList(vms)
    # Fixed: validateList puts the failure reason at [2] ([1] is the object);
    # the original interpolated [1] into the message.
    self.assertEqual(vm_list_validation_result[0], PASS,
                     "vm list validation failed due to %s" % vm_list_validation_result[2])
    virtual_machine_1 = vm_list_validation_result[1]
    self.debug("VM State: %s" % virtual_machine_1.state)
    self.assertEqual(
        virtual_machine_1.state,
        "Running",
        "Deployed VM should be in Running state"
    )
    vms = VirtualMachine.list(
        self.apiclient,
        id=vm_2.id,
        listall=True
    )
    vm_list_validation_result = validateList(vms)
    self.assertEqual(vm_list_validation_result[0], PASS,
                     "vm list validation failed due to %s" % vm_list_validation_result[2])
    virtual_machine_2 = vm_list_validation_result[1]
    self.debug("VM %s State: %s" % (
        virtual_machine_2.name,
        virtual_machine_2.state
    ))
    self.assertEqual(
        virtual_machine_2.state,
        "Running",
        "Deployed VM should be in Running state"
    )
    self.debug("Enabling maintenance mode on host_1: %s" % host_1)
    cmd = prepareHostForMaintenance.prepareHostForMaintenanceCmd()
    cmd.id = host_1
    self.apiclient.prepareHostForMaintenance(cmd)
    timeout = self.services["timeout"]
    # Poll until the host enters Maintenance or the timeout is exhausted.
    while True:
        hosts = Host.list(
            self.apiclient,
            zoneid=self.zone.id,
            type='Routing',
            id=host_1
        )
        host_list_validation_result = validateList(hosts)
        self.assertEqual(host_list_validation_result[0], PASS,
                         "host list validation failed due to %s" % host_list_validation_result[2])
        host = host_list_validation_result[1]
        if host.resourcestate == 'Maintenance':
            break
        elif timeout == 0:
            self.fail("Failed to put host: %s in maintenance mode" % host.name)
        time.sleep(self.services["sleep"])
        timeout = timeout - 1
    vms = VirtualMachine.list(
        self.apiclient,
        id=virtual_machine_1.id,
        listall=True
    )
    vm_list_validation_result = validateList(vms)
    self.assertEqual(vm_list_validation_result[0], PASS,
                     "vm list validation failed due to %s" % vm_list_validation_result[2])
    # Fixed: the validated VM is at index [1]; [0] is the PASS/FAIL status
    # and has no .state/.hostid attributes.
    vm = vm_list_validation_result[1]
    self.assertEqual(
        vm.state,
        "Running",
        "Deployed VM should be in Running state"
    )
    # Anti-affinity: the evacuated VM must not land on vm_2's host.
    self.assertNotEqual(
        vm.hostid,
        host_2,
        "The host name should not match with second host name"
    )
    # Fixed: host_1 is already an id string — host_1.id raised AttributeError.
    self.debug("Canceling host maintenance for ID: %s" % host_1)
    cmd = cancelHostMaintenance.cancelHostMaintenanceCmd()
    cmd.id = host_1
    self.apiclient.cancelHostMaintenance(cmd)
    self.debug("Maintenance mode canceled for host: %s" % host_1)
    return
def test_vm_creation_in_fully_automated_mode(self):
    """ Test VM Creation in automation mode = Fully automated

        This test requires following preconditions:
        - DRS Cluster is configured in "Fully automated" mode
    """
    # Validate the following
    # 1. Create a new VM in a host which is almost fully utilized
    # 2 Automatically places VM on the other host
    # 3. VM state is running after deployment
    hosts = Host.list(
        self.apiclient,
        zoneid=self.zone.id,
        resourcestate='Enabled',
        type='Routing'
    )
    self.assertEqual(
        isinstance(hosts, list),
        True,
        "List hosts should return valid host response"
    )
    self.assertGreaterEqual(
        len(hosts),
        2,
        "There must be two hosts present in a cluster"
    )
    host_1 = hosts[0]
    #Convert available memory( Keep some margin) into MBs and assign to service offering
    margin_mb = int(
        (int(host_1.memorytotal) - int(host_1.memoryused)) / 1048576 - 1024)
    self.services["service_offering_max_memory"]["memory"] = margin_mb
    self.debug("max memory: %s" %
               self.services["service_offering_max_memory"]["memory"])
    service_offering_max_memory = ServiceOffering.create(
        self.apiclient,
        self.services["service_offering_max_memory"]
    )
    # Nearly fill host_1 with a VM sized to its remaining capacity.
    VirtualMachine.create(
        self.apiclient,
        self.services["virtual_machine"],
        accountid=self.account.name,
        domainid=self.account.domainid,
        serviceofferingid=service_offering_max_memory.id,
        hostid=host_1.id
    )
    # Host 1 has only 1024 MB memory available now after deploying the instance
    # We are trying to deploy an instance with 2048 MB memory, this should automatically
    # get deployed on other host which has the enough capacity
    self.debug("Trying to deploy instance with memory requirement more than that is available on\
            the first host")
    self.debug("Deploying VM in account: %s" % self.account.name)
    # Spawn an instance in that network
    virtual_machine = VirtualMachine.create(
        self.apiclient,
        self.services["virtual_machine"],
        accountid=self.account.name,
        domainid=self.account.domainid,
        serviceofferingid=self.service_offering.id
    )
    vms = VirtualMachine.list(
        self.apiclient,
        id=virtual_machine.id,
        listall=True
    )
    self.assertEqual(
        isinstance(vms, list),
        True,
        "List VMs should return valid response for deployed VM"
    )
    self.assertNotEqual(
        len(vms),
        0,
        "List VMs should return valid response for deployed VM"
    )
    vm = vms[0]
    self.assertEqual(
        vm.state,
        "Running",
        "Deployed VM should be in RUnning state"
    )
    # DRS must have placed the second VM on a different host.
    self.assertNotEqual(
        vm.hostid,
        host_1.id,
        "Host Ids of two should not match as one host is full"
    )
    self.debug("The host ids of two virtual machines are different as expected\
            they are %s and %s" % (vm.hostid, host_1.id))
    return
def test_08_migrate_vm(self):
    """Deploy a VM on one suitable host and live-migrate it to another.

    Steps:
        1. Check the environment has enough hosts for migration.
        2. Deploy a VM on a chosen host, with a second host available.
        3. Migrate the VM and verify it landed on the destination host.
    """
    routing_hosts = Host.list(
        self.apiclient,
        zoneid=self.zone.id,
        type='Routing'
    )
    self.assertEqual(validateList(routing_hosts)[0], PASS, "hosts list validation failed")
    if len(routing_hosts) < 2:
        self.skipTest("At least two hosts should be present in the zone for migration")

    hv_type = str(get_hypervisor_type(self.apiclient)).lower()
    # KVM can only live-migrate between hosts of the same cluster, while
    # XenServer and VMware can cross clusters with the help of XenMotion
    # and Vmotion respectively.
    if hv_type != "kvm":
        candidate_hosts = routing_hosts
    else:
        cluster_ids = [h.clusterid for h in routing_hosts]
        # Keep only cluster ids that occur more than once, i.e. clusters
        # holding at least two of the listed hosts.
        shared_ids = [cid for idx, cid in enumerate(cluster_ids)
                      if cluster_ids.count(cid) > 1]
        if len(shared_ids) <= 1:
            self.skipTest("In KVM, Live Migration needs two hosts within same cluster")
        candidate_hosts = [h for h in routing_hosts if h.clusterid == shared_ids[0]]

    target_host, migrate_host = candidate_hosts[0], candidate_hosts[1]

    # Deploy the VM pinned to the source host.
    self.vm_to_migrate = VirtualMachine.create(
        self.api_client,
        self.services["small"],
        accountid=self.account.name,
        domainid=self.account.domainid,
        serviceofferingid=self.small_offering.id,
        mode=self.services["mode"],
        hostid=target_host.id
    )
    self.debug("Migrating VM-ID: %s to Host: %s" % (
        self.vm_to_migrate.id,
        migrate_host.id
    ))
    self.vm_to_migrate.migrate(self.api_client, migrate_host.id)

    migrated = list_virtual_machines(
        self.apiclient,
        id=self.vm_to_migrate.id
    )
    self.assertNotEqual(
        migrated,
        None,
        "Check virtual machine is listed"
    )
    vm_after_migration = migrated[0]
    self.assertEqual(
        vm_after_migration.id,
        self.vm_to_migrate.id,
        "Check virtual machine ID of migrated VM"
    )
    self.assertEqual(
        vm_after_migration.hostid,
        migrate_host.id,
        "Check destination hostID of migrated VM"
    )
    return
def setUp(self):
    """Build the fixtures for a VM HA test.

    Deploys an HA-tagged VM on a cluster that has at least two Routing
    hosts, then installs simulator mocks that make health/ping/state
    checks against the VM's host report failure, so the host appears
    down to the management server.
    """
    self.testdata = TestData().testdata
    self.apiclient = self.testClient.getApiClient()

    # Get Zone, Domain and Default Built-in template
    self.domain = get_domain(self.apiclient, self.testdata)
    self.zone = get_zone(self.apiclient, self.testdata)
    self.testdata["mode"] = self.zone.networktype
    self.template = get_template(self.apiclient, self.zone.id, self.testdata["ostype"])

    # Pick the first cluster holding at least two Routing hosts; HA needs
    # a second host the VM can be restarted on.
    self.hosts = []
    suitablecluster = None
    clusters = Cluster.list(self.apiclient)
    self.assertTrue(isinstance(clusters, list) and len(clusters) > 0, msg = "No clusters found")
    for cluster in clusters:
        self.hosts = Host.list(self.apiclient, clusterid=cluster.id, type='Routing')
        if isinstance(self.hosts, list) and len(self.hosts) >= 2:
            suitablecluster = cluster
            break
    # If no cluster qualified, self.hosts still holds the last cluster's
    # hosts and this assertion fails the test early.
    self.assertTrue(isinstance(self.hosts, list) and len(self.hosts) >= 2, msg = "Atleast 2 hosts required in cluster for VM HA test")
    #update host tags so the HA service offering can only land on these hosts
    for host in self.hosts:
        Host.update(self.apiclient, id=host.id, hosttags=self.testdata["service_offering"]["hasmall"]["hosttags"])
    #create a user account
    self.account = Account.create(
        self.apiclient,
        self.testdata["account"],
        domainid=self.domain.id
    )
    #create a service offering
    self.service_offering = ServiceOffering.create(
        self.apiclient,
        self.testdata["service_offering"]["hasmall"]
    )
    #deploy ha vm
    self.virtual_machine = VirtualMachine.create(
        self.apiclient,
        self.testdata["virtual_machine"],
        accountid=self.account.name,
        zoneid=self.zone.id,
        domainid=self.account.domainid,
        serviceofferingid=self.service_offering.id,
        templateid=self.template.id
    )
    list_vms = VirtualMachine.list(self.apiclient, id=self.virtual_machine.id)
    self.debug(
        "Verify listVirtualMachines response for virtual machine: %s"\
        % self.virtual_machine.id
    )
    self.assertTrue(isinstance(list_vms, list) and len(list_vms) == 1, msg = "List VM response was empty")
    # Re-read the VM from the list response so hostid etc. are populated.
    self.virtual_machine = list_vms[0]
    # Simulator mocks: force the health, ping and VM-state checks on the
    # VM's current host to report failure.
    self.mock_checkhealth = SimulatorMock.create(
        apiclient=self.apiclient,
        command="CheckHealthCommand",
        zoneid=suitablecluster.zoneid,
        podid=suitablecluster.podid,
        clusterid=suitablecluster.id,
        hostid=self.virtual_machine.hostid,
        value="result:fail")
    self.mock_ping = SimulatorMock.create(
        apiclient=self.apiclient,
        command="PingCommand",
        zoneid=suitablecluster.zoneid,
        podid=suitablecluster.podid,
        clusterid=suitablecluster.id,
        hostid=self.virtual_machine.hostid,
        value="result:fail")
    self.mock_checkvirtualmachine = SimulatorMock.create(
        apiclient=self.apiclient,
        command="CheckVirtualMachineCommand",
        zoneid=suitablecluster.zoneid,
        podid=suitablecluster.podid,
        clusterid=suitablecluster.id,
        hostid=self.virtual_machine.hostid,
        value="result:fail")
    # Pod-wide ping test also fails (no hostid: applies at pod scope).
    self.mock_pingtest = SimulatorMock.create(
        apiclient=self.apiclient,
        command="PingTestCommand",
        zoneid=suitablecluster.zoneid,
        podid=suitablecluster.podid,
        value="result:fail")
    # Fail CheckOnHostCommand from every *other* host too, so peer checks
    # also report the VM's host as unreachable.
    self.mock_checkonhost_list = []
    for host in self.hosts:
        if host.id != self.virtual_machine.hostid:
            self.mock_checkonhost_list.append(SimulatorMock.create(
                apiclient=self.apiclient,
                command="CheckOnHostCommand",
                zoneid=suitablecluster.zoneid,
                podid=suitablecluster.podid,
                clusterid=suitablecluster.id,
                hostid=host.id,
                value="result:fail"))
    #build cleanup list (mocks included so they are removed in tearDown)
    self.cleanup = [
        self.service_offering,
        self.account,
        self.mock_checkhealth,
        self.mock_ping,
        self.mock_checkvirtualmachine,
        self.mock_pingtest
    ]
    self.cleanup = self.cleanup + self.mock_checkonhost_list