def setUpClass(cls):
    cls.testClient = super(TestHostsForMigration, cls).getClsTestClient()
    cls.api_client = cls.testClient.getApiClient()
    cls.services = Services().services
    # Get Zone, Domain and templates
    cls.domain = get_domain(cls.api_client)
    cls.zone = get_zone(cls.api_client, cls.testClient.getZoneForTests())
    cls.template = get_template(cls.api_client, cls.zone.id, cls.services["ostype"])

    # Find a cluster with at least two routing hosts
    clusterWithSufficientHosts = None
    clusters = Cluster.list(cls.api_client, zoneid=cls.zone.id)
    for cluster in clusters:
        cls.hosts = Host.list(cls.api_client, clusterid=cluster.id, type="Routing")
        if len(cls.hosts) >= 2:
            clusterWithSufficientHosts = cluster
            break

    if clusterWithSufficientHosts is None:
        raise unittest.SkipTest("No Cluster with 2 hosts found")

    Host.update(cls.api_client, id=cls.hosts[1].id, hosttags="PREMIUM")

    cls.services["virtual_machine"]["zoneid"] = cls.zone.id
    cls.services["virtual_machine"]["template"] = cls.template.id

    cls.service_offering_with_tag = ServiceOffering.create(
        cls.api_client,
        cls.services["service_offering_with_tag"])
    cls._cleanup = [cls.service_offering_with_tag]
    return
def test_06_secondary_storage(self):
    """Check the status of secondary storage"""

    # Validate the following
    # 1. List secondary storage
    # 2. Check state is "Up" or not
    sec_storages = Host.list(
        self.apiclient,
        zoneid=self.zone.id,
        type='SecondaryStorageVM',
        listall=True)
    self.assertEqual(
        isinstance(sec_storages, list),
        True,
        "Check if listHosts returns a valid response")
    for sec_storage in sec_storages:
        self.assertEqual(
            sec_storage.state,
            'Up',
            "Secondary storage should be in Up state")
    return
def test_04_hosts(self):
    """Check the status of hosts"""

    # Validate the following
    # 1. List hosts with type=Routing
    # 2. Check state is "Up" or not
    hosts = Host.list(
        self.apiclient,
        zoneid=self.zone.id,
        type='Routing',
        listall=True)
    self.assertEqual(
        isinstance(hosts, list),
        True,
        "Check if listHosts returns a valid response")
    for host in hosts:
        self.assertEqual(
            host.state,
            'Up',
            "Host should be in Up state and running")
    return
def setUpClass(cls):
    testClient = super(TestDeployVmWithVariedPlanners, cls).getClsTestClient()
    cls.apiclient = testClient.getApiClient()
    cls.services = testClient.getParsedTestDataConfig()
    # Get Zone, Domain and templates
    cls.domain = get_domain(cls.apiclient)
    cls.zone = get_zone(cls.apiclient, testClient.getZoneForTests())
    cls.template = get_template(
        cls.apiclient,
        cls.zone.id,
        cls.services["ostype"])
    if cls.template == FAILED:
        assert False, "get_template() failed to return template with description %s" % cls.services["ostype"]
    cls.services["virtual_machine"]["zoneid"] = cls.zone.id
    cls.services["template"] = cls.template.id
    cls.services["zoneid"] = cls.zone.id

    cls.account = Account.create(
        cls.apiclient,
        cls.services["account"],
        domainid=cls.domain.id)
    cls.hosts = Host.list(cls.apiclient, type='Routing')
    cls.clusters = Cluster.list(cls.apiclient)
    cls.cleanup = [cls.account]
def test_02_migrate_vm(self):
    """Test migrate VM in project

    # Validate the following
    # 1. Create VM with custom disk offering in a project and check
    #    initial primary storage count
    # 2. List the hosts suitable for migrating the VM
    # 3. Migrate the VM and verify that primary storage count of project remains same"""
    try:
        hosts = Host.list(self.apiclient, virtualmachineid=self.vm.id, listall=True)
        self.assertEqual(validateList(hosts)[0], PASS, "hosts list validation failed")
        host = hosts[0]
        self.vm.migrate(self.apiclient, host.id)
    except Exception as e:
        self.fail("Exception occurred: %s" % e)

    expectedCount = self.initialResourceCount
    response = matchResourceCount(
        self.apiclient, expectedCount,
        RESOURCE_PRIMARY_STORAGE,
        projectid=self.project.id)
    self.assertEqual(response[0], PASS, response[1])
    return
def test_06_secondary_storage(self):
    """Check the status of secondary storage"""

    # Validate the following
    # 1. List secondary storage
    # 2. Check state is "Up" or not
    if self.hypervisor.lower() == 'simulator':
        self.skipTest("Hypervisor is simulator, skipping")
    sec_storages = Host.list(
        self.apiclient,
        zoneid=self.zone.id,
        type='SecondaryStorageVM',
        listall=True)
    if sec_storages is None:
        self.skipTest("SSVM is not provisioned yet, skipping")
    self.assertEqual(
        isinstance(sec_storages, list),
        True,
        "Check if listHosts returns a valid response")
    for sec_storage in sec_storages:
        self.assertEqual(
            sec_storage.state,
            'Up',
            "Secondary storage should be in Up state")
    return
def test_01_cluster_settings(self):
    """change cpu/mem.overprovisioning.factor at cluster level and
       verify the change
    """
    listHost = Host.list(self.apiclient, id=self.deployVmResponse.hostid)
    self.assertEqual(
        validateList(listHost)[0],
        PASS,
        "check list host response for host id %s" % self.deployVmResponse.hostid)
    Configurations.update(
        self.apiclient,
        clusterid=listHost[0].clusterid,
        name="mem.overprovisioning.factor",
        value=2)
    Configurations.update(
        self.apiclient,
        clusterid=listHost[0].clusterid,
        name="cpu.overprovisioning.factor",
        value=3)
    list_cluster = Cluster.list(self.apiclient, id=listHost[0].clusterid)
    self.assertEqual(
        validateList(list_cluster)[0],
        PASS,
        "check list cluster response for cluster id %s" % listHost[0].clusterid)
    self.assertEqual(
        int(list_cluster[0].cpuovercommitratio),
        3,
        "check the cpu overcommit value at cluster level")
    self.assertEqual(
        int(list_cluster[0].memoryovercommitratio),
        2,
        "check memory overcommit value at cluster level")
    Configurations.update(
        self.apiclient,
        clusterid=listHost[0].clusterid,
        name="mem.overprovisioning.factor",
        value=1)
    Configurations.update(
        self.apiclient,
        clusterid=listHost[0].clusterid,
        name="cpu.overprovisioning.factor",
        value=1)
    list_cluster1 = Cluster.list(self.apiclient, id=listHost[0].clusterid)
    self.assertEqual(
        validateList(list_cluster1)[0],
        PASS,
        "check the list cluster response for id %s" % listHost[0].clusterid)
    self.assertEqual(
        int(list_cluster1[0].cpuovercommitratio),
        1,
        "check the cpu overcommit value at cluster level")
    self.assertEqual(
        int(list_cluster1[0].memoryovercommitratio),
        1,
        "check memory overcommit value at cluster level")
def setUpClass(cls):
    cls.testClient = super(TestAttachVolumeISO, cls).getClsTestClient()
    cls.api_client = cls.testClient.getApiClient()
    cls.services = Services().services
    # Get Zone, Domain and templates
    cls.domain = get_domain(cls.api_client)
    cls.zone = get_zone(cls.api_client, cls.testClient.getZoneForTests())
    cls.pod = get_pod(cls.api_client, cls.zone.id)
    cls.services['mode'] = cls.zone.networktype
    cls.hypervisor = cls.testClient.getHypervisorInfo()
    if cls.hypervisor.lower() == 'lxc':
        if not find_storage_pool_type(cls.api_client, storagetype='rbd'):
            raise unittest.SkipTest(
                "RBD storage type is required for data volumes for LXC")
    cls.disk_offering = DiskOffering.create(
        cls.api_client,
        cls.services["disk_offering"])
    template = get_template(cls.api_client, cls.zone.id, cls.services["ostype"])
    cls.services["zoneid"] = cls.zone.id
    cls.services["virtual_machine"]["zoneid"] = cls.zone.id
    cls.services["iso"]["zoneid"] = cls.zone.id
    cls.services["virtual_machine"]["template"] = template.id
    # get max data volumes limit based on the hypervisor type and version
    listHost = Host.list(
        cls.api_client,
        type='Routing',
        zoneid=cls.zone.id,
        podid=cls.pod.id)
    ver = listHost[0].hypervisorversion
    hv = listHost[0].hypervisor
    cmd = listHypervisorCapabilities.listHypervisorCapabilitiesCmd()
    cmd.hypervisor = hv
    res = cls.api_client.listHypervisorCapabilities(cmd)
    cls.debug('Hypervisor Capabilities: {}'.format(res))
    for i in range(len(res)):
        if res[i].hypervisorversion == ver:
            break
    cls.max_data_volumes = int(res[i].maxdatavolumeslimit)
    cls.debug('max data volumes:{}'.format(cls.max_data_volumes))
    cls.services["volume"]["max"] = cls.max_data_volumes
    # Create VMs, NAT Rules etc
    cls.account = Account.create(
        cls.api_client,
        cls.services["account"],
        domainid=cls.domain.id)
    cls.service_offering = ServiceOffering.create(
        cls.api_client,
        cls.services["service_offering"])
    cls.virtual_machine = VirtualMachine.create(
        cls.api_client,
        cls.services["virtual_machine"],
        accountid=cls.account.name,
        domainid=cls.account.domainid,
        serviceofferingid=cls.service_offering.id)
    cls._cleanup = [cls.service_offering, cls.disk_offering, cls.account]
def migrate_router(self, router):
    """ Migrate the router """

    self.debug("Checking if the host is available for migration?")
    hosts = Host.list(self.api_client, zoneid=self.zone.id, type='Routing')
    self.assertEqual(
        isinstance(hosts, list),
        True,
        "List hosts should return a valid list")
    if len(hosts) < 2:
        self.skipTest(
            "No host available for migration. Test requires at least 2 hosts")
    # Remove the host of current VM from the hosts list
    hosts[:] = [host for host in hosts if host.id != router.hostid]
    host = hosts[0]
    self.debug("Validating if the network rules work properly or not?")
    self.debug("Migrating VM-ID: %s from %s to Host: %s" % (
        router.id,
        router.hostid,
        host.id))
    try:
        # Migrate the router
        cmd = migrateSystemVm.migrateSystemVmCmd()
        cmd.isAsync = "false"
        cmd.hostid = host.id
        cmd.virtualmachineid = router.id
        self.api_client.migrateSystemVm(cmd)
    except Exception as e:
        self.fail("Failed to migrate instance, %s" % e)

    self.debug("Waiting for router migration ....")
    time.sleep(240)

    # List routers to check state of router
    router_response = list_routers(self.api_client, id=router.id)
    self.assertEqual(
        isinstance(router_response, list),
        True,
        "Check list response returns a valid list")
    router.hostid = router_response[0].hostid
    self.assertEqual(
        router.hostid,
        host.id,
        "Migration to host %s failed. The router host is still %s" %
        (host.id, router.hostid))
    return
def make_all_hosts_secure(self):
    hosts = Host.list(self.apiclient, zoneid=self.zone.id, type='Routing')
    for host in hosts:
        # Provision a CA-signed certificate on each host; the command built
        # here is a provisionCertificate command, so it must be dispatched
        # through provisionCertificate, not updateConfiguration
        cmd = provisionCertificate.provisionCertificateCmd()
        cmd.hostid = host.id
        self.apiclient.provisionCertificate(cmd)
    for host in hosts:
        self.check_connection(secured='true', host=host)
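# A hedged usage sketch for the helper above: secure every host, live-migrate a
# VM, and confirm the destination host still reports a secured agent connection.
# The test name and the self.vm attribute are hypothetical; make_all_hosts_secure(),
# list_all_hosts_in_zone(), check_connection(), and VirtualMachine.migrate() are
# the calls already present in this section.
def test_secured_migration_sketch(self):
    self.make_all_hosts_secure()
    hosts = self.list_all_hosts_in_zone(self.zone.id)
    # Pick any host other than the one currently running the (assumed) VM
    target = next(host for host in hosts if host.id != self.vm.hostid)
    self.vm.migrate(self.apiclient, target.id)
    self.check_connection(secured='true', host=target)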
def list_all_hosts_in_zone(self, zone_id):
    hosts = Host.list(
        self.apiclient,
        type='Routing',
        resourcestate='Enabled',
        state='Up',
        zoneid=zone_id)
    return hosts
def test_03_reconnect_host(self):
    """ Test reconnect Host which has VPC elements
    """

    # Steps:
    # 1. Reconnect one of the hosts on which the VPC Virtual Router is present.
    # Validate the following
    # 1. Host should successfully reconnect.
    # 2. Network connectivity to all the VMs on the host should not be
    #    affected due to reconnection.

    try:
        timeout = self.services["timeout"]
        while True:
            list_host_response = Host.list(
                self.apiclient,
                id=self.vpcvr.hostid,
                resourcestate="Enabled")
            if list_host_response is not None:
                break
            elif timeout == 0:
                raise Exception("Failed to list the Host in Up State")
            time.sleep(self.services["sleep"])
            timeout = timeout - 1
        self.debug("Verified that the Host is in Up State")
    except Exception:
        self.fail("Failed to find the Host in Up State")

    self.debug("Reconnecting the host where VPC VR is running")
    try:
        Host.reconnect(self.apiclient, id=self.vpcvr.hostid)
    except Exception as e:
        self.fail("Failed to reconnect to host: %s" % e)

    self.debug("Check the status of router after migration")
    routers = Router.list(
        self.apiclient,
        id=self.vpcvr.id,
        listall=True)
    self.assertEqual(
        isinstance(routers, list),
        True,
        "List routers shall return the valid response")
    self.assertEqual(
        routers[0].state,
        "Running",
        "Router state should be running")
    # TODO: Check for the network connectivity
    return
def test_10_list_volumes(self):
    """
    # Validate the following
    # 1. List the Root Volume and wait until it has the newly introduced attributes
    # 2. Verify the returned attributes are not None while the instance is running
    """
    list_vm = VirtualMachine.list(self.apiclient, id=self.virtual_machine.id)[0]
    host = Host.list(
        self.apiclient,
        type='Routing',
        id=list_vm.hostid)[0]
    list_pods = get_pod(self.apiclient, self.zone.id, host.podid)
    root_volume = self.wait_for_attributes_and_return_root_vol()
    self.assertTrue(hasattr(root_volume, "utilization"))
    self.assertTrue(root_volume.utilization is not None)
    self.assertTrue(hasattr(root_volume, "virtualsize"))
    self.assertTrue(root_volume.virtualsize is not None)
    self.assertTrue(hasattr(root_volume, "physicalsize"))
    self.assertTrue(root_volume.physicalsize is not None)
    self.assertTrue(hasattr(root_volume, "vmname"))
    self.assertEqual(root_volume.vmname, list_vm.name)
    self.assertTrue(hasattr(root_volume, "clustername"))
    self.assertTrue(root_volume.clustername is not None)
    self.assertTrue(hasattr(root_volume, "clusterid"))
    self.assertTrue(root_volume.clusterid is not None)
    self.assertTrue(hasattr(root_volume, "storageid"))
    self.assertTrue(root_volume.storageid is not None)
    self.assertTrue(hasattr(root_volume, "storage"))
    self.assertTrue(root_volume.storage is not None)
    self.assertTrue(hasattr(root_volume, "zoneid"))
    self.assertEqual(root_volume.zoneid, self.zone.id)
    self.assertTrue(hasattr(root_volume, "zonename"))
    self.assertEqual(root_volume.zonename, self.zone.name)
    self.assertTrue(hasattr(root_volume, "podid"))
    self.assertEqual(root_volume.podid, list_pods.id)
    self.assertTrue(hasattr(root_volume, "podname"))
    self.assertEqual(root_volume.podname, list_pods.name)
def setUpClass(cls):
    cls.testClient = super(TestAttachDetachVolume, cls).getClsTestClient()
    cls.api_client = cls.testClient.getApiClient()
    cls.services = Services().services
    # Get Zone, Domain and templates
    cls.domain = get_domain(cls.api_client)
    cls.zone = get_zone(cls.api_client, cls.testClient.getZoneForTests())
    cls.pod = get_pod(cls.api_client, cls.zone.id)
    cls.services['mode'] = cls.zone.networktype
    cls._cleanup = []
    cls.unsupportedStorageType = False
    cls.hypervisor = cls.testClient.getHypervisorInfo()
    cls.disk_offering = DiskOffering.create(
        cls.api_client,
        cls.services["disk_offering"])
    cls._cleanup.append(cls.disk_offering)
    template = get_template(cls.api_client, cls.zone.id, cls.services["ostype"])
    cls.services["zoneid"] = cls.zone.id
    cls.services["virtual_machine"]["zoneid"] = cls.zone.id
    cls.services["virtual_machine"]["template"] = template.id
    # get max data volumes limit based on the hypervisor type and version
    listHost = Host.list(
        cls.api_client,
        type='Routing',
        zoneid=cls.zone.id,
        podid=cls.pod.id)
    ver = listHost[0].hypervisorversion
    hv = listHost[0].hypervisor
    cmd = listHypervisorCapabilities.listHypervisorCapabilitiesCmd()
    cmd.hypervisor = hv  # cls.services["virtual_machine"]["hypervisor"]
    res = cls.api_client.listHypervisorCapabilities(cmd)
    cls.debug('Hypervisor Capabilities: {}'.format(res))
    for i in range(len(res)):
        if res[i].hypervisorversion == ver:
            break
    cls.max_data_volumes = int(res[i].maxdatavolumeslimit)
    cls.debug('max data volumes:{}'.format(cls.max_data_volumes))
    cls.services["volume"]["max"] = cls.max_data_volumes
    # Create VMs, NAT Rules etc
    cls.account = Account.create(
        cls.api_client,
        cls.services["account"],
        domainid=cls.domain.id)
    cls._cleanup.append(cls.account)
    cls.service_offering = ServiceOffering.create(
        cls.api_client,
        cls.services["service_offering"])
    cls._cleanup.append(cls.service_offering)
    cls.virtual_machine = VirtualMachine.create(
        cls.api_client,
        cls.services["virtual_machine"],
        accountid=cls.account.name,
        domainid=cls.account.domainid,
        serviceofferingid=cls.service_offering.id)
def test_01_deploy_vm_on_specific_host(self):
    hosts = Host.list(self.apiclient, zoneid=self.zone.id, type='Routing')
    target_id = hosts[0].id

    vm = self.deploy_vm(target_id)

    self.assertEqual(
        target_id,
        vm.hostid,
        "VM instance was not deployed on target host ID")
    self.destroy_vm(vm.id)
def test_02_Overcommit_factor(self):
    """change mem.overprovisioning.factor and verify vm memory
    """
    listHost = Host.list(self.apiclient, id=self.deployVmResponse.hostid)
    self.assertEqual(
        validateList(listHost)[0],
        PASS,
        "check list host for host id %s" % self.deployVmResponse.hostid)
    if listHost[0].hypervisor.lower() not in ['kvm', 'xenserver']:
        self.skipTest(
            "Skipping test because of unsupported hypervisor type %s" % listHost[0].hypervisor)
    Configurations.update(
        self.apiclient,
        clusterid=listHost[0].clusterid,
        name="mem.overprovisioning.factor",
        value=1)
    self.deployVmResponse.stop(self.apiclient)
    self.deployVmResponse.start(self.apiclient)
    if listHost[0].hypervisor.lower() == 'xenserver':
        k = ssh_xen_host(
            self.testdata["configurableData"]["password"],
            listHost[0].ipaddress,
            self.deployVmResponse.instancename)
    elif listHost[0].hypervisor.lower() == 'kvm':
        k = ssh_kvm_host(
            self.testdata["configurableData"]["password"],
            listHost[0].ipaddress,
            self.deployVmResponse.instancename)
    self.assertEqual(
        k[0],
        k[1],
        "Check static max, min on host for overcommit 1")
    Configurations.update(
        self.apiclient,
        clusterid=listHost[0].clusterid,
        name="mem.overprovisioning.factor",
        value=2)
    self.deployVmResponse.stop(self.apiclient)
    self.deployVmResponse.start(self.apiclient)
    if listHost[0].hypervisor.lower() == 'xenserver':
        k1 = ssh_xen_host(
            self.testdata["configurableData"]["password"],
            listHost[0].ipaddress,
            self.deployVmResponse.instancename)
    elif listHost[0].hypervisor.lower() == 'kvm':
        time.sleep(200)
        k1 = ssh_kvm_host(
            self.testdata["configurableData"]["password"],
            listHost[0].ipaddress,
            self.deployVmResponse.instancename)
    self.assertEqual(
        k1[0],
        2 * k1[1],
        "Check static max, min on host for overcommit 2")
def test_06_migrate_vm_with_multiple_nic(self):
    """ Migrate a VM with multiple NICs

        Steps:
        # 1) Create a VM with multiple NICs
        # 2) Configure secondary IPs on the VM
        # 3) Try to stop/start the VM
        # 4) Ping the IPs of the VM
    :return:
    """
    # Skipping adding a secondary IP to the NIC since it is already
    # done in the previous test cases
    virtual_machine = VirtualMachine.list(
        self.apiclient,
        id=self.virtual_machine1.id)
    old_host_id = virtual_machine[0]['hostid']
    try:
        hosts = Host.list(
            self.apiclient,
            virtualmachineid=self.virtual_machine1.id,
            listall=True)
        self.assertEqual(
            validateList(hosts)[0],
            PASS,
            "hosts list validation failed")
        # Get a host which is not already assigned to the VM
        for host in hosts:
            if host.id != old_host_id:
                host_id = host.id
                break
        self.virtual_machine1.migrate(self.apiclient, host_id)
    except Exception as e:
        self.fail("Exception occurred: %s" % e)
    # List the VM again
    virtual_machine = VirtualMachine.list(
        self.apiclient,
        id=self.virtual_machine1.id)
    new_host_id = virtual_machine[0]['hostid']
    self.assertNotEqual(
        old_host_id,
        new_host_id,
        "Migration of VM to new host failed")
    self.verify_network_rules(self.virtual_machine1.id)
def test_03_cluster_capacity_check(self):
    """change cpu/mem.overprovisioning.factor at cluster level and
       verify cluster capacity
    """
    listHost = Host.list(self.apiclient, id=self.deployVmResponse.hostid)
    self.assertEqual(
        validateList(listHost)[0],
        PASS,
        "check list host for host id %s" % self.deployVmResponse.hostid)
    Configurations.update(
        self.apiclient,
        clusterid=listHost[0].clusterid,
        name="mem.overprovisioning.factor",
        value=1)
    Configurations.update(
        self.apiclient,
        clusterid=listHost[0].clusterid,
        name="cpu.overprovisioning.factor",
        value=1)
    time.sleep(self.wait_time)
    capacity = Capacities.list(self.apiclient, clusterid=listHost[0].clusterid)
    self.assertEqual(
        validateList(capacity)[0],
        PASS,
        "check list capacity response for cluster id %s" % listHost[0].clusterid)
    cpu, mem = capacity_parser(capacity)

    Configurations.update(
        self.apiclient,
        clusterid=listHost[0].clusterid,
        name="mem.overprovisioning.factor",
        value=2)
    Configurations.update(
        self.apiclient,
        clusterid=listHost[0].clusterid,
        name="cpu.overprovisioning.factor",
        value=2)
    time.sleep(self.wait_time)
    capacity1 = Capacities.list(self.apiclient, clusterid=listHost[0].clusterid)
    self.assertEqual(
        validateList(capacity1)[0],
        PASS,
        "check list capacity response for cluster id %s" % listHost[0].clusterid)
    cpu1, mem1 = capacity_parser(capacity1)

    self.assertEqual(2 * cpu[0], cpu1[0], "check total cpu capacity")
    self.assertEqual(2 * cpu[1], cpu1[1], "check cpu capacity used")
    self.assertEqual(cpu[2], cpu1[2], "check cpu capacity % used")
    self.assertEqual(2 * mem[0], mem1[0], "check total memory capacity")
    self.assertEqual(2 * mem[1], mem1[1], "check memory capacity used")
    self.assertEqual(mem[2], mem1[2], "check memory capacity % used")
def test_12_resize_volume_with_only_size_parameter(self):
    """Test resize a volume by providing only the size parameter;
       the disk offering id is not mandatory"""
    # Verify the new size is what we wanted it to be.
    self.debug(
        "Attaching volume (ID: %s) to VM (ID: %s)" %
        (self.volume.id, self.virtual_machine.id))
    self.virtual_machine.attach_volume(self.apiClient, self.volume)
    self.attached = True
    hosts = Host.list(self.apiClient, id=self.virtual_machine.hostid)
    self.assertTrue(isinstance(hosts, list))
    self.assertTrue(len(hosts) > 0)
    self.debug("Found %s host" % hosts[0].hypervisor)

    if hosts[0].hypervisor == "XenServer":
        self.virtual_machine.stop(self.apiClient)
    elif hosts[0].hypervisor.lower() == "hyperv":
        self.skipTest("Resize Volume is unsupported on Hyper-V")

    # resize the data disk
    self.debug("Resize Volume ID: %s" % self.volume.id)
    cmd = resizeVolume.resizeVolumeCmd()
    cmd.id = self.volume.id
    cmd.size = 20
    self.apiClient.resizeVolume(cmd)

    count = 0
    success = False
    while count < 3:
        list_volume_response = Volume.list(
            self.apiClient,
            id=self.volume.id,
            type='DATADISK')
        for vol in list_volume_response:
            if vol.id == self.volume.id and int(vol.size) == (20 * (1024 ** 3)) and vol.state == 'Ready':
                success = True
        if success:
            break
        else:
            time.sleep(10)
            count += 1

    self.assertEqual(
        success,
        True,
        "Check if the data volume resized appropriately")

    # start the vm if it is on xenserver
    if hosts[0].hypervisor == "XenServer":
        self.virtual_machine.start(self.apiClient)
        time.sleep(30)
    return
def waitUntilHostInState(self, hostId, state="Up", interval=5, retries=20):
    while retries > -1:
        print("Waiting for host: %s to be %s. %s retries left." % (hostId, state, retries))
        time.sleep(interval)
        # listHosts filters on 'id'; fetch the specific host being polled
        host = Host.list(self.apiclient, id=hostId, type='Routing')[0]
        if host.state != state:
            if retries >= 0:
                retries = retries - 1
                continue
        else:
            print("Host %s now showing as %s" % (hostId, state))
            return
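# A minimal sketch of how the poller above is typically paired with host
# maintenance. The method name is hypothetical; prepareHostForMaintenance and
# cancelHostMaintenance are the same Marvin command classes used elsewhere in
# this section, and host is assumed to be a routing-host response object.
def cycle_host_maintenance_sketch(self, host):
    cmd = prepareHostForMaintenance.prepareHostForMaintenanceCmd()
    cmd.id = host.id
    self.apiclient.prepareHostForMaintenance(cmd)
    # ... exercise the deployment while the host drains ...
    cmd = cancelHostMaintenance.cancelHostMaintenanceCmd()
    cmd.id = host.id
    self.apiclient.cancelHostMaintenance(cmd)
    # Block until the agent reports the host as Up again
    self.waitUntilHostInState(host.id, state="Up")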
def setUpClass(cls):
    cls.testClient = super(TestAttachVolumeISO, cls).getClsTestClient()
    cls.api_client = cls.testClient.getApiClient()
    cls.services = Services().services
    # Get Zone, Domain and templates
    cls.domain = get_domain(cls.api_client)
    cls.zone = get_zone(cls.api_client, cls.testClient.getZoneForTests())
    cls.pod = get_pod(cls.api_client, cls.zone.id)
    cls.services["mode"] = cls.zone.networktype
    cls._cleanup = []
    cls.unsupportedStorageType = False
    cls.hypervisor = cls.testClient.getHypervisorInfo()
    if cls.hypervisor.lower() == "lxc":
        if not find_storage_pool_type(cls.api_client, storagetype="rbd"):
            cls.unsupportedStorageType = True
            return
    cls.disk_offering = DiskOffering.create(cls.api_client, cls.services["disk_offering"])
    cls._cleanup.append(cls.disk_offering)
    template = get_template(cls.api_client, cls.zone.id, cls.services["ostype"])
    cls.services["zoneid"] = cls.zone.id
    cls.services["virtual_machine"]["zoneid"] = cls.zone.id
    cls.services["iso"]["zoneid"] = cls.zone.id
    cls.services["virtual_machine"]["template"] = template.id
    # get max data volumes limit based on the hypervisor type and version
    listHost = Host.list(
        cls.api_client,
        type="Routing",
        zoneid=cls.zone.id,
        podid=cls.pod.id)
    ver = listHost[0].hypervisorversion
    hv = listHost[0].hypervisor
    cmd = listHypervisorCapabilities.listHypervisorCapabilitiesCmd()
    cmd.hypervisor = hv
    res = cls.api_client.listHypervisorCapabilities(cmd)
    cls.debug("Hypervisor Capabilities: {}".format(res))
    for i in range(len(res)):
        if res[i].hypervisorversion == ver:
            break
    cls.max_data_volumes = int(res[i].maxdatavolumeslimit)
    cls.debug("max data volumes:{}".format(cls.max_data_volumes))
    cls.services["volume"]["max"] = cls.max_data_volumes
    # Create VMs, NAT Rules etc
    cls.account = Account.create(cls.api_client, cls.services["account"], domainid=cls.domain.id)
    cls._cleanup.append(cls.account)
    cls.service_offering = ServiceOffering.create(cls.api_client, cls.services["service_offering"])
    cls._cleanup.append(cls.service_offering)
    cls.virtual_machine = VirtualMachine.create(
        cls.api_client,
        cls.services["virtual_machine"],
        accountid=cls.account.name,
        domainid=cls.account.domainid,
        serviceofferingid=cls.service_offering.id)
def check_storage_pools(self, virtualmachineid):
    """
    List storage pools available to the VM
    """
    vm = VirtualMachine.list(self.apiClient, id=virtualmachineid)[0]
    hostid = vm.hostid
    host = Host.list(self.apiClient, id=hostid)[0]
    clusterid = host.clusterid
    storage_pools = StoragePool.list(self.apiClient, clusterid=clusterid)
    if len(storage_pools) < 2:
        self.skipTest(
            "At least two accessible primary storage pools needed for the VM to perform this test")
    return storage_pools
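# A hedged follow-on sketch: use the pool list from check_storage_pools() to
# live-migrate the VM's ROOT volume to a different pool in the same cluster.
# The method name is hypothetical; Volume.list and the storageid field are used
# as elsewhere in this section, and Volume.migrate is assumed to wrap the
# migrateVolume API in this Marvin version.
def migrate_root_volume_sketch(self, virtualmachineid):
    pools = self.check_storage_pools(virtualmachineid)
    root_vol = Volume.list(
        self.apiClient,
        virtualmachineid=virtualmachineid,
        type='ROOT',
        listall=True)[0]
    # Choose any pool other than the one currently backing the ROOT volume
    target = [pool for pool in pools if pool.id != root_vol.storageid][0]
    Volume.migrate(
        self.apiClient,
        volumeid=root_vol.id,
        storageid=target.id,
        livemigrate=True)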
def tearDownClass(cls):
    if cls.hypervisor.lower() in ["kvm"]:
        cls.apiclient = super(Test03SecuredVmMigration, cls).getClsTestClient().getApiClient()
        cls.hosts = Host.list(
            cls.apiclient,
            zoneid=cls.zone.id,
            type='Routing',
            hypervisor='KVM')
        # Re-secure every host before class-level cleanup runs
        for host in cls.hosts:
            Test03SecuredVmMigration.secure_host(host)
    try:
        cleanup_resources(cls.apiclient, cls._cleanup)
    except Exception as e:
        raise Exception("Warning: Exception during cleanup : %s" % e)
def test_03_deploy_vm_on_specific_pod(self):
    pods = Pod.list(self.apiclient)
    target_pod = pods[0]

    # Get a host by Pod ID
    host = Host.list(self.apiclient, podid=target_pod.id)

    # deploy vm on pod
    cmd = deployVirtualMachine.deployVirtualMachineCmd()
    cmd.zoneid = target_pod.zoneid
    cmd.serviceofferingid = self.service_offering.id
    template = get_template(self.apiclient, hypervisor=host[0].hypervisor)
    cmd.templateid = template.id
    cmd.podid = target_pod.id
    vm = self.apiclient.deployVirtualMachine(cmd)

    vm_host = Host.list(self.apiclient, id=vm.hostid)
    self.assertEqual(
        target_pod.id,
        vm_host[0].podid,
        "VM was not deployed on the target pod")
    self.destroy_vm(vm.id)
def tearDownClass(cls):
    try:
        # Cleanup resources used
        cleanup_resources(cls.api_client, cls._cleanup)
        for host in cls.hosts:
            Host.cancelMaintenance(cls.api_client, id=host.id)
            hosts_states = Host.list(cls.api_client, id=host.id, listall=True)
            if hosts_states[0].resourcestate != 'Enabled':
                raise Exception("Failed to cancel maintenance mode on %s" % (host.name))
    except Exception as e:
        raise Exception("Warning: Exception during cleanup : %s" % e)
    return
def getRouterProcessStatus(self, router, cmd):
    if router.id not in self.routerDetailsMap or self.routerDetailsMap[router.id] is None:
        connect_ip = self.apiclient.connection.mgtSvr
        connect_user = self.apiclient.connection.user
        connect_passwd = self.apiclient.connection.passwd
        hypervisor = self.hypervisor
        if self.hypervisor.lower() not in ('vmware', 'hyperv'):
            hosts = Host.list(
                self.apiclient,
                zoneid=router.zoneid,
                type='Routing',
                state='Up',
                id=router.hostid)
            self.assertEqual(
                isinstance(hosts, list),
                True,
                "Check list host returns a valid list")
            host = hosts[0]
            connect_ip = host.ipaddress
            hypervisor = None
            try:
                connect_user, connect_passwd = get_host_credentials(
                    self.config, host.ipaddress)
            except KeyError:
                self.skipTest(
                    "Marvin configuration has no host credentials to check router services")
        details = {}
        details['connect_ip'] = connect_ip
        details['connect_user'] = connect_user
        details['connect_passwd'] = connect_passwd
        details['hypervisor'] = hypervisor
        self.routerDetailsMap[router.id] = details
    result = get_process_status(
        self.routerDetailsMap[router.id]['connect_ip'],
        22,
        self.routerDetailsMap[router.id]['connect_user'],
        self.routerDetailsMap[router.id]['connect_passwd'],
        router.linklocalip,
        cmd,
        hypervisor=self.routerDetailsMap[router.id]['hypervisor'])
    self.assertTrue(
        type(result) == list,
        "%s on router %s returned invalid result" % (cmd, router.id))
    result = '\n'.join(result)
    return result
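# Example usage of the helper above; the dnsmasq check is illustrative, not a
# test from this section. getRouterProcessStatus() already returns the joined
# command output, so a simple substring assertion is enough.
def check_dnsmasq_on_router_sketch(self, router):
    result = self.getRouterProcessStatus(router, "service dnsmasq status")
    self.assertTrue(
        "running" in result.lower(),
        "dnsmasq should be running on router %s" % router.id)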
def tearDownClass(cls):
    try:
        for hostid in cls.disabledHosts:
            hosts = Host.list(cls.apiclient, id=hostid)
            assert validateList(hosts)[0] == PASS, "hosts list validation failed"
            if hosts[0].resourcestate.lower() == DISABLED.lower():
                cmd = updateHost.updateHostCmd()
                cmd.id = hostid
                cmd.resourcestate = ENABLED
                cmd.allocationstate = ENABLE
                cls.apiclient.updateHost(cmd)
        cleanup_resources(cls.apiclient, cls._cleanup)
    except Exception as e:
        raise Exception("Warning: Exception during cleanup : %s" % e)
def test_deployVmOnGivenHost(self):
    """Test deploy VM on specific host
    """

    # Steps for validation
    # 1. as admin list available hosts that are Up
    # 2. deployVM with hostid=above host
    # 3. listVirtualMachines
    # 4. destroy VM
    # Validate the following
    # 1. listHosts returns at least one host in Up state
    # 2. VM should be in Running state
    # 3. VM should be on the host that it was deployed on
    hosts = Host.list(
        self.apiclient,
        zoneid=self.zone.id,
        type="Routing",
        state="Up",
        listall=True)
    self.assertEqual(
        isinstance(hosts, list),
        True,
        "CS should have at least one host Up and Running")

    host = hosts[0]
    self.debug("Deploying VM on host: %s" % host.name)
    try:
        vm = VirtualMachine.create(
            self.apiclient,
            self.services["virtual_machine"],
            templateid=self.template.id,
            accountid=self.account.name,
            domainid=self.account.domainid,
            serviceofferingid=self.service_offering.id,
            hostid=host.id)
        self.debug("Deploy VM succeeded")
    except Exception as e:
        self.fail("Deploy VM failed with exception: %s" % e)

    self.debug("Checking the state of deployed VM")
    vms = VirtualMachine.list(
        self.apiclient,
        id=vm.id,
        listall=True,
        account=self.account.name,
        domainid=self.account.domainid)
    self.assertEqual(
        isinstance(vms, list),
        True,
        "List Vm should return a valid response")

    vm_response = vms[0]
    self.assertEqual(
        vm_response.state,
        "Running",
        "VM should be in running state after deployment")
    self.assertEqual(
        vm_response.hostid,
        host.id,
        "Host id where VM is deployed should match")
    return
def set_hosts_hugepages(cls):
    hosts_hugepages = []
    listHost = Host.list(cls.apiclient, type='Routing', zoneid=cls.zone.id)
    for host in listHost:
        if host.hypervisor.lower() == 'kvm':
            sshClient = SshClient(
                host.ipaddress,
                port=22,
                user=cls.hostConfig["username"],
                passwd=cls.hostConfig["password"])
            # Record the current value before overriding it
            result = sshClient.execute("sysctl -n vm.nr_hugepages")
            sshClient.execute("sysctl -w vm.nr_hugepages=1024")
            if result and len(result) > 0:
                hosts_hugepages.append({
                    "ipaddress": host.ipaddress,
                    "vm.nr_hugepages": result[0].strip()})
    return hosts_hugepages
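# The helper above records each host's original vm.nr_hugepages value, which
# suggests a teardown counterpart. A minimal sketch, assuming the same SshClient
# wrapper and hostConfig credentials; the method name is hypothetical.
def restore_hosts_hugepages(cls, hosts_hugepages):
    for entry in hosts_hugepages:
        sshClient = SshClient(
            entry["ipaddress"],
            port=22,
            user=cls.hostConfig["username"],
            passwd=cls.hostConfig["password"])
        # Revert to the value captured by set_hosts_hugepages()
        sshClient.execute("sysctl -w vm.nr_hugepages=%s" % entry["vm.nr_hugepages"])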
def tearDownClass(cls):
    try:
        for host in cls.hosts:
            Host.cancelMaintenance(cls.api_client, id=host.id)
            hosts_states = Host.list(cls.api_client, id=host.id, listall=True)
            if hosts_states[0].resourcestate != 'Enabled':
                raise Exception("Failed to cancel maintenance mode on %s" % (host.name))
    except Exception as e:
        raise Exception(
            "Warning: Exception during resetting hosts maintenance : %s" % e)
    finally:
        super(TestVPCHostMaintenance, cls).tearDownClass()
    return
def check_connection(self, secured, host, retries=5, interval=5):
    while retries > -1:
        time.sleep(interval)
        # listHosts filters on 'id'; fetch the specific host being checked
        host = Host.list(
            self.apiclient,
            zoneid=self.zone.id,
            id=host.id,
            type='Routing')[0]
        if host.details.secured != secured:
            if retries >= 0:
                retries = retries - 1
                continue
        else:
            return

    raise Exception("Host communication is not as expected: " + secured +
                    ". Instead it's: " + host.details.secured)
def setUpClass(cls):
    testClient = super(TestDisableEnableHost, cls).getClsTestClient()
    cls.apiclient = testClient.getApiClient()
    cls.testdata = testClient.getParsedTestDataConfig()
    cls.hypervisor = cls.testClient.getHypervisorInfo()
    cls.snapshotSupported = True

    # Get Zone, Domain and templates
    cls.domain = get_domain(cls.apiclient)
    cls.zone = get_zone(cls.apiclient, testClient.getZoneForTests())
    cls.pod = get_pod(cls.apiclient, zone_id=cls.zone.id)
    hostList = Host.list(cls.apiclient, zoneid=cls.zone.id, type="Routing")
    clusterList = Cluster.list(cls.apiclient, id=hostList[0].clusterid)
    cls.host = Host(hostList[0].__dict__)
    cls.cluster = Cluster(clusterList[0].__dict__)
    cls.template = get_template(cls.apiclient, cls.zone.id, cls.testdata["ostype"])
    cls._cleanup = []
    cls.disabledHosts = []

    try:
        cls.service_offering = ServiceOffering.create(
            cls.apiclient,
            cls.testdata["service_offering"])
        cls._cleanup.append(cls.service_offering)
        cls.disk_offering = DiskOffering.create(
            cls.apiclient,
            cls.testdata["disk_offering"])
        cls._cleanup.append(cls.disk_offering)
        # Create an account
        cls.account = Account.create(
            cls.apiclient,
            cls.testdata["account"],
            domainid=cls.domain.id)
        cls._cleanup.append(cls.account)
        # Create root admin account
        cls.admin_account = Account.create(
            cls.apiclient,
            cls.testdata["account2"],
            admin=True)
        cls._cleanup.append(cls.admin_account)
        # Create user api client of the account
        cls.userapiclient = testClient.getUserApiClient(
            UserName=cls.account.name,
            DomainName=cls.account.domain)
    except Exception as e:
        cls.tearDownClass()
        raise e
    return
def test_08_resize_volume(self):
    """Test resize a volume"""
    # Verify the new size is what we wanted it to be.
    self.debug(
        "Attaching volume (ID: %s) to VM (ID: %s)" %
        (self.volume.id, self.virtual_machine.id))
    self.virtual_machine.attach_volume(self.apiClient, self.volume)
    self.attached = True
    hosts = Host.list(self.apiClient, id=self.virtual_machine.hostid)
    self.assertTrue(isinstance(hosts, list))
    self.assertTrue(len(hosts) > 0)
    self.debug("Found %s host" % hosts[0].hypervisor)

    if hosts[0].hypervisor == "XenServer":
        self.virtual_machine.stop(self.apiClient)
    elif hosts[0].hypervisor.lower() == "vmware":
        self.skipTest("Resize Volume is unsupported on VMware")

    # resize the data disk
    self.debug("Resize Volume ID: %s" % self.volume.id)
    cmd = resizeVolume.resizeVolumeCmd()
    cmd.id = self.volume.id
    cmd.diskofferingid = self.services['resizeddiskofferingid']
    self.apiClient.resizeVolume(cmd)

    count = 0
    success = False
    while count < 3:
        list_volume_response = Volume.list(
            self.apiClient,
            id=self.volume.id,
            type='DATADISK')
        for vol in list_volume_response:
            if vol.id == self.volume.id and vol.size == 3221225472L and vol.state == 'Ready':
                success = True
        if success:
            break
        else:
            time.sleep(10)
            count += 1
def check_connection(self, secured, host, retries=20, interval=6):
    while retries > -1:
        time.sleep(interval)
        # listHosts filters on 'id'; fetch the specific host being checked
        host = Host.list(
            self.apiclient,
            zoneid=self.zone.id,
            id=host.id,
            type='Routing')[0]
        if host.details.secured != secured:
            if retries >= 0:
                retries = retries - 1
                continue
        else:
            return

    raise Exception("Host detail 'secured' was expected: " + secured +
                    ", actual is: " + host.details.secured)
def setUp(self):
    self.apiclient = self.testClient.getApiClient()
    self.dbclient = self.testClient.getDbConnection()
    self.cleanup = []

    if self.hypervisor.lower() not in ["vmware"]:
        self.skipTest("VM migration with volumes is supported only on VMware")

    # Filter on VMware hosts to match the hypervisor check above
    self.hosts = Host.list(
        self.apiclient,
        zoneid=self.zone.id,
        type='Routing',
        hypervisor='VMware')

    if len(self.hosts) < 2:
        self.skipTest("Requires at least two hosts for performing migration related tests")
def setUp(self):
    self.apiclient = self.testClient.getApiClient()
    self.dbclient = self.testClient.getDbConnection()
    self.cleanup = []

    if self.hypervisor.lower() not in ["kvm"]:
        self.skipTest("Secured migration is supported only on KVM")

    self.hosts = Host.list(
        self.apiclient,
        zoneid=self.zone.id,
        type='Routing',
        hypervisor='KVM')

    if len(self.hosts) < 2:
        self.skipTest("Requires at least two hosts for performing migration related tests")

    self.secure_all_hosts()
    self.updateConfiguration("ca.plugin.root.auth.strictness", "false")
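# secure_all_hosts() is called above but not shown in this section. A plausible
# minimal sketch, mirroring make_all_hosts_secure() earlier: provision a
# certificate on each KVM host, then verify the connection state flips to
# secured. Treat this as an assumption about the missing helper, not its
# actual definition.
def secure_all_hosts(self):
    for host in self.hosts:
        cmd = provisionCertificate.provisionCertificateCmd()
        cmd.hostid = host.id
        self.apiclient.provisionCertificate(cmd)
    for host in self.hosts:
        self.check_connection(secured='true', host=host)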
def test_02_deploy_vm_on_specific_cluster(self):
    # Select deployment cluster
    clusters = Cluster.list(self.apiclient)
    target_cluster = clusters[0]
    target_id = target_cluster.id
    cluster_hypervisor = target_cluster.hypervisortype
    template = get_template(self.apiclient, hypervisor=cluster_hypervisor)

    # deploy vm on cluster
    cmd = deployVirtualMachine.deployVirtualMachineCmd()
    cmd.zoneid = target_cluster.zoneid
    cmd.serviceofferingid = self.service_offering.id
    cmd.templateid = template.id
    cmd.clusterid = target_id
    vm = self.apiclient.deployVirtualMachine(cmd)

    vm_host = Host.list(self.apiclient, id=vm.hostid)
    self.assertEqual(
        target_id,
        vm_host[0].clusterid,
        "VM was not deployed on the provided cluster")
    self.destroy_vm(vm.id)
def setUpClass(cls):
    cls.testClient = super(TestVMLifeCycleHostmaintenance, cls).getClsTestClient()
    cls.api_client = cls.testClient.getApiClient()
    cls.services = Services().services
    # Get Zone, Domain and templates
    cls.domain = get_domain(cls.api_client)
    cls.zone = get_zone(cls.api_client, cls.testClient.getZoneForTests())
    cls.template = get_template(cls.api_client, cls.zone.id, cls.services["ostype"])
    cls.services["virtual_machine"]["zoneid"] = cls.zone.id
    cls.services["virtual_machine"]["template"] = cls.template.id

    # Find a cluster with at least two hosts
    clusterWithSufficientHosts = None
    clusters = Cluster.list(cls.api_client, zoneid=cls.zone.id)
    for cluster in clusters:
        cls.hosts = Host.list(cls.api_client, clusterid=cluster.id)
        if len(cls.hosts) >= 2:
            clusterWithSufficientHosts = cluster
            break
    if clusterWithSufficientHosts is None:
        raise unittest.SkipTest("No Cluster with 2 hosts found")

    Host.update(cls.api_client, id=cls.hosts[0].id, hosttags="hosttag1")
    Host.update(cls.api_client, id=cls.hosts[1].id, hosttags="hosttag2")

    cls.service_offering_1 = ServiceOffering.create(
        cls.api_client,
        cls.services["service_offering_1"])
    cls.service_offering_2 = ServiceOffering.create(
        cls.api_client,
        cls.services["service_offering_2"])
    cls.vpc_off = VpcOffering.create(cls.api_client, cls.services["vpc_offering"])
    cls.vpc_off.update(cls.api_client, state="Enabled")

    cls.account = Account.create(
        cls.api_client,
        cls.services["account"],
        admin=True,
        domainid=cls.domain.id)

    cls.services["vpc"]["cidr"] = "10.1.1.1/16"
    cls.vpc = VPC.create(
        cls.api_client,
        cls.services["vpc"],
        vpcofferingid=cls.vpc_off.id,
        zoneid=cls.zone.id,
        account=cls.account.name,
        domainid=cls.account.domainid)

    cls.nw_off = NetworkOffering.create(
        cls.api_client,
        cls.services["network_offering"],
        conservemode=False)
    # Enable Network offering
    cls.nw_off.update(cls.api_client, state="Enabled")

    # Creating network using the network offering created
    cls.network_1 = Network.create(
        cls.api_client,
        cls.services["network"],
        accountid=cls.account.name,
        domainid=cls.account.domainid,
        networkofferingid=cls.nw_off.id,
        zoneid=cls.zone.id,
        gateway="10.1.1.1",
        vpcid=cls.vpc.id)
    cls.nw_off_no_lb = NetworkOffering.create(
        cls.api_client,
        cls.services["network_offering_no_lb"],
        conservemode=False)
    # Enable Network offering
    cls.nw_off_no_lb.update(cls.api_client, state="Enabled")

    # Creating network using the network offering created
    cls.network_2 = Network.create(
        cls.api_client,
        cls.services["network"],
        accountid=cls.account.name,
        domainid=cls.account.domainid,
        networkofferingid=cls.nw_off_no_lb.id,
        zoneid=cls.zone.id,
        gateway="10.1.2.1",
        vpcid=cls.vpc.id)

    # Spawn instances in the networks created above
    cls.vm_1 = VirtualMachine.create(
        cls.api_client,
        cls.services["virtual_machine"],
        accountid=cls.account.name,
        domainid=cls.account.domainid,
        serviceofferingid=cls.service_offering_1.id,
        networkids=[str(cls.network_1.id)])
    cls.vm_2 = VirtualMachine.create(
        cls.api_client,
        cls.services["virtual_machine"],
        accountid=cls.account.name,
        domainid=cls.account.domainid,
        serviceofferingid=cls.service_offering_1.id,
        networkids=[str(cls.network_1.id)])
    cls.vm_3 = VirtualMachine.create(
        cls.api_client,
        cls.services["virtual_machine"],
        accountid=cls.account.name,
        domainid=cls.account.domainid,
        serviceofferingid=cls.service_offering_2.id,
        networkids=[str(cls.network_2.id)])

    routers = Router.list(
        cls.api_client,
        account=cls.account.name,
        domainid=cls.account.domainid,
        listall=True)
    if isinstance(routers, list):
        cls.vpcvr = routers[0]

    cls._cleanup = [
        cls.service_offering_1,
        cls.service_offering_2,
        cls.nw_off,
        cls.nw_off_no_lb]
    return
def test_03_restore_vm_with_new_template(self):
    """ Test restoring a vm with a different template than the one
        it was created with
    """

    hosts = Host.list(
        self.apiclient,
        type="Routing",
        listall=True)
    host_list_validation_result = validateList(hosts)
    self.assertEqual(
        host_list_validation_result[0],
        PASS,
        "host list validation failed due to %s" % host_list_validation_result[2])
    hypervisor = host_list_validation_result[1].hypervisor

    for k, v in self.services["templates"].items():
        if k.lower() == hypervisor.lower():
            # Register new template
            template = Template.register(
                self.apiclient,
                v,
                zoneid=self.zone.id,
                account=self.account.name,
                domainid=self.account.domainid,
                hypervisor=self.hypervisor)
            self.debug(
                "Registered a template of format: %s with ID: %s" %
                (v["format"], template.id))
            self.debug("Downloading template with ID: %s" % template.id)
            template.download(self.apiclient)
            self._cleanup.append(template)

            # Wait for template status to be changed across
            time.sleep(self.services["sleep"])

            self.verify_template_listing(template)

            # Restore a vm with the new template.
            self.vm_with_reset.restore(self.apiclient, templateid=template.id)
            self.vm_without_reset.restore(self.apiclient, templateid=template.id)

            # Make sure the VMs now have the new template ID
            # Make sure the IP address of the VMs hasn't changed
            self.debug("Checking template id of VM with isVolatile=True")
            vms = VirtualMachine.list(
                self.apiclient,
                id=self.vm_with_reset.id,
                listall=True)
            vm_list_validation_result = validateList(vms)
            self.assertEqual(
                vm_list_validation_result[0],
                PASS,
                "VM list validation failed due to %s" % vm_list_validation_result[2])
            vm_with_reset = vm_list_validation_result[1]
            self.assertNotEqual(
                self.vm_with_reset.templateid,
                vm_with_reset.templateid,
                "VM created with IsVolatile=True has same templateid : %s after restore" % vm_with_reset.templateid)
            self.assertNotEqual(
                self.vm_with_reset.templateid,
                template.id,
                "VM created with IsVolatile=True has wrong templateid after restore Got: %s Expected: %s" %
                (self.vm_with_reset.templateid, template.id))
            # Make sure it has the same IP after reboot
            self.assertEqual(
                self.vm_with_reset.nic[0].ipaddress,
                vm_with_reset.nic[0].ipaddress,
                "VM created with IsVolatile=True doesn't have same ip after restore. Got: %s Expected: %s" %
                (vm_with_reset.nic[0].ipaddress, self.vm_with_reset.nic[0].ipaddress))

            # Check that the root disk was not destroyed for isVolatile=False
            self.debug("Checking template id of VM with isVolatile=False")
            vms = VirtualMachine.list(
                self.apiclient,
                id=self.vm_without_reset.id,
                listall=True)
            vm_list_validation_result = validateList(vms)
            self.assertEqual(
                vm_list_validation_result[0],
                PASS,
                "VM list validation failed due to %s" % vm_list_validation_result[2])
            vm_without_reset = vm_list_validation_result[1]
            self.assertNotEqual(
                self.vm_without_reset.templateid,
                vm_without_reset.templateid,
                "VM created with IsVolatile=False has same templateid : %s after restore" % vm_without_reset.templateid)
            self.assertNotEqual(
                self.vm_without_reset.templateid,
                template.id,
                "VM created with IsVolatile=False has wrong templateid after restore Got: %s Expected: %s" %
                (self.vm_without_reset.templateid, template.id))
            # Make sure it has the same IP after reboot
            self.assertEqual(
                self.vm_without_reset.nic[0].ipaddress,
                vm_without_reset.nic[0].ipaddress,
                "VM created with IsVolatile=False doesn't have same ip after restore. Got: %s Expected: %s" %
                (vm_without_reset.nic[0].ipaddress, self.vm_without_reset.nic[0].ipaddress))
    return
def test_vmware_anti_affinity(self):
    """ Test Set up anti-affinity rules

        The test requires the following pre-requisites:
        - VMware cluster configured in fully automated mode
    """
    # Validate the following
    # 1. Deploy VMs on host 1 and 2
    # 2. Enable maintenance mode for host 1
    # 3. VM should be migrated to 3rd host

    hosts = Host.list(
        self.apiclient,
        zoneid=self.zone.id,
        resourcestate='Enabled',
        type='Routing')
    self.assertEqual(
        isinstance(hosts, list),
        True,
        "List hosts should return valid host response")
    self.debug(len(hosts))
    self.assertGreaterEqual(
        len(hosts),
        3,
        "There must be at least 3 hosts present in a cluster")

    aff_grp = self.create_aff_grp(
        aff_grp=self.services["host_anti_affinity"],
        acc=self.account.name,
        domainid=self.domain.id)
    vm_1 = VirtualMachine.create(
        self.apiclient,
        self.services["virtual_machine"],
        accountid=self.account.name,
        domainid=self.domain.id,
        serviceofferingid=self.service_offering.id,
        affinitygroupnames=[aff_grp.name])
    vm_2 = VirtualMachine.create(
        self.apiclient,
        self.services["virtual_machine"],
        accountid=self.account.name,
        domainid=self.domain.id,
        serviceofferingid=self.service_offering.id,
        affinitygroupnames=[aff_grp.name])

    host_1 = vm_1.hostid
    host_2 = vm_2.hostid

    vms = VirtualMachine.list(self.apiclient, id=vm_1.id, listall=True)
    vm_list_validation_result = validateList(vms)
    self.assertEqual(
        vm_list_validation_result[0],
        PASS,
        "vm list validation failed due to %s" % vm_list_validation_result[2])
    virtual_machine_1 = vm_list_validation_result[1]
    self.debug("VM State: %s" % virtual_machine_1.state)
    self.assertEqual(
        virtual_machine_1.state,
        "Running",
        "Deployed VM should be in Running state")

    vms = VirtualMachine.list(self.apiclient, id=vm_2.id, listall=True)
    vm_list_validation_result = validateList(vms)
    self.assertEqual(
        vm_list_validation_result[0],
        PASS,
        "vm list validation failed due to %s" % vm_list_validation_result[2])
    virtual_machine_2 = vm_list_validation_result[1]
    self.debug(
        "VM %s State: %s" %
        (virtual_machine_2.name, virtual_machine_2.state))
    self.assertEqual(
        virtual_machine_2.state,
        "Running",
        "Deployed VM should be in Running state")

    self.debug("Enabling maintenance mode on host_1: %s" % host_1)
    cmd = prepareHostForMaintenance.prepareHostForMaintenanceCmd()
    cmd.id = host_1
    self.apiclient.prepareHostForMaintenance(cmd)

    timeout = self.services["timeout"]
    while True:
        hosts = Host.list(
            self.apiclient,
            zoneid=self.zone.id,
            type='Routing',
            id=host_1)
        host_list_validation_result = validateList(hosts)
        self.assertEqual(
            host_list_validation_result[0],
            PASS,
            "host list validation failed due to %s" % host_list_validation_result[2])
        host = host_list_validation_result[1]
        if host.resourcestate == 'Maintenance':
            break
        elif timeout == 0:
            self.fail("Failed to put host: %s in maintenance mode" % host.name)
        time.sleep(self.services["sleep"])
        timeout = timeout - 1

    vms = VirtualMachine.list(
        self.apiclient,
        id=virtual_machine_1.id,
        listall=True)
    vm_list_validation_result = validateList(vms)
    self.assertEqual(
        vm_list_validation_result[0],
        PASS,
        "vm list validation failed due to %s" % vm_list_validation_result[2])
    vm = vm_list_validation_result[1]
    self.assertEqual(
        vm.state,
        "Running",
        "Deployed VM should be in Running state")
    self.assertNotEqual(
        vm.hostid,
        host_2,
        "The VM should not have been migrated to the second host")

    self.debug("Canceling host maintenance for ID: %s" % host_1)
    cmd = cancelHostMaintenance.cancelHostMaintenanceCmd()
    cmd.id = host_1
    self.apiclient.cancelHostMaintenance(cmd)
    self.debug("Maintenance mode canceled for host: %s" % host_1)
    return
def test_07_resize_fail(self):
    """Test resize (negative): invalid id, invalid disk offering,
       root disk and non-custom offering"""

    self.debug("Fail Resize Volume ID: %s" % self.volume.id)

    # first, an invalid id
    cmd = resizeVolume.resizeVolumeCmd()
    cmd.id = "invalid id"
    cmd.diskofferingid = self.services['resizeddiskofferingid']
    success = False
    try:
        self.apiClient.resizeVolume(cmd)
    except Exception as ex:
        if "invalid" in str(ex):
            success = True
    self.assertEqual(
        success,
        True,
        "ResizeVolume - verify invalid id is handled appropriately")

    # Next, we'll try an invalid disk offering id
    cmd.id = self.volume.id
    cmd.diskofferingid = "invalid id"
    success = False
    try:
        self.apiClient.resizeVolume(cmd)
    except Exception as ex:
        if "invalid" in str(ex):
            success = True
    self.assertEqual(
        success,
        True,
        "ResizeVolume - verify disk offering is handled appropriately")

    # try to resize a root disk with a disk offering;
    # root can only be resized by size=
    # get root vol from created vm
    list_volume_response = Volume.list(
        self.apiClient,
        virtualmachineid=self.virtual_machine.id,
        type='ROOT',
        listall=True
    )
    rootvolume = list_volume_response[0]
    cmd.id = rootvolume.id
    cmd.diskofferingid = self.services['diskofferingid']
    success = False
    try:
        self.apiClient.resizeVolume(cmd)
    except Exception as ex:
        if "Can only resize Data volumes" in str(ex):
            success = True
    self.assertEqual(
        success,
        True,
        "ResizeVolume - verify root disks cannot be resized by disk offering id")

    # Ok, now let's try and resize a volume that is not custom.
    cmd.id = self.volume.id
    cmd.diskofferingid = self.services['diskofferingid']
    cmd.size = 4
    currentSize = self.volume.size

    self.debug("Attaching volume (ID: %s) to VM (ID: %s)" %
               (self.volume.id, self.virtual_machine.id))
    # attach the volume
    self.virtual_machine.attach_volume(self.apiClient, self.volume)
    self.attached = True
    # stop the vm if it is on xenserver
    hosts = Host.list(self.apiClient, id=self.virtual_machine.hostid)
    self.assertTrue(isinstance(hosts, list))
    self.assertTrue(len(hosts) > 0)
    self.debug("Found %s host" % hosts[0].hypervisor)

    if hosts[0].hypervisor == "XenServer":
        self.virtual_machine.stop(self.apiClient)
    elif hosts[0].hypervisor.lower() == "vmware":
        self.skipTest("Resize Volume is unsupported on VMware")

    self.apiClient.resizeVolume(cmd)
    count = 0
    success = True
    while count < 10:
        list_volume_response = Volume.list(
            self.apiClient,
            id=self.volume.id,
            type='DATADISK'
        )
        for vol in list_volume_response:
            # The offering is not customizable, so the volume size
            # must remain unchanged
            if vol.id == self.volume.id and \
                    vol.size != currentSize and \
                    vol.state != "Resizing":
                success = False
        if not success:
            break
        time.sleep(1)
        count += 1

    self.assertEqual(
        success,
        True,
        "Verify the volume did not resize")

    if hosts[0].hypervisor == "XenServer":
        self.virtual_machine.start(self.apiClient)
        time.sleep(30)
    return
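# The bounded poll-sleep-recheck pattern above recurs throughout this
# suite. A minimal sketch of a reusable helper that the size checks
# could share (hypothetical, not part of Marvin; assumes `time` is
# imported as elsewhere in this module):

def poll_until(predicate, retries=10, interval=1):
    """Call predicate() up to `retries` times, sleeping `interval`
    seconds between attempts; return True as soon as it succeeds."""
    for _ in range(retries):
        if predicate():
            return True
        time.sleep(interval)
    return False

# Example usage against the same fixtures (expected_size in bytes):
#
#   resized = poll_until(
#       lambda: int(Volume.list(self.apiClient,
#                               id=self.volume.id)[0].size) == expected_size)
#   self.assertTrue(resized, "Volume did not reach the expected size")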
s.id = storage.id
s.forced = 'True'
s.delete(apiClient)

# hosts = Host.list(apiClient)
# if hosts:
#     for host in hosts:
#         print "host name={}, id={}".format(host.name, host.id)
#         if host.type == 'Routing':
#             h = Host(tmp_dict)
#             if host.resourcestate != 'PrepareForMaintenance' \
#                     and host.resourcestate != 'Maintenance':
#                 print "Maintenance for host"
#                 h.enableMaintenance(apiClient, host.id)

hosts = Host.list(apiClient)
if hosts:
    for host in hosts:
        print "host name={}, id={}".format(host.name, host.id)
        if host.type == 'Routing':
            if host.resourcestate == 'PrepareForMaintenance' \
                    or host.resourcestate == 'Maintenance':
                print "delete host"
                cmd = deleteHost.deleteHostCmd()
                cmd.id = host.id
                # cmd.forced = 'True'
                apiClient.deleteHost(cmd)
            else:
                print "Delete host"
                h = Host(tmp_dict)
                # h.forced = 'True'
def test_08_resize_volume(self):
    """Test resize a volume"""
    # Verify the new size is what we wanted it to be.
    self.debug("Attaching volume (ID: %s) to VM (ID: %s)" %
               (self.volume.id, self.virtual_machine.id))
    self.virtual_machine.attach_volume(self.apiClient, self.volume)
    self.attached = True

    hosts = Host.list(self.apiClient, id=self.virtual_machine.hostid)
    self.assertTrue(isinstance(hosts, list))
    self.assertTrue(len(hosts) > 0)
    self.debug("Found %s host" % hosts[0].hypervisor)

    if hosts[0].hypervisor == "XenServer":
        self.virtual_machine.stop(self.apiClient)
    elif hosts[0].hypervisor.lower() == "vmware":
        self.skipTest("Resize Volume is unsupported on VMware")

    # resize the data disk
    self.debug("Resize Volume ID: %s" % self.volume.id)

    self.services["disk_offering"]["disksize"] = 20
    disk_offering_20_GB = DiskOffering.create(
        self.apiClient,
        self.services["disk_offering"])
    self.cleanup.append(disk_offering_20_GB)

    cmd = resizeVolume.resizeVolumeCmd()
    cmd.id = self.volume.id
    cmd.diskofferingid = disk_offering_20_GB.id

    self.apiClient.resizeVolume(cmd)

    count = 0
    success = False
    while count < 3:
        list_volume_response = Volume.list(
            self.apiClient,
            id=self.volume.id,
            type="DATADISK")
        for vol in list_volume_response:
            if (
                vol.id == self.volume.id and
                int(vol.size) == (int(disk_offering_20_GB.disksize) * (1024 ** 3)) and
                vol.state == "Ready"
            ):
                success = True
        if success:
            break
        else:
            time.sleep(10)
            count += 1

    self.assertEqual(success, True,
                     "Check if the data volume resized appropriately")

    # now shrink the data disk back down
    self.services["disk_offering"]["disksize"] = 10
    disk_offering_10_GB = DiskOffering.create(
        self.apiClient,
        self.services["disk_offering"])
    self.cleanup.append(disk_offering_10_GB)

    cmd = resizeVolume.resizeVolumeCmd()
    cmd.id = self.volume.id
    cmd.diskofferingid = disk_offering_10_GB.id
    cmd.shrinkok = "true"

    self.apiClient.resizeVolume(cmd)

    count = 0
    success = False
    while count < 3:
        list_volume_response = Volume.list(self.apiClient,
                                           id=self.volume.id)
        for vol in list_volume_response:
            if (
                vol.id == self.volume.id and
                int(vol.size) == (int(disk_offering_10_GB.disksize) * (1024 ** 3)) and
                vol.state == "Ready"
            ):
                success = True
        if success:
            break
        else:
            time.sleep(10)
            count += 1

    self.assertEqual(success, True,
                     "Check if the data volume shrunk appropriately")

    # start the vm if it is on xenserver
    if hosts[0].hypervisor == "XenServer":
        self.virtual_machine.start(self.apiClient)
        time.sleep(30)
    return
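# Both checks above compare the reported volume size against the
# offering size converted from GB to bytes. A one-line helper makes the
# intent explicit (hypothetical, assuming vol.size is reported in bytes):

def gb_to_bytes(gb):
    """Convert a disk offering size in GB to bytes (1 GB = 1024 ** 3)."""
    return int(gb) * (1024 ** 3)

# e.g. int(vol.size) == gb_to_bytes(disk_offering_20_GB.disksize)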
def test_vmware_affinity(self):
    """ Test Set up affinity rules

        The test requires the following pre-requisites:
        - VMware cluster configured in fully automated mode
    """
    # Validate the following
    # 1. Deploy 2 VMs on the same host
    # 2. Migrate one VM from one host to another
    # 3. The second VM should also get migrated

    hosts = Host.list(
        self.apiclient,
        zoneid=self.zone.id,
        resourcestate='Enabled',
        type='Routing'
    )
    self.assertEqual(isinstance(hosts, list), True,
                     "List hosts should return valid host response")
    self.assertGreaterEqual(len(hosts), 2,
                            "There must be at least two hosts present in the cluster")

    host_1 = hosts[0].id
    host_2 = hosts[1].id

    aff_grp = self.create_aff_grp(
        aff_grp=self.services["host_affinity"],
        acc=self.account.name,
        domainid=self.domain.id)

    vm_1 = VirtualMachine.create(
        self.apiclient,
        self.services["virtual_machine"],
        accountid=self.account.name,
        domainid=self.domain.id,
        serviceofferingid=self.service_offering.id,
        affinitygroupnames=[aff_grp.name],
        hostid=host_1
    )
    vm_2 = VirtualMachine.create(
        self.apiclient,
        self.services["virtual_machine"],
        accountid=self.account.name,
        domainid=self.domain.id,
        serviceofferingid=self.service_offering.id,
        affinitygroupnames=[aff_grp.name]
    )

    vms = VirtualMachine.list(self.apiclient, id=vm_1.id, listall=True)
    vm_list_validation_result = validateList(vms)
    self.assertEqual(vm_list_validation_result[0], PASS,
                     "vm list validation failed due to %s" %
                     vm_list_validation_result[2])
    virtual_machine_1 = vm_list_validation_result[1]

    self.assertEqual(virtual_machine_1.state, "Running",
                     "Deployed VM should be in Running state")

    self.debug("Deploying VM on account: %s" % self.account.name)

    vms = VirtualMachine.list(self.apiclient, id=vm_2.id, listall=True)
    vm_list_validation_result = validateList(vms)
    self.assertEqual(vm_list_validation_result[0], PASS,
                     "vm list validation failed due to %s" %
                     vm_list_validation_result[2])
    virtual_machine_2 = vm_list_validation_result[1]

    self.assertEqual(virtual_machine_2.state, "Running",
                     "Deployed VM should be in Running state")

    self.debug("Migrate VM from host_1 to host_2")
    cmd = migrateVirtualMachine.migrateVirtualMachineCmd()
    cmd.virtualmachineid = virtual_machine_2.id
    cmd.hostid = host_2
    self.apiclient.migrateVirtualMachine(cmd)
    self.debug("Migrated VM from host_1 to host_2")

    vms = VirtualMachine.list(self.apiclient, hostid=host_2, listall=True)
    vm_list_validation_result = validateList(vms)
    self.assertEqual(vm_list_validation_result[0], PASS,
                     "vm list validation failed due to %s" %
                     vm_list_validation_result[2])

    vmids = [vm.id for vm in vms]

    self.assertIn(virtual_machine_1.id, vmids,
                  "VM 1 should be successfully migrated to host 2")
    self.assertIn(virtual_machine_2.id, vmids,
                  "VM 2 should be automatically migrated to host 2")
    return
def test_02_cancel_maintenance(self):
    """ Test cancel Maintenance Mode on the above Hosts + Migrate VMs Back
    """
    # Steps
    # 1. Cancel Maintenance Mode on the host.
    # 2. Migrate the VMs back onto the host on which Maintenance mode
    #    is cancelled.
    # Validate the following
    # 1. Successfully cancel the Maintenance mode on the host.
    # 2. Migrate the VMs back successfully onto the host.
    # 3. Check that the network connectivity exists with the migrated VMs.

    try:
        timeout = self.services["timeout"]
        while True:
            list_host_response = Host.list(
                self.apiclient,
                id=self.vpcvr.hostid,
                resourcestate="Maintenance")

            if list_host_response is not None:
                break
            elif timeout == 0:
                raise Exception("Failed to list the Host in Maintenance State")

            time.sleep(self.services["sleep"])
            timeout = timeout - 1

        self.debug("Verified that the Host is in Maintenance State")
    except Exception:
        self.fail("Failed to find the Host in maintenance state")

    self.debug("Cancel host maintenance on which the VPCVR is running")
    try:
        Host.cancelMaintenance(self.apiclient, id=self.vpcvr.hostid)

        timeout = self.services["timeout"]
        while True:
            list_host_response = Host.list(
                self.apiclient,
                id=self.vpcvr.hostid,
                state="Up")

            if list_host_response is not None:
                break
            elif timeout == 0:
                raise Exception(
                    "Failed to list the Host in Up State after canceling Maintenance Mode")

            time.sleep(self.services["sleep"])
            timeout = timeout - 1

        self.debug("Verified that the Host is in Up State after canceling Maintenance Mode")
    except Exception as e:
        self.fail("Failed to cancel maintenance mode on host: %s" % e)

    self.debug("Migrating the instances back to the host: %s" %
               self.vpcvr.hostid)
    try:
        cmd = migrateSystemVm.migrateSystemVmCmd()
        cmd.hostid = self.vpcvr.hostid
        cmd.virtualmachineid = self.vpcvr.id
        self.apiclient.migrateSystemVm(cmd)
    except Exception as e:
        self.fail("Failed to migrate VPCVR back: %s" % e)

    self.debug("Check the status of router after migration")
    routers = Router.list(self.apiclient, id=self.vpcvr.id, listall=True)
    self.assertEqual(isinstance(routers, list), True,
                     "List routers shall return the valid response")
    self.assertEqual(routers[0].state, "Running",
                     "Router state should be running")
    # TODO: Check for the network connectivity
    return
def test_08_migrate_vm(self):
    """Test migrate VM
    """
    # Validate the following
    # 1. Environment has enough hosts for migration
    # 2. DeployVM on suitable host (with another host in the cluster)
    # 3. Migrate the VM and assert migration successful

    suitable_hosts = None

    hosts = Host.list(
        self.apiclient,
        zoneid=self.zone.id,
        type='Routing'
    )
    self.assertEqual(validateList(hosts)[0], PASS,
                     "hosts list validation failed")

    if len(hosts) < 2:
        self.skipTest(
            "At least two hosts should be present in the zone for migration")

    hypervisor = str(get_hypervisor_type(self.apiclient)).lower()

    # For KVM, the two hosts used for migration should be in the same
    # cluster. For XenServer and VMware, migration is possible between
    # hosts belonging to different clusters with the help of XenMotion
    # and vMotion respectively.
    if hypervisor.lower() in ["kvm", "simulator"]:
        # identify a suitable host
        clusters = [h.clusterid for h in hosts]
        # find hosts with the same clusterid
        clusters = [cluster for index, cluster in enumerate(clusters)
                    if clusters.count(cluster) > 1]

        if len(clusters) <= 1:
            self.skipTest("In " + hypervisor.lower() +
                          " live migration needs two hosts within the same cluster")

        suitable_hosts = [host for host in hosts
                          if host.clusterid == clusters[0]]
    else:
        suitable_hosts = hosts

    target_host = suitable_hosts[0]
    migrate_host = suitable_hosts[1]

    # deploy VM on target host
    self.vm_to_migrate = VirtualMachine.create(
        self.apiclient,
        self.services["small"],
        accountid=self.account.name,
        domainid=self.account.domainid,
        serviceofferingid=self.small_offering.id,
        mode=self.services["mode"],
        hostid=target_host.id
    )
    self.debug("Migrating VM-ID: %s to Host: %s" %
               (self.vm_to_migrate.id, migrate_host.id))

    self.vm_to_migrate.migrate(self.apiclient, migrate_host.id)

    retries_cnt = 3
    while retries_cnt >= 0:
        list_vm_response = VirtualMachine.list(self.apiclient,
                                               id=self.vm_to_migrate.id)
        self.assertNotEqual(list_vm_response, None,
                            "Check virtual machine is listed")
        vm_response = list_vm_response[0]
        self.assertEqual(vm_response.id, self.vm_to_migrate.id,
                         "Check virtual machine ID of migrated VM")
        # Stop polling as soon as the VM reports the destination host
        if vm_response.hostid == migrate_host.id:
            break
        if retries_cnt == 0:
            self.fail("Check destination hostID of migrated VM")
        time.sleep(self.services["sleep"])
        retries_cnt = retries_cnt - 1
    return
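# For the KVM branch above, the cluster containing at least two hosts can
# be found more directly with collections.Counter. A minimal sketch with
# the same semantics as the inline list-comprehension filtering
# (hypothetical helper, assuming the usual Host.list response shape):

from collections import Counter

def hosts_in_shared_cluster(hosts):
    """Return the hosts of the first cluster that contains two or more
    Routing hosts, or an empty list if no such cluster exists."""
    counts = Counter(h.clusterid for h in hosts)
    for clusterid, count in counts.items():
        if count > 1:
            return [h for h in hosts if h.clusterid == clusterid]
    return []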
def test_02_host_maintenance_mode_with_activities(self):
    """Test host maintenance mode with activities
    """
    # Validate the following
    # 1. Create Vms. Acquire IP. Create port forwarding & load balancing
    #    rules for Vms.
    # 2. While activities are ongoing: Create snapshots, recurring
    #    snapshots, create templates, download volumes, Host 1: put to
    #    maintenance mode. All Vms should failover to Host 2 in cluster.
    #    Vms should be in running state. All port forwarding rules and
    #    load balancing rules should work.
    # 3. After failover to Host 2 succeeds, deploy Vms. Deploy Vms on host
    #    2 should succeed. All ongoing activities in step 3 should succeed
    # 4. Host 1: cancel maintenance mode.
    # 5. While activities are ongoing: Create snapshots, recurring
    #    snapshots, create templates, download volumes, Host 2: put to
    #    maintenance mode. All Vms should failover to Host 1 in cluster.
    # 6. After failover to Host 1 succeeds, deploy VMs. Deploy Vms on
    #    host 1 should succeed. All ongoing activities in step 6 should
    #    succeed.

    hosts = Host.list(
        self.apiclient,
        zoneid=self.zone.id,
        resourcestate='Enabled',
        type='Routing'
    )
    self.assertEqual(isinstance(hosts, list), True,
                     "List hosts should return valid host response")
    if len(hosts) < 2:
        self.skipTest("There must be at least 2 hosts present in cluster")

    self.debug("Checking HA with hosts: %s, %s" %
               (hosts[0].name, hosts[1].name))
    self.debug("Deploying VM in account: %s" % self.account.name)
    # Spawn an instance in that network
    virtual_machine = VirtualMachine.create(
        self.apiclient,
        self.services["virtual_machine"],
        accountid=self.account.name,
        domainid=self.account.domainid,
        serviceofferingid=self.service_offering.id
    )

    vms = VirtualMachine.list(self.apiclient,
                              id=virtual_machine.id,
                              listall=True)
    self.assertEqual(isinstance(vms, list), True,
                     "List VMs should return valid response for deployed VM")
    self.assertNotEqual(len(vms), 0,
                        "List VMs should return valid response for deployed VM")
    vm = vms[0]
    self.debug("Deployed VM on host: %s" % vm.hostid)
    self.assertEqual(vm.state, "Running",
                     "Deployed VM should be in Running state")

    networks = Network.list(
        self.apiclient,
        account=self.account.name,
        domainid=self.account.domainid,
        listall=True
    )
    self.assertEqual(isinstance(networks, list), True,
                     "List networks should return valid list for the account")
    network = networks[0]

    self.debug("Associating public IP for account: %s" % self.account.name)
    public_ip = PublicIPAddress.create(
        self.apiclient,
        accountid=self.account.name,
        zoneid=self.zone.id,
        domainid=self.account.domainid,
        networkid=network.id
    )
    self.debug("Associated %s with network %s" %
               (public_ip.ipaddress.ipaddress, network.id))

    self.debug("Creating PF rule for IP address: %s" %
               public_ip.ipaddress.ipaddress)
    NATRule.create(
        self.apiclient,
        virtual_machine,
        self.services["natrule"],
        ipaddressid=public_ip.ipaddress.id
    )

    self.debug("Creating LB rule on IP with NAT: %s" %
               public_ip.ipaddress.ipaddress)
    # Create Load Balancer rule on IP already having NAT rule
    lb_rule = LoadBalancerRule.create(
        self.apiclient,
        self.services["lbrule"],
        ipaddressid=public_ip.ipaddress.id,
        accountid=self.account.name
    )
    self.debug("Created LB rule with ID: %s" % lb_rule.id)

    # Should be able to SSH VM
    try:
        self.debug("SSH into VM: %s" % virtual_machine.id)
        virtual_machine.get_ssh_client(
            ipaddress=public_ip.ipaddress.ipaddress)
    except Exception as e:
        self.fail("SSH Access failed for %s: %s" %
                  (virtual_machine.ipaddress, e))

    # Get the Root disk of VM
    volumes = list_volumes(
        self.apiclient,
        virtualmachineid=virtual_machine.id,
        type='ROOT',
        listall=True
    )
    volume = volumes[0]
    self.debug("Root volume of VM(%s): %s" %
               (virtual_machine.name, volume.name))

    # Create a snapshot from the ROOTDISK
    self.debug("Creating snapshot on ROOT volume: %s" % volume.name)
    snapshot = Snapshot.create(self.apiclient, volumes[0].id)
    self.debug("Snapshot created: ID - %s" % snapshot.id)

    snapshots = list_snapshots(self.apiclient, id=snapshot.id, listall=True)
    self.assertEqual(isinstance(snapshots, list), True,
                     "Check list response returns a valid list")
    self.assertNotEqual(snapshots, None,
                        "Check if result exists in list snapshots call")
    self.assertEqual(snapshots[0].id, snapshot.id,
                     "Check snapshot id in list resources call")

    # Generate template from the snapshot
    self.debug("Generating template from snapshot: %s" % snapshot.name)
    template = Template.create_from_snapshot(
        self.apiclient,
        snapshot,
        self.services["templates"]
    )
    self.debug("Created template from snapshot: %s" % template.id)

    templates = list_templates(
        self.apiclient,
        templatefilter=self.services["templates"]["templatefilter"],
        id=template.id
    )
    self.assertEqual(isinstance(templates, list), True,
                     "List template call should return the newly created template")
    self.assertEqual(templates[0].isready, True,
                     "The newly created template should be in ready state")

    first_host = vm.hostid
    self.debug("Enabling maintenance mode for host %s" % vm.hostid)
    cmd = prepareHostForMaintenance.prepareHostForMaintenanceCmd()
    cmd.id = first_host
    self.apiclient.prepareHostForMaintenance(cmd)

    self.debug("Waiting for SSVMs to come up")
    wait_for_ssvms(
        self.apiclient,
        zoneid=self.zone.id,
        podid=self.pod.id,
    )

    timeout = self.services["timeout"]
    # Poll and check state of VM while it migrates from one host to another
    while True:
        vms = VirtualMachine.list(self.apiclient,
                                  id=virtual_machine.id,
                                  listall=True)
        self.assertEqual(isinstance(vms, list), True,
                         "List VMs should return valid response for deployed VM")
        self.assertNotEqual(len(vms), 0,
                            "List VMs should return valid response for deployed VM")
        vm = vms[0]

        self.debug("VM 1 state: %s" % vm.state)
        if vm.state in ["Stopping", "Stopped", "Running",
                        "Starting", "Migrating"]:
            if vm.state == "Running":
                break
            elif timeout == 0:
                self.fail("VM failed to reach Running state "
                          "while host was entering maintenance")
            else:
                time.sleep(self.services["sleep"])
                timeout = timeout - 1
        else:
            self.fail("VM migration from one host to another failed "
                      "while enabling maintenance")

    second_host = vm.hostid
    self.assertEqual(vm.state, "Running",
                     "VM should be in Running state after enabling host maintenance")

    # Should be able to SSH VM
    try:
        self.debug("SSH into VM: %s" % virtual_machine.id)
        virtual_machine.get_ssh_client(
            ipaddress=public_ip.ipaddress.ipaddress)
    except Exception as e:
        self.fail("SSH Access failed for %s: %s" %
                  (virtual_machine.ipaddress, e))

    self.debug("Deploying VM in account: %s" % self.account.name)
    # Spawn an instance on other host
    virtual_machine_2 = VirtualMachine.create(
        self.apiclient,
        self.services["virtual_machine"],
        accountid=self.account.name,
        domainid=self.account.domainid,
        serviceofferingid=self.service_offering.id
    )

    vms = VirtualMachine.list(self.apiclient,
                              id=virtual_machine_2.id,
                              listall=True)
    self.assertEqual(isinstance(vms, list), True,
                     "List VMs should return valid response for deployed VM")
    self.assertNotEqual(len(vms), 0,
                        "List VMs should return valid response for deployed VM")
    vm = vms[0]
    self.debug("Deployed VM on host: %s" % vm.hostid)
    self.debug("VM 2 state: %s" % vm.state)
    self.assertEqual(vm.state, "Running",
                     "Deployed VM should be in Running state")

    self.debug("Canceling host maintenance for ID: %s" % first_host)
    cmd = cancelHostMaintenance.cancelHostMaintenanceCmd()
    cmd.id = first_host
    self.apiclient.cancelHostMaintenance(cmd)
    self.debug("Maintenance mode canceled for host: %s" % first_host)

    # Get the Root disk of VM
    volumes = list_volumes(
        self.apiclient,
        virtualmachineid=virtual_machine_2.id,
        type='ROOT',
        listall=True
    )
    volume = volumes[0]
    self.debug("Root volume of VM(%s): %s" %
               (virtual_machine_2.name, volume.name))

    # Create a snapshot from the ROOTDISK
    self.debug("Creating snapshot on ROOT volume: %s" % volume.name)
    snapshot = Snapshot.create(self.apiclient, volumes[0].id)
    self.debug("Snapshot created: ID - %s" % snapshot.id)

    snapshots = list_snapshots(self.apiclient, id=snapshot.id, listall=True)
    self.assertEqual(isinstance(snapshots, list), True,
                     "Check list response returns a valid list")
    self.assertNotEqual(snapshots, None,
                        "Check if result exists in list snapshots call")
    self.assertEqual(snapshots[0].id, snapshot.id,
                     "Check snapshot id in list resources call")

    # Generate template from the snapshot
    self.debug("Generating template from snapshot: %s" % snapshot.name)
    template = Template.create_from_snapshot(
        self.apiclient,
        snapshot,
        self.services["templates"]
    )
    self.debug("Created template from snapshot: %s" % template.id)

    templates = list_templates(
        self.apiclient,
        templatefilter=self.services["templates"]["templatefilter"],
        id=template.id
    )
    self.assertEqual(isinstance(templates, list), True,
                     "List template call should return the newly created template")
    self.assertEqual(templates[0].isready, True,
                     "The newly created template should be in ready state")

    self.debug("Enabling maintenance mode for host %s" % second_host)
    cmd = prepareHostForMaintenance.prepareHostForMaintenanceCmd()
    cmd.id = second_host
    self.apiclient.prepareHostForMaintenance(cmd)
    self.debug("Maintenance mode enabled for host: %s" % second_host)

    self.debug("Waiting for SSVMs to come up")
    wait_for_ssvms(
        self.apiclient,
        zoneid=self.zone.id,
        podid=self.pod.id,
    )

    # Poll and check the status of the first VM
    timeout = self.services["timeout"]
    while True:
        vms = VirtualMachine.list(self.apiclient,
                                  account=self.account.name,
                                  domainid=self.account.domainid,
                                  listall=True)
        self.assertEqual(isinstance(vms, list), True,
                         "List VMs should return valid response for deployed VM")
        self.assertNotEqual(len(vms), 0,
                            "List VMs should return valid response for deployed VM")
        vm = vms[0]
        self.debug("VM state after enabling maintenance on second host: %s" %
                   vm.state)
        if vm.state in ["Stopping", "Stopped", "Running",
                        "Starting", "Migrating"]:
            if vm.state == "Running":
                break
            elif timeout == 0:
                self.fail("VM failed to reach Running state "
                          "while host was entering maintenance")
            else:
                time.sleep(self.services["sleep"])
                timeout = timeout - 1
        else:
            self.fail("VM migration from one host to another failed "
                      "while enabling maintenance")

    # Poll and check the status of the second VM
    timeout = self.services["timeout"]
    while True:
        vms = VirtualMachine.list(self.apiclient,
                                  account=self.account.name,
                                  domainid=self.account.domainid,
                                  listall=True)
        self.assertEqual(isinstance(vms, list), True,
                         "List VMs should return valid response for deployed VM")
        self.assertNotEqual(len(vms), 0,
                            "List VMs should return valid response for deployed VM")
        vm = vms[1]
        self.debug("VM state after enabling maintenance on second host: %s" %
                   vm.state)
        if vm.state in ["Stopping", "Stopped", "Running",
                        "Starting", "Migrating"]:
            if vm.state == "Running":
                break
            elif timeout == 0:
                self.fail("VM failed to reach Running state "
                          "while host was entering maintenance")
            else:
                time.sleep(self.services["sleep"])
                timeout = timeout - 1
        else:
            self.fail("VM migration from one host to another failed "
                      "while enabling maintenance")

    for vm in vms:
        self.debug(
            "VM states after enabling maintenance mode on host: %s - %s" %
            (second_host, vm.state))
        self.assertEqual(vm.state, "Running",
                         "Deployed VM should be in Running state")

    # Spawn an instance on other host
    virtual_machine_3 = VirtualMachine.create(
        self.apiclient,
        self.services["virtual_machine"],
        accountid=self.account.name,
        domainid=self.account.domainid,
        serviceofferingid=self.service_offering.id
    )

    vms = VirtualMachine.list(self.apiclient,
                              id=virtual_machine_3.id,
                              listall=True)
    self.assertEqual(isinstance(vms, list), True,
                     "List VMs should return valid response for deployed VM")
    self.assertNotEqual(len(vms), 0,
                        "List VMs should return valid response for deployed VM")
    vm = vms[0]
    self.debug("Deployed VM on host: %s" % vm.hostid)
    self.debug("VM 3 state: %s" % vm.state)
    self.assertEqual(vm.state, "Running",
                     "Deployed VM should be in Running state")

    self.debug("Canceling host maintenance for ID: %s" % second_host)
    cmd = cancelHostMaintenance.cancelHostMaintenanceCmd()
    cmd.id = second_host
    self.apiclient.cancelHostMaintenance(cmd)
    self.debug("Maintenance mode canceled for host: %s" % second_host)

    self.debug("Waiting for SSVMs to come up")
    wait_for_ssvms(
        self.apiclient,
        zoneid=self.zone.id,
        podid=self.pod.id,
    )
    return
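# The two near-identical polling blocks above (one per VM) could be
# collapsed into a single helper that waits for every VM in the account
# to reach the Running state. A minimal sketch, assuming the same
# VirtualMachine.list semantics (hypothetical, not part of Marvin):

def wait_for_vms_running(apiclient, account, domainid, timeout, interval):
    """Poll until all of the account's VMs are Running; raise otherwise."""
    while timeout > 0:
        vms = VirtualMachine.list(apiclient, account=account,
                                  domainid=domainid, listall=True)
        if vms and all(vm.state == "Running" for vm in vms):
            return vms
        time.sleep(interval)
        timeout -= 1
    raise AssertionError("VMs did not reach Running state in time")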
def test_07_resize_fail(self):
    """Test resize (negative): invalid id, invalid disk offering,
       root disk and non-custom offering"""

    self.debug("Fail Resize Volume ID: %s" % self.volume.id)

    # first, an invalid id
    cmd = resizeVolume.resizeVolumeCmd()
    cmd.id = "invalid id"
    cmd.diskofferingid = self.services['customresizeddiskofferingid']
    success = False
    try:
        self.apiClient.resizeVolume(cmd)
    except Exception as ex:
        if "invalid" in str(ex):
            success = True
    self.assertEqual(
        success,
        True,
        "ResizeVolume - verify invalid id is handled appropriately")

    # Next, we'll try an invalid disk offering id
    cmd.id = self.volume.id
    cmd.diskofferingid = "invalid id"
    success = False
    try:
        self.apiClient.resizeVolume(cmd)
    except Exception as ex:
        if "invalid" in str(ex):
            success = True
    self.assertEqual(
        success,
        True,
        "ResizeVolume - verify disk offering is handled appropriately")

    # try to resize a root disk with a disk offering;
    # root can only be resized by size=
    # get root vol from created vm
    list_volume_response = Volume.list(
        self.apiClient,
        virtualmachineid=self.virtual_machine.id,
        type='ROOT',
        listall=True
    )
    rootvolume = list_volume_response[0]
    cmd.id = rootvolume.id
    cmd.diskofferingid = self.services['diskofferingid']
    with self.assertRaises(Exception):
        self.apiClient.resizeVolume(cmd)

    # Ok, now let's try and resize a volume that is not custom.
    cmd.id = self.volume.id
    cmd.diskofferingid = self.services['diskofferingid']
    cmd.size = 4
    currentSize = self.volume.size

    self.debug("Attaching volume (ID: %s) to VM (ID: %s)" %
               (self.volume.id, self.virtual_machine.id))
    # attach the volume
    self.virtual_machine.attach_volume(self.apiClient, self.volume)
    self.attached = True
    # stop the vm if it is on xenserver
    hosts = Host.list(self.apiClient, id=self.virtual_machine.hostid)
    self.assertTrue(isinstance(hosts, list))
    self.assertTrue(len(hosts) > 0)
    self.debug("Found %s host" % hosts[0].hypervisor)

    if hosts[0].hypervisor == "XenServer":
        self.virtual_machine.stop(self.apiClient)
    elif hosts[0].hypervisor.lower() in ("vmware", "hyperv"):
        self.skipTest("Resize Volume is unsupported on VMware and Hyper-V")

    # Attempting to resize it should throw an exception, as we're using
    # a non-customisable disk offering, therefore our size parameter
    # should be ignored
    with self.assertRaises(Exception):
        self.apiClient.resizeVolume(cmd)

    if hosts[0].hypervisor == "XenServer":
        self.virtual_machine.start(self.apiClient)
        time.sleep(30)
    return
def test_vm_creation_in_fully_automated_mode(self):
    """ Test VM Creation in automation mode = Fully automated

        This test requires the following preconditions:
        - DRS Cluster is configured in "Fully automated" mode
    """
    # Validate the following
    # 1. Create a new VM on a host which is almost fully utilized
    # 2. The VM is automatically placed on the other host
    # 3. VM state is Running after deployment

    hosts = Host.list(
        self.apiclient,
        zoneid=self.zone.id,
        resourcestate='Enabled',
        type='Routing'
    )
    self.assertEqual(isinstance(hosts, list), True,
                     "List hosts should return valid host response")
    self.assertGreaterEqual(len(hosts), 2,
                            "There must be at least two hosts present in the cluster")
    host_1 = hosts[0]

    # Convert the available memory (keeping some margin) into MBs and
    # assign it to the service offering
    self.services["service_offering_max_memory"]["memory"] = int(
        (int(hosts[0].memorytotal) - int(hosts[0].memoryused)) / 1048576 - 1024)

    self.debug("max memory: %s" %
               self.services["service_offering_max_memory"]["memory"])
    service_offering_max_memory = ServiceOffering.create(
        self.apiclient,
        self.services["service_offering_max_memory"]
    )

    VirtualMachine.create(
        self.apiclient,
        self.services["virtual_machine"],
        accountid=self.account.name,
        domainid=self.account.domainid,
        serviceofferingid=service_offering_max_memory.id,
        hostid=host_1.id
    )

    # Host 1 has only 1024 MB of memory available now after deploying the
    # instance. We are trying to deploy an instance with 2048 MB of
    # memory; it should automatically get deployed on the other host,
    # which has enough capacity.
    self.debug("Trying to deploy an instance with a memory requirement "
               "greater than what is available on the first host")
    self.debug("Deploying VM in account: %s" % self.account.name)

    # Spawn an instance in that network
    virtual_machine = VirtualMachine.create(
        self.apiclient,
        self.services["virtual_machine"],
        accountid=self.account.name,
        domainid=self.account.domainid,
        serviceofferingid=self.service_offering.id
    )

    vms = VirtualMachine.list(self.apiclient,
                              id=virtual_machine.id,
                              listall=True)
    self.assertEqual(isinstance(vms, list), True,
                     "List VMs should return valid response for deployed VM")
    self.assertNotEqual(len(vms), 0,
                        "List VMs should return valid response for deployed VM")
    vm = vms[0]
    self.assertEqual(vm.state, "Running",
                     "Deployed VM should be in Running state")
    self.assertNotEqual(vm.hostid, host_1.id,
                        "Host ids of the two VMs should not match as one host is full")
    self.debug("The host ids of the two virtual machines are different "
               "as expected; they are %s and %s" % (vm.hostid, host_1.id))
    return
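# The memory calculation above converts the host's free memory from bytes
# to MB and keeps a 1 GB margin so the first host is almost, but not
# entirely, filled. A minimal standalone restatement (hypothetical
# helper; 1048576 = 2 ** 20 bytes per MB):

def offering_memory_mb(memorytotal, memoryused, margin_mb=1024):
    """Memory (in MB) that nearly fills a host, minus a safety margin."""
    free_mb = (int(memorytotal) - int(memoryused)) // (1024 ** 2)
    return free_mb - margin_mb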