    def setUpClass(cls):
        cls.testClient = super(TestHostsForMigration, cls).getClsTestClient()
        cls.api_client = cls.testClient.getApiClient()

        cls.services = Services().services
        # Get Zone, Domain and templates
        cls.domain = get_domain(cls.api_client)
        cls.zone = get_zone(cls.api_client, cls.testClient.getZoneForTests())

        cls.template = get_template(cls.api_client, cls.zone.id, cls.services["ostype"])

        clusterWithSufficientHosts = None
        clusters = Cluster.list(cls.api_client, zoneid=cls.zone.id)
        for cluster in clusters:
            cls.hosts = Host.list(cls.api_client, clusterid=cluster.id, type="Routing")
            if isinstance(cls.hosts, list) and len(cls.hosts) >= 2:
                clusterWithSufficientHosts = cluster
                break

        if clusterWithSufficientHosts is None:
            raise unittest.SkipTest("No Cluster with 2 hosts found")

        Host.update(cls.api_client, id=cls.hosts[1].id, hosttags="PREMIUM")

        cls.services["virtual_machine"]["zoneid"] = cls.zone.id
        cls.services["virtual_machine"]["template"] = cls.template.id

        cls.service_offering_with_tag = ServiceOffering.create(
            cls.api_client, cls.services["service_offering_with_tag"]
        )

        cls._cleanup = [cls.service_offering_with_tag]
        return
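
    # The cluster-scan loop above is a recurring pattern in these tests. Below
    # is a hedged sketch of it extracted into a reusable helper (hypothetical,
    # not part of marvin): return the first cluster that owns at least
    # `min_hosts` routing hosts, together with those hosts.
    @classmethod
    def findClusterWithHosts(cls, api_client, zoneid, min_hosts=2):
        clusters = Cluster.list(api_client, zoneid=zoneid)
        for cluster in clusters or []:
            hosts = Host.list(api_client, clusterid=cluster.id, type="Routing")
            if isinstance(hosts, list) and len(hosts) >= min_hosts:
                return cluster, hosts
        return None, []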
    def test_03_reconnect_host(self):
        """ Test reconnect Host which has VPC elements
        """

        # Steps:
        # 1. Reconnect one of the hosts on which the VPC Virtual Router is present.
        # Validate the following
        # 1. Host should successfully reconnect.
        # 2. Network connectivity to all the VMs on the host should not be
        #    affected due to the reconnection.

        self.debug("Reconnecting the host where VPC VR is running")
        try:
            Host.reconnect(self.apiclient, id=self.vpcvr.hostid)
        except Exception as e:
            self.fail("Failed to reconnect to host: %s" % e)

        self.debug("Check the status of router after migration")
        routers = Router.list(
                              self.apiclient,
                              id=self.vpcvr.id,
                              listall=True
                              )
        self.assertEqual(
                         isinstance(routers, list),
                         True,
                         "List routers shall return the valid response"
                         )
        self.assertEqual(
                         routers[0].state,
                         "Running",
                         "Router state should be running"
                         )
        #  TODO: Check for the network connectivity
        return
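
    # A later variant of this test (further below) polls the host's resource
    # state inline before reconnecting. A hedged sketch of that polling pulled
    # out into a helper (hypothetical method, assumed retry/interval values):
    def wait_for_host_enabled(self, hostid, retries=10, interval=5):
        for _ in range(retries):
            hosts = Host.list(self.apiclient, id=hostid, resourcestate="Enabled")
            if hosts is not None:
                return hosts[0]
            time.sleep(interval)
        self.fail("Host %s did not reach the Enabled resource state" % hostid)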
    def tearDown(self):
        try:
            for host in self.hosts:
                Host.update(self.apiclient, id=host.id, hosttags="")

            cleanup_resources(self.apiclient, self.cleanup)
        except Exception as e:
            self.debug("Warning! Exception in tearDown: %s" % e)
    def tearDown(self):
        try:
            for host in self.hosts:
                Host.update(self.apiclient, id=host.id, hosttags="")

            cleanup_resources(self.apiclient, self.cleanup)
        except Exception as e:
            raise Exception("Warning: Exception during cleanup : %s" % e)
    def tearDownClass(cls):
        try:
            # Remove the host from HA
            Host.update(cls.api_client, id=cls.hosts[2].id, hosttags="")
            # Cleanup resources used
            cleanup_resources(cls.api_client, cls._cleanup)
        except Exception as e:
            raise Exception("Warning: Exception during cleanup : %s" % e)
        return
    def test_validateState_succeeds_at_retry_limit(self):
        retries = 3
        timeout = 3
        api_client = MockApiClient(retries, 'initial state', 'final state')
        host = Host({'id': 'host_id'})
        state = host.validateState(api_client, ['final state', 'final state'], timeout=timeout, interval=1)

        self.assertEqual(state, [PASS, None])
        self.assertEqual(retries, api_client.retry_counter)
    def test_03_reconnect_host(self):
        """ Test reconnect Host which has VPC elements
        """

        # Steps:
        # 1. Reconnect one of the hosts on which the VPC Virtual Router is present.
        # Validate the following
        # 1. Host should successfully reconnect.
        # 2. Network connectivity to all the VMs on the host should not be
        #    affected due to the reconnection.

        try:
            timeout = self.services["timeout"]        
            while True:
                list_host_response = Host.list(
                    self.apiclient,
                    id=self.vpcvr.hostid,
                    resourcestate="Enabled")
                
                if list_host_response is not None:
                    break
                elif timeout == 0:
                    raise Exception("Failed to list the Host in Up State")

                time.sleep(self.services["sleep"])
                timeout = timeout - 1
            
            self.debug("Verified that the Host is in Up State")
            
        except Exception:
            self.fail("Failed to find the Host in Up State")

        self.debug("Reconnecting the host where VPC VR is running")
        try:
            Host.reconnect(self.apiclient, id=self.vpcvr.hostid)
        except Exception as e:
            self.fail("Failed to reconnect to host: %s" % e)

        self.debug("Check the status of router after migration")
        routers = Router.list(
                              self.apiclient,
                              id=self.vpcvr.id,
                              listall=True
                              )
        self.assertEqual(
                         isinstance(routers, list),
                         True,
                         "List routers shall return the valid response"
                         )
        self.assertEqual(
                         routers[0].state,
                         "Running",
                         "Router state should be running"
                         )
        #  TODO: Check for the network connectivity
        return
    def test_validateState_fails_after_retry_limit(self):
        retries = 3
        timeout = 2
        api_client = MockApiClient(retries, 'initial state', 'final state')
        host = Host({'id': 'host_id'})
        state = host.validateState(api_client, ['final state', 'final state'], timeout=timeout, interval=1)

        self.assertEqual(state,
                         [FAIL, "Host state not transited to %s, operation timed out" % ['final state', 'final state']])
        self.assertEqual(retries, api_client.retry_counter)
    def tearDownClass(cls):
        try:
            # Delete the host tags
            Host.update(cls.api_client, id=cls.hosts[0].id, hosttags="")
            Host.update(cls.api_client, id=cls.hosts[1].id, hosttags="")
            cls.account.delete(cls.api_client)
            wait_for_cleanup(cls.api_client, ["account.cleanup.interval"])
            # Cleanup resources used
            cleanup_resources(cls.api_client, cls._cleanup)
            cls.vpc_off.delete(cls.api_client)
        except Exception as e:
            raise Exception("Warning: Exception during cleanup : %s" % e)
        return
    def test_02_cancel_maintenance(self):
        """ Test cancel Maintenance Mode on the above Hosts + Migrate VMs Back
        """

        # Steps
        # 1. Cancel Maintenance Mode on the host.
        # 2. Migrate the VMs back onto the host on which Maintenance mode is
        #    cancelled.
        # Validate the following
        # 1. Successfully cancel the Maintenance mode on the host.
        # 2. Migrate the VMs back successfully onto the host.
        # 3. Check that the network connectivity exists with the migrated VMs.

        self.debug("Cancel host maintenence on which the VPCVR is running")
        try:
            Host.cancelMaintenance(self.apiclient, id=self.vpcvr.hostid)
        except Exception as e:
            self.fail("Failed to enable maintenance mode on host: %s" % e)

        self.debug(
            "Migrating the instances back to the host: %s" %
                                                        self.vpcvr.hostid)
        try:
            cmd = migrateSystemVm.migrateSystemVmCmd()
            cmd.hostid = self.vpcvr.hostid
            cmd.virtualmachineid = self.vpcvr.id
            self.apiclient.migrateSystemVm(cmd)
        except Exception as e:
            self.fail("Failed to migrate VPCVR back: %s" % e)

        self.debug("Check the status of router after migration")
        routers = Router.list(
                              self.apiclient,
                              id=self.vpcvr.id,
                              listall=True
                              )
        self.assertEqual(
                         isinstance(routers, list),
                         True,
                         "List routers shall return the valid response"
                         )
        self.assertEqual(
                         routers[0].state,
                         "Running",
                         "Router state should be running"
                         )
        #  TODO: Check for the network connectivity
        return
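
    # For context, a hedged sketch of the companion step this test undoes:
    # putting the host running the VPC VR into maintenance mode first. It
    # assumes marvin's Host.enableMaintenance wrapper; the test body is
    # hypothetical.
    def test_01_enable_maintenance(self):
        """Put the host running the VPC VR into maintenance mode"""
        self.debug("Enabling maintenance on host: %s" % self.vpcvr.hostid)
        try:
            Host.enableMaintenance(self.apiclient, id=self.vpcvr.hostid)
        except Exception as e:
            self.fail("Failed to enable maintenance mode on host: %s" % e)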
    def test_06_secondary_storage(self):
        """Check the status of secondary storage"""

        # Validate the following
        # 1. List secondary storage
        # 2. Check state is "Up" or not

        if self.hypervisor.lower() == 'simulator':
            self.skipTest("Hypervisor is simulator skipping")

        sec_storages = Host.list(
                          self.apiclient,
                          zoneid=self.zone.id,
                          type='SecondaryStorageVM',
                          listall=True
                          )

        if sec_storages is None:
            self.skipTest("SSVM is not provisioned yet, skipping")

        self.assertEqual(
                         isinstance(sec_storages, list),
                         True,
                         "Check if listHosts returns a valid response"
                         )
        for sec_storage in sec_storages:
            self.assertEqual(
                             sec_storage.state,
                             'Up',
                             "Secondary storage should be in Up state"
                             )
        return
    def setUpClass(cls):
        testClient = super(TestDeployVmWithVariedPlanners, cls).getClsTestClient()
        cls.apiclient = testClient.getApiClient()
        cls.services = testClient.getParsedTestDataConfig()

        # Get Zone, Domain and templates
        cls.domain = get_domain(cls.apiclient)
        cls.zone = get_zone(cls.apiclient, testClient.getZoneForTests())
        cls.template = get_template(
            cls.apiclient,
            cls.zone.id,
            cls.services["ostype"]
        )

        if cls.template == FAILED:
            assert False, "get_template() failed to return template with description %s" % cls.services["ostype"]

        cls.services["virtual_machine"]["zoneid"] = cls.zone.id
        cls.services["template"] = cls.template.id
        cls.services["zoneid"] = cls.zone.id

        cls.account = Account.create(
            cls.apiclient,
            cls.services["account"],
            domainid=cls.domain.id
        )
        cls.hosts = Host.list(cls.apiclient, type='Routing')
        cls.clusters = Cluster.list(cls.apiclient)
        cls.cleanup = [
            cls.account
        ]
    def test_02_migrate_vm(self):
        """Test migrate VM in project

        # Validate the following
        # 1. Create VM with custom disk offering in a project and check
        #    initial primary storage count
        # 2. List the hosts suitable for migrating the VM
        # 3. Migrate the VM and verify that primary storage count of project remains same"""

        try:
            hosts = Host.list(self.apiclient, virtualmachineid=self.vm.id,
                              listall=True)
            self.assertEqual(validateList(hosts)[0], PASS, "hosts list validation failed")
            host = hosts[0]
            self.vm.migrate(self.apiclient, host.id)
        except Exception as e:
            self.fail("Exception occured" % e)

        expectedCount = self.initialResourceCount
        response = matchResourceCount(
                        self.apiclient, expectedCount,
                        RESOURCE_PRIMARY_STORAGE,
                        projectid=self.project.id)
        self.assertEqual(response[0], PASS, response[1])
        return
    def test_06_secondary_storage(self):
        """Check the status of secondary storage"""

        # Validate the following
        # 1. List secondary storage
        # 2. Check state is "Up" or not

        sec_storages = Host.list(
                          self.apiclient,
                          zoneid=self.zone.id,
                          type='SecondaryStorageVM',
                          listall=True
                          )
        self.assertEqual(
                         isinstance(sec_storages, list),
                         True,
                         "Check if listHosts returns a valid response"
                         )
        for sec_storage in sec_storages:
            self.assertEqual(
                             sec_storage.state,
                             'Up',
                             "Secondary storage should be in Up state"
                             )
        return
    def test_04_hosts(self):
        """Check the status of hosts"""

        # Validate the following
        # 1. List hosts with type=Routing
        # 2. Check state is "Up" or not

        hosts = Host.list(
                          self.apiclient,
                          zoneid=self.zone.id,
                          type='Routing',
                          listall=True
                          )
        self.assertEqual(
                         isinstance(hosts, list),
                         True,
                         "Check if listHosts returns a valid response"
                         )
        for host in hosts:
            self.assertEqual(
                             host.state,
                             'Up',
                             "Host should be in Up state and running"
                             )
        return
    def test_01_cluster_settings(self):
        """change cpu/mem.overprovisioning.factor at cluster level and
         verify the change """
        listHost = Host.list(self.apiclient,
                             id=self.deployVmResponse.hostid
                             )
        self.assertEqual(
            validateList(listHost)[0],
            PASS,
            "check list host response for host id %s" %
            self.deployVmResponse.hostid)
        Configurations.update(self.apiclient,
                              clusterid=listHost[0].clusterid,
                              name="mem.overprovisioning.factor",
                              value=2)

        Configurations.update(self.apiclient,
                              clusterid=listHost[0].clusterid,
                              name="cpu.overprovisioning.factor",
                              value=3)

        list_cluster = Cluster.list(self.apiclient,
                                    id=listHost[0].clusterid)
        self.assertEqual(
            validateList(list_cluster)[0],
            PASS,
            "check list cluster response for cluster id %s" %
            listHost[0].clusterid)
        self.assertEqual(int(list_cluster[0].cpuovercommitratio),
                         3,
                         "check the cpu overcommit value at cluster level ")

        self.assertEqual(int(list_cluster[0].memoryovercommitratio),
                         2,
                         "check memory overcommit value at cluster level")

        Configurations.update(self.apiclient,
                              clusterid=listHost[0].clusterid,
                              name="mem.overprovisioning.factor",
                              value=1)

        Configurations.update(self.apiclient,
                              clusterid=listHost[0].clusterid,
                              name="cpu.overprovisioning.factor",
                              value=1)
        list_cluster1 = Cluster.list(self.apiclient,
                                     id=listHost[0].clusterid)
        self.assertEqual(
            validateList(list_cluster1)[0],
            PASS,
            "check the list cluster response for id %s" %
            listHost[0].clusterid)
        self.assertEqual(int(list_cluster1[0].cpuovercommitratio),
                         1,
                         "check the cpu overcommit value at cluster level ")

        self.assertEqual(int(list_cluster1[0].memoryovercommitratio),
                         1,
                         "check memory overcommit value at cluster level")
    def get_target_host(self, secured, virtualmachineid):
        target_hosts = Host.listForMigration(self.apiclient,
                                             virtualmachineid=virtualmachineid)
        for host in target_hosts:
            h = list_hosts(self.apiclient, type='Routing', id=host.id)[0]
            if h.details.secured == secured:
                return h

        cloudstackTestCase.skipTest(self, "No target hosts available, skipping test.")
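
    # A hedged usage sketch for get_target_host (hypothetical test body):
    # migrate a VM to a host whose 'secured' detail matches the requested
    # value, then verify the agent connection with check_connection (defined
    # in a later example).
    def migrate_and_check(self, vm, secured):
        target = self.get_target_host(secured, vm.id)
        vm.migrate(self.apiclient, hostid=target.id)
        self.check_connection(secured, target)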
    def migrate_router(self, router):
        """ Migrate the router """

        self.debug("Checking if the host is available for migration?")
        hosts = Host.list(self.api_client, zoneid=self.zone.id, type='Routing')

        self.assertEqual(
            isinstance(hosts, list),
            True,
            "List hosts should return a valid list"
        )
        if len(hosts) < 2:
            self.skipTest(
                "No host available for migration. Test requires atleast 2 hosts")

        # Remove the host of current VM from the hosts list
        hosts[:] = [host for host in hosts if host.id != router.hostid]
        host = hosts[0]
        self.debug("Validating if the network rules work properly or not?")

        self.debug("Migrating VM-ID: %s from %s to Host: %s" % (
            router.id,
            router.hostid,
            host.id
        ))
        try:

            # Migrate  the router
            cmd = migrateSystemVm.migrateSystemVmCmd()
            cmd.isAsync = "false"
            cmd.hostid = host.id
            cmd.virtualmachineid = router.id
            self.api_client.migrateSystemVm(cmd)

        except Exception as e:
            self.fail("Failed to migrate instance, %s" % e)

        self.debug("Waiting for Router mgiration ....")
        time.sleep(240)

        # List routers to check state of router
        router_response = list_routers(
            self.api_client,
            id=router.id
        )
        self.assertEqual(
            isinstance(router_response, list),
            True,
            "Check list response returns a valid list"
        )

        router.hostid = router_response[0].hostid
        self.assertEqual(
            router.hostid, host.id, "Migration to host %s failed. The router host is "
            "still %s" %
            (host.id, router.hostid))
        return
def update_host(self, state, host_id):
    """
    Function to Enable/Disable Host
    """
    host_status = Host.update(
                              self.apiclient,
                              id=host_id,
                              allocationstate=state
                              )
    return host_status.resourcestate
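
# A hedged usage sketch for update_host (hypothetical test body): disable a
# host, assert the reported resource state, then re-enable it so later tests
# can schedule VMs on it again.
def toggle_host_allocation(self, host_id):
    assert self.update_host("Disable", host_id) == "Disabled", \
        "Host should report the Disabled resource state"
    assert self.update_host("Enable", host_id) == "Enabled", \
        "Host should report the Enabled resource state"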
    def test_10_list_volumes(self):

        # Validate the following
        #
        # 1. List Root Volume and waits until it has the newly introduced attributes
        #
        # 2. Verifies return attributes has values different from none, when instance is running
        #

        list_vm = VirtualMachine.list(self.apiclient, id=self.virtual_machine.id)[0]

        host = Host.list(
            self.apiclient,
            type='Routing',
            id=list_vm.hostid
        )[0]
        list_pods = get_pod(self.apiclient, self.zone.id, host.podid)

        root_volume = self.wait_for_attributes_and_return_root_vol()

        self.assertTrue(hasattr(root_volume, "utilization"))
        self.assertTrue(root_volume.utilization is not None)

        self.assertTrue(hasattr(root_volume, "virtualsize"))
        self.assertTrue(root_volume.virtualsize is not None)

        self.assertTrue(hasattr(root_volume, "physicalsize"))
        self.assertTrue(root_volume.physicalsize is not None)

        self.assertTrue(hasattr(root_volume, "vmname"))
        self.assertEqual(root_volume.vmname, list_vm.name)

        self.assertTrue(hasattr(root_volume, "clustername"))
        self.assertTrue(root_volume.clustername is not None)

        self.assertTrue(hasattr(root_volume, "clusterid"))
        self.assertTrue(root_volume.clusterid is not None)

        self.assertTrue(hasattr(root_volume, "storageid"))
        self.assertTrue(root_volume.storageid is not None)

        self.assertTrue(hasattr(root_volume, "storage"))
        self.assertTrue(root_volume.storage is not None)

        self.assertTrue(hasattr(root_volume, "zoneid"))
        self.assertEqual(root_volume.zoneid, self.zone.id)

        self.assertTrue(hasattr(root_volume, "zonename"))
        self.assertEqual(root_volume.zonename, self.zone.name)

        self.assertTrue(hasattr(root_volume, "podid"))
        self.assertEqual(root_volume.podid, list_pods.id)

        self.assertTrue(hasattr(root_volume, "podname"))
        self.assertEqual(root_volume.podname, list_pods.name)
    def tearDownClass(cls):
        try:
            # Cleanup resources used
            cleanup_resources(cls.api_client, cls._cleanup)
            for host in cls.hosts:
                Host.cancelMaintenance(
                    cls.api_client,
                    id=host.id
                )
                hosts_states = Host.list(
                    cls.api_client,
                    id=host.id,
                    listall=True
                )
                if hosts_states[0].resourcestate != 'Enabled':
                    raise Exception(
                        "Failed to cancel maintenance mode on %s" %
                        (host.name))
        except Exception as e:
            raise Exception("Warning: Exception during cleanup : %s" % e)
        return
    def setUpClass(cls):
        cls.testClient = super(TestAttachVolumeISO, cls).getClsTestClient()
        cls.api_client = cls.testClient.getApiClient()

        cls.services = Services().services
        # Get Zone, Domain and templates
        cls.domain = get_domain(cls.api_client)
        cls.zone = get_zone(cls.api_client, cls.testClient.getZoneForTests())
        cls.pod = get_pod(cls.api_client, cls.zone.id)
        cls.services["mode"] = cls.zone.networktype
        cls._cleanup = []
        cls.unsupportedStorageType = False
        cls.hypervisor = cls.testClient.getHypervisorInfo()
        if cls.hypervisor.lower() == "lxc":
            if not find_storage_pool_type(cls.api_client, storagetype="rbd"):
                cls.unsupportedStorageType = True
                return
        cls.disk_offering = DiskOffering.create(cls.api_client, cls.services["disk_offering"])
        cls._cleanup.append(cls.disk_offering)
        template = get_template(cls.api_client, cls.zone.id, cls.services["ostype"])
        cls.services["zoneid"] = cls.zone.id
        cls.services["virtual_machine"]["zoneid"] = cls.zone.id
        cls.services["iso"]["zoneid"] = cls.zone.id
        cls.services["virtual_machine"]["template"] = template.id
        # get max data volumes limit based on the hypervisor type and version
        listHost = Host.list(cls.api_client, type="Routing", zoneid=cls.zone.id, podid=cls.pod.id)
        ver = listHost[0].hypervisorversion
        hv = listHost[0].hypervisor
        cmd = listHypervisorCapabilities.listHypervisorCapabilitiesCmd()
        cmd.hypervisor = hv
        res = cls.api_client.listHypervisorCapabilities(cmd)
        cls.debug("Hypervisor Capabilities: {}".format(res))
        for i in range(len(res)):
            if res[i].hypervisorversion == ver:
                break
        cls.max_data_volumes = int(res[i].maxdatavolumeslimit)
        cls.debug("max data volumes:{}".format(cls.max_data_volumes))
        cls.services["volume"]["max"] = cls.max_data_volumes
        # Create VMs, NAT Rules etc
        cls.account = Account.create(cls.api_client, cls.services["account"], domainid=cls.domain.id)
        cls._cleanup.append(cls.account)

        cls.service_offering = ServiceOffering.create(cls.api_client, cls.services["service_offering"])
        cls._cleanup.append(cls.service_offering)
        cls.virtual_machine = VirtualMachine.create(
            cls.api_client,
            cls.services["virtual_machine"],
            accountid=cls.account.name,
            domainid=cls.account.domainid,
            serviceofferingid=cls.service_offering.id,
        )
    def migrate_vm(self, vm):
        self.debug("Checking if a host is available for migration")
        hosts = Host.listForMigration(self.api_client)
        self.assertEqual(isinstance(hosts, list), True,
                         "List hosts should return a valid list"
                         )
        # Remove the host of current VM from the hosts list
        hosts[:] = [host for host in hosts if host.id != vm.hostid]
        if len(hosts) <= 0:
            self.skipTest("No host available for migration. Test requires at least 2 hosts")
        host = hosts[0]
        self.debug("Migrating VM-ID: %s to Host: %s" % (vm.id, host.id))
        try:
            vm.migrate(self.api_client, hostid=host.id)
        except Exception as e:
            self.fail("Failed to migrate instance, %s" % e)
    def test_08_resize_volume(self):
        """Test resize a volume"""
        # Verify the new size is what we wanted it to be.
        self.debug(
                "Attaching volume (ID: %s) to VM (ID: %s)" % (
                                                    self.volume.id,
                                                    self.virtual_machine.id
                                                    ))

        self.virtual_machine.attach_volume(self.apiClient, self.volume)
        self.attached = True
        hosts = Host.list(self.apiClient, id=self.virtual_machine.hostid)
        self.assertTrue(isinstance(hosts, list))
        self.assertTrue(len(hosts) > 0)
        self.debug("Found %s host" % hosts[0].hypervisor)

        if hosts[0].hypervisor == "XenServer":
            self.virtual_machine.stop(self.apiClient)
        elif hosts[0].hypervisor.lower() == "vmware":
            self.skipTest("Resize Volume is unsupported on VmWare")

        # resize the data disk
        self.debug("Resize Volume ID: %s" % self.volume.id)

        cmd                = resizeVolume.resizeVolumeCmd()
        cmd.id             = self.volume.id
        cmd.diskofferingid = self.services['resizeddiskofferingid']

        self.apiClient.resizeVolume(cmd)

        count = 0
        success = False
        while count < 3:
            list_volume_response = Volume.list(
                                                self.apiClient,
                                                id=self.volume.id,
                                                type='DATADISK'
                                                )
            for vol in list_volume_response:
                if vol.id == self.volume.id and vol.size == 3221225472 and vol.state == 'Ready':  # 3 GiB
                    success = True
            if success:
                break
            else:
                time.sleep(10)
                count += 1
def findSuitableHostForMigration(apiclient, vmid):
    """Returns a suitable host for VM migration"""
    suitableHost = None
    try:
        hosts = Host.listForMigration(apiclient, virtualmachineid=vmid)
    except Exception as e:
        raise Exception("Exception while getting hosts list suitable for migration: %s" % e)

    suitablehosts = []
    if isinstance(hosts, list) and len(hosts) > 0:
        suitablehosts = [
            host for host in hosts if (str(host.resourcestate).lower() == "enabled" and str(host.state).lower() == "up")
        ]
        if len(suitablehosts) > 0:
            suitableHost = suitablehosts[0]

    return suitableHost
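
# A hedged usage sketch (hypothetical helper): pair findSuitableHostForMigration
# with a skip when no enabled, Up host other than the VM's current one exists.
def migrateOrSkip(testcase, apiclient, vm):
    host = findSuitableHostForMigration(apiclient, vm.id)
    if host is None:
        testcase.skipTest("No suitable host found for migrating VM %s" % vm.id)
    vm.migrate(apiclient, hostid=host.id)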
    def setUpClass(cls):
        testClient = super(TestDisableEnableHost, cls).getClsTestClient()
        cls.apiclient = testClient.getApiClient()
        cls.testdata = testClient.getParsedTestDataConfig()
        cls.hypervisor = testClient.getHypervisorInfo()

        cls.snapshotSupported = True

        # Get Zone, Domain and templates
        cls.domain = get_domain(cls.apiclient)
        cls.zone = get_zone(cls.apiclient, testClient.getZoneForTests())
        cls.pod = get_pod(cls.apiclient, zone_id=cls.zone.id)

        hostList = Host.list(cls.apiclient, zoneid=cls.zone.id, type="Routing")
        clusterList = Cluster.list(cls.apiclient, id=hostList[0].clusterid)
        cls.host = Host(hostList[0].__dict__)
        cls.cluster = Cluster(clusterList[0].__dict__)

        cls.template = get_template(cls.apiclient, cls.zone.id, cls.testdata["ostype"])

        cls._cleanup = []
        cls.disabledHosts = []

        try:
            cls.service_offering = ServiceOffering.create(cls.apiclient, cls.testdata["service_offering"])
            cls._cleanup.append(cls.service_offering)

            cls.disk_offering = DiskOffering.create(cls.apiclient, cls.testdata["disk_offering"])
            cls._cleanup.append(cls.disk_offering)

            # Create an account
            cls.account = Account.create(cls.apiclient, cls.testdata["account"], domainid=cls.domain.id)
            cls._cleanup.append(cls.account)

            # Create root admin account

            cls.admin_account = Account.create(cls.apiclient, cls.testdata["account2"], admin=True)
            cls._cleanup.append(cls.admin_account)

            # Create user api client of the account
            cls.userapiclient = testClient.getUserApiClient(UserName=cls.account.name, DomainName=cls.account.domain)

        except Exception as e:
            cls.tearDownClass()
            raise e
        return
    def tearDownClass(cls):
        try:
            for hostid in cls.disabledHosts:
                hosts = Host.list(cls.apiclient, id=hostid)
                assert validateList(hosts)[0] == PASS, \
                    "hosts list validation failed"
                if hosts[0].resourcestate.lower() == DISABLED.lower():
                    cmd = updateHost.updateHostCmd()
                    cmd.id = hostid
                    cmd.resourcestate = ENABLED
                    cmd.allocationstate = ENABLE
                    cls.apiclient.updateHost(cmd)
            cleanup_resources(cls.apiclient, cls._cleanup)
        except Exception as e:
            raise Exception("Warning: Exception during cleanup : %s" % e)
    def test_deployVmOnGivenHost(self):
        """Test deploy VM on specific host
        """

        # Steps for validation
        # 1. as admin list available hosts that are Up
        # 2. deployVM with hostid=above host
        # 3. listVirtualMachines
        # 4. destroy VM
        # Validate the following
        # 1. listHosts returns at least one host in Up state
        # 2. VM should be in Running
        # 3. VM should be on the host that it was deployed on

        hosts = Host.list(self.apiclient, zoneid=self.zone.id, type="Routing", state="Up", listall=True)

        self.assertEqual(isinstance(hosts, list), True, "CS should have atleast one host Up and Running")

        host = hosts[0]
        self.debug("Deploting VM on host: %s" % host.name)

        try:
            vm = VirtualMachine.create(
                self.apiclient,
                self.services["virtual_machine"],
                templateid=self.template.id,
                accountid=self.account.name,
                domainid=self.account.domainid,
                serviceofferingid=self.service_offering.id,
                hostid=host.id,
            )
            self.debug("Deploy VM succeeded")
        except Exception as e:
            self.fail("Deploy VM failed with exception: %s" % e)

        self.debug("Cheking the state of deployed VM")
        vms = VirtualMachine.list(
            self.apiclient, id=vm.id, listall=True, account=self.account.name, domainid=self.account.domainid
        )

        self.assertEqual(isinstance(vms, list), True, "List Vm should return a valid response")

        vm_response = vms[0]
        self.assertEqual(vm_response.state, "Running", "VM should be in running state after deployment")
        self.assertEqual(vm_response.hostid, host.id, "Host id where VM is deployed should match")
        return
    def check_connection(self, secured, host, retries=20, interval=6):

        while retries >= 0:
            time.sleep(interval)
            host = Host.list(
                self.apiclient,
                zoneid=self.zone.id,
                id=host.id,
                type='Routing'
            )[0]
            if host.details.secured == secured:
                return
            retries = retries - 1

        raise Exception("Host detail 'secured' was expected: " + secured +
                        ", actual is: " + host.details.secured)
    def setUp(self):
        self.apiclient = self.testClient.getApiClient()
        self.dbclient = self.testClient.getDbConnection()
        self.cleanup = []

        if self.hypervisor.lower() not in ["kvm"]:
            self.skipTest("Secured migration is only supported on KVM")

        self.hosts = Host.list(
            self.apiclient,
            zoneid=self.zone.id,
            type='Routing',
            hypervisor='KVM')

        if len(self.hosts) < 2:
            self.skipTest("Requires at least two hosts for performing migration related tests")

        self.secure_all_hosts()
        self.updateConfiguration("ca.plugin.root.auth.strictness", "false")
Example #31
0
    def setUp(self):
        self.testdata = self.testClient.getParsedTestDataConfig()
        self.apiclient = self.testClient.getApiClient()

        # Get Zone, Domain and Default Built-in template
        self.domain = get_domain(self.apiclient)
        self.zone = get_zone(self.apiclient, self.testClient.getZoneForTests())

        self.testdata["mode"] = self.zone.networktype
        self.template = get_template(self.apiclient, self.zone.id, self.testdata["ostype"])

        self.hosts = []
        suitablecluster = None
        clusters = Cluster.list(self.apiclient)
        self.assertTrue(isinstance(clusters, list) and len(clusters) > 0, msg = "No clusters found")
        for cluster in clusters:
            self.hosts = Host.list(self.apiclient, clusterid=cluster.id, type='Routing')
            if isinstance(self.hosts, list) and len(self.hosts) >= 2:
                suitablecluster = cluster
                break
        self.assertEqual(validateList(self.hosts)[0], PASS, "hosts list validation failed")
        if len(self.hosts) < 2:
            self.skipTest("Atleast 2 hosts required in cluster for VM HA test")
        #update host tags
        for host in self.hosts:
            Host.update(self.apiclient, id=host.id, hosttags=self.testdata["service_offerings"]["hasmall"]["hosttags"])

        #create a user account
        self.account = Account.create(
            self.apiclient,
            self.testdata["account"],
            domainid=self.domain.id
        )
        #create a service offering
        self.service_offering = ServiceOffering.create(
            self.apiclient,
            self.testdata["service_offerings"]["hasmall"]
        )
        #deploy ha vm
        self.virtual_machine = VirtualMachine.create(
            self.apiclient,
            self.testdata["virtual_machine"],
            accountid=self.account.name,
            zoneid=self.zone.id,
            domainid=self.account.domainid,
            serviceofferingid=self.service_offering.id,
            templateid=self.template.id
        )
        list_vms = VirtualMachine.list(self.apiclient, id=self.virtual_machine.id)
        self.debug(
            "Verify listVirtualMachines response for virtual machine: %s"\
            % self.virtual_machine.id
        )
        self.assertTrue(isinstance(list_vms, list) and len(list_vms) == 1, msg = "List VM response was empty")
        self.virtual_machine = list_vms[0]

        self.mock_checkhealth = SimulatorMock.create(
            apiclient=self.apiclient,
            command="CheckHealthCommand",
            zoneid=suitablecluster.zoneid,
            podid=suitablecluster.podid,
            clusterid=suitablecluster.id,
            hostid=self.virtual_machine.hostid,
            value="result:fail")
        self.mock_ping = SimulatorMock.create(
            apiclient=self.apiclient,
            command="PingCommand",
            zoneid=suitablecluster.zoneid,
            podid=suitablecluster.podid,
            clusterid=suitablecluster.id,
            hostid=self.virtual_machine.hostid,
            value="result:fail")
        self.mock_checkvirtualmachine = SimulatorMock.create(
            apiclient=self.apiclient,
            command="CheckVirtualMachineCommand",
            zoneid=suitablecluster.zoneid,
            podid=suitablecluster.podid,
            clusterid=suitablecluster.id,
            hostid=self.virtual_machine.hostid,
            value="result:fail")
        self.mock_pingtest = SimulatorMock.create(
            apiclient=self.apiclient,
            command="PingTestCommand",
            zoneid=suitablecluster.zoneid,
            podid=suitablecluster.podid,
            value="result:fail")
        self.mock_checkonhost_list = []
        for host in self.hosts:
            if host.id != self.virtual_machine.hostid:
                self.mock_checkonhost_list.append(SimulatorMock.create(
                    apiclient=self.apiclient,
                    command="CheckOnHostCommand",
                    zoneid=suitablecluster.zoneid,
                    podid=suitablecluster.podid,
                    clusterid=suitablecluster.id,
                    hostid=host.id,
                    value="result:fail"))
        #build cleanup list
        self.cleanup = [
            self.service_offering,
            self.account,
            self.mock_checkhealth,
            self.mock_ping,
            self.mock_checkvirtualmachine,
            self.mock_pingtest
        ]
        self.cleanup = self.cleanup + self.mock_checkonhost_list
    def test_check_hypervisor_max_limit_effect(self):
        """ Test hypervisor max limits effect

        # 1. Read the existing count of VMs on the host, including SSVM and VR,
                and modify maxguestslimit accordingly
        # 2. Deploy a VM
        # 3. Try to deploy another VM
        # 4. Verify that the second VM deployment fails
                (2 SSVMs, 1 VR and 1 deployed VM)
        """

        hostList = Host.list(self.apiclient,
                             zoneid=self.zone.id,
                             type="Routing")
        event_validation_result = validateList(hostList)
        self.assertEqual(
            event_validation_result[0], PASS,
            "host list validation failed due to %s" %
            event_validation_result[2])

        self.host = Host(hostList[0].__dict__)
        Host.update(self.apiclient, id=self.host.id, hosttags="host1")

        # Step 1
        # List VM's , SSVM's and VR on selected host
        listVm = list_virtual_machines(self.apiclient, hostid=self.host.id)

        listssvm = list_ssvms(self.apiclient, hostid=self.host.id)

        listvr = list_routers(self.apiclient, hostid=self.host.id)

        newValue = 1
        if listVm is not None:
            newValue = len(listVm) + newValue

        if listssvm is not None:
            newValue = len(listssvm) + newValue

        if listvr is not None:
            newValue = len(listvr) + newValue

        qresultset = self.dbclient.execute(
            "select hypervisor_version from host where uuid='%s'" %
            self.host.id)

        event_validation_result = validateList(qresultset)
        self.assertEqual(
            event_validation_result[0], PASS,
            "event list validation failed due to %s" %
            event_validation_result[2])

        cmdList = listHypervisorCapabilities.listHypervisorCapabilitiesCmd()
        cmdList.hypervisor = self.hypervisor
        config = self.apiclient.listHypervisorCapabilities(cmdList)

        for capability in config:
            if capability.hypervisorversion == qresultset[0][0]:
                self.hostCapId = capability.id
                self.originalLimit = capability.maxguestslimit
                break
        else:
            self.skipTest("No hypervisor capabilities found for %s \
                    with version %s" % (self.hypervisor, qresultset[0][0]))

        cmdUpdate = updateHypervisorCapabilities.\
            updateHypervisorCapabilitiesCmd()
        cmdUpdate.id = self.hostCapId
        cmdUpdate.maxguestslimit = newValue
        self.apiclient.updateHypervisorCapabilities(cmdUpdate)

        # Step 2
        vm = VirtualMachine.create(
            self.userapiclient,
            self.testdata["small"],
            templateid=self.template.id,
            accountid=self.account.name,
            domainid=self.account.domainid,
            serviceofferingid=self.service_offering.id,
            zoneid=self.zone.id,
        )

        self.cleanup.append(vm)
        # Step 3
        with self.assertRaises(Exception):
            VirtualMachine.create(
                self.userapiclient,
                self.testdata["small"],
                templateid=self.template.id,
                accountid=self.account.name,
                domainid=self.account.domainid,
                serviceofferingid=self.service_offering.id,
                zoneid=self.zone.id,
            )
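
    # A hedged sketch of the matching cleanup (hypothetical tearDown body):
    # restore the hypervisor's original maxguestslimit saved above, then run
    # the usual resource cleanup.
    def tearDown(self):
        if getattr(self, "hostCapId", None):
            cmdUpdate = updateHypervisorCapabilities.\
                updateHypervisorCapabilitiesCmd()
            cmdUpdate.id = self.hostCapId
            cmdUpdate.maxguestslimit = self.originalLimit
            self.apiclient.updateHypervisorCapabilities(cmdUpdate)
        cleanup_resources(self.apiclient, self.cleanup)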
    def test_03_restore_vm_with_new_template(self):
        """ Test restoring a vm with different template than the one it was created with
        """

        hosts = Host.list(self.apiclient, type="Routing", listall=True)

        host_list_validation_result = validateList(hosts)

        self.assertEqual(
            host_list_validation_result[0], PASS,
            "host list validation failed due to %s" %
            host_list_validation_result[2])

        hypervisor = host_list_validation_result[1].hypervisor

        for k, v in self.services["templates"].items():
            if k.lower() == hypervisor.lower():
                # Register new template
                template = Template.register(self.apiclient,
                                             v,
                                             zoneid=self.zone.id,
                                             account=self.account.name,
                                             domainid=self.account.domainid,
                                             hypervisor=self.hypervisor)
                self.debug("Registered a template of format: %s with ID: %s" %
                           (v["format"], template.id))
                self.debug("Downloading template with ID: %s" % (template.id))
                template.download(self.apiclient)
                self.cleanup.append(template)

                # Wait for template status to be changed across
                time.sleep(self.services["sleep"])

                self.verify_template_listing(template)

                # Restore a vm with the new template.
                self.vm_with_reset.restore(self.apiclient,
                                           templateid=template.id)
                self.vm_without_reset.restore(self.apiclient,
                                              templateid=template.id)

                # Make sure the VMs now have the new template ID
                # Make sure the Ip address of the VMs haven't changed
                self.debug("Checking template id of VM with isVolatile=True")
                vms = VirtualMachine.list(self.apiclient,
                                          id=self.vm_with_reset.id,
                                          listall=True)

                vm_list_validation_result = validateList(vms)

                self.assertEqual(
                    vm_list_validation_result[0], PASS,
                    "VM list validation failed due to %s" %
                    vm_list_validation_result[2])

                vm_with_reset = vm_list_validation_result[1]

                self.assertNotEqual(
                    self.vm_with_reset.templateid, vm_with_reset.templateid,
                    "VM created with IsVolatile=True has same templateid : %s after restore"
                    % vm_with_reset.templateid)

                self.assertNotEqual(
                    self.vm_with_reset.templateid, template.id,
                    "VM created with IsVolatile=True has wrong templateid after restore Got:%s Expected: %s"
                    % (self.vm_with_reset.templateid, template.id))
                # Make sure it has the same IP after reboot
                self.assertEqual(
                    self.vm_with_reset.nic[0].ipaddress,
                    vm_with_reset.nic[0].ipaddress,
                    "VM created with IsVolatile=True doesn't have same ip after restore. Got : %s Expected : %s"
                    % (vm_with_reset.nic[0].ipaddress,
                       self.vm_with_reset.nic[0].ipaddress))

                # Check if the root disk was not destroyed for isVolatile=False
                self.debug("Checking template id of VM with isVolatile=False")
                vms = VirtualMachine.list(self.apiclient,
                                          id=self.vm_without_reset.id,
                                          listall=True)

                vm_list_validation_result = validateList(vms)

                self.assertEqual(
                    vm_list_validation_result[0], PASS,
                    "VM list validation failed due to %s" %
                    vm_list_validation_result[2])

                vm_without_reset = vm_list_validation_result[1]

                self.assertNotEqual(
                    self.vm_without_reset.templateid,
                    vm_without_reset.templateid,
                    "VM created with IsVolatile=False has same templateid : %s after restore"
                    % vm_with_reset.templateid)

                self.assertNotEqual(
                    self.vm_without_reset.templateid, template.id,
                    "VM created with IsVolatile=False has wrong templateid after restore Got:%s Expected: %s"
                    % (self.vm_without_reset.templateid, template.id))
                # Make sure it has the same IP after reboot
                self.assertEqual(
                    self.vm_without_reset.nic[0].ipaddress,
                    vm_without_reset.nic[0].ipaddress,
                    "VM created with IsVolatile=False doesn't have same ip after restore. Got : %s Expected : %s"
                    % (vm_without_reset.nic[0].ipaddress,
                       self.vm_without_reset.nic[0].ipaddress))
        return
    def test_07_resize_fail(self):
        """Test resize (negative) non-existent volume"""
        # Verify that invalid resize requests are rejected appropriately.
        self.debug("Fail Resize Volume ID: %s" % self.volume.id)

        # first, an invalid id
        cmd                = resizeVolume.resizeVolumeCmd()
        cmd.id             = "invalid id"
        cmd.diskofferingid = self.services['customresizeddiskofferingid']
        success            = False
        try:
            self.apiClient.resizeVolume(cmd)
        except Exception as ex:
            #print str(ex)
            if "invalid" in str(ex):
                success = True
        self.assertEqual(
                success,
                True,
                "ResizeVolume - verify invalid id is handled appropriately")

        # Next, we'll try an invalid disk offering id
        cmd.id             = self.volume.id
        cmd.diskofferingid = "invalid id"
        success            = False
        try:
            self.apiClient.resizeVolume(cmd)
        except Exception as ex:
            if "invalid" in str(ex):
                success = True
        self.assertEqual(
                success,
                True,
                "ResizeVolume - verify disk offering is handled appropriately")

        # try to resize a root disk with a disk offering; root disks can only be resized via size=
        # get root vol from created vm
        list_volume_response = Volume.list(
                                            self.apiClient,
                                            virtualmachineid=self.virtual_machine.id,
                                            type='ROOT',
                                            listall=True
                                            )

        rootvolume = list_volume_response[0]

        cmd.id             = rootvolume.id
        cmd.diskofferingid = self.services['diskofferingid']
        with self.assertRaises(Exception):
            self.apiClient.resizeVolume(cmd)

        # Ok, now let's try and resize a volume that is not custom.
        cmd.id             = self.volume.id
        cmd.diskofferingid = self.services['diskofferingid']
        cmd.size           = 4
        currentSize        = self.volume.size

        self.debug(
                "Attaching volume (ID: %s) to VM (ID: %s)" % (
                                                    self.volume.id,
                                                    self.virtual_machine.id)
                 )
        #attach the volume
        self.virtual_machine.attach_volume(self.apiClient, self.volume)
        self.attached = True
        #stop the vm if it is on xenserver
        hosts = Host.list(self.apiClient, id=self.virtual_machine.hostid)
        self.assertTrue(isinstance(hosts, list))
        self.assertTrue(len(hosts) > 0)
        self.debug("Found %s host" % hosts[0].hypervisor)

        if hosts[0].hypervisor == "XenServer":
            self.virtual_machine.stop(self.apiClient)
        elif hosts[0].hypervisor.lower() in ("vmware", "hyperv"):
            self.skipTest("Resize Volume is unsupported on VmWare and Hyper-V")

        # Attempting to resize it should throw an exception, as we're using a non
        # customisable disk offering, therefore our size parameter should be ignored
        with self.assertRaises(Exception):
            self.apiClient.resizeVolume(cmd)

        if hosts[0].hypervisor == "XenServer":
            self.virtual_machine.start(self.apiClient)
            time.sleep(30)
        return 
    def test_08_resize_volume(self):
        """Test resize a volume"""
        # Verify the new size is what we wanted it to be.
        self.debug(
                "Attaching volume (ID: %s) to VM (ID: %s)" % (
                                                    self.volume.id,
                                                    self.virtual_machine.id
                                                    ))

        self.virtual_machine.attach_volume(self.apiClient, self.volume)
        self.attached = True
        hosts = Host.list(self.apiClient, id=self.virtual_machine.hostid)
        self.assertTrue(isinstance(hosts, list))
        self.assertTrue(len(hosts) > 0)
        self.debug("Found %s host" % hosts[0].hypervisor)

        if hosts[0].hypervisor == "XenServer":
            self.virtual_machine.stop(self.apiClient)
        elif hosts[0].hypervisor.lower() in ("vmware", "hyperv"):
            self.skipTest("Resize Volume is unsupported on VmWare and Hyper-V")

        # resize the data disk
        self.debug("Resize Volume ID: %s" % self.volume.id)

        self.services["disk_offering"]["disksize"] = 20
        disk_offering_20_GB = DiskOffering.create(
                                    self.apiclient,
                                    self.services["disk_offering"]
                                    )
        self.cleanup.append(disk_offering_20_GB)

        cmd                = resizeVolume.resizeVolumeCmd()
        cmd.id             = self.volume.id
        cmd.diskofferingid = disk_offering_20_GB.id

        self.apiClient.resizeVolume(cmd)

        count = 0
        success = False
        while count < 3:
            list_volume_response = Volume.list(
                                                self.apiClient,
                                                id=self.volume.id,
                                                type='DATADISK'
                                                )
            for vol in list_volume_response:
                if vol.id == self.volume.id and int(vol.size) == (int(disk_offering_20_GB.disksize) * (1024 ** 3)) and vol.state == 'Ready':
                    success = True
            if success:
                break
            else:
                time.sleep(10)
                count += 1

        self.assertEqual(
                         success,
                         True,
                         "Check if the data volume resized appropriately"
                         )

        can_shrink = False

        list_volume_response = Volume.list(
                                            self.apiClient,
                                            id=self.volume.id,
                                            type='DATADISK'
                                            )
        storage_pool_id = [x.storageid for x in list_volume_response if x.id == self.volume.id][0]
        storage = StoragePool.list(self.apiclient, id=storage_pool_id)[0]
        # At present only CLVM supports shrinking volumes
        if storage.type.lower() == "clvm":
            can_shrink = True

        if can_shrink:
            self.services["disk_offering"]["disksize"] = 10
            disk_offering_10_GB = DiskOffering.create(
                                        self.apiclient,
                                        self.services["disk_offering"]
                                        )
            self.cleanup.append(disk_offering_10_GB)

            cmd                = resizeVolume.resizeVolumeCmd()
            cmd.id             = self.volume.id
            cmd.diskofferingid = disk_offering_10_GB.id
            cmd.shrinkok       = "true"

            self.apiClient.resizeVolume(cmd)

            count = 0
            success = False
            while count < 3:
                list_volume_response = Volume.list(
                                                    self.apiClient,
                                                    id=self.volume.id
                                                    )
                for vol in list_volume_response:
                    if vol.id == self.volume.id and int(vol.size) == (int(disk_offering_10_GB.disksize) * (1024 ** 3)) and vol.state == 'Ready':
                        success = True
                if success:
                    break
                else:
                    time.sleep(10)
                    count += 1

            self.assertEqual(
                             success,
                             True,
                             "Check if the root volume resized appropriately"
                             )

        #start the vm if it is on xenserver

        if hosts[0].hypervisor == "XenServer":
            self.virtual_machine.start(self.apiClient)
            time.sleep(30)
        return
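
    # The poll-until-resized loop repeats across the resize tests. A hedged
    # sketch of a shared helper (hypothetical, assumed retry/interval values)
    # that waits for a volume to reach a target size in the Ready state:
    def wait_for_volume_size(self, volume_id, expected_bytes, retries=3, interval=10):
        for _ in range(retries):
            volumes = Volume.list(self.apiClient, id=volume_id) or []
            for vol in volumes:
                if vol.id == volume_id and int(vol.size) == expected_bytes \
                        and vol.state == 'Ready':
                    return True
            time.sleep(interval)
        return False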
    def test_07_resize_fail(self):
        """Test resize (negative) non-existent volume"""
        # Verify that invalid resize requests are rejected appropriately.
        self.debug("Fail Resize Volume ID: %s" % self.volume.id)

        # first, an invalid id
        cmd                = resizeVolume.resizeVolumeCmd()
        cmd.id             = "invalid id"
        cmd.diskofferingid = self.services['resizeddiskofferingid']
        success            = False
        try:
            self.apiClient.resizeVolume(cmd)
        except Exception as ex:
            #print str(ex)
            if "invalid" in str(ex):
                success = True
        self.assertEqual(
                success,
                True,
                "ResizeVolume - verify invalid id is handled appropriately")

        # Next, we'll try an invalid disk offering id
        cmd.id             = self.volume.id
        cmd.diskofferingid = "invalid id"
        success            = False
        try:
            self.apiClient.resizeVolume(cmd)
        except Exception as ex:
            if "invalid" in str(ex):
                success = True
        self.assertEqual(
                success,
                True,
                "ResizeVolume - verify disk offering is handled appropriately")

        # Try to resize a root disk with a disk offering; root volumes can only be resized via the size parameter.
        # get root vol from created vm
        list_volume_response = Volume.list(
                                            self.apiClient,
                                            virtualmachineid=self.virtual_machine.id,
                                            type='ROOT',
                                            listall=True
                                            )

        rootvolume = list_volume_response[0]

        cmd.id             = rootvolume.id
        cmd.diskofferingid = self.services['diskofferingid']
        success            = False
        try:
            self.apiClient.resizeVolume(cmd)
        except Exception as ex:
            if "Can only resize Data volumes" in str(ex):
                success = True
        self.assertEqual(
                success,
                True,
                "ResizeVolume - verify root disks cannot be resized by disk offering id")

        # Ok, now let's try and resize a volume that is not custom.
        cmd.id             = self.volume.id
        cmd.diskofferingid = self.services['diskofferingid']
        cmd.size           = 4
        currentSize        = self.volume.size

        self.debug(
                "Attaching volume (ID: %s) to VM (ID: %s)" % (
                                                    self.volume.id,
                                                    self.virtual_machine.id)
                 )
        #attach the volume
        self.virtual_machine.attach_volume(self.apiClient, self.volume)
        self.attached = True
        #stop the vm if it is on xenserver
        hosts = Host.list(self.apiClient, id=self.virtual_machine.hostid)
        self.assertTrue(isinstance(hosts, list))
        self.assertTrue(len(hosts) > 0)
        self.debug("Found %s host" % hosts[0].hypervisor)

        if hosts[0].hypervisor == "XenServer":
            self.virtual_machine.stop(self.apiClient)
        elif hosts[0].hypervisor.lower() == "vmware":
            self.skipTest("Resize Volume is unsupported on VmWare")

        self.apiClient.resizeVolume(cmd)
        count = 0
        success = True
        while count < 10:
            list_volume_response = Volume.list(
                                                self.apiClient,
                                                id=self.volume.id,
                                                type='DATADISK'
                                                )
            for vol in list_volume_response:
                if vol.id == self.volume.id and vol.size != currentSize and vol.state != "Resizing":
                    success = False
            if not success:
                # A wrongful resize was detected; stop polling and fail below.
                break
            time.sleep(1)
            count += 1

        self.assertEqual(
                         success,
                         True,
                         "Verify the volume did not resize"
                         )
        if hosts[0].hypervisor == "XenServer":
            self.virtual_machine.start(self.apiClient)
            time.sleep(30)
        return
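Each negative case in the test above follows the same expect-failure shape: issue resizeVolume, catch the exception, and check the message. A minimal sketch of that pattern as a helper, assuming the apiClient attribute used throughout this example; the helper name and signature are illustrative, not from marvin:

def assert_resize_fails(testcase, cmd, expected_fragment):
    # Issue resizeVolume and assert it fails with the expected error text.
    try:
        testcase.apiClient.resizeVolume(cmd)
    except Exception as ex:
        if expected_fragment in str(ex):
            return
        testcase.fail("resizeVolume failed with unexpected error: %s" % ex)
    testcase.fail("resizeVolume succeeded but was expected to fail")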
Example #37
    def _perform_add_remove_host(self, primary_storage_id, sf_iscsi_name):
        xen_sr = self.xen_session.xenapi.SR.get_by_name_label(sf_iscsi_name)[0]

        pbds = self.xen_session.xenapi.SR.get_PBDs(xen_sr)

        self._verify_all_pbds_attached(pbds)

        num_pbds = len(pbds)

        sf_vag_id = self._get_sf_vag_id(self.cluster.id, primary_storage_id)

        host_iscsi_iqns = self._get_host_iscsi_iqns()

        sf_vag = self._get_sf_vag(sf_vag_id)

        sf_vag_initiators = self._get_sf_vag_initiators(sf_vag)

        self._verifyVag(host_iscsi_iqns, sf_vag_initiators)

        sf_vag_initiators_len_orig = len(sf_vag_initiators)

        xen_session = XenAPI.Session(self.testdata[TestData.urlOfNewHost])

        xenserver = self.testdata[TestData.xenServer]

        xen_session.xenapi.login_with_password(xenserver[TestData.username], xenserver[TestData.password])

        xen_session.xenapi.pool.join(self.xs_pool_master_ip, xenserver[TestData.username], xenserver[TestData.password])

        time.sleep(60)

        pbds = self.xen_session.xenapi.SR.get_PBDs(xen_sr)

        self.assertEqual(
            len(pbds),
            num_pbds + 1,
            "'len(pbds)' is not equal to 'num_pbds + 1'."
        )

        num_pbds = num_pbds + 1

        num_pbds_not_attached = 0

        for pbd in pbds:
            pbd_record = self.xen_session.xenapi.PBD.get_record(pbd)

            if pbd_record["currently_attached"] == False:
                num_pbds_not_attached = num_pbds_not_attached + 1

        self.assertEqual(
            num_pbds_not_attached,
            1,
            "'num_pbds_not_attached' is not equal to 1."
        )

        host = Host.create(
            self.apiClient,
            self.cluster,
            self.testdata[TestData.newHost],
            hypervisor="XenServer"
        )

        self.assertTrue(
            isinstance(host, Host),
            "'host' is not a 'Host'."
        )

        pbds = self.xen_session.xenapi.SR.get_PBDs(xen_sr)

        self.assertEqual(
            len(pbds),
            num_pbds,
            "'len(pbds)' is not equal to 'num_pbds'."
        )

        self._verify_all_pbds_attached(pbds)

        host_iscsi_iqns = self._get_host_iscsi_iqns()

        sf_vag = self._get_sf_vag(sf_vag_id)

        sf_vag_initiators = self._get_sf_vag_initiators(sf_vag)

        self._verifyVag(host_iscsi_iqns, sf_vag_initiators)

        sf_vag_initiators_len_new = len(sf_vag_initiators)

        self.assertEqual(
            sf_vag_initiators_len_new,
            sf_vag_initiators_len_orig + 1,
            "sf_vag_initiators_len_new' != sf_vag_initiators_len_orig + 1"
        )

        host.delete(self.apiClient)

        pbds = self.xen_session.xenapi.SR.get_PBDs(xen_sr)

        self.assertEqual(
            len(pbds),
            num_pbds,
            "'len(pbds)' is not equal to 'num_pbds'."
        )

        self._verify_all_pbds_attached(pbds)

        host_iscsi_iqns = self._get_host_iscsi_iqns()

        sf_vag = self._get_sf_vag(sf_vag_id)

        sf_vag_initiators = self._get_sf_vag_initiators(sf_vag)

        self.assertEqual(
            len(host_iscsi_iqns) - 1,
            len(sf_vag_initiators),
            "'len(host_iscsi_iqns) - 1' is not equal to 'len(sf_vag_initiators)'."
        )

        host_ref = self.xen_session.xenapi.host.get_by_name_label(self.testdata[TestData.newHostDisplayName])[0]

        self.xen_session.xenapi.pool.eject(host_ref)

        time.sleep(120)

        pbds = self.xen_session.xenapi.SR.get_PBDs(xen_sr)

        self.assertEqual(
            len(pbds),
            num_pbds - 1,
            "'len(pbds)' is not equal to 'num_pbds - 1'."
        )

        self._verify_all_pbds_attached(pbds)

        host_iscsi_iqns = self._get_host_iscsi_iqns()

        sf_vag = self._get_sf_vag(sf_vag_id)

        sf_vag_initiators = self._get_sf_vag_initiators(sf_vag)

        self._verifyVag(host_iscsi_iqns, sf_vag_initiators)

        sf_vag_initiators_len_new = len(sf_vag_initiators)

        self.assertEqual(
            sf_vag_initiators_len_new,
            sf_vag_initiators_len_orig,
            "sf_vag_initiators_len_new' != sf_vag_initiators_len_orig"
        )
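_verify_all_pbds_attached is called throughout the example but its body is not shown in this listing. A plausible sketch, inferred from the inline PBD check earlier in the method; this is an assumption about the helper, not the plugin's verbatim code:

    def _verify_all_pbds_attached(self, pbds):
        # Every PBD on the SR should be currently attached once the
        # add/remove-host operations settle.
        for pbd in pbds:
            pbd_record = self.xen_session.xenapi.PBD.get_record(pbd)
            self.assertTrue(
                pbd_record["currently_attached"],
                "Found a PBD that is not currently attached."
            )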
Example #38
    def test_08_migrate_vm(self):
        """Test migrate VM
        """
        # Validate the following
        # 1. Environment has enough hosts for migration
        # 2. DeployVM on suitable host (with another host in the cluster)
        # 3. Migrate the VM and assert migration successful

        suitable_hosts = None

        hosts = Host.list(self.apiclient, zoneid=self.zone.id, type='Routing')
        self.assertEqual(
            validateList(hosts)[0], PASS, "hosts list validation failed")

        if len(hosts) < 2:
            self.skipTest(
                "At least two hosts should be present in the zone for migration"
            )

        if self.hypervisor.lower() in ["lxc"]:
            self.skipTest("Migration is not supported on LXC")

        # For KVM, the two hosts used for migration should be present in the same cluster.
        # For XenServer and VMware, migration is possible between hosts in different clusters,
        # with the help of XenMotion and vMotion respectively. (A sketch after this example
        # shows delegating the host choice to the findHostsForMigration API.)

        if self.hypervisor.lower() in ["kvm", "simulator"]:
            # identify suitable host
            clusters = [h.clusterid for h in hosts]
            # find hosts with the same clusterid
            clusters = [
                cluster for index, cluster in enumerate(clusters)
                if clusters.count(cluster) > 1
            ]

            if len(clusters) <= 1:
                self.skipTest(
                    "Live migration on " + self.hypervisor.lower() +
                    " needs two hosts within the same cluster")

            suitable_hosts = [
                host for host in hosts if host.clusterid == clusters[0]
            ]
        else:
            suitable_hosts = hosts

        target_host = suitable_hosts[0]
        migrate_host = suitable_hosts[1]

        # deploy VM on target host
        vm_to_migrate = VirtualMachine.create(
            self.apiclient,
            self.services["small"],
            accountid=self.account.name,
            domainid=self.account.domainid,
            serviceofferingid=self.small_offering.id,
            mode=self.services["mode"],
            hostid=target_host.id)
        self.debug("Migrating VM-ID: %s to Host: %s" %
                   (vm_to_migrate.id, migrate_host.id))

        vm_to_migrate.migrate(self.apiclient, migrate_host.id)

        # Poll until the VM is reported on the destination host; the original
        # loop re-asserted immediately without waiting, so give the migration
        # a few polling intervals to settle.
        retries_cnt = 3
        migrated = False
        while retries_cnt >= 0:
            list_vm_response = VirtualMachine.list(self.apiclient,
                                                   id=vm_to_migrate.id)
            self.assertNotEqual(list_vm_response, None,
                                "Check virtual machine is listed")
            vm_response = list_vm_response[0]
            self.assertEqual(vm_response.id, vm_to_migrate.id,
                             "Check virtual machine ID of migrated VM")
            if vm_response.hostid == migrate_host.id:
                migrated = True
                break
            time.sleep(10)
            retries_cnt = retries_cnt - 1
        self.assertTrue(migrated,
                        "Check destination hostID of migrated VM")
        return
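The cluster-matching logic above can also be delegated to CloudStack itself. marvin's Host class exposes a listForMigration wrapper around the findHostsForMigration API; treating it as available in the deployment under test is an assumption here. A minimal sketch, reusing the validateList/PASS imports the example already relies on:

    def _pick_migration_host(self, vm):
        # Ask the management server which hosts are suitable migration
        # targets for this VM, instead of matching cluster ids by hand.
        hosts = Host.listForMigration(self.apiclient, virtualmachineid=vm.id)
        self.assertEqual(validateList(hosts)[0], PASS,
                         "No suitable hosts found for migration")
        return hosts[0]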