예제 #1
0
    def test_01_cluster_settings(self):
        """Update cpu/mem.overprovisioning.factor at cluster scope and
        confirm the cluster reports the new overcommit ratios, then
        restore both factors to 1 and confirm again."""
        hosts = Host.list(self.apiclient,
                          id=self.deployVmResponse.hostid)
        self.assertEqual(
            validateList(hosts)[0],
            PASS,
            "check list host response for host id %s" %
            self.deployVmResponse.hostid)
        cluster_id = hosts[0].clusterid

        # Raise the overcommit factors (mem first, then cpu, matching
        # the order the API calls were originally issued in).
        for setting, factor in (("mem.overprovisioning.factor", 2),
                                ("cpu.overprovisioning.factor", 3)):
            Configurations.update(self.apiclient,
                                  clusterid=cluster_id,
                                  name=setting,
                                  value=factor)

        clusters = Cluster.list(self.apiclient, id=cluster_id)
        self.assertEqual(
            validateList(clusters)[0],
            PASS,
            "check list cluster response for cluster id %s" %
            cluster_id)
        self.assertEqual(int(clusters[0].cpuovercommitratio),
                         3,
                         "check the cpu overcommit value at cluster level ")

        self.assertEqual(int(clusters[0].memoryovercommitratio),
                         2,
                         "check memory overcommit value at cluster level")

        # Restore both factors to the default of 1.
        for setting in ("mem.overprovisioning.factor",
                        "cpu.overprovisioning.factor"):
            Configurations.update(self.apiclient,
                                  clusterid=cluster_id,
                                  name=setting,
                                  value=1)

        clusters_reset = Cluster.list(self.apiclient, id=cluster_id)
        self.assertEqual(
            validateList(clusters_reset)[0],
            PASS,
            "check the list cluster response for id %s" %
            cluster_id)
        self.assertEqual(int(clusters_reset[0].cpuovercommitratio),
                         1,
                         "check the cpu overcommit value at cluster level ")

        self.assertEqual(int(clusters_reset[0].memoryovercommitratio),
                         1,
                         "check memory overcommit value at cluster level")
예제 #2
0
    def test_01_cluster_settings(self):
        """change cpu/mem.overprovisioning.factor at cluster level and
         verify the change """
        # Find the host the test VM landed on so we can derive its cluster.
        listHost = Host.list(self.apiclient,
                             id=self.deployVmResponse.hostid
                             )
        self.assertEqual(
            validateList(listHost)[0],
            PASS,
            "check list host response for host id %s" %
            self.deployVmResponse.hostid)
        # Raise memory overcommit to 2x on that cluster.
        Configurations.update(self.apiclient,
                              clusterid=listHost[0].clusterid,
                              name="mem.overprovisioning.factor",
                              value=2)

        # Raise cpu overcommit to 3x on the same cluster.
        Configurations.update(self.apiclient,
                              clusterid=listHost[0].clusterid,
                              name="cpu.overprovisioning.factor",
                              value=3)

        # Re-read the cluster and confirm both ratios took effect.
        list_cluster = Cluster.list(self.apiclient,
                                    id=listHost[0].clusterid)
        self.assertEqual(
            validateList(list_cluster)[0],
            PASS,
            "check list cluster response for cluster id %s" %
            listHost[0].clusterid)
        self.assertEqual(int(list_cluster[0].cpuovercommitratio),
                         3,
                         "check the cpu overcommit value at cluster level ")

        self.assertEqual(int(list_cluster[0].memoryovercommitratio),
                         2,
                         "check memory overcommit value at cluster level")

        # Reset both factors back to 1 and verify the reset as well.
        Configurations.update(self.apiclient,
                              clusterid=listHost[0].clusterid,
                              name="mem.overprovisioning.factor",
                              value=1)

        Configurations.update(self.apiclient,
                              clusterid=listHost[0].clusterid,
                              name="cpu.overprovisioning.factor",
                              value=1)
        list_cluster1 = Cluster.list(self.apiclient,
                                     id=listHost[0].clusterid)
        self.assertEqual(
            validateList(list_cluster1)[0],
            PASS,
            "check the list cluster response for id %s" %
            listHost[0].clusterid)
        self.assertEqual(int(list_cluster1[0].cpuovercommitratio),
                         1,
                         "check the cpu overcommit value at cluster level ")

        self.assertEqual(int(list_cluster1[0].memoryovercommitratio),
                         1,
                         "check memory overcommit value at cluster level")
예제 #3
0
 def cleanUpCloudStack(cls):
     """Re-enable any clusters left Disabled, then release test resources."""
     try:
         disabled = Cluster.list(cls.apiclient, allocationstate="Disabled")
         for entry in (disabled or []):
             Cluster.update(cls.apiclient,
                            id=entry.id,
                            allocationstate="Enabled")
         # Cleanup resources used
         cleanup_resources(cls.apiclient, cls._cleanup)
     except Exception as e:
         raise Exception("Warning: Exception during cleanup : %s" % e)
     return
    def setUpClass(cls):
        # One-time setup: find a cluster with >= 2 routing hosts, tag the
        # second host, and create the tagged service offering used by tests.
        cls.testClient = super(TestHostsForMigration, cls).getClsTestClient()
        cls.api_client = cls.testClient.getApiClient()

        cls.services = Services().services
        # Get Zone, Domain and templates
        cls.domain = get_domain(cls.api_client)
        cls.zone = get_zone(cls.api_client, cls.testClient.getZoneForTests())

        cls.template = get_template(cls.api_client, cls.zone.id, cls.services["ostype"])

        # Migration needs both a source and a destination host, so pick the
        # first cluster that has at least two routing hosts.
        clusterWithSufficientHosts = None
        clusters = Cluster.list(cls.api_client, zoneid=cls.zone.id)
        for cluster in clusters:
            cls.hosts = Host.list(cls.api_client, clusterid=cluster.id, type="Routing")
            if len(cls.hosts) >= 2:
                clusterWithSufficientHosts = cluster
                break

        if clusterWithSufficientHosts is None:
            raise unittest.SkipTest("No Cluster with 2 hosts found")

        # Tag the second host so the tagged offering pins VMs to it.
        Host.update(cls.api_client, id=cls.hosts[1].id, hosttags="PREMIUM")

        cls.services["virtual_machine"]["zoneid"] = cls.zone.id
        cls.services["virtual_machine"]["template"] = cls.template.id

        cls.service_offering_with_tag = ServiceOffering.create(
            cls.api_client, cls.services["service_offering_with_tag"]
        )

        cls._cleanup = [cls.service_offering_with_tag]
        return
    def setUpClass(cls):
        # One-time setup: resolve zone/domain/template, create the test
        # account, and cache host/cluster lists for the planner tests.
        testClient = super(TestDeployVmWithVariedPlanners, cls).getClsTestClient()
        cls.apiclient = testClient.getApiClient()
        cls.services = testClient.getParsedTestDataConfig()

        # Get Zone, Domain and templates
        cls.domain = get_domain(cls.apiclient)
        cls.zone = get_zone(cls.apiclient, testClient.getZoneForTests())
        cls.template = get_template(
            cls.apiclient,
            cls.zone.id,
            cls.services["ostype"]
        )

        # get_template returns FAILED rather than raising; abort early.
        if cls.template == FAILED:
            assert False, "get_template() failed to return template with description %s" % cls.services["ostype"]

        cls.services["virtual_machine"]["zoneid"] = cls.zone.id
        cls.services["template"] = cls.template.id
        cls.services["zoneid"] = cls.zone.id

        cls.account = Account.create(
            cls.apiclient,
            cls.services["account"],
            domainid=cls.domain.id
        )
        cls.hosts = Host.list(cls.apiclient, type='Routing')
        cls.clusters = Cluster.list(cls.apiclient)
        cls.cleanup = [
            cls.account
        ]
    def setUpClass(cls):
        # One-time setup: resolve zone/domain/template, create the test
        # account, and cache host/cluster lists for the planner tests.
        testClient = super(TestDeployVmWithVariedPlanners,
                           cls).getClsTestClient()
        cls.apiclient = testClient.getApiClient()
        cls.services = testClient.getParsedTestDataConfig()

        # Get Zone, Domain and templates
        cls.domain = get_domain(cls.apiclient)
        cls.zone = get_zone(cls.apiclient, testClient.getZoneForTests())
        cls.template = get_template(cls.apiclient, cls.zone.id,
                                    cls.services["ostype"])

        # get_template returns FAILED rather than raising; abort early.
        if cls.template == FAILED:
            assert False, "get_template() failed to return template with description %s" % cls.services[
                "ostype"]

        cls.services["virtual_machine"]["zoneid"] = cls.zone.id
        cls.services["template"] = cls.template.id
        cls.services["zoneid"] = cls.zone.id

        cls.account = Account.create(cls.apiclient,
                                     cls.services["account"],
                                     domainid=cls.domain.id)
        cls.hosts = Host.list(cls.apiclient, type='Routing')
        cls.clusters = Cluster.list(cls.apiclient)
        cls.cleanup = [cls.account]
예제 #7
0
    def test_05_deploy_vm_on_cluster_override_pod(self):
        """Deploy a VM with explicit pod and cluster parameters and verify
        it landed on the requested cluster."""
        # Optional parameters pod, cluster and host
        target_pod = Pod.list(self.apiclient, zoneid=self.zone.id)[0]
        cluster_list = Cluster.list(self.apiclient,
                                    zoneid=self.zone.id,
                                    podid=target_pod.id)

        self.assertEqual(isinstance(cluster_list, list), True,
                         "Check list response returns a valid list")

        deploy_cmd = deployVirtualMachine.deployVirtualMachineCmd()

        # Required parameters
        deploy_cmd.zoneid = self.zone.id
        deploy_cmd.serviceofferingid = self.service_offering.id
        tmpl = get_template(self.apiclient,
                            zone_id=self.zone.id,
                            hypervisor=cluster_list[0].hypervisortype)
        deploy_cmd.templateid = tmpl.id

        # Add optional deployment params
        deploy_cmd.podid = target_pod.id
        deploy_cmd.clusterid = cluster_list[0].id

        deployed_vm = self.apiclient.deployVirtualMachine(deploy_cmd)

        placed_host = Host.list(self.apiclient, id=deployed_vm.hostid)

        self.assertEqual(placed_host[0].clusterid, cluster_list[0].id,
                         "VM was not deployed on the target cluster")

        self.destroy_vm(deployed_vm.id)
 def verify_network_setup_on_host_per_cluster(self, hypervisor, vlan_id):
     """Verify the network for *vlan_id* exists on every enabled cluster.

     hypervisor -- "xenserver": check the vlan on one Up routing host per
                   cluster; "vmware": check the port group per cluster.
     vlan_id    -- vlan to look for.
     """
     clusters = Cluster.list(
         self.apiclient,
         zoneid=self.zone.id,
         allocationstate="Enabled",
         listall=True
     )
     for cluster in clusters:
         hosts = Host.list(self.apiclient,
                           clusterid=cluster.id,
                           type="Routing",
                           state="Up",
                           resourcestate="Enabled")
         # Fail with a meaningful message instead of the IndexError the
         # original hosts[0] raised when a cluster had no usable host.
         self.assertTrue(
             hosts,
             "No Up/Enabled routing host found in cluster: " + cluster.name)
         host = hosts[0]
         if hypervisor == "xenserver":
             result = self.verify_vlan_network_creation(host, vlan_id)
             self.assertEqual(
                 int(result),
                 0,
                 "Failed to find vlan on host: " + host.name + " in cluster: " + cluster.name)
         if hypervisor == "vmware":
             result = self.verify_port_group_creation(vlan_id)
             self.assertEqual(
                     result,
                     True,
                     "Failed to find port group on hosts of cluster: " + cluster.name)
예제 #9
0
    def test_04_zone_capacity_check(self):
        """change cpu/mem.overprovisioning.factor at cluster level for
           all cluster in a zone  and  verify capacity at zone level """
        list_cluster = Cluster.list(self.apiclient, zoneid=self.zone.id)
        self.assertEqual(
            validateList(list_cluster)[0], PASS,
            "check list cluster response for zone id  %s" % self.zone.id)

        def _set_factors(value):
            # Apply the same cpu/mem overprovisioning factor to every
            # cluster in the zone (mem first, then cpu, per cluster).
            for cluster in list_cluster:
                Configurations.update(self.apiclient,
                                      clusterid=cluster.id,
                                      name="mem.overprovisioning.factor",
                                      value=value)
                Configurations.update(self.apiclient,
                                      clusterid=cluster.id,
                                      name="cpu.overprovisioning.factor",
                                      value=value)

        # NOTE: the original looped `for id in xrange(k)`, which shadows
        # the builtin `id` and breaks on Python 3 (xrange removed);
        # iterating the clusters directly is equivalent and portable.

        # Baseline capacity with factor 1 everywhere.
        _set_factors(1)
        time.sleep(self.wait_time)

        capacity = Capacities.list(self.apiclient, zoneid=self.zone.id)
        self.assertEqual(
            validateList(capacity)[0], PASS,
            "check list capacity response for zone id %s" % self.zone.id)
        cpu, mem = capacity_parser(capacity)

        # Double the factors and re-measure.
        _set_factors(2)
        time.sleep(self.wait_time)

        capacity1 = Capacities.list(self.apiclient, zoneid=self.zone.id)
        self.assertEqual(
            validateList(capacity1)[0], PASS,
            "check list capacity for zone id %s" % self.zone.id)

        # Total and used capacity should double; the used percentage
        # should be unchanged.
        cpu1, mem1 = capacity_parser(capacity1)
        self.assertEqual(2 * cpu[0], cpu1[0], "check total capacity ")
        self.assertEqual(2 * cpu[1], cpu1[1], "check capacity used")
        self.assertEqual(cpu[2], cpu1[2], "check capacity % used")

        self.assertEqual(2 * mem[0], mem1[0], "check mem total capacity ")
        self.assertEqual(2 * mem[1], mem1[1], "check mem capacity used")
        self.assertEqual(mem[2], mem1[2], "check mem capacity % used")

        # Restore the defaults for subsequent tests.
        _set_factors(1)
    def setUpClass(cls):
        # One-time setup: find a cluster with >= 3 routing hosts, verify the
        # ha.tag global config, tag the third host for HA, and create HA and
        # non-HA service offerings.
        cls.testClient = super(TestHostHighAvailability,
                               cls).getClsTestClient()
        cls.api_client = cls.testClient.getApiClient()

        cls.services = Services().services
        # Get Zone, Domain and templates
        cls.domain = get_domain(cls.api_client)
        cls.zone = get_zone(cls.api_client, cls.testClient.getZoneForTests())

        cls.template = get_template(cls.api_client, cls.zone.id,
                                    cls.services["ostype"])
        cls.hypervisor = cls.testClient.getHypervisorInfo()
        if cls.hypervisor.lower() in ['lxc']:
            raise unittest.SkipTest(
                "Template creation from root volume is not supported in LXC")

        # HA tests need at least three hosts in one cluster.
        clusterWithSufficientHosts = None
        clusters = Cluster.list(cls.api_client, zoneid=cls.zone.id)
        for cluster in clusters:
            cls.hosts = Host.list(cls.api_client,
                                  clusterid=cluster.id,
                                  type="Routing")
            if len(cls.hosts) >= 3:
                clusterWithSufficientHosts = cluster
                break

        if clusterWithSufficientHosts is None:
            raise unittest.SkipTest("No Cluster with 3 hosts found")

        # The deployment must have ha.tag set to "ha" for these tests.
        configs = Configurations.list(cls.api_client, name='ha.tag')

        assert isinstance(configs, list), "Config list not\
                retrieved for ha.tag"

        if configs[0].value != "ha":
            raise unittest.SkipTest("Please set the global config\
                    value for ha.tag as 'ha'")

        # Reserve the third host for HA by tagging it.
        Host.update(cls.api_client, id=cls.hosts[2].id, hosttags="ha")

        cls.services["virtual_machine"]["zoneid"] = cls.zone.id
        cls.services["virtual_machine"]["template"] = cls.template.id

        cls.service_offering_with_ha = ServiceOffering.create(
            cls.api_client,
            cls.services["service_offering_with_ha"],
            offerha=True)

        cls.service_offering_without_ha = ServiceOffering.create(
            cls.api_client,
            cls.services["service_offering_without_ha"],
            offerha=False)

        cls._cleanup = [
            cls.service_offering_with_ha,
            cls.service_offering_without_ha,
        ]
        return
예제 #11
0
    def test_14_create_vm_on_second_cluster_with_template_from_first(self):
        """ Create Virtual Machine On Working Cluster With Template Created on Another """
        # Snapshot the ROOT volume of the VM on the first cluster and
        # build a template from it.
        volume = Volume.list(
            self.apiclient,
            virtualmachineid=self.vm_cluster.id,
            type="ROOT",
            listall=True
            )

        snapshot = Snapshot.create(
            self.apiclient,
            volume[0].id,
            account=self.account.name,
            domainid=self.account.domainid,
            )

        template = self.helper.create_template_from_snapshot(
            self.apiclient,
            self.services,
            snapshotid=snapshot.id
            )

        # Disable the first cluster so the scheduler is forced to place
        # the new VM on the other cluster.
        Cluster.update(
            self.apiclient,
            id=self.local_cluster.id,
            allocationstate="Disabled"
            )

        try:
            virtual_machine = VirtualMachine.create(
                self.apiclient,
                {"name": "StorPool-%s" % uuid.uuid4()},
                zoneid=self.zone.id,
                templateid=template.id,
                accountid=self.account.name,
                domainid=self.account.domainid,
                serviceofferingid=self.service_offering.id,
                hypervisor=self.hypervisor,
                rootdisksize=10
                )
            # Confirm the VM is actually reachable before declaring success.
            virtual_machine.get_ssh_client(reconnect=True)
        finally:
            # Always re-enable the cluster, even when deployment or SSH
            # fails; the original only re-enabled it on the success path,
            # leaving a disabled cluster behind for later tests.
            Cluster.update(
                self.apiclient,
                id=self.local_cluster.id,
                allocationstate="Enabled"
                )
        self._cleanup.append(template)
def update_cluster(apiclient, state, cluster_id, managed_state):
    """Set a cluster's allocation and managed state.

    Returns the (managedstate, allocationstate) pair reported back by
    the updateCluster API call.
    """
    response = Cluster.update(apiclient,
                              id=cluster_id,
                              allocationstate=state,
                              managedstate=managed_state)
    return response.managedstate, response.allocationstate
예제 #13
0
    def setUpClass(cls):
        # One-time setup: find a cluster with at least two NFS storage
        # pools (required by the HA pool-maintenance tests) and create an
        # HA-enabled service offering. On list failures cls.listResponse
        # is set so tests can skip instead of erroring.
        try:
            cls._cleanup = []
            cls.testClient = super(testHaPoolMaintenance,
                                   cls).getClsTestClient()
            cls.api_client = cls.testClient.getApiClient()
            cls.services = cls.testClient.getParsedTestDataConfig()
            # Get Domain, Zone, Template
            cls.domain = get_domain(cls.api_client)
            cls.zone = get_zone(cls.api_client,
                                cls.testClient.getZoneForTests())
            cls.template = get_template(cls.api_client, cls.zone.id,
                                        cls.services["ostype"])
            cls.hypervisor = cls.testClient.getHypervisorInfo()
            cls.services['mode'] = cls.zone.networktype
            cls.hypervisor = cls.testClient.getHypervisorInfo()
            cls.services["virtual_machine"]["zoneid"] = cls.zone.id
            cls.services["virtual_machine"]["template"] = cls.template.id
            cls.clusterWithSufficientPool = None
            cls.listResponse = None
            clusters = Cluster.list(cls.api_client, zoneid=cls.zone.id)

            if not validateList(clusters)[0]:

                cls.debug("check list cluster response for zone id %s" %
                          cls.zone.id)
                cls.listResponse = True
                return

            # Pick the first cluster that has two or more NFS pools.
            for cluster in clusters:
                cls.pool = StoragePool.list(cls.api_client,
                                            clusterid=cluster.id,
                                            keyword="NetworkFilesystem")

                if not validateList(cls.pool)[0]:

                    cls.debug("check list cluster response for zone id %s" %
                              cls.zone.id)
                    cls.listResponse = True
                    return

                if len(cls.pool) >= 2:
                    cls.clusterWithSufficientPool = cluster
                    break
            if not cls.clusterWithSufficientPool:
                return

            # HA must be enabled on the offering for these tests.
            cls.services["service_offerings"]["tiny"]["offerha"] = "True"

            cls.services_off = ServiceOffering.create(
                cls.api_client, cls.services["service_offerings"]["tiny"])
            cls._cleanup.append(cls.services_off)

        except Exception as e:
            cls.tearDownClass()
            raise Exception("Warning: Exception in setup : %s" % e)
        return
예제 #14
0
def update_cluster(self, state, cluster_id, managed_state):
    """Set a cluster's allocation and managed state.

    Returns the (managedstate, allocationstate) pair reported back by
    the updateCluster API call.
    """
    response = Cluster.update(self.apiclient,
                              id=cluster_id,
                              allocationstate=state,
                              managedstate=managed_state)
    return response.managedstate, response.allocationstate
예제 #15
0
    def setUp(self):
        """Fetch API/DB clients and require at least one physical network
        and one VMware cluster in the zone."""
        self.apiclient = self.testClient.getApiClient()
        self.dbclient = self.testClient.getDbConnection()

        self.zone = get_zone(self.apiclient, self.testClient.getZoneForTests())
        self.physicalnetworks = PhysicalNetwork.list(
            self.apiclient, zoneid=self.zone.id)
        self.assertNotEqual(
            len(self.physicalnetworks), 0,
            "Check if the list physical network API returns a non-empty response")
        self.clusters = Cluster.list(self.apiclient, hypervisor='VMware')
        self.assertNotEqual(
            len(self.clusters), 0,
            "Check if the list cluster API returns a non-empty response")

        self.cleanup = []
        return
예제 #16
0
    def test_03_clusters(self):
        """Check the status of clusters"""

        # Validate the following
        # 1. List clusters
        # 2. Check allocation state is "enabled" or not

        cluster_list = Cluster.list(self.apiclient,
                                    zoneid=self.zone.id,
                                    listall=True)
        self.assertEqual(isinstance(cluster_list, list), True,
                         "Check if listClusters returns a valid response")
        for entry in cluster_list:
            self.assertEqual(entry.allocationstate, 'Enabled',
                             "Clusters allocation state should be enabled")
        return
    def setUpClass(cls):
        # One-time setup: capture the host/cluster of the first routing
        # host, create offerings, a user account, and a root admin
        # account plus a user-scoped API client.
        testClient = super(TestDisableEnableHost, cls).getClsTestClient()
        cls.apiclient = testClient.getApiClient()
        cls.testdata = testClient.getParsedTestDataConfig()
        cls.hypervisor = cls.testClient.getHypervisorInfo()

        cls.snapshotSupported = True

        # Get Zone, Domain and templates
        cls.domain = get_domain(cls.apiclient)
        cls.zone = get_zone(cls.apiclient, testClient.getZoneForTests())
        cls.pod = get_pod(cls.apiclient, zone_id=cls.zone.id)

        # Work against the first routing host and its cluster; wrap the
        # raw responses in Host/Cluster objects for later API calls.
        hostList = Host.list(cls.apiclient, zoneid=cls.zone.id, type="routing")
        clusterList = Cluster.list(cls.apiclient, id=hostList[0].clusterid)
        cls.host = Host(hostList[0].__dict__)
        cls.cluster = Cluster(clusterList[0].__dict__)

        cls.template = get_template(cls.apiclient, cls.zone.id, cls.testdata["ostype"])

        cls._cleanup = []
        cls.disabledHosts = []

        try:
            cls.service_offering = ServiceOffering.create(cls.apiclient, cls.testdata["service_offering"])
            cls._cleanup.append(cls.service_offering)

            cls.disk_offering = DiskOffering.create(cls.apiclient, cls.testdata["disk_offering"])
            cls._cleanup.append(cls.disk_offering)

            # Create an account
            cls.account = Account.create(cls.apiclient, cls.testdata["account"], domainid=cls.domain.id)
            cls._cleanup.append(cls.account)

            # Create root admin account

            cls.admin_account = Account.create(cls.apiclient, cls.testdata["account2"], admin=True)
            cls._cleanup.append(cls.admin_account)

            # Create user api client of the account
            cls.userapiclient = testClient.getUserApiClient(UserName=cls.account.name, DomainName=cls.account.domain)

        except Exception as e:
            cls.tearDownClass()
            raise e
        return
    def tearDownClass(cls):
        """Restore the test cluster to Enabled/Managed, then clean up."""
        try:
            cluster_list = Cluster.list(cls.apiclient, id=cls.cluster.id)
            current = cluster_list[0]

            if current.allocationstate.lower() == DISABLED.lower():
                enable_cmd = updateCluster.updateClusterCmd()
                enable_cmd.id = current.id
                enable_cmd.allocationstate = ENABLED
                cls.apiclient.updateCluster(enable_cmd)

            if current.managedstate.lower() == "unmanaged":
                manage_cmd = updateCluster.updateClusterCmd()
                manage_cmd.id = current.id
                manage_cmd.managedstate = "Managed"
                cls.apiclient.updateCluster(manage_cmd)

            cleanup_resources(cls.apiclient, cls._cleanup)
        except Exception as e:
            raise Exception("Warning: Exception during cleanup : %s" % e)
    def setUp(self):
        # Per-test setup: grab API/DB clients and require at least one
        # physical network and one VMware cluster in the zone.
        self.apiclient = self.testClient.getApiClient()
        self.dbclient = self.testClient.getDbConnection()

        self.zone = get_zone(self.apiclient, self.testClient.getZoneForTests())
        self.physicalnetworks = PhysicalNetwork.list(self.apiclient,
                                                     zoneid=self.zone.id)
        self.assertNotEqual(
            len(self.physicalnetworks), 0,
            "Check if the list physical network API returns a non-empty response"
        )
        self.clusters = Cluster.list(self.apiclient, hypervisor='VMware')
        self.assertNotEqual(
            len(self.clusters), 0,
            "Check if the list cluster API returns a non-empty response")

        self.cleanup = []
        return
    def setUpClass(cls):
        # One-time setup: find a cluster with >= 2 routing hosts, tag the
        # second host, and create the tagged service offering used by tests.
        cls.testClient = super(TestHostsForMigration, cls).getClsTestClient()
        cls.api_client = cls.testClient.getApiClient()

        cls.services = Services().services
        # Get Zone, Domain and templates
        cls.domain = get_domain(cls.api_client)
        cls.zone = get_zone(cls.api_client, cls.testClient.getZoneForTests())

        cls.template = get_template(
            cls.api_client,
            cls.zone.id,
            cls.services["ostype"]
        )


        # Migration needs both a source and a destination host, so pick the
        # first cluster that has at least two routing hosts.
        clusterWithSufficientHosts = None
        clusters = Cluster.list(cls.api_client, zoneid=cls.zone.id)
        for cluster in clusters:
            cls.hosts = Host.list(cls.api_client, clusterid=cluster.id, type="Routing")
            if len(cls.hosts) >= 2:
                clusterWithSufficientHosts = cluster
                break

        if clusterWithSufficientHosts is None:
            raise unittest.SkipTest("No Cluster with 2 hosts found")


        # Tag the second host so the tagged offering pins VMs to it.
        Host.update(cls.api_client, id=cls.hosts[1].id, hosttags="PREMIUM")

        cls.services["virtual_machine"]["zoneid"] = cls.zone.id
        cls.services["virtual_machine"]["template"] = cls.template.id

        cls.service_offering_with_tag = ServiceOffering.create(
            cls.api_client,
            cls.services["service_offering_with_tag"]
        )

        cls._cleanup = [
            cls.service_offering_with_tag,
            ]
        return
예제 #21
0
    def test_02_deploy_vm_on_specific_cluster(self):
        """Deploy a VM on an explicitly chosen cluster and verify that
        placement honoured the clusterid parameter."""
        # Select deployment cluster
        target_cluster = Cluster.list(self.apiclient)[0]
        target_id = target_cluster.id

        template = get_template(self.apiclient,
                                hypervisor=target_cluster.hypervisortype)

        # deploy vm on cluster
        cmd = deployVirtualMachine.deployVirtualMachineCmd()
        cmd.zoneid = target_cluster.zoneid
        cmd.serviceofferingid = self.service_offering.id
        cmd.templateid = template.id
        cmd.clusterid = target_id
        vm = self.apiclient.deployVirtualMachine(cmd)

        vm_host = Host.list(self.apiclient, id=vm.hostid)

        self.assertEqual(target_id, vm_host[0].clusterid,
                         "VM was not deployed on the provided cluster")
        self.destroy_vm(vm.id)
    def test_03_clusters(self):
        """Check the status of clusters"""

        # Validate the following
        # 1. List clusters
        # 2. Check allocation state is "enabled" or not

        clusters = Cluster.list(
                          self.apiclient,
                          zoneid=self.zone.id,
              listall=True
                          )
        self.assertEqual(
                         isinstance(clusters, list),
                         True,
                         "Check if listClusters returns a valid response"
                         )
        # Every cluster in the zone is expected to be allocation-enabled.
        for cluster in clusters:
            self.assertEqual(
                             cluster.allocationstate,
                             'Enabled',
                             "Clusters allocation state should be enabled"
                             )
        return
    def setUpClass(cls):
        # One-time setup: find a cluster with at least two NFS storage
        # pools (required by the HA pool-maintenance tests) and create an
        # HA-enabled service offering. On list failures cls.listResponse
        # is set so tests can skip instead of erroring.
        try:
            cls._cleanup = []
            cls.testClient = super(
                testHaPoolMaintenance,
                cls).getClsTestClient()
            cls.api_client = cls.testClient.getApiClient()
            cls.services = cls.testClient.getParsedTestDataConfig()
            # Get Domain, Zone, Template
            cls.domain = get_domain(cls.api_client)
            cls.zone = get_zone(
                cls.api_client,
                cls.testClient.getZoneForTests())
            cls.template = get_template(
                cls.api_client,
                cls.zone.id,
                cls.services["ostype"]
            )
            cls.hypervisor = cls.testClient.getHypervisorInfo()
            cls.services['mode'] = cls.zone.networktype
            cls.hypervisor = cls.testClient.getHypervisorInfo()
            cls.services["virtual_machine"]["zoneid"] = cls.zone.id
            cls.services["virtual_machine"]["template"] = cls.template.id
            cls.clusterWithSufficientPool = None
            cls.listResponse = None
            clusters = Cluster.list(cls.api_client, zoneid=cls.zone.id)

            if not validateList(clusters)[0]:

                cls.debug(
                    "check list cluster response for zone id %s" %
                    cls.zone.id)
                cls.listResponse = True
                return

            # Pick the first cluster that has two or more NFS pools.
            for cluster in clusters:
                cls.pool = StoragePool.list(cls.api_client,
                                            clusterid=cluster.id,
                                            keyword="NetworkFilesystem"
                                            )

                if not validateList(cls.pool)[0]:

                    cls.debug(
                        "check list cluster response for zone id %s" %
                        cls.zone.id)
                    cls.listResponse = True
                    return

                if len(cls.pool) >= 2:
                    cls.clusterWithSufficientPool = cluster
                    break
            if not cls.clusterWithSufficientPool:
                return

            # HA must be enabled on the offering for these tests.
            cls.services["service_offerings"][
                "tiny"]["offerha"] = "True"

            cls.services_off = ServiceOffering.create(
                cls.api_client,
                cls.services["service_offerings"]["tiny"])
            cls._cleanup.append(cls.services_off)

        except Exception as e:
            cls.tearDownClass()
            raise Exception("Warning: Exception in setup : %s" % e)
        return
예제 #24
0
    def test_03_test_settings_for_cluster(self):
        """
        1. Get the default value for the setting in cluster scope
        2. Change the default value to new value
        3. Make sure updated value is same as new value
        4. Reset the config value
        5. Make sure that current value is same as default value
        :return:
        """
        clusters = Cluster.list(
            self.apiclient
        )

        self.assertIsNotNone(clusters[0],
                             "There should be atleast 1 cluster in the zone")
        cluster_id = clusters[0].id

        setting_name = "cluster.storage.operations.exclude"

        def read_setting():
            # Fetch the setting at cluster scope and fail fast if missing.
            entries = Configurations.list(
                self.apiclient,
                name=setting_name,
                clusterid=cluster_id
            )
            self.assertIsNotNone(
                entries, "Fail to get cluster setting %s " % setting_name)
            return entries

        # Step 1: capture the current (default) value.
        default_value = str(read_setting()[0].value)
        updated_value = "true"

        # Step 2: apply the new value at cluster scope.
        Configurations.update(
            self.apiclient,
            name=setting_name,
            value=updated_value,
            clusterid=cluster_id
        )

        # Step 3: the stored value must now match what was set.
        self.assertEqual(updated_value,
                         str(read_setting()[0].value),
                         "Failed to set new config value")

        # Step 4: reset the setting back to its default.
        Configurations.reset(
            self.apiclient,
            name=setting_name,
            clusterid=cluster_id
        )

        # Step 5: the value must be back to the original default.
        self.assertEqual(default_value,
                         str(read_setting()[0].value),
                         "Failed to reset the value")
예제 #25
0
        if host.type == 'Routing':
          if host.resourcestate == 'PrepareForMaintenance' \
              or host.resourcestate == 'Maintenance':
            print "delete host"
            cmd = deleteHost.deleteHostCmd()
            cmd.id = host.id
#            cmd.forced = 'True'
            apiClient.deleteHost(cmd)
          else:
            print "Delete host"
            h = Host(tmp_dict)
#            h.forced = 'True'
            h.id = host.id
            h.delete(apiClient)

    clusters = Cluster.list(apiClient)
    if clusters:
      for cluster in clusters:
        print "cluster name={}, id={}".format(cluster.name, cluster.id)
        if cluster.allocationstate == 'Enabled':
          print "Delete Cluster"
          c = Cluster(tmp_dict)
          c.id = cluster.id
          c.delete(apiClient)

    ipranges = PublicIpRange.list(apiClient)
    if ipranges:
      for iprange in ipranges:
        print "ip range name={}, id={}".format(iprange.name, iprange.id)

    if clusters:
    def test_managed_clustered_filesystems_limit(self):
        """Exercise the per-cluster limit on managed clustered filesystems.

        With the second cluster disabled, fills the first cluster until VM
        creation and volume attach fail, then re-enables the second cluster
        and verifies new capacity is used there (VM A lands in a different
        cluster than VM 1) and that freed capacity (deleting VM 1) allows a
        previously failing attach to succeed.
        """
        # Disable the second cluster so all placement goes to the first one.
        args = {
            "id": self.testdata[TestData.clusterId2],
            TestData.allocationstate: "Disabled"
        }

        Cluster.update(self.apiClient, **args)

        virtual_machine_names = {"name": "TestVM1", "displayname": "Test VM 1"}

        virtual_machine_1 = self._create_vm(virtual_machine_names)

        list_volumes_response = list_volumes(
            self.apiClient,
            virtualmachineid=virtual_machine_1.id,
            listall=True)

        sf_util.check_list(
            list_volumes_response, 1, self, TestManagedClusteredFilesystems.
            _should_only_be_one_volume_in_list_err_msg)

        vm_1_root_volume = list_volumes_response[0]

        virtual_machine_names = {"name": "TestVM2", "displayname": "Test VM 2"}

        virtual_machine_2 = self._create_vm(virtual_machine_names)

        virtual_machine_names = {"name": "TestVM3", "displayname": "Test VM 3"}

        # Sentinel exception: distinguishes "VM unexpectedly started" from the
        # expected failure raised by _create_vm when capacity is exhausted.
        class VMStartedException(Exception):
            def __init__(self, *args, **kwargs):
                Exception.__init__(self, *args, **kwargs)

        try:
            # The VM should fail to be created as there should be an insufficient number of clustered filesystems
            # remaining in the compute cluster.
            self._create_vm(virtual_machine_names)

            raise VMStartedException("The VM should have failed to start.")
        except VMStartedException:
            raise
        except Exception:
            pass

        vol_snap = Snapshot.create(self.apiClient,
                                   volume_id=vm_1_root_volume.id)

        services = {
            "diskname": "Vol-1",
            "zoneid": self.testdata[TestData.zoneId],
            "ispublic": True
        }

        volume_created_from_snapshot_1 = Volume.create_from_snapshot(
            self.apiClient,
            vol_snap.id,
            services,
            account=self.account.name,
            domainid=self.domain.id)

        # Same sentinel pattern for the attach-volume negative checks below.
        class VolumeAttachedException(Exception):
            def __init__(self, *args, **kwargs):
                Exception.__init__(self, *args, **kwargs)

        try:
            # The volume should fail to be attached as there should be an insufficient number of clustered filesystems
            # remaining in the compute cluster.
            virtual_machine_2.attach_volume(self.apiClient,
                                            volume_created_from_snapshot_1)

            raise VolumeAttachedException(
                TestManagedClusteredFilesystems.
                _volume_should_have_failed_to_attach_to_vm)
        except VolumeAttachedException:
            raise
        except Exception:
            pass

        # Re-enable the second cluster; new VMs can now be placed there, but
        # the attach below still targets VM 2's (full) cluster.
        args = {
            "id": self.testdata[TestData.clusterId2],
            TestData.allocationstate: "Enabled"
        }

        Cluster.update(self.apiClient, **args)

        try:
            # The volume should fail to be attached as there should be an insufficient number of clustered filesystems
            # remaining in the compute cluster.
            virtual_machine_2.attach_volume(self.apiClient,
                                            volume_created_from_snapshot_1)

            raise VolumeAttachedException(
                TestManagedClusteredFilesystems.
                _volume_should_have_failed_to_attach_to_vm)
        except VolumeAttachedException:
            raise
        except Exception:
            pass

        virtual_machine_names = {"name": "TestVMA", "displayname": "Test VM A"}

        virtual_machine_a = self._create_vm(virtual_machine_names)

        host_for_vm_1 = list_hosts(self.apiClient,
                                   id=virtual_machine_1.hostid)[0]
        host_for_vm_a = list_hosts(self.apiClient,
                                   id=virtual_machine_a.hostid)[0]

        # VM A must have landed in the newly enabled cluster.
        self.assertTrue(host_for_vm_1.clusterid != host_for_vm_a.clusterid,
                        "VMs 1 and VM a should be in different clusters.")

        # Deleting VM 1 (expunge=True) frees a clustered filesystem, so the
        # attach that failed earlier should now succeed.
        virtual_machine_1.delete(self.apiClient, True)

        volume_created_from_snapshot_1 = virtual_machine_2.attach_volume(
            self.apiClient, volume_created_from_snapshot_1)

        virtual_machine_2.detach_volume(self.apiClient,
                                        volume_created_from_snapshot_1)

        # Detach/re-attach of the same volume must not consume extra capacity.
        volume_created_from_snapshot_1 = virtual_machine_2.attach_volume(
            self.apiClient, volume_created_from_snapshot_1)

        services = {
            "diskname": "Vol-2",
            "zoneid": self.testdata[TestData.zoneId],
            "ispublic": True
        }

        volume_created_from_snapshot_2 = Volume.create_from_snapshot(
            self.apiClient,
            vol_snap.id,
            services,
            account=self.account.name,
            domainid=self.domain.id)

        try:
            # The volume should fail to be attached as there should be an insufficient number of clustered filesystems
            # remaining in the compute cluster.
            virtual_machine_2.attach_volume(self.apiClient,
                                            volume_created_from_snapshot_2)

            raise VolumeAttachedException(
                TestManagedClusteredFilesystems.
                _volume_should_have_failed_to_attach_to_vm)
        except VolumeAttachedException:
            raise
        except Exception:
            pass

        # VM A is in the other cluster, which still has room for this volume.
        virtual_machine_a.attach_volume(self.apiClient,
                                        volume_created_from_snapshot_2)
예제 #27
0
    def setUp(self):
        """Deploy one HA-tagged VM and register failing simulator mocks.

        Finds a cluster with at least two 'Routing' hosts, tags all its hosts
        for the "hasmall" offering, deploys an HA VM, then registers
        SimulatorMock entries that answer "result:fail" to the host-health and
        ping commands so the HA machinery can be triggered in the test.
        """
        self.testdata = self.testClient.getParsedTestDataConfig()
        self.apiclient = self.testClient.getApiClient()

        # Get Zone, Domain and Default Built-in template
        self.domain = get_domain(self.apiclient)
        self.zone = get_zone(self.apiclient, self.testClient.getZoneForTests())

        self.testdata["mode"] = self.zone.networktype
        self.template = get_template(self.apiclient, self.zone.id,
                                     self.testdata["ostype"])

        # Locate a cluster with >= 2 routing hosts; HA needs a migration target.
        self.hosts = []
        suitablecluster = None
        clusters = Cluster.list(self.apiclient)
        self.assertTrue(isinstance(clusters, list) and len(clusters) > 0,
                        msg="No clusters found")
        for cluster in clusters:
            self.hosts = Host.list(self.apiclient,
                                   clusterid=cluster.id,
                                   type='Routing')
            if isinstance(self.hosts, list) and len(self.hosts) >= 2:
                suitablecluster = cluster
                break
        self.assertTrue(
            isinstance(self.hosts, list) and len(self.hosts) >= 2,
            msg="Atleast 2 hosts required in cluster for VM HA test")
        #update host tags
        for host in self.hosts:
            Host.update(self.apiclient,
                        id=host.id,
                        hosttags=self.testdata["service_offerings"]["hasmall"]
                        ["hosttags"])

        #create a user account
        self.account = Account.create(self.apiclient,
                                      self.testdata["account"],
                                      domainid=self.domain.id)
        #create a service offering
        self.service_offering = ServiceOffering.create(
            self.apiclient, self.testdata["service_offerings"]["hasmall"])
        #deploy ha vm
        self.virtual_machine = VirtualMachine.create(
            self.apiclient,
            self.testdata["virtual_machine"],
            accountid=self.account.name,
            zoneid=self.zone.id,
            domainid=self.account.domainid,
            serviceofferingid=self.service_offering.id,
            templateid=self.template.id)
        list_vms = VirtualMachine.list(self.apiclient,
                                       id=self.virtual_machine.id)
        self.debug(
            "Verify listVirtualMachines response for virtual machine: %s"\
            % self.virtual_machine.id
        )
        self.assertTrue(isinstance(list_vms, list) and len(list_vms) == 1,
                        msg="List VM response was empty")
        # Re-read the VM so later code sees server-populated fields (hostid).
        self.virtual_machine = list_vms[0]

        # The mocks below make the simulator report failures for the VM's host.
        self.mock_checkhealth = SimulatorMock.create(
            apiclient=self.apiclient,
            command="CheckHealthCommand",
            zoneid=suitablecluster.zoneid,
            podid=suitablecluster.podid,
            clusterid=suitablecluster.id,
            hostid=self.virtual_machine.hostid,
            value="result:fail")
        self.mock_ping = SimulatorMock.create(
            apiclient=self.apiclient,
            command="PingCommand",
            zoneid=suitablecluster.zoneid,
            podid=suitablecluster.podid,
            clusterid=suitablecluster.id,
            hostid=self.virtual_machine.hostid,
            value="result:fail")
        self.mock_checkvirtualmachine = SimulatorMock.create(
            apiclient=self.apiclient,
            command="CheckVirtualMachineCommand",
            zoneid=suitablecluster.zoneid,
            podid=suitablecluster.podid,
            clusterid=suitablecluster.id,
            hostid=self.virtual_machine.hostid,
            value="result:fail")
        self.mock_pingtest = SimulatorMock.create(
            apiclient=self.apiclient,
            command="PingTestCommand",
            zoneid=suitablecluster.zoneid,
            podid=suitablecluster.podid,
            value="result:fail")
        # Also fail CheckOnHostCommand from every *other* host in the cluster.
        self.mock_checkonhost_list = []
        for host in self.hosts:
            if host.id != self.virtual_machine.hostid:
                self.mock_checkonhost_list.append(
                    SimulatorMock.create(apiclient=self.apiclient,
                                         command="CheckOnHostCommand",
                                         zoneid=suitablecluster.zoneid,
                                         podid=suitablecluster.podid,
                                         clusterid=suitablecluster.id,
                                         hostid=host.id,
                                         value="result:fail"))
        #build cleanup list
        self.cleanup = [
            self.service_offering, self.account, self.mock_checkhealth,
            self.mock_ping, self.mock_checkvirtualmachine, self.mock_pingtest
        ]
        self.cleanup = self.cleanup + self.mock_checkonhost_list
예제 #28
0
    def test_09_stop_vm_migrate_vol(self):
        """Test Stopped Virtual Machine's ROOT volume migration
        """

        # Validate the following:
        # 1. deploy Vm with startvm=true
        # 2. Should not be able to login to the VM.
        # 3. listVM command should return the deployed VM.State of this VM
        #    should be "Running".
        # 4. Stop the vm
        # 5.list primary storages in the cluster , should be more than one
        # 6.Migrate voluem to another available primary storage
        clusters = Cluster.list(self.apiclient, zoneid=self.zone.id)
        self.assertEqual(isinstance(clusters, list), True, "Check list response returns a valid list")
        # Find a cluster with at least two primary storage pools so the ROOT
        # volume can be migrated between them.
        self.cluster_id = None
        for cluster in clusters:
            storage_pools = StoragePool.list(self.apiclient, clusterid=cluster.id)
            # StoragePool.list may return None for a cluster with no pools;
            # guard before len() so such clusters are skipped instead of
            # raising a TypeError.
            if storage_pools and len(storage_pools) > 1:
                self.cluster_id = cluster.id
                break
        if self.cluster_id is None:
            self.skipTest("No cluster with more than one primary storage pool to perform migrate volume test")

        hosts = Host.list(self.apiclient, clusterid=self.cluster_id)
        self.assertEqual(isinstance(hosts, list), True, "Check list response returns a valid list")
        host = hosts[0]
        self.debug("Deploying instance on host: %s" % host.id)
        self.debug("Deploying instance in the account: %s" % self.account.name)
        self.virtual_machine = VirtualMachine.create(
            self.apiclient,
            self.services["virtual_machine"],
            accountid=self.account.name,
            domainid=self.account.domainid,
            serviceofferingid=self.service_offering.id,
            diskofferingid=self.disk_offering.id,
            hostid=host.id,
            mode=self.zone.networktype,
        )

        # The VM must reach Running before it can be stopped for migration.
        response = self.virtual_machine.getState(self.apiclient, VirtualMachine.RUNNING)
        self.assertEqual(response[0], PASS, response[1])
        try:
            self.virtual_machine.stop(self.apiclient)
        except Exception as e:
            self.fail("failed to stop instance: %s" % e)
        volumes = Volume.list(self.apiclient, virtualmachineid=self.virtual_machine.id, type="ROOT", listall=True)
        self.assertEqual(isinstance(volumes, list), True, "Check volume list response returns a valid list")
        vol_response = volumes[0]
        # get the storage name in which volume is stored
        storage_name = vol_response.storage
        storage_pools = StoragePool.list(self.apiclient, clusterid=self.cluster_id)
        # Get storage pool to migrate volume: pick any pool other than the one
        # currently holding the ROOT volume.
        for spool in storage_pools:
            if spool.name == storage_name:
                continue
            else:
                self.storage_id = spool.id
                self.storage_name = spool.name
                break
        self.debug("Migrating volume to storage pool: %s" % self.storage_name)
        Volume.migrate(self.apiclient, storageid=self.storage_id, volumeid=vol_response.id)
        # Re-list the ROOT volume and confirm it now reports the target pool.
        volume = Volume.list(self.apiclient, virtualmachineid=self.virtual_machine.id, type="ROOT", listall=True)
        self.assertEqual(volume[0].storage, self.storage_name, "Check volume migration response")

        return
예제 #29
0
    def test_04_zone_capacity_check(self):
        """change cpu/mem.overprovisioning.factor at cluster level for
           all cluster in a zone  and  verify capacity at zone level """
        list_cluster = Cluster.list(self.apiclient,
                                    zoneid=self.zone.id)
        self.assertEqual(
            validateList(list_cluster)[0],
            PASS,
            "check list cluster response for zone id  %s" %
            self.zone.id)

        def set_overprovisioning_factors(value):
            # Apply the same mem and cpu overprovisioning factor to every
            # cluster in the zone. Iterating the clusters directly replaces
            # the original `for id in xrange(k)` index loop, which also
            # shadowed the builtin `id`.
            for cluster in list_cluster:
                Configurations.update(self.apiclient,
                                      clusterid=cluster.id,
                                      name="mem.overprovisioning.factor",
                                      value=value)
                Configurations.update(self.apiclient,
                                      clusterid=cluster.id,
                                      name="cpu.overprovisioning.factor",
                                      value=value)

        # Baseline: factor 1 on all clusters, then record zone capacity.
        set_overprovisioning_factors(1)

        time.sleep(self.wait_time)

        capacity = Capacities.list(self.apiclient,
                                   zoneid=self.zone.id)
        self.assertEqual(
            validateList(capacity)[0],
            PASS,
            "check list capacity response for zone id %s" %
            self.zone.id)
        cpu, mem = capacity_parser(capacity)

        # Doubling the factors should double total and used capacity.
        set_overprovisioning_factors(2)

        time.sleep(self.wait_time)

        capacity1 = Capacities.list(self.apiclient,
                                    zoneid=self.zone.id)
        self.assertEqual(validateList(capacity1)[0],
                         PASS,
                         "check list capacity for zone id %s" % self.zone.id)

        cpu1, mem1 = capacity_parser(capacity1)
        self.assertEqual(2 * cpu[0],
                         cpu1[0],
                         "check total capacity ")
        self.assertEqual(2 * cpu[1],
                         cpu1[1],
                         "check capacity used")
        # Percentage used is a ratio, so it must be unchanged.
        self.assertEqual(cpu[2],
                         cpu1[2],
                         "check capacity % used")

        self.assertEqual(2 * mem[0],
                         mem1[0],
                         "check mem total capacity ")
        self.assertEqual(2 * mem[1],
                         mem1[1],
                         "check mem capacity used")
        self.assertEqual(mem[2],
                         mem1[2],
                         "check mem capacity % used")
        # Restore the default factor so later tests see a clean state.
        set_overprovisioning_factors(1)
    def test_list_pod_with_overcommit(self):
        """Test List Pod Api with cluster CPU and Memory OverProvisioning.

        For a pod with at least two clusters, ensures the clusters' CPU and
        memory overcommit ratios differ (adjusting the first cluster when
        needed), then checks that listPods with showcapacities=True reports
        the same total CPU/memory capacity as listCapacity. The original
        ratios are restored before the assertions run.
        """

        podlist = Pod.list(self.apiclient)

        for pod in podlist:
            clusterlist = Cluster.list(self.apiclient, podid=pod.id)
            # Only pods with >= 2 clusters are interesting: the ratios of the
            # first two clusters are compared against each other below.
            if len(clusterlist) > 1:

                updateCpuOvercommitCmd = updateConfiguration.updateConfigurationCmd(
                )
                updateCpuOvercommitCmd.clusterid = clusterlist[0].id
                updateCpuOvercommitCmd.name = "cpu.overprovisioning.factor"

                # Both clusters at default 1.0: bump cluster[0] to 2.0 so the
                # two ratios differ; remember 1.0 so it can be restored.
                if clusterlist[0].cpuovercommitratio == clusterlist[
                        1].cpuovercommitratio and clusterlist[
                            0].cpuovercommitratio == "1.0":
                    cpuovercommit = "1.0"
                    updateCpuOvercommitCmd.value = "2.0"
                    self.apiclient.updateConfiguration(updateCpuOvercommitCmd)

                # Ratios already differ: nothing to change, just remember it.
                elif clusterlist[0].cpuovercommitratio != clusterlist[
                        1].cpuovercommitratio:
                    cpuovercommit = clusterlist[0].cpuovercommitratio

                # Equal but non-default: push cluster[0] to 1.0 to differ.
                else:
                    cpuovercommit = clusterlist[0].cpuovercommitratio
                    updateCpuOvercommitCmd.value = "1.0"
                    self.apiclient.updateConfiguration(updateCpuOvercommitCmd)

                # Same three-way adjustment for the memory overcommit ratio.
                updateMemoryOvercommitCmd = updateConfiguration.updateConfigurationCmd(
                )
                updateMemoryOvercommitCmd.clusterid = clusterlist[0].id
                updateMemoryOvercommitCmd.name = "mem.overprovisioning.factor"

                if clusterlist[0].memoryovercommitratio == clusterlist[
                        1].memoryovercommitratio and clusterlist[
                            0].memoryovercommitratio == "1.0":
                    memoryovercommit = "1.0"
                    updateMemoryOvercommitCmd.value = "2.0"
                    self.apiclient.updateConfiguration(
                        updateMemoryOvercommitCmd)

                elif clusterlist[0].memoryovercommitratio != clusterlist[
                        1].memoryovercommitratio:
                    memoryovercommit = clusterlist[0].memoryovercommitratio

                else:
                    memoryovercommit = clusterlist[0].memoryovercommitratio
                    updateMemoryOvercommitCmd.value = "1.0"
                    self.apiclient.updateConfiguration(
                        updateMemoryOvercommitCmd)

                # Snapshot capacities via both APIs while ratios differ.
                podWithCap = Pod.list(self.apiclient,
                                      id=pod.id,
                                      showcapacities=True)
                cpucapacity = Capacities.list(self.apiclient,
                                              podid=pod.id,
                                              type=1)
                memorycapacity = Capacities.list(self.apiclient,
                                                 podid=pod.id,
                                                 type=0)

                # Restore the original ratios before asserting.
                updateCpuOvercommitCmd.value = cpuovercommit
                updateMemoryOvercommitCmd.value = memoryovercommit

                self.apiclient.updateConfiguration(updateCpuOvercommitCmd)
                self.apiclient.updateConfiguration(updateMemoryOvercommitCmd)

                # type == 1 is the CPU capacity entry (see Capacities.list
                # call above); totals from the two APIs must agree.
                self.assertEqual([
                    cap for cap in podWithCap[0].capacity if cap.type == 1
                ][0].capacitytotal, cpucapacity[0].capacitytotal,
                                 "listPods api returns wrong CPU capacity ")

                # type == 0 is the memory capacity entry.
                self.assertEqual([
                    cap for cap in podWithCap[0].capacity if cap.type == 0
                ][0].capacitytotal, memorycapacity[0].capacitytotal,
                                 "listPods api returns wrong memory capacity")
예제 #31
0
    def setUp(self):
        """Deploy three tagged VMs and mock the host's power-state report.

        Tags one routing host for the "taggedsmall" offering, deploys three
        VMs, then registers a SimulatorMock whose PingRoutingWithNwGroups
        JSON response reports vm1 as PowerOn, vm2 as PowerOff, and omits vm3
        entirely (treated as missing by the power-state sync under test).
        """
        self.testdata = self.testClient.getParsedTestDataConfig()
        self.apiclient = self.testClient.getApiClient()

        # Get Zone, Domain and Default Built-in template
        self.domain = get_domain(self.apiclient)
        self.zone = get_zone(self.apiclient, self.testClient.getZoneForTests())

        self.testdata["mode"] = self.zone.networktype
        self.template = get_template(self.apiclient, self.zone.id, self.testdata["ostype"])

        hosts = Host.list(self.apiclient, type="Routing")
        self.assertTrue(isinstance(hosts, list) and len(hosts) > 0, msg="No hosts found")
        self.host = hosts[0]
        # update host tags
        Host.update(
            self.apiclient, id=self.host.id, hosttags=self.testdata["service_offerings"]["taggedsmall"]["hosttags"]
        )

        # create a user account
        self.account = Account.create(self.apiclient, self.testdata["account"], domainid=self.domain.id)
        # create a service offering
        self.service_offering = ServiceOffering.create(
            self.apiclient, self.testdata["service_offerings"]["taggedsmall"]
        )
        # deploy vms
        self.vm1 = VirtualMachine.create(
            self.apiclient,
            self.testdata["small"],
            accountid=self.account.name,
            zoneid=self.zone.id,
            domainid=self.account.domainid,
            serviceofferingid=self.service_offering.id,
            templateid=self.template.id,
        )
        self.vm2 = VirtualMachine.create(
            self.apiclient,
            self.testdata["small"],
            accountid=self.account.name,
            zoneid=self.zone.id,
            domainid=self.account.domainid,
            serviceofferingid=self.service_offering.id,
            templateid=self.template.id,
        )
        self.vm3 = VirtualMachine.create(
            self.apiclient,
            self.testdata["small"],
            accountid=self.account.name,
            zoneid=self.zone.id,
            domainid=self.account.domainid,
            serviceofferingid=self.service_offering.id,
            templateid=self.template.id,
        )
        list_vms = VirtualMachine.list(self.apiclient, ids=[self.vm1.id, self.vm2.id, self.vm3.id], listAll=True)
        self.assertTrue(isinstance(list_vms, list) and len(list_vms) == 3, msg="List VM response is empty")
        # The host's cluster is needed for the mock's pod/cluster scoping.
        clusters = Cluster.list(self.apiclient, id=self.host.clusterid)
        self.assertTrue(isinstance(clusters, list) and len(clusters) > 0, msg="Cluster not found")

        # Canned agent response: only vm1 (PowerOn) and vm2 (PowerOff) appear
        # in _hostVmStateReport; vm3 is deliberately absent.
        json_response = '{"com.cloud.agent.api.PingRoutingWithNwGroupsCommand":{"newGroupStates":{},"newStates":{},"_hostVmStateReport":{"%s":{"state":"PowerOn","host":"%s"},"%s":{"state":"PowerOff","host":"%s"}},"_gatewayAccessible":true,"_vnetAccessible":true,"hostType":"Routing","hostId":0,"contextMap":{},"wait":0}}'
        json_response = json_response % (self.vm1.instancename, self.host.name, self.vm2.instancename, self.host.name)

        # create a mock to simulate vm1 as power-on, vm2 as power-off and vm3 as missing
        self.mock_ping = SimulatorMock.create(
            apiclient=self.apiclient,
            command="PingRoutingWithNwGroupsCommand",
            zoneid=self.zone.id,
            podid=clusters[0].podid,
            clusterid=clusters[0].id,
            hostid=self.host.id,
            value="",
            jsonresponse=json_response,
            method="POST",
        )

        # build cleanup list
        self.cleanup = [self.service_offering, self.account, self.mock_ping]
예제 #32
0
    def setUp(self):
        """Deploy three tagged VMs and mock the host's power-state report.

        Tags one routing host for the "taggedsmall" offering, deploys three
        VMs, then registers a SimulatorMock whose PingRoutingWithNwGroups
        JSON response reports vm1 as PowerOn, vm2 as PowerOff, and omits vm3
        entirely (treated as missing by the power-state sync under test).
        """
        self.testdata = self.testClient.getParsedTestDataConfig()
        self.apiclient = self.testClient.getApiClient()

        # Get Zone, Domain and Default Built-in template
        self.domain = get_domain(self.apiclient)
        self.zone = get_zone(self.apiclient, self.testClient.getZoneForTests())

        self.testdata["mode"] = self.zone.networktype
        self.template = get_template(self.apiclient, self.zone.id,
                                     self.testdata["ostype"])

        hosts = Host.list(self.apiclient, type='Routing')
        self.assertTrue(isinstance(hosts, list) and len(hosts) > 0,
                        msg="No hosts found")
        self.host = hosts[0]
        #update host tags
        Host.update(self.apiclient,
                    id=self.host.id,
                    hosttags=self.testdata["service_offerings"]["taggedsmall"]
                    ["hosttags"])

        #create a user account
        self.account = Account.create(self.apiclient,
                                      self.testdata["account"],
                                      domainid=self.domain.id)
        #create a service offering
        self.service_offering = ServiceOffering.create(
            self.apiclient, self.testdata["service_offerings"]["taggedsmall"])
        #deploy vms
        self.vm1 = VirtualMachine.create(
            self.apiclient,
            self.testdata["small"],
            accountid=self.account.name,
            zoneid=self.zone.id,
            domainid=self.account.domainid,
            serviceofferingid=self.service_offering.id,
            templateid=self.template.id)
        self.vm2 = VirtualMachine.create(
            self.apiclient,
            self.testdata["small"],
            accountid=self.account.name,
            zoneid=self.zone.id,
            domainid=self.account.domainid,
            serviceofferingid=self.service_offering.id,
            templateid=self.template.id)
        self.vm3 = VirtualMachine.create(
            self.apiclient,
            self.testdata["small"],
            accountid=self.account.name,
            zoneid=self.zone.id,
            domainid=self.account.domainid,
            serviceofferingid=self.service_offering.id,
            templateid=self.template.id)
        list_vms = VirtualMachine.list(
            self.apiclient,
            ids=[self.vm1.id, self.vm2.id, self.vm3.id],
            listAll=True)
        self.assertTrue(isinstance(list_vms, list) and len(list_vms) == 3,
                        msg="List VM response is empty")
        # The host's cluster is needed for the mock's pod/cluster scoping.
        clusters = Cluster.list(self.apiclient, id=self.host.clusterid)
        self.assertTrue(isinstance(clusters, list) and len(clusters) > 0,
                        msg="Cluster not found")

        # Canned agent response: only vm1 (PowerOn) and vm2 (PowerOff) appear
        # in _hostVmStateReport; vm3 is deliberately absent.
        json_response = '{"com.cloud.agent.api.PingRoutingWithNwGroupsCommand":{"newGroupStates":{},"newStates":{},"_hostVmStateReport":{"%s":{"state":"PowerOn","host":"%s"},"%s":{"state":"PowerOff","host":"%s"}},"_gatewayAccessible":true,"_vnetAccessible":true,"hostType":"Routing","hostId":0,"contextMap":{},"wait":0}}'
        json_response = json_response % (self.vm1.instancename, self.host.name,
                                         self.vm2.instancename, self.host.name)

        #create a mock to simulate vm1 as power-on, vm2 as power-off and vm3 as missing
        self.mock_ping = SimulatorMock.create(
            apiclient=self.apiclient,
            command="PingRoutingWithNwGroupsCommand",
            zoneid=self.zone.id,
            podid=clusters[0].podid,
            clusterid=clusters[0].id,
            hostid=self.host.id,
            value='',
            jsonresponse=json_response,
            method='POST')

        #build cleanup list
        self.cleanup = [self.service_offering, self.account, self.mock_ping]
예제 #33
0
    def setUp(self):
        """Per-test setup for a VM-HA scenario on the simulator.

        Picks the first cluster that has at least two routing hosts (HA needs
        a spare host to restart the VM on), tags those hosts for the HA
        service offering, deploys one HA-enabled VM, and installs simulator
        mocks that make the VM's current host appear dead (health check,
        ping, VM check, ping test, and check-on-host all return failure) so
        the HA investigators conclude the host is down.
        """
        self.testdata = self.testClient.getParsedTestDataConfig()
        self.apiclient = self.testClient.getApiClient()

        # Get Zone, Domain and Default Built-in template
        self.domain = get_domain(self.apiclient)
        self.zone = get_zone(self.apiclient, self.testClient.getZoneForTests())

        self.testdata["mode"] = self.zone.networktype
        self.template = get_template(self.apiclient, self.zone.id, self.testdata["ostype"])

        self.hosts = []
        suitablecluster = None
        # Scan clusters for the first one with >= 2 routing hosts; keep the
        # matching cluster and its host list together.
        clusters = Cluster.list(self.apiclient)
        self.assertTrue(isinstance(clusters, list) and len(clusters) > 0, msg = "No clusters found")
        for cluster in clusters:
            self.hosts = Host.list(self.apiclient, clusterid=cluster.id, type='Routing')
            if isinstance(self.hosts, list) and len(self.hosts) >= 2:
                suitablecluster = cluster
                break
        self.assertEqual(validateList(self.hosts)[0], PASS, "hosts list validation failed")
        if len(self.hosts) < 2:
            self.skipTest("Atleast 2 hosts required in cluster for VM HA test")
        #update host tags so the HA service offering can land on these hosts
        for host in self.hosts:
            Host.update(self.apiclient, id=host.id, hosttags=self.testdata["service_offerings"]["hasmall"]["hosttags"])

        #create a user account
        self.account = Account.create(
            self.apiclient,
            self.testdata["account"],
            domainid=self.domain.id
        )
        #create a service offering (HA-enabled "hasmall" offering)
        self.service_offering = ServiceOffering.create(
            self.apiclient,
            self.testdata["service_offerings"]["hasmall"]
        )
        #deploy ha vm
        self.virtual_machine = VirtualMachine.create(
            self.apiclient,
            self.testdata["virtual_machine"],
            accountid=self.account.name,
            zoneid=self.zone.id,
            domainid=self.account.domainid,
            serviceofferingid=self.service_offering.id,
            templateid=self.template.id
        )
        # Re-fetch the VM so attributes (e.g. hostid) reflect server state.
        list_vms = VirtualMachine.list(self.apiclient, id=self.virtual_machine.id)
        self.debug(
            "Verify listVirtualMachines response for virtual machine: %s"\
            % self.virtual_machine.id
        )
        self.assertTrue(isinstance(list_vms, list) and len(list_vms) == 1, msg = "List VM response was empty")
        self.virtual_machine = list_vms[0]

        # Mock failure of every probe the management server uses to decide
        # whether the VM's host is alive.
        self.mock_checkhealth = SimulatorMock.create(
            apiclient=self.apiclient,
            command="CheckHealthCommand",
            zoneid=suitablecluster.zoneid,
            podid=suitablecluster.podid,
            clusterid=suitablecluster.id,
            hostid=self.virtual_machine.hostid,
            value="result:fail")
        self.mock_ping = SimulatorMock.create(
            apiclient=self.apiclient,
            command="PingCommand",
            zoneid=suitablecluster.zoneid,
            podid=suitablecluster.podid,
            clusterid=suitablecluster.id,
            hostid=self.virtual_machine.hostid,
            value="result:fail")
        self.mock_checkvirtualmachine = SimulatorMock.create(
            apiclient=self.apiclient,
            command="CheckVirtualMachineCommand",
            zoneid=suitablecluster.zoneid,
            podid=suitablecluster.podid,
            clusterid=suitablecluster.id,
            hostid=self.virtual_machine.hostid,
            value="result:fail")
        # PingTestCommand is mocked pod-wide (no cluster/host scoping here).
        self.mock_pingtest = SimulatorMock.create(
            apiclient=self.apiclient,
            command="PingTestCommand",
            zoneid=suitablecluster.zoneid,
            podid=suitablecluster.podid,
            value="result:fail")
        # Neighbour hosts also report failure when asked to check on the
        # "dead" host.
        self.mock_checkonhost_list = []
        for host in self.hosts:
            if host.id != self.virtual_machine.hostid:
                self.mock_checkonhost_list.append(SimulatorMock.create(
                    apiclient=self.apiclient,
                    command="CheckOnHostCommand",
                    zoneid=suitablecluster.zoneid,
                    podid=suitablecluster.podid,
                    clusterid=suitablecluster.id,
                    hostid=host.id,
                    value="result:fail"))
        #build cleanup list (mocks must be removed along with the resources)
        self.cleanup = [
            self.service_offering,
            self.account,
            self.mock_checkhealth,
            self.mock_ping,
            self.mock_checkvirtualmachine,
            self.mock_pingtest
        ]
        self.cleanup = self.cleanup + self.mock_checkonhost_list
    def setUpClass(cls):
        """One-time setup for the VPC host-maintenance lifecycle tests.

        Requires a cluster with at least two hosts (maintenance mode needs a
        second host to take over VMs). Tags the two hosts, creates two
        service offerings, an enabled VPC offering, an admin account, a VPC
        with two network tiers, and three VMs spread across the tiers; the
        VPC virtual router is recorded in ``cls.vpcvr`` when present.

        Fix over the original: ``cls.vpc_off`` was created twice with the
        same spec, leaking the first (untracked) VpcOffering; it is now
        created exactly once.
        """
        cls.testClient = super(TestVMLifeCycleHostmaintenance, cls).getClsTestClient()
        cls.api_client = cls.testClient.getApiClient()

        cls.services = Services().services
        # Get Zone, Domain and templates
        cls.domain = get_domain(cls.api_client)
        cls.zone = get_zone(cls.api_client, cls.testClient.getZoneForTests())
        cls.template = get_template(
                            cls.api_client,
                            cls.zone.id,
                            cls.services["ostype"]
                            )
        cls.hypervisor = cls.testClient.getHypervisorInfo()
        cls.services["virtual_machine"]["zoneid"] = cls.zone.id
        cls.services["virtual_machine"]["template"] = cls.template.id

        # Find a cluster with >= 2 hosts; cls.hosts stays in sync with the
        # chosen cluster because we break on the first match.
        clusterWithSufficientHosts = None
        clusters = Cluster.list(cls.api_client, zoneid=cls.zone.id)
        for cluster in clusters:
            cls.hosts = Host.list(cls.api_client, clusterid=cluster.id)
            if len(cls.hosts) >= 2:
                clusterWithSufficientHosts = cluster
                break

        if clusterWithSufficientHosts is None:
            raise unittest.SkipTest("No Cluster with 2 hosts found")

        # Distinct host tags let each service offering pin VMs to one host.
        Host.update(cls.api_client, id=cls.hosts[0].id, hosttags="hosttag1")
        Host.update(cls.api_client, id=cls.hosts[1].id, hosttags="hosttag2")

        cls.service_offering_1 = ServiceOffering.create(
                                            cls.api_client,
                                            cls.services["service_offering_1"]
                                            )
        cls.service_offering_2 = ServiceOffering.create(
                                            cls.api_client,
                                            cls.services["service_offering_2"]
                                            )
        # Create the VPC offering once and enable it (offerings start
        # disabled by default).
        cls.vpc_off = VpcOffering.create(
                                     cls.api_client,
                                     cls.services["vpc_offering"]
                                     )
        cls.vpc_off.update(cls.api_client, state='Enabled')

        cls.account = Account.create(
                                     cls.api_client,
                                     cls.services["account"],
                                     admin=True,
                                     domainid=cls.domain.id
                                     )

        cls.services["vpc"]["cidr"] = '10.1.1.1/16'
        cls.vpc = VPC.create(
                         cls.api_client,
                         cls.services["vpc"],
                         vpcofferingid=cls.vpc_off.id,
                         zoneid=cls.zone.id,
                         account=cls.account.name,
                         domainid=cls.account.domainid
                         )

        cls.nw_off = NetworkOffering.create(
                                            cls.api_client,
                                            cls.services["network_offering"],
                                            conservemode=False
                                            )
        # Enable Network offering
        cls.nw_off.update(cls.api_client, state='Enabled')

        # Creating network using the network offering created
        cls.network_1 = Network.create(
                                cls.api_client,
                                cls.services["network"],
                                accountid=cls.account.name,
                                domainid=cls.account.domainid,
                                networkofferingid=cls.nw_off.id,
                                zoneid=cls.zone.id,
                                gateway='10.1.1.1',
                                vpcid=cls.vpc.id
                                )
        cls.nw_off_no_lb = NetworkOffering.create(
                                    cls.api_client,
                                    cls.services["network_offering_no_lb"],
                                    conservemode=False
                                    )
        # Enable Network offering
        cls.nw_off_no_lb.update(cls.api_client, state='Enabled')

        # Creating network using the network offering created
        cls.network_2 = Network.create(
                                cls.api_client,
                                cls.services["network"],
                                accountid=cls.account.name,
                                domainid=cls.account.domainid,
                                networkofferingid=cls.nw_off_no_lb.id,
                                zoneid=cls.zone.id,
                                gateway='10.1.2.1',
                                vpcid=cls.vpc.id
                                )
        # Spawn an instance in that network
        cls.vm_1 = VirtualMachine.create(
                                  cls.api_client,
                                  cls.services["virtual_machine"],
                                  accountid=cls.account.name,
                                  domainid=cls.account.domainid,
                                  serviceofferingid=cls.service_offering_1.id,
                                  networkids=[str(cls.network_1.id)]
                                  )
        # Spawn an instance in that network
        cls.vm_2 = VirtualMachine.create(
                                  cls.api_client,
                                  cls.services["virtual_machine"],
                                  accountid=cls.account.name,
                                  domainid=cls.account.domainid,
                                  serviceofferingid=cls.service_offering_1.id,
                                  networkids=[str(cls.network_1.id)]
                                  )
        cls.vm_3 = VirtualMachine.create(
                                  cls.api_client,
                                  cls.services["virtual_machine"],
                                  accountid=cls.account.name,
                                  domainid=cls.account.domainid,
                                  serviceofferingid=cls.service_offering_2.id,
                                  networkids=[str(cls.network_2.id)]
                                  )
        routers = Router.list(
                              cls.api_client,
                              account=cls.account.name,
                              domainid=cls.account.domainid,
                              listall=True
                              )
        if isinstance(routers, list):
            cls.vpcvr = routers[0]

        cls._cleanup = [
                        cls.service_offering_1,
                        cls.service_offering_2,
                        cls.nw_off,
                        cls.nw_off_no_lb,
                        ]
        return
    def setUpClass(cls):
        """One-time setup for the VPC host-maintenance lifecycle tests.

        Requires a cluster with at least two hosts. Tags the hosts, creates
        service offerings, an enabled VPC offering, an account, a VPC with
        two tiers, and three VMs; records the VPC router in ``cls.vpcvr``.

        Fixes over the original:
        - The cluster-selection loop did not ``break``, so ``cls.hosts`` was
          overwritten by every later cluster (possibly one with fewer than
          two hosts, causing an IndexError below, or tagging hosts of a
          cluster other than ``clusterWithSufficientHosts``).
        - ``cls.vpc_off`` was created twice, leaking the first offering.
        """
        cls.testClient = super(TestVMLifeCycleHostmaintenance, cls).getClsTestClient()
        cls.api_client = cls.testClient.getApiClient()

        cls.services = Services().services
        # Get Zone, Domain and templates
        cls.domain = get_domain(cls.api_client)
        cls.zone = get_zone(cls.api_client, cls.testClient.getZoneForTests())
        cls.template = get_template(cls.api_client, cls.zone.id, cls.services["ostype"])
        cls.services["virtual_machine"]["zoneid"] = cls.zone.id
        cls.services["virtual_machine"]["template"] = cls.template.id

        clusterWithSufficientHosts = None
        clusters = Cluster.list(cls.api_client, zoneid=cls.zone.id)
        for cluster in clusters:
            cls.hosts = Host.list(cls.api_client, clusterid=cluster.id)
            if len(cls.hosts) >= 2:
                clusterWithSufficientHosts = cluster
                # Stop at the first suitable cluster so cls.hosts matches it.
                break

        if clusterWithSufficientHosts is None:
            raise unittest.SkipTest("No Cluster with 2 hosts found")

        # Distinct host tags let each service offering pin VMs to one host.
        Host.update(cls.api_client, id=cls.hosts[0].id, hosttags="hosttag1")
        Host.update(cls.api_client, id=cls.hosts[1].id, hosttags="hosttag2")

        cls.service_offering_1 = ServiceOffering.create(cls.api_client, cls.services["service_offering_1"])
        cls.service_offering_2 = ServiceOffering.create(cls.api_client, cls.services["service_offering_2"])
        # Create the VPC offering once and enable it (offerings start
        # disabled by default).
        cls.vpc_off = VpcOffering.create(cls.api_client, cls.services["vpc_offering"])
        cls.vpc_off.update(cls.api_client, state="Enabled")

        cls.account = Account.create(cls.api_client, cls.services["account"], admin=True, domainid=cls.domain.id)

        cls.services["vpc"]["cidr"] = "10.1.1.1/16"
        cls.vpc = VPC.create(
            cls.api_client,
            cls.services["vpc"],
            vpcofferingid=cls.vpc_off.id,
            zoneid=cls.zone.id,
            account=cls.account.name,
            domainid=cls.account.domainid,
        )

        cls.nw_off = NetworkOffering.create(cls.api_client, cls.services["network_offering"], conservemode=False)
        # Enable Network offering
        cls.nw_off.update(cls.api_client, state="Enabled")

        # Creating network using the network offering created
        cls.network_1 = Network.create(
            cls.api_client,
            cls.services["network"],
            accountid=cls.account.name,
            domainid=cls.account.domainid,
            networkofferingid=cls.nw_off.id,
            zoneid=cls.zone.id,
            gateway="10.1.1.1",
            vpcid=cls.vpc.id,
        )
        cls.nw_off_no_lb = NetworkOffering.create(
            cls.api_client, cls.services["network_offering_no_lb"], conservemode=False
        )
        # Enable Network offering
        cls.nw_off_no_lb.update(cls.api_client, state="Enabled")

        # Creating network using the network offering created
        cls.network_2 = Network.create(
            cls.api_client,
            cls.services["network"],
            accountid=cls.account.name,
            domainid=cls.account.domainid,
            networkofferingid=cls.nw_off_no_lb.id,
            zoneid=cls.zone.id,
            gateway="10.1.2.1",
            vpcid=cls.vpc.id,
        )
        # Spawn an instance in that network
        cls.vm_1 = VirtualMachine.create(
            cls.api_client,
            cls.services["virtual_machine"],
            accountid=cls.account.name,
            domainid=cls.account.domainid,
            serviceofferingid=cls.service_offering_1.id,
            networkids=[str(cls.network_1.id)],
        )
        # Spawn an instance in that network
        cls.vm_2 = VirtualMachine.create(
            cls.api_client,
            cls.services["virtual_machine"],
            accountid=cls.account.name,
            domainid=cls.account.domainid,
            serviceofferingid=cls.service_offering_1.id,
            networkids=[str(cls.network_1.id)],
        )
        cls.vm_3 = VirtualMachine.create(
            cls.api_client,
            cls.services["virtual_machine"],
            accountid=cls.account.name,
            domainid=cls.account.domainid,
            serviceofferingid=cls.service_offering_2.id,
            networkids=[str(cls.network_2.id)],
        )
        routers = Router.list(cls.api_client, account=cls.account.name, domainid=cls.account.domainid, listall=True)
        if isinstance(routers, list):
            cls.vpcvr = routers[0]

        cls._cleanup = [cls.service_offering_1, cls.service_offering_2, cls.nw_off, cls.nw_off_no_lb]
        return
    def test_01_disable_enable_cluster(self):
        """disable enable cluster
            1. Disable cluster and verify following things:
                For admin user:
                     --Should be able to create new vm, snapshot,
                     volume,template,iso in the same cluster
                For Non-admin user:
                     --Should not be able create new vm, snapshot,
                     volume,template,iso in the same cluster
            2. Enable the above disabled cluster and verify that:
                -All users should be create to deploy new vm, snapshot,
                volume,template,iso in the same cluster
            3. Disable the managestate of the cluster and verify that:
                --Host in the cluster should get disconnected
                --VM's in the cluster are ping-able and ssh to
                --Creation of new VM in the cluster should fail
            4. Enable the managestate of the cluster and verify that:
                --Hosts in the cluster get connected
                --VM's in the cluster are accessible
            5. Try to delete the cluster and it should fail with error message:
                -"The cluster is not deletable because there are
                servers running in this cluster"

        """
        # Step 1
        # Deploy baseline VMs (one per user) while the cluster is still
        # enabled; they must keep running after the cluster is disabled.
        vm_user = VirtualMachine.create(
            self.userapiclient,
            self.testdata["small"],
            templateid=self.template.id,
            accountid=self.account.name,
            domainid=self.account.domainid,
            serviceofferingid=self.service_offering.id,
            zoneid=self.zone.id,
            mode=self.zone.networktype,
        )

        self.vm_list.append(vm_user)

        vm_root = VirtualMachine.create(
            self.apiclient,
            self.testdata["small"],
            templateid=self.template.id,
            accountid=self.admin_account.name,
            domainid=self.admin_account.domainid,
            serviceofferingid=self.service_offering.id,
            zoneid=self.zone.id,
            mode=self.zone.networktype,
        )

        self.vm_list.append(vm_root)

        # Flip the cluster's allocation state to Disabled.
        cmd = updateCluster.updateClusterCmd()
        cmd.id = self.cluster.id
        cmd.allocationstate = DISABLED
        self.apiclient.updateCluster(cmd)
        clusterList = Cluster.list(self.apiclient, id=self.cluster.id)

        self.assertEqual(clusterList[0].allocationstate, DISABLED, "Check if the cluster is in disabled state")

        # Verify the existing vms should be running
        self.assertEqual(vm_user.state.lower(), "running", "Verify that the user vm is running")

        self.assertEqual(vm_root.state.lower(), "running", "Verify that the root vm is running")

        # Admin deployments must still succeed in a disabled cluster.
        VirtualMachine.create(
            self.apiclient,
            self.testdata["small"],
            templateid=self.template.id,
            accountid=self.admin_account.name,
            domainid=self.admin_account.domainid,
            serviceofferingid=self.service_offering.id,
            zoneid=self.zone.id,
        )

        root_volume = list_volumes(self.apiclient, virtualmachineid=vm_root.id, type="ROOT", listall=True)

        self.assertEqual(
            validateList(root_volume)[0], PASS, "list root volume response is empty for volume id %s" % vm_root.id
        )

        # Admin snapshot + template-from-snapshot still work when disabled
        # (only on hypervisors that support volume snapshots).
        if self.snapshotSupported:
            Snapshot.create(self.apiclient, root_volume[0].id)

            snapshots = list_snapshots(self.apiclient, volumeid=root_volume[0].id, listall=True)
            self.assertEqual(
                validateList(snapshots)[0], PASS, "list snapshot  is empty for volume id %s" % root_volume[0].id
            )

            Template.create_from_snapshot(self.apiclient, snapshots[0], self.testdata["privatetemplate"])

        # Reuse the built-in template's URL/hypervisor/format for registering
        # a private template.
        builtin_info = get_builtin_template_info(self.apiclient, self.zone.id)
        self.testdata["privatetemplate"]["url"] = builtin_info[0]
        self.testdata["privatetemplate"]["hypervisor"] = builtin_info[1]
        self.testdata["privatetemplate"]["format"] = builtin_info[2]

        Template.register(self.apiclient, self.testdata["privatetemplate"], zoneid=self.zone.id)

        Volume.create(
            self.apiclient,
            self.testdata["volume"],
            zoneid=self.zone.id,
            account=self.admin_account.name,
            domainid=self.admin_account.domainid,
            diskofferingid=self.disk_offering.id,
        )

        Iso.create(
            self.apiclient,
            self.testdata["iso2"],
            zoneid=self.zone.id,
            account=self.admin_account.name,
            domainid=self.admin_account.domainid,
        )

        # non-admin user should fail to create vm, snap, temp etc
        with self.assertRaises(Exception):
            VirtualMachine.create(
                self.userapiclient,
                self.testdata["small"],
                templateid=self.template.id,
                accountid=self.account.name,
                domainid=self.account.domainid,
                serviceofferingid=self.service_offering.id,
                zoneid=self.zone.id,
                mode=self.zone.networktype,
            )

        root_volume = list_volumes(self.userapiclient, virtualmachineid=vm_user.id, type="ROOT", listall=True)

        self.assertEqual(
            validateList(root_volume)[0], PASS, "list root volume response is empty for volume id %s" % vm_user.id
        )

        # Non-admin snapshot/template/volume/iso creation on existing
        # resources is exercised here (no assertRaises around these calls).
        if self.snapshotSupported:
            Snapshot.create(self.userapiclient, root_volume[0].id)

        Template.register(self.userapiclient, self.testdata["privatetemplate"], zoneid=self.zone.id)

        Volume.create(
            self.userapiclient,
            self.testdata["volume"],
            zoneid=self.zone.id,
            account=self.account.name,
            domainid=self.account.domainid,
            diskofferingid=self.disk_offering.id,
        )

        Iso.create(
            self.userapiclient,
            self.testdata["iso2"],
            zoneid=self.zone.id,
            account=self.account.name,
            domainid=self.account.domainid,
        )

        # Step 2
        # Re-enable the cluster (reusing the same updateCluster command).
        cmd.allocationstate = ENABLED
        self.apiclient.updateCluster(cmd)
        clusterList = Cluster.list(self.apiclient, id=self.cluster.id)
        self.assertEqual(clusterList[0].allocationstate, ENABLED, "Check if the cluster is in disabled state")

        # After enabling the zone all users should be able to add new VM,
        # volume, templatee and iso

        root_vm_new = VirtualMachine.create(
            self.apiclient,
            self.testdata["small"],
            templateid=self.template.id,
            accountid=self.admin_account.name,
            domainid=self.admin_account.domainid,
            serviceofferingid=self.service_offering.id,
            zoneid=self.zone.id,
        )

        self.assertEqual(root_vm_new.state.lower(), "running", "Verify that admin should create new VM")

        if self.snapshotSupported:
            Snapshot.create(self.apiclient, root_volume[0].id)

        # Non root user
        user_vm_new = VirtualMachine.create(
            self.userapiclient,
            self.testdata["small"],
            templateid=self.template.id,
            accountid=self.account.name,
            domainid=self.account.domainid,
            serviceofferingid=self.service_offering.id,
            zoneid=self.zone.id,
        )

        self.assertEqual(user_vm_new.state.lower(), "running", "Verify that admin should create new VM")

        if self.snapshotSupported:
            Snapshot.create(self.userapiclient, root_volume[0].id)

        # Step 3
        # Unmanage the cluster: hosts should disconnect, but running VMs
        # should remain reachable.

        cmd = updateCluster.updateClusterCmd()
        cmd.id = self.cluster.id
        cmd.managedstate = "Unmanaged"
        self.apiclient.updateCluster(cmd)
        clusterList = Cluster.list(self.apiclient, id=self.cluster.id)

        self.assertEqual(
            clusterList[0].managedstate.lower(), "unmanaged", "Check if the cluster is in unmanaged  state"
        )
        # Hosts in the cluster takes some time to go into disconnected state
        time.sleep(60)
        hostList = Host.list(self.apiclient, clusterid=self.cluster.id)

        for host in hostList:
            self.assertEqual(host.state.lower(), "disconnected", "Check if host in the cluster gets disconnected")
        # SSH into every tracked VM; collect failures instead of aborting on
        # the first one so the assertion below reports all of them.
        exception_list = []
        for vm in self.vm_list:
            try:
                SshClient(vm.ssh_ip, vm.ssh_port, vm.username, vm.password)

            except Exception as e:
                exception_list.append(e)

        self.assertEqual(len(exception_list), 0, "Check if vm's are accesible")

        # non-admin user should fail to create vm, snap, temp etc
        with self.assertRaises(Exception):
            VirtualMachine.create(
                self.userapiclient,
                self.testdata["small"],
                templateid=self.template.id,
                accountid=self.account.name,
                domainid=self.account.domainid,
                serviceofferingid=self.service_offering.id,
                zoneid=self.zone.id,
                mode=self.zone.networktype,
            )

        root_volume = list_volumes(self.userapiclient, virtualmachineid=vm_user.id, type="ROOT", listall=True)

        if self.snapshotSupported:
            with self.assertRaises(Exception):
                Snapshot.create(self.userapiclient, root_volume[0].id)

        # Template registration only queues a download, so it still succeeds.
        Template.register(self.userapiclient, self.testdata["privatetemplate"], zoneid=self.zone.id)

        # Step 4
        # Re-manage the cluster; hosts should reconnect and VM lifecycle
        # operations should work again.
        cmd.managedstate = "Managed"
        self.apiclient.updateCluster(cmd)
        # After changing the cluster's managestate to Managed hosts in the
        # cluster takes some time to come back to Up state
        time.sleep(120)
        hostList = Host.list(self.apiclient, clusterid=self.cluster.id)
        for host in hostList:
            self.assertEqual(host.state.lower(), "up", "Check if host in the cluster gets up")

        vm_root.stop(self.apiclient)
        vm_user.stop(self.apiclient)
        # Verify the stop took effect directly in the management-server DB.
        root_state = self.dbclient.execute("select state from vm_instance where name='%s'" % vm_root.name)[0][0]

        user_state = self.dbclient.execute("select state from vm_instance where name='%s'" % vm_user.name)[0][0]

        self.assertEqual(root_state, "Stopped", "verify that vm should stop")

        self.assertEqual(user_state, "Stopped", "verify that vm should stop")

        root_vm_new = VirtualMachine.create(
            self.apiclient,
            self.testdata["small"],
            templateid=self.template.id,
            accountid=self.admin_account.name,
            domainid=self.admin_account.domainid,
            serviceofferingid=self.service_offering.id,
            zoneid=self.zone.id,
        )

        self.assertEqual(root_vm_new.state.lower(), "running", "Verify that admin should create new VM")

        # Step 5
        # Deleting the pod should fail while resources are still running in it
        # (note: the code deletes self.pod, not the cluster named in the
        # docstring).
        with self.assertRaises(Exception):
            self.pod.delete(self.apiclient)

        return
# Example #37 (score: 0)
    def test_list_pod_with_overcommit(self):
        """Verify listPods capacity totals against the Capacities API when a
        pod's clusters have differing CPU/memory overprovisioning factors.

        For each pod with more than one cluster: if the first two clusters
        agree on a factor, the first cluster's factor is changed so they
        differ; the original value is restored after the capacity listings
        are taken, and the totals reported by listPods (showcapacities=true)
        must match listCapacity for both CPU (type 1) and memory (type 0).
        """

        def _prepare_factor(cluster_pair, config_name, ratio_attr):
            # Build the cluster-scoped update command for one
            # overprovisioning factor, nudge the first cluster's value when
            # the two clusters currently agree, and hand back the command
            # together with the value to restore later.
            cmd = updateConfiguration.updateConfigurationCmd()
            cmd.clusterid = cluster_pair[0].id
            cmd.name = config_name
            first = getattr(cluster_pair[0], ratio_attr)
            second = getattr(cluster_pair[1], ratio_attr)
            if first == second and first == "1.0":
                original = "1.0"
                cmd.value = "2.0"
                self.apiclient.updateConfiguration(cmd)
            elif first != second:
                original = first
            else:
                original = first
                cmd.value = "1.0"
                self.apiclient.updateConfiguration(cmd)
            return cmd, original

        for pod in Pod.list(self.apiclient):
            clusterlist = Cluster.list(self.apiclient, podid=pod.id)
            if len(clusterlist) <= 1:
                continue

            cpu_cmd, cpu_original = _prepare_factor(
                clusterlist, "cpu.overprovisioning.factor", "cpuovercommitratio")
            mem_cmd, mem_original = _prepare_factor(
                clusterlist, "mem.overprovisioning.factor", "memoryovercommitratio")

            # Capture both views of the pod capacity while the factors differ.
            pod_with_cap = Pod.list(self.apiclient, id=pod.id, showcapacities=True)
            cpu_capacity = Capacities.list(self.apiclient, podid=pod.id, type=1)
            memory_capacity = Capacities.list(self.apiclient, podid=pod.id, type=0)

            # Restore the original factors before asserting, so a failed
            # assertion does not leave the clusters reconfigured.
            cpu_cmd.value = cpu_original
            mem_cmd.value = mem_original
            self.apiclient.updateConfiguration(cpu_cmd)
            self.apiclient.updateConfiguration(mem_cmd)

            self.assertEqual(
                [cap for cap in pod_with_cap[0].capacity if cap.type == 1][0].capacitytotal,
                cpu_capacity[0].capacitytotal,
                "listPods api returns wrong CPU capacity "
            )

            self.assertEqual(
                [cap for cap in pod_with_cap[0].capacity if cap.type == 0][0].capacitytotal,
                memory_capacity[0].capacitytotal,
                "listPods api returns wrong memory capacity"
            )
    def setUpClass(cls):
        """One-time setup for host high-availability tests.

        Requires a cluster with at least three routing hosts and the global
        'ha.tag' configuration set to 'ha'; otherwise the suite is skipped.
        Tags the third host as the dedicated HA host and creates one service
        offering with HA enabled and one without.
        """
        cls.testClient = super(TestHostHighAvailability, cls).getClsTestClient()
        cls.api_client = cls.testClient.getApiClient()

        cls.services = Services().services
        # Get Zone, Domain and templates
        cls.domain = get_domain(cls.api_client)
        cls.zone = get_zone(cls.api_client, cls.testClient.getZoneForTests())

        cls.template = get_template(
            cls.api_client,
            cls.zone.id,
            cls.services["ostype"]
        )
        cls.hypervisor = cls.testClient.getHypervisorInfo()
        if cls.hypervisor.lower() in ['lxc']:
            raise unittest.SkipTest("Template creation from root volume is not supported in LXC")


        # Find a cluster with >= 3 routing hosts: one to fail, one to take
        # over, and one reserved as the HA host.
        clusterWithSufficientHosts = None
        clusters = Cluster.list(cls.api_client, zoneid=cls.zone.id)
        for cluster in clusters:
            cls.hosts = Host.list(cls.api_client, clusterid=cluster.id, type="Routing")
            if len(cls.hosts) >= 3:
                clusterWithSufficientHosts = cluster
                break

        if clusterWithSufficientHosts is None:
            raise unittest.SkipTest("No Cluster with 3 hosts found")

        # The HA planner only reserves hosts tagged with the value of the
        # global 'ha.tag' setting, so that setting must be "ha".
        configs = Configurations.list(
                                      cls.api_client,
                                      name='ha.tag'
                                      )

        assert isinstance(configs, list), "Config list not\
                retrieved for ha.tag"

        if configs[0].value != "ha":
            raise unittest.SkipTest("Please set the global config\
                    value for ha.tag as 'ha'")

        # Dedicate the third host as the HA host.
        Host.update(cls.api_client, id=cls.hosts[2].id, hosttags="ha")

        cls.services["virtual_machine"]["zoneid"] = cls.zone.id
        cls.services["virtual_machine"]["template"] = cls.template.id

        cls.service_offering_with_ha = ServiceOffering.create(
            cls.api_client,
            cls.services["service_offering_with_ha"],
            offerha=True
        )

        cls.service_offering_without_ha = ServiceOffering.create(
            cls.api_client,
            cls.services["service_offering_without_ha"],
            offerha=False
        )

        cls._cleanup = [
            cls.service_offering_with_ha,
            cls.service_offering_without_ha,
        ]
        return