Example #1
    def get_lb_stats_settings(self):
        self.logger.debug("Retrieving haproxy stats settings")
        settings = {}
        try:
            settings["stats_port"] = Configurations.list(
                self.apiclient,
                name="network.loadbalancer.haproxy.stats.port")[0].value
            settings["stats_uri"] = Configurations.list(
                self.apiclient,
                name="network.loadbalancer.haproxy.stats.uri")[0].value
            # Update global setting network.loadbalancer.haproxy.stats.auth to a known value
            haproxy_auth = "admin:password"
            Configurations.update(self.apiclient,
                                  "network.loadbalancer.haproxy.stats.auth",
                                  haproxy_auth)
            self.logger.debug(
                "Updated global setting stats network.loadbalancer.haproxy.stats.auth to %s"
                % (haproxy_auth))
            settings["username"], settings["password"] = haproxy_auth.split(
                ":")
            settings["visibility"] = Configurations.list(
                self.apiclient,
                name="network.loadbalancer.haproxy.stats.visibility")[0].value
            self.logger.debug(settings)
        except Exception as e:
            self.fail("Failed to retrieve stats settings " % e)

        return settings
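Most of the snippets on this page share one pattern: read a setting with Configurations.list, change it with Configurations.update, and keep the old value so a later tearDown can restore it. A minimal sketch of that pattern, assuming a Marvin apiclient from the test harness (the helper name is hypothetical, not part of Marvin):

    from marvin.lib.base import Configurations

    def set_config(apiclient, name, new_value):
        # Capture the current value so the caller can restore it later.
        old_value = Configurations.list(apiclient, name=name)[0].value
        # Positional (apiclient, name, value) matches the calls in the examples.
        Configurations.update(apiclient, name, new_value)
        return old_value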
Example #2
    def tearDownClass(cls):
        version_delete_failed = False
        # Delete added Kubernetes supported version
        for version_id in cls.kubernetes_version_ids:
            try:
                cls.deleteKubernetesSupportedVersion(version_id)
            except Exception as e:
                version_delete_failed = True
                cls.debug(
                    "Error: Exception during cleanup for added Kubernetes supported versions: %s"
                    % e)
        try:
            # Restore original CKS template
            if not cls.hypervisorNotSupported and cls.initial_configuration_cks_template_name is not None:
                Configurations.update(
                    cls.apiclient, cls.cks_template_name_key,
                    cls.initial_configuration_cks_template_name)
            # Restore CKS enabled
            if cls.initial_configuration_cks_enabled not in ["true", True]:
                cls.debug("Restoring Kubernetes Service enabled value")
                Configurations.update(cls.apiclient,
                                      "cloud.kubernetes.service.enabled",
                                      "false")
                cls.restartServer()

            cls.updateVmwareSettings(True)

            cleanup_resources(cls.apiclient, cls._cleanup)
        except Exception as e:
            raise Exception("Warning: Exception during cleanup : %s" % e)
        if version_delete_failed:
            raise Exception(
                "Warning: Exception during cleanup, unable to delete Kubernetes supported versions"
            )
        return
Example #3
 def test_04_snapshot_volume_bypass_secondary(self):
     '''
         Test snapshot bypassing secondary
     '''
     Configurations.update(self.apiclient,
                           name="sp.bypass.secondary.storage",
                           value="true")
     volume = list_volumes(
         self.apiclient,
         virtualmachineid=self.virtual_machine.id,
         type="ROOT",
         listall=True,
     )
     snapshot = Snapshot.create(
         self.apiclient,
         volume_id=volume[0].id,
         account=self.account.name,
         domainid=self.account.domainid,
     )
     try:
         cmd = getVolumeSnapshotDetails.getVolumeSnapshotDetailsCmd()
         cmd.snapshotid = snapshot.id
         snapshot_details = self.apiclient.getVolumeSnapshotDetails(cmd)
         flag = False
         for s in snapshot_details:
             if s["snapshotDetailsName"] == snapshot.id:
                 name = s["snapshotDetailsValue"].split("/")[3]
                 sp_snapshot = self.spapi.snapshotList(snapshotName="~" + name)
                 flag = True
                 self.debug('################ %s' % sp_snapshot)
         if not flag:
             raise Exception("Could not find snapshot in snapshot details")
     except spapi.ApiError as err:
         raise Exception(err)
     self.assertIsNotNone(snapshot, "Could not create snapshot")
Example #4
 def tearDown(self):
     Configurations.update(self.apiclient,
                           name="enable.dynamic.scale.vm",
                           value="false")
     # Clean up, terminate the created ISOs
     super(TestScaleVm, self).tearDown()
     return
Example #5
    def tearDownClass(cls):
        if k8s_cluster is not None and k8s_cluster.id is not None:
            clsObj = TestKubernetesCluster()
            clsObj.deleteKubernetesClusterAndVerify(k8s_cluster.id, False,
                                                    True)

        version_delete_failed = False
        # Delete added Kubernetes supported version
        for version_id in cls.kubernetes_version_ids:
            try:
                cls.deleteKubernetesSupportedVersion(version_id)
            except Exception as e:
                version_delete_failed = True
                cls.debug(
                    "Error: Exception during cleanup for added Kubernetes supported versions: %s"
                    % e)
        try:
            # Restore CKS enabled
            if cls.initial_configuration_cks_enabled not in ["true", True]:
                cls.debug("Restoring Kubernetes Service enabled value")
                Configurations.update(cls.apiclient,
                                      "cloud.kubernetes.service.enabled",
                                      "false")
                cls.restartServer()

            cls.updateVmwareSettings(True)

            cleanup_resources(cls.apiclient, cls._cleanup)
        except Exception as e:
            raise Exception("Warning: Exception during cleanup : %s" % e)
        if version_delete_failed:
            raise Exception(
                "Warning: Exception during cleanup, unable to delete Kubernetes supported versions"
            )
        return
Example #6
 def rollback_nested_configurations(self, rollback_nv, rollback_nv_per_vm):
     if rollback_nv:
         config_update = Configurations.update(self.apiclient, "vmware.nested.virtualization", "false")
         self.logger.debug("Reverted global setting vmware.nested.virtualization back to false")
     if rollback_nv_per_vm:
         config_update = Configurations.update(self.apiclient, "vmware.nested.virtualization.perVM", "false")
         self.logger.debug("Reverted global setting vmware.nested.virtualization.perVM back to false")
Example #7
    def test_02_Overcommit_factor(self):
        """change mem.overprovisioning.factor and verify vm memory """

        listHost = Host.list(self.apiclient, id=self.deployVmResponse.hostid)
        self.assertEqual(
            validateList(listHost)[0], PASS,
            "check list host for host id %s" % self.deployVmResponse.hostid)
        if listHost[0].hypervisor.lower() not in ['kvm', 'xenserver']:
            self.skipTest(
                "Skipping test on unsupported hypervisor type %s" %
                listHost[0].hypervisor)

        Configurations.update(self.apiclient,
                              clusterid=listHost[0].clusterid,
                              name="mem.overprovisioning.factor",
                              value=1)

        self.deployVmResponse.stop(self.apiclient)
        self.deployVmResponse.start(self.apiclient)

        if listHost[0].hypervisor.lower() == 'xenserver':

            k = ssh_xen_host(self.hostConfig["password"],
                             self.hostConfig["username"],
                             listHost[0].ipaddress,
                             self.deployVmResponse.instancename)

        elif listHost[0].hypervisor.lower() == 'kvm':

            k = ssh_kvm_host(self.hostConfig["password"],
                             self.hostConfig["username"],
                             listHost[0].ipaddress,
                             self.deployVmResponse.instancename)

        self.assertEqual(k[0], k[1],
                         "Check static max, min on host for overcommit 1")

        Configurations.update(self.apiclient,
                              clusterid=listHost[0].clusterid,
                              name="mem.overprovisioning.factor",
                              value=2)

        self.deployVmResponse.stop(self.apiclient)
        self.deployVmResponse.start(self.apiclient)

        if listHost[0].hypervisor.lower() == 'xenserver':
            k1 = ssh_xen_host(self.hostConfig["password"],
                              self.hostConfig["username"],
                              listHost[0].ipaddress,
                              self.deployVmResponse.instancename)

        elif listHost[0].hypervisor.lower() == 'kvm':
            time.sleep(200)
            k1 = ssh_kvm_host(self.hostConfig["password"],
                              self.hostConfig["username"],
                              listHost[0].ipaddress,
                              self.deployVmResponse.instancename)
        self.assertEqual(k1[0], 2 * k1[1],
                         "Check static max, min on host for overcommit 2")
Example #8
    def test_01_test_settings_for_domain(self):
        """
        1. Get the default value for the setting in domain scope
        2. Change the default value to new value
        3. Make sure updated value is same as new value
        4. Reset the config value
        5. Make sure that current value is same as default value
        :return:
        """
        config_name="ldap.basedn"
        #1. Get default value
        configs = Configurations.list(
            self.apiclient,
            name=config_name
        )
        self.assertIsNotNone(configs, "Failed to get domain setting %s" % config_name)

        orig_value = str(configs[0].value)
        new_value = "testing"

        #2. Update to new value
        Configurations.update(
            self.apiclient,
            name=config_name,
            value=new_value,
            domainid=self.domain.id
        )

        configs = Configurations.list(
            self.apiclient,
            name=config_name,
            domainid=self.domain.id
        )
        self.assertIsNotNone(configs, "Failed to get domain setting %s" % config_name)

        #3. validate they are same
        self.assertEqual(new_value,
                         str(configs[0].value),
                         "Failed to set new config value")

        #4. Reset the value
        Configurations.reset(
            self.apiclient,
            name=config_name,
            domainid=self.domain.id
        )

        #5. Make sure its same as original value
        configs = Configurations.list(
            self.apiclient,
            name=config_name,
            domainid=self.domain.id
        )
        self.assertIsNotNone(configs, "Failed to get domain setting %s" % config_name)

        self.assertEqual(orig_value,
                         str(configs[0].value),
                         "Failed to reset the value")
Example #9
 def test_03_concurrent_snapshot_global_value_assignment(self):
     """ Test verifies that exception is raised if string value is assigned to
          concurrent.snapshots.threshold.perhost parameter.
     """
     with self.assertRaises(Exception):
         Configurations.update(self.apiclient,
                               "concurrent.snapshots.threshold.perhost",
                               "String")
     return
Example #10
 def tearDownClass(cls):
     try:
         Configurations.update(cls.apiclient,
                               name="kvm.vmstoragesnapshot.enabled",
                               value="false")
         # Cleanup resources used
         cleanup_resources(cls.apiclient, cls._cleanup)
     except Exception as e:
         raise Exception("Warning: Exception during cleanup : %s" % e)
     return
Example #11
 def tearDownClass(cls):
     if cls.initial_ipv6_offering_enabled is not None:
         Configurations.update(cls.apiclient,
             ipv6_offering_config_name,
             cls.initial_ipv6_offering_enabled)
     super(TestIpv6Network, cls).tearDownClass()
     if cls.test_ipv6_guestprefix is not None:
         cmd = deleteGuestNetworkIpv6Prefix.deleteGuestNetworkIpv6PrefixCmd()
         cmd.id = cls.test_ipv6_guestprefix.id
         cls.apiclient.deleteGuestNetworkIpv6Prefix(cmd)
Example #12
    def setUpClass(cls):
        testClient = super(TestIpv6Network, cls).getClsTestClient()
        cls.services = testClient.getParsedTestDataConfig()
        cls.apiclient = testClient.getApiClient()
        cls.dbclient = testClient.getDbConnection()
        cls.test_ipv6_guestprefix = None
        cls.initial_ipv6_offering_enabled = None
        cls._cleanup = []
        cls.routerDetailsMap = {}

        cls.logger = logging.getLogger('TestIpv6Network')

        cls.zone = get_zone(cls.apiclient, testClient.getZoneForTests())
        cls.services['mode'] = cls.zone.networktype
        cls.ipv6NotSupported = False

        ipv6_guestprefix = cls.getGuestIpv6Prefix()
        if ipv6_guestprefix is None:
            cls.ipv6NotSupported = True
        if not cls.ipv6NotSupported:
            ipv6_publiciprange = cls.getPublicIpv6Range()
            if ipv6_publiciprange is None:
                cls.ipv6NotSupported = True

        if not cls.ipv6NotSupported:
            cls.initial_ipv6_offering_enabled = Configurations.list(
                cls.apiclient,
                name=ipv6_offering_config_name)[0].value
            Configurations.update(cls.apiclient,
                ipv6_offering_config_name,
                "true")
            cls.domain = get_domain(cls.apiclient)
            cls.account = Account.create(
                cls.apiclient,
                cls.services["account"],
                admin=True,
                domainid=cls.domain.id
            )
            cls._cleanup.append(cls.account)
            cls.hypervisor = testClient.getHypervisorInfo()
            cls.template = get_template(
                cls.apiclient,
                cls.zone.id,
                cls.services["ostype"]
            )
            if cls.hypervisor.lower() == 'xenserver':
                # Default XenServer template has IPv6 disabled
                cls.template = get_test_template(
                    cls.apiclient,
                    cls.zone.id,
                    cls.hypervisor)
        else:
            cls.debug("IPv6 is not supported, skipping tests!")
        return
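Examples #11 and #12 pair a save in setUpClass with a restore in tearDownClass. Stripped to its essentials, the pattern looks like this (class and setting names are hypothetical; apiclient is assumed to be set up by the harness):

    from marvin.lib.base import Configurations

    class ConfigRestoringTest:
        saved_value = None

        @classmethod
        def setUpClass(cls):
            # Remember the original value before forcing the test default.
            cls.saved_value = Configurations.list(
                cls.apiclient, name="some.feature.enabled")[0].value
            Configurations.update(cls.apiclient, "some.feature.enabled", "true")

        @classmethod
        def tearDownClass(cls):
            if cls.saved_value is not None:
                Configurations.update(cls.apiclient,
                                      "some.feature.enabled", cls.saved_value)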
Example #13
 def test_03_concurrent_snapshot_global_value_assignment(self):
     """ Test verifies that exception is raised if string value is assigned to
          concurrent.snapshots.threshold.perhost parameter.
     """
     with self.assertRaises(Exception):
        Configurations.update(
          self.apiclient,
          "concurrent.snapshots.threshold.perhost",
          "String"
        )
     return
Example #14
    def test_05_test_settings_for_zone(self):
        """
        1. Get the default value for the setting in zone scope
        2. Change the default value to new value
        3. Make sure updated value is same as new value
        4. Reset the config value
        5. Make sure that current value is same as default value
        :return:
        """
        config_name = "enable.dynamic.scale.vm"
        configs = Configurations.list(
            self.apiclient,
            name=config_name,
            zoneid=self.zone.id
        )
        self.assertIsNotNone(configs, "Failed to get zone setting %s" % config_name)

        orig_value = str(configs[0].value)
        new_value = 'true'

        Configurations.update(
            self.apiclient,
            name=config_name,
            value=new_value,
            zoneid=self.zone.id
        )

        configs = Configurations.list(
            self.apiclient,
            name=config_name,
            zoneid=self.zone.id
        )
        self.assertIsNotNone(configs, "Failed to get zone setting %s" % config_name)

        self.assertEqual(new_value,
                         str(configs[0].value),
                         "Failed to set new config value")

        Configurations.reset(
            self.apiclient,
            name=config_name,
            zoneid=self.zone.id
        )

        configs = Configurations.list(
            self.apiclient,
            name=config_name,
            zoneid=self.zone.id
        )
        self.assertIsNotNone(configs, "Failed to get zone setting %s" % config_name)

        self.assertEqual(orig_value,
                         str(configs[0].value),
                         "Failed to reset the value for zone")
Example #15
 def bypass_secondary(cls, bypassed):
     value = "true" if bypassed else "false"
     backup_config = Configurations.update(cls.testClass.apiclient,
                                           name="sp.bypass.secondary.storage",
                                           value=value)
     cfg.logger.info(list_configurations(
         cls.testClass.apiclient,
         name="sp.bypass.secondary.storage"))
Example #16
    def tearDownClass(cls):
        try:
            # Cleanup resources used
            cleanup_resources(cls.api_client, cls._cleanup)

            # Restore original backup framework values
            if cls.backup_enabled == "false":
                Configurations.update(cls.api_client, 'backup.framework.enabled', value=cls.backup_enabled, zoneid=cls.zone.id)
            if cls.backup_provider != "dummy":
                Configurations.update(cls.api_client, 'backup.framework.provider.plugin', value=cls.backup_provider, zoneid=cls.zone.id)
        except Exception as e:
            raise Exception("Warning: Exception during cleanup : %s" % e)
Example #17
    def tearDownClass(cls):
        try:
            # Cleanup resources used

            if cls.updateclone:
                Configurations.update(cls.api_client,
                                      "vmware.create.full.clone",
                                      value="false",storageid=cls.storageID)

            cleanup_resources(cls.api_client, cls._cleanup)
        except Exception as e:
            raise Exception("Warning: Exception during cleanup : %s" % e)
        return
Example #18
 def tearDownClass(cls):
     try:
         # Restore CKS enabled
         if cls.initial_configuration_cks_enabled not in ["true", True]:
             cls.debug("Restoring Kubernetes Service enabled value")
             Configurations.update(cls.apiclient,
                                   "cloud.kubernetes.service.enabled",
                                   "false")
             cls.restartServer()
         cleanup_resources(cls.apiclient, cls._cleanup)
     except Exception as e:
         raise Exception("Warning: Exception during cleanup : %s" % e)
     return
Example #19
 def updateVmwareSettings(cls, tearDown):
     value = "false"
     if not tearDown:
         value = "true"
     if cls.hypervisor.lower() == 'vmware':
         Configurations.update(cls.apiclient, "vmware.create.full.clone",
                               value)
         allStoragePools = StoragePool.list(cls.apiclient)
         for pool in allStoragePools:
             Configurations.update(cls.apiclient,
                                   storageid=pool.id,
                                   name="vmware.create.full.clone",
                                   value=value)
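Examples #19 and #22 apply the same setting both globally and per storage pool, since vmware.create.full.clone also exists at storage scope. A hedged fan-out helper for that (not part of Marvin):

    from marvin.lib.base import Configurations, StoragePool

    def update_global_and_pools(apiclient, name, value):
        Configurations.update(apiclient, name, value)      # global scope
        for pool in StoragePool.list(apiclient) or []:
            Configurations.update(apiclient, storageid=pool.id,
                                  name=name, value=value)  # per-pool override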
Example #20
    def tearDownClass(cls):
        try:
            # Cleanup resources used

            if cls.updateclone:
                Configurations.update(cls.api_client,
                                      "vmware.create.full.clone",
                                      value="false",storageid=cls.storageID)

            cleanup_resources(cls.api_client, cls._cleanup)
        except Exception as e:
            raise Exception("Warning: Exception during cleanup : %s" % e)
        return
Example #21
 def update_NuageVspGlobalDomainTemplate(self, value):
     self.debug("Updating global setting nuagevsp.vpc.domaintemplate.name "
                "with value - %s" % value)
     self.user_apikey = self.api_client.connection.apiKey
     self.user_secretkey = self.api_client.connection.securityKey
     self.api_client.connection.apiKey = self.default_apikey
     self.api_client.connection.securityKey = self.default_secretkey
     Configurations.update(self.api_client,
                           name="nuagevsp.vpc.domaintemplate.name",
                           value=value)
     self.api_client.connection.apiKey = self.user_apikey
     self.api_client.connection.securityKey = self.user_secretkey
     self.debug("Successfully updated global setting "
                "nuagevsp.vpc.domaintemplate.name with value - %s" % value)
Example #22
 def updateVmwareCreateFullCloneSetting(self, tearDown):
     if not tearDown:
         Configurations.update(self.apiclient,
                               "vmware.create.full.clone",
                               "true")
         allStoragePools = StoragePool.list(
             self.apiclient
         )
         for pool in allStoragePools:
             Configurations.update(self.apiclient,
                                   storageid=pool.id,
                                   name="vmware.create.full.clone",
                                   value="true")
     else:
         Configurations.update(self.apiclient,
                               "vmware.create.full.clone",
                               self.fullClone[0].value.lower())
         allStoragePools = StoragePool.list(
             self.apiclient
         )
         for pool in allStoragePools:
             Configurations.update(self.apiclient,
                                   storageid=pool.id,
                                   name="vmware.create.full.clone",
                                   value=self.storeCloneValues[pool.id])
Example #23
 def update_NuageVspGlobalDomainTemplate(self, value):
     self.debug("Updating global setting nuagevsp.vpc.domaintemplate.name "
                "with value - %s" % value)
     self.user_apikey = self.api_client.connection.apiKey
     self.user_secretkey = self.api_client.connection.securityKey
     self.api_client.connection.apiKey = self.default_apikey
     self.api_client.connection.securityKey = self.default_secretkey
     Configurations.update(self.api_client,
                           name="nuagevsp.vpc.domaintemplate.name",
                           value=value)
     self.api_client.connection.apiKey = self.user_apikey
     self.api_client.connection.securityKey = self.user_secretkey
     self.debug("Successfully updated global setting "
                "nuagevsp.vpc.domaintemplate.name with value - %s" % value)
Example #24
    def test_13_snapshot_to_volume_from_secondary(self):
        ''' Try to create a volume from a snapshot that is deleted from primary and exists on secondary storage
        '''
        virtual_machine = VirtualMachine.create(self.apiclient,
            {"name":"StorPool-%s" % uuid.uuid4() },
            zoneid=self.zone.id,
            templateid=self.template.id,
            accountid=self.account.name,
            domainid=self.account.domainid,
            serviceofferingid=self.service_offering.id,
            hypervisor=self.hypervisor,
            rootdisksize=10
            )
        volume1 = list_volumes(
            self.apiclient,
            virtualmachineid = self.virtual_machine.id,
            type = "ROOT",
            listall = True
            )

        Configurations.update(self.apiclient,
            name = "sp.bypass.secondary.storage",
            value = "false")

        snapshot = Snapshot.create(
            self.apiclient,
            volume_id = volume1[0].id,
            account=self.account.name,
            domainid=self.account.domainid,
            )

        snapshot_name = self.getSnapshotName(snapshot)
        self.spapi.snapshotDelete(snapshotName = snapshot_name)


        self.assertIsNotNone(snapshot, "Could not create snapshot")
        self.assertIsInstance(snapshot, Snapshot, "Snapshot is not an instance of Snapshot")

        volume = self.helper.create_custom_disk(
            self.apiclient,
            {"diskname":"StorPoolDisk" },
            account=self.account.name,
            domainid=self.account.domainid,
            zoneid = self.zone.id,
            snapshotid = snapshot.id
            )

        self.assertIsNotNone(volume, "Could not create volume from snapshot")
        self.assertIsInstance(volume, Volume, "Volume is not instance of Volume")
Example #25
    def test_es_1223_apply_algo_to_pods(self):
        """
        @Desc: Test VM creation while "apply.allocation.algorithm.to.pods" is
        set to true
        @Reference: https://issues.apache.org/jira/browse/CLOUDSTACK-4947
        @Steps:
        Step1: Set global configuration "apply.allocation.algorithm.to.pods"
        to true
        Step2: Restart management server
        Step3: Verifying that VM creation is successful
        """
        # Step1:  set global configuration
        # "apply.allocation.algorithm.to.pods" to true
        # Configurations.update(self.apiClient,
        # "apply.allocation.algorithm.to.pods", "true")
        # TODO: restart management server
        if not is_config_suitable(apiclient=self.apiClient,
                                  name='apply.allocation.algorithm.to.pods',
                                  value='true'):
            self.skipTest('apply.allocation.algorithm.to.pods '
                          'should be true. skipping')
        # TODO:Step2: Restart management server
        self.services["virtual_machine"]["zoneid"] = self.zone.id
        self.services["virtual_machine"]["template"] = self.template.id
        # Step3: Verifying that VM creation is successful
        virtual_machine = VirtualMachine.create(
            self.apiClient,
            self.services["virtual_machine2"],
            accountid=self.account.name,
            domainid=self.account.domainid,
            serviceofferingid=self.service_offering.id,
        )
        self.cleanup.append(virtual_machine)
        # Verify VM state
        self.assertEqual(
            virtual_machine.state,
            'Running',
            "Check VM state is Running or not"
        )

        # cleanup: set global configuration
        # "apply.allocation.algorithm.to.pods" back to false
        Configurations.update(
            self.apiClient,
            name="apply.allocation.algorithm.to.pods",
            value="false"
        )
        # TODO:cleanup: Restart management server
        return
Example #26
    def test_01_cluster_settings(self):
        """change cpu/mem.overprovisioning.factor at cluster level and
         verify the change """
        listHost = Host.list(self.apiclient,
                             id=self.deployVmResponse.hostid
                             )
        self.assertEqual(
            validateList(listHost)[0],
            PASS,
            "check list host response for host id %s" %
            self.deployVmResponse.hostid)
        Configurations.update(self.apiclient,
                              clusterid=listHost[0].clusterid,
                              name="mem.overprovisioning.factor",
                              value=2)

        Configurations.update(self.apiclient,
                              clusterid=listHost[0].clusterid,
                              name="cpu.overprovisioning.factor",
                              value=3)

        list_cluster = Cluster.list(self.apiclient,
                                    id=listHost[0].clusterid)
        self.assertEqual(
            validateList(list_cluster)[0],
            PASS,
            "check list cluster response for cluster id %s" %
            listHost[0].clusterid)
        self.assertEqual(int(list_cluster[0].cpuovercommitratio),
                         3,
                         "check the cpu overcommit value at cluster level ")

        self.assertEqual(int(list_cluster[0].memoryovercommitratio),
                         2,
                         "check memory overcommit value at cluster level")

        Configurations.update(self.apiclient,
                              clusterid=listHost[0].clusterid,
                              name="mem.overprovisioning.factor",
                              value=1)

        Configurations.update(self.apiclient,
                              clusterid=listHost[0].clusterid,
                              name="cpu.overprovisioning.factor",
                              value=1)
        list_cluster1 = Cluster.list(self.apiclient,
                                     id=listHost[0].clusterid)
        self.assertEqual(
            validateList(list_cluster1)[0],
            PASS,
            "check the list cluster response for id %s" %
            listHost[0].clusterid)
        self.assertEqual(int(list_cluster1[0].cpuovercommitratio),
                         1,
                         "check the cpu overcommit value at cluster level ")

        self.assertEqual(int(list_cluster1[0].memoryovercommitratio),
                         1,
                         "check memory overcommit value at cluster level")
Example #27
    def test_es_1223_apply_algo_to_pods(self):
        """
        @Desc: Test VM creation while "apply.allocation.algorithm.to.pods" is
        set to true
        @Reference: https://issues.apache.org/jira/browse/CLOUDSTACK-4947
        @Steps:
        Step1: Set global configuration "apply.allocation.algorithm.to.pods"
        to true
        Step2: Restart management server
        Step3: Verifying that VM creation is successful
        """
        # Step1:  set global configuration
        # "apply.allocation.algorithm.to.pods" to true
        # Configurations.update(self.apiClient,
        # "apply.allocation.algorithm.to.pods", "true")
        # TODO: restart management server
        if not is_config_suitable(apiclient=self.apiClient,
                                  name='apply.allocation.algorithm.to.pods',
                                  value='true'):
            self.skipTest('apply.allocation.algorithm.to.pods '
                          'should be true. skipping')
        # TODO:Step2: Restart management server
        self.services["virtual_machine"]["zoneid"] = self.zone.id
        self.services["virtual_machine"]["template"] = self.template.id
        # Step3: Verifying that VM creation is successful
        virtual_machine = VirtualMachine.create(
            self.apiClient,
            self.services["virtual_machine2"],
            accountid=self.account.name,
            domainid=self.account.domainid,
            serviceofferingid=self.service_offering.id,
        )
        self.cleanup.append(virtual_machine)
        # Verify VM state
        self.assertEqual(
            virtual_machine.state,
            'Running',
            "Check VM state is Running or not"
        )

        # cleanup: set global configuration
        # "apply.allocation.algorithm.to.pods" back to false
        Configurations.update(
            self.apiClient,
            name="apply.allocation.algorithm.to.pods",
            value="false"
        )
        # TODO:cleanup: Restart management server
        return
Example #28
    def test_01_cluster_settings(self):
        """change cpu/mem.overprovisioning.factor at cluster level and
         verify the change """
        listHost = Host.list(self.apiclient,
                             id=self.deployVmResponse.hostid
                             )
        self.assertEqual(
            validateList(listHost)[0],
            PASS,
            "check list host response for host id %s" %
            self.deployVmResponse.hostid)
        Configurations.update(self.apiclient,
                              clusterid=listHost[0].clusterid,
                              name="mem.overprovisioning.factor",
                              value=2)

        Configurations.update(self.apiclient,
                              clusterid=listHost[0].clusterid,
                              name="cpu.overprovisioning.factor",
                              value=3)

        list_cluster = Cluster.list(self.apiclient,
                                    id=listHost[0].clusterid)
        self.assertEqual(
            validateList(list_cluster)[0],
            PASS,
            "check list cluster response for cluster id %s" %
            listHost[0].clusterid)
        self.assertEqual(int(list_cluster[0].cpuovercommitratio),
                         3,
                         "check the cpu overcommit value at cluster level ")

        self.assertEqual(int(list_cluster[0].memoryovercommitratio),
                         2,
                         "check memory overcommit value at cluster level")

        Configurations.update(self.apiclient,
                              clusterid=listHost[0].clusterid,
                              name="mem.overprovisioning.factor",
                              value=1)

        Configurations.update(self.apiclient,
                              clusterid=listHost[0].clusterid,
                              name="cpu.overprovisioning.factor",
                              value=1)
        list_cluster1 = Cluster.list(self.apiclient,
                                     id=listHost[0].clusterid)
        self.assertEqual(
            validateList(list_cluster1)[0],
            PASS,
            "check the list cluster response for id %s" %
            listHost[0].clusterid)
        self.assertEqual(int(list_cluster1[0].cpuovercommitratio),
                         1,
                         "check the cpu overcommit value at cluster level ")

        self.assertEqual(int(list_cluster1[0].memoryovercommitratio),
                         1,
                         "check memory overcommit value at cluster level")
Example #29
    def tearDownClass(cls):
        try:
            cls.apiclient = super(TestPublicIp,
                                  cls).getClsTestClient().getApiClient()

            Configurations.update(cls.apiclient,
                                  name=cls.use_system_ips_config_name,
                                  value=cls.use_system_ips_config_value)
            # Cleanup resources used
            cleanup_resources(cls.apiclient, cls._cleanup)

        except Exception as e:
            raise Exception("Warning: Exception during cleanup : %s" % e)

        return
Example #30
    def test_vms_with_same_name(self):
        """ Test vm deployment with same name

        # 1. Deploy a VM with a particular name from account_1
        # 2. Try to deploy another vm with same name from account_2
        # 3. Verify that second VM deployment fails

        """
        # Step 1
        # Create VM on cluster wide
        configs = Configurations.list(
            self.apiclient,
            name="vm.instancename.flag")
        orig_value = configs[0].value

        if orig_value == "false":
            Configurations.update(self.apiclient,
                                  name="vm.instancename.flag",
                                  value="true"
                                  )

            # Restart management server
            self.RestartServer()
            time.sleep(120)

        self.testdata["small"]["displayname"]="TestName"
        self.testdata["small"]["name"]="TestName"
        VirtualMachine.create(
            self.userapiclient_1,
            self.testdata["small"],
            templateid=self.template.id,
            accountid=self.account_1.name,
            domainid=self.account_1.domainid,
            serviceofferingid=self.service_offering.id,
            zoneid=self.zone.id,
        )

        with self.assertRaises(Exception):
            VirtualMachine.create(
                self.userapiclient_2,
                self.testdata["small"],
                templateid=self.template.id,
                accountid=self.account_2.name,
                domainid=self.account_2.domainid,
                serviceofferingid=self.service_offering.id,
                zoneid=self.zone.id,
            )
        return
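vm.instancename.flag is not picked up dynamically, which is why the test restarts the management server and then sleeps before continuing. A hedged sketch of that step, assuming the test class provides RestartServer() as above:

    import time

    from marvin.lib.base import Configurations

    def apply_static_setting(test, name, value, warmup=120):
        configs = Configurations.list(test.apiclient, name=name)
        if configs[0].value != value:
            Configurations.update(test.apiclient, name=name, value=value)
            test.RestartServer()  # management server restart, as in the test
            time.sleep(warmup)    # give the server time to come back up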
Example #31
    def test_07_snapshot_to_template_bypass_secondary(self):
        ''' Create template from snapshot bypassing secondary storage
        '''
        volume = list_volumes(self.apiclient,
                              virtualmachineid=self.virtual_machine.id)
        snapshot = Snapshot.create(self.apiclient, volume_id=volume[0].id)

        backup_config = list_configurations(self.apiclient,
                                            name="sp.bypass.secondary.storage")
        if backup_config[0].value == "false":
            backup_config = Configurations.update(
                self.apiclient,
                name="sp.bypass.secondary.storage",
                value="true")
        self.assertIsNotNone(snapshot, "Could not create snapshot")
        self.assertIsInstance(snapshot, Snapshot,
                              "Snapshot is not an instance of Snapshot")

        template = self.create_template_from_snapshot(self.apiclient,
                                                      self.services,
                                                      snapshotid=snapshot.id)
        virtual_machine = VirtualMachine.create(
            self.apiclient, {"name": "StorPool-%d" % random.randint(0, 100)},
            zoneid=self.zone.id,
            templateid=template.id,
            serviceofferingid=self.service_offering.id,
            hypervisor=self.hypervisor,
            rootdisksize=10)
        ssh_client = virtual_machine.get_ssh_client()
        self.assertIsNotNone(template, "Template is None")
        self.assertIsInstance(template, Template,
                              "Template is instance of template")
        self._cleanup.append(snapshot)
        self._cleanup.append(template)
Example #32
    def execute_internallb_haproxy_tests(self, vpc_offering):

        settings = self.get_lb_stats_settings()

        dummy_port = 90
        network_gw = "10.1.2.1"
        default_visibility = "global"

        # Update global setting if it is not set to our test default
        if settings["visibility"] != default_visibility:
            config_update = Configurations.update(
                self.apiclient,
                "network.loadbalancer.haproxy.stats.visibility",
                default_visibility)
            self.logger.debug(
                "Updated global setting stats haproxy.stats.visibility to %s" %
                (default_visibility))
            settings = self.get_lb_stats_settings()

        # Create and enable network offering
        network_offering_intlb = self.create_and_enable_network_serviceoffering(
            self.services["network_offering_internal_lb"])

        # Create VPC
        vpc = self.create_vpc(vpc_offering)

        # Create network tier with internal lb service enabled
        network_internal_lb = self.create_network_tier("intlb_test02", vpc.id,
                                                       network_gw,
                                                       network_offering_intlb)

        # Create 1 lb vm in internal lb network tier
        vm = self.deployvm_in_network(vpc, network_internal_lb.id)

        # Acquire 1 public ip and attach to the internal lb network tier
        public_ip = self.acquire_publicip(vpc, network_internal_lb)

        # Create an internal loadbalancer in the internal lb network tier
        applb = self.create_internal_loadbalancer(dummy_port, dummy_port,
                                                  "leastconn",
                                                  network_internal_lb.id)

        # Assign the 1 VM to the Internal Load Balancer
        self.logger.debug("Assigning virtual machines to LB: %s" % applb.id)
        try:
            applb.assign(self.apiclient, vms=[vm])
        except Exception as e:
            self.fail(
                "Failed to assign virtual machine(s) to loadbalancer: %s" % e)

        # Create nat rule to access client vm
        nat_rule = self.create_natrule(vpc, vm, "22", "22", public_ip,
                                       network_internal_lb)

        # Verify access to and the contents of the admin stats page on the
        # private address via a vm in the internal lb tier
        stats = self.verify_lb_stats(
            applb.sourceipaddress,
            self.get_ssh_client(vm, nat_rule.ipaddress, 10), settings)
        self.assertTrue(stats, "Failed to verify LB HAProxy stats")
Example #33
    def test_vms_with_same_name(self):
        """ Test vm deployment with same name

        # 1. Deploy a VM with a particular name from account_1
        # 2. Try to deploy another vm with same name from account_2
        # 3. Verify that second VM deployment fails

        """
        # Step 1
        # Create VM on cluster wide
        configs = Configurations.list(self.apiclient,
                                      name="vm.instancename.flag")
        orig_value = configs[0].value

        if orig_value == "false":
            Configurations.update(self.apiclient,
                                  name="vm.instancename.flag",
                                  value="true")

            # Restart management server
            self.RestartServer()
            time.sleep(120)

        self.testdata["small"]["displayname"] = "TestName"
        self.testdata["small"]["name"] = "TestName"
        VirtualMachine.create(
            self.userapiclient_1,
            self.testdata["small"],
            templateid=self.template.id,
            accountid=self.account_1.name,
            domainid=self.account_1.domainid,
            serviceofferingid=self.service_offering.id,
            zoneid=self.zone.id,
        )

        with self.assertRaises(Exception):
            VirtualMachine.create(
                self.userapiclient_2,
                self.testdata["small"],
                templateid=self.template.id,
                accountid=self.account_2.name,
                domainid=self.account_2.domainid,
                serviceofferingid=self.service_offering.id,
                zoneid=self.zone.id,
            )
        return
Example #34
    def setUpClass(cls):
        # Setup

        cls.testClient = super(TestDummyBackupAndRecovery, cls).getClsTestClient()
        cls.api_client = cls.testClient.getApiClient()
        cls.services = cls.testClient.getParsedTestDataConfig()
        cls.zone = get_zone(cls.api_client, cls.testClient.getZoneForTests())
        cls.services["mode"] = cls.zone.networktype
        cls.hypervisor = cls.testClient.getHypervisorInfo()
        cls.domain = get_domain(cls.api_client)
        cls.template = get_template(cls.api_client, cls.zone.id, cls.services["ostype"])
        if cls.template == FAILED:
            assert False, "get_template() failed to return template with description %s" % cls.services["ostype"]
        cls.services["small"]["zoneid"] = cls.zone.id
        cls.services["small"]["template"] = cls.template.id
        cls._cleanup = []

        # Check backup configuration values, set them to enable the dummy provider
        backup_enabled_cfg = Configurations.list(cls.api_client, name='backup.framework.enabled', zoneid=cls.zone.id)
        backup_provider_cfg = Configurations.list(cls.api_client, name='backup.framework.provider.plugin', zoneid=cls.zone.id)
        cls.backup_enabled = backup_enabled_cfg[0].value
        cls.backup_provider = backup_provider_cfg[0].value

        if cls.backup_enabled == "false":
            Configurations.update(cls.api_client, 'backup.framework.enabled', value='true', zoneid=cls.zone.id)
        if cls.backup_provider != "dummy":
            Configurations.update(cls.api_client, 'backup.framework.provider.plugin', value='dummy', zoneid=cls.zone.id)

        if cls.hypervisor.lower() != 'simulator':
            return

        cls.account = Account.create(cls.api_client, cls.services["account"], domainid=cls.domain.id)
        cls.offering = ServiceOffering.create(cls.api_client,cls.services["service_offerings"]["small"])
        cls.vm = VirtualMachine.create(cls.api_client, cls.services["small"], accountid=cls.account.name,
                                       domainid=cls.account.domainid, serviceofferingid=cls.offering.id,
                                       mode=cls.services["mode"])
        cls._cleanup = [cls.offering, cls.account]

        # Import a dummy backup offering to use on tests

        cls.provider_offerings = BackupOffering.listExternal(cls.api_client, cls.zone.id)
        cls.debug("Importing backup offering %s - %s" % (cls.provider_offerings[0].externalid, cls.provider_offerings[0].name))
        cls.offering = BackupOffering.importExisting(cls.api_client, cls.zone.id, cls.provider_offerings[0].externalid,
                                                   cls.provider_offerings[0].name, cls.provider_offerings[0].description)
        cls._cleanup.append(cls.offering)
Example #35
    def test_01_create_ipv6_network_offering(self):
        """Test to create network offering

        # Validate the following:
        # 1. createNetworkOffering should return valid info for new offering
        # 2. The Cloud Database contains the valid information
        """
        Configurations.update(self.apiclient,
            ipv6_offering_config_name,
            "true")
        ipv6_service = self.services["network_offering"]
        ipv6_service["internetprotocol"] = "dualstack"
        network_offering = NetworkOffering.create(
            self.apiclient,
            ipv6_service
        )
        self.cleanup.append(network_offering)

        self.debug("Created Network offering with ID: %s" % network_offering.id)

        list_network_off_response = NetworkOffering.list(self.apiclient,
            id=network_offering.id)
        self.assertEqual(
            isinstance(list_network_off_response, list),
            True,
            "Check list response returns a valid list"
        )
        self.assertNotEqual(
            len(list_network_off_response),
            0,
            "Check Network offering is created"
        )
        network_off_response = list_network_off_response[0]

        self.assertEqual(
            network_off_response.id,
            network_offering.id,
            "Check server id in listNetworkOfferings"
        )
        self.assertEqual(
            network_off_response.internetprotocol.lower(),
            ipv6_service["internetprotocol"].lower(),
            "Check internetprotocol in listNetworkOfferings"
        )
        return
Example #36
    def setUpClass(cls):
        cls.testClient = super(TestKubernetesSupportedVersion, cls).getClsTestClient()
        cls.apiclient = cls.testClient.getApiClient()
        cls.services = cls.testClient.getParsedTestDataConfig()
        cls.zone = get_zone(cls.apiclient, cls.testClient.getZoneForTests())
        cls.mgtSvrDetails = cls.config.__dict__["mgtSvr"][0].__dict__
        cls.kubernetes_version_iso_url = 'http://download.cloudstack.org/cks/setup-1.16.3.iso'

        cls.initial_configuration_cks_enabled = Configurations.list(cls.apiclient,
                                                                    name="cloud.kubernetes.service.enabled")[0].value
        if cls.initial_configuration_cks_enabled not in ["true", True]:
            cls.debug("Enabling CloudStack Kubernetes Service plugin and restarting management server")
            Configurations.update(cls.apiclient,
                                  "cloud.kubernetes.service.enabled",
                                  "true")
            cls.restartServer()

        cls._cleanup = []
        return
Example #37
    def execute_internallb_haproxy_tests(self, vpc_offering):

        settings = self.get_lb_stats_settings()

        dummy_port = 90
        network_gw = "10.1.2.1"
        default_visibility = "global"

        # Update global setting if it is not set to our test default
        if settings["visibility"] != default_visibility:
            config_update = Configurations.update(
                self.apiclient, "network.loadbalancer.haproxy.stats.visibility", default_visibility)
            self.logger.debug(
                "Updated global setting stats haproxy.stats.visibility to %s" % (default_visibility))
            settings = self.get_lb_stats_settings()

        # Create and enable network offering
        network_offering_intlb = self.create_and_enable_network_serviceoffering(
            self.services["network_offering_internal_lb"])

        # Create VPC
        vpc = self.create_vpc(vpc_offering)

        # Create network tier with internal lb service enabled
        network_internal_lb = self.create_network_tier(
            "intlb_test02", vpc.id, network_gw,  network_offering_intlb)

        # Create 1 lb vm in internal lb network tier
        vm = self.deployvm_in_network(vpc, network_internal_lb.id)

        # Acquire 1 public ip and attach to the internal lb network tier
        public_ip = self.acquire_publicip(vpc, network_internal_lb)

        # Create an internal loadbalancer in the internal lb network tier
        applb = self.create_internal_loadbalancer(
            dummy_port, dummy_port, "leastconn", network_internal_lb.id)

        # Assign the 1 VM to the Internal Load Balancer
        self.logger.debug("Assigning virtual machines to LB: %s" % applb.id)
        try:
            applb.assign(self.apiclient, vms=[vm])
        except Exception as e:
            self.fail(
                "Failed to assign virtual machine(s) to loadbalancer: %s" % e)

        # Create nat rule to access client vm
        self.create_natrule(
            vpc, vm, "22", "22", public_ip, network_internal_lb)

        # Verify access to and the contents of the admin stats page on the
        # private address via a vm in the internal lb tier
        stats = self.verify_lb_stats(
            applb.sourceipaddress, self.get_ssh_client(vm, 5), settings)
        self.assertTrue(stats, "Failed to verify LB HAProxy stats")
Example #38
    def get_lb_stats_settings(self):
        self.logger.debug("Retrieving haproxy stats settings")
        settings = {}
        try:
            settings["stats_port"] = Configurations.list(
                self.apiclient, name="network.loadbalancer.haproxy.stats.port")[0].value
            settings["stats_uri"] = Configurations.list(
                self.apiclient, name="network.loadbalancer.haproxy.stats.uri")[0].value
            # Update global setting network.loadbalancer.haproxy.stats.auth to a known value
            haproxy_auth = "admin:password"
            Configurations.update(self.apiclient, "network.loadbalancer.haproxy.stats.auth", haproxy_auth)
            self.logger.debug(
                "Updated global setting stats network.loadbalancer.haproxy.stats.auth to %s" % (haproxy_auth))
            settings["username"], settings["password"] = haproxy_auth.split(":")
            settings["visibility"] = Configurations.list(
                self.apiclient, name="network.loadbalancer.haproxy.stats.visibility")[0].value
            self.logger.debug(settings)
        except Exception as e:
            self.fail("Failed to retrieve stats settings " % e)

        return settings
Example #39
    def tearDownClass(cls):
        try:
            # Cleanup resources used

            if cls.updateclone:
                Configurations.update(cls.api_client,
                                      "vmware.create.full.clone",
                                      value="false",storageid=cls.storageID)
                Configurations.update(cls.api_client,
                                              "vmware.create.full.clone",
                                              value="false")
                Configurations.update(cls.api_client,
                                              "vmware.root.disk.controller",
                                              value=cls.defaultdiskcontroller)
                StoragePool.update(cls.api_client, id=cls.storageID,
                                   tags="")
                cls.restartServer()

                # Give the management server 30 seconds to warm up;
                # deployments attempted just as it came back up have failed
                time.sleep(30)

            cleanup_resources(cls.api_client, cls._cleanup)
        except Exception as e:
            raise Exception("Warning: Exception during cleanup : %s" % e)
        return
Example #40
    def tearDownClass(cls):
        try:
            # Cleanup resources used

            if cls.updateclone:
                Configurations.update(cls.api_client,
                                              "vmware.root.disk.controller",
                                              value=cls.defaultdiskcontroller)
                Configurations.update(cls.api_client,
                                              "vmware.create.full.clone",
                                              value="false")
                Configurations.update(cls.api_client,
                                      "vmware.create.full.clone",
                                      value="false", storageid=cls.storageID)
                if cls.storageID:
                    StoragePool.update(cls.api_client, id=cls.storageID,
                                    tags="")

            cleanup_resources(cls.api_client, cls._cleanup)
        except Exception as e:
            raise Exception("Warning: Exception during cleanup : %s" % e)
        return
Example #41
    def setUpClass(cls):
        cls.cloudstacktestclient = super(TestDeployVmRootSize,
                                     cls).getClsTestClient()
        cls.api_client = cls.cloudstacktestclient.getApiClient()
        cls.hypervisor = cls.cloudstacktestclient.getHypervisorInfo().lower()
        cls.mgtSvrDetails = cls.config.__dict__["mgtSvr"][0].__dict__

        # Get Zone, Domain and Default Built-in template
        cls.domain = get_domain(cls.api_client)
        cls.zone = get_zone(cls.api_client,
                            cls.cloudstacktestclient.getZoneForTests())
        cls.services = cls.testClient.getParsedTestDataConfig()
        cls.services["mode"] = cls.zone.networktype
        cls._cleanup = []
        cls.updateclone = False
        cls.restartreq = False
        cls.defaultdiskcontroller = "ide"
        cls.template = get_template(cls.api_client, cls.zone.id)
        if cls.template == FAILED:
            assert False, "get_template() failed to return template "

        #create a user account
        cls.account = Account.create(
            cls.api_client,
            cls.services["account"],
            domainid=cls.domain.id,admin=True
        )
        cls._cleanup.append(cls.account)
        list_pool_resp = list_storage_pools(cls.api_client,
                                            account=cls.account.name,
                                            domainid=cls.domain.id)
        # Identify the storage pool type and set vmware.create.full.clone to
        # true if the storage is VMFS
        if cls.hypervisor == 'vmware':
             # make sure the url in the templateregister dictionary in
             # test_data.config points to an .ova file

             list_config_storage_response = list_configurations(
                 cls.api_client, name="vmware.root.disk.controller")
             cls.defaultdiskcontroller = list_config_storage_response[0].value
             if list_config_storage_response[0].value in ("ide", "osdefault"):
                 Configurations.update(cls.api_client,
                                       "vmware.root.disk.controller",
                                       value="scsi")
                 cls.updateclone = True
                 cls.restartreq = True

             list_config_fullclone_global_response = list_configurations(
                 cls.api_client, name="vmware.create.full.clone")
             if list_config_fullclone_global_response[0].value == "false":
                 Configurations.update(cls.api_client,
                                       "vmware.create.full.clone",
                                       value="true")
                 cls.updateclone = True
                 cls.restartreq = True

             cls.tempobj = Template.register(cls.api_client,
                                    cls.services["templateregister"],
                                    hypervisor=cls.hypervisor,
                                    zoneid=cls.zone.id,
                                         account=cls.account.name,
                                         domainid=cls.domain.id
                                        )
             cls.tempobj.download(cls.api_client)

             for strpool in list_pool_resp:
                if strpool.type.lower() == "vmfs" or strpool.type.lower()== "networkfilesystem":
                    list_config_storage_response = list_configurations(
                        cls.api_client
                        , name=
                        "vmware.create.full.clone",storageid=strpool.id)
                    res = validateList(list_config_storage_response)
                    if res[2]== INVALID_INPUT:
                        raise Exception("Failed to  list configurations ")

                    if list_config_storage_response[0].value == "false":
                        Configurations.update(cls.api_client,
                                              "vmware.create.full.clone",
                                              value="true",
                                              storageid=strpool.id)
                        cls.updateclone = True
                        StoragePool.update(cls.api_client,id=strpool.id,
                                           tags="scsi")
                        cls.storageID = strpool.id
                        break
             if cls.restartreq:
                cls.restartServer()
        # Create service offerings and register them for cleanup
        cls.service_offering = ServiceOffering.create(
            cls.api_client,
            cls.services["service_offering"]
        )
        cls.services_offering_vmware = ServiceOffering.create(
            cls.api_client, cls.services["service_offering"], tags="scsi")
        cls._cleanup.extend([cls.service_offering,
                             cls.services_offering_vmware])
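
The setup above only schedules a management server restart when it actually changed a setting. Condensed, that idiom looks like the sketch below; ensure_config is illustrative, not a Marvin API.

# "Update only when needed" idiom; returns True when a restart is warranted.
from marvin.lib.base import Configurations  # import path assumed


def ensure_config(apiclient, name, wanted, storageid=None):
    kwargs = {"storageid": storageid} if storageid else {}
    current = Configurations.list(apiclient, name=name, **kwargs)[0].value
    if current == wanted:
        return False
    Configurations.update(apiclient, name, value=wanted, **kwargs)
    return True

# e.g. cls.restartreq |= ensure_config(cls.api_client,
#                                      "vmware.root.disk.controller", "scsi")
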
    def test_02_Overcommit_factor(self):
        """change mem.overprovisioning.factor and verify vm memory """

        listHost = Host.list(self.apiclient,
                             id=self.deployVmResponse.hostid
                             )
        self.assertEqual(
            validateList(listHost)[0],
            PASS,
            "check list host for host id %s" %
            self.deployVmResponse.hostid)
        if listHost[0].hypervisor.lower() not in ['kvm', 'xenserver']:
            self.skipTest(
                "Skipping test because of unsupported hypervisor type %s" %
                listHost[0].hypervisor)

        Configurations.update(self.apiclient,
                              clusterid=listHost[0].clusterid,
                              name="mem.overprovisioning.factor",
                              value=1)

        self.deployVmResponse.stop(self.apiclient)
        self.deployVmResponse.start(self.apiclient)

        if listHost[0].hypervisor.lower() == 'xenserver':

            k = ssh_xen_host(
                self.testdata["configurableData"]["password"],
                listHost[0].ipaddress,
                self.deployVmResponse.instancename)

        elif listHost[0].hypervisor.lower() == 'kvm':

            k = ssh_kvm_host(
                self.testdata["configurableData"]["password"],
                listHost[0].ipaddress,
                self.deployVmResponse.instancename)

        self.assertEqual(k[0],
                         k[1],
                         "Check static max equals min on host for overcommit factor 1")

        Configurations.update(self.apiclient,
                              clusterid=listHost[0].clusterid,
                              name="mem.overprovisioning.factor",
                              value=2)

        self.deployVmResponse.stop(self.apiclient)
        self.deployVmResponse.start(self.apiclient)

        if listHost[0].hypervisor.lower() == 'xenserver':
            k1 = ssh_xen_host(
                self.testdata["configurableData"]["password"],
                listHost[0].ipaddress,
                self.deployVmResponse.instancename)

        elif listHost[0].hypervisor.lower() == 'kvm':
            time.sleep(200)
            k1 = ssh_kvm_host(
                self.testdata["configurableData"]["password"],
                listHost[0].ipaddress,
                self.deployVmResponse.instancename)
        self.assertEqual(k1[0],
                         2 * k1[1],
                         "Check static max is twice min on host for overcommit factor 2")
    def test_03_cluster_capacity_check(self):
        """Change cpu/mem.overprovisioning.factor at cluster level and
           verify cluster capacity"""

        listHost = Host.list(self.apiclient,
                             id=self.deployVmResponse.hostid
                             )
        self.assertEqual(
            validateList(listHost)[0],
            PASS,
            "check list host for host id %s" %
            self.deployVmResponse.hostid)

        Configurations.update(self.apiclient,
                              clusterid=listHost[0].clusterid,
                              name="mem.overprovisioning.factor",
                              value=1)
        Configurations.update(self.apiclient,
                              clusterid=listHost[0].clusterid,
                              name="cpu.overprovisioning.factor",
                              value=1)

        time.sleep(self.wait_time)

        capacity = Capacities.list(self.apiclient,
                                   clusterid=listHost[0].clusterid)
        self.assertEqual(
            validateList(capacity)[0],
            PASS,
            "check list capacity response for cluster id %s" %
            listHost[0].clusterid)
        cpu, mem = capacity_parser(capacity)

        Configurations.update(self.apiclient,
                              clusterid=listHost[0].clusterid,
                              name="mem.overprovisioning.factor",
                              value=2)
        Configurations.update(self.apiclient,
                              clusterid=listHost[0].clusterid,
                              name="cpu.overprovisioning.factor",
                              value=2)

        time.sleep(self.wait_time)

        capacity1 = Capacities.list(self.apiclient,
                                    clusterid=listHost[0].clusterid)
        self.assertEqual(
            validateList(capacity1)[0],
            PASS,
            "check list capacity response for cluster id %s" %
            listHost[0].clusterid)
        cpu1, mem1 = capacity_parser(capacity1)
        self.assertEqual(2 * cpu[0],
                         cpu1[0],
                         "check total capacity ")
        self.assertEqual(2 * cpu[1],
                         cpu1[1],
                         "check capacity used")
        self.assertEqual(cpu[2],
                         cpu1[2],
                         "check capacity % used")

        self.assertEqual(2 * mem[0],
                         mem1[0],
                         "check mem total capacity ")
        self.assertEqual(2 * mem[1],
                         mem1[1],
                         "check mem capacity used")
        self.assertEqual(mem[2],
                         mem1[2],
                         "check mem capacity % used")
    def setUpClass(cls):
        cls.testClient = super(TestResizeVolume, cls).getClsTestClient()
        cls.api_client = cls.testClient.getApiClient()
        cls.hypervisor = (cls.testClient.getHypervisorInfo()).lower()
        cls.storageID = None
        # Fill services from the external config file
        cls.services = cls.testClient.getParsedTestDataConfig()
        # Get Zone, Domain and templates
        cls.domain = get_domain(cls.api_client)
        cls.zone = get_zone(
            cls.api_client,
            cls.testClient.getZoneForTests())
        cls.services["mode"] = cls.zone.networktype
        cls._cleanup = []
        cls.unsupportedStorageType = False
        cls.unsupportedHypervisorType = False
        cls.updateclone = False
        if cls.hypervisor not in ['xenserver', 'kvm', 'vmware']:
            cls.unsupportedHypervisorType = True
            return
        cls.template = get_template(
            cls.api_client,
            cls.zone.id
        )
        cls.services["virtual_machine"]["zoneid"] = cls.zone.id
        cls.services["virtual_machine"]["template"] = cls.template.id
        cls.services["volume"]["zoneid"] = cls.zone.id
        try:
            cls.parent_domain = Domain.create(cls.api_client,
                                              services=cls.services[
                                                  "domain"],
                                              parentdomainid=cls.domain.id)
            cls.parentd_admin = Account.create(cls.api_client,
                                               cls.services["account"],
                                               admin=True,
                                               domainid=cls.parent_domain.id)
            cls._cleanup.append(cls.parentd_admin)
            cls._cleanup.append(cls.parent_domain)
            list_pool_resp = list_storage_pools(
                cls.api_client,
                account=cls.parentd_admin.name,
                domainid=cls.parent_domain.id)
            res = validateList(list_pool_resp)
            if res[2] == INVALID_INPUT:
                raise Exception(
                    "Failed to list storage pools - no storage pools found")
            # Identify the storage pool type and set vmware.create.full.clone
            # to true if the storage is VMFS
            if cls.hypervisor == 'vmware':
                for strpool in list_pool_resp:
                    if strpool.type.lower() in ("vmfs", "networkfilesystem"):
                        list_config_storage_response = list_configurations(
                            cls.api_client,
                            name="vmware.create.full.clone",
                            storageid=strpool.id)
                        res = validateList(list_config_storage_response)
                        if res[2] == INVALID_INPUT:
                            raise Exception("Failed to list configurations")
                        if list_config_storage_response[0].value == "false":
                            Configurations.update(cls.api_client,
                                                  "vmware.create.full.clone",
                                                  value="true",
                                                  storageid=strpool.id)
                            cls.updateclone = True
                            StoragePool.update(cls.api_client,
                                               id=strpool.id, tags="scsi")
                            cls.storageID = strpool.id
                            cls.unsupportedStorageType = False
                            break
                    else:
                        cls.unsupportedStorageType = True
            # Create service offerings with the normal config
            cls.service_offering = ServiceOffering.create(
                cls.api_client,
                cls.services["service_offering"])
            cls.services_offering_vmware = ServiceOffering.create(
                cls.api_client, cls.services["service_offering"], tags="scsi")
            cls._cleanup.extend([cls.service_offering,
                                 cls.services_offering_vmware])

        except Exception as e:
            # Clean up whatever was created, then surface the failure
            cls.tearDownClass()
            raise e
        return
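
The unsupportedHypervisorType and unsupportedStorageType flags set above are typically consumed in setUp to skip individual tests; a minimal sketch of that guard, assuming the stock unittest skipTest API:

# A sketch of the setUp guard that consumes the flags set in setUpClass.
def setUp(self):
    self.apiclient = self.testClient.getApiClient()
    if self.unsupportedHypervisorType:
        self.skipTest("Hypervisor %s is not supported" % self.hypervisor)
    if self.unsupportedStorageType:
        self.skipTest("No VMFS/NFS storage pool eligible for full clone")
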
    def test_nested_virtualization_vmware(self):
        """Test nested virtualization on Vmware hypervisor"""
        if self.hypervisor.lower() != "vmware":
            self.skipTest("Skipping test because suitable hypervisor/host not present")

        # 1) Update nested virtualization configurations, if needed
        configs = Configurations.list(self.apiclient, name="vmware.nested.virtualization")
        rollback_nv = False
        rollback_nv_per_vm = False
        for conf in configs:
            if (conf.name == "vmware.nested.virtualization" and conf.value == "false"):
                config_update = Configurations.update(self.apiclient, "vmware.nested.virtualization", "true")
                self.logger.debug("Updated global setting vmware.nested.virtualization to true")
                rollback_nv = True
            elif (conf.name == "vmware.nested.virtualization.perVM" and conf.value == "false"):
                config_update = Configurations.update(self.apiclient, "vmware.nested.virtualization.perVM", "true")
                self.logger.debug("Updated global setting vmware.nested.virtualization.perVM to true")
                rollback_nv_per_vm = True
                
        # 2) Deploy a vm
        virtual_machine = VirtualMachine.create(
            self.apiclient,
            self.services["small"],
            accountid=self.account.name,
            domainid=self.account.domainid,
            serviceofferingid=self.service_offering.id,
            mode=self.services['mode']
        )
        self.assertTrue(virtual_machine is not None, "VM failed to deploy")
        self.assertTrue(virtual_machine.state == 'Running', "VM is not running")
        self.logger.debug("Deployed vm: %s" % virtual_machine.id)
        
        isolated_network = Network.create(
            self.apiclient,
            self.services["isolated_network"],
            self.account.name,
            self.account.domainid,
            networkofferingid=self.isolated_network_offering.id)

        virtual_machine.add_nic(self.apiclient, isolated_network.id)
        
        # 3) SSH into vm
        ssh_client = virtual_machine.get_ssh_client()

        if ssh_client:
            # Read the CPU flags from the guest
            result = ssh_client.execute("cat /proc/cpuinfo | grep flags")
            self.logger.debug(result)
        else:
            self.fail("Failed to setup ssh connection to %s" % virtual_machine.public_ip)
            
        # 4) Revert configurations, if needed
        if rollback_nv:
            config_update = Configurations.update(self.apiclient, "vmware.nested.virtualization", "false")
            self.logger.debug("Reverted global setting vmware.nested.virtualization back to false")
        if rollback_nv_per_vm:
            config_update = Configurations.update(self.apiclient, "vmware.nested.virtualization.perVM", "false")
            self.logger.debug("Reverted global setting vmware.nested.virtualization.perVM back to false")
            
        # 5) Check CPU flags: vmx (Intel) or svm (AMD) indicates nested virtualization is enabled
        self.assertTrue(result is not None, "Empty result for CPU flags")
        res = str(result)
        self.assertTrue('vmx' in res or 'svm' in res,
                        "Expected vmx or svm in guest CPU flags")
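
Substring matching on str(result) would also accept flag names that merely contain "vmx" or "svm". A stricter, token-level parse of the flags line fetched above (illustrative helper, not part of the original test):

# Token-level check of /proc/cpuinfo flags for nested virtualization.
def has_nested_virt_flag(cpuinfo_lines):
    for line in cpuinfo_lines:
        if line.startswith("flags"):
            tokens = line.split(":", 1)[-1].split()
            return "vmx" in tokens or "svm" in tokens
    return False

assert has_nested_virt_flag(["flags : fpu vme vmx ept"])
assert not has_nested_virt_flag(["processor : 0"])  # no flags line at all
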
    def test_04_zone_capacity_check(self):
        """Change cpu/mem.overprovisioning.factor at cluster level for
           all clusters in a zone and verify capacity at zone level"""
        list_cluster = Cluster.list(self.apiclient,
                                    zoneid=self.zone.id)
        self.assertEqual(
            validateList(list_cluster)[0],
            PASS,
            "check list cluster response for zone id  %s" %
            self.zone.id)
        k = len(list_cluster)
        for i in range(k):
            Configurations.update(self.apiclient,
                                  clusterid=list_cluster[i].id,
                                  name="mem.overprovisioning.factor",
                                  value=1)
            Configurations.update(self.apiclient,
                                  clusterid=list_cluster[i].id,
                                  name="cpu.overprovisioning.factor",
                                  value=1)

        time.sleep(self.wait_time)

        capacity = Capacities.list(self.apiclient,
                                   zoneid=self.zone.id)
        self.assertEqual(
            validateList(capacity)[0],
            PASS,
            "check list capacity response for zone id %s" %
            self.zone.id)
        cpu, mem = capacity_parser(capacity)
        for i in range(k):
            Configurations.update(self.apiclient,
                                  clusterid=list_cluster[i].id,
                                  name="mem.overprovisioning.factor",
                                  value=2)
            Configurations.update(self.apiclient,
                                  clusterid=list_cluster[i].id,
                                  name="cpu.overprovisioning.factor",
                                  value=2)

        time.sleep(self.wait_time)

        capacity1 = Capacities.list(self.apiclient,
                                    zoneid=self.zone.id)
        self.assertEqual(validateList(capacity1)[0],
                         PASS,
                         "check list capacity for zone id %s" % self.zone.id)

        cpu1, mem1 = capacity_parser(capacity1)
        self.assertEqual(2 * cpu[0],
                         cpu1[0],
                         "check total capacity ")
        self.assertEqual(2 * cpu[1],
                         cpu1[1],
                         "check capacity used")
        self.assertEqual(cpu[2],
                         cpu1[2],
                         "check capacity % used")

        self.assertEqual(2 * mem[0],
                         mem1[0],
                         "check mem total capacity ")
        self.assertEqual(2 * mem[1],
                         mem1[1],
                         "check mem capacity used")
        self.assertEqual(mem[2],
                         mem1[2],
                         "check mem capacity % used")
        for i in range(k):
            Configurations.update(self.apiclient,
                                  clusterid=list_cluster[i].id,
                                  name="mem.overprovisioning.factor",
                                  value=1)
            Configurations.update(self.apiclient,
                                  clusterid=list_cluster[i].id,
                                  name="cpu.overprovisioning.factor",
                                  value=1)
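
Note that the factors are restored to 1 only when every assertion passes; a failure midway leaves the clusters overprovisioned. A try/finally sketch around an illustrative helper avoids that:

# Restore cluster factors even when an assertion fails mid-test.
from marvin.lib.base import Configurations  # import path assumed


def set_overprovisioning(apiclient, clusterid, factor):
    for name in ("mem.overprovisioning.factor",
                 "cpu.overprovisioning.factor"):
        Configurations.update(apiclient, clusterid=clusterid,
                              name=name, value=factor)

# try:
#     set_overprovisioning(self.apiclient, cluster_id, 2)
#     ...capacity assertions...
# finally:
#     set_overprovisioning(self.apiclient, cluster_id, 1)
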
    def setUpClass(cls):
        testClient = super(TestDeltaSnapshots, cls).getClsTestClient()
        cls.apiclient = testClient.getApiClient()
        cls.testdata = testClient.getParsedTestDataConfig()
        cls.hypervisor = testClient.getHypervisorInfo()

        # Get Zone, Domain and templates
        cls.domain = get_domain(cls.apiclient)
        cls.zone = get_zone(cls.apiclient, testClient.getZoneForTests())

        cls.template = get_template(
            cls.apiclient,
            cls.zone.id,
            cls.testdata["ostype"])

        cls.snapshots_created = []
        cls._cleanup = []

        cls.mgtSvrDetails = cls.config.__dict__["mgtSvr"][0].__dict__
        cls.skiptest = False

        if cls.hypervisor.lower() not in ["xenserver"]:
            cls.skiptest = True

        try:

            # Create an account
            cls.account = Account.create(
                cls.apiclient,
                cls.testdata["account"],
                domainid=cls.domain.id
            )
            cls._cleanup.append(cls.account)

            # Create user api client of the account
            cls.userapiclient = testClient.getUserApiClient(
                UserName=cls.account.name,
                DomainName=cls.account.domain
            )

            # Create Service offering
            cls.service_offering = ServiceOffering.create(
                cls.apiclient,
                cls.testdata["service_offering"],
            )
            cls._cleanup.append(cls.service_offering)

            if cls.testdata["configurableData"][
                    "restartManagementServerThroughTestCase"]:
                # Set snapshot delta max value
                Configurations.update(cls.apiclient,
                                      name="snapshot.delta.max",
                                      value="3"
                                      )

                # Restart management server
                cls.RestartServer()
                time.sleep(120)

            configs = Configurations.list(
                cls.apiclient,
                name="snapshot.delta.max")
            cls.delta_max = configs[0].value

            cls.vm = VirtualMachine.create(
                cls.apiclient,
                cls.testdata["small"],
                templateid=cls.template.id,
                accountid=cls.account.name,
                domainid=cls.account.domainid,
                serviceofferingid=cls.service_offering.id,
                zoneid=cls.zone.id,
                mode=cls.zone.networktype
            )

        except Exception as e:
            cls.tearDownClass()
            raise e
        return
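
The restart-then-sleep-120-seconds sequence recurs in several of these tests. A polling alternative keeps the wait only as long as the server actually needs; the sketch below retries a cheap list API (expunge.interval is a stock CloudStack global setting) until the server answers.

# Illustrative replacement for the fixed sleep after RestartServer().
import time

from marvin.lib.base import Configurations  # import path assumed


def wait_for_management_server(apiclient, timeout=300, interval=10):
    last_error = None
    while timeout > 0:
        try:
            Configurations.list(apiclient, name="expunge.interval")
            return
        except Exception as e:  # server still coming up
            last_error = e
            time.sleep(interval)
            timeout -= interval
    raise Exception("Management server did not come back: %s" % last_error)
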
    def test_03_volume_rec_snapshot(self):
        """ Test Volume (root) Snapshot
        # 1. For snapshot.delta.max > maxsnaps verify that when number of snapshot exceeds 
                maxsnaps value previous snapshot should get deleted from database but remain 
                on secondary storage and when the value exceeds snapshot.delta.max the 
                snapshot should get deleted from secondary storage
        """

        if self.hypervisor.lower() != "xenserver":
            self.skipTest("Skip test for hypervisor other than Xenserver")

        # Step 1
        self.testdata["recurring_snapshot"]["intervaltype"] = 'HOURLY'
        self.testdata["recurring_snapshot"]["schedule"] = 1
        recurring_snapshot_root = SnapshotPolicy.create(
            self.apiclient,
            self.volume[0].id,
            self.testdata["recurring_snapshot"]
        )

        Configurations.update(self.apiclient,
                              name="snapshot.delta.max",
                              value="3"
                              )

        list_snapshots_policy = list_snapshot_policy(
            self.apiclient,
            id=recurring_snapshot_root.id,
            volumeid=self.volume[0].id
        )
        list_validation = validateList(list_snapshots_policy)

        self.assertEqual(
            list_validation[0],
            PASS,
            "snapshot list validation failed due to %s" %
            list_validation[2])

        timeout = self.testdata["timeout"]
        while True:
            snapshots = list_snapshots(
                self.apiclient,
                volumeid=self.volume[0].id,
                intervaltype=self.testdata[
                    "recurring_snapshot"]["intervaltype"],
                snapshottype='RECURRING',
                listall=True
            )

            if isinstance(snapshots, list):
                break

            elif timeout == 0:
                raise Exception("List snapshots API call failed.")

        time.sleep(3600 * 2)

        snapshots_2 = list_snapshots(
            self.apiclient,
            volumeid=self.volume[0].id,
            intervaltype=self.testdata["recurring_snapshot"]["intervaltype"],
            snapshottype='RECURRING',
            listall=True
        )

        self.assertTrue(snapshots[0] not in snapshots_2)

        for snapshot in snapshots_2:
            snapshots.append(snapshot)

        time.sleep(360)
        self.assertEqual(
            self.dbclient.execute(
                "select status  from snapshots where uuid='%s'" %
                snapshots[0].id)[0][0],
            "Destroyed"
        )

        self.assertTrue(
            is_snapshot_on_nfs(
                self.apiclient,
                self.dbclient,
                self.config,
                self.zone.id,
                snapshots[0].id))

        time.sleep(3600)

        snapshots_3 = list_snapshots(
            self.apiclient,
            volumeid=self.volume[0].id,
            intervaltype=self.testdata["recurring_snapshot"]["intervaltype"],
            snapshottype='RECURRING',
            listall=True
        )

        self.assertTrue(snapshots[1] not in snapshots_3)
        snapshots.append(snapshots_3[1])
        time.sleep(180)

        self.assertEqual(
            self.dbclient.execute(
                "select status  from snapshots where uuid='%s'" %
                snapshots[1].id)[0][0],
            "Destroyed"
        )

        for snapshot in [snapshots[0], snapshots[1]]:
            self.assertTrue(
                is_snapshot_on_nfs(
                    self.apiclient,
                    self.dbclient,
                    self.config,
                    self.zone.id,
                    snapshot.id))

        time.sleep(3600)

        snapshots_4 = list_snapshots(
            self.apiclient,
            volumeid=self.volume[0].id,
            intervaltype=self.testdata["recurring_snapshot"]["intervaltype"],
            snapshottype='RECURRING',
            listall=True
        )

        self.assertTrue(snapshots[2] not in snapshots_4)

        snapshots.append(snapshots_4[1])
        time.sleep(180)

        self.assertEqual(
            self.dbclient.execute(
                "select status  from snapshots where uuid='%s'" %
                snapshots[2].id)[0][0],
            "Destroyed"
        )

        for snapshot in [snapshots[0], snapshots[1], snapshots[2]]:
            self.assertFalse(
                is_snapshot_on_nfs(
                    self.apiclient,
                    self.dbclient,
                    self.config,
                    self.zone.id,
                    snapshot.id))

        return
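
The fixed multi-hour sleeps above make the test slow and timing-sensitive. A polling helper that waits for the next recurring snapshot to appear is one alternative; wait_for_snapshot_count is illustrative, and list_snapshots is the same Marvin helper the test already uses.

# Poll for a recurring snapshot count instead of sleeping a fixed 2 hours.
import time

from marvin.lib.common import list_snapshots  # import path assumed


def wait_for_snapshot_count(apiclient, volumeid, count,
                            timeout=7200, interval=60):
    while timeout > 0:
        snaps = list_snapshots(apiclient, volumeid=volumeid,
                               snapshottype='RECURRING', listall=True)
        if isinstance(snaps, list) and len(snaps) >= count:
            return snaps
        time.sleep(interval)
        timeout -= interval
    raise Exception("Volume %s never reached %d recurring snapshots"
                    % (volumeid, count))
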
def addLdapConfiguration1(cls, ldapConfiguration):
    """
    :param ldapConfiguration: dict of LDAP connection settings
    """
    cls.chkConfig = checkLdapConfiguration(cls, ldapConfiguration)
    if not cls.chkConfig:
        return 0

    # Setup Global settings
    Configurations.update(
        cls.apiClient,
        name="ldap.basedn",
        value=ldapConfiguration['basedn']
    )
    Configurations.update(
        cls.apiClient,
        name="ldap.bind.password",
        value=ldapConfiguration['bindpassword']
    )
    Configurations.update(
        cls.apiClient,
        name="ldap.bind.principal",
        value=ldapConfiguration['principal']
    )
    Configurations.update(
        cls.apiClient,
        name="ldap.email.attribute",
        value=ldapConfiguration['emailAttribute']
    )
    Configurations.update(
        cls.apiClient,
        name="ldap.user.object",
        value=ldapConfiguration['userObject']
    )
    Configurations.update(
        cls.apiClient,
        name="ldap.username.attribute",
        value=ldapConfiguration['usernameAttribute']
    )
    Configurations.update(
        cls.apiClient,
        name="ldap.nested.groups.enable",
        value="true"
    )

    ldapServer = addLdapConfiguration.addLdapConfigurationCmd()
    ldapServer.hostname = ldapConfiguration['hostname']
    ldapServer.port = ldapConfiguration['port']

    cls.debug("calling addLdapConfiguration API command")
    try:
        cls.apiClient.addLdapConfiguration(ldapServer)
        cls.debug("addLdapConfiguration was successful")
        return 1
    except Exception as e:
        cls.debug(
            "addLdapConfiguration failed %s; check the passed"
            " LDAP attributes" %
            e)
        cls.reason = "addLdapConfiguration failed %s; check the passed " \
                     "LDAP attributes" % e
        raise Exception(
            "addLdapConfiguration failed %s; check the passed"
            " LDAP attributes" %
            e)
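
The seven ldap.* updates above can be driven from a mapping, which keeps the setting names and the keys of the ldapConfiguration dict in one place. A condensed, data-driven sketch of the same calls:

# The per-key updates from addLdapConfiguration1, as a data-driven loop.
from marvin.lib.base import Configurations  # import path assumed

LDAP_SETTING_MAP = {
    "ldap.basedn": "basedn",
    "ldap.bind.password": "bindpassword",
    "ldap.bind.principal": "principal",
    "ldap.email.attribute": "emailAttribute",
    "ldap.user.object": "userObject",
    "ldap.username.attribute": "usernameAttribute",
}


def apply_ldap_settings(apiclient, ldap_configuration):
    for setting, key in LDAP_SETTING_MAP.items():
        Configurations.update(apiclient, name=setting,
                              value=ldap_configuration[key])
    Configurations.update(apiclient, name="ldap.nested.groups.enable",
                          value="true")
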
Example #50
0
    def updateConfigurAndRestart(self, name, value):
        Configurations.update(self.apiclient, name, value)
        self.RestartServers()
        time.sleep(self.services["sleep"])
    def test_02_concurrent_snapshots_configuration(self):
        """Concurrent Snapshots
            1. Verify that CreateSnapshot command fails when it
                takes more time than job.expire.minute
            2. Verify that snapshot creation fails if CreateSnapshot command
                takes more time than concurrent.snapshots.threshold.perhost
            3. Check the event generation when snapshot creation
                fails if CreateSnapshot takes more time than
                concurrent.snapshots.threshold.perhost

        """

        # Step 1
        if not self.testdata["configurableData"][
                "restartManagementServerThroughTestCase"]:
            self.skipTest(
                "Skip test if restartManagementServerThroughTestCase \
                        is not provided")

        configs = Configurations.list(
            self.apiclient,
            name="job.expire.minutes")
        orig_expire = configs[0].value

        Configurations.update(self.apiclient,
                              name="concurrent.snapshots.threshold.perhost",
                              value="2"
                              )

        Configurations.update(self.apiclient,
                              name="job.expire.minutes",
                              value="1"
                              )

        # Restart management server
        self.RestartServer()
        time.sleep(120)

        try:
            thread_pool = []
            for i in range(4):
                create_snapshot_thread = Thread(
                    target=CreateSnapshot,
                    args=(
                        self,
                        self.root_pool[i],
                        False))
                thread_pool.append(create_snapshot_thread)

            for thread in thread_pool:
                thread.start()

            for thread in thread_pool:
                thread.join()

        except Exception as e:
            raise Exception(
                "Warning: unable to start snapshot threads: %s" %
                e)

        Configurations.update(self.apiclient,
                              name="job.expire.minutes",
                              value=orig_expire
                              )

        # Restart management server
        self.RestartServer()
        time.sleep(120)

        # Step 2
        Configurations.update(self.apiclient,
                              name="concurrent.snapshots.threshold.perhost",
                              value="3"
                              )

        Configurations.update(self.apiclient,
                              name="job.expire.minutes",
                              value="1"
                              )

        # Restart management server
        self.RestartServer()
        time.sleep(120)

        configs = Configurations.list(
            self.apiclient,
            name="job.expire.minutes")

        with self.assertRaises(Exception):
            CreateSnapshot(self, self.root_pool[0], False)

        Configurations.update(self.apiclient,
                              name="job.expire.minutes",
                              value=orig_expire
                              )

        # Restart management server
        self.RestartServer()
        time.sleep(120)

        # Step 3
        configs = Configurations.list(
            self.apiclient,
            name="job.cancel.threshold.minutes")
        orig_cancel = configs[0].value

        Configurations.update(self.apiclient,
                              name="concurrent.snapshots.threshold.perhost",
                              value="3"
                              )

        Configurations.update(self.apiclient,
                              name="job.cancel.threshold.minutes",
                              value="1"
                              )

        self.RestartServer()
        time.sleep(120)

        configs = Configurations.list(
            self.apiclient,
            name="job.expire.minutes")

        with self.assertRaises(Exception):
            CreateSnapshot(self, self.root_pool[0], False)

        Configurations.update(self.apiclient,
                              name="job.cancel.threshold.minutes",
                              value=orig_cancel
                              )

        self.RestartServer()
        time.sleep(120)
        # Step 4
        Configurations.update(self.apiclient,
                              name="concurrent.snapshots.threshold.perhost",
                              value="3"
                              )

        self.RestartServer()
        time.sleep(120)

        Configurations.update(self.apiclient,
                              name="job.cancel.threshold.minutes",
                              value="1"
                              )

        self.RestartServer()
        time.sleep(120)

        with self.assertRaises(Exception):
            CreateSnapshot(self, self.root_pool[0], True)

        return
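
The update + RestartServer + fixed sleep sequence is repeated throughout the test above. Collapsing it into one helper shortens the test and keeps the settle time in one place; apply_and_restart is illustrative and relies on the RestartServer method the test class already defines.

# One helper for the repeated update/restart/sleep sequence.
import time

from marvin.lib.base import Configurations  # import path assumed


def apply_and_restart(testcase, settings, settle_seconds=120):
    """settings: mapping of global setting name -> value."""
    for name, value in settings.items():
        Configurations.update(testcase.apiclient, name=name, value=value)
    testcase.RestartServer()
    time.sleep(settle_seconds)

# e.g.:
# apply_and_restart(self, {"concurrent.snapshots.threshold.perhost": "2",
#                          "job.expire.minutes": "1"})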