Example #1
    def test_09_expunge_vm(self):
        """Test destroy(expunge) Virtual Machine
        """
        # Validate the following
        # 1. The listVM command should NOT return this VM any more.

        self.debug("Expunge VM-ID: %s" % self.small_virtual_machine.id)

        cmd = destroyVirtualMachine.destroyVirtualMachineCmd()
        cmd.id = self.small_virtual_machine.id
        self.apiclient.destroyVirtualMachine(cmd)

        config = Configurations.list(self.apiclient, name="expunge.delay")

        expunge_delay = int(config[0].value)
        time.sleep(expunge_delay * 2)

        # The VM is removed only after the expunge thread runs;
        # wait for up to two cycles of the expunge thread
        config = Configurations.list(self.apiclient, name="expunge.interval")
        expunge_cycle = int(config[0].value)
        wait_time = expunge_cycle * 2
        while wait_time >= 0:
            list_vm_response = VirtualMachine.list(self.apiclient, id=self.small_virtual_machine.id)
            if list_vm_response:
                time.sleep(expunge_cycle)
                wait_time = wait_time - expunge_cycle
            else:
                break

        self.debug("listVirtualMachines response: %s" % list_vm_response)

        self.assertEqual(list_vm_response, None, "Expunged virtual machine should not be in listVirtualMachines response")
        return
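
The polling pattern in test_09_expunge_vm can be factored into a reusable helper. Below is a minimal sketch, assuming the Configurations and VirtualMachine classes from marvin.lib.base that these tests already use; the helper name wait_for_vm_expunge and its cycles argument are illustrative, not part of the framework.

import time

from marvin.lib.base import Configurations, VirtualMachine


def wait_for_vm_expunge(apiclient, vm_id, cycles=2):
    """Poll listVirtualMachines until the VM disappears or the wait budget is used up."""
    # expunge.delay: seconds a destroyed VM waits before it is eligible for expunging
    delay = int(Configurations.list(apiclient, name="expunge.delay")[0].value)
    # expunge.interval: how often the expunge thread runs
    interval = int(Configurations.list(apiclient, name="expunge.interval")[0].value)
    time.sleep(delay)
    remaining = interval * cycles
    while remaining >= 0:
        if not VirtualMachine.list(apiclient, id=vm_id):
            return True
        time.sleep(interval)
        remaining -= interval
    return False
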
    def test_04_rvpc_network_garbage_collector_nics(self):
        """ Create a redundant VPC with 1 Tier, 1 VM, 1 ACL, 1 PF and test Network GC Nics"""
        self.logger.debug("Starting test_04_rvpc_network_garbage_collector_nics")
        self.query_routers()
        self.networks.append(self.create_network(self.services["network_offering"], "10.1.1.1", nr_vms=1))
        self.check_routers_state()
        self.add_nat_rules()
        self.do_vpc_test(False)

        self.stop_vm()

        gc_wait = Configurations.list(self.apiclient, name="network.gc.wait")
        gc_interval = Configurations.list(self.apiclient, name="network.gc.interval")
        
        self.logger.debug("network.gc.wait is ==> %s" % gc_wait)
        self.logger.debug("network.gc.interval is ==> %s" % gc_wait)

        total_sleep = 120
        if gc_wait and gc_interval:
            total_sleep = int(gc_wait[0].value) + int(gc_interval[0].value)
        else:
            self.logger.debug("Could not retrieve the keys 'network.gc.interval' and 'network.gc.wait'. Sleeping for 2 minutes.")

        time.sleep(total_sleep * 3)

        self.check_routers_interface(interface_to_check="eth2", expected_exists=False)
        self.start_vm()
        self.check_routers_state(status_to_check="MASTER")
        self.check_routers_interface(interface_to_check="eth2", expected_exists=True)
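
The sleep window used in test_04 (network.gc.wait plus network.gc.interval, with a fallback when the keys cannot be read) can be computed in one place. A minimal sketch, assuming Configurations from marvin.lib.base; the helper name and the 120-second default are illustrative.

from marvin.lib.base import Configurations


def network_gc_window(apiclient, default=120):
    """Return network.gc.wait + network.gc.interval in seconds, or a default if unavailable."""
    gc_wait = Configurations.list(apiclient, name="network.gc.wait")
    gc_interval = Configurations.list(apiclient, name="network.gc.interval")
    if gc_wait and gc_interval:
        return int(gc_wait[0].value) + int(gc_interval[0].value)
    return default
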
 def test_03_concurrent_snapshot_global_value_assignment(self):
     """ Test verifies that exception is raised if string value is assigned to
          concurrent.snapshots.threshold.perhost parameter.
     """
     with self.assertRaises(Exception):
        Configurations.update(
          self.apiclient,
          "concurrent.snapshots.threshold.perhost",
          "String"
        )
     return
Example #5
 def rollback_nested_configurations(self, rollback_nv, rollback_nv_per_vm):
     if rollback_nv:
         config_update = Configurations.update(
             self.apiclient, "vmware.nested.virtualization", "false")
         self.logger.debug(
             "Reverted global setting vmware.nested.virtualization back to false"
         )
     if rollback_nv_per_vm:
         config_update = Configurations.update(
             self.apiclient, "vmware.nested.virtualization.perVM", "false")
         self.logger.debug(
             "Reverted global setting vmware.nested.virtualization.perVM back to false"
         )
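
The rollback helpers above flip a global setting for a test and then put it back. The same idea can be written as a context manager so the restore cannot be forgotten. A minimal sketch, assuming Configurations from marvin.lib.base; the name temporary_global_setting is illustrative, and settings that only take effect after a management-server restart are out of scope here.

from contextlib import contextmanager

from marvin.lib.base import Configurations


@contextmanager
def temporary_global_setting(apiclient, name, value):
    """Set a global configuration for the duration of a block, then restore the old value."""
    original = Configurations.list(apiclient, name=name)[0].value
    Configurations.update(apiclient, name, value)
    try:
        yield original
    finally:
        Configurations.update(apiclient, name, original)

Used as, for example, with temporary_global_setting(self.apiclient, "vmware.nested.virtualization", "true"): the setting is reverted even if the test body raises.
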
Example #6
    def tearDownClass(cls):
        try:
            # Cleanup resources used

            if cls.updateclone:
                Configurations.update(cls.api_client,
                                      "vmware.create.full.clone",
                                      value="false",storageid=cls.storageID)

            cleanup_resources(cls.api_client, cls._cleanup)
        except Exception as e:
            raise Exception("Warning: Exception during cleanup : %s" % e)
        return
Example #7
 def updateVmwareSettings(cls, tearDown):
     value = "false"
     if not tearDown:
         value = "true"
     if cls.hypervisor.lower() == 'vmware':
         Configurations.update(cls.apiclient, "vmware.create.full.clone",
                               value)
         allStoragePools = StoragePool.list(cls.apiclient)
         for pool in allStoragePools:
             Configurations.update(cls.apiclient,
                                   storageid=pool.id,
                                   name="vmware.create.full.clone",
                                   value=value)
 def tearDownClass(cls):
     try:
         # Restore CKS enabled
         if cls.initial_configuration_cks_enabled not in ["true", True]:
             cls.debug("Restoring Kubernetes Service enabled value")
             Configurations.update(cls.apiclient,
                                   "cloud.kubernetes.service.enabled",
                                   "false")
             cls.restartServer()
         cleanup_resources(cls.apiclient, cls._cleanup)
     except Exception as e:
         raise Exception("Warning: Exception during cleanup : %s" % e)
     return
 def update_NuageVspGlobalDomainTemplate(self, value):
     self.debug("Updating global setting nuagevsp.vpc.domaintemplate.name "
                "with value - %s" % value)
     self.user_apikey = self.api_client.connection.apiKey
     self.user_secretkey = self.api_client.connection.securityKey
     self.api_client.connection.apiKey = self.default_apikey
     self.api_client.connection.securityKey = self.default_secretkey
     Configurations.update(self.api_client,
                           name="nuagevsp.vpc.domaintemplate.name",
                           value=value)
     self.api_client.connection.apiKey = self.user_apikey
     self.api_client.connection.securityKey = self.user_secretkey
     self.debug("Successfully updated global setting "
                "nuagevsp.vpc.domaintemplate.name with value - %s" % value)
Example #10
 def updateVmwareCreateFullCloneSetting(self, tearDown):
     if not tearDown:
         Configurations.update(self.apiclient,
                               "vmware.create.full.clone",
                               "true")
         allStoragePools = StoragePool.list(
             self.apiclient
         )
         for pool in allStoragePools:
             Configurations.update(self.apiclient,
                                   storageid=pool.id,
                                   name="vmware.create.full.clone",
                                   value="true")
     else:
         Configurations.update(self.apiclient,
                               "vmware.create.full.clone",
                               self.fullClone[0].value.lower())
         allStoragePools = StoragePool.list(
             self.apiclient
         )
         for pool in allStoragePools:
             Configurations.update(self.apiclient,
                                   storageid=pool.id,
                                   name="vmware.create.full.clone",
                                   value=self.storeCloneValues[pool.id])
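
The restore branch above depends on self.storeCloneValues having been captured before the per-pool values were changed. A minimal sketch of that capture step, assuming Configurations and StoragePool from marvin.lib.base and that listConfigurations accepts a storageid filter the way updateConfiguration does; the helper name is illustrative.

from marvin.lib.base import Configurations, StoragePool


def capture_full_clone_per_pool(apiclient):
    """Record vmware.create.full.clone for every storage pool so it can be restored later."""
    originals = {}
    for pool in StoragePool.list(apiclient) or []:
        configs = Configurations.list(apiclient,
                                      name="vmware.create.full.clone",
                                      storageid=pool.id)
        if configs:
            originals[pool.id] = configs[0].value
    return originals
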
Example #11
 def tearDown(self):
     try:
         self.debug("Cleaning up the resources")
         #Clean up, terminate the created network offerings
         #cleanup_resources(self.apiclient, self.cleanup)
         interval = Configurations.list(self.apiclient,
                                        name='network.gc.interval')
         wait = Configurations.list(self.apiclient, name='network.gc.wait')
         # Sleep to ensure that all resources are deleted
         time.sleep(int(interval[0].value) + int(wait[0].value))
         self.debug("Cleanup complete!")
     except Exception as e:
         raise Exception("Warning: Exception during cleanup : %s" % e)
     return
Example #13
    def test_01_cluster_settings(self):
        """change cpu/mem.overprovisioning.factor at cluster level and
         verify the change """
        listHost = Host.list(self.apiclient,
                             id=self.deployVmResponse.hostid
                             )
        self.assertEqual(
            validateList(listHost)[0],
            PASS,
            "check list host response for host id %s" %
            self.deployVmResponse.hostid)
        Configurations.update(self.apiclient,
                              clusterid=listHost[0].clusterid,
                              name="mem.overprovisioning.factor",
                              value=2)

        Configurations.update(self.apiclient,
                              clusterid=listHost[0].clusterid,
                              name="cpu.overprovisioning.factor",
                              value=3)

        list_cluster = Cluster.list(self.apiclient,
                                    id=listHost[0].clusterid)
        self.assertEqual(
            validateList(list_cluster)[0],
            PASS,
            "check list cluster response for cluster id %s" %
            listHost[0].clusterid)
        self.assertEqual(int(list_cluster[0].cpuovercommitratio),
                         3,
                         "check the cpu overcommit value at cluster level ")

        self.assertEqual(int(list_cluster[0].memoryovercommitratio),
                         2,
                         "check memory overcommit value at cluster level")

        Configurations.update(self.apiclient,
                              clusterid=listHost[0].clusterid,
                              name="mem.overprovisioning.factor",
                              value=1)

        Configurations.update(self.apiclient,
                              clusterid=listHost[0].clusterid,
                              name="cpu.overprovisioning.factor",
                              value=1)
        list_cluster1 = Cluster.list(self.apiclient,
                                     id=listHost[0].clusterid)
        self.assertEqual(
            validateList(list_cluster1)[0],
            PASS,
            "check the list cluster response for id %s" %
            listHost[0].clusterid)
        self.assertEqual(int(list_cluster1[0].cpuovercommitratio),
                         1,
                         "check the cpu overcommit value at cluster level ")

        self.assertEqual(int(list_cluster1[0].memoryovercommitratio),
                         1,
                         "check memory overcommit value at cluster level")
Example #15
    def test_13_snapshot_to_volume_from_secondary(self):
        ''' Try to create volume from snapshot which is deleted from primary and exists on secondary storage
        '''
        virtual_machine = VirtualMachine.create(self.apiclient,
            {"name":"StorPool-%s" % uuid.uuid4() },
            zoneid=self.zone.id,
            templateid=self.template.id,
            accountid=self.account.name,
            domainid=self.account.domainid,
            serviceofferingid=self.service_offering.id,
            hypervisor=self.hypervisor,
            rootdisksize=10
            )
        volume1 = list_volumes(
            self.apiclient,
            virtualmachineid = self.virtual_machine.id,
            type = "ROOT",
            listall = True
            )

        Configurations.update(self.apiclient,
            name = "sp.bypass.secondary.storage",
            value = "false")

        snapshot = Snapshot.create(
            self.apiclient,
            volume_id = volume1[0].id,
            account=self.account.name,
            domainid=self.account.domainid,
            )

        snapshot_name = self.getSnapshotName(snapshot)
        self.spapi.snapshotDelete(snapshotName = snapshot_name)


        self.assertIsNotNone(snapshot, "Could not create snapshot")
        self.assertIsInstance(snapshot, Snapshot, "Snapshot is not an instance of Snapshot")

        volume = self.helper.create_custom_disk(
            self.apiclient,
            {"diskname":"StorPoolDisk" },
            account=self.account.name,
            domainid=self.account.domainid,
            zoneid = self.zone.id,
            snapshotid = snapshot.id
            )

        self.assertIsNotNone(volume, "Could not create volume from snapshot")
        self.assertIsInstance(volume, Volume, "Volume is not instance of Volume")
Example #16
    def test_es_1223_apply_algo_to_pods(self):
        """
        @Desc: Test VM creation while "apply.allocation.algorithm.to.pods" is
        set to true
        @Reference: https://issues.apache.org/jira/browse/CLOUDSTACK-4947
        @Steps:
        Step1: Set global configuration "apply.allocation.algorithm.to.pods"
        to true
        Step2: Restart management server
        Step3: Verifying that VM creation is successful
        """
        # Step1:  set global configuration
        # "apply.allocation.algorithm.to.pods" to true
        # Configurations.update(self.apiClient,
        # "apply.allocation.algorithm.to.pods", "true")
        # TODO: restart management server
        if not is_config_suitable(apiclient=self.apiClient,
                                  name='apply.allocation.algorithm.to.pods',
                                  value='true'):
            self.skipTest('apply.allocation.algorithm.to.pods '
                          'should be true. skipping')
        # TODO:Step2: Restart management server
        self.services["virtual_machine"]["zoneid"] = self.zone.id
        self.services["virtual_machine"]["template"] = self.template.id
        # Step3: Verifying that VM creation is successful
        virtual_machine = VirtualMachine.create(
            self.apiClient,
            self.services["virtual_machine2"],
            accountid=self.account.name,
            domainid=self.account.domainid,
            serviceofferingid=self.service_offering.id,
        )
        self.cleanup.append(virtual_machine)
        # Verify VM state
        self.assertEqual(
            virtual_machine.state,
            'Running',
            "Check VM state is Running or not"
        )

        # cleanup: set global configuration
        # "apply.allocation.algorithm.to.pods" back to false
        Configurations.update(
            self.apiClient,
            name="apply.allocation.algorithm.to.pods",
            value="false"
        )
        # TODO:cleanup: Restart management server
        return
Example #18
    def tearDownClass(cls):
        try:
            cls.apiclient = super(TestPublicIp,
                                  cls).getClsTestClient().getApiClient()

            Configurations.update(cls.apiclient,
                                  name=cls.use_system_ips_config_name,
                                  value=cls.use_system_ips_config_value)
            # Cleanup resources used
            cleanup_resources(cls.apiclient, cls._cleanup)

        except Exception as e:
            raise Exception("Warning: Exception during cleanup : %s" % e)

        return
    def test_vms_with_same_name(self):
        """ Test vm deployment with same name

        # 1. Deploy a VM with a particular name from account_1
        # 2. Try to deploy another vm with same name from account_2
        # 3. Verify that second VM deployment fails

        """
        # Step 1
        # Create VM on cluster wide
        configs = Configurations.list(
            self.apiclient,
            name="vm.instancename.flag")
        orig_value = configs[0].value

        if orig_value == "false":
            Configurations.update(self.apiclient,
                                  name="vm.instancename.flag",
                                  value="true"
                                  )

            # Restart management server
            self.RestartServer()
            time.sleep(120)

        self.testdata["small"]["displayname"]="TestName"
        self.testdata["small"]["name"]="TestName"
        VirtualMachine.create(
            self.userapiclient_1,
            self.testdata["small"],
            templateid=self.template.id,
            accountid=self.account_1.name,
            domainid=self.account_1.domainid,
            serviceofferingid=self.service_offering.id,
            zoneid=self.zone.id,
        )

        with self.assertRaises(Exception):
            VirtualMachine.create(
                self.userapiclient_2,
                self.testdata["small"],
                templateid=self.template.id,
                accountid=self.account_2.name,
                domainid=self.account_2.domainid,
                serviceofferingid=self.service_offering.id,
                zoneid=self.zone.id,
            )
        return
Example #20
    def test_vm_sync(self):
        """Test VM Sync

        # Validate the following:
        # vm1 should be running, vm2 should be stopped as power report says PowerOff, vm3 should be stopped as missing from power report
        """

        #wait for vmsync to happen
        ping_interval = Configurations.list(self.apiclient,
                                            name="ping.interval")
        total_duration = int(float(ping_interval[0].value) * 3.2)
        time.sleep(total_duration)

        list_vms = VirtualMachine.list(
            self.apiclient,
            ids=[self.vm1.id, self.vm2.id, self.vm3.id],
            listAll=True)
        self.assertTrue(isinstance(list_vms, list) and len(list_vms) == 3,
                        msg="List VM response is empty")
        for vm in list_vms:
            if vm.id == self.vm1.id:
                self.assertTrue(
                    vm.state == "Running",
                    msg="VM {0} is expected to be in running state".format(
                        vm.name))
            elif vm.id == self.vm2.id or vm.id == self.vm3.id:
                self.assertTrue(
                    vm.state == "Stopped",
                    msg="VM {0} is expected to be in stopped state".format(
                        vm.name))
Example #21
    def setUpClass(cls):
        # We want to fail quicker if there is a failure
        socket.setdefaulttimeout(60)

        cls.testClient = super(TestVPCRedundancy, cls).getClsTestClient()
        cls.api_client = cls.testClient.getApiClient()

        cls.services = Services().services
        # Get Zone, Domain and templates
        cls.domain = get_domain(cls.api_client)
        cls.zone = get_zone(cls.api_client, cls.testClient.getZoneForTests())

        cls.hypervisor = cls.testClient.getHypervisorInfo()

        cls.template = get_test_template(cls.api_client, cls.zone.id,
                                         cls.hypervisor)
        if cls.template == FAILED:
            assert False, "get_test_template() failed to return template"

        cls.services["virtual_machine"]["zoneid"] = cls.zone.id
        cls.services["virtual_machine"]["template"] = cls.template.id

        cls.service_offering = ServiceOffering.create(
            cls.api_client, cls.services["service_offering"])
        cls._cleanup = [cls.service_offering]

        cls.logger = logging.getLogger('TestVPCRedundancy')
        cls.stream_handler = logging.StreamHandler()
        cls.logger.setLevel(logging.DEBUG)
        cls.logger.addHandler(cls.stream_handler)

        cls.advert_int = int(
            Configurations.list(
                cls.api_client,
                name="router.redundant.vrrp.interval")[0].value)
Example #22
    def test_07_snapshot_to_template_bypass_secondary(self):
        ''' Create template from snapshot bypassing secondary storage
        '''
        volume = list_volumes(self.apiclient,
                              virtualmachineid=self.virtual_machine.id)
        snapshot = Snapshot.create(self.apiclient, volume_id=volume[0].id)

        backup_config = list_configurations(self.apiclient,
                                            name="sp.bypass.secondary.storage")
        if (backup_config[0].value == "false"):
            backup_config = Configurations.update(
                self.apiclient,
                name="sp.bypass.secondary.storage",
                value="true")
        self.assertIsNotNone(snapshot, "Could not create snapshot")
        self.assertIsInstance(snapshot, Snapshot,
                              "Snapshot is not an instance of Snapshot")

        template = self.create_template_from_snapshot(self.apiclient,
                                                      self.services,
                                                      snapshotid=snapshot.id)
        virtual_machine = VirtualMachine.create(
            self.apiclient, {"name": "StorPool-%d" % random.randint(0, 100)},
            zoneid=self.zone.id,
            templateid=template.id,
            serviceofferingid=self.service_offering.id,
            hypervisor=self.hypervisor,
            rootdisksize=10)
        ssh_client = virtual_machine.get_ssh_client()
        self.assertIsNotNone(template, "Template is None")
        self.assertIsInstance(template, Template,
                              "Template is instance of template")
        self._cleanup.append(snapshot)
        self._cleanup.append(template)
Example #23
    def execute_internallb_haproxy_tests(self, vpc_offering):

        settings = self.get_lb_stats_settings()

        dummy_port = 90
        network_gw = "10.1.2.1"
        default_visibility = "global"

        # Update global setting if it is not set to our test default
        if settings["visibility"] != default_visibility:
            config_update = Configurations.update(
                self.apiclient,
                "network.loadbalancer.haproxy.stats.visibility",
                default_visibility)
            self.logger.debug(
                "Updated global setting stats haproxy.stats.visibility to %s" %
                (default_visibility))
            settings = self.get_lb_stats_settings()

        # Create and enable network offering
        network_offering_intlb = self.create_and_enable_network_serviceoffering(
            self.services["network_offering_internal_lb"])

        # Create VPC
        vpc = self.create_vpc(vpc_offering)

        # Create network tier with internal lb service enabled
        network_internal_lb = self.create_network_tier("intlb_test02", vpc.id,
                                                       network_gw,
                                                       network_offering_intlb)

        # Create 1 lb vm in internal lb network tier
        vm = self.deployvm_in_network(vpc, network_internal_lb.id)

        # Acquire 1 public ip and attach to the internal lb network tier
        public_ip = self.acquire_publicip(vpc, network_internal_lb)

        # Create an internal loadbalancer in the internal lb network tier
        applb = self.create_internal_loadbalancer(dummy_port, dummy_port,
                                                  "leastconn",
                                                  network_internal_lb.id)

        # Assign the 1 VM to the Internal Load Balancer
        self.logger.debug("Assigning virtual machines to LB: %s" % applb.id)
        try:
            applb.assign(self.apiclient, vms=[vm])
        except Exception as e:
            self.fail(
                "Failed to assign virtual machine(s) to loadbalancer: %s" % e)

        # Create nat rule to access client vm
        nat_rule = self.create_natrule(vpc, vm, "22", "22", public_ip,
                                       network_internal_lb)

        # Verify access to and the contents of the admin stats page on the
        # private address via a vm in the internal lb tier
        stats = self.verify_lb_stats(
            applb.sourceipaddress,
            self.get_ssh_client(vm, nat_rule.ipaddress, 10), settings)
        self.assertTrue(stats, "Failed to verify LB HAProxy stats")
    def test_01_VPN_user_limit(self):
        """VPN remote access user limit tests"""

        # Validate the following
        # prerequisite: change management configuration setting of
        #    remote.access.vpn.user.limit
        # 1. Provision more users than the configured limit.
        #    Provisioning of users beyond the limit should fail.

        self.debug("Fetching the limit for remote access VPN users")
        configs = Configurations.list(
                                     self.apiclient,
                                     name='remote.access.vpn.user.limit',
                                     listall=True)
        self.assertEqual(isinstance(configs, list),
                         True,
                         "List configs should return a valid response")

        limit = int(configs[0].value)

        self.debug("Enabling the VPN access for IP: %s" %
                                            self.public_ip.ipaddress)

        self.create_VPN(self.public_ip)
        self.debug("Creating %s VPN users" % limit)
        for x in range(limit):
            self.create_VPN_Users()

        self.debug("Adding another user exceeding limit for remote VPN users")
        with self.assertRaises(Exception):
            self.create_VPN_Users()
        self.debug("Limit exceeded exception raised!")
        return
Example #25
    def setUpClass(cls):
        # We want to fail quicker if there is a failure
        socket.setdefaulttimeout(60)

        cls.testClient = super(TestVPCRedundancy, cls).getClsTestClient()
        cls.api_client = cls.testClient.getApiClient()

        cls.services = Services().services
        # Get Zone, Domain and templates
        cls.domain = get_domain(cls.api_client)
        cls.zone = get_zone(cls.api_client, cls.testClient.getZoneForTests())

        cls.hypervisor = cls.testClient.getHypervisorInfo()

        cls.template = get_test_template(cls.api_client, cls.zone.id, cls.hypervisor)
        if cls.template == FAILED:
            assert False, "get_test_template() failed to return template"

        cls.services["virtual_machine"]["zoneid"] = cls.zone.id
        cls.services["virtual_machine"]["template"] = cls.template.id

        cls.service_offering = ServiceOffering.create(
            cls.api_client,
            cls.services["service_offering"])
        cls._cleanup = [cls.service_offering]

        cls.logger = logging.getLogger('TestVPCRedundancy')
        cls.stream_handler = logging.StreamHandler()
        cls.logger.setLevel(logging.DEBUG)
        cls.logger.addHandler(cls.stream_handler)

        cls.advert_int = int(Configurations.list(cls.api_client, name="router.redundant.vrrp.interval")[0].value)
    def setUpClass(cls):
        testClient = super(TestVerifyEventsTable, cls).getClsTestClient()
        cls.apiclient = testClient.getApiClient()
        cls.testdata = testClient.getParsedTestDataConfig()

        cls.hypervisor = cls.testClient.getHypervisorInfo()
        # Get Zone, Domain and templates
        cls.domain = get_domain(cls.apiclient)
        cls.zone = get_zone(cls.apiclient, testClient.getZoneForTests())

        cls.template = get_template(
            cls.apiclient,
            cls.zone.id,
            cls.testdata["ostype"])

        cls._cleanup = []

        try:

            cls.unsupportedHypervisor = False
            if cls.hypervisor.lower() in ['hyperv', 'lxc', 'kvm']:
                if cls.hypervisor.lower() == 'kvm':
                    configs = Configurations.list(
                        cls.apiclient,
                        name='kvm.snapshot.enabled'
                    )

                    if configs[0].value == "false":
                        cls.unsupportedHypervisor = True
                else:
                    cls.unsupportedHypervisor = True

                return
            # Create an account
            cls.account = Account.create(
                cls.apiclient,
                cls.testdata["account"],
                domainid=cls.domain.id
            )

            # Create user api client of the account
            cls.userapiclient = testClient.getUserApiClient(
                UserName=cls.account.name,
                DomainName=cls.account.domain
            )
            # Create Service offering
            cls.service_offering = ServiceOffering.create(
                cls.apiclient,
                cls.testdata["service_offering"],
            )

            cls._cleanup = [
                cls.account,
                cls.service_offering,
            ]
        except Exception as e:
            cls.tearDownClass()
            raise e
        return
    def setUpClass(cls):
        cls.testClient = super(TestHostHighAvailability,
                               cls).getClsTestClient()
        cls.api_client = cls.testClient.getApiClient()

        cls.services = Services().services
        # Get Zone, Domain and templates
        cls.domain = get_domain(cls.api_client)
        cls.zone = get_zone(cls.api_client, cls.testClient.getZoneForTests())

        cls.template = get_template(cls.api_client, cls.zone.id,
                                    cls.services["ostype"])
        cls.hypervisor = cls.testClient.getHypervisorInfo()
        if cls.hypervisor.lower() in ['lxc']:
            raise unittest.SkipTest(
                "Template creation from root volume is not supported in LXC")

        clusterWithSufficientHosts = None
        clusters = Cluster.list(cls.api_client, zoneid=cls.zone.id)
        for cluster in clusters:
            cls.hosts = Host.list(cls.api_client,
                                  clusterid=cluster.id,
                                  type="Routing")
            if len(cls.hosts) >= 3:
                clusterWithSufficientHosts = cluster
                break

        if clusterWithSufficientHosts is None:
            raise unittest.SkipTest("No Cluster with 3 hosts found")

        configs = Configurations.list(cls.api_client, name='ha.tag')

        assert isinstance(configs, list), "Config list not\
                retrieved for ha.tag"

        if configs[0].value != "ha":
            raise unittest.SkipTest("Please set the global config\
                    value for ha.tag as 'ha'")

        Host.update(cls.api_client, id=cls.hosts[2].id, hosttags="ha")

        cls.services["virtual_machine"]["zoneid"] = cls.zone.id
        cls.services["virtual_machine"]["template"] = cls.template.id

        cls.service_offering_with_ha = ServiceOffering.create(
            cls.api_client,
            cls.services["service_offering_with_ha"],
            offerha=True)

        cls.service_offering_without_ha = ServiceOffering.create(
            cls.api_client,
            cls.services["service_offering_without_ha"],
            offerha=False)

        cls._cleanup = [
            cls.service_offering_with_ha,
            cls.service_offering_without_ha,
        ]
        return
Example #28
    def test_vms_with_same_name(self):
        """ Test vm deployment with same name

        # 1. Deploy a VM with a particular name from account_1
        # 2. Try to deploy another vm with same name from account_2
        # 3. Verify that second VM deployment fails

        """
        # Step 1
        # Create VM on cluster wide
        configs = Configurations.list(self.apiclient,
                                      name="vm.instancename.flag")
        orig_value = configs[0].value

        if orig_value == "false":
            Configurations.update(self.apiclient,
                                  name="vm.instancename.flag",
                                  value="true")

            # Restart management server
            self.RestartServer()
            time.sleep(120)

        self.testdata["small"]["displayname"] = "TestName"
        self.testdata["small"]["name"] = "TestName"
        VirtualMachine.create(
            self.userapiclient_1,
            self.testdata["small"],
            templateid=self.template.id,
            accountid=self.account_1.name,
            domainid=self.account_1.domainid,
            serviceofferingid=self.service_offering.id,
            zoneid=self.zone.id,
        )

        with self.assertRaises(Exception):
            VirtualMachine.create(
                self.userapiclient_2,
                self.testdata["small"],
                templateid=self.template.id,
                accountid=self.account_2.name,
                domainid=self.account_2.domainid,
                serviceofferingid=self.service_offering.id,
                zoneid=self.zone.id,
            )
        return
 def tearDown(self):
     try:
         self.debug("Cleaning up the resources")
         cleanup_resources(self.apiclient, self.cleanup)
         interval = Configurations.list(
                                 self.apiclient,
                                 name='network.gc.interval'
                                 )
         wait = Configurations.list(
                                 self.apiclient,
                                 name='network.gc.wait'
                                 )
         #time.sleep(int(interval[0].value) + int(wait[0].value))
         self.debug("Cleanup complete!")
     except Exception as e:
         raise Exception("Warning: Exception during cleanup : %s" % e)
     return
Example #30
 def tearDown(self):
     try:
         self.debug("Cleaning up the resources")
         cleanup_resources(self.apiclient, self._cleanup)
         interval = Configurations.list(
                                 self.apiclient,
                                 name='network.gc.interval'
                                 )
         wait = Configurations.list(
                                 self.apiclient,
                                 name='network.gc.wait'
                                 )
         time.sleep(int(interval[0].value) + int(wait[0].value))
         self.debug("Cleanup complete!")
     except Exception as e:
         raise Exception("Warning: Exception during cleanup : %s" % e)
     return
Example #31
def is_config_suitable(apiclient, name, value):
    """
    Check whether the deployment has the expected `value` for the global setting `name`.
    @return: True if the value matches, else False
    """
    configs = Configurations.list(apiclient, name=name)
    assert configs is not None and isinstance(configs, list) and len(configs) > 0
    return configs[0].value == value
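
A typical use of this helper is to guard a test on a required global setting, as Example #16 does. A minimal sketch of that usage inside a test class (the method name is illustrative; the setting name is the one used in that example):

    def test_requires_pod_allocation(self):
        # Skip rather than fail when the deployment is not configured for this scenario
        if not is_config_suitable(apiclient=self.apiclient,
                                  name='apply.allocation.algorithm.to.pods',
                                  value='true'):
            self.skipTest("apply.allocation.algorithm.to.pods should be true; skipping")
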
Example #32
    def get_lb_stats_settings(self):
        self.logger.debug("Retrieving haproxy stats settings")
        settings = {}
        try:
            settings["stats_port"] = Configurations.list(
                self.apiclient, name="network.loadbalancer.haproxy.stats.port")[0].value
            settings["stats_uri"] = Configurations.list(
                self.apiclient, name="network.loadbalancer.haproxy.stats.uri")[0].value
            settings["username"], settings["password"] = Configurations.list(
                self.apiclient, name="network.loadbalancer.haproxy.stats.auth")[0].value.split(":")
            settings["visibility"] = Configurations.list(
                self.apiclient, name="network.loadbalancer.haproxy.stats.visibility")[0].value
            self.logger.debug(settings)
        except Exception as e:
            self.fail("Failed to retrieve stats settings " % e)

        return settings
Example #33
    def get_lb_stats_settings(self):
        self.logger.debug("Retrieving haproxy stats settings")
        settings = { }
        try:
            settings["stats_port"] = Configurations.list(
                self.apiclient, name="network.loadbalancer.haproxy.stats.port")[0].value
            settings["stats_uri"] = Configurations.list(
                self.apiclient, name="network.loadbalancer.haproxy.stats.uri")[0].value
            settings["username"], settings["password"] = Configurations.list(
                self.apiclient, name="network.loadbalancer.haproxy.stats.auth")[0].value.split(":")
            settings["visibility"] = Configurations.list(
                self.apiclient, name="network.loadbalancer.haproxy.stats.visibility")[0].value
            self.logger.debug(settings)
        except Exception as e:
            self.fail("Failed to retrieve stats settings " % e)

        return settings
Example #34
    def test_01_create_ipv6_network_offering(self):
        """Test to create network offering

        # Validate the following:
        # 1. createNetworkOffering should return valid info for new offering
        # 2. The Cloud Database contains the valid information
        """
        Configurations.update(self.apiclient,
            ipv6_offering_config_name,
            "true")
        ipv6_service = self.services["network_offering"]
        ipv6_service["internetprotocol"] = "dualstack"
        network_offering = NetworkOffering.create(
            self.apiclient,
            ipv6_service
        )
        self.cleanup.append(network_offering)

        self.debug("Created Network offering with ID: %s" % network_offering.id)

        list_network_off_response = NetworkOffering.list(self.apiclient,
            id=network_offering.id)
        self.assertEqual(
            isinstance(list_network_off_response, list),
            True,
            "Check list response returns a valid list"
        )
        self.assertNotEqual(
            len(list_network_off_response),
            0,
            "Check Network offering is created"
        )
        network_off_response = list_network_off_response[0]

        self.assertEqual(
            network_off_response.id,
            network_offering.id,
            "Check server id in listNetworkOfferings"
        )
        self.assertEqual(
            network_off_response.internetprotocol.lower(),
            ipv6_service["internetprotocol"].lower(),
            "Check internetprotocol in listNetworkOfferings"
        )
        return
Example #35
 def setUpClass(cls):
     testClient = super(TestCreateIpv6NetworkVpcOffering, cls).getClsTestClient()
     cls.apiclient = testClient.getApiClient()
     cls.services = testClient.getParsedTestDataConfig()
     cls.initial_ipv6_offering_enabled = Configurations.list(
         cls.apiclient,
         name=ipv6_offering_config_name)[0].value
     cls._cleanup = []
     return
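
setUpClass above captures the offering's original value; a matching tearDownClass can put it back. A minimal sketch, mirroring the restore pattern used elsewhere in these examples (ipv6_offering_config_name is the same module-level constant referenced above):

    @classmethod
    def tearDownClass(cls):
        try:
            # Restore the captured value of the IPv6 offering setting
            Configurations.update(cls.apiclient,
                                  ipv6_offering_config_name,
                                  cls.initial_ipv6_offering_enabled)
            cleanup_resources(cls.apiclient, cls._cleanup)
        except Exception as e:
            raise Exception("Warning: Exception during cleanup : %s" % e)
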
Example #36
    def tearDownClass(cls):
        try:
            # Cleanup resources used
            cleanup_resources(cls.api_client, cls._cleanup)

            # Restore original backup framework values
            if cls.backup_enabled == "false":
                Configurations.update(cls.api_client,
                                      'backup.framework.enabled',
                                      value=cls.backup_enabled,
                                      zoneid=cls.zone.id)
            if cls.backup_provider != "dummy":
                Configurations.update(cls.api_client,
                                      'backup.framework.provider.plugin',
                                      value=cls.backup_provider,
                                      zoneid=cls.zone.id)
        except Exception as e:
            raise Exception("Warning: Exception during cleanup : %s" % e)
Example #37
def is_config_suitable(apiclient, name, value):
    """
    Check whether the deployment has the expected `value` for the global setting `name`.
    @return: True if the value matches, else False
    """
    configs = Configurations.list(apiclient, name=name)
    assert (configs is not None and isinstance(configs, list)
            and len(configs) > 0)
    return configs[0].value == value
Example #38
 def tearDown(self):
     try:
         self.debug("Cleaning up the resources")
         #Clean up, terminate the created network offerings
         #cleanup_resources(self.apiclient, self.cleanup)
         interval = Configurations.list(
                                 self.apiclient,
                                 name='network.gc.interval'
                                 )
         wait = Configurations.list(
                                 self.apiclient,
                                 name='network.gc.wait'
                                 )
         # Sleep to ensure that all resources are deleted
         time.sleep(int(interval[0].value) + int(wait[0].value))
         self.debug("Cleanup complete!")
     except Exception as e:
         raise Exception("Warning: Exception during cleanup : %s" % e)
     return
Example #39
    def test_01_test_settings_for_domain(self):
        """
        1. Get the default value for the setting in domain scope
        2. Change the default value to new value
        3. Make sure updated value is same as new value
        4. Reset the config value
        5. Make sure that current value is same as default value
        :return:
        """
        config_name="ldap.basedn"
        #1. Get default value
        configs = Configurations.list(
            self.apiclient,
            name=config_name
        )
        self.assertIsNotNone(configs, "Fail to get domain setting %s " % config_name)

        orig_value = str(configs[0].value)
        new_value = "testing"

        #2. Update to new value
        Configurations.update(
            self.apiclient,
            name=config_name,
            value=new_value,
            domainid=self.domain.id
        )

        configs = Configurations.list(
            self.apiclient,
            name=config_name,
            domainid=self.domain.id
        )
        self.assertIsNotNone(configs, "Fail to get domain setting %s " % config_name)

        #3. validate they are same
        self.assertEqual(new_value,
                         str(configs[0].value),
                         "Failed to set new config value")

        #4. Reset the value
        Configurations.reset(
            self.apiclient,
            name=config_name,
            domainid=self.domain.id
        )

        #5. Make sure its same as original value
        configs = Configurations.list(
            self.apiclient,
            name=config_name,
            domainid=self.domain.id
        )
        self.assertIsNotNone(configs, "Fail to get domain setting %s " % config_name)

        self.assertEqual(orig_value,
                         str(configs[0].value),
                         "Failed to reset the value")
    def setUpClass(cls):
        cls.testClient = super(TestKubernetesSupportedVersion, cls).getClsTestClient()
        cls.apiclient = cls.testClient.getApiClient()
        cls.services = cls.testClient.getParsedTestDataConfig()
        cls.zone = get_zone(cls.apiclient, cls.testClient.getZoneForTests())
        cls.mgtSvrDetails = cls.config.__dict__["mgtSvr"][0].__dict__
        cls.kubernetes_version_iso_url = 'http://download.cloudstack.org/cks/setup-1.16.3.iso'

        cls.initial_configuration_cks_enabled = Configurations.list(cls.apiclient,
                                                                    name="cloud.kubernetes.service.enabled")[0].value
        if cls.initial_configuration_cks_enabled not in ["true", True]:
            cls.debug("Enabling CloudStack Kubernetes Service plugin and restarting management server")
            Configurations.update(cls.apiclient,
                                  "cloud.kubernetes.service.enabled",
                                  "true")
            cls.restartServer()

        cls._cleanup = []
        return
    def test_09_expunge_vm(self):
        """Test destroy(expunge) Virtual Machine
        """
        # Validate the following
        # 1. The listVM command should NOT return this VM any more.

        self.debug("Expunge VM-ID: %s" % self.small_virtual_machine.id)

        cmd = destroyVirtualMachine.destroyVirtualMachineCmd()
        cmd.id = self.small_virtual_machine.id
        self.apiclient.destroyVirtualMachine(cmd)

        config = Configurations.list(
            self.apiclient,
            name='expunge.delay'
        )

        expunge_delay = int(config[0].value)
        time.sleep(expunge_delay * 2)

        # The VM is removed only after the expunge thread runs;
        # wait for up to four cycles of the expunge thread
        config = Configurations.list(
            self.apiclient,
            name='expunge.interval'
        )
        expunge_cycle = int(config[0].value)
        wait_time = expunge_cycle * 4
        while wait_time >= 0:
            list_vm_response = VirtualMachine.list(
                self.apiclient,
                id=self.small_virtual_machine.id
            )
            if not list_vm_response:
                break
            self.debug("Waiting for VM to expunge")
            time.sleep(expunge_cycle)
            wait_time = wait_time - expunge_cycle

        self.debug("listVirtualMachines response: %s" % list_vm_response)

        self.assertEqual(list_vm_response, None, "Expunged virtual machine should not be in listVirtualMachines response")
        return
Example #42
    def test_03_cluste_capacity_check(self):
        """change cpu/mem.overprovisioning.factor at cluster level and
           verify cluster capacity """

        listHost = Host.list(self.apiclient, id=self.deployVmResponse.hostid)
        self.assertEqual(
            validateList(listHost)[0], PASS,
            "check list host for host id %s" % self.deployVmResponse.hostid)

        Configurations.update(self.apiclient,
                              clusterid=listHost[0].clusterid,
                              name="mem.overprovisioning.factor",
                              value=1)
        Configurations.update(self.apiclient,
                              clusterid=listHost[0].clusterid,
                              name="cpu.overprovisioning.factor",
                              value=1)

        time.sleep(self.wait_time)

        capacity = Capacities.list(self.apiclient,
                                   clusterid=listHost[0].clusterid)
        self.assertEqual(
            validateList(capacity)[0], PASS,
            "check list capacity response for cluster id %s" %
            listHost[0].clusterid)
        cpu, mem = capacity_parser(capacity)

        Configurations.update(self.apiclient,
                              clusterid=listHost[0].clusterid,
                              name="mem.overprovisioning.factor",
                              value=2)
        Configurations.update(self.apiclient,
                              clusterid=listHost[0].clusterid,
                              name="cpu.overprovisioning.factor",
                              value=2)

        time.sleep(self.wait_time)

        capacity1 = Capacities.list(self.apiclient,
                                    clusterid=listHost[0].clusterid)
        self.assertEqual(
            validateList(capacity1)[0], PASS,
            "check list capacity response for cluster id %s" %
            listHost[0].clusterid)
        cpu1, mem1 = capacity_parser(capacity1)
        self.assertEqual(2 * cpu[0], cpu1[0], "check total capacity ")
        self.assertEqual(2 * cpu[1], cpu1[1], "check capacity used")
        self.assertEqual(cpu[2], cpu1[2], "check capacity % used")

        self.assertEqual(2 * mem[0], mem1[0], "check mem total capacity ")
        self.assertEqual(2 * mem[1], mem1[1], "check mem capacity used")
        self.assertEqual(mem[2], mem1[2], "check mem capacity % used")
    def test_maxAccountNetworks(self):
        """Test Limit number of guest account specific networks
        """

        # Steps for validation
        # 1. Fetch max.project.networks from configurations
        # 2. Create a project and create networks up to that limit
        # 3. Creating one more network should fail

        self.debug("Creating project with '%s' as admin" % self.account.name)
        # Create project as a domain admin
        project = Project.create(self.apiclient,
                                 self.services["project"],
                                 account=self.account.name,
                                 domainid=self.account.domainid)
        # Cleanup created project at end of test
        self.cleanup.append(project)
        self.debug("Created project with domain admin with ID: %s" %
                   project.id)

        config = Configurations.list(self.apiclient,
                                     name='max.project.networks',
                                     listall=True)
        self.assertEqual(
            isinstance(config, list), True,
            "List configurations hsould have max.project.networks")

        config_value = int(config[0].value)
        self.debug("max.project.networks: %s" % config_value)

        for ctr in range(config_value):
            # Creating network using the network offering created
            self.debug("Creating network with network offering: %s" %
                       self.network_offering.id)
            network = Network.create(
                self.apiclient,
                self.services["network"],
                projectid=project.id,
                networkofferingid=self.network_offering.id,
                zoneid=self.zone.id)
            self.cleanup.append(network)
            self.debug("Created network with ID: %s" % network.id)
        self.debug("Creating network in account already having networks : %s" %
                   config_value)

        with self.assertRaises(Exception):
            network = Network.create(
                self.apiclient,
                self.services["network"],
                projectid=project.id,
                networkofferingid=self.network_offering.id,
                zoneid=self.zone.id)
            self.cleanup.append(network)
        self.debug('Create network failed (as expected)')
        return
Example #44
    def test_01_snapshot_to_template(self):
        ''' Create template from snapshot without bypass secondary storage
        '''
        volume = Volume.list(
            self.apiclient,
            virtualmachineid = self.virtual_machine.id,
            type = "ROOT",
            listall = True,
            )

        Configurations.update(self.apiclient,
            name = "sp.bypass.secondary.storage",
            value = "false")
        snapshot = Snapshot.create(
            self.apiclient,
            volume_id = volume[0].id,
            account=self.account.name,
            domainid=self.account.domainid,
            )
        self.assertIsNotNone(snapshot, "Could not create snapshot")
        self.assertIsInstance(snapshot, Snapshot, "Snapshot is not an instance of Snapshot")

        template = self.helper.create_template_from_snapshot(
            self.apiclient,
            self.services,
            snapshotid = snapshot.id
            )
        virtual_machine = VirtualMachine.create(self.apiclient,
            {"name":"StorPool-%s" % uuid.uuid4() },
            zoneid=self.zone.id,
            accountid=self.account.name,
            domainid=self.account.domainid,
            templateid=template.id,
            serviceofferingid=self.service_offering.id,
            hypervisor=self.hypervisor,
            rootdisksize=10
            )
        ssh_client = virtual_machine.get_ssh_client(reconnect=True)

        self.assertIsNotNone(template, "Template is None")
        self.assertIsInstance(template, Template, "Template is not an instance of Template")
        self._cleanup.append(template)
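The snapshot example above flips "sp.bypass.secondary.storage" but never restores the previous value. A minimal set-and-restore sketch, assuming only the Configurations.list/update calls already used in these examples (temporary_config and the marvin import path are assumptions, not part of the original tests):

from contextlib import contextmanager
from marvin.lib.base import Configurations  # import path assumed from current marvin layouts

@contextmanager
def temporary_config(apiclient, name, value):
    """Set a global setting for the duration of a block, then restore the previous value."""
    previous = Configurations.list(apiclient, name=name)[0].value
    Configurations.update(apiclient, name, value=value)
    try:
        yield
    finally:
        Configurations.update(apiclient, name, value=previous)

# Usage sketch:
#   with temporary_config(self.apiclient, "sp.bypass.secondary.storage", "false"):
#       snapshot = Snapshot.create(...)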
Example #45
0
    def execute_internallb_haproxy_tests(self, vpc_offering):

        settings = self.get_lb_stats_settings()

        dummy_port = 90
        network_gw = "10.1.2.1"
        default_visibility = "global"

        # Update global setting if it is not set to our test default
        if settings["visibility"] != default_visibility:
            config_update = Configurations.update(
                self.apiclient, "network.loadbalancer.haproxy.stats.visibility", default_visibility)
            self.logger.debug(
                "Updated global setting stats haproxy.stats.visibility to %s" % (default_visibility))
            settings = self.get_lb_stats_settings()

        # Create and enable network offering
        network_offering_intlb = self.create_and_enable_network_serviceoffering(
            self.services["network_offering_internal_lb"])

        # Create VPC
        vpc = self.create_vpc(vpc_offering)

        # Create network tier with internal lb service enabled
        network_internal_lb = self.create_network_tier(
            "intlb_test02", vpc.id, network_gw,  network_offering_intlb)

        # Create 1 lb vm in internal lb network tier
        vm = self.deployvm_in_network(vpc, network_internal_lb.id)

        # Acquire 1 public ip and attach to the internal lb network tier
        public_ip = self.acquire_publicip(vpc, network_internal_lb)

        # Create an internal loadbalancer in the internal lb network tier
        applb = self.create_internal_loadbalancer(
            dummy_port, dummy_port, "leastconn", network_internal_lb.id)

        # Assign the 1 VM to the Internal Load Balancer
        self.logger.debug("Assigning virtual machines to LB: %s" % applb.id)
        try:
            applb.assign(self.apiclient, vms=[vm])
        except Exception as e:
            self.fail(
                "Failed to assign virtual machine(s) to loadbalancer: %s" % e)

        # Create nat rule to access client vm
        self.create_natrule(
            vpc, vm, "22", "22", public_ip, network_internal_lb)

        # Verify access to and the contents of the admin stats page on the
        # private address via a vm in the internal lb tier
        stats = self.verify_lb_stats(
            applb.sourceipaddress, self.get_ssh_client(vm, 5), settings)
        self.assertTrue(stats, "Failed to verify LB HAProxy stats")
Example #46
0
    def setUpClass(cls):
        testClient = super(Overcommit, cls).getClsTestClient()
        cls.apiclient = testClient.getApiClient()
        cls.testdata = testClient.getParsedTestDataConfig()
        # Get Zone,Domain and templates
        cls.domain = get_domain(cls.apiclient)
        cls.zone = get_zone(cls.apiclient)
        cls.testdata["mode"] = cls.zone.networktype
        cls.testdata["configurableData"]["password"] = "******"
        cls.hypervisor = testClient.getHypervisorInfo()

        cls.template = get_template(
            cls.apiclient,
            cls.zone.id,
            cls.testdata["ostype"])
        if cls.template == FAILED:
            assert False, (
                "get_template() failed to return template with description %s"
                % cls.testdata["ostype"])
        cls.testdata["template"]["ostypeid"] = cls.template.ostypeid
        list_conf = Configurations.list(cls.apiclient,
                                        name="capacity.check.period"
                                        )
        # capacity.check.period is stored in milliseconds
        cls.wait_time = 5 + int(list_conf[0].value) / 1000
        cls._cleanup = []
        try:
            cls.account = Account.create(cls.apiclient,
                                         cls.testdata["account"],
                                         domainid=cls.domain.id
                                         )
            cls._cleanup.append(cls.account)

            cls.service_offering = ServiceOffering.create(
                cls.apiclient,
                cls.testdata["service_offerings"]["small"])

            cls._cleanup.append(cls.service_offering)

            cls.deployVmResponse = VirtualMachine.create(
                cls.apiclient,
                services=cls.testdata["virtual_machine"],
                accountid=cls.account.name,
                domainid=cls.account.domainid,
                serviceofferingid=cls.service_offering.id,
                templateid=cls.template.id,
                zoneid=cls.zone.id,
            )

        except Exception as e:
            cls.tearDownClass()
            raise e

        return
Example #47
0
    def test_vm_ha(self):
        """Test VM HA

        # Validate the following:
        # VM started on other host in cluster
        """

        #wait for VM to HA
        ping_timeout = Configurations.list(self.apiclient, name="ping.timeout")
        ping_interval = Configurations.list(self.apiclient, name="ping.interval")
        total_duration = int(float(ping_timeout[0].value) * float(ping_interval[0].value))
        time.sleep(total_duration)

        duration = 0
        vm = None
        while duration < total_duration:
            list_vms = VirtualMachine.list(self.apiclient, id=self.virtual_machine.id)
            self.assertTrue(isinstance(list_vms, list) and len(list_vms) == 1, msg = "List VM response was empty")
            vm = list_vms[0]
            if vm.hostid != self.virtual_machine.hostid and vm.state == "Running":
                break
            else:
                time.sleep(10)
                duration = duration + 10

        self.assertEqual(
            vm.id,
            self.virtual_machine.id,
            "VM ids do not match")
        self.assertEqual(
            vm.name,
            self.virtual_machine.name,
            "VM names do not match")
        self.assertEqual(
            vm.state,
            "Running",
            msg="VM is not in Running state")
        self.assertNotEqual(
            vm.hostid,
            self.virtual_machine.hostid,
            msg="VM is not started on another host as part of HA")
Example #48
0
    def get_lb_stats_settings(self):
        self.logger.debug("Retrieving haproxy stats settings")
        settings = {}
        try:
            settings["stats_port"] = Configurations.list(
                self.apiclient, name="network.loadbalancer.haproxy.stats.port")[0].value
            settings["stats_uri"] = Configurations.list(
                self.apiclient, name="network.loadbalancer.haproxy.stats.uri")[0].value
            # Update global setting network.loadbalancer.haproxy.stats.auth to a known value
            haproxy_auth = "admin:password"
            Configurations.update(self.apiclient, "network.loadbalancer.haproxy.stats.auth", haproxy_auth)
            self.logger.debug(
                "Updated global setting stats network.loadbalancer.haproxy.stats.auth to %s" % (haproxy_auth))
            settings["username"], settings["password"] = haproxy_auth.split(":")
            settings["visibility"] = Configurations.list(
                self.apiclient, name="network.loadbalancer.haproxy.stats.visibility")[0].value
            self.logger.debug(settings)
        except Exception as e:
            self.fail("Failed to retrieve stats settings " % e)

        return settings
    def tearDownClass(cls):
        try:
            # Cleanup resources used

            if cls.updateclone:
                Configurations.update(cls.api_client,
                                      "vmware.create.full.clone",
                                      value="false",storageid=cls.storageID)
                Configurations.update(cls.api_client,
                                              "vmware.create.full.clone",
                                              value="false")
                Configurations.update(cls.api_client,
                                              "vmware.root.disk.controller",
                                              value=cls.defaultdiskcontroller)
                StoragePool.update(cls.api_client, id=cls.storageID,
                                   tags="")
                cls.restartServer()

                #Giving 30 seconds to management to warm-up,
                #Experienced failures when trying to deploy a VM exactly when management came up
                time.sleep(30)

            cleanup_resources(cls.api_client, cls._cleanup)
        except Exception as e:
            raise Exception("Warning: Exception during cleanup : %s" % e)
        return
    def test_05_rvpc_multi_tiers(self):
        """ Create a redundant VPC with 3 Tiers, 3 VMs, 3 PF rules"""
        self.logger.debug("Starting test_05_rvpc_multi_tiers")
        self.query_routers()

        network_to_delete_1 = self.create_network(self.services["network_offering"], "10.1.1.1", nr_vms=1, mark_net_cleanup=False)
        self.networks.append(network_to_delete_1)
        self.networks.append(self.create_network(self.services["network_offering_no_lb"], "10.1.2.1", nr_vms=1))
        network_to_delete_2 = self.create_network(self.services["network_offering_no_lb"], "10.1.3.1", nr_vms=1, mark_net_cleanup=False)
        self.networks.append(network_to_delete_2)
        
        self.check_routers_state()
        self.add_nat_rules()
        self.do_vpc_test(False)

        self.destroy_vm(network_to_delete_1)
        network_to_delete_1.get_net().delete(self.apiclient)
        self.networks.remove(network_to_delete_1)

        vrrp_interval = Configurations.list(self.apiclient, name="router.redundant.vrrp.interval")
        
        self.logger.debug("router.redundant.vrrp.interval is ==> %s" % vrrp_interval)

        total_sleep = 10
        if vrrp_interval:
            total_sleep = int(vrrp_interval[0].value) * 4
        else:
            self.logger.debug("Could not retrieve the key 'router.redundant.vrrp.interval'. Sleeping for 10 seconds.")
            
        '''
        Sleep (router.redundant.vrrp.interval * 4) seconds here because removing the first tier (NIC) forces VRRP to reconfigure the interface it uses.
        Due to the configuration change it starts a new election, which can take up to 4 seconds since each router has an
        advertisement interval of 2 seconds.
        '''
        time.sleep(total_sleep)
        self.check_routers_state(status_to_check="MASTER")
        self.do_vpc_test(False)

        self.destroy_vm(network_to_delete_2)
        network_to_delete_2.get_net().delete(self.apiclient)
        self.networks.remove(network_to_delete_2)

        '''
        Sleep for 'total_sleep' seconds because removing/adding an interface restarts keepalived:
        its configuration file changes so that the virtual_ipaddress section gets updated.
        '''
        time.sleep(total_sleep)
        self.check_routers_state(status_to_check="MASTER")
        self.do_vpc_test(False)
    def test_maxAccountNetworks(self):
        """Test Limit number of guest account specific networks
        """

        # Steps for validation
        # 1. Fetch max.project.networks from configurations
        # 2. Create a project and create more networks than max.project.networks
        # 3. The last network creation should fail

        self.debug("Creating project with '%s' as admin" % self.account.name)
        # Create project as a domain admin
        project = Project.create(
            self.apiclient, self.services["project"], account=self.account.name, domainid=self.account.domainid
        )
        # Cleanup created project at end of test
        self.cleanup.append(project)
        self.debug("Created project with domain admin with ID: %s" % project.id)

        config = Configurations.list(self.apiclient, name="max.project.networks", listall=True)
        self.assertEqual(isinstance(config, list), True, "List configurations hsould have max.project.networks")

        config_value = int(config[0].value)
        self.debug("max.project.networks: %s" % config_value)

        for ctr in range(config_value):
            # Creating network using the network offering created
            self.debug("Creating network with network offering: %s" % self.network_offering.id)
            network = Network.create(
                self.apiclient,
                self.services["network"],
                projectid=project.id,
                networkofferingid=self.network_offering.id,
                zoneid=self.zone.id,
            )
            self.debug("Created network with ID: %s" % network.id)
        self.debug("Creating network in account already having networks : %s" % config_value)

        with self.assertRaises(Exception):
            Network.create(
                self.apiclient,
                self.services["network"],
                projectid=project.id,
                networkofferingid=self.network_offering.id,
                zoneid=self.zone.id,
            )
        self.debug("Create network failed (as expected)")
        return
Example #52
0
    def test_vm_sync(self):
        """Test VM Sync

        # Validate the following:
        # vm1 should be running, vm2 should be stopped as power report says PowerOff, vm3 should be stopped as missing from power report
        """

        # wait for vmsync to happen
        ping_interval = Configurations.list(self.apiclient, name="ping.interval")
        total_duration = int(float(ping_interval[0].value) * 3.2)
        time.sleep(total_duration)

        list_vms = VirtualMachine.list(self.apiclient, ids=[self.vm1.id, self.vm2.id, self.vm3.id], listAll=True)
        self.assertTrue(isinstance(list_vms, list) and len(list_vms) == 3, msg="List VM response is empty")
        for vm in list_vms:
            if vm.id == self.vm1.id:
                self.assertTrue(vm.state == "Running", msg="VM {0} is expected to be in running state".format(vm.name))
            elif vm.id == self.vm2.id or vm.id == self.vm3.id:
                self.assertTrue(vm.state == "Stopped", msg="VM {0} is expected to be in stopped state".format(vm.name))
    def tearDownClass(cls):
        try:
            # Cleanup resources used

            if cls.updateclone:
                Configurations.update(cls.api_client,
                                              "vmware.root.disk.controller",
                                              value=cls.defaultdiskcontroller)
                Configurations.update(cls.api_client,
                                              "vmware.create.full.clone",
                                              value="false")
                Configurations.update(cls.api_client,
                                      "vmware.create.full.clone",
                                      value="false", storageid=cls.storageID)
                if cls.storageID:
                    StoragePool.update(cls.api_client, id=cls.storageID,
                                    tags="")

            cleanup_resources(cls.api_client, cls._cleanup)
        except Exception as e:
            raise Exception("Warning: Exception during cleanup : %s" % e)
        return
    def test_network_gc(self):
        """Test network garbage collection with RVR
        """

        # Steps to validate
        # 1. createNetwork using network offering for redundant virtual router
        # 2. listRouters in above network
        # 3. deployVM in above user account in the created network
        # 4. stop the running user VM
        # 5. wait for network.gc time
        # 6. listRouters
        # 7. start the routers MASTER and BACK
        # 8. wait for network.gc time and listRouters
        # 9. delete the account

        # Creating network using the network offering created
        self.debug("Creating network with network offering: %s" %
                                                    self.network_offering.id)
        network = Network.create(
                                self.apiclient,
                                self.services["network"],
                                accountid=self.account.name,
                                domainid=self.account.domainid,
                                networkofferingid=self.network_offering.id,
                                zoneid=self.zone.id
                                )
        self.debug("Created network with ID: %s" % network.id)

        networks = Network.list(
                                self.apiclient,
                                id=network.id,
                                listall=True
                                )
        self.assertEqual(
            isinstance(networks, list),
            True,
            "List networks should return a valid response for created network"
             )
        nw_response = networks[0]

        self.debug("Network state: %s" % nw_response.state)
        self.assertEqual(
                    nw_response.state,
                    "Allocated",
                    "The network should be in allocated state after creation"
                    )

        self.debug("Listing routers for network: %s" % network.name)
        routers = Router.list(
                              self.apiclient,
                              networkid=network.id,
                              listall=True
                              )
        self.assertEqual(
            routers,
            None,
            "Routers should not be spawned when network is in allocated state"
            )

        self.debug("Deploying VM in account: %s" % self.account.name)

        # Spawn an instance in that network
        virtual_machine = VirtualMachine.create(
                                  self.apiclient,
                                  self.services["virtual_machine"],
                                  accountid=self.account.name,
                                  domainid=self.account.domainid,
                                  serviceofferingid=self.service_offering.id,
                                  networkids=[str(network.id)]
                                  )
        self.debug("Deployed VM in network: %s" % network.id)

        vms = VirtualMachine.list(
                                  self.apiclient,
                                  id=virtual_machine.id,
                                  listall=True
                                  )
        self.assertEqual(
                         isinstance(vms, list),
                         True,
                         "List Vms should return a valid list"
                         )
        vm = vms[0]
        self.assertEqual(
                         vm.state,
                         "Running",
                         "Vm should be in running state after deployment"
                         )

        self.debug("Listing routers for network: %s" % network.name)
        routers = Router.list(
                              self.apiclient,
                              networkid=network.id,
                              listall=True
                              )
        self.assertEqual(
                    isinstance(routers, list),
                    True,
                    "list router should return Master and backup routers"
                    )
        self.assertEqual(
                    len(routers),
                    2,
                    "Length of the list router should be 2 (Backup & master)"
                    )

        self.debug("Stopping the user VM: %s" % virtual_machine.name)

        try:
            virtual_machine.stop(self.apiclient)
        except Exception as e:
            self.fail("Failed to stop guest Vm: %s - %s" %
                                            (virtual_machine.name, e))

        interval = Configurations.list(
                                    self.apiclient,
                                    name='network.gc.interval'
                                    )
        delay = int(interval[0].value)
        interval = Configurations.list(
                                    self.apiclient,
                                    name='network.gc.wait'
                                    )
        exp = int(interval[0].value)

        self.debug("Sleeping for network gc wait + interval time")
        # Sleep to ensure that all resources are deleted
        time.sleep((delay + exp) * 2)

        routers = Router.list(
                              self.apiclient,
                              networkid=network.id,
                              listall=True
                              )
        self.assertEqual(
                    isinstance(routers, list),
                    True,
                    "list router should return Master and backup routers"
                    )
        for router in routers:
            self.assertEqual(
                             router.state,
                             "Stopped",
                             "Router should be in stopped state"
                             )
            self.debug("Starting the stopped router again")
            cmd = startRouter.startRouterCmd()
            cmd.id = router.id
            self.apiclient.startRouter(cmd)

        routers = Router.list(
                              self.apiclient,
                              networkid=network.id,
                              listall=True
                              )
        self.assertEqual(
                    isinstance(routers, list),
                    True,
                    "list router should return Master and backup routers"
                    )
        for router in routers:
            self.assertEqual(
                             router.state,
                             "Running",
                             "Router should be in running state"
                             )

        self.debug("Sleeping for network gc wait + interval time")
        # Sleep to ensure that all resources are deleted
        time.sleep((delay + exp) * 3)

        routers = Router.list(
                              self.apiclient,
                              networkid=network.id,
                              listall=True
                              )
        self.assertEqual(
                    isinstance(routers, list),
                    True,
                    "list router should return Master and backup routers"
                    )
        for router in routers:
            self.assertEqual(
                             router.state,
                             "Stopped",
                             "Router should be in stopped state"
                             )
        return
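Both network GC examples in this collection combine "network.gc.interval" and "network.gc.wait" before sleeping. A compact sketch of that computation, assuming only the Configurations.list call used above (network_gc_sleep_seconds is an illustrative name):

    def network_gc_sleep_seconds(self, cycles=2, default=120):
        """Return cycles * (network.gc.interval + network.gc.wait), or a fallback when the keys are missing."""
        interval = Configurations.list(self.apiclient, name="network.gc.interval")
        wait = Configurations.list(self.apiclient, name="network.gc.wait")
        if interval and wait:
            return cycles * (int(interval[0].value) + int(wait[0].value))
        return cycles * default

    # Usage sketch:
    #   time.sleep(self.network_gc_sleep_seconds(cycles=2))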
Example #55
0
    def test_01_create_tier_Vmxnet3(self):
        """
            Test to create vpc tier with nic type as Vmxnet3
            #1.Set global setting parameter "vmware.systemvm.nic.device.type"
            to "Vmxnet3"
            #2.Create VPC
            #3.Create one tier
            #4.Deploy one guest vm in the tier created in step3
        """
        if self.hypervisor.lower() not in ['vmware']:
            self.skipTest("This test can only run on vmware setup")

        nic_types = Configurations.list(
            self.apiclient,
            name="vmware.systemvm.nic.device.type"
        )
        self.assertEqual(validateList(nic_types)[0], PASS, "Invalid list config")
        nic_type = nic_types[0].value
        reset = False
        if nic_type.lower() != "vmxnet3":
            self.updateConfigurAndRestart("vmware.systemvm.nic.device.type", "Vmxnet3")
            reset = True

        self.services["vpc"]["cidr"] = "10.1.1.1/16"
        self.debug("creating a VPC network in the account: %s" %
                   self.account.name)
        try:
            vpc = VPC.create(
                self.apiclient,
                self.services["vpc"],
                vpcofferingid=self.vpc_off.id,
                zoneid=self.zone.id,
                account=self.account.name,
                domainid=self.account.domainid
            )
            vpc_res = VPC.list(self.apiclient, id=vpc.id)
            self.assertEqual(validateList(vpc_res)[0], PASS, "Invalid response from listvpc")

            self.network_offering = NetworkOffering.create(
                self.apiclient,
                self.services["network_offering"],
                conservemode=False
            )
            # Enable Network offering
            self.network_offering.update(self.apiclient, state='Enabled')
            self.cleanup.append(self.network_offering)

            gateway = vpc.cidr.split('/')[0]
            # Split the cidr to retrieve gateway
            # for eg. cidr = 10.0.0.1/24
            # Gateway = 10.0.0.1
            # Creating network using the network offering created
            self.debug("Creating network with network offering: %s" %
                       self.network_offering.id)
            network = Network.create(
                self.apiclient,
                self.services["network"],
                accountid=self.account.name,
                domainid=self.account.domainid,
                networkofferingid=self.network_offering.id,
                zoneid=self.zone.id,
                gateway=gateway,
                vpcid=vpc.id
            )
            self.debug("Created network with ID: %s" % network.id)
            vm = VirtualMachine.create(
                self.apiclient,
                self.services["virtual_machine"],
                accountid=self.account.name,
                domainid=self.account.domainid,
                serviceofferingid=self.service_offering.id,
                networkids=[str(network.id)]
            )
            self.assertIsNotNone(vm, "VM creation failed")
            self.debug("Deployed VM in network: %s" % network.id)
            vm_res = VirtualMachine.list(self.apiclient, id=vm.id)
            self.assertEqual(
                validateList(vm_res)[0],
                PASS,
                "list vm returned invalid response"
            )
            vr_res = Router.list(
                self.apiclient,
                vpcid=vpc.id,
                listall="true"
            )
            self.assertEqual(validateList(vr_res)[0], PASS, "list vrs failed for vpc")
            vr_linklocal_ip = vr_res[0].linklocalip
            result = get_process_status(
                self.apiclient.connection.mgtSvr,
                22,
                self.apiclient.connection.user,
                self.apiclient.connection.passwd,
                vr_linklocal_ip,
                'lspci | grep "Ethernet controller"',
                hypervisor=self.hypervisor
            )
            self.assertEqual(
                validateList(result)[0],
                PASS,
                "We didn't find NICS with adapter type VMXNET3"
            )
            reg = re.compile("VMware VMXNET3")
            count = 0
            for line in result:
                if reg.search(line):
                    count += 1
            self.assertEqual(
                count,
                3,
                "Not all NICs on VR are of type VMXNET3"
            )
        except Exception as e:
            self.fail("NIC creation failed for vpc tier with systemvm nic \
                        adapter type as Vmxnet3: %s" % e)
        finally:
            if reset:
                self.updateConfigurAndRestart("vmware.systemvm.nic.device.type", nic_type)
        return
    def setUpClass(cls):
        cls.testClient = super(TestResizeVolume, cls).getClsTestClient()
        cls.api_client = cls.testClient.getApiClient()
        cls.hypervisor = (cls.testClient.getHypervisorInfo()).lower()
        cls.storageID = None
        # Fill services from the external config file
        cls.services = cls.testClient.getParsedTestDataConfig()
        # Get Zone, Domain and templates
        cls.domain = get_domain(cls.api_client)
        cls.zone = get_zone(
            cls.api_client,
            cls.testClient.getZoneForTests())
        cls.services["mode"] = cls.zone.networktype
        cls._cleanup = []
        cls.unsupportedStorageType = False
        cls.unsupportedHypervisorType = False
        cls.updateclone = False
        if cls.hypervisor not in ["xenserver", "kvm", "vmware"]:
            cls.unsupportedHypervisorType = True
            return
        cls.template = get_template(
            cls.api_client,
            cls.zone.id
        )
        cls.services["virtual_machine"]["zoneid"] = cls.zone.id
        cls.services["virtual_machine"]["template"] = cls.template.id
        cls.services["volume"]["zoneid"] = cls.zone.id
        try:
            cls.parent_domain = Domain.create(cls.api_client,
                                              services=cls.services[
                                                  "domain"],
                                              parentdomainid=cls.domain.id)
            cls.parentd_admin = Account.create(cls.api_client,
                                               cls.services["account"],
                                               admin=True,
                                               domainid=cls.parent_domain.id)
            cls._cleanup.append(cls.parentd_admin)
            cls._cleanup.append(cls.parent_domain)
            list_pool_resp = list_storage_pools(cls.api_client,
                                                account=cls.parentd_admin.name,
                                                domainid=cls.parent_domain.id)
            res = validateList(list_pool_resp)
            if res[2] == INVALID_INPUT:
                raise Exception("Failed to list storage pools - no storage pools found")
            # Identify the storage pool type and set vmware full clone to true if storage is VMFS
            if cls.hypervisor == 'vmware':
                for strpool in list_pool_resp:
                    if strpool.type.lower() == "vmfs" or strpool.type.lower() == "networkfilesystem":
                        list_config_storage_response = list_configurations(
                            cls.api_client,
                            name="vmware.create.full.clone",
                            storageid=strpool.id)
                        res = validateList(list_config_storage_response)
                        if res[2] == INVALID_INPUT:
                            raise Exception("Failed to list configurations")
                        if list_config_storage_response[0].value == "false":
                            Configurations.update(cls.api_client,
                                                  "vmware.create.full.clone",
                                                  value="true",storageid=strpool.id)
                            cls.updateclone = True
                            StoragePool.update(cls.api_client,id=strpool.id,tags="scsi")
                            cls.storageID = strpool.id
                            cls.unsupportedStorageType = False
                            break
                    else:
                        cls.unsupportedStorageType = True
            # Creating service offering with normal config
            cls.service_offering = ServiceOffering.create(
                cls.api_client,
                cls.services["service_offering"])
            cls.services_offering_vmware = ServiceOffering.create(
                cls.api_client, cls.services["service_offering"], tags="scsi")
            cls._cleanup.extend([cls.service_offering, cls.services_offering_vmware])

        except Exception as e:
            cls.tearDownClass()
            raise e
        return
    def test_01_VRServiceFailureAlerting(self):

        if self.zone.networktype == "Basic":
            list_router_response = list_routers(
                self.apiclient,
                listall="true"
            )
        else:
            list_router_response = list_routers(
                self.apiclient,
                account=self.account.name,
                domainid=self.account.domainid
            )
        self.assertEqual(
            isinstance(list_router_response, list),
            True,
            "Check list response returns a valid list"
        )
        router = list_router_response[0]

        self.debug("Router ID: %s, state: %s" % (router.id, router.state))

        self.assertEqual(
            router.state,
            'Running',
            "Check list router response for router state"
        )

        alertSubject = "Monitoring Service on VR " + router.name

        if self.hypervisor.lower() in ('vmware', 'hyperv'):
            result = get_process_status(
                self.apiclient.connection.mgtSvr,
                22,
                self.apiclient.connection.user,
                self.apiclient.connection.passwd,
                router.linklocalip,
                "service dnsmasq stop",
                hypervisor=self.hypervisor
            )
        else:
            try:
                hosts = list_hosts(
                    self.apiclient,
                    zoneid=router.zoneid,
                    type='Routing',
                    state='Up',
                    id=router.hostid
                )

                self.assertEqual(
                    isinstance(hosts, list),
                    True,
                    "Check list host returns a valid list"
                )

                host = hosts[0]
                result = get_process_status(
                    host.ipaddress,
                    22,
                    self.services["configurableData"]["host"]["username"],
                    self.services["configurableData"]["host"]["password"],
                    router.linklocalip,
                    "service apache2 stop"
                )

            except Exception as e:
                raise Exception("Exception raised in getting host\
                        credentials: %s " % e)

        res = str(result)
        self.debug("apache process status: %s" % res)

        configs = Configurations.list(
                self.apiclient,
                name='router.alerts.check.interval'
            )

        # Wait for router.alerts.check.interval plus a 10 minute buffer
        # to be on the safer side
        waitingPeriod = (
                int(configs[0].value) + 600)  # in seconds

        time.sleep(waitingPeriod)
        # During this period the monitoring service on the VR should restart
        # the stopped service (router.alerts.check.interval defaults to 30 minutes)

        qresultset = self.dbclient.execute(
            "select id from alert where subject = '%s' ORDER BY id DESC LIMIT 1;" %
            str(alertSubject))
        self.assertNotEqual(
            len(qresultset),
            0,
            "Check DB Query result set"
        )
        return
    def test_deployVmWithCustomDisk(self):
        """Test custom disk sizes beyond range
        """
        # Steps for validation
        # 1. listConfigurations - custom.diskoffering.size.min
        #    and custom.diskoffering.size.max
        # 2. deployVm with custom disk offering size < min
        # 3. deployVm with custom disk offering min< size < max
        # 4. deployVm with custom disk offering size > max
        # Validate the following
        # 2. and 4. of deploy VM should fail.
        #    Only case 3. should succeed.
        #    cleanup all created data disks from the account

        config = Configurations.list(
            self.apiclient,
            name="custom.diskoffering.size.min"
        )
        self.assertEqual(
            isinstance(config, list),
            True,
            "custom.diskoffering.size.min should be present in global config"
        )
        # minimum size of custom disk (in GBs)
        min_size = int(config[0].value)
        self.debug("custom.diskoffering.size.min: %s" % min_size)

        config = Configurations.list(
            self.apiclient,
            name="custom.diskoffering.size.max"
        )
        self.assertEqual(
            isinstance(config, list),
            True,
            "custom.diskoffering.size.min should be present in global config"
        )
        # maximum size of custom disk (in GBs)
        max_size = int(config[0].value)
        self.debug("custom.diskoffering.size.max: %s" % max_size)

        self.debug("Creating a volume with size less than min cust disk size")
        self.services["custom_volume"]["customdisksize"] = (min_size - 1)
        self.services["custom_volume"]["zoneid"] = self.zone.id
        with self.assertRaises(Exception):
            Volume.create_custom_disk(
                self.apiclient,
                self.services["custom_volume"],
                account=self.account.name,
                domainid=self.account.domainid,
                diskofferingid=self.disk_offering.id
            )
        self.debug("Create volume failed!")

        self.debug("Creating a volume with size more than max cust disk size")
        self.services["custom_volume"]["customdisksize"] = (max_size + 1)
        with self.assertRaises(Exception):
            Volume.create_custom_disk(
                self.apiclient,
                self.services["custom_volume"],
                account=self.account.name,
                domainid=self.account.domainid,
                diskofferingid=self.disk_offering.id
            )
        self.debug("Create volume failed!")

        self.debug("Creating a volume with size more than min cust disk " +
                   "but less than max cust disk size"
                   )
        self.services["custom_volume"]["customdisksize"] = (min_size + 1)
        try:
            Volume.create_custom_disk(
                self.apiclient,
                self.services["custom_volume"],
                account=self.account.name,
                domainid=self.account.domainid,
                diskofferingid=self.disk_offering.id
            )
            self.debug("Create volume of cust disk size succeeded")
        except Exception as e:
            self.fail("Create volume failed with exception: %s" % e)
        return
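The custom-disk example reads the two size bounds with nearly identical blocks. A compact sketch that fetches both and checks a requested size, assuming only the Configurations.list call used above (custom_disk_size_in_range is an illustrative name):

    def custom_disk_size_in_range(self, size_gb):
        """True when size_gb lies within [custom.diskoffering.size.min, custom.diskoffering.size.max]."""
        min_size = int(Configurations.list(self.apiclient, name="custom.diskoffering.size.min")[0].value)
        max_size = int(Configurations.list(self.apiclient, name="custom.diskoffering.size.max")[0].value)
        return min_size <= size_gb <= max_size

    # Usage sketch:
    #   self.assertFalse(self.custom_disk_size_in_range(min_size - 1))
    #   self.assertTrue(self.custom_disk_size_in_range(min_size + 1))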
Example #59
0
    def test_01_custom_hostname_instancename_false(self):
        """ Verify custom hostname for the instance when
            vm.instancename.flag=false
        """

        # Validate the following
        # 1. Set the vm.instancename.flag to false. Hostname and displayname
        #    should be UUID
        # 2. Give the user provided display name. Internal name should be
        #    i-<userid>-<vmid>-instance name (It should not contain display name)

        if not is_config_suitable(apiclient=self.apiclient, name="vm.instancename.flag", value="false"):
            self.skipTest("vm.instancename.flag should be false. skipping")

        self.debug("Deploying VM in account: %s" % self.account.name)
        # Spawn an instance in that network
        virtual_machine = VirtualMachine.create(
            self.apiclient,
            self.services["virtual_machine"],
            accountid=self.account.name,
            domainid=self.account.domainid,
            serviceofferingid=self.service_offering.id,
        )
        self.debug("Checking if the virtual machine is created properly or not?")
        vms = VirtualMachine.list(self.apiclient, id=virtual_machine.id, listall=True)

        self.assertEqual(isinstance(vms, list), True, "List vms should retuen a valid name")
        vm = vms[0]
        self.assertEqual(vm.state, "Running", "Vm state should be running after deployment")
        self.debug(
            "vm.displayname: %s, original: %s" % (vm.displayname, self.services["virtual_machine"]["displayname"])
        )
        self.assertEqual(
            vm.displayname,
            self.services["virtual_machine"]["displayname"],
            "Vm display name should match the given name",
        )

        # Fetch account ID and VMID from database to check internal name
        self.debug("select id from account where uuid = '%s';" % self.account.id)

        qresultset = self.dbclient.execute("select id from account where uuid = '%s';" % self.account.id)
        self.assertEqual(isinstance(qresultset, list), True, "Check DB query result set for valid data")

        self.assertNotEqual(len(qresultset), 0, "Check DB Query result set")
        qresult = qresultset[0]
        account_id = qresult[0]

        self.debug("select id from vm_instance where uuid = '%s';" % vm.id)

        qresultset = self.dbclient.execute("select id from vm_instance where uuid = '%s';" % vm.id)

        self.assertEqual(isinstance(qresultset, list), True, "Check DB query result set for valid data")

        self.assertNotEqual(len(qresultset), 0, "Check DB Query result set")
        qresult = qresultset[0]
        self.debug("Query result: %s" % qresult)
        vmid = qresult[0]

        self.debug("Fetching the global config value for instance.name")
        configs = Configurations.list(self.apiclient, name="instance.name", listall=True)

        config = configs[0]
        self.debug("Config value : %s" % config)
        instance_name = config.value
        self.debug("Instance.name: %s" % instance_name)

        # internal Name = i-<user ID>-<VM ID>-<instance_name>
        # internal_name = "i-" + str(account_id) + "-" + str(vmid) + "-" + instance_name
        internal_name = "i-%s-%s-%s" % (str(account_id), str(vmid), instance_name)
        self.debug("Internal name: %s" % internal_name)
        self.debug("vm instance name : %s" % vm.instancename)
        self.assertEqual(vm.instancename, internal_name, "VM internal name should match with that of the format")
        return
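The test above runs the same "select id ... where uuid = ..." query twice to map API UUIDs to internal database ids. A small helper sketch under that assumption, using the self.dbclient.execute call already present in these examples (db_id_for_uuid and its table parameter are illustrative):

    def db_id_for_uuid(self, table, uuid):
        """Return the internal numeric id of a row identified by its uuid."""
        qresultset = self.dbclient.execute("select id from %s where uuid = '%s';" % (table, uuid))
        self.assertNotEqual(len(qresultset), 0, "Check DB Query result set")
        return qresultset[0][0]

    # Usage sketch:
    #   account_id = self.db_id_for_uuid("account", self.account.id)
    #   vmid = self.db_id_for_uuid("vm_instance", vm.id)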