Example no. 1
    def test_04_rvpc_network_garbage_collector_nics(self):
        """ Create a redundant VPC with 1 Tier, 1 VM, 1 ACL, 1 PF and test Network GC Nics"""
        self.logger.debug(
            "Starting test_04_rvpc_network_garbage_collector_nics")
        self.query_routers()
        self.networks.append(
            self.create_network(self.services["network_offering"],
                                "10.1.1.1",
                                nr_vms=1))
        self.check_routers_state()
        self.add_nat_rules()
        self.do_vpc_test(False)

        self.stop_vm()

        gc_wait = Configurations.list(self.apiclient, name="network.gc.wait")
        gc_interval = Configurations.list(self.apiclient,
                                          name="network.gc.interval")

        self.logger.debug("network.gc.wait is ==> %s" % gc_wait)
        self.logger.debug("network.gc.interval is ==> %s" % gc_interval)

        total_sleep = 120
        if gc_wait and gc_interval:
            total_sleep = int(gc_wait[0].value) + int(gc_interval[0].value)
        else:
            self.logger.debug(
                "Could not retrieve the keys 'network.gc.interval' and 'network.gc.wait'. Sleeping for 2 minutes."
            )

        time.sleep(total_sleep * 3)

        self.check_routers_state(status_to_check="BACKUP", expected_count=2)
        self.start_vm()
        self.check_routers_state(status_to_check="MASTER")
Example no. 2
    def test_04_rvpc_network_garbage_collector_nics(self):
        """ Create a redundant VPC with 1 Tier, 1 VM, 1 ACL, 1 PF and test Network GC Nics"""
        self.logger.debug("Starting test_04_rvpc_network_garbage_collector_nics")
        self.query_routers()
        self.networks.append(self.create_network(self.services["network_offering"], "10.1.1.1", nr_vms=1))
        self.check_routers_state()
        self.add_nat_rules()
        self.do_vpc_test(False)

        self.stop_vm()

        gc_wait = Configurations.list(self.apiclient, name="network.gc.wait")
        gc_interval = Configurations.list(self.apiclient, name="network.gc.interval")
        
        self.logger.debug("network.gc.wait is ==> %s" % gc_wait)
        self.logger.debug("network.gc.interval is ==> %s" % gc_wait)

        total_sleep = 120
        if gc_wait and gc_interval:
            total_sleep = int(gc_wait[0].value) + int(gc_interval[0].value)
        else:
            self.logger.debug("Could not retrieve the keys 'network.gc.interval' and 'network.gc.wait'. Sleeping for 2 minutes.")

        time.sleep(total_sleep * 3)

        self.check_routers_interface(interface_to_check="eth2", expected_exists=False)
        self.start_vm()
        self.check_routers_state(status_to_check="MASTER")
        self.check_routers_interface(interface_to_check="eth2", expected_exists=True)
Example no. 3
    def get_lb_stats_settings(self):
        self.logger.debug("Retrieving haproxy stats settings")
        settings = {}
        try:
            settings["stats_port"] = Configurations.list(
                self.apiclient,
                name="network.loadbalancer.haproxy.stats.port")[0].value
            settings["stats_uri"] = Configurations.list(
                self.apiclient,
                name="network.loadbalancer.haproxy.stats.uri")[0].value
            # Update global setting network.loadbalancer.haproxy.stats.auth to a known value
            haproxy_auth = "admin:password"
            Configurations.update(self.apiclient,
                                  "network.loadbalancer.haproxy.stats.auth",
                                  haproxy_auth)
            self.logger.debug(
                "Updated global setting stats network.loadbalancer.haproxy.stats.auth to %s"
                % (haproxy_auth))
            settings["username"], settings["password"] = haproxy_auth.split(
                ":")
            settings["visibility"] = Configurations.list(
                self.apiclient,
                name="network.loadbalancer.haproxy.stats.visibility")[0].value
            self.logger.debug(settings)
        except Exception as e:
            self.fail("Failed to retrieve stats settings " % e)

        return settings
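For context, a typical consumer of these settings builds the HAProxy stats URL from the returned values and queries it with the configured credentials. The sketch below is an illustration only, not part of the original example; the helper name, the router_public_ip parameter and the use of the requests library are assumptions.

    def check_lb_stats_page(self, router_public_ip):
        # Hypothetical helper: fetch the HAProxy stats page using the settings above.
        import requests

        settings = self.get_lb_stats_settings()
        url = "http://%s:%s%s" % (router_public_ip,
                                  settings["stats_port"],
                                  settings["stats_uri"])
        response = requests.get(url,
                                auth=(settings["username"], settings["password"]),
                                timeout=10)
        self.assertEqual(response.status_code, 200,
                         "HAProxy stats page not reachable at %s" % url)
        return response.text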
Example no. 4
    def test_09_expunge_vm(self):
        """Test destroy(expunge) Virtual Machine
        """
        # Validate the following
        # 1. listVM command should NOT  return this VM any more.

        self.debug("Expunge VM-ID: %s" % self.small_virtual_machine.id)

        cmd = destroyVirtualMachine.destroyVirtualMachineCmd()
        cmd.id = self.small_virtual_machine.id
        self.apiclient.destroyVirtualMachine(cmd)

        config = Configurations.list(self.apiclient, name="expunge.delay")

        expunge_delay = int(config[0].value)
        time.sleep(expunge_delay * 2)

        # The VM should be destroyed once the expunge thread has run
        # Wait for two cycles of the expunge thread
        config = Configurations.list(self.apiclient, name="expunge.interval")
        expunge_cycle = int(config[0].value)
        wait_time = expunge_cycle * 2
        while wait_time >= 0:
            list_vm_response = VirtualMachine.list(self.apiclient, id=self.small_virtual_machine.id)
            if list_vm_response:
                time.sleep(expunge_cycle)
                wait_time = wait_time - expunge_cycle
            else:
                break

        self.debug("listVirtualMachines response: %s" % list_vm_response)

        self.assertEqual(list_vm_response, None, "Check Expunged virtual machine is in listVirtualMachines response")
        return
Example no. 5
    def test_01_test_settings_for_domain(self):
        """
        1. Get the default value for the setting in domain scope
        2. Change the default value to new value
        3. Make sure updated value is same as new value
        4. Reset the config value
        5. Make sure that current value is same as default value
        :return:
        """
        config_name="ldap.basedn"
        #1. Get default value
        configs = Configurations.list(
            self.apiclient,
            name=config_name
        )
        self.assertIsNotNone(configs, "Fail to get domain setting %s " % config_name)

        orig_value = str(configs[0].value)
        new_value = "testing"

        #2. Update to new value
        Configurations.update(
            self.apiclient,
            name=config_name,
            value=new_value,
            domainid=self.domain.id
        )

        configs = Configurations.list(
            self.apiclient,
            name=config_name,
            domainid=self.domain.id
        )
        self.assertIsNotNone(configs, "Fail to get domain setting %s " % config_name)

        #3. validate they are same
        self.assertEqual(new_value,
                         str(configs[0].value),
                         "Failed to set new config value")

        #4. Reset the value
        Configurations.reset(
            self.apiclient,
            name=config_name,
            domainid=self.domain.id
        )

        #5. Make sure its same as original value
        configs = Configurations.list(
            self.apiclient,
            name=config_name,
            domainid=self.domain.id
        )
        self.assertIsNotNone(configs, "Fail to get domain setting %s " % config_name)

        self.assertEqual(orig_value,
                         str(configs[0].value),
                         "Failed to reset the value")
Example no. 6
    def test_05_test_settings_for_zone(self):
        """
        1. Get the default value for the setting in zone scope
        2. Change the default value to new value
        3. Make sure updated value is same as new value
        4. Reset the config value
        5. Make sure that current value is same as default value
        :return:
        """
        config_name = "enable.dynamic.scale.vm"
        configs = Configurations.list(
            self.apiclient,
            name=config_name,
            zoneid=self.zone.id
        )
        self.assertIsNotNone(configs, "Fail to get zone setting %s " % config_name)

        orig_value = str(configs[0].value)
        new_value = 'true'

        Configurations.update(
            self.apiclient,
            name=config_name,
            value=new_value,
            zoneid=self.zone.id
        )

        configs = Configurations.list(
            self.apiclient,
            name=config_name,
            zoneid=self.zone.id
        )
        self.assertIsNotNone(configs, "Fail to get ol setting %s " % config_name)

        self.assertEqual(new_value,
                         (configs[0].value),
                         "Failed to set new config value")

        Configurations.reset(
            self.apiclient,
            name=config_name,
            zoneid=self.zone.id
        )

        configs = Configurations.list(
            self.apiclient,
            name=config_name,
            zoneid=self.zone.id
        )
        self.assertIsNotNone(configs, "Fail to get zone setting %s " % config_name)

        self.assertEqual(orig_value,
                         (configs[0].value),
                         "Failed to reset the value for zone")
Example no. 7
 def tearDown(self):
     try:
         self.debug("Cleaning up the resources")
         cleanup_resources(self.apiclient, self.cleanup)
         interval = Configurations.list(self.apiclient,
                                        name='network.gc.interval')
         wait = Configurations.list(self.apiclient, name='network.gc.wait')
         #time.sleep(int(interval[0].value) + int(wait[0].value))
         self.debug("Cleanup complete!")
     except Exception as e:
         raise Exception("Warning: Exception during cleanup : %s" % e)
     return
Example no. 8
 def tearDown(self):
     try:
         self.debug("Cleaning up the resources")
         #Clean up, terminate the created network offerings
         #cleanup_resources(self.apiclient, self.cleanup)
         interval = Configurations.list(self.apiclient,
                                        name='network.gc.interval')
         wait = Configurations.list(self.apiclient, name='network.gc.wait')
         # Sleep to ensure that all resources are deleted
         time.sleep(int(interval[0].value) + int(wait[0].value))
         self.debug("Cleanup complete!")
     except Exception as e:
         raise Exception("Warning: Exception during cleanup : %s" % e)
     return
Example no. 9
    def test_vm_sync(self):
        """Test VM Sync

        # Validate the following:
        # vm1 should be running, vm2 should be stopped as power report says PowerOff, vm3 should be stopped as missing from power report
        """

        #wait for vmsync to happen
        ping_interval = Configurations.list(self.apiclient,
                                            name="ping.interval")
        total_duration = int(float(ping_interval[0].value) * 3.2)
        time.sleep(total_duration)

        list_vms = VirtualMachine.list(
            self.apiclient,
            ids=[self.vm1.id, self.vm2.id, self.vm3.id],
            listAll=True)
        self.assertTrue(isinstance(list_vms, list) and len(list_vms) == 3,
                        msg="List VM response is empty")
        for vm in list_vms:
            if vm.id == self.vm1.id:
                self.assertTrue(
                    vm.state == "Running",
                    msg="VM {0} is expected to be in running state".format(
                        vm.name))
            elif vm.id == self.vm2.id or vm.id == self.vm3.id:
                self.assertTrue(
                    vm.state == "Stopped",
                    msg="VM {0} is expected to be in stopped state".format(
                        vm.name))
Example no. 10
    def setUpClass(cls):
        # We want to fail quicker if it's a failure
        socket.setdefaulttimeout(60)

        cls.testClient = super(TestVPCRedundancy, cls).getClsTestClient()
        cls.api_client = cls.testClient.getApiClient()

        cls.services = Services().services
        # Get Zone, Domain and templates
        cls.domain = get_domain(cls.api_client)
        cls.zone = get_zone(cls.api_client, cls.testClient.getZoneForTests())

        cls.hypervisor = cls.testClient.getHypervisorInfo()

        cls.template = get_test_template(cls.api_client, cls.zone.id, cls.hypervisor)
        if cls.template == FAILED:
            assert False, "get_test_template() failed to return template"

        cls.services["virtual_machine"]["zoneid"] = cls.zone.id
        cls.services["virtual_machine"]["template"] = cls.template.id

        cls.service_offering = ServiceOffering.create(
            cls.api_client,
            cls.services["service_offering"])
        cls._cleanup = [cls.service_offering]

        cls.logger = logging.getLogger('TestVPCRedundancy')
        cls.stream_handler = logging.StreamHandler()
        cls.logger.setLevel(logging.DEBUG)
        cls.logger.addHandler(cls.stream_handler)

        cls.advert_int = int(Configurations.list(cls.api_client, name="router.redundant.vrrp.interval")[0].value)
Example no. 11
    def test_01_VPN_user_limit(self):
        """VPN remote access user limit tests"""

        # Validate the following
        # prerequisite: change management configuration setting of
        #    remote.access.vpn.user.limit
        # 1. provision more users than is set in the limit
        #    Provisioning of users after the limit should fail

        self.debug("Fetching the limit for remote access VPN users")
        configs = Configurations.list(
                                     self.apiclient,
                                     name='remote.access.vpn.user.limit',
                                     listall=True)
        self.assertEqual(isinstance(configs, list),
                         True,
                         "List configs should return a valid response")

        limit = int(configs[0].value)

        self.debug("Enabling the VPN access for IP: %s" %
                                            self.public_ip.ipaddress)

        self.create_VPN(self.public_ip)
        self.debug("Creating %s VPN users" % limit)
        for x in range(limit):
            self.create_VPN_Users()

        self.debug("Adding another user exceeding limit for remote VPN users")
        with self.assertRaises(Exception):
            self.create_VPN_Users()
        self.debug("Limit exceeded exception raised!")
        return
Example no. 12
    def setUpClass(cls):
        # We want to fail quicker if it's a failure
        socket.setdefaulttimeout(60)

        cls.testClient = super(TestVPCRedundancy, cls).getClsTestClient()
        cls.api_client = cls.testClient.getApiClient()

        cls.services = Services().services
        # Get Zone, Domain and templates
        cls.domain = get_domain(cls.api_client)
        cls.zone = get_zone(cls.api_client, cls.testClient.getZoneForTests())

        cls.hypervisor = cls.testClient.getHypervisorInfo()

        cls.template = get_test_template(cls.api_client, cls.zone.id,
                                         cls.hypervisor)
        if cls.template == FAILED:
            assert False, "get_test_template() failed to return template"

        cls.services["virtual_machine"]["zoneid"] = cls.zone.id
        cls.services["virtual_machine"]["template"] = cls.template.id

        cls.service_offering = ServiceOffering.create(
            cls.api_client, cls.services["service_offering"])
        cls._cleanup = [cls.service_offering]

        cls.logger = logging.getLogger('TestVPCRedundancy')
        cls.stream_handler = logging.StreamHandler()
        cls.logger.setLevel(logging.DEBUG)
        cls.logger.addHandler(cls.stream_handler)

        cls.advert_int = int(
            Configurations.list(
                cls.api_client,
                name="router.redundant.vrrp.interval")[0].value)
Example no. 13
    def setUpClass(cls):
        testClient = super(TestVerifyEventsTable, cls).getClsTestClient()
        cls.apiclient = testClient.getApiClient()
        cls.testdata = testClient.getParsedTestDataConfig()

        cls.hypervisor = cls.testClient.getHypervisorInfo()
        # Get Zone, Domain and templates
        cls.domain = get_domain(cls.apiclient)
        cls.zone = get_zone(cls.apiclient, testClient.getZoneForTests())

        cls.template = get_template(
            cls.apiclient,
            cls.zone.id,
            cls.testdata["ostype"])

        cls._cleanup = []

        try:

            cls.unsupportedHypervisor = False
            if cls.hypervisor.lower() in ['hyperv', 'lxc', 'kvm']:
                if cls.hypervisor.lower() == 'kvm':
                    configs = Configurations.list(
                        cls.apiclient,
                        name='kvm.snapshot.enabled'
                    )

                    if configs[0].value == "false":
                        cls.unsupportedHypervisor = True
                else:
                    cls.unsupportedHypervisor = True

                return
            # Create an account
            cls.account = Account.create(
                cls.apiclient,
                cls.testdata["account"],
                domainid=cls.domain.id
            )

            # Create user api client of the account
            cls.userapiclient = testClient.getUserApiClient(
                UserName=cls.account.name,
                DomainName=cls.account.domain
            )
            # Create Service offering
            cls.service_offering = ServiceOffering.create(
                cls.apiclient,
                cls.testdata["service_offering"],
            )

            cls._cleanup = [
                cls.account,
                cls.service_offering,
            ]
        except Exception as e:
            cls.tearDownClass()
            raise e
        return
Example no. 14
    def setUpClass(cls):
        cls.testClient = super(TestHostHighAvailability,
                               cls).getClsTestClient()
        cls.api_client = cls.testClient.getApiClient()

        cls.services = Services().services
        # Get Zone, Domain and templates
        cls.domain = get_domain(cls.api_client)
        cls.zone = get_zone(cls.api_client, cls.testClient.getZoneForTests())

        cls.template = get_template(cls.api_client, cls.zone.id,
                                    cls.services["ostype"])
        cls.hypervisor = cls.testClient.getHypervisorInfo()
        if cls.hypervisor.lower() in ['lxc']:
            raise unittest.SkipTest(
                "Template creation from root volume is not supported in LXC")

        clusterWithSufficientHosts = None
        clusters = Cluster.list(cls.api_client, zoneid=cls.zone.id)
        for cluster in clusters:
            cls.hosts = Host.list(cls.api_client,
                                  clusterid=cluster.id,
                                  type="Routing")
            if len(cls.hosts) >= 3:
                clusterWithSufficientHosts = cluster
                break

        if clusterWithSufficientHosts is None:
            raise unittest.SkipTest("No Cluster with 3 hosts found")

        configs = Configurations.list(cls.api_client, name='ha.tag')

        assert isinstance(configs, list), "Config list not\
                retrieved for ha.tag"

        if configs[0].value != "ha":
            raise unittest.SkipTest("Please set the global config\
                    value for ha.tag as 'ha'")

        Host.update(cls.api_client, id=cls.hosts[2].id, hosttags="ha")

        cls.services["virtual_machine"]["zoneid"] = cls.zone.id
        cls.services["virtual_machine"]["template"] = cls.template.id

        cls.service_offering_with_ha = ServiceOffering.create(
            cls.api_client,
            cls.services["service_offering_with_ha"],
            offerha=True)

        cls.service_offering_without_ha = ServiceOffering.create(
            cls.api_client,
            cls.services["service_offering_without_ha"],
            offerha=False)

        cls._cleanup = [
            cls.service_offering_with_ha,
            cls.service_offering_without_ha,
        ]
        return
Example no. 15
def is_config_suitable(apiclient, name, value):
    """
    Check whether the deployment has the expected `value` for the global setting `name`
    @return: true if value is set, else false
    """
    configs = Configurations.list(apiclient, name=name)
    assert configs is not None and isinstance(configs, list) and len(configs) > 0
    return configs[0].value == value
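Illustrative usage of the helper (an assumption, not part of the source): a test can gate itself on a global setting and skip when the deployment is not configured as expected. The setting name and value below are placeholders.

    def setUp(self):
        self.apiclient = self.testClient.getApiClient()
        # Skip the test unless the deployment has the expected global setting value.
        if not is_config_suitable(self.apiclient, name="expunge.interval", value="60"):
            self.skipTest("expunge.interval is not set to the expected value; skipping")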
Example no. 16
 def tearDown(self):
     try:
         self.debug("Cleaning up the resources")
         cleanup_resources(self.apiclient, self.cleanup)
         interval = Configurations.list(
                                 self.apiclient,
                                 name='network.gc.interval'
                                 )
         wait = Configurations.list(
                                 self.apiclient,
                                 name='network.gc.wait'
                                 )
         #time.sleep(int(interval[0].value) + int(wait[0].value))
         self.debug("Cleanup complete!")
     except Exception as e:
         raise Exception("Warning: Exception during cleanup : %s" % e)
     return
Example no. 17
    def get_lb_stats_settings(self):
        self.logger.debug("Retrieving haproxy stats settings")
        settings = {}
        try:
            settings["stats_port"] = Configurations.list(
                self.apiclient, name="network.loadbalancer.haproxy.stats.port")[0].value
            settings["stats_uri"] = Configurations.list(
                self.apiclient, name="network.loadbalancer.haproxy.stats.uri")[0].value
            settings["username"], settings["password"] = Configurations.list(
                self.apiclient, name="network.loadbalancer.haproxy.stats.auth")[0].value.split(":")
            settings["visibility"] = Configurations.list(
                self.apiclient, name="network.loadbalancer.haproxy.stats.visibility")[0].value
            self.logger.debug(settings)
        except Exception as e:
            self.fail("Failed to retrieve stats settings " % e)

        return settings
Example no. 18
    def get_lb_stats_settings(self):
        self.logger.debug("Retrieving haproxy stats settings")
        settings = { }
        try:
            settings["stats_port"] = Configurations.list(
                self.apiclient, name="network.loadbalancer.haproxy.stats.port")[0].value
            settings["stats_uri"] = Configurations.list(
                self.apiclient, name="network.loadbalancer.haproxy.stats.uri")[0].value
            settings["username"], settings["password"] = Configurations.list(
                self.apiclient, name="network.loadbalancer.haproxy.stats.auth")[0].value.split(":")
            settings["visibility"] = Configurations.list(
                self.apiclient, name="network.loadbalancer.haproxy.stats.visibility")[0].value
            self.logger.debug(settings)
        except Exception as e:
            self.fail("Failed to retrieve stats settings " % e)

        return settings
Example no. 19
    def setUpClass(cls):
        # Setup

        cls.testClient = super(TestDummyBackupAndRecovery, cls).getClsTestClient()
        cls.api_client = cls.testClient.getApiClient()
        cls.services = cls.testClient.getParsedTestDataConfig()
        cls.zone = get_zone(cls.api_client, cls.testClient.getZoneForTests())
        cls.services["mode"] = cls.zone.networktype
        cls.hypervisor = cls.testClient.getHypervisorInfo()
        cls.domain = get_domain(cls.api_client)
        cls.template = get_template(cls.api_client, cls.zone.id, cls.services["ostype"])
        if cls.template == FAILED:
            assert False, "get_template() failed to return template with description %s" % cls.services["ostype"]
        cls.services["small"]["zoneid"] = cls.zone.id
        cls.services["small"]["template"] = cls.template.id
        cls._cleanup = []

        # Check backup configuration values, set them to enable the dummy provider
        backup_enabled_cfg = Configurations.list(cls.api_client, name='backup.framework.enabled', zoneid=cls.zone.id)
        backup_provider_cfg = Configurations.list(cls.api_client, name='backup.framework.provider.plugin', zoneid=cls.zone.id)
        cls.backup_enabled = backup_enabled_cfg[0].value
        cls.backup_provider = backup_provider_cfg[0].value

        if cls.backup_enabled == "false":
            Configurations.update(cls.api_client, 'backup.framework.enabled', value='true', zoneid=cls.zone.id)
        if cls.backup_provider != "dummy":
            Configurations.update(cls.api_client, 'backup.framework.provider.plugin', value='dummy', zoneid=cls.zone.id)

        if cls.hypervisor.lower() != 'simulator':
            return

        cls.account = Account.create(cls.api_client, cls.services["account"], domainid=cls.domain.id)
        cls.offering = ServiceOffering.create(cls.api_client,cls.services["service_offerings"]["small"])
        cls.vm = VirtualMachine.create(cls.api_client, cls.services["small"], accountid=cls.account.name,
                                       domainid=cls.account.domainid, serviceofferingid=cls.offering.id,
                                       mode=cls.services["mode"])
        cls._cleanup = [cls.offering, cls.account]

        # Import a dummy backup offering to use on tests

        cls.provider_offerings = BackupOffering.listExternal(cls.api_client, cls.zone.id)
        cls.debug("Importing backup offering %s - %s" % (cls.provider_offerings[0].externalid, cls.provider_offerings[0].name))
        cls.offering = BackupOffering.importExisting(cls.api_client, cls.zone.id, cls.provider_offerings[0].externalid,
                                                   cls.provider_offerings[0].name, cls.provider_offerings[0].description)
        cls._cleanup.append(cls.offering)
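The excerpt stops at setUpClass; a matching tearDownClass (a sketch under the assumption that the saved values should be restored, not shown in the source) would put the two backup framework settings back and clean up the created resources:

    def tearDownClass(cls):
        try:
            # Restore the original backup framework settings captured in setUpClass
            if cls.backup_enabled == "false":
                Configurations.update(cls.api_client, 'backup.framework.enabled', value=cls.backup_enabled, zoneid=cls.zone.id)
            if cls.backup_provider != "dummy":
                Configurations.update(cls.api_client, 'backup.framework.provider.plugin', value=cls.backup_provider, zoneid=cls.zone.id)
            cleanup_resources(cls.api_client, cls._cleanup)
        except Exception as e:
            raise Exception("Warning: Exception during cleanup : %s" % e)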
Example no. 20
 def setUpClass(cls):
     testClient = super(TestCreateIpv6NetworkVpcOffering, cls).getClsTestClient()
     cls.apiclient = testClient.getApiClient()
     cls.services = testClient.getParsedTestDataConfig()
     cls.initial_ipv6_offering_enabled = Configurations.list(
         cls.apiclient,
         name=ipv6_offering_config_name)[0].value
     cls._cleanup = []
     return
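Classes that capture initial_ipv6_offering_enabled in this way usually restore it once the tests are done. A possible tearDownClass (an assumption, not shown in this excerpt) mirrors the update call used elsewhere in these examples:

    def tearDownClass(cls):
        # Put the IPv6 offering flag back to the value recorded in setUpClass
        if cls.initial_ipv6_offering_enabled is not None:
            Configurations.update(cls.apiclient,
                                  ipv6_offering_config_name,
                                  cls.initial_ipv6_offering_enabled)
        cleanup_resources(cls.apiclient, cls._cleanup)
        return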
Example no. 21
def is_config_suitable(apiclient, name, value):
    """
    Check whether the deployment has the expected `value` for the global setting `name`
    @return: true if value is set, else false
    """
    configs = Configurations.list(apiclient, name=name)
    assert (configs is not None and isinstance(configs, list)
            and len(configs) > 0)
    return configs[0].value == value
Example no. 22
    def test_09_expunge_vm(self):
        """Test destroy(expunge) Virtual Machine
        """
        # Validate the following
        # 1. listVM command should NOT  return this VM any more.

        self.debug("Expunge VM-ID: %s" % self.small_virtual_machine.id)

        cmd = destroyVirtualMachine.destroyVirtualMachineCmd()
        cmd.id = self.small_virtual_machine.id
        self.apiclient.destroyVirtualMachine(cmd)

        config = Configurations.list(
            self.apiclient,
            name='expunge.delay'
        )

        expunge_delay = int(config[0].value)
        time.sleep(expunge_delay * 2)

        # The VM should be destroyed once the expunge thread has run
        # Wait for two cycles of the expunge thread
        config = Configurations.list(
            self.apiclient,
            name='expunge.interval'
        )
        expunge_cycle = int(config[0].value)
        wait_time = expunge_cycle * 4
        while wait_time >= 0:
            list_vm_response = VirtualMachine.list(
                self.apiclient,
                id=self.small_virtual_machine.id
            )
            if not list_vm_response:
                break
            self.debug("Waiting for VM to expunge")
            time.sleep(expunge_cycle)
            wait_time = wait_time - expunge_cycle

        self.debug("listVirtualMachines response: %s" % list_vm_response)

        self.assertEqual(list_vm_response, None, "Check Expunged virtual machine is in listVirtualMachines response")
        return
Example no. 23
 def tearDown(self):
     try:
         self.debug("Cleaning up the resources")
         #Clean up, terminate the created network offerings
         #cleanup_resources(self.apiclient, self.cleanup)
         interval = Configurations.list(
                                 self.apiclient,
                                 name='network.gc.interval'
                                 )
         wait = Configurations.list(
                                 self.apiclient,
                                 name='network.gc.wait'
                                 )
         # Sleep to ensure that all resources are deleted
         time.sleep(int(interval[0].value) + int(wait[0].value))
         self.debug("Cleanup complete!")
     except Exception as e:
         raise Exception("Warning: Exception during cleanup : %s" % e)
     return
Example no. 24
    def test_maxAccountNetworks(self):
        """Test Limit number of guest account specific networks
        """

        # Steps for validation
        # 1. Fetch max.account.networks from configurations
        # 2. Create an account. Create more networks than max.account.networks allows
        # 3. Create network should fail

        self.debug("Creating project with '%s' as admin" % self.account.name)
        # Create project as a domain admin
        project = Project.create(self.apiclient,
                                 self.services["project"],
                                 account=self.account.name,
                                 domainid=self.account.domainid)
        # Cleanup created project at end of test
        self.cleanup.append(project)
        self.debug("Created project with domain admin with ID: %s" %
                   project.id)

        config = Configurations.list(self.apiclient,
                                     name='max.project.networks',
                                     listall=True)
        self.assertEqual(
            isinstance(config, list), True,
            "List configurations hsould have max.project.networks")

        config_value = int(config[0].value)
        self.debug("max.project.networks: %s" % config_value)

        for ctr in range(config_value):
            # Creating network using the network offering created
            self.debug("Creating network with network offering: %s" %
                       self.network_offering.id)
            network = Network.create(
                self.apiclient,
                self.services["network"],
                projectid=project.id,
                networkofferingid=self.network_offering.id,
                zoneid=self.zone.id)
            self.cleanup.append(network)
            self.debug("Created network with ID: %s" % network.id)
        self.debug("Creating network in account already having networks : %s" %
                   config_value)

        with self.assertRaises(Exception):
            network = Network.create(
                self.apiclient,
                self.services["network"],
                projectid=project.id,
                networkofferingid=self.network_offering.id,
                zoneid=self.zone.id)
            self.cleanup.append(network)
        self.debug('Create network failed (as expected)')
        return
Example no. 25
    def setUpClass(cls):
        testClient = super(Overcommit, cls).getClsTestClient()
        cls.apiclient = testClient.getApiClient()
        cls.testdata = testClient.getParsedTestDataConfig()
        # Get Zone,Domain and templates
        cls.domain = get_domain(cls.apiclient)
        cls.zone = get_zone(cls.apiclient)
        cls.testdata["mode"] = cls.zone.networktype
        cls.hypervisor = testClient.getHypervisorInfo()
        cls.hostConfig = cls.config.__dict__["zones"][0].__dict__["pods"][0].__dict__["clusters"][0].__dict__["hosts"][0].__dict__

        cls.template = get_template(
            cls.apiclient,
            cls.zone.id,
            cls.testdata["ostype"])
        cls.testdata["template"]["ostypeid"] = cls.template.ostypeid
        list_conf = Configurations.list(cls.apiclient,
                                        name="capacity.check.period"
                                        )
        cls.wait_time = 5 + int(list_conf[0].value) / 1000
        if cls.template == FAILED:
            cls.fail(
                "get_template() failed to return template with description \
                %s" %
                cls.testdata["ostype"])
        cls._cleanup = []
        try:
            cls.account = Account.create(cls.apiclient,
                                         cls.testdata["account"],
                                         domainid=cls.domain.id
                                         )
            cls._cleanup.append(cls.account)

            cls.service_offering = ServiceOffering.create(
                cls.apiclient,
                cls.testdata["service_offerings"]["small"])

            cls._cleanup.append(cls.service_offering)

            cls.deployVmResponse = VirtualMachine.create(
                cls.apiclient,
                services=cls.testdata["virtual_machine"],
                accountid=cls.account.name,
                domainid=cls.account.domainid,
                serviceofferingid=cls.service_offering.id,
                templateid=cls.template.id,
                zoneid=cls.zone.id,
            )

        except Exception as e:
            cls.tearDownClass()
            raise e

        return
Example no. 26
    def setUpClass(cls):
        testClient = super(Overcommit, cls).getClsTestClient()
        cls.apiclient = testClient.getApiClient()
        cls.testdata = testClient.getParsedTestDataConfig()
        # Get Zone,Domain and templates
        cls.domain = get_domain(cls.apiclient)
        cls.zone = get_zone(cls.apiclient)
        cls.testdata["mode"] = cls.zone.networktype
        cls.testdata["configurableData"]["password"] = "******"
        cls.hypervisor = testClient.getHypervisorInfo()

        cls.template = get_template(
            cls.apiclient,
            cls.zone.id,
            cls.testdata["ostype"])
        cls.testdata["template"]["ostypeid"] = cls.template.ostypeid
        list_conf = Configurations.list(cls.apiclient,
                                        name="capacity.check.period"
                                        )
        cls.wait_time = 5 + int(list_conf[0].value) / 1000
        if cls.template == FAILED:
            cls.fail(
                "get_template() failed to return template with description \
                %s" %
                cls.testdata["ostype"])
        cls._cleanup = []
        try:
            cls.account = Account.create(cls.apiclient,
                                         cls.testdata["account"],
                                         domainid=cls.domain.id
                                         )
            cls._cleanup.append(cls.account)

            cls.service_offering = ServiceOffering.create(
                cls.apiclient,
                cls.testdata["service_offerings"]["small"])

            cls._cleanup.append(cls.service_offering)

            cls.deployVmResponse = VirtualMachine.create(
                cls.apiclient,
                services=cls.testdata["virtual_machine"],
                accountid=cls.account.name,
                domainid=cls.account.domainid,
                serviceofferingid=cls.service_offering.id,
                templateid=cls.template.id,
                zoneid=cls.zone.id,
            )

        except Exception as e:
            cls.tearDownClass()
            raise e

        return
Example no. 27
    def setUpClass(cls):
        testClient = super(TestIpv6Network, cls).getClsTestClient()
        cls.services = testClient.getParsedTestDataConfig()
        cls.apiclient = testClient.getApiClient()
        cls.dbclient = testClient.getDbConnection()
        cls.test_ipv6_guestprefix = None
        cls.initial_ipv6_offering_enabled = None
        cls._cleanup = []
        cls.routerDetailsMap = {}

        cls.logger = logging.getLogger('TestIpv6Network')

        cls.zone = get_zone(cls.apiclient, testClient.getZoneForTests())
        cls.services['mode'] = cls.zone.networktype
        cls.ipv6NotSupported = False

        ipv6_guestprefix = cls.getGuestIpv6Prefix()
        if ipv6_guestprefix is None:
            cls.ipv6NotSupported = True
        if not cls.ipv6NotSupported:
            ipv6_publiciprange = cls.getPublicIpv6Range()
            if ipv6_publiciprange is None:
                cls.ipv6NotSupported = True

        if not cls.ipv6NotSupported:
            cls.initial_ipv6_offering_enabled = Configurations.list(
                cls.apiclient,
                name=ipv6_offering_config_name)[0].value
            Configurations.update(cls.apiclient,
                ipv6_offering_config_name,
                "true")
            cls.domain = get_domain(cls.apiclient)
            cls.account = Account.create(
                cls.apiclient,
                cls.services["account"],
                admin=True,
                domainid=cls.domain.id
            )
            cls._cleanup.append(cls.account)
            cls.hypervisor = testClient.getHypervisorInfo()
            cls.template = get_template(
                cls.apiclient,
                cls.zone.id,
                cls.services["ostype"]
            )
            if cls.hypervisor.lower() in ('xenserver'):
                # Default Xenserver template has IPv6 disabled
                cls.template = get_test_template(
                   cls.apiclient,
                   cls.zone.id,
                   cls.hypervisor)
        else:
            cls.debug("IPv6 is not supported, skipping tests!")
        return
Example no. 28
    def test_vm_ha(self):
        """Test VM HA

        # Validate the following:
        # VM started on other host in cluster
        """

        #wait for VM to HA
        ping_timeout = Configurations.list(self.apiclient, name="ping.timeout")
        ping_interval = Configurations.list(self.apiclient, name="ping.interval")
        total_duration = int(float(ping_timeout[0].value) * float(ping_interval[0].value))
        time.sleep(total_duration)

        duration = 0
        vm = None
        while duration < total_duration:
            list_vms = VirtualMachine.list(self.apiclient, id=self.virtual_machine.id)
            self.assertTrue(isinstance(list_vms, list) and len(list_vms) == 1, msg = "List VM response was empty")
            vm = list_vms[0]
            if vm.hostid != self.virtual_machine.hostid and vm.state == "Running":
                break
            else:
                time.sleep(10)
                duration = duration + 10

        self.assertEqual(
            vm.id,
            self.virtual_machine.id,
            "VM ids do not match")
        self.assertEqual(
            vm.name,
            self.virtual_machine.name,
            "VM names do not match")
        self.assertEqual(
            vm.state,
            "Running",
            msg="VM is not in Running state")
        self.assertNotEqual(
            vm.hostid,
            self.virtual_machine.hostid,
            msg="VM is not started on another host as part of HA")
Example no. 29
    def test_vm_ha(self):
        """Test VM HA

        # Validate the following:
        # VM started on other host in cluster
        """

        #wait for VM to HA
        ping_timeout = Configurations.list(self.apiclient, name="ping.timeout")
        ping_interval = Configurations.list(self.apiclient, name="ping.interval")
        total_duration = int(float(ping_timeout[0].value) * float(ping_interval[0].value))
        time.sleep(total_duration)

        duration = 0
        vm = None
        while duration < total_duration:
            list_vms = VirtualMachine.list(self.apiclient, id=self.virtual_machine.id)
            self.assertTrue(isinstance(list_vms, list) and len(list_vms) == 1, msg = "List VM response was empty")
            vm = list_vms[0]
            if vm.hostid != self.virtual_machine.hostid and vm.state == "Running":
                break
            else:
                time.sleep(10)
                duration = duration + 10

        self.assertEqual(
            vm.id,
            self.virtual_machine.id,
            "VM ids do not match")
        self.assertEqual(
            vm.name,
            self.virtual_machine.name,
            "VM names do not match")
        self.assertEqual(
            vm.state,
            "Running",
            msg="VM is not in Running state")
        self.assertNotEqual(
            vm.hostid,
            self.virtual_machine.hostid,
            msg="VM is not started on another host as part of HA")
Example no. 30
    def get_lb_stats_settings(self):
        self.logger.debug("Retrieving haproxy stats settings")
        settings = {}
        try:
            settings["stats_port"] = Configurations.list(
                self.apiclient, name="network.loadbalancer.haproxy.stats.port")[0].value
            settings["stats_uri"] = Configurations.list(
                self.apiclient, name="network.loadbalancer.haproxy.stats.uri")[0].value
            # Update global setting network.loadbalancer.haproxy.stats.auth to a known value
            haproxy_auth = "admin:password"
            Configurations.update(self.apiclient, "network.loadbalancer.haproxy.stats.auth", haproxy_auth)
            self.logger.debug(
                "Updated global setting stats network.loadbalancer.haproxy.stats.auth to %s" % (haproxy_auth))
            settings["username"], settings["password"] = haproxy_auth.split(":")
            settings["visibility"] = Configurations.list(
                self.apiclient, name="network.loadbalancer.haproxy.stats.visibility")[0].value
            self.logger.debug(settings)
        except Exception as e:
            self.fail("Failed to retrieve stats settings " % e)

        return settings
Example no. 31
    def setUpClass(cls):
        testClient = super(TestVerifyEventsTable, cls).getClsTestClient()
        cls.apiclient = testClient.getApiClient()
        cls.testdata = testClient.getParsedTestDataConfig()

        cls.hypervisor = cls.testClient.getHypervisorInfo()
        # Get Zone, Domain and templates
        cls.domain = get_domain(cls.apiclient)
        cls.zone = get_zone(cls.apiclient, testClient.getZoneForTests())

        cls.template = get_template(cls.apiclient, cls.zone.id,
                                    cls.testdata["ostype"])

        cls._cleanup = []

        try:

            cls.unsupportedHypervisor = False
            if cls.hypervisor.lower() in ['hyperv', 'lxc', 'kvm']:
                if cls.hypervisor.lower() == 'kvm':
                    configs = Configurations.list(cls.apiclient,
                                                  name='kvm.snapshot.enabled')

                    if configs[0].value == "false":
                        cls.unsupportedHypervisor = True
                else:
                    cls.unsupportedHypervisor = True

                return
            # Create an account
            cls.account = Account.create(cls.apiclient,
                                         cls.testdata["account"],
                                         domainid=cls.domain.id)

            # Create user api client of the account
            cls.userapiclient = testClient.getUserApiClient(
                UserName=cls.account.name, DomainName=cls.account.domain)
            # Create Service offering
            cls.service_offering = ServiceOffering.create(
                cls.apiclient,
                cls.testdata["service_offering"],
            )

            cls._cleanup = [
                cls.account,
                cls.service_offering,
            ]
        except Exception as e:
            cls.tearDownClass()
            raise e
        return
Example no. 32
    def test_05_rvpc_multi_tiers(self):
        """ Create a redundant VPC with 3 Tiers, 3 VMs, 3 PF rules"""
        self.logger.debug("Starting test_05_rvpc_multi_tiers")
        self.query_routers()

        network_to_delete_1 = self.create_network(self.services["network_offering"], "10.1.1.1", nr_vms=1, mark_net_cleanup=False)
        self.networks.append(network_to_delete_1)
        self.networks.append(self.create_network(self.services["network_offering_no_lb"], "10.1.2.1", nr_vms=1))
        network_to_delete_2 = self.create_network(self.services["network_offering_no_lb"], "10.1.3.1", nr_vms=1, mark_net_cleanup=False)
        self.networks.append(network_to_delete_2)
        
        self.check_routers_state()
        self.add_nat_rules()
        self.do_vpc_test(False)

        self.destroy_vm(network_to_delete_1)
        network_to_delete_1.get_net().delete(self.apiclient)
        self.networks.remove(network_to_delete_1)

        vrrp_interval = Configurations.list(self.apiclient, name="router.redundant.vrrp.interval")
        
        self.logger.debug("router.redundant.vrrp.interval is ==> %s" % vrrp_interval)

        total_sleep = 10
        if vrrp_interval:
            total_sleep = int(vrrp_interval[0].value) * 4
        else:
            self.logger.debug("Could not retrieve the key 'router.redundant.vrrp.interval'. Sleeping for 10 seconds.")
            
        '''
        Sleep (router.redundant.vrrp.interval * 4) seconds here because, since we are removing the first tier (NIC), VRRP has to reconfigure the interface it uses.
        Due to the configuration changes, it will start a new election, which might take up to 4 seconds because each router has an
        advertisement interval of 2 seconds.
        '''
        time.sleep(total_sleep)
        self.check_routers_state(status_to_check="MASTER")
        self.do_vpc_test(False)

        self.destroy_vm(network_to_delete_2)
        network_to_delete_2.get_net().delete(self.apiclient)
        self.networks.remove(network_to_delete_2)

        '''
        To be safe, sleep for 'total_sleep' seconds because removing/adding an interface will restart keepalived.
        Keepalived is restarted because its configuration file changes in order to have the virtual_ipaddress section updated.
        '''
        time.sleep(total_sleep)
        self.check_routers_state(status_to_check="MASTER")
        self.do_vpc_test(False)
Example no. 33
    def test_05_rvpc_multi_tiers(self):
        """ Create a redundant VPC with 3 Tiers, 3 VMs, 3 PF rules"""
        self.logger.debug("Starting test_05_rvpc_multi_tiers")
        self.query_routers()

        network_to_delete_1 = self.create_network(self.services["network_offering"], "10.1.1.1", nr_vms=1, mark_net_cleanup=False)
        self.networks.append(network_to_delete_1)
        self.networks.append(self.create_network(self.services["network_offering_no_lb"], "10.1.2.1", nr_vms=1))
        network_to_delete_2 = self.create_network(self.services["network_offering_no_lb"], "10.1.3.1", nr_vms=1, mark_net_cleanup=False)
        self.networks.append(network_to_delete_2)

        self.check_routers_state()
        self.add_nat_rules()
        self.do_vpc_test(False)

        self.destroy_vm(network_to_delete_1)
        network_to_delete_1.get_net().delete(self.apiclient)
        self.networks.remove(network_to_delete_1)

        vrrp_interval = Configurations.list(self.apiclient, name="router.redundant.vrrp.interval")

        self.logger.debug("router.redundant.vrrp.interval is ==> %s" % vrrp_interval)

        total_sleep = 10
        if vrrp_interval:
            total_sleep = int(vrrp_interval[0].value) * 4
        else:
            self.logger.debug("Could not retrieve the key 'router.redundant.vrrp.interval'. Sleeping for 10 seconds.")

        '''
        Sleep (router.redundant.vrrp.interval * 4) seconds here because, since we are removing the first tier (NIC), VRRP has to reconfigure the interface it uses.
        Due to the configuration changes, it will start a new election, which might take up to 4 seconds because each router has an
        advertisement interval of 2 seconds.
        '''
        time.sleep(total_sleep)
        self.check_routers_state(status_to_check="MASTER")
        self.do_vpc_test(False)

        self.destroy_vm(network_to_delete_2)
        network_to_delete_2.get_net().delete(self.apiclient)
        self.networks.remove(network_to_delete_2)

        '''
        To be safe, sleep for 'total_sleep' seconds because removing/adding an interface will restart keepalived.
        Keepalived is restarted because its configuration file changes in order to have the virtual_ipaddress section updated.
        '''
        time.sleep(total_sleep)
        self.check_routers_state(status_to_check="MASTER")
        self.do_vpc_test(False)
Example no. 34
    def test_vms_with_same_name(self):
        """ Test vm deployment with same name

        # 1. Deploy a VM with a particular name from account_1
        # 2. Try to deploy another vm with same name from account_2
        # 3. Verify that second VM deployment fails

        """
        # Step 1
        # Create VM on cluster wide
        configs = Configurations.list(
            self.apiclient,
            name="vm.instancename.flag")
        orig_value = configs[0].value

        if orig_value == "false":
            Configurations.update(self.apiclient,
                                  name="vm.instancename.flag",
                                  value="true"
                                  )

            # Restart management server
            self.RestartServer()
            time.sleep(120)

        self.testdata["small"]["displayname"]="TestName"
        self.testdata["small"]["name"]="TestName"
        VirtualMachine.create(
            self.userapiclient_1,
            self.testdata["small"],
            templateid=self.template.id,
            accountid=self.account_1.name,
            domainid=self.account_1.domainid,
            serviceofferingid=self.service_offering.id,
            zoneid=self.zone.id,
        )

        with self.assertRaises(Exception):
            VirtualMachine.create(
                self.userapiclient_2,
                self.testdata["small"],
                templateid=self.template.id,
                accountid=self.account_2.name,
                domainid=self.account_2.domainid,
                serviceofferingid=self.service_offering.id,
                zoneid=self.zone.id,
            )
        return
Example no. 35
    def test_maxAccountNetworks(self):
        """Test Limit number of guest account specific networks
        """

        # Steps for validation
        # 1. Fetch max.account.networks from configurations
        # 2. Create an account. Create more networks than max.account.networks allows
        # 3. Create network should fail

        self.debug("Creating project with '%s' as admin" % self.account.name)
        # Create project as a domain admin
        project = Project.create(
            self.apiclient, self.services["project"], account=self.account.name, domainid=self.account.domainid
        )
        # Cleanup created project at end of test
        self.cleanup.append(project)
        self.debug("Created project with domain admin with ID: %s" % project.id)

        config = Configurations.list(self.apiclient, name="max.project.networks", listall=True)
        self.assertEqual(isinstance(config, list), True, "List configurations hsould have max.project.networks")

        config_value = int(config[0].value)
        self.debug("max.project.networks: %s" % config_value)

        for ctr in range(config_value):
            # Creating network using the network offering created
            self.debug("Creating network with network offering: %s" % self.network_offering.id)
            network = Network.create(
                self.apiclient,
                self.services["network"],
                projectid=project.id,
                networkofferingid=self.network_offering.id,
                zoneid=self.zone.id,
            )
            self.debug("Created network with ID: %s" % network.id)
        self.debug("Creating network in account already having networks : %s" % config_value)

        with self.assertRaises(Exception):
            Network.create(
                self.apiclient,
                self.services["network"],
                projectid=project.id,
                networkofferingid=self.network_offering.id,
                zoneid=self.zone.id,
            )
        self.debug("Create network failed (as expected)")
        return
Example no. 36
    def test_vms_with_same_name(self):
        """ Test vm deployment with same name

        # 1. Deploy a VM with a particular name from account_1
        # 2. Try to deploy another vm with same name from account_2
        # 3. Verify that second VM deployment fails

        """
        # Step 1
        # Create VM on cluster wide
        configs = Configurations.list(self.apiclient,
                                      name="vm.instancename.flag")
        orig_value = configs[0].value

        if orig_value == "false":
            Configurations.update(self.apiclient,
                                  name="vm.instancename.flag",
                                  value="true")

            # Restart management server
            self.RestartServer()
            time.sleep(120)

        self.testdata["small"]["displayname"] = "TestName"
        self.testdata["small"]["name"] = "TestName"
        VirtualMachine.create(
            self.userapiclient_1,
            self.testdata["small"],
            templateid=self.template.id,
            accountid=self.account_1.name,
            domainid=self.account_1.domainid,
            serviceofferingid=self.service_offering.id,
            zoneid=self.zone.id,
        )

        with self.assertRaises(Exception):
            VirtualMachine.create(
                self.userapiclient_2,
                self.testdata["small"],
                templateid=self.template.id,
                accountid=self.account_2.name,
                domainid=self.account_2.domainid,
                serviceofferingid=self.service_offering.id,
                zoneid=self.zone.id,
            )
        return
Example no. 37
    def test_vm_sync(self):
        """Test VM Sync

        # Validate the following:
        # vm1 should be running, vm2 should be stopped as power report says PowerOff, vm3 should be stopped as missing from power report
        """

        # wait for vmsync to happen
        ping_interval = Configurations.list(self.apiclient, name="ping.interval")
        total_duration = int(float(ping_interval[0].value) * 3.2)
        time.sleep(total_duration)

        list_vms = VirtualMachine.list(self.apiclient, ids=[self.vm1.id, self.vm2.id, self.vm3.id], listAll=True)
        self.assertTrue(isinstance(list_vms, list) and len(list_vms) == 3, msg="List VM response is empty")
        for vm in list_vms:
            if vm.id == self.vm1.id:
                self.assertTrue(vm.state == "Running", msg="VM {0} is expected to be in running state".format(vm.name))
            elif vm.id == self.vm2.id or vm.id == self.vm3.id:
                self.assertTrue(vm.state == "Stopped", msg="VM {0} is expected to be in stopped state".format(vm.name))
Example no. 38
    def setUpClass(cls):
        cls.testClient = super(TestKubernetesSupportedVersion, cls).getClsTestClient()
        cls.apiclient = cls.testClient.getApiClient()
        cls.services = cls.testClient.getParsedTestDataConfig()
        cls.zone = get_zone(cls.apiclient, cls.testClient.getZoneForTests())
        cls.mgtSvrDetails = cls.config.__dict__["mgtSvr"][0].__dict__
        cls.kubernetes_version_iso_url = 'http://download.cloudstack.org/cks/setup-1.16.3.iso'

        cls.initial_configuration_cks_enabled = Configurations.list(cls.apiclient,
                                                                    name="cloud.kubernetes.service.enabled")[0].value
        if cls.initial_configuration_cks_enabled not in ["true", True]:
            cls.debug("Enabling CloudStack Kubernetes Service plugin and restarting management server")
            Configurations.update(cls.apiclient,
                                  "cloud.kubernetes.service.enabled",
                                  "true")
            cls.restartServer()

        cls._cleanup = []
        return
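The excerpt shows only the setup; a corresponding tearDownClass (a sketch, assuming the saved flag should be restored) would put cloud.kubernetes.service.enabled back to the value recorded above:

    def tearDownClass(cls):
        # Restore the CKS feature flag if setUpClass changed it, then clean up resources.
        if cls.initial_configuration_cks_enabled not in ["true", True]:
            cls.debug("Restoring cloud.kubernetes.service.enabled and restarting management server")
            Configurations.update(cls.apiclient,
                                  "cloud.kubernetes.service.enabled",
                                  cls.initial_configuration_cks_enabled)
            cls.restartServer()
        cleanup_resources(cls.apiclient, cls._cleanup)
        return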
Example no. 39
    def test_02_concurrent_snapshots_configuration(self):
        """Concurrent Snapshots
            1. Verify that CreateSnapshot command fails when it
                takes more time than job.expire.minute
            2. Verify that snapshot creation fails if CreateSnapshot command
                takes more time than concurrent.snapshots.threshold.perhost
            3. Check the event generation when snapshot creation
                fails if CreateSnapshot takes more time than
                concurrent.snapshots.threshold.perhost

        """

        # Step 1
        if not self.testdata["configurableData"][
                "restartManagementServerThroughTestCase"]:
            self.skipTest(
                "Skip test if restartManagementServerThroughTestCase \
                        is not provided")

        configs = Configurations.list(
            self.apiclient,
            name="job.expire.minutes")
        orig_expire = configs[0].value

        Configurations.update(self.apiclient,
                              name="concurrent.snapshots.threshold.perhost",
                              value="2"
                              )

        Configurations.update(self.apiclient,
                              name="job.expire.minutes",
                              value="1"
                              )

        # Restart management server
        self.RestartServer()
        time.sleep(120)

        try:
            thread_pool = []
            for i in range(4):
                create_snapshot_thread_1 = Thread(
                    target=CreateSnapshot,
                    args=(
                        self,
                        self.root_pool[i],
                        False))
                thread_pool.append(create_snapshot_thread_1)

            for thread in thread_pool:
                thread.start()

            for thread in thread_pool:
                thread.join()

        except Exception as e:
            raise Exception(
                "Warning: Exception unable to start thread : %s" %
                e)

        Configurations.update(self.apiclient,
                              name="job.expire.minutes",
                              value=orig_expire
                              )

        # Restart management server
        self.RestartServer()
        time.sleep(120)

        # Step 2
        Configurations.update(self.apiclient,
                              name="concurrent.snapshots.threshold.perhost",
                              value="3"
                              )

        Configurations.update(self.apiclient,
                              name="job.expire.minutes",
                              value="1"
                              )

        # Restart management server
        self.RestartServer()
        time.sleep(120)

        configs = Configurations.list(
            self.apiclient,
            name="job.expire.minutes")

        with self.assertRaises(Exception):
            CreateSnapshot(self, self.root_pool[0], False)

        Configurations.update(self.apiclient,
                              name="job.expire.minutes",
                              value=orig_expire
                              )

        # Restart management server
        self.RestartServer()
        time.sleep(120)

        # Step 3
        configs = Configurations.list(
            self.apiclient,
            name="job.cancel.threshold.minutes")
        orig_cancel = configs[0].value

        Configurations.update(self.apiclient,
                              name="concurrent.snapshots.threshold.perhost",
                              value="3"
                              )

        Configurations.update(self.apiclient,
                              name="job.cancel.threshold.minutes",
                              value="1"
                              )

        self.RestartServer()
        time.sleep(120)

        configs = Configurations.list(
            self.apiclient,
            name="job.expire.minutes")

        with self.assertRaises(Exception):
            CreateSnapshot(self, self.root_pool[0], False)

        Configurations.update(self.apiclient,
                              name="job.cancel.threshold.minutes",
                              value=orig_cancel
                              )

        self.RestartServer()
        time.sleep(120)
        # Step 4
        Configurations.update(self.apiclient,
                              name="concurrent.snapshots.threshold.perhost",
                              value="3"
                              )

        self.RestartServer()
        time.sleep(120)

        Configurations.update(self.apiclient,
                              name="job.cancel.threshold.minutes",
                              value="1"
                              )

        self.RestartServer()
        time.sleep(120)

        with self.assertRaises(Exception):
            CreateSnapshot(self, self.root_pool[0], True)

        return
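
test_02_concurrent_snapshots_configuration above delegates to a module-level CreateSnapshot helper that the snippet does not include. A rough sketch under the assumption that the helper simply creates one snapshot on the given root volume (the meaning of the third flag is assumed; only the one-off path is shown):

def CreateSnapshot(self, root_volume, is_recurring):
    """Sketch: create a single snapshot on root_volume.

    is_recurring is assumed to select a recurring snapshot policy instead of
    a one-off snapshot; that branch is omitted here.
    """
    # assumes: from marvin.lib.base import Snapshot
    snapshot = Snapshot.create(self.apiclient, root_volume.id)
    self.assertIsNotNone(snapshot, "Snapshot creation failed")
    return snapshot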
    def test_01_concurrent_snapshot_global_limit(self):
        """ Test if global value concurrent.snapshots.threshold.perhost
            value respected
            This is positive test cases and tests if we are able to create
            as many snapshots mentioned in global value
        # 1. Create an account and a VM in it
        # 2. Read the global value for concurrent.snapshots.threshold.perhost
        # 3. If the value is Null, create at least 10 concurrent snapshots
             and verify they are created successfully
        # 4. Else, create as many snapshots specified in the global value, and
             verify they are created successfully
        """

        # Create an account
        account = Account.create(
            self.apiclient,
            self.testdata["account"],
            domainid=self.domain.id
        )

        self.cleanup.append(account)
        # Create user api client of the account
        userapiclient = self.testClient.getUserApiClient(
            UserName=account.name,
            DomainName=account.domain
        )

        # Create VM
        virtual_machine = VirtualMachine.create(
            userapiclient,
            self.testdata["small"],
            templateid=self.template.id,
            accountid=account.name,
            domainid=account.domainid,
            serviceofferingid=self.service_offering.id,
            zoneid=self.zone.id
        )

        # Create 10 concurrent snapshots by default
        # We can have any value, so keeping it 10 as it
        # seems good enough to test
        concurrentSnapshots = 10

        # Step 1
        # Get ROOT Volume Id
        volumes = Volume.list(
            self.apiclient,
            virtualmachineid=virtual_machine.id,
            type='ROOT',
            listall=True
        )

        self.assertEqual(validateList(volumes)[0], PASS,
                         "Volumes list validation failed")

        root_volume = volumes[0]

        config = Configurations.list(
            self.apiclient,
            name="concurrent.snapshots.threshold.perhost"
        )
        self.assertEqual(
            isinstance(
                config,
                list),
            True,
            "concurrent.snapshots.threshold.perhost should be present\
                    in global config")
        if config[0].value:
            concurrentSnapshots = int(config[0].value)
        self.debug("concurrent Snapshots: %s" % concurrentSnapshots)

        threads = []
        for i in range(0, (concurrentSnapshots)):
            thread = Thread(
                target=Snapshot.create,
                args=(
                    self.apiclient,
                    root_volume.id
                ))
            threads.append(thread)
            thread.start()
        for thread in threads:
            thread.join()

        snapshots = Snapshot.list(self.apiclient,
                                  volumeid=root_volume.id,
                                  listall=True)

        self.assertEqual(validateList(snapshots)[0], PASS,
                         "Snapshots list validation failed")
        self.assertEqual(
            len(snapshots),
            concurrentSnapshots,
            "There should be exactly %s snapshots present" %
            concurrentSnapshots)

        for snapshot in snapshots:
            self.assertEqual(str(snapshot.state).lower(), BACKED_UP,
                             "Snapshot state should be backedUp but it is\
                            %s" % snapshot.state)
        return
 def is_ssh_enabled(cls):
     conf = Configurations.list(cls.apiclient, name="kvm.ssh.to.agent")
     if not conf:
         return False
     else:
         return bool(strtobool(conf[0].value)) if conf[0].value else False
    def test_01_VRServiceFailureAlerting(self):

        if self.zone.networktype == "Basic":
            list_router_response = list_routers(
                self.apiclient,
                listall="true"
            )
        else:
            list_router_response = list_routers(
                self.apiclient,
                account=self.account.name,
                domainid=self.account.domainid
            )
        self.assertEqual(
            isinstance(list_router_response, list),
            True,
            "Check list response returns a valid list"
        )
        router = list_router_response[0]

        self.debug("Router ID: %s, state: %s" % (router.id, router.state))

        self.assertEqual(
            router.state,
            'Running',
            "Check list router response for router state"
        )

        alertSubject = "Monitoring Service on VR " + router.name

        if self.hypervisor.lower() in ('vmware', 'hyperv'):
            result = get_process_status(
                self.apiclient.connection.mgtSvr,
                22,
                self.apiclient.connection.user,
                self.apiclient.connection.passwd,
                router.linklocalip,
                "service dnsmasq stop",
                hypervisor=self.hypervisor
            )
        else:
            try:
                hosts = list_hosts(
                    self.apiclient,
                    zoneid=router.zoneid,
                    type='Routing',
                    state='Up',
                    id=router.hostid
                )

                self.assertEqual(
                    isinstance(hosts, list),
                    True,
                    "Check list host returns a valid list"
                )

                host = hosts[0]
                result = get_process_status(
                    host.ipaddress,
                    22,
                    self.services["configurableData"]["host"]["username"],
                    self.services["configurableData"]["host"]["password"],
                    router.linklocalip,
                    "service apache2 stop"
                )

            except Exception as e:
                raise Exception("Exception raised in getting host\
                        credentials: %s " % e)

        res = str(result)
        self.debug("apache process status: %s" % res)

        configs = Configurations.list(
                self.apiclient,
                name='router.alerts.check.interval'
            )

        # Add 10 minutes (600 seconds) to the configured interval
        # to stay on the safe side
        waitingPeriod = (
                int(configs[0].value) + 600)  # in seconds

        time.sleep(waitingPeriod)
        # Wait for router.alerts.check.interval + 10 minutes; meanwhile the
        # monitoring service on the VR restarts the stopped service
        # (router.alerts.check.interval defaults to 30 minutes)

        qresultset = self.dbclient.execute(
            "select id from alert where subject = '%s' ORDER BY id DESC LIMIT 1;" %
            str(alertSubject))
        self.assertNotEqual(
            len(qresultset),
            0,
            "Check DB Query result set"
        )
        return
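
The fixed time.sleep(waitingPeriod) above blocks for the whole interval even when the alert is written early. A polling variant of the same DB check, reusing alertSubject and self.dbclient from the test above (the 60-second poll step is an arbitrary choice):

        # Sketch: poll for the alert instead of sleeping for the whole period.
        deadline = time.time() + waitingPeriod
        qresultset = []
        while time.time() < deadline:
            qresultset = self.dbclient.execute(
                "select id from alert where subject = '%s' ORDER BY id DESC LIMIT 1;" %
                str(alertSubject))
            if qresultset:
                break
            time.sleep(60)
        self.assertNotEqual(len(qresultset), 0, "Check DB Query result set")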
Example n. 43
0
    def check_routers_state(self,
                            count=2,
                            status_to_check="MASTER",
                            expected_count=1,
                            showall=False):
        vals = ["MASTER", "BACKUP", "UNKNOWN", "TESTFAILED"]
        cnts = [0, 0, 0, 0]

        result = "TESTFAILED"
        self.logger.debug(
            'check_routers_state count: %s, status_to_check: %s, expected_count: %s, showall: %s'
            % (count, status_to_check, expected_count, showall))

        vrrp_interval = Configurations.list(
            self.apiclient, name="router.redundant.vrrp.interval")
        self.logger.debug("router.redundant.vrrp.interval is ==> %s" %
                          vrrp_interval)

        total_sleep = 20
        if vrrp_interval:
            total_sleep = (int(vrrp_interval[0].value) * 4) + 10
        else:
            self.logger.debug(
                "Could not retrieve the key 'router.redundant.vrrp.interval'. Sleeping for 10 seconds."
            )
        '''
        Sleep (router.redundant.vrrp.interval * 4) seconds here because VRRP will have to be reconfigured. Due to the configuration changes,
        it will start a new election and that will take ~4 multiplied by the advertisement interval seconds. Next to that, we need some time
        for the router to be reconfigured, so adding 10 seconds to be on the safe side.
        '''
        time.sleep(total_sleep)

        self.query_routers(count, showall)
        for router in self.routers:
            if router.state == "Running":
                hosts = list_hosts(self.apiclient,
                                   zoneid=router.zoneid,
                                   type='Routing',
                                   state='Up',
                                   id=router.hostid)
                self.assertEqual(isinstance(hosts, list), True,
                                 "Check list host returns a valid list")
                host = hosts[0]

                try:
                    for _ in range(5):
                        host.user, host.passwd = get_host_credentials(
                            self.config, host.ipaddress)
                        result = str(
                            get_process_status(
                                host.ipaddress, 22, host.user, host.passwd,
                                router.linklocalip,
                                "sh /opt/cosmic/router/scripts/checkrouter.sh "
                            ))

                        self.logger.debug(
                            'check_routers_state router: %s, result: %s' %
                            (router.name, result))

                        if result.count(status_to_check) == 1:
                            cnts[vals.index(status_to_check)] += 1
                            break
                        elif result.count("UNKNOWN") == 1:
                            time.sleep(5)
                        else:
                            break

                except KeyError:
                    self.skipTest(
                        "Marvin configuration has no host credentials to\
                                check router services")

        if cnts[vals.index(status_to_check)] != expected_count:
            self.fail(
                "Expected '%s' router[s] at state '%s', but found '%s'! Result: %s"
                % (expected_count, status_to_check,
                   cnts[vals.index(status_to_check)], result))
    def test_nested_virtualization_vmware(self):
        """Test nested virtualization on Vmware hypervisor"""
        if self.hypervisor.lower() not in ["vmware"]:
             self.skipTest("Skipping test because suitable hypervisor/host not present")
             
        # 1) Update nested virtualization configurations, if needed
        configs = Configurations.list(self.apiclient, name="vmware.nested.virtualization")
        rollback_nv = False
        rollback_nv_per_vm = False
        for conf in configs:
            if (conf.name == "vmware.nested.virtualization" and conf.value == "false"):
                config_update = Configurations.update(self.apiclient, "vmware.nested.virtualization", "true")
                self.logger.debug("Updated global setting vmware.nested.virtualization to true")
                rollback_nv = True
            elif (conf.name == "vmware.nested.virtualization.perVM" and conf.value == "false"):
                config_update = Configurations.update(self.apiclient, "vmware.nested.virtualization.perVM", "true")
                self.logger.debug("Updated global setting vmware.nested.virtualization.perVM to true")
                rollback_nv_per_vm = True
                
        # 2) Deploy a vm
        virtual_machine = VirtualMachine.create(
            self.apiclient,
            self.services["small"],
            accountid=self.account.name,
            domainid=self.account.domainid,
            serviceofferingid=self.service_offering.id,
            mode=self.services['mode']
        )
        self.assert_(virtual_machine is not None, "VM failed to deploy")
        self.assert_(virtual_machine.state == 'Running', "VM is not running")
        self.logger.debug("Deployed vm: %s" % virtual_machine.id)
        
        isolated_network = Network.create(
            self.apiclient,
            self.services["isolated_network"],
            self.account.name,
            self.account.domainid,
            networkofferingid=self.isolated_network_offering.id)

        virtual_machine.add_nic(self.apiclient, isolated_network.id)
        
        # 3) SSH into vm
        ssh_client = virtual_machine.get_ssh_client()

        if ssh_client:
            # check CPU flags to confirm nested virtualization is exposed to the guest
            result = ssh_client.execute("cat /proc/cpuinfo | grep flags")
            self.logger.debug(result)
        else:
            self.fail("Failed to setup ssh connection to %s" % virtual_machine.public_ip)
            
        # 4) Revert configurations, if needed
        if rollback_nv:
            config_update = Configurations.update(self.apiclient, "vmware.nested.virtualization", "false")
            self.logger.debug("Reverted global setting vmware.nested.virtualization back to false")
        if rollback_nv_per_vm:
            config_update = Configurations.update(self.apiclient, "vmware.nested.virtualization.perVM", "false")
            self.logger.debug("Reverted global setting vmware.nested.virtualization.perVM back to false")
            
        #5) Check for CPU flags: vmx for Intel and svm for AMD indicates nested virtualization is enabled
        self.assert_(result is not None, "Empty result for CPU flags")
        res = str(result)
        self.assertTrue('vmx' in res or 'svm' in res)
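
In the test above the configuration rollback runs before the final CPU-flag assertions, and any failure between the update and the revert would leave vmware.nested.virtualization changed. A sketch of the same update/revert wrapped in try/finally so the rollback always runs (names follow the test above; this is not the original code):

        # Sketch: make the global-setting rollback unconditional.
        rollback_nv = False
        configs = Configurations.list(self.apiclient, name="vmware.nested.virtualization")
        try:
            for conf in configs:
                if conf.name == "vmware.nested.virtualization" and conf.value == "false":
                    Configurations.update(self.apiclient, "vmware.nested.virtualization", "true")
                    rollback_nv = True
            # ... deploy the VM and run the CPU-flag checks here ...
        finally:
            if rollback_nv:
                Configurations.update(self.apiclient, "vmware.nested.virtualization", "false")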
    def test_network_gc(self):
        """Test network garbage collection with RVR
        """

        # Steps to validate
        # 1. createNetwork using network offering for redundant virtual router
        # 2. listRouters in above network
        # 3. deployVM in above user account in the created network
        # 4. stop the running user VM
        # 5. wait for network.gc time
        # 6. listRouters
        # 7. start the MASTER and BACKUP routers
        # 8. wait for network.gc time and listRouters
        # 9. delete the account

        # Creating network using the network offering created
        self.debug("Creating network with network offering: %s" %
                                                    self.network_offering.id)
        network = Network.create(
                                self.apiclient,
                                self.services["network"],
                                accountid=self.account.name,
                                domainid=self.account.domainid,
                                networkofferingid=self.network_offering.id,
                                zoneid=self.zone.id
                                )
        self.debug("Created network with ID: %s" % network.id)

        networks = Network.list(
                                self.apiclient,
                                id=network.id,
                                listall=True
                                )
        self.assertEqual(
            isinstance(networks, list),
            True,
            "List networks should return a valid response for created network"
             )
        nw_response = networks[0]

        self.debug("Network state: %s" % nw_response.state)
        self.assertEqual(
                    nw_response.state,
                    "Allocated",
                    "The network should be in allocated state after creation"
                    )

        self.debug("Listing routers for network: %s" % network.name)
        routers = Router.list(
                              self.apiclient,
                              networkid=network.id,
                              listall=True
                              )
        self.assertEqual(
            routers,
            None,
            "Routers should not be spawned when network is in allocated state"
            )

        self.debug("Deploying VM in account: %s" % self.account.name)

        # Spawn an instance in that network
        virtual_machine = VirtualMachine.create(
                                  self.apiclient,
                                  self.services["virtual_machine"],
                                  accountid=self.account.name,
                                  domainid=self.account.domainid,
                                  serviceofferingid=self.service_offering.id,
                                  networkids=[str(network.id)]
                                  )
        self.debug("Deployed VM in network: %s" % network.id)

        vms = VirtualMachine.list(
                                  self.apiclient,
                                  id=virtual_machine.id,
                                  listall=True
                                  )
        self.assertEqual(
                         isinstance(vms, list),
                         True,
                         "List Vms should return a valid list"
                         )
        vm = vms[0]
        self.assertEqual(
                         vm.state,
                         "Running",
                         "Vm should be in running state after deployment"
                         )

        self.debug("Listing routers for network: %s" % network.name)
        routers = Router.list(
                              self.apiclient,
                              networkid=network.id,
                              listall=True
                              )
        self.assertEqual(
                    isinstance(routers, list),
                    True,
                    "list router should return Master and backup routers"
                    )
        self.assertEqual(
                    len(routers),
                    2,
                    "Length of the list router should be 2 (Backup & master)"
                    )

        self.debug("Stopping the user VM: %s" % virtual_machine.name)

        try:
            virtual_machine.stop(self.apiclient)
        except Exception as e:
            self.fail("Failed to stop guest Vm: %s - %s" %
                                            (virtual_machine.name, e))

        interval = Configurations.list(
                                    self.apiclient,
                                    name='network.gc.interval'
                                    )
        delay = int(interval[0].value)
        interval = Configurations.list(
                                    self.apiclient,
                                    name='network.gc.wait'
                                    )
        exp = int(interval[0].value)

        self.debug("Sleeping for network gc wait + interval time")
        # Sleep to ensure that all resources are deleted
        time.sleep((delay + exp) * 2)

        routers = Router.list(
                              self.apiclient,
                              networkid=network.id,
                              listall=True
                              )
        self.assertEqual(
                    isinstance(routers, list),
                    True,
                    "list router should return Master and backup routers"
                    )
        for router in routers:
            self.assertEqual(
                             router.state,
                             "Stopped",
                             "Router should be in stopped state"
                             )
            self.debug("Starting the stopped router again")
            cmd = startRouter.startRouterCmd()
            cmd.id = router.id
            self.apiclient.startRouter(cmd)

        routers = Router.list(
                              self.apiclient,
                              networkid=network.id,
                              listall=True
                              )
        self.assertEqual(
                    isinstance(routers, list),
                    True,
                    "list router should return Master and backup routers"
                    )
        for router in routers:
            self.assertEqual(
                             router.state,
                             "Running",
                             "Router should be in running state"
                             )

        self.debug("Sleeping for network gc wait + interval time")
        # Sleep to ensure that all resources are deleted
        time.sleep((delay + exp) * 3)

        routers = Router.list(
                              self.apiclient,
                              networkid=network.id,
                              listall=True
                              )
        self.assertEqual(
                    isinstance(routers, list),
                    True,
                    "list router should return Master and backup routers"
                    )
        for router in routers:
            self.assertEqual(
                             router.state,
                             "Stopped",
                             "Router should be in stopped state"
                             )
        return
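
The fixed sleeps of (network.gc.interval + network.gc.wait) multiplied by 2 or 3 above make the test slow and potentially flaky. A sketch of a polling helper that returns as soon as every router of the network reaches the expected state (helper name, attempt count and poll step are assumptions):

    def wait_for_router_state(self, network_id, expected_state, attempts=10, step=30):
        """Sketch: poll the routers of a network until they all reach expected_state."""
        for _ in range(attempts):
            routers = Router.list(self.apiclient, networkid=network_id, listall=True)
            if routers and all(router.state == expected_state for router in routers):
                return routers
            time.sleep(step)
        self.fail("Routers did not reach state %s within %s attempts" % (expected_state, attempts))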
Example n. 46
0
    def setUpClass(cls):
        cls.testClient = super(TestKubernetesCluster, cls).getClsTestClient()
        cls.apiclient = cls.testClient.getApiClient()
        cls.services = cls.testClient.getParsedTestDataConfig()
        cls.zone = get_zone(cls.apiclient, cls.testClient.getZoneForTests())
        cls.hypervisor = cls.testClient.getHypervisorInfo()
        cls.mgtSvrDetails = cls.config.__dict__["mgtSvr"][0].__dict__
        cls.cks_template_name_key = "cloud.kubernetes.cluster.template.name." + cls.hypervisor.lower(
        )

        cls.setup_failed = False

        cls.initial_configuration_cks_enabled = Configurations.list(
            cls.apiclient, name="cloud.kubernetes.service.enabled")[0].value
        if cls.initial_configuration_cks_enabled not in ["true", True]:
            cls.debug(
                "Enabling CloudStack Kubernetes Service plugin and restarting management server"
            )
            Configurations.update(cls.apiclient,
                                  "cloud.kubernetes.service.enabled", "true")
            cls.restartServer()

        cls.cks_template = None
        cls.initial_configuration_cks_template_name = None
        cls.cks_service_offering = None

        cls.kubernetes_version_ids = []
        if cls.setup_failed == False:
            try:
                cls.kuberetes_version_1 = cls.addKubernetesSupportedVersion(
                    '1.14.9',
                    'http://staging.yadav.xyz/cks/binaries-iso/setup-1.14.9.iso'
                )
                cls.kubernetes_version_ids.append(cls.kuberetes_version_1.id)
            except Exception as e:
                cls.setup_failed = True
                cls.debug(
                    "Failed to get Kubernetes version ISO in ready state, http://staging.yadav.xyz/cks/binaries-iso/setup-1.14.9.iso, %s"
                    % e)
        if cls.setup_failed == False:
            try:
                cls.kuberetes_version_2 = cls.addKubernetesSupportedVersion(
                    '1.15.0',
                    'http://staging.yadav.xyz/cks/binaries-iso/setup-1.15.0.iso'
                )
                cls.kubernetes_version_ids.append(cls.kuberetes_version_2.id)
            except Exception as e:
                cls.setup_failed = True
                cls.debug(
                    "Failed to get Kubernetes version ISO in ready state, http://staging.yadav.xyz/cks/binaries-iso/setup-1.15.0.iso, %s"
                    % e)
        if cls.setup_failed == False:
            try:
                cls.kuberetes_version_3 = cls.addKubernetesSupportedVersion(
                    '1.16.0',
                    'http://staging.yadav.xyz/cks/binaries-iso/setup-1.16.0.iso'
                )
                cls.kubernetes_version_ids.append(cls.kuberetes_version_3.id)
            except Exception as e:
                cls.setup_failed = True
                cls.debug(
                    "Failed to get Kubernetes version ISO in ready state, http://staging.yadav.xyz/cks/binaries-iso/setup-1.16.0.is, %s"
                    % e)
        if cls.setup_failed == False:
            try:
                cls.kuberetes_version_4 = cls.addKubernetesSupportedVersion(
                    '1.16.3',
                    'http://staging.yadav.xyz/cks/binaries-iso/setup-1.16.3.iso'
                )
                cls.kubernetes_version_ids.append(cls.kuberetes_version_4.id)
            except Exception as e:
                cls.setup_failed = True
                cls.debug(
                    "Failed to get Kubernetes version ISO in ready state, http://staging.yadav.xyz/cks/binaries-iso/setup-1.16.3.is, %s"
                    % e)

        cks_template_data = {
            "name": "Kubernetes-Service-Template",
            "displaytext": "Kubernetes-Service-Template",
            "format": "qcow2",
            "hypervisor": "kvm",
            "ostype": "CoreOS",
            "url":
            "http://staging.yadav.xyz/cks/templates/coreos_production_cloudstack_image-kvm.qcow2.bz2",
            "ispublic": "True",
            "isextractable": "True"
        }
        # "http://dl.openvm.eu/cloudstack/coreos/x86_64/coreos_production_cloudstack_image-kvm.qcow2.bz2"
        cks_template_data_details = []
        if cls.hypervisor.lower() == "vmware":
            cks_template_data[
                "url"] = "http://staging.yadav.xyz/cks/templates/coreos_production_cloudstack_image-vmware.ova"  # "http://dl.openvm.eu/cloudstack/coreos/x86_64/coreos_production_cloudstack_image-vmware.ova"
            cks_template_data["format"] = "OVA"
            cks_template_data_details = [{
                "keyboard": "us",
                "nicAdapter": "Vmxnet3",
                "rootDiskController": "pvscsi"
            }]
        elif cls.hypervisor.lower() == "xenserver":
            cks_template_data[
                "url"] = "http://staging.yadav.xyz/cks/templates/coreos_production_cloudstack_image-xen.vhd.bz2"  # "http://dl.openvm.eu/cloudstack/coreos/x86_64/coreos_production_cloudstack_image-xen.vhd.bz2"
            cks_template_data["format"] = "VHD"
        elif cls.hypervisor.lower() == "kvm":
            cks_template_data["requireshvm"] = "True"
        if cls.setup_failed == False:
            cls.cks_template = Template.register(
                cls.apiclient,
                cks_template_data,
                zoneid=cls.zone.id,
                hypervisor=cls.hypervisor,
                details=cks_template_data_details)
            cls.debug("Waiting for CKS template with ID %s to be ready" %
                      cls.cks_template.id)
            try:
                cls.waitForTemplateReadyState(cls.cks_template.id)
            except Exception as e:
                cls.setup_failed = True
                cls.debug(
                    "Failed to get CKS template in ready state, {}, {}".format(
                        cks_template_data["url"], e))

            cls.initial_configuration_cks_template_name = Configurations.list(
                cls.apiclient, name=cls.cks_template_name_key)[0].value
            Configurations.update(cls.apiclient, cls.cks_template_name_key,
                                  cls.cks_template.name)

        cks_offering_data = {
            "name": "CKS-Instance",
            "displaytext": "CKS Instance",
            "cpunumber": 2,
            "cpuspeed": 1000,
            "memory": 2048,
        }
        cks_offering_data[
            "name"] = cks_offering_data["name"] + '-' + random_gen()
        if cls.setup_failed == False:
            cls.cks_service_offering = ServiceOffering.create(
                cls.apiclient, cks_offering_data)

        cls._cleanup = []
        if cls.cks_template != None:
            cls._cleanup.append(cls.cks_template)
        if cls.cks_service_offering != None:
            cls._cleanup.append(cls.cks_service_offering)
        return
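
setUpClass above calls waitForTemplateReadyState, which is not part of the snippet. A minimal polling sketch, assuming it just checks the isready flag of the registered template via Template.list (retry count and interval are arbitrary):

    @classmethod
    def waitForTemplateReadyState(cls, template_id, retries=30, interval=60):
        """Sketch: wait until the registered template reports isready == True."""
        for _ in range(retries):
            templates = Template.list(cls.apiclient, id=template_id,
                                      templatefilter="all", listall=True)
            if templates and templates[0].isready:
                return
            time.sleep(interval)
        raise Exception("Template %s did not become ready after %d checks" % (template_id, retries))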
 def test01_template_download_URL_expire(self):
     """
     @Desc:Template files are deleted from secondary storage after download URL expires
     Step1:Deploy vm with default cent os template
     Step2:Stop the vm
     Step3:Create template from the vm's root volume
     Step4:Extract Template and wait for the download url to expire
     Step5:Deploy another vm with the template created at Step3
     Step6:Verify that vm deployment succeeds
     """
     params = [
         'extract.url.expiration.interval', 'extract.url.cleanup.interval'
     ]
     wait_time = 0
     for param in params:
         config = Configurations.list(
             self.apiClient,
             name=param,
         )
         self.assertEqual(
             validateList(config)[0], PASS,
             "Config list returned invalid response")
         wait_time = wait_time + int(config[0].value)
     self.debug("Total wait time for url expiry: %s" % wait_time)
     # Creating Virtual Machine
     self.virtual_machine = VirtualMachine.create(
         self.userapiclient,
         self.services["virtual_machine"],
         accountid=self.account.name,
         domainid=self.account.domainid,
         serviceofferingid=self.service_offering.id,
     )
     self.assertIsNotNone(self.virtual_machine,
                          "Virtual Machine creation failed")
     self.cleanup.append(self.virtual_machine)
     #Stop virtual machine
     self.virtual_machine.stop(self.userapiclient)
     list_volume = Volume.list(self.userapiclient,
                               virtualmachineid=self.virtual_machine.id,
                               type='ROOT',
                               listall=True)
     self.assertEqual(
         validateList(list_volume)[0], PASS,
         "list volumes with type ROOT returned invalid list")
     self.volume = list_volume[0]
     self.create_template = Template.create(self.userapiclient,
                                            self.services["template"],
                                            volumeid=self.volume.id,
                                            account=self.account.name,
                                            domainid=self.account.domainid)
     self.assertIsNotNone(self.create_template,
                          "Failed to create template from root volume")
     self.cleanup.append(self.create_template)
     """
     Extract template
     """
     try:
         Template.extract(self.userapiclient, self.create_template.id,
                          'HTTP_DOWNLOAD', self.zone.id)
     except Exception as e:
         self.fail("Extract template failed with error %s" % e)
     self.debug("Waiting for %s seconds for url to expire" %
                repr(wait_time + 20))
     time.sleep(wait_time + 20)
     self.debug("Waited for %s seconds for url to expire" %
                repr(wait_time + 20))
     """
     Deploy vm with the template created from the volume. After url expiration interval only
     url should be deleted not the template. To validate this deploy vm with the template
     """
     try:
         self.vm = VirtualMachine.create(
             self.userapiclient,
             self.services["virtual_machine"],
             accountid=self.account.name,
             domainid=self.account.domainid,
             serviceofferingid=self.service_offering.id,
             templateid=self.create_template.id)
         self.cleanup.append(self.vm)
     except Exception as e:
         self.fail("Template is automatically deleted after URL expired.\
                   So vm deployment failed with error: %s" % e)
     return
Example n. 48
0
    def test_deployVmWithCustomDisk(self):
        """Test custom disk sizes beyond range
        """
        # Steps for validation
        # 1. listConfigurations - custom.diskoffering.size.min
        #    and custom.diskoffering.size.max
        # 2. deployVm with custom disk offering size < min
        # 3. deployVm with custom disk offering min< size < max
        # 4. deployVm with custom disk offering size > max
        # Validate the following
        # 2. and 4. of deploy VM should fail.
        #    Only case 3. should succeed.
        #    cleanup all created data disks from the account

        config = Configurations.list(
            self.apiclient,
            name="custom.diskoffering.size.min"
        )
        self.assertEqual(
            isinstance(config, list),
            True,
            "custom.diskoffering.size.min should be present in global config"
        )
        # minimum size of custom disk (in GBs)
        min_size = int(config[0].value)
        self.debug("custom.diskoffering.size.min: %s" % min_size)

        config = Configurations.list(
            self.apiclient,
            name="custom.diskoffering.size.max"
        )
        self.assertEqual(
            isinstance(config, list),
            True,
            "custom.diskoffering.size.min should be present in global config"
        )
        # maximum size of custom disk (in GBs)
        max_size = int(config[0].value)
        self.debug("custom.diskoffering.size.max: %s" % max_size)

        self.debug("Creating a volume with size less than min cust disk size")
        self.services["custom_volume"]["customdisksize"] = (min_size - 1)
        self.services["custom_volume"]["zoneid"] = self.zone.id
        with self.assertRaises(Exception):
            Volume.create_custom_disk(
                self.apiclient,
                self.services["custom_volume"],
                account=self.account.name,
                domainid=self.account.domainid,
                diskofferingid=self.disk_offering.id
            )
        self.debug("Create volume failed!")

        self.debug("Creating a volume with size more than max cust disk size")
        self.services["custom_volume"]["customdisksize"] = (max_size + 1)
        with self.assertRaises(Exception):
            Volume.create_custom_disk(
                self.apiclient,
                self.services["custom_volume"],
                account=self.account.name,
                domainid=self.account.domainid,
                diskofferingid=self.disk_offering.id
            )
        self.debug("Create volume failed!")

        self.debug("Creating a volume with size more than min cust disk " +
                   "but less than max cust disk size"
                   )
        self.services["custom_volume"]["customdisksize"] = (min_size + 1)
        try:
            Volume.create_custom_disk(
                self.apiclient,
                self.services["custom_volume"],
                account=self.account.name,
                domainid=self.account.domainid,
                diskofferingid=self.disk_offering.id
            )
            self.debug("Create volume of cust disk size succeeded")
        except Exception as e:
            self.fail("Create volume failed with exception: %s" % e)
        return
    def test_nested_virtualization_vmware(self):
        """Test nested virtualization on Vmware hypervisor"""
        if self.hypervisor.lower() not in ["vmware"]:
            self.skipTest(
                "Skipping test because suitable hypervisor/host not present")

        # 1) Update nested virtualization configurations, if needed
        configs = Configurations.list(self.apiclient,
                                      name="vmware.nested.virtualization")
        rollback_nv = False
        rollback_nv_per_vm = False
        for conf in configs:
            if (conf.name == "vmware.nested.virtualization"
                    and conf.value == "false"):
                config_update = Configurations.update(
                    self.apiclient, "vmware.nested.virtualization", "true")
                self.logger.debug(
                    "Updated global setting vmware.nested.virtualization to true"
                )
                rollback_nv = True
            elif (conf.name == "vmware.nested.virtualization.perVM"
                  and conf.value == "false"):
                config_update = Configurations.update(
                    self.apiclient, "vmware.nested.virtualization.perVM",
                    "true")
                self.logger.debug(
                    "Updated global setting vmware.nested.virtualization.perVM to true"
                )
                rollback_nv_per_vm = True

        # 2) Deploy a vm
        virtual_machine = VirtualMachine.create(
            self.apiclient,
            self.services["small"],
            accountid=self.account.name,
            domainid=self.account.domainid,
            serviceofferingid=self.service_offering.id,
            mode=self.services['mode'])
        self.assert_(virtual_machine is not None, "VM failed to deploy")
        self.assert_(virtual_machine.state == 'Running', "VM is not running")
        self.logger.debug("Deployed vm: %s" % virtual_machine.id)

        isolated_network = Network.create(
            self.apiclient,
            self.services["isolated_network"],
            self.account.name,
            self.account.domainid,
            networkofferingid=self.isolated_network_offering.id)

        virtual_machine.add_nic(self.apiclient, isolated_network.id)

        # 3) SSH into vm
        ssh_client = virtual_machine.get_ssh_client()

        if ssh_client:
            # check CPU flags to confirm nested virtualization is exposed to the guest
            result = ssh_client.execute("cat /proc/cpuinfo | grep flags")
            self.logger.debug(result)
        else:
            self.fail("Failed to setup ssh connection to %s" %
                      virtual_machine.public_ip)

        # 4) Revert configurations, if needed
        if rollback_nv:
            config_update = Configurations.update(
                self.apiclient, "vmware.nested.virtualization", "false")
            self.logger.debug(
                "Reverted global setting vmware.nested.virtualization back to false"
            )
        if rollback_nv_per_vm:
            config_update = Configurations.update(
                self.apiclient, "vmware.nested.virtualization.perVM", "false")
            self.logger.debug(
                "Reverted global setting vmware.nested.virtualization.perVM back to false"
            )

        #5) Check for CPU flags: vmx for Intel and svm for AMD indicates nested virtualization is enabled
        self.assert_(result is not None, "Empty result for CPU flags")
        res = str(result)
        self.assertTrue('vmx' in res or 'svm' in res)
    def setUpClass(cls):
        cls.testClient = super(TestHostHighAvailability, cls).getClsTestClient()
        cls.api_client = cls.testClient.getApiClient()

        cls.services = Services().services
        # Get Zone, Domain and templates
        cls.domain = get_domain(cls.api_client)
        cls.zone = get_zone(cls.api_client, cls.testClient.getZoneForTests())

        cls.template = get_template(
            cls.api_client,
            cls.zone.id,
            cls.services["ostype"]
        )
        cls.hypervisor = cls.testClient.getHypervisorInfo()
        if cls.hypervisor.lower() in ['lxc']:
            raise unittest.SkipTest("Template creation from root volume is not supported in LXC")


        clusterWithSufficientHosts = None
        clusters = Cluster.list(cls.api_client, zoneid=cls.zone.id)
        for cluster in clusters:
            cls.hosts = Host.list(cls.api_client, clusterid=cluster.id, type="Routing")
            if len(cls.hosts) >= 3:
                clusterWithSufficientHosts = cluster
                break

        if clusterWithSufficientHosts is None:
            raise unittest.SkipTest("No Cluster with 3 hosts found")

        configs = Configurations.list(
                                      cls.api_client,
                                      name='ha.tag'
                                      )

        assert isinstance(configs, list), "Config list not\
                retrieved for ha.tag"

        if configs[0].value != "ha":
            raise unittest.SkipTest("Please set the global config\
                    value for ha.tag as 'ha'")

        Host.update(cls.api_client, id=cls.hosts[2].id, hosttags="ha")

        cls.services["virtual_machine"]["zoneid"] = cls.zone.id
        cls.services["virtual_machine"]["template"] = cls.template.id

        cls.service_offering_with_ha = ServiceOffering.create(
            cls.api_client,
            cls.services["service_offering_with_ha"],
            offerha=True
        )

        cls.service_offering_without_ha = ServiceOffering.create(
            cls.api_client,
            cls.services["service_offering_without_ha"],
            offerha=False
        )

        cls._cleanup = [
            cls.service_offering_with_ha,
            cls.service_offering_without_ha,
        ]
        return
    def setUpClass(cls):
        testClient = super(TestDeltaSnapshots, cls).getClsTestClient()
        cls.apiclient = testClient.getApiClient()
        cls.testdata = testClient.getParsedTestDataConfig()
        cls.hypervisor = cls.testClient.getHypervisorInfo()

        # Get Zone, Domain and templates
        cls.domain = get_domain(cls.apiclient)
        cls.zone = get_zone(cls.apiclient, testClient.getZoneForTests())

        cls.template = get_template(
            cls.apiclient,
            cls.zone.id,
            cls.testdata["ostype"])

        cls.snapshots_created = []
        cls._cleanup = []

        cls.mgtSvrDetails = cls.config.__dict__["mgtSvr"][0].__dict__
        cls.skiptest = False

        if cls.hypervisor.lower() not in ["xenserver"]:
            cls.skiptest = True

        try:

            # Create an account
            cls.account = Account.create(
                cls.apiclient,
                cls.testdata["account"],
                domainid=cls.domain.id
            )
            cls._cleanup.append(cls.account)

            # Create user api client of the account
            cls.userapiclient = testClient.getUserApiClient(
                UserName=cls.account.name,
                DomainName=cls.account.domain
            )

            # Create Service offering
            cls.service_offering = ServiceOffering.create(
                cls.apiclient,
                cls.testdata["service_offering"],
            )
            cls._cleanup.append(cls.service_offering)

            if cls.testdata["configurableData"][
                    "restartManagementServerThroughTestCase"]:
                # Set snapshot delta max value
                Configurations.update(cls.apiclient,
                                      name="snapshot.delta.max",
                                      value="3"
                                      )

                # Restart management server
                cls.RestartServer()
                time.sleep(120)

            configs = Configurations.list(
                cls.apiclient,
                name="snapshot.delta.max")
            cls.delta_max = configs[0].value

            cls.vm = VirtualMachine.create(
                cls.apiclient,
                cls.testdata["small"],
                templateid=cls.template.id,
                accountid=cls.account.name,
                domainid=cls.account.domainid,
                serviceofferingid=cls.service_offering.id,
                zoneid=cls.zone.id,
                mode=cls.zone.networktype
            )

        except Exception as e:
            cls.tearDownClass()
            raise e
        return
    def test_02_concurrent_snapshot_global_limit(self):
        """ Test if global value concurrent.snapshots.threshold.perhost
            value is respected
            This is negative test cases and tests no more concurrent
            snapshots as specified in global value are created
        # 1. Read the global value for concurrent.snapshots.threshold.perhost
        # 2. If the value is Null, skip the test case
        # 3. Create an account and a VM in it
        # 4. Create more concurrent snapshots than specified in
             global allowed limit
        # 5. Verify that exception is raised while creating snapshots
        """

        config = Configurations.list(
            self.apiclient,
            name="concurrent.snapshots.threshold.perhost"
        )
        self.assertEqual(
            isinstance(
                config,
                list),
            True,
            "concurrent.snapshots.threshold.perhost should be present\
                    in global config")
        if config[0].value:
            concurrentSnapshots = int(config[0].value)
        else:
            self.skipTest("Skipping tests as the config value \
                    concurrent.snapshots.threshold.perhost is Null")

        # Create an account
        account = Account.create(
            self.apiclient,
            self.testdata["account"],
            domainid=self.domain.id
        )

        self.cleanup.append(account)
        # Create user api client of the account
        userapiclient = self.testClient.getUserApiClient(
            UserName=account.name,
            DomainName=account.domain
        )

        # Create VM
        virtual_machine = VirtualMachine.create(
            userapiclient,
            self.testdata["small"],
            templateid=self.template.id,
            accountid=account.name,
            domainid=account.domainid,
            serviceofferingid=self.service_offering.id,
            zoneid=self.zone.id
        )

        # Step 1
        # Get ROOT Volume Id
        volumes = Volume.list(
            self.apiclient,
            virtualmachineid=virtual_machine.id,
            type='ROOT',
            listall=True
        )

        self.assertEqual(validateList(volumes)[0], PASS,
                         "Volumes list validation failed")

        root_volume = volumes[0]

        threads = []
        for i in range(0, (concurrentSnapshots + 1)):
            thread = Thread(
                target=self.createSnapshot,
                args=(
                    self.apiclient,
                    root_volume.id
                ))
            threads.append(thread)
            thread.start()

        for thread in threads:
            thread.join()

        self.assertTrue(self.exceptionOccured, "Concurrent snapshots\
                more than concurrent.snapshots.threshold.perhost\
                value successfully created")
        return
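
The negative test above relies on a createSnapshot wrapper and an exceptionOccured flag that are not included in the snippet. A sketch of how such a wrapper could record a failure from a worker thread (attribute and method names follow the calls above; exceptionOccured is assumed to be initialised to False in setUp):

    def createSnapshot(self, apiclient, volumeid):
        """Sketch: create a snapshot and record any failure on the test instance."""
        try:
            Snapshot.create(apiclient, volumeid)
        except Exception as e:
            # A worker thread cannot fail the test directly, so remember the
            # error and let the main thread assert on self.exceptionOccured.
            self.debug("Snapshot creation failed: %s" % e)
            self.exceptionOccured = True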
Example n. 53
0
    def test_01_create_tier_Vmxnet3(self):
        """
            Test to create vpc tier with nic type as Vmxnet3
            #1.Set global setting parameter "vmware.systemvm.nic.device.type"
            to "Vmxnet3"
            #2.Create VPC
            #3.Create one tier
            #4.Deploy one guest vm in the tier created in step3
        """
        if self.hypervisor.lower() not in ['vmware']:
            self.skipTest("This test can only run on vmware setup")

        nic_types = Configurations.list(
            self.apiclient,
            name="vmware.systemvm.nic.device.type"
        )
        self.assertEqual(validateList(nic_types)[0], PASS, "Invalid list config")
        nic_type = nic_types[0].value
        reset = False
        if nic_type.lower() != "vmxnet3":
            self.updateConfigurAndRestart("vmware.systemvm.nic.device.type", "Vmxnet3")
            reset = True

        self.services["vpc"]["cidr"] = "10.1.1.1/16"
        self.debug("creating a VPC network in the account: %s" %
                   self.account.name)
        try:
            vpc = VPC.create(
                self.apiclient,
                self.services["vpc"],
                vpcofferingid=self.vpc_off.id,
                zoneid=self.zone.id,
                account=self.account.name,
                domainid=self.account.domainid
            )
            vpc_res = VPC.list(self.apiclient, id=vpc.id)
            self.assertEqual(validateList(vpc_res)[0], PASS, "Invalid response from listvpc")

            self.network_offering = NetworkOffering.create(
                self.apiclient,
                self.services["network_offering"],
                conservemode=False
            )
            # Enable Network offering
            self.network_offering.update(self.apiclient, state='Enabled')
            self.cleanup.append(self.network_offering)

            gateway = vpc.cidr.split('/')[0]
            # Split the cidr to retrieve gateway
            # for eg. cidr = 10.0.0.1/24
            # Gateway = 10.0.0.1
            # Creating network using the network offering created
            self.debug("Creating network with network offering: %s" %
                       self.network_offering.id)
            network = Network.create(
                self.apiclient,
                self.services["network"],
                accountid=self.account.name,
                domainid=self.account.domainid,
                networkofferingid=self.network_offering.id,
                zoneid=self.zone.id,
                gateway=gateway,
                vpcid=vpc.id
            )
            self.debug("Created network with ID: %s" % network.id)
            vm = VirtualMachine.create(
                self.apiclient,
                self.services["virtual_machine"],
                accountid=self.account.name,
                domainid=self.account.domainid,
                serviceofferingid=self.service_offering.id,
                networkids=[str(network.id)]
            )
            self.assertIsNotNone(vm, "VM creation failed")
            self.debug("Deployed VM in network: %s" % network.id)
            vm_res = VirtualMachine.list(self.apiclient, id=vm.id)
            self.assertEqual(
                validateList(vm_res)[0],
                PASS,
                "list vm returned invalid response"
            )
            vr_res = Router.list(
                self.apiclient,
                vpcid=vpc.id,
                listall="true"
            )
            self.assertEqual(validateList(vr_res)[0], PASS, "list vrs failed for vpc")
            vr_linklocal_ip = vr_res[0].linklocalip
            result = get_process_status(
                self.apiclient.connection.mgtSvr,
                22,
                self.apiclient.connection.user,
                self.apiclient.connection.passwd,
                vr_linklocal_ip,
                'lspci | grep "Ethernet controller"',
                hypervisor=self.hypervisor
            )
            self.assertEqual(
                validateList(result)[0],
                PASS,
                "We didn't find NICS with adapter type VMXNET3"
            )
            reg = re.compile("VMware VMXNET3")
            count = 0
            for line in result:
                if reg.search(line):
                    count += 1
            self.assertEqual(
                count,
                3,
                "Not all NICs on VR are of type VMXNET3"
            )
        except Exception as e:
            self.fail("NIC creation failed for vpc tier with systemvm nic \
                        adapter type as Vmxnet3: %s" % e)
        finally:
            if reset:
                self.updateConfigurAndRestart("vmware.systemvm.nic.device.type", nic_type)
        return
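
test_01_create_tier_Vmxnet3 above uses an updateConfigurAndRestart helper that is not shown. A minimal sketch, assuming it updates the global setting and then restarts the management server with a RestartServer-style helper like the one sketched earlier (the 120-second wait is an assumption):

    def updateConfigurAndRestart(self, name, value):
        """Sketch: update a global setting and restart the management server."""
        Configurations.update(self.apiclient, name, value)
        self.RestartServer()
        # Give the management server time to come back up before the next API call.
        time.sleep(120)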
    def test_02_instancename_from_default_configuration(self):
        """ Verify for globally set instancename
        """

        # Validate the following
        # 1. Set the vm.instancename.flag to true. Hostname and displayname
        #    should be the user-provided display name
        # 2. Do not provide a display name. The internal name should be
        #    i-<userid>-<vmid>-<instance.name from global config>
        if not is_config_suitable(
                apiclient=self.apiclient,
                name='vm.instancename.flag',
                value='true'):
            self.skipTest('vm.instancename.flag should be true. skipping')

        # Removing display name from config
        del self.services["virtual_machine"]["displayname"]
        self.debug("Deploying VM in account: %s" % self.account.name)
        virtual_machine = VirtualMachine.create(
            self.apiclient,
            self.services["virtual_machine"],
            accountid=self.account.name,
            domainid=self.account.domainid,
            serviceofferingid=self.service_offering.id,
        )
        self.debug(
            "Checking if the virtual machine is created properly or not?")
        vms = VirtualMachine.list(
            self.apiclient,
            id=virtual_machine.id,
            listall=True
        )

        self.assertEqual(
            isinstance(vms, list),
            True,
            "List vms should return a valid list"
        )
        vm = vms[0]
        self.assertEqual(
            vm.state,
            "Running",
            "Vm state should be running after deployment"
        )
        self.assertNotEqual(
            vm.displayname,
            vm.id,
            "VM display name should not be the same as the VM ID"
        )
        # Fetch account ID and VMID from database to check internal name
        self.debug("select id from account where uuid = '%s';"
                   % self.account.id)

        qresultset = self.dbclient.execute(
            "select id from account where uuid = '%s';"
            % self.account.id
        )
        self.assertEqual(
            isinstance(qresultset, list),
            True,
            "Check DB query result set for valid data"
        )

        self.assertNotEqual(
            len(qresultset),
            0,
            "Check DB Query result set"
        )
        qresult = qresultset[0]
        account_id = qresult[0]

        self.debug("select id from vm_instance where uuid = '%s';" % vm.id)

        qresultset = self.dbclient.execute(
            "select id from vm_instance where uuid = '%s';" %
            vm.id)

        self.assertEqual(
            isinstance(qresultset, list),
            True,
            "Check DB query result set for valid data"
        )

        self.assertNotEqual(
            len(qresultset),
            0,
            "Check DB Query result set"
        )
        qresult = qresultset[0]
        self.debug("Query result: %s" % qresult)
        vmid = qresult[0]

        self.debug("Fetching the global config value for instance.name")
        configs = Configurations.list(
            self.apiclient,
            name="instance.name",
            listall=True
        )

        config = configs[0]
        instance_name = config.value
        self.debug("Instance.name: %s" % instance_name)

        # internal name = i-<account ID>-<VM ID>-<instance.name>
        internal_name = "i-" + \
            str(account_id) + "-" + str(vmid) + "-" + instance_name
        self.assertEqual(
            vm.instancename,
            internal_name,
            "VM internal name should match the expected format"
        )
        return
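A short sketch of the internal-name convention the assertion above relies on, i-<account DB id>-<VM DB id>-<instance.name>; build_internal_name is a hypothetical helper, not part of the test framework:

def build_internal_name(account_id, vm_id, instance_name):
    """Compose the internal VM name from its DB ids and the instance.name global setting."""
    return "i-%s-%s-%s" % (account_id, vm_id, instance_name)

# Example: account id 42, VM id 137 and instance.name "VM" yield "i-42-137-VM"
assert build_internal_name(42, 137, "VM") == "i-42-137-VM"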
    def test_02_concurrent_snapshots_configuration(self):
        """Concurrent Snapshots
            1. Verify that CreateSnapshot command fails when it
                takes more time than job.expire.minute
            2. Verify that snapshot creation fails if CreateSnapshot command
                takes more time than concurrent.snapshots.threshold.perhost
            3. Check the event generation when snapshot creation
                fails if CreateSnapshot takes more time than
                concurrent.snapshots.threshold.perhost

        """

        # Step 1
        if not self.testdata["configurableData"][
                "restartManagementServerThroughTestCase"]:
            self.skipTest(
                "Skip test if restartManagementServerThroughTestCase \
                        is not provided")

        configs = Configurations.list(
            self.apiclient,
            name="job.expire.minutes")
        orig_expire = configs[0].value

        Configurations.update(self.apiclient,
                              name="concurrent.snapshots.threshold.perhost",
                              value="2"
                              )

        Configurations.update(self.apiclient,
                              name="job.expire.minutes",
                              value="1"
                              )

        # Restart management server
        self.RestartServer()
        time.sleep(120)

        try:
            thread_pool = []
            for i in range(4):
                create_snapshot_thread_1 = Thread(
                    target=CreateSnapshot,
                    args=(
                        self,
                        self.root_pool[i],
                        False))
                thread_pool.append(create_snapshot_thread_1)

            for thread in thread_pool:
                thread.start()

            for thread in thread_pool:
                thread.join()

        except Exception as e:
            raise Exception(
                "Warning: unable to start snapshot threads: %s" %
                e)

        Configurations.update(self.apiclient,
                              name="job.expire.minutes",
                              value=orig_expire
                              )

        # Restart management server
        self.RestartServer()
        time.sleep(120)

        # Step 2
        Configurations.update(self.apiclient,
                              name="concurrent.snapshots.threshold.perhost",
                              value="3"
                              )

        Configurations.update(self.apiclient,
                              name="job.expire.minutes",
                              value="1"
                              )

        # Restart management server
        self.RestartServer()
        time.sleep(120)

        configs = Configurations.list(
            self.apiclient,
            name="job.expire.minutes")

        with self.assertRaises(Exception):
            CreateSnapshot(self, self.root_pool[0], False)

        Configurations.update(self.apiclient,
                              name="job.expire.minutes",
                              value=orig_expire
                              )

        # Restart management server
        self.RestartServer()
        time.sleep(120)

        # Step 3
        configs = Configurations.list(
            self.apiclient,
            name="job.cancel.threshold.minutes")
        orig_cancel = configs[0].value

        Configurations.update(self.apiclient,
                              name="concurrent.snapshots.threshold.perhost",
                              value="3"
                              )

        Configurations.update(self.apiclient,
                              name="job.cancel.threshold.minutes",
                              value="1"
                              )

        self.RestartServer()
        time.sleep(120)

        configs = Configurations.list(
            self.apiclient,
            name="job.expire.minutes")

        with self.assertRaises(Exception):
            CreateSnapshot(self, self.root_pool[0], False)

        Configurations.update(self.apiclient,
                              name="job.cancel.threshold.minutes",
                              value=orig_cancel
                              )

        self.RestartServer()
        time.sleep(120)
        # Step 4
        Configurations.update(self.apiclient,
                              name="concurrent.snapshots.threshold.perhost",
                              value="3"
                              )

        self.RestartServer()
        time.sleep(120)

        Configurations.update(self.apiclient,
                              name="job.cancel.threshold.minutes",
                              value="1"
                              )

        self.RestartServer()
        time.sleep(120)

        with self.assertRaises(Exception):
            CreateSnapshot(self, self.root_pool[0], True)

        return
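The test above repeatedly overrides a global setting, restarts the management server, and later restores the original value. A hypothetical context-manager helper along these lines could wrap that pattern; it assumes the same Configurations.list/update calls, the RestartServer/sleep sequence used in the test, and the marvin.lib.base import path these tests rely on:

import time
from contextlib import contextmanager

from marvin.lib.base import Configurations


@contextmanager
def temporary_global_setting(testcase, name, value, restart_wait=120):
    """Temporarily override a global config value, restarting the management server around it."""
    original = Configurations.list(testcase.apiclient, name=name)[0].value
    Configurations.update(testcase.apiclient, name=name, value=value)
    testcase.RestartServer()
    time.sleep(restart_wait)
    try:
        yield
    finally:
        # Restore the original value and restart again so it takes effect
        Configurations.update(testcase.apiclient, name=name, value=original)
        testcase.RestartServer()
        time.sleep(restart_wait)

# Usage (inside the test): with temporary_global_setting(self, "job.expire.minutes", "1"): ...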
 def test01_template_download_URL_expire(self):
     """
     @Desc:Template files are deleted from secondary storage after download URL expires
     Step1:Deploy vm with default cent os template
     Step2:Stop the vm
     Step3:Create template from the vm's root volume
     Step4:Extract Template and wait for the download url to expire
     Step5:Deploy another vm with the template created at Step3
     Step6:Verify that vm deployment succeeds
     """
     params = ["extract.url.expiration.interval", "extract.url.cleanup.interval"]
     wait_time = 0
     for param in params:
         config = Configurations.list(self.apiClient, name=param)
         self.assertEqual(validateList(config)[0], PASS, "Config list returned invalid response")
         wait_time = wait_time + int(config[0].value)
     self.debug("Total wait time for url expiry: %s" % wait_time)
     # Creating Virtual Machine
     self.virtual_machine = VirtualMachine.create(
         self.userapiclient,
         self.services["virtual_machine"],
         accountid=self.account.name,
         domainid=self.account.domainid,
         serviceofferingid=self.service_offering.id,
     )
     self.assertIsNotNone(self.virtual_machine, "Virtual Machine creation failed")
     self.cleanup.append(self.virtual_machine)
     # Stop virtual machine
     self.virtual_machine.stop(self.userapiclient)
     list_volume = Volume.list(
         self.userapiclient, virtualmachineid=self.virtual_machine.id, type="ROOT", listall=True
     )
     self.assertEqual(validateList(list_volume)[0], PASS, "list volumes with type ROOT returned invalid list")
     self.volume = list_volume[0]
     self.create_template = Template.create(
         self.userapiclient,
         self.services["template"],
         volumeid=self.volume.id,
         account=self.account.name,
         domainid=self.account.domainid,
     )
     self.assertIsNotNone(self.create_template, "Failed to create template from root volume")
     self.cleanup.append(self.create_template)
     """
     Extract template
     """
     try:
         Template.extract(self.userapiclient, self.create_template.id, "HTTP_DOWNLOAD", self.zone.id)
     except Exception as e:
         self.fail("Extract template failed with error %s" % e)
     self.debug("Waiting for %s seconds for url to expire" % repr(wait_time + 20))
     time.sleep(wait_time + 20)
     self.debug("Waited for %s seconds for url to expire" % repr(wait_time + 20))
     """
     Deploy vm with the template created from the volume. After url expiration interval only
     url should be deleted not the template. To validate this deploy vm with the template
     """
     try:
         self.vm = VirtualMachine.create(
             self.userapiclient,
             self.services["virtual_machine"],
             accountid=self.account.name,
             domainid=self.account.domainid,
             serviceofferingid=self.service_offering.id,
             templateid=self.create_template.id,
         )
         self.cleanup.append(self.vm)
     except Exception as e:
         self.fail(
             "Template is automatically deleted after URL expired.\
                   So vm deployment failed with error: %s"
             % e
         )
     return
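A small sketch of the wait-time computation at the start of the test above: the total URL-expiry wait is the sum of the two extract.* intervals read from the global configuration. total_config_wait is a hypothetical helper; Configurations.list is the same call used in the test, assuming the marvin.lib.base import path:

from marvin.lib.base import Configurations


def total_config_wait(apiclient, names):
    """Sum the integer values of the given global configuration keys."""
    return sum(int(Configurations.list(apiclient, name=name)[0].value) for name in names)

# e.g. total_config_wait(self.apiClient,
#                        ["extract.url.expiration.interval", "extract.url.cleanup.interval"])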
Example no. 57
0
    def test_01_custom_hostname_instancename_false(self):
        """ Verify custom hostname for the instance when
            vm.instancename.flag=false
        """

        # Validate the following
        # 1. Set vm.instancename.flag to false. Hostname and displayname
        #    should be the UUID
        # 2. Give a user-provided display name. The internal name should be
        #    i-<userid>-<vmid>-<instance.name> (it should not contain the display name)

        if not is_config_suitable(apiclient=self.apiclient, name="vm.instancename.flag", value="false"):
            self.skipTest("vm.instancename.flag should be false. skipping")

        self.debug("Deploying VM in account: %s" % self.account.name)
        # Spawn an instance in that network
        virtual_machine = VirtualMachine.create(
            self.apiclient,
            self.services["virtual_machine"],
            accountid=self.account.name,
            domainid=self.account.domainid,
            serviceofferingid=self.service_offering.id,
        )
        self.debug("Checking if the virtual machine is created properly or not?")
        vms = VirtualMachine.list(self.apiclient, id=virtual_machine.id, listall=True)

        self.assertEqual(isinstance(vms, list), True, "List vms should retuen a valid name")
        vm = vms[0]
        self.assertEqual(vm.state, "Running", "Vm state should be running after deployment")
        self.debug(
            "vm.displayname: %s, original: %s" % (vm.displayname, self.services["virtual_machine"]["displayname"])
        )
        self.assertEqual(
            vm.displayname,
            self.services["virtual_machine"]["displayname"],
            "Vm display name should match the given name",
        )

        # Fetch account ID and VMID from database to check internal name
        self.debug("select id from account where uuid = '%s';" % self.account.id)

        qresultset = self.dbclient.execute("select id from account where uuid = '%s';" % self.account.id)
        self.assertEqual(isinstance(qresultset, list), True, "Check DB query result set for valid data")

        self.assertNotEqual(len(qresultset), 0, "Check DB Query result set")
        qresult = qresultset[0]
        account_id = qresult[0]

        self.debug("select id from vm_instance where uuid = '%s';" % vm.id)

        qresultset = self.dbclient.execute("select id from vm_instance where uuid = '%s';" % vm.id)

        self.assertEqual(isinstance(qresultset, list), True, "Check DB query result set for valid data")

        self.assertNotEqual(len(qresultset), 0, "Check DB Query result set")
        qresult = qresultset[0]
        self.debug("Query result: %s" % qresult)
        vmid = qresult[0]

        self.debug("Fetching the global config value for instance.name")
        configs = Configurations.list(self.apiclient, name="instance.name", listall=True)

        config = configs[0]
        self.debug("Config value : %s" % config)
        instance_name = config.value
        self.debug("Instance.name: %s" % instance_name)

        # internal Name = i-<user ID>-<VM ID>-<instance_name>
        # internal_name = "i-" + str(account_id) + "-" + str(vmid) + "-" + instance_name
        internal_name = "i-%s-%s-%s" % (str(account_id), str(vmid), instance_name)
        self.debug("Internal name: %s" % internal_name)
        self.debug("vm instance name : %s" % vm.instancename)
        self.assertEqual(vm.instancename, internal_name, "VM internal name should match the expected format")
        return
Example no. 58
0
    def test_deployVmWithCustomDisk(self):
        """Test custom disk sizes beyond range
        """
        # Steps for validation
        # 1. listConfigurations - custom.diskoffering.size.min
        #    and custom.diskoffering.size.max
        # 2. deployVm with custom disk offering size < min
        # 3. deployVm with custom disk offering min< size < max
        # 4. deployVm with custom disk offering size > max
        # Validate the following
        # 2. and 4. of deploy VM should fail.
        #    Only case 3. should succeed.
        #    cleanup all created data disks from the account

        config = Configurations.list(
            self.apiclient,
            name="custom.diskoffering.size.min"
        )
        self.assertEqual(
            isinstance(config, list),
            True,
            "custom.diskoffering.size.min should be present in global config"
        )
        # minimum size of custom disk (in GBs)
        min_size = int(config[0].value)
        self.debug("custom.diskoffering.size.min: %s" % min_size)

        config = Configurations.list(
            self.apiclient,
            name="custom.diskoffering.size.max"
        )
        self.assertEqual(
            isinstance(config, list),
            True,
            "custom.diskoffering.size.max should be present in global config"
        )
        # maximum size of custom disk (in GBs)
        max_size = int(config[0].value)
        self.debug("custom.diskoffering.size.max: %s" % max_size)

        self.debug("Creating a volume with size less than min cust disk size")
        self.services["custom_volume"]["customdisksize"] = (min_size - 1)
        self.services["custom_volume"]["zoneid"] = self.zone.id
        with self.assertRaises(Exception):
            Volume.create_custom_disk(
                self.apiclient,
                self.services["custom_volume"],
                account=self.account.name,
                domainid=self.account.domainid,
                diskofferingid=self.disk_offering.id
            )
        self.debug("Create volume failed!")

        self.debug("Creating a volume with size more than max cust disk size")
        self.services["custom_volume"]["customdisksize"] = (max_size + 1)
        with self.assertRaises(Exception):
            Volume.create_custom_disk(
                self.apiclient,
                self.services["custom_volume"],
                account=self.account.name,
                domainid=self.account.domainid,
                diskofferingid=self.disk_offering.id
            )
        self.debug("Create volume failed!")

        self.debug("Creating a volume with size more than min cust disk " +
                   "but less than max cust disk size"
                   )
        self.services["custom_volume"]["customdisksize"] = (min_size + 1)
        try:
            Volume.create_custom_disk(
                self.apiclient,
                self.services["custom_volume"],
                account=self.account.name,
                domainid=self.account.domainid,
                diskofferingid=self.disk_offering.id
            )
            self.debug("Create volume of cust disk size succeeded")
        except Exception as e:
            self.fail("Create volume failed with exception: %s" % e)
        return
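A minimal sketch of the boundary values exercised above: sizes just outside the configured [min, max] range should be rejected while a size inside it should succeed. boundary_disk_sizes is a hypothetical helper, and the example min/max values are illustrative only:

def boundary_disk_sizes(min_size, max_size):
    """Return (too_small, too_large, valid) custom disk sizes (in GB) for boundary testing."""
    return min_size - 1, max_size + 1, min_size + 1

# With custom.diskoffering.size.min=1 and custom.diskoffering.size.max=1024 this yields
# (0, 1025, 2): the first two volume creations should fail, the last should succeed.
assert boundary_disk_sizes(1, 1024) == (0, 1025, 2)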
    def check_routers_state(self, count=2, status_to_check="MASTER", expected_count=1, showall=False):
        vals = ["MASTER", "BACKUP", "UNKNOWN", "TESTFAILED"]
        cnts = [0, 0, 0, 0]

        result = "TESTFAILED"
        self.logger.debug('check_routers_state count: %s, status_to_check: %s, expected_count: %s, showall: %s' % (count, status_to_check, expected_count, showall))

        vrrp_interval = Configurations.list(self.apiclient, name="router.redundant.vrrp.interval")
        self.logger.debug("router.redundant.vrrp.interval is ==> %s" % vrrp_interval)

        total_sleep = 20
        if vrrp_interval:
            total_sleep = (int(vrrp_interval[0].value) * 4) + 10
        else:
            self.logger.debug("Could not retrieve the key 'router.redundant.vrrp.interval'. Sleeping for 10 seconds.")

        # Sleep for (router.redundant.vrrp.interval * 4) seconds here because VRRP has to be
        # reconfigured. The configuration change triggers a new election, which takes roughly
        # four times the advertisement interval; add another 10 seconds for the router itself
        # to be reconfigured, to stay on the safe side.
        time.sleep(total_sleep)

        self.query_routers(count, showall)
        for router in self.routers:
            if router.state == "Running":
                hosts = list_hosts(
                    self.apiclient,
                    zoneid=router.zoneid,
                    type='Routing',
                    state='Up',
                    id=router.hostid
                )
                self.assertEqual(
                    isinstance(hosts, list),
                    True,
                    "Check list host returns a valid list"
                )
                host = hosts[0]

                for _ in range(5):
                    host.user, host.passwd = get_host_credentials(self.config, host.name)
                    result = str(get_process_status(
                        host.ipaddress,
                        22,
                        host.user,
                        host.passwd,
                        router.linklocalip,
                        "sh /opt/cosmic/router/scripts/checkrouter.sh "
                    ))

                    self.logger.debug('check_routers_state router: %s, result: %s' % (router.name, result))

                    if result.count(status_to_check) == 1:
                        cnts[vals.index(status_to_check)] += 1
                        break
                    elif result.count("UNKNOWN") == 1:
                        time.sleep(5)
                    else:
                        break

        if cnts[vals.index(status_to_check)] != expected_count:
            self.logger.debug("Router state check failed; expected %s router(s) in state '%s'" % (expected_count, status_to_check))
            self.fail("Expected '%s' router[s] at state '%s', but found '%s'! Result: %s" % (expected_count, status_to_check, cnts[vals.index(status_to_check)], result))