def test_02_storage_migrate_root_and_data_disks(self):
        primarystorage2 = self.testdata[TestData.primaryStorage2]

        primary_storage_2 = StoragePool.create(self.apiClient,
                                               primarystorage2,
                                               clusterid=self.cluster_1.id)

        primary_storage_3 = StoragePool.create(self.apiClient,
                                               primarystorage2,
                                               clusterid=self.cluster_2.id)

        src_host, dest_host = self._get_source_and_dest_hosts()

        virtual_machine = VirtualMachine.create(
            self.apiClient,
            self.testdata[TestData.virtualMachine],
            accountid=self.account.name,
            zoneid=self.zone.id,
            serviceofferingid=self.compute_offering_3.id,
            templateid=self.template.id,
            domainid=self.domain.id,
            hostid=src_host.id,
            startvm=True)

        cs_data_volume = Volume.create(self.apiClient,
                                       self.testdata[TestData.volume_1],
                                       account=self.account.name,
                                       domainid=self.domain.id,
                                       zoneid=self.zone.id,
                                       diskofferingid=self.disk_offering_1.id)

        self.cleanup = [
            virtual_machine, cs_data_volume, primary_storage_2,
            primary_storage_3
        ]

        cs_data_volume = virtual_machine.attach_volume(self.apiClient,
                                                       cs_data_volume)

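        # Find the SolidFire account that backs this CloudStack account, then
        # locate the active SolidFire volume behind the data disk so it can be
        # verified after each migration.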
        sf_account_id = sf_util.get_sf_account_id(
            self.cs_api, self.account.id, self.primary_storage.id, self,
            TestVMMigrationWithStorage.
            _sf_account_id_should_be_non_zero_int_err_msg)

        sf_volumes = sf_util.get_active_sf_volumes(self.sfe, sf_account_id)

        sf_data_volume = sf_util.check_and_get_sf_volume(
            sf_volumes, cs_data_volume.name, self)

        sf_data_volume = self._migrate_and_verify_one_disk_only(
            virtual_machine, dest_host, cs_data_volume, sf_account_id,
            sf_data_volume, self.xen_session_1, self.xen_session_2)

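        # Swap source and destination and migrate the data disk back the
        # other way.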
        src_host, dest_host = dest_host, src_host

        self._migrate_and_verify_one_disk_only(virtual_machine, dest_host,
                                               cs_data_volume, sf_account_id,
                                               sf_data_volume,
                                               self.xen_session_2,
                                               self.xen_session_1)
    def test_add_remove_host_with_solidfire_plugin_3(self):
        if TestData.hypervisor_type != TestData.xenServer:
            return

        primarystorage = self.testdata[TestData.primaryStorage]

        primary_storage = StoragePool.create(
            self.apiClient,
            primarystorage,
            scope=primarystorage[TestData.scope],
            zoneid=self.zone.id,
            provider=primarystorage[TestData.provider],
            tags=primarystorage[TestData.tags],
            capacityiops=primarystorage[TestData.capacityIops],
            capacitybytes=primarystorage[TestData.capacityBytes],
            hypervisor=primarystorage[TestData.hypervisor]
        )

        self.cleanup.append(primary_storage)

        self.virtual_machine = VirtualMachine.create(
            self.apiClient,
            self.testdata[TestData.virtualMachine],
            accountid=self.account.name,
            zoneid=self.zone.id,
            serviceofferingid=self.compute_offering.id,
            templateid=self.template.id,
            domainid=self.domain.id,
            startvm=True
        )

        root_volume = self._get_root_volume(self.virtual_machine)

        sf_iscsi_name = sf_util.get_iqn(self.cs_api, root_volume, self)

        primarystorage2 = self.testdata[TestData.primaryStorage2]

        primary_storage_2 = StoragePool.create(
            self.apiClient,
            primarystorage2,
            scope=primarystorage2[TestData.scope],
            zoneid=self.zone.id,
            clusterid=self.cluster.id,
            provider=primarystorage2[TestData.provider],
            tags=primarystorage2[TestData.tags],
            capacityiops=primarystorage2[TestData.capacityIops],
            capacitybytes=primarystorage2[TestData.capacityBytes],
            hypervisor=primarystorage2[TestData.hypervisor]
        )

        self.cleanup.append(primary_storage_2)

        self._perform_add_remove_xenserver_host(primary_storage.id, sf_iscsi_name)
    def test12_primary_storage_with_zero_iops(self):
        primarystorage5 = self.testdata[TestData.primaryStorage5]

        primary_storage5 = StoragePool.create(
            self.apiClient,
            primarystorage5,
            scope=primarystorage5[TestData.scope],
            zoneid=self.zone.id,
            clusterid=self.cluster.id,
            provider=primarystorage5[TestData.provider],
            tags=primarystorage5[TestData.tags],
            capacityiops=primarystorage5[TestData.capacityIops],
            capacitybytes=primarystorage5[TestData.capacityBytes],
            hypervisor=primarystorage5[TestData.hypervisor])

        self.cleanup.append(primary_storage5)
        primary_storage_name = "cloudstack-" + primary_storage5.id
        self.assertEqual(
            any(primary_storage_name == app_instance['name']
                for app_instance in self.datera_api.app_instances.list()),
            True, "app instance not created")

        primary_storage_url = primarystorage5[TestData.url]

        self._verify_attributes(primary_storage5.id, primary_storage_url)
    def create_sp_template_and_storage_pool(self, apiclient, template_name,
                                            primary_storage, zoneid):
        spapiRemote = spapi.Api.fromConfig()
        logging.debug("================ %s" % spapiRemote)

        sp_api = spapi.Api.fromConfig(multiCluster=True)
        logging.debug("================ %s" % sp_api)

        remote_cluster = self.get_remote_storpool_cluster()
        logging.debug("================ %s" % remote_cluster)

        newTemplate = sptypes.VolumeTemplateCreateDesc(name=template_name,
                                                       placeAll="ssd",
                                                       placeTail="ssd",
                                                       placeHead="ssd",
                                                       replication=1)
        template_on_remote = spapiRemote.volumeTemplateCreate(
            newTemplate, clusterName=remote_cluster)
        template_on_local = spapiRemote.volumeTemplateCreate(newTemplate)
        storage_pool = StoragePool.create(
            apiclient,
            primary_storage,
            zoneid=zoneid,
        )
        return storage_pool, spapiRemote, sp_api
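    # Hypothetical usage sketch (not part of the original suite): the helper
    # above returns the CloudStack pool together with both StorPool API
    # handles, so a typical caller creates the pool, registers it for cleanup
    # and keeps the handles for later verification. The attribute names
    # (self.apiclient, self.testdata, self.zone, self.cleanup) are assumed to
    # match the other StorPool examples shown here.
    def example_create_sp_pool(self):
        primary_storage = self.testdata[TestData.primaryStorage]

        storage_pool, spapi_remote, sp_api = \
            self.create_sp_template_and_storage_pool(self.apiclient,
                                                     "example-template",
                                                     primary_storage,
                                                     self.zone.id)

        self.cleanup.append(storage_pool)

        return storage_pool, spapi_remote, sp_api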
    def test09_add_vm_with_datera_storage(self):
        primarystorage = self.testdata[TestData.primaryStorage]

        primary_storage = StoragePool.create(
            self.apiClient,
            primarystorage,
            scope=primarystorage[TestData.scope],
            zoneid=self.zone.id,
            clusterid=self.cluster.id,
            provider=primarystorage[TestData.provider],
            tags=primarystorage[TestData.tags],
            capacityiops=primarystorage[TestData.capacityIops],
            capacitybytes=primarystorage[TestData.capacityBytes],
            hypervisor=primarystorage[TestData.hypervisor]
        )

        primary_storage_url = primarystorage[TestData.url]
        self._verify_attributes(
            primary_storage.id, primary_storage_url)

        self.cleanup.append(primary_storage)
        self.virtual_machine = VirtualMachine.create(
            self.apiClient,
            self.testdata[TestData.virtualMachine],
            accountid=self.account.name,
            zoneid=self.zone.id,
            serviceofferingid=self.compute_offering.id,
            templateid=self.template.id,
            domainid=self.domain.id,
            startvm=True
        )

        self._validate_storage(primary_storage, self.virtual_machine)
    def test10_add_vm_with_datera_storage_and_volume(self):
        primarystorage = self.testdata[TestData.primaryStorage]

        primary_storage = StoragePool.create(
            self.apiClient,
            primarystorage,
            scope=primarystorage[TestData.scope],
            zoneid=self.zone.id,
            clusterid=self.cluster.id,
            provider=primarystorage[TestData.provider],
            tags=primarystorage[TestData.tags],
            capacityiops=primarystorage[TestData.capacityIops],
            capacitybytes=primarystorage[TestData.capacityBytes],
            hypervisor=primarystorage[TestData.hypervisor]
        )

        primary_storage_url = primarystorage[TestData.url]
        self._verify_attributes(
            primary_storage.id, primary_storage_url)

        self.cleanup.append(primary_storage)
        self.virtual_machine = VirtualMachine.create(
            self.apiClient,
            self.testdata[TestData.virtualMachine],
            accountid=self.account.name,
            zoneid=self.zone.id,
            serviceofferingid=self.compute_offering.id,
            templateid=self.template.id,
            domainid=self.domain.id,
            startvm=True
        )

        self._validate_storage(primary_storage, self.virtual_machine)

        volume = Volume.create(
            self.apiClient,
            self.testdata[TestData.volume_1],
            account=self.account.name,
            domainid=self.domain.id,
            zoneid=self.zone.id,
            diskofferingid=self.disk_offering.id
        )

        self.virtual_machine.attach_volume(
            self.apiClient,
            volume
        )
        storage_pools_response = list_storage_pools(
            self.apiClient, id=primary_storage.id)

        for key, value in self.xen_session.xenapi.SR.get_all_records().items():
            if value['name_description'] == primary_storage.id:
                xen_server_response = value

        self.assertNotEqual(
            int(storage_pools_response[0].disksizeused),
            int(xen_server_response['physical_utilisation']))
    def test_add_remove_host_with_solidfire_plugin_2(self):
        primarystorage2 = self.testdata[TestData.primaryStorage2]

        primary_storage_2 = StoragePool.create(
            self.apiClient,
            primarystorage2,
            scope=primarystorage2[TestData.scope],
            zoneid=self.zone.id,
            clusterid=self.cluster.id,
            provider=primarystorage2[TestData.provider],
            tags=primarystorage2[TestData.tags],
            capacityiops=primarystorage2[TestData.capacityIops],
            capacitybytes=primarystorage2[TestData.capacityBytes],
            hypervisor=primarystorage2[TestData.hypervisor])

        self.cleanup.append(primary_storage_2)

        sf_iscsi_name = self._get_iqn_2(primary_storage_2)

        self._perform_add_remove_host(primary_storage_2.id, sf_iscsi_name)
    def setUp(self):
        primarystorage = self.testdata[TestData.primaryStorage]

        self.primary_storage = StoragePool.create(
            self.apiClient,
            primarystorage,
            scope=primarystorage[TestData.scope],
            zoneid=self.zone.id,
            clusterid=self.cluster.id,
            provider=primarystorage[TestData.provider],
            tags=primarystorage[TestData.tags],
            capacityiops=primarystorage[TestData.capacityIops],
            capacitybytes=primarystorage[TestData.capacityBytes],
            hypervisor=primarystorage[TestData.hypervisor])
        self.primary_storage_id = self.primary_storage.id
        self._primary_storage = [self.primary_storage]
        self.primary_tag = primarystorage[TestData.tags]
        self.cleanup = [self.primary_storage]
        primary_storage_url = primarystorage[TestData.url]
        self._verify_priamry_storage(self.primary_storage_id,
                                     primary_storage_url)
    def setUpClass(cls):
        # Set up API client
        testclient = super(TestManagedClusteredFilesystems,
                           cls).getClsTestClient()

        cls.apiClient = testclient.getApiClient()
        cls.configData = testclient.getParsedTestDataConfig()
        cls.dbConnection = testclient.getDbConnection()

        cls.testdata = TestData().testdata

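        # Record in the CloudStack database that volume re-signing is
        # supported (sf_util performs the SQL update for us).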
        sf_util.set_supports_resign(True, cls.dbConnection)

        cls._connect_to_hypervisor()

        # Set up SolidFire connection
        solidfire = cls.testdata[TestData.solidFire]

        cls.sfe = ElementFactory.create(solidfire[TestData.mvip],
                                        solidfire[TestData.username],
                                        solidfire[TestData.password])

        # Get Resources from Cloud Infrastructure
        cls.zone = get_zone(cls.apiClient,
                            zone_id=cls.testdata[TestData.zoneId])
        cls.template = get_template(cls.apiClient,
                                    cls.zone.id,
                                    hypervisor=TestData.hypervisor_type)
        cls.domain = get_domain(cls.apiClient, cls.testdata[TestData.domainId])

        # Create test account
        cls.account = Account.create(cls.apiClient,
                                     cls.testdata["account"],
                                     admin=1)

        # Set up connection to make customized API calls
        cls.user = User.create(cls.apiClient,
                               cls.testdata["user"],
                               account=cls.account.name,
                               domainid=cls.domain.id)

        url = cls.testdata[TestData.url]

        api_url = "http://" + url + ":8080/client/api"
        userkeys = User.registerUserKeys(cls.apiClient, cls.user.id)

        cls.cs_api = SignedAPICall.CloudStack(api_url, userkeys.apikey,
                                              userkeys.secretkey)

        primarystorage = cls.testdata[TestData.primaryStorage]

        cls.primary_storage = StoragePool.create(
            cls.apiClient,
            primarystorage,
            scope=primarystorage[TestData.scope],
            zoneid=cls.zone.id,
            provider=primarystorage[TestData.provider],
            tags=primarystorage[TestData.tags],
            capacityiops=primarystorage[TestData.capacityIops],
            capacitybytes=primarystorage[TestData.capacityBytes],
            hypervisor=primarystorage[TestData.hypervisor])

        cls.compute_offering = ServiceOffering.create(
            cls.apiClient, cls.testdata[TestData.computeOffering])

        # Resources that are to be destroyed
        cls._cleanup = [cls.compute_offering, cls.user, cls.account]
    def setUpCloudStack(cls):
        super(TestMigrationFromUuidToGlobalIdVolumes, cls).setUpClass()
        cls._cleanup = []
        cls.helper = HelperUtil(cls)
        with open(cls.ARGS.cfg) as json_text:
            cfg.logger.info(cls.ARGS.cfg)
            cfg.logger.info(json_text)
            conf = json.load(json_text)
            cfg.logger.info(conf)

            zone = conf['mgtSvr'][0].get('zone')

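        # Build CloudStack at the requested commit, start it under Maven and
        # tail the log until Jetty reports that the server is up.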
        cls.helper.build_commit(cls.ARGS.uuid, cls.ARGS)
        cfg.logger.info("Starting CloudStack")
        cls.mvn_proc = subprocess.Popen(
            ['mvn', '-pl', ':cloud-client-ui', 'jetty:run'],
            cwd=cls.ARGS.forked,
            preexec_fn=os.setsid,
            stdout=cfg.misc,
            stderr=subprocess.STDOUT,
        )
        cls.mvn_proc_grp = os.getpgid(cls.mvn_proc.pid)
        cfg.logger.info("Started CloudStack in process group %d",
                        cls.mvn_proc_grp)
        cfg.logger.info("Waiting for a while to give it a chance to start")
        proc = subprocess.Popen(["tail", "-f", cfg.misc_name],
                                shell=False,
                                bufsize=0,
                                stdout=subprocess.PIPE)
        while True:
            line = proc.stdout.readline()
            if not line:
                cfg.logger.info("tail ended, was this expected?")
                cfg.logger.info("Stopping CloudStack")
                os.killpg(cls.mvn_proc_grp, signal.SIGINT)
                break
            if "[INFO] Started Jetty Server" in line:
                cfg.logger.info("got it!")
                break
        proc.terminate()
        proc.wait()
        time.sleep(15)
        cfg.logger.info("Processing with the setup")

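        # Point Marvin at the running management server and flip the StorPool
        # "migration to global ids completed" flag back to false so the
        # UUID-to-global-id migration runs again for this setup.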
        cls.obj_marvininit = cls.helper.marvin_init(cls.ARGS.cfg)
        cls.testClient = cls.obj_marvininit.getTestClient()
        cls.apiclient = cls.testClient.getApiClient()
        dbclient = cls.testClient.getDbConnection()
        v = dbclient.execute(
            "select * from configuration where name='sp.migration.to.global.ids.completed'"
        )
        cfg.logger.info("Configuration setting for update of db is %s", v)
        if len(v) > 0:
            update = dbclient.execute(
                "update configuration set value='false' where name='sp.migration.to.global.ids.completed'"
            )
            cfg.logger.info("DB configuration table was updated %s", update)

        td = TestData()
        cls.testdata = td.testdata

        cls.services = cls.testClient.getParsedTestDataConfig()
        # Get Zone, Domain and templates
        cls.domain = get_domain(cls.apiclient)
        cls.zone = get_zone(cls.apiclient, zone_name=zone)
        cls.cluster = list_clusters(cls.apiclient)[0]
        cls.hypervisor = get_hypervisor_type(cls.apiclient)
        cls.host = list_hosts(cls.apiclient, zoneid=cls.zone.id)

        #The version of CentOS has to be supported
        cls.template = get_template(cls.apiclient,
                                    cls.zone.id,
                                    account="system")

        if cls.template == FAILED:
            assert False, "get_template() failed to return template\
                    with description %s" % cls.services["ostype"]

        cls.services["domainid"] = cls.domain.id
        cls.services["small"]["zoneid"] = cls.zone.id
        cls.services["templates"]["ostypeid"] = cls.template.ostypeid
        cls.services["zoneid"] = cls.zone.id
        cls.sp_template_1 = "-".join(["test-ssd-b", random_gen()])
        cfg.logger.info(
            pprint.pformat("############################ %s" % cls.zone))
        storpool_primary_storage = {
            "name":
            cls.sp_template_1,
            "zoneid":
            cls.zone.id,
            "url":
            "SP_API_HTTP=10.2.87.30:81;SP_AUTH_TOKEN=1234567890;SP_TEMPLATE=%s"
            % cls.sp_template_1,
            "scope":
            "zone",
            "capacitybytes":
            564325555333,
            "capacityiops":
            155466,
            "hypervisor":
            "kvm",
            "provider":
            "StorPool",
            "tags":
            cls.sp_template_1
        }

        cls.storpool_primary_storage = storpool_primary_storage
        host, port, auth = cls.getCfgFromUrl(
            url=storpool_primary_storage["url"])
        cls.spapi = spapi.Api(host=host, port=port, auth=auth)

        storage_pool = list_storage_pools(
            cls.apiclient, name=storpool_primary_storage["name"])

        if storage_pool is None:
            newTemplate = sptypes.VolumeTemplateCreateDesc(
                name=storpool_primary_storage["name"],
                placeAll="ssd",
                placeTail="ssd",
                placeHead="ssd",
                replication=1)
            template_on_local = cls.spapi.volumeTemplateCreate(newTemplate)
            storage_pool = StoragePool.create(cls.apiclient,
                                              storpool_primary_storage)
        else:
            storage_pool = storage_pool[0]
        cls.primary_storage = storage_pool

        storpool_service_offerings_ssd = {
            "name": cls.sp_template_1,
            "displaytext": "SP_CO_2 (Min IOPS = 10,000; Max IOPS = 15,000)",
            "cpunumber": 1,
            "cpuspeed": 500,
            "memory": 512,
            "storagetype": "shared",
            "customizediops": False,
            "hypervisorsnapshotreserve": 200,
            "tags": cls.sp_template_1
        }

        service_offerings_ssd = list_service_offering(
            cls.apiclient, name=storpool_service_offerings_ssd["name"])

        if service_offerings_ssd is None:
            service_offerings_ssd = ServiceOffering.create(
                cls.apiclient, storpool_service_offerings_ssd)
        else:
            service_offerings_ssd = service_offerings_ssd[0]

        cls.service_offering = service_offerings_ssd
        cls._cleanup.append(cls.service_offering)
        cfg.logger.info(pprint.pformat(cls.service_offering))

        cls.sp_template_2 = "-".join(["test-ssd2-b", random_gen()])

        storpool_primary_storage2 = {
            "name":
            cls.sp_template_2,
            "zoneid":
            cls.zone.id,
            "url":
            "SP_API_HTTP=10.2.87.30:81;SP_AUTH_TOKEN=1234567890;SP_TEMPLATE=%s"
            % cls.sp_template_2,
            "scope":
            "zone",
            "capacitybytes":
            564325555333,
            "capacityiops":
            1554,
            "hypervisor":
            "kvm",
            "provider":
            "StorPool",
            "tags":
            cls.sp_template_2
        }

        cls.storpool_primary_storage2 = storpool_primary_storage2
        storage_pool = list_storage_pools(
            cls.apiclient, name=storpool_primary_storage2["name"])

        if storage_pool is None:
            newTemplate = sptypes.VolumeTemplateCreateDesc(
                name=storpool_primary_storage2["name"],
                placeAll="ssd",
                placeTail="ssd",
                placeHead="ssd",
                replication=1)
            template_on_local = cls.spapi.volumeTemplateCreate(newTemplate)
            storage_pool = StoragePool.create(cls.apiclient,
                                              storpool_primary_storage2)
        else:
            storage_pool = storage_pool[0]
        cls.primary_storage2 = storage_pool

        storpool_service_offerings_ssd2 = {
            "name": cls.sp_template_2,
            "displaytext": "SP_CO_2",
            "cpunumber": 1,
            "cpuspeed": 500,
            "memory": 512,
            "storagetype": "shared",
            "customizediops": False,
            "tags": cls.sp_template_2
        }

        service_offerings_ssd2 = list_service_offering(
            cls.apiclient, name=storpool_service_offerings_ssd2["name"])

        if service_offerings_ssd2 is None:
            service_offerings_ssd2 = ServiceOffering.create(
                cls.apiclient, storpool_service_offerings_ssd2)
        else:
            service_offerings_ssd2 = service_offerings_ssd2[0]

        cls.service_offering2 = service_offerings_ssd2
        cls._cleanup.append(cls.service_offering2)

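        # Restart the management server (stop the Maven process group, launch
        # it again and wait for Jetty) before preparing the test VMs, volumes
        # and snapshots.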
        os.killpg(cls.mvn_proc_grp, signal.SIGTERM)

        time.sleep(30)

        cls.mvn_proc = subprocess.Popen(
            ['mvn', '-pl', ':cloud-client-ui', 'jetty:run'],
            cwd=cls.ARGS.forked,
            preexec_fn=os.setsid,
            stdout=cfg.misc,
            stderr=subprocess.STDOUT,
        )
        cls.mvn_proc_grp = os.getpgid(cls.mvn_proc.pid)
        cfg.logger.info("Started CloudStack in process group %d",
                        cls.mvn_proc_grp)
        cfg.logger.info("Waiting for a while to give it a chance to start")
        proc = subprocess.Popen(["tail", "-f", cfg.misc_name],
                                shell=False,
                                bufsize=0,
                                stdout=subprocess.PIPE)
        while True:
            line = proc.stdout.readline()
            if not line:
                cfg.logger.info("tail ended, was this expected?")
                cfg.logger.info("Stopping CloudStack")
                os.killpg(cls.mvn_proc_grp, signal.SIGINT)
                break
            if "[INFO] Started Jetty Server" in line:
                cfg.logger.info("got it!")
                break
        proc.terminate()
        proc.wait()
        time.sleep(15)

        disk_offering = list_disk_offering(cls.apiclient, name="Small")

        disk_offering_20 = list_disk_offering(cls.apiclient, name="Medium")

        disk_offering_100 = list_disk_offering(cls.apiclient, name="Large")

        assert disk_offering is not None
        assert disk_offering_20 is not None
        assert disk_offering_100 is not None

        cls.disk_offering = disk_offering[0]
        cls.disk_offering_20 = disk_offering_20[0]
        cls.disk_offering_100 = disk_offering_100[0]
        account = list_accounts(cls.apiclient, name="admin")
        cls.account = account[0]

        cls.virtual_machine = VirtualMachine.create(
            cls.apiclient, {"name": "StorPool-%s" % uuid.uuid4()},
            zoneid=cls.zone.id,
            templateid=cls.template.id,
            serviceofferingid=cls.service_offering.id,
            hypervisor=cls.hypervisor,
            rootdisksize=10)
        cls._cleanup.append(cls.virtual_machine)

        cls.virtual_machine2 = VirtualMachine.create(
            cls.apiclient, {"name": "StorPool-%s" % uuid.uuid4()},
            zoneid=cls.zone.id,
            templateid=cls.template.id,
            serviceofferingid=cls.service_offering.id,
            hypervisor=cls.hypervisor,
            rootdisksize=10)
        cls._cleanup.append(cls.virtual_machine2)

        cls.virtual_machine3 = VirtualMachine.create(
            cls.apiclient, {"name": "StorPool-%s" % uuid.uuid4()},
            zoneid=cls.zone.id,
            templateid=cls.template.id,
            serviceofferingid=cls.service_offering.id,
            hypervisor=cls.hypervisor,
            rootdisksize=10)
        cls._cleanup.append(cls.virtual_machine3)

        cls.virtual_machine4 = VirtualMachine.create(
            cls.apiclient, {"name": "StorPool-%s" % uuid.uuid4()},
            zoneid=cls.zone.id,
            templateid=cls.template.id,
            serviceofferingid=cls.service_offering.id,
            hypervisor=cls.hypervisor,
            rootdisksize=10)
        cls._cleanup.append(cls.virtual_machine4)

        cls.virtual_machine5 = VirtualMachine.create(
            cls.apiclient, {"name": "StorPool-%s" % uuid.uuid4()},
            zoneid=cls.zone.id,
            templateid=cls.template.id,
            serviceofferingid=cls.service_offering.id,
            hypervisor=cls.hypervisor,
            rootdisksize=10)
        cls._cleanup.append(cls.virtual_machine5)

        cls.virtual_machine6 = VirtualMachine.create(
            cls.apiclient, {"name": "StorPool-%s" % uuid.uuid4()},
            zoneid=cls.zone.id,
            templateid=cls.template.id,
            serviceofferingid=cls.service_offering.id,
            hypervisor=cls.hypervisor,
            rootdisksize=10)
        cls._cleanup.append(cls.virtual_machine6)

        cls.volume1 = Volume.create(cls.apiclient,
                                    cls.testdata[TestData.volume_1],
                                    account=cls.account.name,
                                    domainid=cls.domain.id,
                                    zoneid=cls.zone.id,
                                    diskofferingid=cls.disk_offering.id)
        cls._cleanup.append(cls.volume1)

        cls.volume2 = Volume.create(cls.apiclient,
                                    cls.testdata[TestData.volume_2],
                                    account=cls.account.name,
                                    domainid=cls.domain.id,
                                    zoneid=cls.zone.id,
                                    diskofferingid=cls.disk_offering.id)
        cls._cleanup.append(cls.volume2)

        cls.volume3 = Volume.create(cls.apiclient,
                                    cls.testdata[TestData.volume_3],
                                    account=cls.account.name,
                                    domainid=cls.domain.id,
                                    zoneid=cls.zone.id,
                                    diskofferingid=cls.disk_offering.id)
        cls._cleanup.append(cls.volume3)

        cls.volume4 = Volume.create(cls.apiclient,
                                    cls.testdata[TestData.volume_4],
                                    account=cls.account.name,
                                    domainid=cls.domain.id,
                                    zoneid=cls.zone.id,
                                    diskofferingid=cls.disk_offering.id)
        cls._cleanup.append(cls.volume4)

        cls.volume5 = Volume.create(cls.apiclient,
                                    cls.testdata[TestData.volume_5],
                                    account=cls.account.name,
                                    domainid=cls.domain.id,
                                    zoneid=cls.zone.id,
                                    diskofferingid=cls.disk_offering.id)

        cls._cleanup.append(cls.volume5)
        cls.volume6 = Volume.create(cls.apiclient,
                                    cls.testdata[TestData.volume_6],
                                    account=cls.account.name,
                                    domainid=cls.domain.id,
                                    zoneid=cls.zone.id,
                                    diskofferingid=cls.disk_offering.id)
        cls._cleanup.append(cls.volume6)

        cls.volume7 = Volume.create(cls.apiclient,
                                    cls.testdata[TestData.volume_7],
                                    account=cls.account.name,
                                    domainid=cls.domain.id,
                                    zoneid=cls.zone.id,
                                    diskofferingid=cls.disk_offering.id)

        cls.volume8 = Volume.create(cls.apiclient,
                                    cls.testdata[TestData.volume_7],
                                    account=cls.account.name,
                                    domainid=cls.domain.id,
                                    zoneid=cls.zone.id,
                                    diskofferingid=cls.disk_offering.id)
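        # Stop the first VM and run the data volumes through attach/detach so
        # each of them gets a pre-migration (UUID-based) path recorded, then
        # start the VM again.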
        cls.virtual_machine.stop(cls.apiclient, forced=True)

        cls.volume_on_sp_1 = cls.virtual_machine.attach_volume(
            cls.apiclient, cls.volume1)

        vol = list_volumes(cls.apiclient, id=cls.volume3.id)

        cls.virtual_machine.attach_volume(cls.apiclient, cls.volume3)
        cls.virtual_machine.detach_volume(cls.apiclient, cls.volume3)

        vol = list_volumes(cls.apiclient, id=cls.volume3.id)

        cls.volume_on_sp_3 = vol[0]

        cls.virtual_machine.attach_volume(cls.apiclient, cls.volume2)
        cls.virtual_machine.detach_volume(cls.apiclient, cls.volume2)

        cls.virtual_machine3.attach_volume(cls.apiclient, cls.volume4)
        cls.virtual_machine3.detach_volume(cls.apiclient, cls.volume4)

        cls.virtual_machine.attach_volume(cls.apiclient, cls.volume5)
        cls.virtual_machine.detach_volume(cls.apiclient, cls.volume5)

        cls.virtual_machine.attach_volume(cls.apiclient, cls.volume6)
        cls.virtual_machine.detach_volume(cls.apiclient, cls.volume6)

        cls.virtual_machine.attach_volume(cls.apiclient, cls.volume7)
        cls.virtual_machine.detach_volume(cls.apiclient, cls.volume7)

        cls.virtual_machine.attach_volume(cls.apiclient, cls.volume8)
        cls.virtual_machine.detach_volume(cls.apiclient, cls.volume8)

        cls.virtual_machine.start(cls.apiclient)

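        # Snapshot VM5's root volume several times (one snapshot is forced
        # onto secondary storage by disabling the bypass) and volume7 twice,
        # then delete volume7 so snapshots of a removed volume are covered as
        # well.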
        list_root = list_volumes(cls.apiclient,
                                 virtualmachineid=cls.virtual_machine5.id,
                                 type="ROOT")

        cls.snapshot_uuid1 = Snapshot.create(cls.apiclient,
                                             volume_id=list_root[0].id)
        cls._cleanup.append(cls.snapshot_uuid1)
        cls.snapshot_uuid2 = Snapshot.create(cls.apiclient,
                                             volume_id=list_root[0].id)
        cls._cleanup.append(cls.snapshot_uuid2)

        #Snapshot on secondary
        cls.helper.bypass_secondary(False)
        cls.snapshot_uuid_on_secondary = Snapshot.create(
            cls.apiclient, volume_id=list_root[0].id)
        cls._cleanup.append(cls.snapshot_uuid_on_secondary)
        cls.snapshot_uuid3 = Snapshot.create(cls.apiclient,
                                             volume_id=cls.volume7.id)
        cls._cleanup.append(cls.snapshot_uuid3)

        cls.snapshot_uuid4 = Snapshot.create(cls.apiclient,
                                             volume_id=cls.volume7.id)
        cls._cleanup.append(cls.snapshot_uuid4)

        cls.snapshot_uuid_bypassed = Snapshot.create(cls.apiclient,
                                                     volume_id=list_root[0].id)
        cls._cleanup.append(cls.snapshot_uuid_bypassed)

        Volume.delete(cls.volume7, cls.apiclient)

        cls.helper.switch_to_globalid_commit(cls.ARGS.globalid, cls.ARGS)
        cfg.logger.info("The setup is done, proceeding with the tests")
    def setUpClass(cls):
        testClient = super(TestStoragePool, cls).getClsTestClient()
        cls.apiclient = testClient.getApiClient()
        cls.unsupportedHypervisor = False
        cls.hypervisor = testClient.getHypervisorInfo()
        if cls.hypervisor.lower() in ("hyperv", "lxc"):
            cls.unsupportedHypervisor = True
            return

        cls.services = testClient.getParsedTestDataConfig()
        # Get Zone, Domain and templates
        cls.domain = get_domain(cls.apiclient)
        cls.zone = get_zone(cls.apiclient, testClient.getZoneForTests())

        storpool_primary_storage = {
            "name": "cloudLocal",
            "zoneid": cls.zone.id,
            "url": "cloudLocal",
            "scope": "zone",
            "capacitybytes": 4500000,
            "capacityiops": 155466464221111121,
            "hypervisor": "kvm",
            "provider": "StorPool",
            "tags": "cloudLocal"
        }

        storpool_service_offerings = {
            "name": "tags",
            "displaytext": "SP_CO_2 (Min IOPS = 10,000; Max IOPS = 15,000)",
            "cpunumber": 1,
            "cpuspeed": 500,
            "memory": 512,
            "storagetype": "shared",
            "customizediops": False,
            "hypervisorsnapshotreserve": 200,
            "tags": "test_tags"
        }
        storage_pool = list_storage_pools(cls.apiclient, name='cloudLocal')

        service_offerings = list_service_offering(cls.apiclient, name='tags')

        disk_offerings = list_disk_offering(cls.apiclient, name="Small")

        cls.debug(pprint.pformat(storage_pool))
        if storage_pool is None:
            storage_pool = StoragePool.create(cls.apiclient,
                                              storpool_primary_storage)
        else:
            storage_pool = storage_pool[0]
        cls.debug(pprint.pformat(storage_pool))
        if service_offerings is None:
            service_offerings = ServiceOffering.create(
                cls.apiclient, storpool_service_offerings)
        else:
            service_offerings = service_offerings[0]
        #The version of CentOS has to be supported
        template = get_template(apiclient=cls.apiclient,
                                zone_id=cls.zone.id,
                                template_filter='self',
                                template_name="centos6",
                                domain_id=cls.domain.id,
                                hypervisor=cls.hypervisor)

        cls.debug(pprint.pformat(template))
        cls.debug(pprint.pformat(cls.hypervisor))

        if template == FAILED:
            assert False, "get_template() failed to return template\
                    with description %s" % cls.services["ostype"]

        cls.services["domainid"] = cls.domain.id
        cls.services["small"]["zoneid"] = cls.zone.id
        cls.services["templates"]["ostypeid"] = template.ostypeid
        cls.services["zoneid"] = cls.zone.id

        cls.service_offering = service_offerings
        cls.debug(pprint.pformat(cls.service_offering))

        cls.volume_1 = Volume.create(
            cls.apiclient,
            {"diskname": "StorPoolDisk-1"},
            zoneid=cls.zone.id,
            diskofferingid=disk_offerings[0].id,
        )

        cls.volume_2 = Volume.create(
            cls.apiclient,
            {"diskname": "StorPoolDisk-2"},
            zoneid=cls.zone.id,
            diskofferingid=disk_offerings[0].id,
        )

        cls.virtual_machine = VirtualMachine.create(
            cls.apiclient, {"name": "StorPool-%d" % random.randint(0, 100)},
            zoneid=cls.zone.id,
            templateid=template.id,
            serviceofferingid=cls.service_offering.id,
            hypervisor=cls.hypervisor,
            rootdisksize=10)
        cls.random_data_0 = random_gen(size=100)
        cls.test_dir = "/tmp"
        cls.random_data = "random.data"
        cls._cleanup = []
        cls._cleanup.append(cls.virtual_machine)
        cls._cleanup.append(cls.volume_1)
        cls._cleanup.append(cls.volume_2)
        return
    def test_vag_per_host_5(self):
        hosts = list_hosts(self.apiClient, clusterid=self.cluster.id)

        self.assertTrue(
            len(hosts) >= 2,
            "There needs to be at least two hosts."
        )

        unique_vag_ids = self._get_unique_vag_ids(hosts)

        self.assertTrue(len(hosts) == len(unique_vag_ids), "To run this test, each host should be in its own VAG.")

        primarystorage = self.testdata[TestData.primaryStorage]

        primary_storage = StoragePool.create(
            self.apiClient,
            primarystorage,
            scope=primarystorage[TestData.scope],
            zoneid=self.zone.id,
            provider=primarystorage[TestData.provider],
            tags=primarystorage[TestData.tags],
            capacityiops=primarystorage[TestData.capacityIops],
            capacitybytes=primarystorage[TestData.capacityBytes],
            hypervisor=primarystorage[TestData.hypervisor]
        )

        self.cleanup.append(primary_storage)

        self.virtual_machine = VirtualMachine.create(
            self.apiClient,
            self.testdata[TestData.virtualMachine],
            accountid=self.account.name,
            zoneid=self.zone.id,
            serviceofferingid=self.compute_offering.id,
            templateid=self.template.id,
            domainid=self.domain.id,
            startvm=True
        )

        root_volume = self._get_root_volume(self.virtual_machine)

        sf_account_id = sf_util.get_sf_account_id(self.cs_api, self.account.id, primary_storage.id, self, TestAddRemoveHosts._sf_account_id_should_be_non_zero_int_err_msg)

        sf_volumes = sf_util.get_active_sf_volumes(self.sfe, sf_account_id)

        sf_volume = sf_util.check_and_get_sf_volume(sf_volumes, root_volume.name, self)

        sf_vag_ids = sf_util.get_vag_ids(self.cs_api, self.cluster.id, primary_storage.id, self)

        sf_util.check_vags(sf_volume, sf_vag_ids, self)

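        # Pick one of the hosts, record its IQN and confirm that it currently
        # belongs to a volume access group (VAG).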
        host = Host(hosts[0].__dict__)

        host_iqn = self._get_host_iqn(host)

        all_vags = sf_util.get_all_vags(self.sfe)

        host_vag = self._get_host_vag(host_iqn, all_vags)

        self.assertTrue(host_vag != None, "The host should be in a VAG.")

        host.delete(self.apiClient)

        sf_volumes = sf_util.get_active_sf_volumes(self.sfe, sf_account_id)

        sf_volume = sf_util.check_and_get_sf_volume(sf_volumes, root_volume.name, self)

        sf_util.check_vags(sf_volume, sf_vag_ids, self)

        all_vags = sf_util.get_all_vags(self.sfe)

        host_vag = self._get_host_vag(host_iqn, all_vags)

        self.assertTrue(host_vag == None, "The host should not be in a VAG.")

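        # Re-add the deleted host through the CloudStack API (the credentials
        # below are masked in the test data), then confirm there is one more
        # host than there are unique VAGs.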
        details = {
            TestData.username: "******",
            TestData.password: "******",
            TestData.url: "http://" + host.ipaddress,
            TestData.podId : host.podid,
            TestData.zoneId: host.zoneid
        }

        host = Host.create(
            self.apiClient,
            self.cluster,
            details,
            hypervisor=host.hypervisor
        )

        self.assertTrue(
            isinstance(host, Host),
            "'host' is not a 'Host'."
        )

        hosts = list_hosts(self.apiClient, clusterid=self.cluster.id)

        unique_vag_ids = self._get_unique_vag_ids(hosts)

        self.assertTrue(len(hosts) == len(unique_vag_ids) + 1, "There should be one more host than unique VAG.")
    def setUpClass(cls):
        # Set up API client
        testclient = super(TestVMMigrationWithStorage, cls).getClsTestClient()

        cls.apiClient = testclient.getApiClient()
        cls.configData = testclient.getParsedTestDataConfig()
        cls.dbConnection = testclient.getDbConnection()

        cls.testdata = TestData().testdata

        xenserver = cls.testdata[TestData.xenServer]

        # Get the IP address of the XenServer host in cluster 1
        host_ip = "https://" + \
                  list_hosts(cls.apiClient, clusterid=cls.testdata[TestData.clusterId1], name="XenServer-6.5-1")[0].ipaddress

        # Set up XenAPI connection
        cls.xen_session_1 = XenAPI.Session(host_ip)

        cls.xen_session_1.xenapi.login_with_password(
            xenserver[TestData.username], xenserver[TestData.password])

        # Get the IP address of the XenServer host in cluster 2
        host_ip = "https://" + \
                  list_hosts(cls.apiClient, clusterid=cls.testdata[TestData.clusterId2], name="XenServer-6.5-3")[0].ipaddress

        # Set up XenAPI connection
        cls.xen_session_2 = XenAPI.Session(host_ip)

        cls.xen_session_2.xenapi.login_with_password(
            xenserver[TestData.username], xenserver[TestData.password])

        # Set up SolidFire connection
        solidfire = cls.testdata[TestData.solidFire]

        cls.sfe = ElementFactory.create(solidfire[TestData.mvip],
                                        solidfire[TestData.username],
                                        solidfire[TestData.password])

        # Get Resources from Cloud Infrastructure
        cls.zone = get_zone(cls.apiClient,
                            zone_id=cls.testdata[TestData.zoneId])
        cls.cluster_1 = list_clusters(cls.apiClient,
                                      id=cls.testdata[TestData.clusterId1])[0]
        cls.cluster_2 = list_clusters(cls.apiClient,
                                      id=cls.testdata[TestData.clusterId2])[0]
        cls.template = get_template(cls.apiClient, cls.zone.id,
                                    cls.configData["ostype"])
        cls.domain = get_domain(cls.apiClient, cls.testdata[TestData.domainId])

        # Create test account
        cls.account = Account.create(cls.apiClient,
                                     cls.testdata["account"],
                                     admin=1)

        # Set up connection to make customized API calls
        cls.user = User.create(cls.apiClient,
                               cls.testdata["user"],
                               account=cls.account.name,
                               domainid=cls.domain.id)

        url = cls.testdata[TestData.url]

        api_url = "http://" + url + ":8080/client/api"
        userkeys = User.registerUserKeys(cls.apiClient, cls.user.id)

        cls.cs_api = SignedAPICall.CloudStack(api_url, userkeys.apikey,
                                              userkeys.secretkey)

        primarystorage = cls.testdata[TestData.primaryStorage]

        cls.primary_storage = StoragePool.create(cls.apiClient, primarystorage)

        cls.compute_offering_1 = ServiceOffering.create(
            cls.apiClient, cls.testdata[TestData.computeOffering1])

        cls.compute_offering_2 = ServiceOffering.create(
            cls.apiClient, cls.testdata[TestData.computeOffering2])

        cls.compute_offering_3 = ServiceOffering.create(
            cls.apiClient, cls.testdata[TestData.computeOffering3])

        cls.disk_offering_1 = DiskOffering.create(
            cls.apiClient, cls.testdata[TestData.diskOffering1])

        cls.disk_offering_2 = DiskOffering.create(
            cls.apiClient, cls.testdata[TestData.diskOffering2])

        # Resources that are to be destroyed
        cls._cleanup = [
            cls.compute_offering_1, cls.compute_offering_2,
            cls.compute_offering_3, cls.disk_offering_1, cls.disk_offering_2,
            cls.user, cls.account
        ]
    def setUpCloudStack(cls):
        cls._cleanup = []
        cls.spapi = spapi.Api.fromConfig(multiCluster=True)

        testClient = super(TestStoragePool, cls).getClsTestClient()
        cls.apiclient = testClient.getApiClient()
        cls.unsupportedHypervisor = False
        cls.hypervisor = testClient.getHypervisorInfo()
        if cls.hypervisor.lower() in ("hyperv", "lxc"):
            cls.unsupportedHypervisor = True
            return

        cls.services = testClient.getParsedTestDataConfig()
        # Get Zone, Domain and templates
        cls.domain = get_domain(cls.apiclient)
        cls.zone = None

        zones = list_zones(cls.apiclient)

        for z in zones:
            if z.internaldns1 == cls.getClsConfig().mgtSvr[0].mgtSvrIp:
                cls.zone = z

        cls.debug("##################### zone %s" % cls.zone)
        td = TestData()
        cls.testdata = td.testdata
        cls.helper = StorPoolHelper()

        storpool_primary_storage = {
            "name": "ssd",
            TestData.scope: "ZONE",
            "url": "ssd",
            TestData.provider: "StorPool",
            "path": "/dev/storpool",
            TestData.capacityBytes: 2251799813685248,
            TestData.hypervisor: "KVM"
        }

        storpool_without_qos = {
            "name": "ssd",
            "displaytext": "ssd",
            "cpunumber": 1,
            "cpuspeed": 500,
            "memory": 512,
            "storagetype": "shared",
            "customizediops": False,
            "hypervisorsnapshotreserve": 200,
            "tags": "ssd"
        }

        storpool_qos_template = {
            "name": "qos",
            "displaytext": "qos",
            "cpunumber": 1,
            "cpuspeed": 500,
            "memory": 512,
            "storagetype": "shared",
            "customizediops": False,
            "hypervisorsnapshotreserve": 200,
            "tags": "ssd"
        }

        disk_offering_ssd = {
            "name": "ssd",
            "displaytext": "SP_DO_1 (5GB Min IOPS = 300; Max IOPS = 500)",
            "disksize": 10,
            "customizediops": False,
            "hypervisorsnapshotreserve": 200,
            TestData.tags: "ssd",
            "storagetype": "shared"
        }

        disk_offering_qos = {
            "name": "qos",
            "displaytext": "SP_DO_1 (5GB Min IOPS = 300; Max IOPS = 500)",
            "disksize": 10,
            "customizediops": False,
            "hypervisorsnapshotreserve": 200,
            TestData.tags: "ssd",
            "storagetype": "shared"
        }

        cls.template_name = storpool_primary_storage.get("name")

        cls.template_name_2 = "qos"

        storage_pool = list_storage_pools(cls.apiclient, name="ssd")

        if storage_pool is None:
            storage_pool = StoragePool.create(cls.apiclient,
                                              storpool_primary_storage)
        else:
            storage_pool = storage_pool[0]
        # Only assign primary_storage once the pool is known to exist.
        cls.primary_storage = storage_pool
        cls.storage_pool = storage_pool
        cls.debug(pprint.pformat(storage_pool))

        service_offerings_ssd = list_service_offering(cls.apiclient,
                                                      name=cls.template_name)

        service_offerings_qos = list_service_offering(cls.apiclient,
                                                      name=cls.template_name_2)

        if service_offerings_ssd is None:
            service_offerings_ssd = ServiceOffering.create(
                cls.apiclient, storpool_without_qos)
        else:
            service_offerings_ssd = service_offerings_ssd[0]

        if service_offerings_qos is None:
            service_offerings_qos = ServiceOffering.create(
                cls.apiclient, storpool_qos_template)
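            # Point the new QoS service offering at the StorPool "qos" template
            # via the SP_TEMPLATE resource detail.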
            ResourceDetails.create(cls.apiclient,
                                   resourceid=service_offerings_qos.id,
                                   resourcetype="ServiceOffering",
                                   details={"SP_TEMPLATE": "qos"},
                                   fordisplay=True)

        else:
            service_offerings_qos = service_offerings_qos[0]

        cls.service_offering_ssd = service_offerings_ssd
        cls.service_offering_qos = service_offerings_qos

        cls.disk_offering_ssd = list_disk_offering(cls.apiclient,
                                                   name=cls.template_name)
        cls.disk_offering_qos = list_disk_offering(cls.apiclient,
                                                   name=cls.template_name_2)

        if cls.disk_offering_ssd is None:
            cls.disk_offering_ssd = DiskOffering.create(
                cls.apiclient, disk_offering_ssd)
        else:
            cls.disk_offering_ssd = cls.disk_offering_ssd[0]

        if cls.disk_offering_qos is None:
            cls.disk_offering_qos = DiskOffering.create(
                cls.apiclient, disk_offering_qos)
            ResourceDetails.create(cls.apiclient,
                                   resourceid=cls.disk_offering_qos.id,
                                   resourcetype="DiskOffering",
                                   details={"SP_TEMPLATE": "qos"},
                                   fordisplay=True)
        else:
            cls.disk_offering_qos = cls.disk_offering_qos[0]

        # The version of CentOS has to be supported
        template = get_template(cls.apiclient, cls.zone.id, account="system")

        if template == FAILED:
            assert False, "get_template() failed to return template\
                    with description %s" % cls.services["ostype"]

        cls.services["domainid"] = cls.domain.id
        cls.services["small"]["zoneid"] = cls.zone.id
        cls.services["templates"]["ostypeid"] = template.ostypeid
        cls.services["zoneid"] = cls.zone.id
        cls.services["diskofferingid"] = cls.disk_offering_ssd.id

        cls.account = cls.helper.create_account(cls.apiclient,
                                                cls.services["account"],
                                                accounttype=1,
                                                domainid=cls.domain.id,
                                                roleid=1)
        cls._cleanup.append(cls.account)

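        # Adjust the account's default security group so the test VMs are
        # reachable during the tests.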
        securitygroup = SecurityGroup.list(cls.apiclient,
                                           account=cls.account.name,
                                           domainid=cls.account.domainid)[0]
        cls.helper.set_securityGroups(cls.apiclient,
                                      account=cls.account.name,
                                      domainid=cls.account.domainid,
                                      id=securitygroup.id)

        cls.volume_1 = Volume.create(cls.apiclient,
                                     cls.services,
                                     account=cls.account.name,
                                     domainid=cls.account.domainid)

        cls.volume_2 = Volume.create(cls.apiclient,
                                     cls.services,
                                     account=cls.account.name,
                                     domainid=cls.account.domainid)

        cls.virtual_machine = VirtualMachine.create(
            cls.apiclient, {"name": "StorPool-%s" % uuid.uuid4()},
            zoneid=cls.zone.id,
            templateid=template.id,
            accountid=cls.account.name,
            domainid=cls.account.domainid,
            serviceofferingid=cls.service_offering_ssd.id,
            hypervisor=cls.hypervisor,
            rootdisksize=10)
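        # Attach both data volumes to the newly created VM.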
        cls.virtual_machine.attach_volume(cls.apiclient, cls.volume_1)

        cls.virtual_machine.attach_volume(cls.apiclient, cls.volume_2)

        cls.template = template
        cls.hostid = cls.virtual_machine.hostid

        cls.random_data_0 = random_gen(size=100)
        cls.test_dir = "/tmp"
        cls.random_data = "random.data"
        return
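
    # A minimal get-or-create sketch for the lookup pattern used above, assuming
    # the same marvin wrappers (list_storage_pools / StoragePool.create); this
    # helper is illustrative and not part of the original suite.
    @classmethod
    def _get_or_create_storage_pool(cls, name, definition):
        # Reuse an existing pool with this name, or create one from the definition.
        pools = list_storage_pools(cls.apiclient, name=name)
        if pools:
            return pools[0]
        return StoragePool.create(cls.apiclient, definition)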
    def setUpClass(cls):
        # Set up API client
        testclient = super(TestUploadDownload, cls).getClsTestClient()

        cls.apiClient = testclient.getApiClient()
        cls.configData = testclient.getParsedTestDataConfig()
        cls.dbConnection = testclient.getDbConnection()

        cls.testdata = TestData().testdata

        # Set up SolidFire connection
        solidfire = cls.testdata[TestData.solidFire]

        cls.sfe = ElementFactory.create(solidfire[TestData.mvip],
                                        solidfire[TestData.username],
                                        solidfire[TestData.password])

        # Get Resources from Cloud Infrastructure
        cls.zone = get_zone(cls.apiClient,
                            zone_id=cls.testdata[TestData.zoneId])
        cls.cluster = list_clusters(cls.apiClient)[0]
        cls.template = get_template(cls.apiClient,
                                    cls.zone.id,
                                    hypervisor=TestData.hypervisor_type)
        cls.domain = get_domain(cls.apiClient, cls.testdata[TestData.domainId])

        # Create test account
        cls.account = Account.create(cls.apiClient,
                                     cls.testdata[TestData.account],
                                     admin=1)

        # Set up connection to make customized API calls
        user = User.create(cls.apiClient,
                           cls.testdata[TestData.user],
                           account=cls.account.name,
                           domainid=cls.domain.id)

        url = cls.testdata[TestData.url]

        api_url = "http://" + url + ":8080/client/api"
        userkeys = User.registerUserKeys(cls.apiClient, user.id)

        cls.cs_api = SignedAPICall.CloudStack(api_url, userkeys.apikey,
                                              userkeys.secretkey)

        primarystorage = cls.testdata[TestData.primaryStorage]

        cls.primary_storage = StoragePool.create(
            cls.apiClient,
            primarystorage,
            scope=primarystorage[TestData.scope],
            zoneid=cls.zone.id,
            provider=primarystorage[TestData.provider],
            tags=primarystorage[TestData.tags],
            capacityiops=primarystorage[TestData.capacityIops],
            capacitybytes=primarystorage[TestData.capacityBytes],
            hypervisor=primarystorage[TestData.hypervisor])

        compute_offering = ServiceOffering.create(
            cls.apiClient, cls.testdata[TestData.computeOffering])

        cls.disk_offering = DiskOffering.create(
            cls.apiClient, cls.testdata[TestData.diskOffering], custom=True)

        # Create VM and volume for tests
        cls.virtual_machine = VirtualMachine.create(
            cls.apiClient,
            cls.testdata[TestData.virtualMachine],
            accountid=cls.account.name,
            zoneid=cls.zone.id,
            serviceofferingid=compute_offering.id,
            templateid=cls.template.id,
            domainid=cls.domain.id,
            startvm=True)

        cls._cleanup = [compute_offering, cls.disk_offering, user, cls.account]
    def setUpCloudStack(cls):
        super(TestRenameObjectsWithUuids, cls).setUpClass()

        cls.spapi = spapi.Api.fromConfig(multiCluster=True)
        cls.helper = HelperUtil(cls)

        cls._cleanup = []
        testClient = super(TestRenameObjectsWithUuids, cls).getClsTestClient()
        cls.apiclient = testClient.getApiClient()
        cls.unsupportedHypervisor = False
        cls.hypervisor = testClient.getHypervisorInfo()
        if cls.hypervisor.lower() in ("hyperv", "lxc"):
            cls.unsupportedHypervisor = True
            return

        cls.services = testClient.getParsedTestDataConfig()
        # Get Zone, Domain and templates
        cls.domain = get_domain(cls.apiclient)
        cls.zone = None


        zones = list_zones(cls.apiclient)

        for z in zones:
            if z.internaldns1 == cls.getClsConfig().mgtSvr[0].mgtSvrIp:
                cls.zone = z

        storpool_primary_storage = {
            "name": "ssd",
            "zoneid": cls.zone.id,
            "url": "ssd",
            "scope": "zone",
            "capacitybytes": 4500000,
            "capacityiops": 155466464221111121,
            "hypervisor": "kvm",
            "provider": "StorPool",
            "tags": "ssd"
        }

        storpool_service_offerings = {
            "name": "ssd",
            "displaytext": "SP_CO_2 (Min IOPS = 10,000; Max IOPS = 15,000)",
            "cpunumber": 1,
            "cpuspeed": 500,
            "memory": 512,
            "storagetype": "shared",
            "customizediops": False,
            "hypervisorsnapshotreserve": 200,
            "tags": "ssd"
        }

        storage_pool = list_storage_pools(
            cls.apiclient,
            name='ssd'
            )

        service_offerings = list_service_offering(
            cls.apiclient,
            name='ssd'
            )

        disk_offerings = list_disk_offering(
            cls.apiclient,
            name="Small"
            )

        disk_offering_20 = list_disk_offering(
            cls.apiclient,
            name="Medium"
            )

        disk_offering_100 = list_disk_offering(
            cls.apiclient,
            name="Large"
            )

        cls.disk_offering = disk_offerings[0]
        cls.disk_offering_20 = disk_offering_20[0]
        cls.disk_offering_100 = disk_offering_100[0]
        cls.debug(pprint.pformat(storage_pool))
        if storage_pool is None:
            storage_pool = StoragePool.create(cls.apiclient, storpool_primary_storage)
        else:
            storage_pool = storage_pool[0]
        cls.storage_pool = storage_pool
        cls.debug(pprint.pformat(storage_pool))
        if service_offerings is None:
            service_offerings = ServiceOffering.create(cls.apiclient, storpool_service_offerings)
        else:
            service_offerings = service_offerings[0]
        # The version of CentOS has to be supported
        template = get_template(
            cls.apiclient,
            cls.zone.id,
            account="system"
        )

        cls.pools = StoragePool.list(cls.apiclient, zoneid=cls.zone.id)
        cls.debug(pprint.pformat(template))
        cls.debug(pprint.pformat(cls.hypervisor))

        if template == FAILED:
            assert False, "get_template() failed to return template\
                    with description %s" % cls.services["ostype"]

        cls.services["domainid"] = cls.domain.id
        cls.services["small"]["zoneid"] = cls.zone.id
        cls.services["templates"]["ostypeid"] = template.ostypeid
        cls.services["zoneid"] = cls.zone.id


        cls.service_offering = service_offerings
        cls.debug(pprint.pformat(cls.service_offering))

        cls.local_cluster = cls.get_local_cluster(zoneid = cls.zone.id)
        cls.host = cls.list_hosts_by_cluster_id(cls.local_cluster.id)

        assert len(cls.host) > 1, "At least 2 hosts are required in the local cluster"
        cls.host_on_local_1 = cls.host[0]
        cls.host_on_local_2 = cls.host[1]

        cls.remote_cluster = cls.get_remote_cluster(zoneid = cls.zone.id)
        cls.host_remote = cls.list_hosts_by_cluster_id(cls.remote_cluster.id)
        assert len(cls.host_remote) > 1, "At least 2 hosts are required in the remote cluster"

        cls.host_on_remote1 = cls.host_remote[0]
        cls.host_on_remote2 = cls.host_remote[1]

        cls.volume_1 = Volume.create(
            cls.apiclient,
            {"diskname":"StorPoolDisk-1" },
            zoneid=cls.zone.id,
            diskofferingid=cls.disk_offering.id,
        )
        cls._cleanup.append(cls.volume_1)

        cls.volume = Volume.create(
            cls.apiclient,
            {"diskname":"StorPoolDisk-3" },
            zoneid=cls.zone.id,
            diskofferingid=cls.disk_offering.id,
        )
        cls._cleanup.append(cls.volume)

        cls.virtual_machine = VirtualMachine.create(
            cls.apiclient,
            {"name":"StorPool-%s" % uuid.uuid4() },
            zoneid=cls.zone.id,
            templateid=template.id,
            serviceofferingid=cls.service_offering.id,
            hypervisor=cls.hypervisor,
            hostid = cls.host_on_local_1.id,
            rootdisksize=10
        )

        cls.volume_on_remote = Volume.create(
            cls.apiclient,
            {"diskname":"StorPoolDisk-3" },
            zoneid=cls.zone.id,
            diskofferingid=cls.disk_offering.id,
        )
        cls._cleanup.append(cls.volume_on_remote)

        cls.virtual_machine_on_remote = VirtualMachine.create(
            cls.apiclient,
            {"name":"StorPool-%s" % uuid.uuid4() },
            zoneid=cls.zone.id,
            templateid=template.id,
            serviceofferingid=cls.service_offering.id,
            hypervisor=cls.hypervisor,
            hostid = cls.host_on_remote1.id,
            rootdisksize=10
        )

        cls.template = template
        cls.random_data_0 = random_gen(size=100)
        cls.test_dir = "/tmp"
        cls.random_data = "random.data"
        return
    def test_01_create_system_vms_on_managed_storage(self):
        self._disable_zone_and_delete_system_vms(None, False)

        primary_storage = self.testdata[TestData.primaryStorage]

        primary_storage_1 = StoragePool.create(
            self.apiClient,
            primary_storage
        )

        self._prepare_to_use_managed_storage_for_system_vms()

        enabled = "Enabled"

        self.zone.update(self.apiClient, id=self.zone.id, allocationstate=enabled)

        system_vms = self._wait_for_and_get_running_system_vms(2)

        virtual_machine = VirtualMachine.create(
            self.apiClient,
            self.testdata[TestData.virtualMachine],
            accountid=self.account.name,
            zoneid=self.zone.id,
            serviceofferingid=self.compute_offering.id,
            templateid=self.template.id,
            domainid=self.domain.id,
            startvm=True
        )

        # This virtual machine was only created and started so that the virtual router would be created and started.
        # Just delete this virtual machine once it has been created and started.
        virtual_machine.delete(self.apiClient, True)

        virtual_router = list_routers(self.apiClient, listall=True, state="Running")[0]

        system_vms.append(virtual_router)

        self._check_system_vms(system_vms, primary_storage_1.id)

        primary_storage[TestData.name] = TestData.get_name_for_solidfire_storage()

        primary_storage_2 = StoragePool.create(
            self.apiClient,
            primary_storage
        )

        StoragePool.enableMaintenance(self.apiClient, primary_storage_1.id)

        self._wait_for_storage_cleanup_thread(system_vms)

        sf_util.purge_solidfire_volumes(self.sfe)

        system_vms = self._wait_for_and_get_running_system_vms(2)

        virtual_router = list_routers(self.apiClient, listall=True, state="Running")[0]

        system_vms.append(virtual_router)

        self._check_system_vms(system_vms, primary_storage_2.id)

        StoragePool.cancelMaintenance(self.apiClient, primary_storage_1.id)

        primary_storage_1.delete(self.apiClient)

        self._disable_zone_and_delete_system_vms(virtual_router)

        self._wait_for_storage_cleanup_thread(system_vms)

        sf_util.purge_solidfire_volumes(self.sfe)

        primary_storage_2.delete(self.apiClient)

        self._verify_no_active_solidfire_volumes()

        self._prepare_to_stop_using_managed_storage_for_system_vms()

        self.zone.update(self.apiClient, id=self.zone.id, allocationstate=enabled)

        self._wait_for_and_get_running_system_vms(2)
    def setUpClass(cls):
        # Set up API client
        testclient = super(TestVolumes, cls).getClsTestClient()
        cls.apiClient = testclient.getApiClient()
        cls.configData = testclient.getParsedTestDataConfig()
        cls.dbConnection = testclient.getDbConnection()

        cls.testdata = TestData().testdata

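        # Flag the environment as supporting volume resigning (written straight
        # to the CloudStack DB via sf_util).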
        cls.supports_resign = True

        sf_util.set_supports_resign(cls.supports_resign, cls.dbConnection)

        # Look up the XenServer host IP used for the XenAPI connection
        host_ip = "https://" + \
                  list_hosts(cls.apiClient, clusterid=cls.testdata[TestData.clusterId], name="XenServer-6.5-1")[0].ipaddress

        # Set up XenAPI connection
        cls.xen_session = XenAPI.Session(host_ip)

        xenserver = cls.testdata[TestData.xenServer]

        cls.xen_session.xenapi.login_with_password(xenserver[TestData.username], xenserver[TestData.password])

        # Set up SolidFire connection
        solidfire = cls.testdata[TestData.solidFire]

        cls.sfe = ElementFactory.create(solidfire[TestData.mvip], solidfire[TestData.username], solidfire[TestData.password])

        # Get Resources from Cloud Infrastructure
        cls.zone = get_zone(cls.apiClient, zone_id=cls.testdata[TestData.zoneId])
        cls.cluster = list_clusters(cls.apiClient)[0]
        cls.template = get_template(cls.apiClient, cls.zone.id, cls.configData["ostype"])
        cls.domain = get_domain(cls.apiClient, cls.testdata[TestData.domainId])

        # Create test account
        cls.account = Account.create(
            cls.apiClient,
            cls.testdata["account"],
            admin=1
        )

        # Set up connection to make customized API calls
        cls.user = User.create(
            cls.apiClient,
            cls.testdata["user"],
            account=cls.account.name,
            domainid=cls.domain.id
        )

        url = cls.testdata[TestData.url]

        api_url = "http://" + url + ":8080/client/api"
        userkeys = User.registerUserKeys(cls.apiClient, cls.user.id)

        cls.cs_api = SignedAPICall.CloudStack(api_url, userkeys.apikey, userkeys.secretkey)

        primarystorage = cls.testdata[TestData.primaryStorage]

        cls.primary_storage = StoragePool.create(
            cls.apiClient,
            primarystorage,
            scope=primarystorage[TestData.scope],
            zoneid=cls.zone.id,
            provider=primarystorage[TestData.provider],
            tags=primarystorage[TestData.tags],
            capacityiops=primarystorage[TestData.capacityIops],
            capacitybytes=primarystorage[TestData.capacityBytes],
            hypervisor=primarystorage[TestData.hypervisor]
        )

        cls.compute_offering = ServiceOffering.create(
            cls.apiClient,
            cls.testdata[TestData.computeOffering]
        )

        cls.disk_offering = DiskOffering.create(
            cls.apiClient,
            cls.testdata[TestData.diskOffering]
        )

        # Create VM and volume for tests
        cls.virtual_machine = VirtualMachine.create(
            cls.apiClient,
            cls.testdata[TestData.virtualMachine],
            accountid=cls.account.name,
            zoneid=cls.zone.id,
            serviceofferingid=cls.compute_offering.id,
            templateid=cls.template.id,
            domainid=cls.domain.id,
            startvm=True
        )

        cls.volume = Volume.create(
            cls.apiClient,
            cls.testdata[TestData.volume_1],
            account=cls.account.name,
            domainid=cls.domain.id,
            zoneid=cls.zone.id,
            diskofferingid=cls.disk_offering.id
        )

        # Resources that are to be destroyed
        cls._cleanup = [
            cls.volume,
            cls.virtual_machine,
            cls.compute_offering,
            cls.disk_offering,
            cls.user,
            cls.account
        ]
    def setUpClass(cls):
        # Set up API client
        testclient = super(TestCapacityManagement, cls).getClsTestClient()

        cls.apiClient = testclient.getApiClient()
        cls.configData = testclient.getParsedTestDataConfig()
        cls.dbConnection = testclient.getDbConnection()

        cls.testdata = TestData().testdata

        sf_util.set_supports_resign(True, cls.dbConnection)

        cls._connect_to_hypervisor()

        # Set up SolidFire connection
        solidfire = cls.testdata[TestData.solidFire]

        cls.sfe = ElementFactory.create(solidfire[TestData.mvip], solidfire[TestData.username], solidfire[TestData.password])

        # Get Resources from Cloud Infrastructure
        cls.zone = get_zone(cls.apiClient, zone_id=cls.testdata[TestData.zoneId])
        cls.template = get_template(cls.apiClient, cls.zone.id, hypervisor=TestData.hypervisor_type)
        cls.domain = get_domain(cls.apiClient, cls.testdata[TestData.domainId])

        # Create test account
        cls.account = Account.create(
            cls.apiClient,
            cls.testdata["account"],
            admin=1
        )

        # Set up connection to make customized API calls
        cls.user = User.create(
            cls.apiClient,
            cls.testdata["user"],
            account=cls.account.name,
            domainid=cls.domain.id
        )

        url = cls.testdata[TestData.url]

        api_url = "http://" + url + ":8080/client/api"
        userkeys = User.registerUserKeys(cls.apiClient, cls.user.id)

        cls.cs_api = SignedAPICall.CloudStack(api_url, userkeys.apikey, userkeys.secretkey)

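        # Two SolidFire-backed primary storage pools used by the capacity
        # management tests.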
        primarystorage = cls.testdata[TestData.primaryStorage]

        cls.primary_storage = StoragePool.create(
            cls.apiClient,
            primarystorage,
            scope=primarystorage[TestData.scope],
            zoneid=cls.zone.id,
            provider=primarystorage[TestData.provider],
            tags=primarystorage[TestData.tags],
            capacityiops=primarystorage[TestData.capacityIops],
            capacitybytes=primarystorage[TestData.capacityBytes],
            hypervisor=primarystorage[TestData.hypervisor]
        )

        primarystorage2 = cls.testdata[TestData.primaryStorage2]

        cls.primary_storage_2 = StoragePool.create(
            cls.apiClient,
            primarystorage2,
            scope=primarystorage2[TestData.scope],
            zoneid=cls.zone.id,
            provider=primarystorage2[TestData.provider],
            tags=primarystorage2[TestData.tags],
            capacityiops=primarystorage2[TestData.capacityIops],
            capacitybytes=primarystorage2[TestData.capacityBytes],
            hypervisor=primarystorage2[TestData.hypervisor]
        )

        cls.compute_offering = ServiceOffering.create(
            cls.apiClient,
            cls.testdata[TestData.computeOffering]
        )

        cls.compute_offering_2 = ServiceOffering.create(
            cls.apiClient,
            cls.testdata[TestData.computeOffering2]
        )

        # Resources that are to be destroyed
        cls._cleanup = [
            cls.compute_offering,
            cls.compute_offering_2,
            cls.user,
            cls.account
        ]
    def setUpClass(cls):
        """
        1. Init ACS API and DB connection
        2. Init Datera API connection
        3. Create ACS Primary storage
        4. Create ACS compute and disk offering.
        5. Create ACS data disk without attaching to a VM 
        """
        logger.info("Setting up Class")

        # Set up API client
        testclient = super(TestVolumes, cls).getClsTestClient()
        cls.apiClient = testclient.getApiClient()
        cls.dbConnection = testclient.getDbConnection()

        # Setup test data
        td = TestData()
        if cls.config.TestData and cls.config.TestData.Path:
            td.update(cls.config.TestData.Path)
        cls.testdata = td.testdata

        # Get Resources from Cloud Infrastructure
        cls.zone = get_zone(cls.apiClient, zone_name=cls.config.zones[0].name)
        cls.cluster = list_clusters(cls.apiClient)[0]
        cls.template = get_template(cls.apiClient, cls.zone.id)
        cls.domain = get_domain(cls.apiClient, cls.testdata[TestData.domainId])

        # Set up datera connection
        datera = cls.testdata[TestData.Datera]
        cls.dt_client = get_api(username=datera[TestData.login],
                                password=datera[TestData.password],
                                hostname=datera[TestData.mvip],
                                version="v2")

        # Create test account
        cls.account = Account.create(cls.apiClient,
                                     cls.testdata["account"],
                                     admin=1)

        # Set up connection to make customized API calls
        cls.user = User.create(cls.apiClient,
                               cls.testdata["user"],
                               account=cls.account.name,
                               domainid=cls.domain.id)

        primarystorage = cls.testdata[TestData.primaryStorage]

        cls.primary_storage = StoragePool.create(
            cls.apiClient,
            primarystorage,
            scope=primarystorage[TestData.scope],
            zoneid=cls.zone.id,
            provider=primarystorage[TestData.provider],
            tags=primarystorage[TestData.tags],
            capacityiops=primarystorage[TestData.capacityIops],
            capacitybytes=primarystorage[TestData.capacityBytes],
            hypervisor=primarystorage[TestData.hypervisor])

        # Compute offering referenced in _cleanup below; assumes
        # TestData.computeOffering is defined in this suite's test data.
        cls.compute_offering = ServiceOffering.create(
            cls.apiClient, cls.testdata[TestData.computeOffering])

        cls.disk_offering = DiskOffering.create(
            cls.apiClient, cls.testdata[TestData.diskOffering])

        cls.disk_offering_new = DiskOffering.create(
            cls.apiClient,
            cls.testdata['testdiskofferings']['newsizeandiopsdo'])

        cls.supports_resign = cls._get_supports_resign()

        # Set up hypervisor specific connections
        if cls.cluster.hypervisortype.lower() == 'xenserver':
            cls.setUpXenServer()
        if cls.cluster.hypervisortype.lower() == 'kvm':
            cls.setUpKVM()

        # Create 1 data volume_1
        cls.volume = Volume.create(cls.apiClient,
                                   cls.testdata[TestData.volume_1],
                                   account=cls.account.name,
                                   domainid=cls.domain.id,
                                   zoneid=cls.zone.id,
                                   diskofferingid=cls.disk_offering.id)

        # Resources that are to be destroyed
        cls._cleanup = [
            cls.volume, cls.compute_offering, cls.disk_offering,
            cls.disk_offering_new, cls.user, cls.account
        ]
    def setUpCloudStack(cls):
        cls._cleanup = []

        cls.spapi = spapi.Api.fromConfig(multiCluster=True)
        testClient = super(TestResizeVolumes, cls).getClsTestClient()
        cls.apiclient = testClient.getApiClient()
        cls.unsupportedHypervisor = False
        cls.hypervisor = testClient.getHypervisorInfo()
        if cls.hypervisor.lower() in ("hyperv", "lxc"):
            cls.unsupportedHypervisor = True
            return

        cls.services = testClient.getParsedTestDataConfig()
        # Get Zone, Domain and templates
        cls.domain = get_domain(cls.apiclient)
        cls.zone = None

        zones = list_zones(cls.apiclient)

        for z in zones:
            if z.internaldns1 == cls.getClsConfig().mgtSvr[0].mgtSvrIp:
                cls.zone = z

        td = TestData()
        cls.testdata = td.testdata
        cls.helper = StorPoolHelper()

        storpool_primary_storage = cls.testdata[TestData.primaryStorage]

        cls.template_name = storpool_primary_storage.get("name")

        storpool_service_offerings = cls.testdata[TestData.serviceOfferingsIops]

        storpool_disk_offerings = {
            "name": "iops",
            "displaytext": "Testing IOPS on StorPool",
            "customizediops": True,
            "storagetype": "shared",
            "tags" : cls.template_name,
            }

        storage_pool = list_storage_pools(
            cls.apiclient,
            name = cls.template_name
            )

        service_offerings = list_service_offering(
            cls.apiclient,
            name='iops'
            )

        disk_offerings = list_disk_offering(
            cls.apiclient,
            name="iops"
            )

        if disk_offerings is None:
            disk_offerings = DiskOffering.create(cls.apiclient, storpool_disk_offerings, custom = True)
        else:
            disk_offerings = disk_offerings[0]
        cls.disk_offerings = disk_offerings
        if storage_pool is None:
            storage_pool = StoragePool.create(cls.apiclient, storpool_primary_storage)
        else:
            storage_pool = storage_pool[0]
        cls.storage_pool = storage_pool
        cls.debug(pprint.pformat(storage_pool))
        if service_offerings is None:
            service_offerings = ServiceOffering.create(cls.apiclient, storpool_service_offerings)
        else:
            service_offerings = service_offerings[0]

        template = get_template(
            cls.apiclient,
            cls.zone.id,
            account="system"
        )

        cls.debug(pprint.pformat(template))
        cls.debug(pprint.pformat(cls.hypervisor))

        if template == FAILED:
            assert False, "get_template() failed to return template\
                    with description %s" % cls.services["ostype"]

        cls.services["domainid"] = cls.domain.id
        cls.services["small"]["zoneid"] = cls.zone.id
        cls.services["templates"]["ostypeid"] = template.ostypeid
        cls.services["zoneid"] = cls.zone.id

        cls.account = cls.helper.create_account(
                            cls.apiclient,
                            cls.services["account"],
                            accounttype = 1,
                            domainid=cls.domain.id,
                            )
        cls._cleanup.append(cls.account)

        securitygroup = SecurityGroup.list(cls.apiclient, account = cls.account.name, domainid= cls.account.domainid)[0]
        cls.helper.set_securityGroups(cls.apiclient, account = cls.account.name, domainid= cls.account.domainid, id = securitygroup.id)

        cls.service_offering = service_offerings
        cls.debug(pprint.pformat(cls.service_offering))

        cls.local_cluster = cls.helper.get_local_cluster(cls.apiclient, zoneid = cls.zone.id)
        cls.host = cls.helper.list_hosts_by_cluster_id(cls.apiclient, cls.local_cluster.id)

        assert len(cls.host) > 1, "At least 2 hosts are required in the local cluster"
        cls.host_on_local_1 = cls.host[0]
        cls.host_on_local_2 = cls.host[1]

        cls.remote_cluster = cls.helper.get_remote_cluster(cls.apiclient, zoneid = cls.zone.id)
        cls.host_remote = cls.helper.list_hosts_by_cluster_id(cls.apiclient, cls.remote_cluster.id)
        assert len(cls.host_remote) > 1, "At least 2 hosts are required in the remote cluster"

        cls.host_on_remote1 = cls.host_remote[0]
        cls.host_on_remote2 = cls.host_remote[1]

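        # Three custom-IOPS data disks (5 GB, 2,000-5,000 IOPS) used by the
        # resize tests.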
        cls.volume_1 = cls.helper.create_custom_disk(
            cls.apiclient,
            {"diskname":"StorPoolDisk" },
            zoneid=cls.zone.id,
            size = 5,
            miniops = 2000,
            maxiops = 5000,
            account=cls.account.name,
            domainid=cls.account.domainid,
            diskofferingid=cls.disk_offerings.id,
        )
        cls.volume_2 = cls.helper.create_custom_disk(
            cls.apiclient,
            {"diskname":"StorPoolDisk" },
            zoneid=cls.zone.id,
            size = 5,
            miniops = 2000,
            maxiops = 5000,
            account=cls.account.name,
            domainid=cls.account.domainid,
            diskofferingid=cls.disk_offerings.id,
        )
        cls.volume = cls.helper.create_custom_disk(
            cls.apiclient,
            {"diskname":"StorPoolDisk" },
            zoneid=cls.zone.id,
            size = 5,
            miniops = 2000,
            maxiops = 5000,
            account=cls.account.name,
            domainid=cls.account.domainid,
            diskofferingid=cls.disk_offerings.id,
        )
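        # Two general-purpose VMs plus two host-pinned VMs used for the live
        # migration tests.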
        cls.virtual_machine = cls.helper.create_vm_custom(
            cls.apiclient,
            {"name":"StorPool-%s" % uuid.uuid4() },
            zoneid=cls.zone.id,
            templateid=template.id,
            account=cls.account.name,
            domainid=cls.account.domainid,
            serviceofferingid=cls.service_offering.id,
            hypervisor=cls.hypervisor,
            minIops = 1000,
            maxIops = 5000,
            rootdisksize = 10
        )
        cls.virtual_machine2= cls.helper.create_vm_custom(
            cls.apiclient,
            {"name":"StorPool-%s" % uuid.uuid4() },
            zoneid=cls.zone.id,
            templateid=template.id,
            account=cls.account.name,
            domainid=cls.account.domainid,
            serviceofferingid=cls.service_offering.id,
            hypervisor=cls.hypervisor,
            minIops = 1000,
            maxIops = 5000,
            rootdisksize = 10
        )

        cls.virtual_machine_live_migration_1 = cls.helper.create_vm_custom(
            cls.apiclient,
            {"name":"StorPool-%s" % uuid.uuid4() },
            zoneid=cls.zone.id,
            templateid=template.id,
            account=cls.account.name,
            domainid=cls.account.domainid,
            serviceofferingid=cls.service_offering.id,
            hypervisor=cls.hypervisor,
            minIops = 1000,
            maxIops = 5000,
            hostid = cls.host_on_local_1.id,
            rootdisksize = 10
        )
        cls.virtual_machine_live_migration_2= cls.helper.create_vm_custom(
            cls.apiclient,
            {"name":"StorPool-%s" % uuid.uuid4() },
            zoneid=cls.zone.id,
            templateid=template.id,
            account=cls.account.name,
            domainid=cls.account.domainid,
            serviceofferingid=cls.service_offering.id,
            hypervisor=cls.hypervisor,
            minIops = 1000,
            maxIops = 5000,
            hostid = cls.host_on_remote1.id,
            rootdisksize = 10
        )

        cls.template = template
        cls.random_data_0 = random_gen(size=100)
        cls.test_dir = "/tmp"
        cls.random_data = "random.data"
        return
    def test_02_list_snapshots_with_removed_data_store(self):
        """Test listing volume snapshots with removed data stores
        """

        # 1 - Create new volume -> V
        # 2 - Create new Primary Storage -> PS
        # 3 - Attach and detach volume V from vm
        # 4 - Migrate volume V to PS
        # 5 - Take volume V snapshot -> S
        # 6 - List snapshot and verify it gets properly listed although Primary Storage was removed

        # Create new volume
        vol = Volume.create(
            self.apiclient,
            self.services["volume"],
            diskofferingid=self.disk_offering.id,
            zoneid=self.zone.id,
            account=self.account.name,
            domainid=self.account.domainid,
        )
        self.cleanup.append(vol)
        self.assertIsNotNone(vol, "Failed to create volume")
        vol_res = Volume.list(self.apiclient, id=vol.id)
        self.assertEqual(
            validateList(vol_res)[0], PASS,
            "Invalid response returned for list volumes")
        vol_uuid = vol_res[0].id
        clusters = list_clusters(self.apiclient, zoneid=self.zone.id)
        assert isinstance(clusters, list) and len(clusters) > 0

        # Attach created volume to vm, then detach it to be able to migrate it
        self.virtual_machine_with_disk.stop(self.apiclient)
        self.virtual_machine_with_disk.attach_volume(self.apiclient, vol)

        # Create new Primary Storage
        storage = StoragePool.create(self.apiclient,
                                     self.services["nfs2"],
                                     clusterid=clusters[0].id,
                                     zoneid=self.zone.id,
                                     podid=self.pod.id)

        self.cleanup.append(storage)
        self.assertEqual(storage.state, 'Up', "Check primary storage state")
        self.assertEqual(storage.type, 'NetworkFilesystem',
                         "Check storage pool type")
        storage_pools_response = list_storage_pools(self.apiclient,
                                                    id=storage.id)
        self.assertEqual(isinstance(storage_pools_response, list), True,
                         "Check list response returns a valid list")
        self.assertNotEqual(len(storage_pools_response), 0,
                            "Check list Hosts response")
        storage_response = storage_pools_response[0]
        self.assertEqual(storage_response.id, storage.id,
                         "Check storage pool ID")
        self.assertEqual(storage.type, storage_response.type,
                         "Check storage pool type ")

        self.virtual_machine_with_disk.detach_volume(self.apiclient, vol)

        # Migrate volume to new Primary Storage
        Volume.migrate(self.apiclient, storageid=storage.id, volumeid=vol.id)

        volume_response = list_volumes(
            self.apiclient,
            id=vol.id,
        )
        self.assertNotEqual(len(volume_response), 0,
                            "Check list Volumes response")
        volume_migrated = volume_response[0]
        self.assertEqual(volume_migrated.storageid, storage.id,
                         "Check volume storage id")

        # Take snapshot of new volume
        snapshot = Snapshot.create(self.apiclient,
                                   volume_migrated.id,
                                   account=self.account.name,
                                   domainid=self.account.domainid)

        self.debug("Snapshot created: ID - %s" % snapshot.id)

        # Delete volume, VM and created Primary Storage
        cleanup_resources(self.apiclient, self.cleanup)

        # List snapshot and verify it gets properly listed although Primary Storage was removed
        snapshot_response = Snapshot.list(self.apiclient, id=snapshot.id)
        self.assertNotEqual(len(snapshot_response), 0,
                            "Check list Snapshot response")
        self.assertEqual(snapshot_response[0].id, snapshot.id,
                         "Check snapshot id")

        # Delete snapshot and verify it gets properly deleted (should not be listed)
        self.cleanup = [snapshot]
        cleanup_resources(self.apiclient, self.cleanup)

        self.cleanup = []
        snapshot_response_2 = Snapshot.list(self.apiclient, id=snapshot.id)
        self.assertEqual(snapshot_response_2, None,
                         "Check list Snapshot response")

        return
    def setUpClass(cls):
        # Set up API client
        testclient = super(TestSnapshots, cls).getClsTestClient()
        cls.apiClient = testclient.getApiClient()
        cls.dbConnection = testclient.getDbConnection()

        td = TestData()

        if cls.config.TestData and cls.config.TestData.Path:
            td.update(cls.config.TestData.Path)

        cls.testdata = td.testdata

        cls.supports_resign = cls._get_supports_resign()

        # Set up xenAPI connection
        hosts = list_hosts(cls.apiClient,
                           clusterid=cls.testdata[TestData.clusterId])
        xenserver = cls.testdata[TestData.xenServer]

        for h in hosts:
            host_ip = "https://" + h.ipaddress
            try:
                cls.xen_session = XenAPI.Session(host_ip)
                cls.xen_session.xenapi.login_with_password(
                    xenserver[TestData.username], xenserver[TestData.password])
                break
            except XenAPI.Failure:
                # Login failed on this host; try the next one in the cluster.
                pass

        # Set up datera connection
        datera = cls.testdata[TestData.Datera]
        cls.dt_client = dfs_sdk.DateraApi(username=datera[TestData.login],
                                          password=datera[TestData.password],
                                          hostname=datera[TestData.mvip])

        # Get Resources from Cloud Infrastructure
        cls.zone = get_zone(cls.apiClient,
                            zone_id=cls.testdata[TestData.zoneId])
        cls.cluster = list_clusters(cls.apiClient)[0]
        cls.template = get_template(
            cls.apiClient,
            cls.zone.id,
            template_name=cls.testdata[TestData.templateName])
        cls.domain = get_domain(cls.apiClient, cls.testdata[TestData.domainId])

        # Create test account
        cls.account = Account.create(cls.apiClient,
                                     cls.testdata["account"],
                                     admin=1)

        # Set up connection to make customized API calls
        cls.user = User.create(cls.apiClient,
                               cls.testdata["user"],
                               account=cls.account.name,
                               domainid=cls.domain.id)

        primarystorage = cls.testdata[TestData.primaryStorage]

        cls.primary_storage = StoragePool.create(
            cls.apiClient,
            primarystorage,
            scope=primarystorage[TestData.scope],
            zoneid=cls.zone.id,
            provider=primarystorage[TestData.provider],
            tags=primarystorage[TestData.tags],
            capacityiops=primarystorage[TestData.capacityIops],
            capacitybytes=primarystorage[TestData.capacityBytes],
            hypervisor=primarystorage[TestData.hypervisor])

        cls.compute_offering = ServiceOffering.create(
            cls.apiClient, cls.testdata[TestData.computeOffering])

        cls.disk_offering = DiskOffering.create(
            cls.apiClient, cls.testdata[TestData.diskOffering])

        # Resources that are to be destroyed
        cls._cleanup = [
            cls.compute_offering, cls.disk_offering, cls.user, cls.account
        ]
    def setUpClass(cls):
        # Set up API client
        testclient = super(TestVMMigrationWithStorage, cls).getClsTestClient()

        cls.apiClient = testclient.getApiClient()
        cls.configData = testclient.getParsedTestDataConfig()
        cls.dbConnection = testclient.getDbConnection()

        cls.testdata = TestData().testdata

        xenserver = cls.testdata[TestData.xenServer]

        # Look up the host IP in the first cluster for the XenAPI connection
        host_ip = "https://" + \
                  list_hosts(cls.apiClient, clusterid=cls.testdata[TestData.clusterId1], name="XenServer-6.5-1")[0].ipaddress

        # Set up XenAPI connection
        cls.xen_session_1 = XenAPI.Session(host_ip)

        cls.xen_session_1.xenapi.login_with_password(xenserver[TestData.username], xenserver[TestData.password])

        # Look up the host IP in the second cluster for the XenAPI connection
        host_ip = "https://" + \
                  list_hosts(cls.apiClient, clusterid=cls.testdata[TestData.clusterId2], name="XenServer-6.5-3")[0].ipaddress

        # Set up XenAPI connection
        cls.xen_session_2 = XenAPI.Session(host_ip)

        cls.xen_session_2.xenapi.login_with_password(xenserver[TestData.username], xenserver[TestData.password])

        # Set up SolidFire connection
        solidfire = cls.testdata[TestData.solidFire]

        cls.sfe = ElementFactory.create(solidfire[TestData.mvip], solidfire[TestData.username], solidfire[TestData.password])

        # Get Resources from Cloud Infrastructure
        cls.zone = get_zone(cls.apiClient, zone_id=cls.testdata[TestData.zoneId])
        cls.cluster_1 = list_clusters(cls.apiClient, id=cls.testdata[TestData.clusterId1])[0]
        cls.cluster_2 = list_clusters(cls.apiClient, id=cls.testdata[TestData.clusterId2])[0]
        cls.template = get_template(cls.apiClient, cls.zone.id, cls.configData["ostype"])
        cls.domain = get_domain(cls.apiClient, cls.testdata[TestData.domainId])

        # Create test account
        cls.account = Account.create(
            cls.apiClient,
            cls.testdata["account"],
            admin=1
        )

        # Set up connection to make customized API calls
        cls.user = User.create(
            cls.apiClient,
            cls.testdata["user"],
            account=cls.account.name,
            domainid=cls.domain.id
        )

        url = cls.testdata[TestData.url]

        api_url = "http://" + url + ":8080/client/api"
        userkeys = User.registerUserKeys(cls.apiClient, cls.user.id)

        cls.cs_api = SignedAPICall.CloudStack(api_url, userkeys.apikey, userkeys.secretkey)

        primarystorage = cls.testdata[TestData.primaryStorage]

        cls.primary_storage = StoragePool.create(
            cls.apiClient,
            primarystorage
        )

        cls.compute_offering_1 = ServiceOffering.create(
            cls.apiClient,
            cls.testdata[TestData.computeOffering1]
        )

        cls.compute_offering_2 = ServiceOffering.create(
            cls.apiClient,
            cls.testdata[TestData.computeOffering2]
        )

        cls.compute_offering_3 = ServiceOffering.create(
            cls.apiClient,
            cls.testdata[TestData.computeOffering3]
        )

        cls.disk_offering_1 = DiskOffering.create(
            cls.apiClient,
            cls.testdata[TestData.diskOffering1]
        )

        cls.disk_offering_2 = DiskOffering.create(
            cls.apiClient,
            cls.testdata[TestData.diskOffering2]
        )

        # Resources that are to be destroyed
        cls._cleanup = [
            cls.compute_offering_1,
            cls.compute_offering_2,
            cls.compute_offering_3,
            cls.disk_offering_1,
            cls.disk_offering_2,
            cls.user,
            cls.account
        ]
    def setUpClass(cls):
        # Set up API client
        testclient = super(TestVMSnapshots, cls).getClsTestClient()
        cls.apiClient = testclient.getApiClient()

        cls.testdata = TestData().testdata

        # Set up XenAPI connection
        host_ip = "https://" + \
                  list_hosts(cls.apiClient, clusterid=cls.testdata[TestData.clusterId], name="XenServer-6.5-1")[0].ipaddress

        cls.xen_session = XenAPI.Session(host_ip)

        xenserver = cls.testdata[TestData.xenServer]

        cls.xen_session.xenapi.login_with_password(xenserver[TestData.username], xenserver[TestData.password])

        # Set up SolidFire connection
        cls.sf_client = sf_api.SolidFireAPI(endpoint_dict=cls.testdata[TestData.solidFire])

        # Get Resources from Cloud Infrastructure
        cls.zone = get_zone(cls.apiClient, zone_id=cls.testdata[TestData.zoneId])
        template = get_template(cls.apiClient, cls.zone.id, template_name=cls.testdata[TestData.templateName])
        cls.domain = get_domain(cls.apiClient, cls.testdata[TestData.domainId])

        # Create test account
        cls.account = Account.create(
            cls.apiClient,
            cls.testdata[TestData.account],
            admin=1
        )

        # Set up connection to make customized API calls
        user = User.create(
            cls.apiClient,
            cls.testdata[TestData.user],
            account=cls.account.name,
            domainid=cls.domain.id
        )

        url = cls.testdata[TestData.url]

        api_url = "http://" + url + ":8080/client/api"
        userkeys = User.registerUserKeys(cls.apiClient, user.id)

        cls.cs_api = SignedAPICall.CloudStack(api_url, userkeys.apikey, userkeys.secretkey)

        primarystorage = cls.testdata[TestData.primaryStorage]

        cls.primary_storage = StoragePool.create(
            cls.apiClient,
            primarystorage,
            scope=primarystorage[TestData.scope],
            zoneid=cls.zone.id,
            provider=primarystorage[TestData.provider],
            tags=primarystorage[TestData.tags],
            capacityiops=primarystorage[TestData.capacityIops],
            capacitybytes=primarystorage[TestData.capacityBytes],
            hypervisor=primarystorage[TestData.hypervisor]
        )

        compute_offering = ServiceOffering.create(
            cls.apiClient,
            cls.testdata[TestData.computeOffering]
        )

        cls.disk_offering = DiskOffering.create(
            cls.apiClient,
            cls.testdata[TestData.diskOffering]
        )

        # Create VM and volume for tests
        cls.virtual_machine = VirtualMachine.create(
            cls.apiClient,
            cls.testdata[TestData.virtualMachine],
            accountid=cls.account.name,
            zoneid=cls.zone.id,
            serviceofferingid=compute_offering.id,
            templateid=template.id,
            domainid=cls.domain.id,
            startvm=True
        )

        cls._cleanup = [
            cls.virtual_machine,
            compute_offering,
            cls.disk_offering,
            user,
            cls.account
        ]
    def setUpCloudStack(cls):
        cls.spapi = spapi.Api.fromConfig(multiCluster=True)
        testClient = super(TestMigrateVMWithVolumes, cls).getClsTestClient()
        cls.apiclient = testClient.getApiClient()

        cls._cleanup = []

        cls.unsupportedHypervisor = False
        cls.hypervisor = testClient.getHypervisorInfo()
        if cls.hypervisor.lower() in ("hyperv", "lxc"):
            cls.unsupportedHypervisor = True
            return

        cls.services = testClient.getParsedTestDataConfig()
        # Get Zone, Domain and templates
        cls.domain = get_domain(cls.apiclient)
        cls.zone = None

        zones = list_zones(cls.apiclient)

        for z in zones:
            if z.internaldns1 == cls.getClsConfig().mgtSvr[0].mgtSvrIp:
                cls.zone = z

        td = TestData()
        cls.testdata = td.testdata
        cls.helper = StorPoolHelper()
        storpool_primary_storage = cls.testdata[TestData.primaryStorage]
        cls.template_name = storpool_primary_storage.get("name")
        storpool_service_offerings = cls.testdata[TestData.serviceOffering]

        nfs_service_offerings = cls.testdata[TestData.serviceOfferingsPrimary]
        ceph_service_offerings = cls.testdata[TestData.serviceOfferingsCeph]

        nfs_disk_offerings = cls.testdata[TestData.nfsDiskOffering]
        ceph_disk_offerings = cls.testdata[TestData.cephDiskOffering]

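        # Look up the existing StorPool, NFS and Ceph pools and offerings used
        # by these tests.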
        storage_pool = list_storage_pools(cls.apiclient,
                                          name=cls.template_name)

        nfs_storage_pool = list_storage_pools(cls.apiclient, name='primary')

        ceph_primary_storage = cls.testdata[TestData.primaryStorage4]

        cls.ceph_storage_pool = list_storage_pools(
            cls.apiclient, name=ceph_primary_storage.get("name"))[0]

        service_offerings = list_service_offering(cls.apiclient,
                                                  name=cls.template_name)
        nfs_service_offering = list_service_offering(cls.apiclient, name='nfs')

        ceph_service_offering = list_service_offering(
            cls.apiclient, name=ceph_primary_storage.get("name"))

        if storage_pool is None:
            storage_pool = StoragePool.create(cls.apiclient,
                                              storpool_primary_storage)
        else:
            storage_pool = storage_pool[0]
        cls.storage_pool = storage_pool
        cls.debug(pprint.pformat(storage_pool))
        if service_offerings is None:
            service_offerings = ServiceOffering.create(
                cls.apiclient, storpool_service_offerings)
        else:
            service_offerings = service_offerings[0]
        if nfs_service_offering is None:
            nfs_service_offering = ServiceOffering.create(
                cls.apiclient, nfs_service_offerings)
        else:
            nfs_service_offering = nfs_service_offering[0]

        if ceph_service_offering is None:
            ceph_service_offering = ServiceOffering.create(
                cls.apiclient, ceph_service_offerings)
        else:
            ceph_service_offering = ceph_service_offering[0]

        nfs_disk_offering = list_disk_offering(cls.apiclient, name="nfs")
        if nfs_disk_offering is None:
            cls.nfs_disk_offering = DiskOffering.create(cls.apiclient,
                                                        nfs_disk_offerings)
        else:
            cls.nfs_disk_offering = nfs_disk_offering[0]

        ceph_disk_offering = list_disk_offering(cls.apiclient, name="ceph")
        if ceph_disk_offering is None:
            cls.ceph_disk_offering = DiskOffering.create(
                cls.apiclient, ceph_disk_offerings)
        else:
            cls.ceph_disk_offering = ceph_disk_offering[0]

        template = get_template(cls.apiclient, cls.zone.id, account="system")

        cls.nfs_storage_pool = nfs_storage_pool[0]
        if cls.nfs_storage_pool.state == "Maintenance":
            cls.nfs_storage_pool = StoragePool.cancelMaintenance(
                cls.apiclient, cls.nfs_storage_pool.id)

        if cls.ceph_storage_pool.state == "Maintenance":
            cls.ceph_storage_pool = StoragePool.cancelMaintenance(
                cls.apiclient, cls.ceph_storage_pool.id)

        cls.account = cls.helper.create_account(cls.apiclient,
                                                cls.services["account"],
                                                accounttype=1,
                                                domainid=cls.domain.id,
                                                roleid=1)
        cls._cleanup.append(cls.account)

        securitygroup = SecurityGroup.list(cls.apiclient,
                                           account=cls.account.name,
                                           domainid=cls.account.domainid)[0]
        cls.helper.set_securityGroups(cls.apiclient,
                                      account=cls.account.name,
                                      domainid=cls.account.domainid,
                                      id=securitygroup.id)

        cls.vm = VirtualMachine.create(
            cls.apiclient, {"name": "StorPool-%s" % uuid.uuid4()},
            zoneid=cls.zone.id,
            templateid=template.id,
            accountid=cls.account.name,
            domainid=cls.account.domainid,
            serviceofferingid=nfs_service_offering.id,
            diskofferingid=cls.nfs_disk_offering.id,
            hypervisor=cls.hypervisor,
            rootdisksize=10)

        cls.vm2 = VirtualMachine.create(
            cls.apiclient, {"name": "StorPool-%s" % uuid.uuid4()},
            zoneid=cls.zone.id,
            templateid=template.id,
            accountid=cls.account.name,
            domainid=cls.account.domainid,
            serviceofferingid=ceph_service_offering.id,
            diskofferingid=cls.ceph_disk_offering.id,
            hypervisor=cls.hypervisor,
            rootdisksize=10)

        cls.debug(pprint.pformat(template))
        cls.debug(pprint.pformat(cls.hypervisor))

        if template == FAILED:
            assert False, "get_template() failed to return template\
                    with description %s" % cls.services["ostype"]

        cls.services["domainid"] = cls.domain.id
        cls.services["small"]["zoneid"] = cls.zone.id
        cls.services["templates"]["ostypeid"] = template.ostypeid
        cls.services["zoneid"] = cls.zone.id

        cls.service_offering = service_offerings
        cls.nfs_service_offering = nfs_service_offering
        cls.debug(pprint.pformat(cls.service_offering))

        cls.template = template
        cls.random_data_0 = random_gen(size=100)
        cls.test_dir = "/tmp"
        cls.random_data = "random.data"
        return
    def setUpClass(cls):
        cls.spapi = spapi.Api.fromConfig(multiCluster=True)

        testClient = super(TestStoragePool, cls).getClsTestClient()
        cls.apiclient = testClient.getApiClient()
        cls.userapiclient = testClient.getUserApiClient(
            UserName="******", DomainName="ROOT")
        cls.unsupportedHypervisor = False
        cls.hypervisor = testClient.getHypervisorInfo()
        if cls.hypervisor.lower() in ("hyperv", "lxc"):
            cls.unsupportedHypervisor = True
            return

        cls.services = testClient.getParsedTestDataConfig()
        # Get Zone, Domain and templates
        cls.domain = get_domain(cls.apiclient)
        cls.zone = None

        zones = list_zones(cls.apiclient)

        for z in zones:
            if z.internaldns1 == cls.getClsConfig().mgtSvr[0].mgtSvrIp:
                cls.zone = z

        storpool_primary_storage = {
            "name": "ssd",
            "zoneid": cls.zone.id,
            "url": "ssd",
            "scope": "zone",
            "capacitybytes": 4500000,
            "capacityiops": 155466464221111121,
            "hypervisor": "kvm",
            "provider": "StorPool",
            "tags": "ssd"
        }

        storpool_service_offerings = {
            "name": "ssd",
            "displaytext": "SP_CO_2 (Min IOPS = 10,000; Max IOPS = 15,000)",
            "cpunumber": 1,
            "cpuspeed": 500,
            "memory": 512,
            "storagetype": "shared",
            "customizediops": False,
            "hypervisorsnapshotreserve": 200,
            "tags": "ssd"
        }

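        # Reuse the existing 'ssd' storage pool and service offering if present,
        # otherwise create them from the definitions above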
        storage_pool = list_storage_pools(cls.apiclient, name='ssd')

        service_offerings = list_service_offering(cls.apiclient, name='ssd')

        disk_offerings = list_disk_offering(cls.apiclient, name="Small")

        cls.disk_offerings = disk_offerings[0]
        if storage_pool is None:
            storage_pool = StoragePool.create(cls.apiclient,
                                              storpool_primary_storage)
        else:
            storage_pool = storage_pool[0]
        cls.storage_pool = storage_pool
        cls.debug(pprint.pformat(storage_pool))
        if service_offerings is None:
            service_offerings = ServiceOffering.create(
                cls.apiclient, storpool_service_offerings)
        else:
            service_offerings = service_offerings[0]

        template = get_template(cls.apiclient, cls.zone.id, account="system")

        if template == FAILED:
            assert False, "get_template() failed to return template\
                    with description %s" % cls.services["ostype"]

        cls.services["domainid"] = cls.domain.id
        cls.services["small"]["zoneid"] = cls.zone.id
        cls.services["templates"]["ostypeid"] = template.ostypeid
        cls.services["zoneid"] = cls.zone.id

        cls.service_offering = service_offerings

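        # Look up the StorPoolUser account; create it (with the User role) if missing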
        user = list_users(cls.apiclient,
                          account='StorPoolUser',
                          domainid=cls.domain.id)
        account = list_accounts(cls.apiclient, id=user[0].accountid)
        if account is None:
            role = Role.list(cls.apiclient, name='User')
            cmd = createAccount.createAccountCmd()
            cmd.email = '*****@*****.**'
            cmd.firstname = 'StorPoolUser'
            cmd.lastname = 'StorPoolUser'

            cmd.password = '******'
            cmd.username = '******'
            cmd.roleid = role[0].id
            account = cls.apiclient.createAccount(cmd)
        else:
            account = account[0]

        cls.account = account

        #         cls.tmp_files = []
        #         cls.keypair = SSHKeyPair.create(
        #                                     cls.apiclient,
        #                                     name=random_gen() + ".pem",
        #                                     account=cls.account.name,
        #                                     domainid=cls.account.domainid)
        #
        #         keyPairFilePath = tempfile.gettempdir() + os.sep + cls.keypair.name
        #         # Cleanup at end of execution
        #         cls.tmp_files.append(keyPairFilePath)
        #
        #         cls.debug("File path: %s" % keyPairFilePath)
        #
        #         f = open(keyPairFilePath, "w+")
        #         f.write(cls.keypair.privatekey)
        #         f.close()
        #
        #         os.system("chmod 400 " + keyPairFilePath)
        #
        #         cls.keyPairFilePath = keyPairFilePath

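        # Data volumes and VMs created through the user API client; all of them are
        # registered in cls._cleanup at the end of setUpClass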
        cls.volume_1 = Volume.create(
            cls.userapiclient,
            {"diskname": "StorPoolDisk-1"},
            zoneid=cls.zone.id,
            diskofferingid=cls.disk_offerings.id,
        )
        cls.volume_2 = Volume.create(
            cls.userapiclient,
            {"diskname": "StorPoolDisk-2"},
            zoneid=cls.zone.id,
            diskofferingid=cls.disk_offerings.id,
        )
        cls.volume = Volume.create(
            cls.userapiclient,
            {"diskname": "StorPoolDisk-3"},
            zoneid=cls.zone.id,
            diskofferingid=cls.disk_offerings.id,
        )
        cls.virtual_machine = VirtualMachine.create(
            cls.userapiclient,
            {"name": "StorPool-%s" % uuid.uuid4()},
            zoneid=cls.zone.id,
            templateid=template.id,
            serviceofferingid=cls.service_offering.id,
            hypervisor=cls.hypervisor,
            rootdisksize=10,
        )
        cls.virtual_machine2 = VirtualMachine.create(
            cls.userapiclient,
            {"name": "StorPool-%s" % uuid.uuid4()},
            zoneid=cls.zone.id,
            templateid=template.id,
            serviceofferingid=cls.service_offering.id,
            hypervisor=cls.hypervisor,
            rootdisksize=10,
        )
        cls.template = template
        cls.random_data_0 = random_gen(size=100)
        cls.test_dir = "/tmp"
        cls.random_data = "random.data"
        cls._cleanup = []
        cls._cleanup.append(cls.virtual_machine)
        cls._cleanup.append(cls.virtual_machine2)
        cls._cleanup.append(cls.volume_1)
        cls._cleanup.append(cls.volume_2)
        cls._cleanup.append(cls.volume)
        return
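    # Hedged sketch (assumption, not part of the original example): the resources
    # collected in cls._cleanup above are normally released in a matching
    # tearDownClass, along these lines:
    @classmethod
    def tearDownClass(cls):
        try:
            # destroy the VMs and volumes registered during setUpClass
            cleanup_resources(cls.apiclient, cls._cleanup)
        except Exception as e:
            raise Exception("Warning: exception during cleanup: %s" % e)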
    def setUpClass(cls):
        # Set up API client
        testclient = super(TestLinstorVolumes, cls).getClsTestClient()

        cls.apiClient = testclient.getApiClient()
        cls.configData = testclient.getParsedTestDataConfig()
        cls.dbConnection = testclient.getDbConnection()
        cls.testdata = TestData().testdata

        # Get Resources from Cloud Infrastructure
        cls.zone = get_zone(cls.apiClient, zone_id=cls.testdata[TestData.zoneId])
        cls.cluster = list_clusters(cls.apiClient)[0]
        cls.template = get_template(cls.apiClient, cls.zone.id, hypervisor=TestData.hypervisor_type)
        cls.domain = get_domain(cls.apiClient, cls.testdata[TestData.domainId])

        # Create test account
        cls.account = Account.create(
            cls.apiClient,
            cls.testdata["account"],
            admin=1
        )

        # Set up connection to make customized API calls
        cls.user = User.create(
            cls.apiClient,
            cls.testdata["user"],
            account=cls.account.name,
            domainid=cls.domain.id
        )

        primarystorage = cls.testdata[TestData.primaryStorage]

        cls.primary_storage = StoragePool.create(
            cls.apiClient,
            primarystorage,
            scope=primarystorage[TestData.scope],
            zoneid=cls.zone.id,
            provider=primarystorage[TestData.provider],
            tags=primarystorage[TestData.tags],
            hypervisor=primarystorage[TestData.hypervisor]
        )

        cls.compute_offering = ServiceOffering.create(
            cls.apiClient,
            cls.testdata[TestData.computeOffering]
        )

        cls.disk_offering = DiskOffering.create(
            cls.apiClient,
            cls.testdata[TestData.diskOffering]
        )

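        # Additional primary storage pools and disk offerings that are only needed
        # when the optional migration tests are enabled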
        if cls.testdata[TestData.migrationTests]:
            primarystorage_sameinst = cls.testdata[TestData.primaryStorageSameInstance]
            cls.primary_storage_same_inst = StoragePool.create(
                cls.apiClient,
                primarystorage_sameinst,
                scope=primarystorage_sameinst[TestData.scope],
                zoneid=cls.zone.id,
                provider=primarystorage_sameinst[TestData.provider],
                tags=primarystorage_sameinst[TestData.tags],
                hypervisor=primarystorage_sameinst[TestData.hypervisor]
            )

            primarystorage_distinctinst = cls.testdata[TestData.primaryStorageDistinctInstance]
            cls.primary_storage_distinct_inst = StoragePool.create(
                cls.apiClient,
                primarystorage_distinctinst,
                scope=primarystorage_distinctinst[TestData.scope],
                zoneid=cls.zone.id,
                provider=primarystorage_distinctinst[TestData.provider],
                tags=primarystorage_distinctinst[TestData.tags],
                hypervisor=primarystorage_distinctinst[TestData.hypervisor]
            )

            cls.disk_offering_same_inst = DiskOffering.create(
                cls.apiClient,
                cls.testdata[TestData.diskOfferingSameInstance]
            )

            cls.disk_offering_distinct_inst = DiskOffering.create(
                cls.apiClient,
                cls.testdata[TestData.diskOfferingDistinctInstance]
            )

        # Create VM and volume for tests
        cls.virtual_machine = VirtualMachine.create(
            cls.apiClient,
            cls.testdata[TestData.virtualMachine],
            accountid=cls.account.name,
            zoneid=cls.zone.id,
            serviceofferingid=cls.compute_offering.id,
            templateid=cls.template.id,
            domainid=cls.domain.id,
            startvm=False
        )

        TestLinstorVolumes._start_vm(cls.virtual_machine)
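        # _start_vm is a helper defined elsewhere in this suite; since the VM is
        # created with startvm=False above, the helper presumably just starts it
        # and waits for the Running state (assumption, implementation not shown here)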

        cls.volume = Volume.create(
            cls.apiClient,
            cls.testdata[TestData.volume_1],
            account=cls.account.name,
            domainid=cls.domain.id,
            zoneid=cls.zone.id,
            diskofferingid=cls.disk_offering.id
        )

        # Resources that are to be destroyed
        cls._cleanup = [
            cls.volume,
            cls.virtual_machine,
            cls.compute_offering,
            cls.disk_offering,
            cls.user,
            cls.account
        ]
    def test_vag_per_host_5(self):
        hosts = list_hosts(self.apiClient, clusterid=self.cluster.id)

        self.assertTrue(
            len(hosts) >= 2, "There needs to be at least two hosts.")

        unique_vag_ids = self._get_unique_vag_ids(hosts)

        self.assertTrue(
            len(hosts) == len(unique_vag_ids),
            "To run this test, each host should be in its own VAG.")

        primarystorage = self.testdata[TestData.primaryStorage]

        primary_storage = StoragePool.create(
            self.apiClient,
            primarystorage,
            scope=primarystorage[TestData.scope],
            zoneid=self.zone.id,
            provider=primarystorage[TestData.provider],
            tags=primarystorage[TestData.tags],
            capacityiops=primarystorage[TestData.capacityIops],
            capacitybytes=primarystorage[TestData.capacityBytes],
            hypervisor=primarystorage[TestData.hypervisor])

        self.cleanup.append(primary_storage)

        self.virtual_machine = VirtualMachine.create(
            self.apiClient,
            self.testdata[TestData.virtualMachine],
            accountid=self.account.name,
            zoneid=self.zone.id,
            serviceofferingid=self.compute_offering.id,
            templateid=self.template.id,
            domainid=self.domain.id,
            startvm=True)

        root_volume = self._get_root_volume(self.virtual_machine)

        sf_account_id = sf_util.get_sf_account_id(
            self.cs_api, self.account.id, primary_storage.id, self,
            TestAddRemoveHosts._sf_account_id_should_be_non_zero_int_err_msg)

        sf_volumes = sf_util.get_active_sf_volumes(self.sfe, sf_account_id)

        sf_volume = sf_util.check_and_get_sf_volume(sf_volumes,
                                                    root_volume.name, self)

        sf_vag_ids = sf_util.get_vag_ids(self.cs_api, self.cluster.id,
                                         primary_storage.id, self)

        sf_util.check_vags(sf_volume, sf_vag_ids, self)

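        # Delete the host and verify the volume's VAG assignments are unchanged
        # while the host's IQN no longer appears in any VAG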
        host = Host(hosts[0].__dict__)

        host_iqn = self._get_host_iqn(host)

        all_vags = sf_util.get_all_vags(self.sfe)

        host_vag = self._get_host_vag(host_iqn, all_vags)

        self.assertTrue(host_vag is not None, "The host should be in a VAG.")

        host.delete(self.apiClient)

        sf_volumes = sf_util.get_active_sf_volumes(self.sfe, sf_account_id)

        sf_volume = sf_util.check_and_get_sf_volume(sf_volumes,
                                                    root_volume.name, self)

        sf_util.check_vags(sf_volume, sf_vag_ids, self)

        all_vags = sf_util.get_all_vags(self.sfe)

        host_vag = self._get_host_vag(host_iqn, all_vags)

        self.assertTrue(host_vag is None, "The host should not be in a VAG.")

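        # Re-add the host to the cluster; right after being added there should be
        # one more host than unique VAGs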
        details = {
            TestData.username: "******",
            TestData.password: "******",
            TestData.url: "http://" + host.ipaddress,
            TestData.podId: host.podid,
            TestData.zoneId: host.zoneid
        }

        host = Host.create(self.apiClient,
                           self.cluster,
                           details,
                           hypervisor=host.hypervisor)

        self.assertTrue(isinstance(host, Host), "'host' is not a 'Host'.")

        hosts = list_hosts(self.apiClient, clusterid=self.cluster.id)

        unique_vag_ids = self._get_unique_vag_ids(hosts)

        self.assertTrue(
            len(hosts) == len(unique_vag_ids) + 1,
            "There should be one more host than unique VAG.")
    def test_02_storage_migrate_root_and_data_disks(self):
        primarystorage2 = self.testdata[TestData.primaryStorage2]

        primary_storage_2 = StoragePool.create(
            self.apiClient,
            primarystorage2,
            clusterid=self.cluster_1.id
        )

        primary_storage_3 = StoragePool.create(
            self.apiClient,
            primarystorage2,
            clusterid=self.cluster_2.id
        )

        src_host, dest_host = self._get_source_and_dest_hosts()

        virtual_machine = VirtualMachine.create(
            self.apiClient,
            self.testdata[TestData.virtualMachine],
            accountid=self.account.name,
            zoneid=self.zone.id,
            serviceofferingid=self.compute_offering_3.id,
            templateid=self.template.id,
            domainid=self.domain.id,
            hostid=src_host.id,
            startvm=True
        )

        cs_data_volume = Volume.create(
            self.apiClient,
            self.testdata[TestData.volume_1],
            account=self.account.name,
            domainid=self.domain.id,
            zoneid=self.zone.id,
            diskofferingid=self.disk_offering_1.id
        )

        self.cleanup = [
            virtual_machine,
            cs_data_volume,
            primary_storage_2,
            primary_storage_3
        ]

        cs_data_volume = virtual_machine.attach_volume(
            self.apiClient,
            cs_data_volume
        )

        sf_account_id = sf_util.get_sf_account_id(self.cs_api, self.account.id, self.primary_storage.id, self,
                                                  TestVMMigrationWithStorage._sf_account_id_should_be_non_zero_int_err_msg)

        sf_volumes = sf_util.get_active_sf_volumes(self.sfe, sf_account_id)

        sf_data_volume = sf_util.check_and_get_sf_volume(sf_volumes, cs_data_volume.name, self)

        sf_data_volume = self._migrate_and_verify_one_disk_only(virtual_machine, dest_host, cs_data_volume, sf_account_id,
                                                                sf_data_volume, self.xen_session_1, self.xen_session_2)

        src_host, dest_host = dest_host, src_host

        self._migrate_and_verify_one_disk_only(virtual_machine, dest_host, cs_data_volume, sf_account_id, sf_data_volume,
                                               self.xen_session_2, self.xen_session_1)
    def test_02_list_snapshots_with_removed_data_store(self):
        """Test listing volume snapshots with removed data stores
        """

        # 1) Create new Primary Storage
        clusters = list_clusters(self.apiclient, zoneid=self.zone.id)
        assert isinstance(clusters, list) and len(clusters) > 0

        storage = StoragePool.create(self.apiclient,
                                     self.services["nfs"],
                                     clusterid=clusters[0].id,
                                     zoneid=self.zone.id,
                                     podid=self.pod.id)
        self.assertEqual(storage.state, 'Up', "Check primary storage state")
        self.assertEqual(storage.type, 'NetworkFilesystem',
                         "Check storage pool type")
        storage_pools_response = list_storage_pools(self.apiclient,
                                                    id=storage.id)
        self.assertEqual(isinstance(storage_pools_response, list), True,
                         "Check list response returns a valid list")
        self.assertNotEqual(len(storage_pools_response), 0,
                            "Check list storage pools response")
        storage_response = storage_pools_response[0]
        self.assertEqual(storage_response.id, storage.id,
                         "Check storage pool ID")
        self.assertEqual(storage.type, storage_response.type,
                         "Check storage pool type")

        # 2) Migrate VM ROOT volume to new Primary Storage
        volumes = list_volumes(
            self.apiclient,
            virtualmachineid=self.virtual_machine_with_disk.id,
            type='ROOT',
            listall=True)
        Volume.migrate(self.apiclient,
                       storageid=storage.id,
                       volumeid=volumes[0].id,
                       livemigrate="true")

        volume_response = list_volumes(
            self.apiclient,
            id=volumes[0].id,
        )
        self.assertNotEqual(len(volume_response), 0,
                            "Check list Volumes response")
        volume_migrated = volume_response[0]
        self.assertEqual(volume_migrated.storageid, storage.id,
                         "Check volume storage id")
        self.cleanup.append(self.virtual_machine_with_disk)
        self.cleanup.append(storage)

        # 3) Take snapshot of VM ROOT volume
        snapshot = Snapshot.create(self.apiclient,
                                   volume_migrated.id,
                                   account=self.account.name,
                                   domainid=self.account.domainid)
        self.debug("Snapshot created: ID - %s" % snapshot.id)

        # 4) Delete VM and created Primary Storage
        cleanup_resources(self.apiclient, self.cleanup)

        # 5) List snapshot and verify it gets properly listed although Primary Storage was removed
        snapshot_response = Snapshot.list(self.apiclient, id=snapshot.id)
        self.assertNotEqual(len(snapshot_response), 0,
                            "Check list Snapshot response")
        self.assertEqual(snapshot_response[0].id, snapshot.id,
                         "Check snapshot id")

        # 6) Delete snapshot and verify it gets properly deleted (should not be listed)
        self.cleanup = [snapshot]
        cleanup_resources(self.apiclient, self.cleanup)

        snapshot_response_2 = Snapshot.list(self.apiclient, id=snapshot.id)
        self.assertEqual(snapshot_response_2, None,
                         "Check list Snapshot response")

        return
    def setUpCloudStack(cls):
        testClient = super(TestStoragePool, cls).getClsTestClient()
        cls.apiclient = testClient.getApiClient()
        cls.unsupportedHypervisor = False
        cls.hypervisor = testClient.getHypervisorInfo()
        if cls.hypervisor.lower() in ("hyperv", "lxc"):
            cls.unsupportedHypervisor = True
            return

        cls.services = testClient.getParsedTestDataConfig()
        # Get Zone, Domain and templates
        cls.domain = get_domain(cls.apiclient)
        cls.zone = None
        cls._cleanup = []

        zones = list_zones(cls.apiclient)

        for z in zones:
            if z.internaldns1 == cls.getClsConfig().mgtSvr[0].mgtSvrIp:
                cls.zone = z

        td = TestData()
        cls.testdata = td.testdata
        cls.helper = StorPoolHelper()

        cls.account = cls.helper.create_account(cls.apiclient,
                                                cls.services["account"],
                                                accounttype=1,
                                                domainid=cls.domain.id,
                                                roleid=1)
        cls._cleanup.append(cls.account)

        securitygroup = SecurityGroup.list(cls.apiclient,
                                           account=cls.account.name,
                                           domainid=cls.account.domainid)[0]
        cls.helper.set_securityGroups(cls.apiclient,
                                      account=cls.account.name,
                                      domainid=cls.account.domainid,
                                      id=securitygroup.id)

        storpool_primary_storage = cls.testdata[TestData.primaryStorage]

        storpool_service_offerings = cls.testdata[TestData.serviceOffering]

        cls.template_name = storpool_primary_storage.get("name")

        storage_pool = list_storage_pools(cls.apiclient,
                                          name=cls.template_name)

        service_offerings = list_service_offering(cls.apiclient,
                                                  name=cls.template_name)

        disk_offerings = list_disk_offering(cls.apiclient, name="Small")

        disk_offering_20 = list_disk_offering(cls.apiclient, name="Medium")

        disk_offering_100 = list_disk_offering(cls.apiclient, name="Large")

        cls.disk_offerings = disk_offerings[0]
        cls.disk_offering_20 = disk_offering_20[0]
        cls.disk_offering_100 = disk_offering_100[0]
        cls.debug(pprint.pformat(storage_pool))
        if storage_pool is None:
            storage_pool = StoragePool.create(cls.apiclient,
                                              storpool_primary_storage)
        else:
            storage_pool = storage_pool[0]
        cls.storage_pool = storage_pool
        cls.debug(pprint.pformat(storage_pool))
        if service_offerings is None:
            service_offerings = ServiceOffering.create(
                cls.apiclient, storpool_service_offerings)
        else:
            service_offerings = service_offerings[0]
        # The version of CentOS has to be supported
        template = get_template(cls.apiclient, cls.zone.id, account="system")

        cls.debug(pprint.pformat(template))
        cls.debug(pprint.pformat(cls.hypervisor))

        if template == FAILED:
            assert False, "get_template() failed to return template\
                    with description %s" % cls.services["ostype"]

        cls.service_offering = service_offerings
        cls.debug(pprint.pformat(cls.service_offering))

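        # Locate the local and remote StorPool clusters in the zone and the hosts
        # in each of them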
        cls.local_cluster = cls.helper.get_local_cluster(cls.apiclient,
                                                         zoneid=cls.zone.id)
        cls.host = cls.helper.list_hosts_by_cluster_id(cls.apiclient,
                                                       cls.local_cluster.id)

        cls.remote_cluster = cls.helper.get_remote_cluster(cls.apiclient,
                                                           zoneid=cls.zone.id)
        cls.host_remote = cls.helper.list_hosts_by_cluster_id(
            cls.apiclient, cls.remote_cluster.id)

        cls.services["domainid"] = cls.domain.id
        cls.services["small"]["zoneid"] = cls.zone.id
        cls.services["templates"]["ostypeid"] = template.ostypeid
        cls.services["zoneid"] = cls.zone.id
        cls.services["diskofferingid"] = cls.disk_offerings.id

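        # Two data volumes and three VMs, all placed on the first host of the
        # local cluster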
        cls.volume_1 = Volume.create(cls.apiclient,
                                     cls.services,
                                     account=cls.account.name,
                                     domainid=cls.account.domainid)

        cls.volume = Volume.create(cls.apiclient,
                                   cls.services,
                                   account=cls.account.name,
                                   domainid=cls.account.domainid)

        cls.virtual_machine = VirtualMachine.create(
            cls.apiclient, {"name": "StorPool-%s" % uuid.uuid4()},
            zoneid=cls.zone.id,
            templateid=template.id,
            accountid=cls.account.name,
            domainid=cls.account.domainid,
            serviceofferingid=cls.service_offering.id,
            hypervisor=cls.hypervisor,
            hostid=cls.host[0].id,
            rootdisksize=10)
        cls.virtual_machine2 = VirtualMachine.create(
            cls.apiclient, {"name": "StorPool-%s" % uuid.uuid4()},
            zoneid=cls.zone.id,
            templateid=template.id,
            accountid=cls.account.name,
            domainid=cls.account.domainid,
            serviceofferingid=cls.service_offering.id,
            hypervisor=cls.hypervisor,
            hostid=cls.host[0].id,
            rootdisksize=10)
        cls.virtual_machine3 = VirtualMachine.create(
            cls.apiclient, {"name": "StorPool-%s" % uuid.uuid4()},
            zoneid=cls.zone.id,
            templateid=template.id,
            accountid=cls.account.name,
            domainid=cls.account.domainid,
            serviceofferingid=cls.service_offering.id,
            hypervisor=cls.hypervisor,
            hostid=cls.host[0].id,
            rootdisksize=10)
        cls.template = template
        cls.random_data_0 = random_gen(size=100)
        cls.test_dir = "/tmp"
        cls.random_data = "random.data"

        return
    def test_02_list_snapshots_with_removed_data_store(self):
        """Test listing volume snapshots with removed data stores
        """

        # 1 - Create new volume -> V
        # 2 - Create new Primary Storage -> PS
        # 3 - Attach and detach volume V from vm
        # 4 - Migrate volume V to PS
        # 5 - Take volume V snapshot -> S
        # 6 - List snapshot and verify it gets properly listed although Primary Storage was removed
        
        # Create new volume
        vol = Volume.create(
            self.apiclient,
            self.services["volume"],
            diskofferingid=self.disk_offering.id,
            zoneid=self.zone.id,
            account=self.account.name,
            domainid=self.account.domainid,
        )
        self.cleanup.append(vol)
        self.assertIsNotNone(vol, "Failed to create volume")
        vol_res = Volume.list(
            self.apiclient,
            id=vol.id
        )
        self.assertEqual(
            validateList(vol_res)[0],
            PASS,
            "Invalid response returned for list volumes")
        vol_uuid = vol_res[0].id
        
        # Create new Primary Storage
        clusters = list_clusters(
            self.apiclient,
            zoneid=self.zone.id
        )
        assert isinstance(clusters, list) and len(clusters) > 0

        storage = StoragePool.create(self.apiclient,
                                     self.services["nfs2"],
                                     clusterid=clusters[0].id,
                                     zoneid=self.zone.id,
                                     podid=self.pod.id
                                     )
        self.cleanup.append(self.virtual_machine_with_disk)
        self.cleanup.append(storage)

        self.assertEqual(
            storage.state,
            'Up',
            "Check primary storage state"
        )
        self.assertEqual(
            storage.type,
            'NetworkFilesystem',
            "Check storage pool type"
        )
        storage_pools_response = list_storage_pools(self.apiclient,
                                                    id=storage.id)
        self.assertEqual(
            isinstance(storage_pools_response, list),
            True,
            "Check list response returns a valid list"
        )
        self.assertNotEqual(
            len(storage_pools_response),
            0,
            "Check list storage pools response"
        )
        storage_response = storage_pools_response[0]
        self.assertEqual(
            storage_response.id,
            storage.id,
            "Check storage pool ID"
        )
        self.assertEqual(
            storage.type,
            storage_response.type,
            "Check storage pool type"
        )

        # Attach created volume to vm, then detach it to be able to migrate it
        self.virtual_machine_with_disk.stop(self.apiclient)
        self.virtual_machine_with_disk.attach_volume(
            self.apiclient,
            vol
        )
        self.virtual_machine_with_disk.detach_volume(
            self.apiclient,
            vol
        )

        # Migrate volume to new Primary Storage
        Volume.migrate(self.apiclient,
            storageid=storage.id,
            volumeid=vol.id
        )

        volume_response = list_volumes(
            self.apiclient,
            id=vol.id,
        )
        self.assertNotEqual(
            len(volume_response),
            0,
            "Check list Volumes response"
        )
        volume_migrated = volume_response[0]
        self.assertEqual(
            volume_migrated.storageid,
            storage.id,
            "Check volume storage id"
        )

        # Take snapshot of new volume
        snapshot = Snapshot.create(
            self.apiclient,
            volume_migrated.id,
            account=self.account.name,
            domainid=self.account.domainid
        )

        self.debug("Snapshot created: ID - %s" % snapshot.id)

        # Delete volume, VM and created Primary Storage
        cleanup_resources(self.apiclient, self.cleanup)

        # List snapshot and verify it gets properly listed although Primary Storage was removed
        snapshot_response = Snapshot.list(
            self.apiclient,
            id=snapshot.id
        )
        self.assertNotEqual(
            len(snapshot_response),
            0,
            "Check list Snapshot response"
        )
        self.assertEqual(
            snapshot_response[0].id,
            snapshot.id,
            "Check snapshot id"
        )

        # Delete snapshot and verify it gets properly deleted (should not be listed)
        self.cleanup = [snapshot]
        cleanup_resources(self.apiclient, self.cleanup)

        self.cleanup = []
        snapshot_response_2 = Snapshot.list(
            self.apiclient,
            id=snapshot.id
        )
        self.assertEqual(
            snapshot_response_2,
            None,
            "Check list Snapshot response"
        )

        return
    def setUpClass(cls):
        # Set up API client
        testclient = super(TestVMSnapshots, cls).getClsTestClient()
        cls.apiClient = testclient.getApiClient()

        cls.testdata = TestData().testdata

        # Set up XenAPI connection
        host_ip = "https://" + \
                  list_hosts(cls.apiClient, clusterid=cls.testdata[TestData.clusterId], name="XenServer-6.5-1")[0].ipaddress

        cls.xen_session = XenAPI.Session(host_ip)

        xenserver = cls.testdata[TestData.xenServer]

        cls.xen_session.xenapi.login_with_password(
            xenserver[TestData.username], xenserver[TestData.password])

        # Set up SolidFire connection
        cls.sf_client = sf_api.SolidFireAPI(
            endpoint_dict=cls.testdata[TestData.solidFire])

        # Get Resources from Cloud Infrastructure
        cls.zone = get_zone(cls.apiClient,
                            zone_id=cls.testdata[TestData.zoneId])
        template = get_template(
            cls.apiClient,
            cls.zone.id,
            template_name=cls.testdata[TestData.templateName])
        cls.domain = get_domain(cls.apiClient, cls.testdata[TestData.domainId])

        # Create test account
        cls.account = Account.create(cls.apiClient,
                                     cls.testdata[TestData.account],
                                     admin=1)

        # Set up connection to make customized API calls
        user = User.create(cls.apiClient,
                           cls.testdata[TestData.user],
                           account=cls.account.name,
                           domainid=cls.domain.id)

        url = cls.testdata[TestData.url]

        api_url = "http://" + url + ":8080/client/api"
        userkeys = User.registerUserKeys(cls.apiClient, user.id)

        cls.cs_api = SignedAPICall.CloudStack(api_url, userkeys.apikey,
                                              userkeys.secretkey)

        primarystorage = cls.testdata[TestData.primaryStorage]

        cls.primary_storage = StoragePool.create(
            cls.apiClient,
            primarystorage,
            scope=primarystorage[TestData.scope],
            zoneid=cls.zone.id,
            provider=primarystorage[TestData.provider],
            tags=primarystorage[TestData.tags],
            capacityiops=primarystorage[TestData.capacityIops],
            capacitybytes=primarystorage[TestData.capacityBytes],
            hypervisor=primarystorage[TestData.hypervisor])

        compute_offering = ServiceOffering.create(
            cls.apiClient, cls.testdata[TestData.computeOffering])

        cls.disk_offering = DiskOffering.create(
            cls.apiClient, cls.testdata[TestData.diskOffering])

        # Create VM and volume for tests
        cls.virtual_machine = VirtualMachine.create(
            cls.apiClient,
            cls.testdata[TestData.virtualMachine],
            accountid=cls.account.name,
            zoneid=cls.zone.id,
            serviceofferingid=compute_offering.id,
            templateid=template.id,
            domainid=cls.domain.id,
            startvm=True)

        cls._cleanup = [
            cls.virtual_machine, compute_offering, cls.disk_offering, user,
            cls.account
        ]
    def setUpClass(cls):
        # Set up API client
        testclient = super(TestUploadDownload, cls).getClsTestClient()

        cls.apiClient = testclient.getApiClient()
        cls.configData = testclient.getParsedTestDataConfig()
        cls.dbConnection = testclient.getDbConnection()

        cls.testdata = TestData().testdata

        # Set up SolidFire connection
        solidfire = cls.testdata[TestData.solidFire]

        cls.sfe = ElementFactory.create(solidfire[TestData.mvip], solidfire[TestData.username], solidfire[TestData.password])

        # Get Resources from Cloud Infrastructure
        cls.zone = get_zone(cls.apiClient, zone_id=cls.testdata[TestData.zoneId])
        cls.cluster = list_clusters(cls.apiClient)[0]
        cls.template = get_template(cls.apiClient, cls.zone.id, hypervisor=TestData.hypervisor_type)
        cls.domain = get_domain(cls.apiClient, cls.testdata[TestData.domainId])

        # Create test account
        cls.account = Account.create(
            cls.apiClient,
            cls.testdata[TestData.account],
            admin=1
        )

        # Set up connection to make customized API calls
        user = User.create(
            cls.apiClient,
            cls.testdata[TestData.user],
            account=cls.account.name,
            domainid=cls.domain.id
        )

        url = cls.testdata[TestData.url]

        api_url = "http://" + url + ":8080/client/api"
        userkeys = User.registerUserKeys(cls.apiClient, user.id)

        cls.cs_api = SignedAPICall.CloudStack(api_url, userkeys.apikey, userkeys.secretkey)

        primarystorage = cls.testdata[TestData.primaryStorage]

        cls.primary_storage = StoragePool.create(
            cls.apiClient,
            primarystorage,
            scope=primarystorage[TestData.scope],
            zoneid=cls.zone.id,
            provider=primarystorage[TestData.provider],
            tags=primarystorage[TestData.tags],
            capacityiops=primarystorage[TestData.capacityIops],
            capacitybytes=primarystorage[TestData.capacityBytes],
            hypervisor=primarystorage[TestData.hypervisor]
        )

        compute_offering = ServiceOffering.create(
            cls.apiClient,
            cls.testdata[TestData.computeOffering]
        )

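        # Disk offering with a customizable size (custom=True), so the volume size
        # is supplied when each volume is created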
        cls.disk_offering = DiskOffering.create(
            cls.apiClient,
            cls.testdata[TestData.diskOffering],
            custom=True
        )

        # Create VM and volume for tests
        cls.virtual_machine = VirtualMachine.create(
            cls.apiClient,
            cls.testdata[TestData.virtualMachine],
            accountid=cls.account.name,
            zoneid=cls.zone.id,
            serviceofferingid=compute_offering.id,
            templateid=cls.template.id,
            domainid=cls.domain.id,
            startvm=True
        )

        cls._cleanup = [
            compute_offering,
            cls.disk_offering,
            user,
            cls.account
        ]