Code example #1
    def tearDown(self):
        try:
            for storagePool in self.pools:
                StoragePool.update(self.apiclient, id=storagePool.id, tags="")

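            # If the test created a data volume, detach it from the VM
            # before running the generic cleanup below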
            if hasattr(self, "data_volume_created"):
                data_volumes_list = Volume.list(
                    self.userapiclient,
                    id=self.data_volume_created.id,
                    virtualmachineid=self.vm.id
                )
                if data_volumes_list:
                    self.vm.detach_volume(
                        self.userapiclient,
                        data_volumes_list[0]
                    )

                status = validateList(data_volumes_list)
                self.assertEqual(
                    status[0],
                    PASS,
                    "DATA Volume List Validation Failed")

            cleanup_resources(self.apiclient, self.cleanup)
        except Exception as e:
            raise Exception("Warning: Exception during cleanup : %s" % e)
        return
Code example #2
    def tearDownClass(cls):
        try:
            # Cleanup resources used

            if cls.updateclone:
                Configurations.update(cls.api_client,
                                      "vmware.create.full.clone",
                                      value="false", storageid=cls.storageID)
                Configurations.update(cls.api_client,
                                      "vmware.create.full.clone",
                                      value="false")
                Configurations.update(cls.api_client,
                                      "vmware.root.disk.controller",
                                      value=cls.defaultdiskcontroller)
                StoragePool.update(cls.api_client, id=cls.storageID,
                                   tags="")
                cls.restartServer()

                # Give the management server 30 seconds to warm up;
                # deployments have failed when attempted right after it comes up
                time.sleep(30)

            cleanup_resources(cls.api_client, cls._cleanup)
        except Exception as e:
            raise Exception("Warning: Exception during cleanup : %s" % e)
        return
Code example #3
    def test13_update_primary_storage_capacityIops_to_zero(self):
        updatedIops = 0
        StoragePool.update(self.apiClient,
                           id=self.primary_storage_id,
                           capacityiops=updatedIops,
                           tags=self.primary_tag)

        # Verify in CloudStack
        storage_pools_response = list_storage_pools(
            self.apiClient, clusterid=self.cluster.id)
        for data in storage_pools_response:
            if data.id == self.primary_storage_id:
                storage_pool = data

        self.assertEqual(
            storage_pool.capacityiops, updatedIops,
            "Primary storage capacityiops not updated")

        # Verify in datera
        datera_primary_storage_name = "cloudstack-" + self.primary_storage_id
        for instance in self.datera_api.app_instances.list():
            if instance['name'] == datera_primary_storage_name:
                datera_instance = instance
        app_instance_response_iops = (
            datera_instance['storage_instances']
            ['storage-1']['volumes']['volume-1']['performance_policy']
            ['total_iops_max'])

        self.assertEqual(
            app_instance_response_iops, updatedIops,
            "app-instance capacityiops not updated")

        StoragePool.delete(self.primary_storage, self.apiClient)
        self.cleanup = []
Code example #4
    def cleanUpCloudStack(cls):
        cfg.logger.info("Cleaning up after the whole test run")
        try:
            cls.nfs_storage_pool = StoragePool.enableMaintenance(
                cls.apiclient, cls.nfs_storage_pool.id)
            cls.storage_pool = StoragePool.update(cls.apiclient,
                                                  id=cls.primary_storage.id,
                                                  tags=["ssd"])
            cls.storage_pool2 = StoragePool.update(cls.apiclient,
                                                   id=cls.primary_storage2.id,
                                                   tags=["ssd2"])
            # Cleanup resources used
            cleanup_resources(cls.apiclient, cls._cleanup)
        except Exception as e:
            cfg.logger.info("cleanup_resources failed: %s", e)
            os.killpg(cls.mvn_proc_grp, signal.SIGTERM)

            raise Exception("Warning: Exception during cleanup : %s" % e)

        cfg.logger.info("Stopping CloudStack")
        os.killpg(cls.mvn_proc_grp, signal.SIGTERM)

        time.sleep(30)

        return
Code example #5
    def test_02_migrate_vm_from_ceph_to_storpool_live(self):
        """
        Migrate VMs/Volumes live
        """
        self.storage_pool = StoragePool.update(self.apiclient,
                                               id=self.storage_pool.id,
                                               tags=["ssd, ceph"])
        random_data = self.writeToFile(self.vm2)
        cmd = listHosts.listHostsCmd()
        cmd.type = "Routing"
        cmd.state = "Up"
        cmd.zoneid = self.zone.id
        hosts = self.apiclient.listHosts(cmd)
        destinationHost = self.helper.getDestinationHost(
            self.vm2.hostid, hosts)
        vol_pool_map = {}
        volumes = list_volumes(self.apiclient,
                               virtualmachineid=self.vm2.id,
                               listall=True)
        for v in volumes:
            vol_pool_map[v.id] = self.storage_pool.id

        # Migrate the vm2
        print(vol_pool_map)
        vm2 = self.vm2.migrate_vm_with_volume(self.apiclient,
                                              hostid=destinationHost.id,
                                              migrateto=vol_pool_map)
        self.checkFileAndContentExists(self.vm2, random_data)

        self.storage_pool = StoragePool.update(self.apiclient,
                                               id=self.storage_pool.id,
                                               tags=["ssd"])
Code example #6
    def test13_update_primary_storage_capacityIops_to_zero(self):
        updatedIops = 0
        StoragePool.update(self.apiClient,
                           id=self.primary_storage_id,
                           capacityiops=updatedIops,
                           tags=self.primary_tag)

        # Verify in CloudStack
        storage_pools_response = list_storage_pools(self.apiClient,
                                                    clusterid=self.cluster.id)
        for data in storage_pools_response:
            if data.id == self.primary_storage_id:
                storage_pool = data

        self.assertEqual(storage_pool.capacityiops, updatedIops,
                         "Primary storage capacityiops not updated")

        # Verify in datera
        datera_primary_storage_name = "cloudstack-" + self.primary_storage_id
        for instance in self.datera_api.app_instances.list():
            if instance['name'] == datera_primary_storage_name:
                datera_instance = instance
        app_instance_response_iops = (
            datera_instance['storage_instances']['storage-1']['volumes']
            ['volume-1']['performance_policy']['total_iops_max'])

        self.assertEqual(app_instance_response_iops, updatedIops,
                         "app-instance capacityiops not updated")

        StoragePool.delete(self.primary_storage, self.apiClient)
        self.cleanup = []
Code example #7
    def test_01_attach_datadisk_to_vm_on_zwps(self):
        """ Attach Data Disk To VM on ZWPS
            1.  Check if zwps storage pool exists.
            2.  Adding tag to zone wide primary storage
            3.  Launch a VM on ZWPS
            4.  Attach data disk to vm which is on zwps.
            5.  Verify disk is attached.
        """

        # Step 1
        if len(
                list(storagePool for storagePool in self.pools
                     if storagePool.scope == "ZONE")) < 1:
            self.skipTest("There must be at least one zone wide \
                storage pool available in the setup")

        # Adding tags to Storage Pools
        zone_no = 1
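        # ZONETAG1[:-1] + repr(zone_no) replaces the last character of ZONETAG1
        # with the zone counter, so each zone wide pool gets a distinct tag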
        for storagePool in self.pools:
            if storagePool.scope == "ZONE":
                StoragePool.update(self.apiclient,
                                   id=storagePool.id,
                                   tags=[ZONETAG1[:-1] + repr(zone_no)])
                zone_no += 1

        self.vm = VirtualMachine.create(
            self.apiclient,
            self.testdata["small"],
            templateid=self.template.id,
            accountid=self.account.name,
            domainid=self.account.domainid,
            serviceofferingid=self.service_offering_zone1.id,
            zoneid=self.zone.id)

        self.data_volume_created = Volume.create(
            self.userapiclient,
            self.testdata["volume"],
            zoneid=self.zone.id,
            account=self.account.name,
            domainid=self.account.domainid,
            diskofferingid=self.disk_offering.id)

        self.cleanup.append(self.data_volume_created)

        # Step 2
        self.vm.attach_volume(self.userapiclient, self.data_volume_created)

        data_volumes_list = Volume.list(self.userapiclient,
                                        id=self.data_volume_created.id,
                                        virtualmachineid=self.vm.id)

        data_volume = data_volumes_list[0]

        status = validateList(data_volume)

        # Step 3
        self.assertEqual(status[0], PASS,
                         "Check: Data if Disk is attached to VM")

        return
Code example #8
File: test_volumes.py Project: slavkap/cloudstack
    def test_11_migrate_volume_and_change_offering(self):

        # Validates the following
        #
        # 1. Creates a new Volume with a small disk offering
        #
        # 2. Migrates the Volume to another primary storage and changes the offering
        #
        # 3. Verifies the Volume has new offering when migrated to the new storage.

        small_offering = list_disk_offering(self.apiclient, name="Small")[0]

        large_offering = list_disk_offering(self.apiclient, name="Large")[0]
        volume = Volume.create(self.apiClient,
                               self.services,
                               zoneid=self.zone.id,
                               account=self.account.name,
                               domainid=self.account.domainid,
                               diskofferingid=small_offering.id)
        self.debug("Created a small volume: %s" % volume.id)

        self.virtual_machine.attach_volume(self.apiclient, volume=volume)

        if self.virtual_machine.hypervisor == "KVM":
            self.virtual_machine.stop(self.apiclient)

        pools = StoragePool.listForMigration(self.apiclient, id=volume.id)

        pool = None

        if pools and len(pools) > 0:
            pool = pools[0]
        else:
            raise self.skipTest(
                "Not enough storage pools found, skipping test")

        if hasattr(pool, 'tags'):
            StoragePool.update(self.apiclient, id=pool.id, tags="")

        self.debug("Migrating Volume-ID: %s to Pool: %s" %
                   (volume.id, pool.id))
        livemigrate = False
        if self.virtual_machine.hypervisor.lower() in ("vmware", "xenserver"):
            livemigrate = True

        Volume.migrate(self.apiclient,
                       volumeid=volume.id,
                       storageid=pool.id,
                       newdiskofferingid=large_offering.id,
                       livemigrate=livemigrate)
        if self.virtual_machine.hypervisor == "KVM":
            self.virtual_machine.start(self.apiclient)
        migrated_vol = Volume.list(self.apiclient, id=volume.id)[0]
        self.assertEqual(migrated_vol.diskofferingname, large_offering.name,
                         "Offering name did not match with the new one ")
        return
Code example #9
    def tearDown(self):
        try:
            for storagePool in self.pools:
                StoragePool.update(self.apiclient, id=storagePool.id, tags="")

            cleanup_resources(self.apiclient, self.cleanup)
        except Exception as e:
            raise Exception("Warning: Exception during cleanup : %s" % e)
        return
Code example #10
    def tearDown(self):
        try:
            for storagePool in self.pools:
                StoragePool.update(self.apiclient, id=storagePool.id, tags="")

            cleanup_resources(self.apiclient, self.cleanup)
        except Exception as e:
            raise Exception("Warning: Exception during cleanup : %s" % e)
        return
Code example #11
    def test07_update_primary_storage_capacityBytes(self):
        updatedDiskSize = self.testdata[TestData.newCapacityBytes]
        StoragePool.update(self.apiClient,
                           id=self.primary_storage_id,
                           capacitybytes=updatedDiskSize,
                           tags=self.primary_tag)

        # Verify in CloudStack
        storage_pools_response = list_storage_pools(
            self.apiClient, clusterid=self.cluster.id)
        for data in storage_pools_response:
            if data.id == self.primary_storage_id:
                storage_pool = data

        self.assertEqual(
            storage_pool.disksizetotal, updatedDiskSize,
            "Primary storage not updated")

        # Verify in datera
        datera_primary_storage_name = "cloudstack-" + self.primary_storage_id
        for instance in self.datera_api.app_instances.list():
            if instance['name'] == datera_primary_storage_name:
                datera_instance = instance
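        # The size reported here is in GiB, so multiply by 1073741824 (2**30)
        # to get bytes before comparing with the requested capacity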
        app_instance_response_disk_size = (
            datera_instance['storage_instances']
            ['storage-1']['volumes']['volume-1']['size'] * 1073741824)

        self.assertEqual(
            app_instance_response_disk_size, updatedDiskSize,
            "app-instance not updated")

        # Verify in xenserver
       #for key, value in self.xen_session.xenapi.SR.get_all_records().items():
        #    if value['name_description'] == self.primary_storage_id:
        #        xen_sr = value
        #Uncomment after xen fix
        #print xen_sr
        #print xen_sr['physical_size'], updatedDiskSize
        #self.assertEqual(
        #    int(xen_sr['physical_size']) + 12582912, updatedDiskSize,
        #    "Xen server physical storage not updated")

        StoragePool.delete(self.primary_storage, self.apiClient)
        self.cleanup = []
Code example #12
    def test07_update_primary_storage_capacityBytes(self):
        updatedDiskSize = self.testdata[TestData.newCapacityBytes]
        StoragePool.update(self.apiClient,
                           id=self.primary_storage_id,
                           capacitybytes=updatedDiskSize,
                           tags=self.primary_tag)

        # Verify in CloudStack
        storage_pools_response = list_storage_pools(self.apiClient,
                                                    clusterid=self.cluster.id)
        for data in storage_pools_response:
            if data.id == self.primary_storage_id:
                storage_pool = data

        self.assertEqual(storage_pool.disksizetotal, updatedDiskSize,
                         "Primary storage not updated")

        # Verify in datera
        datera_primary_storage_name = "cloudstack-" + self.primary_storage_id
        for instance in self.datera_api.app_instances.list():
            if instance['name'] == datera_primary_storage_name:
                datera_instance = instance
        app_instance_response_disk_size = (
            datera_instance['storage_instances']['storage-1']['volumes']
            ['volume-1']['size'] * 1073741824)

        self.assertEqual(app_instance_response_disk_size, updatedDiskSize,
                         "app-instance not updated")

        # Verify in xenserver
        #for key, value in self.xen_session.xenapi.SR.get_all_records().items():
        #    if value['name_description'] == self.primary_storage_id:
        #        xen_sr = value
        #Uncomment after xen fix
        #print xen_sr
        #print xen_sr['physical_size'], updatedDiskSize
        #self.assertEqual(
        #    int(xen_sr['physical_size']) + 12582912, updatedDiskSize,
        #    "Xen server physical storage not updated")

        StoragePool.delete(self.primary_storage, self.apiClient)
        self.cleanup = []
Code example #13
    def tearDownClass(cls):
        try:
            # Cleanup resources used

            if cls.updateclone:
                Configurations.update(cls.api_client,
                                      "vmware.root.disk.controller",
                                      value=cls.defaultdiskcontroller)
                Configurations.update(cls.api_client,
                                      "vmware.create.full.clone",
                                      value="false")
                Configurations.update(cls.api_client,
                                      "vmware.create.full.clone",
                                      value="false", storageid=cls.storageID)
                if cls.storageID:
                    StoragePool.update(cls.api_client, id=cls.storageID,
                                       tags="")

            cleanup_resources(cls.api_client, cls._cleanup)
        except Exception as e:
            raise Exception("Warning: Exception during cleanup : %s" % e)
        return
Code example #14
    def tearDownClass(cls):
        try:
            # Cleanup resources used

            if cls.updateclone:
                Configurations.update(cls.api_client,
                                      "vmware.root.disk.controller",
                                      value=cls.defaultdiskcontroller)
                Configurations.update(cls.api_client,
                                      "vmware.create.full.clone",
                                      value="false")
                Configurations.update(cls.api_client,
                                      "vmware.create.full.clone",
                                      value="false", storageid=cls.storageID)
                if cls.storageID:
                    StoragePool.update(cls.api_client, id=cls.storageID,
                                       tags="")

            cleanup_resources(cls.api_client, cls._cleanup)
        except Exception as e:
            raise Exception("Warning: Exception during cleanup : %s" % e)
        return
Code example #15
    def cleanUpCloudStack(cls):
        try:
            if cls.nfs_storage_pool.state != "Maintenance":
                cls.nfs_storage_pool = StoragePool.enableMaintenance(
                    cls.apiclient, cls.nfs_storage_pool.id)

            if cls.ceph_storage_pool.state != "Maintenance":
                cls.ceph_storage_pool = StoragePool.enableMaintenance(
                    cls.apiclient, cls.ceph_storage_pool.id)

            cls.storage_pool = StoragePool.update(cls.apiclient,
                                                  id=cls.storage_pool.id,
                                                  tags=["ssd"])
            # Cleanup resources used
            cleanup_resources(cls.apiclient, cls._cleanup)
        except Exception as e:
            raise Exception("Warning: Exception during cleanup : %s" % e)
        return
Code example #16
    def test_01_multiple_snapshot_in_zwps(self):
        """ Test multiple volume snapshot in zwps

        # 1. Verify if setup has a ZWPS and 2 CWPS
        # 2. Deploy a VM with data disk in ZWPS
        # 1. Verify ROOT and DATA Disk of the VM is in ZWPS.
        # 2. Take a snapshot of VM.
        # 3. Create Multiple Snapshots till operation fails.
        """
        try:
            self.pools = StoragePool.list(self.apiclient, zoneid=self.zone.id)
            status = validateList(self.pools)

            self.assertEqual(
                status[0],
                PASS,
                "Check: Failed to list storage pools due to %s" %
                status[2])

            zonepoolList = list(storagePool for storagePool in self.pools
                                if storagePool.scope == "ZONE")

            if len(zonepoolList) < 1:
                self.skipTest("There must be at least one zone wide\
                storage pool available in the setup")
            if len(list(storagePool for storagePool in self.pools
                        if storagePool.scope == "CLUSTER")) < 2:
                self.skipTest("There must be at atleast two cluster wide\
                storage pools available in the setup")
        except Exception as e:
            self.skipTest(e)

        # Adding tags to Storage Pools
        zone_no = 1
        StoragePool.update(
            self.apiclient,
            id=zonepoolList[0].id,
            tags=[ZONETAG1[:-1] + repr(zone_no)])

        self.vm_zwps = VirtualMachine.create(
            self.apiclient,
            self.testdata["small"],
            templateid=self.template.id,
            accountid=self.account.name,
            domainid=self.account.domainid,
            serviceofferingid=self.service_offering_zwps.id,
            diskofferingid=self.disk_offering_zwps.id,
            zoneid=self.zone.id,
        )

        self.cleanup.append(self.vm_zwps)

        # Step 1
        volumes_root_list = list_volumes(
            self.apiclient,
            virtualmachineid=self.vm_zwps.id,
            type=ROOT,
            listall=True
        )
        status = validateList(volumes_root_list)

        self.assertEqual(
            status[0],
            PASS,
            "Check: Failed to list root vloume due to %s" %
            status[2])

        root_volume = volumes_root_list[0]

        if root_volume.storage != zonepoolList[0].name:
            self.fail("Root Volume not in Zone-Wide Storage Pool !")

        volumes_data_list = list_volumes(
            self.apiclient,
            virtualmachineid=self.vm_zwps.id,
            type=DATA,
            listall=True
        )
        status = validateList(volumes_data_list)

        self.assertEqual(
            status[0],
            PASS,
            "Check: Failed to list data vloume due to %s" %
            status[2])

        data_volume = volumes_data_list[0]

        if data_volume.storage != zonepoolList[0].name:
            self.fail("Data Volume not in Zone-Wide Storage Pool !")

        # Step 2
        self.vm_zwps.stop(self.apiclient)

        self.debug(
            "Creation of Snapshot of Data Volume after VM is stopped.....")

        Snapshot.create(
            self.apiclient,
            data_volume.id)

        snapshots_list = Snapshot.list(
            self.apiclient,
            volumeid=data_volume.id,
            listall=True)

        snap_list_validation_result = validateList(snapshots_list)

        self.assertEqual(
            snap_list_validation_result[0],
            PASS,
            "snapshot list validation failed due to %s" %
            snap_list_validation_result[2])

        snap_count = len(snapshots_list)

        # Step 3
        self.debug(
            "Creating Multiple Snapshots(Should create more than 10).....")
        try:
            while snap_count <= 12:
                Snapshot.create(
                    self.apiclient,
                    data_volume.id)

                snapshots_list = Snapshot.list(
                    self.apiclient,
                    volumeid=data_volume.id,
                    listall=True)

                snap_list_validation_result = validateList(snapshots_list)

                self.assertEqual(
                    snap_list_validation_result[0],
                    PASS,
                    "snapshot list validation failed due to %s" %
                    snap_list_validation_result[2])

                snap_count = len(snapshots_list)
        except Exception as e:
            snapshots_list = Snapshot.list(
                self.apiclient,
                volumeid=data_volume.id,
                listall=True)

            snap_list_validation_result = validateList(snapshots_list)

            self.assertEqual(
                snap_list_validation_result[0],
                PASS,
                "snapshot list validation failed due to %s" %
                snap_list_validation_result[2])

            assert len(snapshots_list) >= 10,\
                "Less than 10 snapshots created...."
            raise Exception("Snapshot creation failed !: %s" % e)

        return
Code example #17
    def setUpClass(cls):
        cls.testClient = super(TestResizeVolume, cls).getClsTestClient()
        cls.api_client = cls.testClient.getApiClient()
        cls.hypervisor = (cls.testClient.getHypervisorInfo()).lower()
        cls.storageID = None
        # Fill services from the external config file
        cls.services = cls.testClient.getParsedTestDataConfig()
        # Get Zone, Domain and templates
        cls.domain = get_domain(cls.api_client)
        cls.zone = get_zone(
            cls.api_client,
            cls.testClient.getZoneForTests())
        cls.services["mode"] = cls.zone.networktype
        cls._cleanup = []
        cls.unsupportedStorageType = False
        cls.unsupportedHypervisorType = False
        cls.updateclone = False
        if cls.hypervisor not in ["xenserver", "kvm", "vmware"]:
            cls.unsupportedHypervisorType = True
            return
        cls.template = get_template(
            cls.api_client,
            cls.zone.id
        )
        cls.services["virtual_machine"]["zoneid"] = cls.zone.id
        cls.services["virtual_machine"]["template"] = cls.template.id
        cls.services["volume"]["zoneid"] = cls.zone.id
        try:
            cls.parent_domain = Domain.create(cls.api_client,
                                              services=cls.services[
                                                  "domain"],
                                              parentdomainid=cls.domain.id)
            cls.parentd_admin = Account.create(cls.api_client,
                                               cls.services["account"],
                                               admin=True,
                                               domainid=cls.parent_domain.id)
            cls._cleanup.append(cls.parentd_admin)
            cls._cleanup.append(cls.parent_domain)
            list_pool_resp = list_storage_pools(
                cls.api_client,
                account=cls.parentd_admin.name,
                domainid=cls.parent_domain.id)
            res = validateList(list_pool_resp)
            if res[2] == INVALID_INPUT:
                raise Exception("Failed to list storage pools - no storage pools found")
            # Identify the storage pool type and set vmware full clone to true if storage is VMFS
            if cls.hypervisor == 'vmware':
                for strpool in list_pool_resp:
                    if strpool.type.lower() == "vmfs" or \
                            strpool.type.lower() == "networkfilesystem":
                        list_config_storage_response = list_configurations(
                            cls.api_client,
                            name="vmware.create.full.clone",
                            storageid=strpool.id)
                        res = validateList(list_config_storage_response)
                        if res[2] == INVALID_INPUT:
                            raise Exception("Failed to list configurations")
                        if list_config_storage_response[0].value == "false":
                            Configurations.update(cls.api_client,
                                                  "vmware.create.full.clone",
                                                  value="true",
                                                  storageid=strpool.id)
                            cls.updateclone = True
                            StoragePool.update(cls.api_client,
                                               id=strpool.id,
                                               tags="scsi")
                            cls.storageID = strpool.id
                            cls.unsupportedStorageType = False
                            break
                    else:
                        cls.unsupportedStorageType = True
            # Creating service offering with normal config
            cls.service_offering = ServiceOffering.create(
                cls.api_client,
                cls.services["service_offering"])
            cls.services_offering_vmware = ServiceOffering.create(
                cls.api_client, cls.services["service_offering"], tags="scsi")
            cls._cleanup.extend(
                [cls.service_offering, cls.services_offering_vmware])

        except Exception as e:
            cls.tearDownClass()
        return
Code example #18
    def setUpClass(cls):
        cls.cloudstacktestclient = super(TestDeployVmRootSize,
                                         cls).getClsTestClient()
        cls.api_client = cls.cloudstacktestclient.getApiClient()
        cls.hypervisor = cls.cloudstacktestclient.getHypervisorInfo().lower()
        cls.mgtSvrDetails = cls.config.__dict__["mgtSvr"][0].__dict__

        # Get Zone, Domain and Default Built-in template
        cls.domain = get_domain(cls.api_client)
        cls.zone = get_zone(cls.api_client,
                            cls.cloudstacktestclient.getZoneForTests())
        cls.services = cls.testClient.getParsedTestDataConfig()
        cls.services["mode"] = cls.zone.networktype
        cls._cleanup = []
        cls.updateclone = False
        cls.restartreq = False
        cls.defaultdiskcontroller = "ide"
        cls.template = get_template(cls.api_client, cls.zone.id)
        if cls.template == FAILED:
            assert False, "get_template() failed to return template "

        #create a user account
        cls.account = Account.create(cls.api_client,
                                     cls.services["account"],
                                     domainid=cls.domain.id,
                                     admin=True)
        cls._cleanup.append(cls.account)
        list_pool_resp = list_storage_pools(cls.api_client,
                                            account=cls.account.name,
                                            domainid=cls.domain.id)
        #Identify the storage pool type  and set vmware fullclone to
        # true if storage is VMFS
        if cls.hypervisor == 'vmware':
            # please make sure url of templateregister dictionary in
            # test_data.config pointing to .ova file

            list_config_storage_response = list_configurations(
                cls.api_client, name="vmware.root.disk.controller")
            cls.defaultdiskcontroller = list_config_storage_response[0].value
            if list_config_storage_response[0].value == "ide" or \
                            list_config_storage_response[0].value == \
                            "osdefault":
                Configurations.update(cls.api_client,
                                      "vmware.root.disk.controller",
                                      value="scsi")

                cls.updateclone = True
                cls.restartreq = True

            list_config_fullclone_global_response = list_configurations(
                cls.api_client, name="vmware.create.full.clone")
            if list_config_fullclone_global_response[0].value == "false":
                Configurations.update(cls.api_client,
                                      "vmware.create.full.clone",
                                      value="true")

                cls.updateclone = True
                cls.restartreq = True

            for strpool in list_pool_resp:
                if strpool.type.lower() == "vmfs" or strpool.type.lower(
                ) == "networkfilesystem":
                    list_config_storage_response = list_configurations(
                        cls.api_client,
                        name="vmware.create.full.clone",
                        storageid=strpool.id)
                    res = validateList(list_config_storage_response)
                    if res[2] == INVALID_INPUT:
                        raise Exception("Failed to  list configurations ")

                    if list_config_storage_response[0].value == "false":
                        Configurations.update(cls.api_client,
                                              "vmware.create.full.clone",
                                              value="true",
                                              storageid=strpool.id)
                        cls.updateclone = True
                        StoragePool.update(cls.api_client,
                                           id=strpool.id,
                                           tags="scsi")
                        cls.storageID = strpool.id
                        break
            if cls.restartreq:
                cls.restartServer()

                # Give the management server 30 seconds to warm up;
                # deployments have failed when attempted right after it comes up
                time.sleep(30)

        #create a service offering
        cls.service_offering = ServiceOffering.create(
            cls.api_client, cls.services["service_offering"])
        #build cleanup list
        cls.services_offering_vmware = ServiceOffering.create(
            cls.api_client, cls.services["service_offering"], tags="scsi")
        cls._cleanup.extend(
            [cls.service_offering, cls.services_offering_vmware])
Code example #19
    def test_4_migrate_volume_from_ceph_to_storpool(self):
        '''Test write on disk before migrating volume from Ceph primary storage
         Check that data is on disk after migration'''
        try:
            # Login to VM and write data to file system
            ssh_client = self.vm4.get_ssh_client(reconnect=True)

            cmds = [
                "echo %s > %s/%s" %
                (self.random_data_0, self.test_dir, self.random_data), "sync",
                "sleep 1", "sync", "sleep 1",
                "cat %s/%s" % (self.test_dir, self.random_data)
            ]

            for c in cmds:
                self.debug(c)
                result = ssh_client.execute(c)
                self.debug(result)

        except Exception:
            self.fail("SSH failed for Virtual machine: %s" %
                      self.vm4.ipaddress)

        self.assertEqual(self.random_data_0, result[0],
                         "Check the random data has be write into temp file!")

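        # Retag the destination (StorPool) pool with "ceph", presumably so the
        # volumes' ceph-tagged offering still matches after they are migrated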
        self.storage_pool = StoragePool.update(self.apiclient,
                                               id=self.storage_pool.id,
                                               tags=["ceph"])
        self.vm4.stop(self.apiclient, forced=True)
        time.sleep(30)
        volumes = list_volumes(self.apiclient,
                               virtualmachineid=self.vm4.id,
                               listall=True)
        for v in volumes:
            cmd = migrateVolume.migrateVolumeCmd()
            cmd.storageid = self.storage_pool.id
            cmd.volumeid = v.id
            volume = self.apiclient.migrateVolume(cmd)
            self.assertEqual(volume.storageid, self.storage_pool.id,
                             "Did not migrate volume from Ceph to StorPool")

        volumes = list_volumes(self.apiclient,
                               virtualmachineid=self.vm4.id,
                               listall=True)
        for v in volumes:
            name = v.path.split("/")[3]
            try:
                sp_volume = self.spapi.volumeList(volumeName="~" + name)
            except spapi.ApiError as err:
                raise Exception(err)

        self.vm4.start(self.apiclient)
        try:
            ssh_client = self.vm4.get_ssh_client(reconnect=True)

            cmds = ["cat %s/%s" % (self.test_dir, self.random_data)]

            for c in cmds:
                self.debug(c)
                result = ssh_client.execute(c)
                self.debug(result)

        except Exception:
            self.fail("SSH failed for Virtual machine: %s" %
                      self.vm4.ipaddress)

        self.assertEqual(
            self.random_data_0, result[0],
            "Check the random data is equal with the ramdom file!")
Code example #20
    def setUpClass(cls):
        cls.cloudstacktestclient = super(TestDeployVmRootSize,
                                     cls).getClsTestClient()
        cls.api_client = cls.cloudstacktestclient.getApiClient()
        cls.hypervisor = cls.cloudstacktestclient.getHypervisorInfo().lower()
        cls.mgtSvrDetails = cls.config.__dict__["mgtSvr"][0].__dict__

        # Get Zone, Domain and Default Built-in template
        cls.domain = get_domain(cls.api_client)
        cls.zone = get_zone(cls.api_client,
                            cls.cloudstacktestclient.getZoneForTests())
        cls.services = cls.testClient.getParsedTestDataConfig()
        cls.services["mode"] = cls.zone.networktype
        cls._cleanup = []
        cls.updateclone = False
        cls.restartreq = False
        cls.defaultdiskcontroller = "ide"
        cls.template = get_template(cls.api_client, cls.zone.id)
        if cls.template == FAILED:
            assert False, "get_template() failed to return template "

        #create a user account
        cls.account = Account.create(
            cls.api_client,
            cls.services["account"],
            domainid=cls.domain.id,admin=True
        )
        cls._cleanup.append(cls.account)
        list_pool_resp = list_storage_pools(cls.api_client,
                                            account=cls.account.name,
                                            domainid=cls.domain.id)
        #Identify the storage pool type  and set vmware fullclone to
        # true if storage is VMFS
        if cls.hypervisor == 'vmware':
            # please make sure the url of the templateregister dictionary in
            # test_data.config points to an .ova file

            list_config_storage_response = list_configurations(
                cls.api_client, name="vmware.root.disk.controller")
            cls.defaultdiskcontroller = list_config_storage_response[0].value
            if list_config_storage_response[0].value in ("ide", "osdefault"):
                Configurations.update(cls.api_client,
                                      "vmware.root.disk.controller",
                                      value="scsi")

                cls.updateclone = True
                cls.restartreq = True

            list_config_fullclone_global_response = list_configurations(
                cls.api_client, name="vmware.create.full.clone")
            if list_config_fullclone_global_response[0].value == "false":
                Configurations.update(cls.api_client,
                                      "vmware.create.full.clone",
                                      value="true")

                cls.updateclone = True
                cls.restartreq = True

            cls.tempobj = Template.register(cls.api_client,
                                            cls.services["templateregister"],
                                            hypervisor=cls.hypervisor,
                                            zoneid=cls.zone.id,
                                            account=cls.account.name,
                                            domainid=cls.domain.id)
            cls.tempobj.download(cls.api_client)

            for strpool in list_pool_resp:
                if strpool.type.lower() == "vmfs" or \
                        strpool.type.lower() == "networkfilesystem":
                    list_config_storage_response = list_configurations(
                        cls.api_client,
                        name="vmware.create.full.clone",
                        storageid=strpool.id)
                    res = validateList(list_config_storage_response)
                    if res[2] == INVALID_INPUT:
                        raise Exception("Failed to list configurations")

                    if list_config_storage_response[0].value == "false":
                        Configurations.update(cls.api_client,
                                              "vmware.create.full.clone",
                                              value="true",
                                              storageid=strpool.id)
                        cls.updateclone = True
                        StoragePool.update(cls.api_client,
                                           id=strpool.id,
                                           tags="scsi")
                        cls.storageID = strpool.id
                        break
            if cls.restartreq:
                cls.restartServer()
        #create a service offering
        cls.service_offering = ServiceOffering.create(
            cls.api_client,
            cls.services["service_offering"]
        )
        #build cleanup list
        cls.services_offering_vmware = ServiceOffering.create(
            cls.api_client, cls.services["service_offering"], tags="scsi")
        cls._cleanup.extend(
            [cls.service_offering, cls.services_offering_vmware])
Code example #21
 def setUpClass(cls):
     testClient = super(TestPathVolume, cls).getClsTestClient()
     cls.apiclient = testClient.getApiClient()
     cls.testdata = testClient.getParsedTestDataConfig()
     #Get Zone,Domain and templates
     cls.domain = get_domain(cls.apiclient)
     cls.zone = get_zone(cls.apiclient)
     cls.testdata["mode"] = cls.zone.networktype
     cls.template = get_template(cls.apiclient, cls.zone.id, cls.testdata["ostype"])
     cls.testdata["template"]["ostypeid"] = cls.template.ostypeid
     if cls.template == FAILED:
             cls.fail("get_template() failed to return template with description %s" % cls.testdata["ostype"])
     cls._cleanup = []
     try:
         cls.account = Account.create(cls.apiclient,
                                      cls.testdata["account"],
                                      domainid=cls.domain.id
                                      )
         cls._cleanup.append(cls.account)
         # create two service offerings
         cls.service_offering_1 = ServiceOffering.create(cls.apiclient, cls.testdata["service_offerings"]["small"])
         cls._cleanup.append(cls.service_offering_1)
         # Create Disk offerings
         cls.disk_offering_1 = DiskOffering.create(cls.apiclient, cls.testdata["disk_offering"])
         cls._cleanup.append(cls.disk_offering_1)
         # check if zone wide storage is enabled
         cls.list_storage = StoragePool.list(cls.apiclient,
                                             scope="ZONE"
                                             )
         if cls.list_storage:
             cls.zone_wide_storage = cls.list_storage[0]
             cls.debug("zone wide storage id is %s" % cls.zone_wide_storage.id)
             cls.testdata["tags"] = "zp"
             update1 = StoragePool.update(cls.apiclient,
                                          id=cls.zone_wide_storage.id,
                                          tags=cls.testdata["tags"]
                                          )
             cls.debug("Storage %s pool tag%s" % (cls.zone_wide_storage.id, update1.tags))
             cls.testdata["service_offerings"]["tags"] = "zp"
             cls.tagged_so = ServiceOffering.create(cls.apiclient, cls.testdata["service_offerings"])
             cls.testdata["service_offerings"]["tags"] = " "
             cls._cleanup.append(cls.tagged_so)
             #create tagged disk offerings
             cls.testdata["disk_offering"]["tags"] = "zp"
             cls.disk_offering_tagged = DiskOffering.create(cls.apiclient, cls.testdata["disk_offering"])
             cls._cleanup.append(cls.disk_offering_tagged)
         else:
             cls.debug("No zone wide storage found")
         # check if local storage is enabled
         if cls.zone.localstorageenabled:
             cls.testdata["disk_offering"]["tags"] = " "
             cls.testdata["service_offerings"]["storagetype"] = 'local'
             cls.service_offering_2 = ServiceOffering.create(cls.apiclient, cls.testdata["service_offerings"])
             cls._cleanup.append(cls.service_offering_2)
             # create a disk offering with local storage
             cls.testdata["disk_offering"]["storagetype"] = 'local'
             cls.disk_offering_local = DiskOffering.create(cls.apiclient, cls.testdata["disk_offering"])
             cls._cleanup.append(cls.disk_offering_local)
             cls.testdata["disk_offering"]["storagetype"] = ' '
         else:
             cls.debug("No local storage found")
         cls.userapiclient = testClient.getUserApiClient(UserName=cls.account.name,
                                                         DomainName=cls.account.domain
                                                         )
         #Check if login is successful with new account
         response = User.login(cls.userapiclient,
                               username=cls.account.name,
                               password=cls.testdata["account"]["password"]
                               )
         assert response.sessionkey is not None
         #response should have non null value
     except Exception as e:
             cls.tearDownClass()
             raise e
     return
Code example #22
    def setUpCloudStack(cls):
        super(TestMigrationFromUuidToGlobalId, cls).setUpClass()

        cls._cleanup = []
        cls.helper = HelperUtil(cls)
        cls.helper.build_commit(cls.ARGS.uuid, cls.ARGS)
        cfg.logger.info("Starting CloudStack")
        cls.mvn_proc = subprocess.Popen(
            ['mvn', '-pl', ':cloud-client-ui', 'jetty:run'],
            cwd=cls.ARGS.forked,
            preexec_fn=os.setsid,
            stdout=cfg.misc,
            stderr=subprocess.STDOUT,
        )
        cls.mvn_proc_grp = os.getpgid(cls.mvn_proc.pid)
        cfg.logger.info("Started CloudStack in process group %d",
                        cls.mvn_proc_grp)
        cfg.logger.info("Waiting for a while to give it a chance to start")
        proc = subprocess.Popen(["tail", "-f", cfg.misc_name],
                                shell=False,
                                bufsize=0,
                                stdout=subprocess.PIPE)
        while True:
            line = proc.stdout.readline()
            if not line:
                cfg.logger.info("tail ended, was this expected?")
                cfg.logger.info("Stopping CloudStack")
                os.killpg(cls.mvn_proc_grp, signal.SIGINT)
                break
            if "[INFO] Started Jetty Server" in line:
                cfg.logger.info("got it!")
                break
        proc.terminate()
        proc.wait()
        time.sleep(15)
        cfg.logger.info("Processing with the setup")

        cls.obj_marvininit = cls.helper.marvin_init(cls.ARGS.cfg)
        cls.testClient = cls.obj_marvininit.getTestClient()
        cls.apiclient = cls.testClient.getApiClient()
        dbclient = cls.testClient.getDbConnection()
        v = dbclient.execute(
            "select * from configuration where name='sp.migration.to.global.ids.completed'"
        )
        cfg.logger.info("Configuration setting for update of db is %s", v)
        if len(v) > 0:
            update = dbclient.execute(
                "update configuration set value='false' where name='sp.migration.to.global.ids.completed'"
            )
            cfg.logger.info("DB configuration table was updated %s", update)

        cls.spapi = spapi.Api.fromConfig(multiCluster=True)

        td = TestData()
        cls.testdata = td.testdata

        cls.services = cls.testClient.getParsedTestDataConfig()
        # Get Zone, Domain and templates
        cls.domain = get_domain(cls.apiclient)
        cls.zone = get_zone(cls.apiclient, cls.testClient.getZoneForTests())
        cls.cluster = list_clusters(cls.apiclient)[0]
        cls.hypervisor = get_hypervisor_type(cls.apiclient)

        #The version of CentOS has to be supported
        cls.template = get_template(cls.apiclient,
                                    cls.zone.id,
                                    account="system")

        if cls.template == FAILED:
            assert False, "get_template() failed to return template\
                    with description %s" % cls.services["ostype"]

        cls.services["domainid"] = cls.domain.id
        cls.services["small"]["zoneid"] = cls.zone.id
        cls.services["templates"]["ostypeid"] = cls.template.ostypeid
        cls.services["zoneid"] = cls.zone.id
        primarystorage = cls.testdata[TestData.primaryStorage]
        primarystorage2 = cls.testdata[TestData.primaryStorage2]

        serviceOffering = cls.testdata[TestData.serviceOffering]
        serviceOffering2 = cls.testdata[TestData.serviceOfferingssd2]
        storage_pool = list_storage_pools(cls.apiclient,
                                          name=primarystorage.get("name"))
        storage_pool2 = list_storage_pools(cls.apiclient,
                                           name=primarystorage2.get("name"))
        cls.primary_storage = storage_pool[0]
        cls.primary_storage2 = storage_pool2[0]

        disk_offering = list_disk_offering(cls.apiclient, name="Small")

        assert disk_offering is not None

        service_offering = list_service_offering(cls.apiclient, name="ssd")
        if service_offering is not None:
            cls.service_offering = service_offering[0]
        else:
            cls.service_offering = ServiceOffering.create(
                cls.apiclient, serviceOffering)
        assert cls.service_offering is not None

        service_offering2 = list_service_offering(cls.apiclient, name="ssd2")
        if service_offering2 is not None:
            cls.service_offering2 = service_offering2[0]
        else:
            cls.service_offering2 = ServiceOffering.create(
                cls.apiclient, serviceOffering2)
        assert cls.service_offering2 is not None

        nfs_service_offerings = {
            "name": "nfs",
            "displaytext": "NFS service offerings",
            "cpunumber": 1,
            "cpuspeed": 500,
            "memory": 512,
            "storagetype": "shared",
            "customizediops": False,
            "hypervisorsnapshotreserve": 200,
            "tags": "nfs"
        }

        nfs_storage_pool = list_storage_pools(cls.apiclient, name='primary')

        nfs_service_offering = list_service_offering(cls.apiclient, name='nfs')

        if nfs_service_offering is None:
            nfs_service_offering = ServiceOffering.create(
                cls.apiclient, nfs_service_offerings)
        else:
            nfs_service_offering = nfs_service_offering[0]

        cls.nfs_service_offering = nfs_service_offering

        cls.nfs_storage_pool = nfs_storage_pool[0]

        cls.nfs_storage_pool = StoragePool.cancelMaintenance(
            cls.apiclient, cls.nfs_storage_pool.id)

        cls.disk_offering = disk_offering[0]

        account = list_accounts(cls.apiclient, name="admin")
        cls.account = account[0]

        cls.virtual_machine = VirtualMachine.create(
            cls.apiclient, {"name": "StorPool-%s" % uuid.uuid4()},
            zoneid=cls.zone.id,
            templateid=cls.template.id,
            serviceofferingid=cls.nfs_service_offering.id,
            hypervisor=cls.hypervisor,
            rootdisksize=10)
        cls._cleanup.append(cls.virtual_machine)

        cls.virtual_machine2 = VirtualMachine.create(
            cls.apiclient, {"name": "StorPool-%s" % uuid.uuid4()},
            zoneid=cls.zone.id,
            templateid=cls.template.id,
            serviceofferingid=cls.nfs_service_offering.id,
            hypervisor=cls.hypervisor,
            rootdisksize=10)
        cls._cleanup.append(cls.virtual_machine2)

        cls.virtual_machine3 = VirtualMachine.create(
            cls.apiclient, {"name": "StorPool-%s" % uuid.uuid4()},
            zoneid=cls.zone.id,
            templateid=cls.template.id,
            serviceofferingid=cls.service_offering.id,
            hypervisor=cls.hypervisor,
            rootdisksize=10)
        cls._cleanup.append(cls.virtual_machine3)

        cls.virtual_machine4 = VirtualMachine.create(
            cls.apiclient, {"name": "StorPool-%s" % uuid.uuid4()},
            zoneid=cls.zone.id,
            templateid=cls.template.id,
            serviceofferingid=cls.service_offering2.id,
            hypervisor=cls.hypervisor,
            rootdisksize=10)
        cls._cleanup.append(cls.virtual_machine4)

        cls.volume = Volume.create(cls.apiclient,
                                   cls.testdata[TestData.volume_1],
                                   account=cls.account.name,
                                   domainid=cls.domain.id,
                                   zoneid=cls.zone.id,
                                   diskofferingid=cls.disk_offering.id)

        cls._cleanup.append(cls.volume)

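        # Tag both primary storage pools (tags are passed as a single
        # comma-separated string) so offerings tagged "ssd", "nfs" or "ssd2"
        # can be placed on them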
        cls.primary_storage = StoragePool.update(cls.apiclient,
                                                 id=cls.primary_storage.id,
                                                 tags=["ssd, nfs, ssd2"])
        cls.primary_storage2 = StoragePool.update(cls.apiclient,
                                                  id=cls.primary_storage2.id,
                                                  tags=["ssd, ssd2"])
        #change to latest commit with globalId implementation
        cls.helper.switch_to_globalid_commit(cls.ARGS.globalid, cls.ARGS)
        cfg.logger.info("The setup is done, proceeding with the tests")
        cls.primary_storage = list_storage_pools(
            cls.apiclient, name=primarystorage.get("name"))[0]
        cls.primary_storage2 = list_storage_pools(
            cls.apiclient, name=primarystorage2.get("name"))[0]
Code example #23
 def updateStoragePoolTags(self, poolId, tags):
     StoragePool.update(
         self.apiclient,
         id=poolId,
         tags=tags
     )
Code example #24
    def test_01_recover_VM(self):
        """ Test Restore VM on VMWare
            1. Deploy a VM without datadisk
            2. Restore the VM
            3. Verify that VM comes up in Running state
        """
        try:
            self.pools = StoragePool.list(self.apiclient,
                                          zoneid=self.zone.id,
                                          scope="CLUSTER")

            status = validateList(self.pools)

            # Step 3
            self.assertEqual(
                status[0], PASS,
                "Check: Failed to list  cluster wide storage pools")

            if len(self.pools) < 2:
                self.skipTest("There must be at atleast two cluster wide\
                storage pools available in the setup")

        except Exception as e:
            self.skipTest(e)

        # Adding tags to Storage Pools
        cluster_no = 1
        StoragePool.update(self.apiclient,
                           id=self.pools[0].id,
                           tags=[CLUSTERTAG1[:-1] + repr(cluster_no)])

        self.vm = VirtualMachine.create(
            self.apiclient,
            self.testdata["small"],
            accountid=self.account.name,
            templateid=self.template.id,
            domainid=self.account.domainid,
            serviceofferingid=self.service_offering_cwps.id,
            zoneid=self.zone.id,
        )
        # Step 2

        volumes_root_list = list_volumes(self.apiclient,
                                         virtualmachineid=self.vm.id,
                                         type=ROOT,
                                         listall=True)

        root_volume = volumes_root_list[0]

        # Restore VM till its ROOT disk is recreated on another Primary Storage
        while True:
            self.vm.restore(self.apiclient)
            volumes_root_list = list_volumes(self.apiclient,
                                             virtualmachineid=self.vm.id,
                                             type=ROOT,
                                             listall=True)

            root_volume = volumes_root_list[0]

            if root_volume.storage != self.pools[0].name:
                break

        # Step 3
        vm_list = list_virtual_machines(self.apiclient, id=self.vm.id)

        state = vm_list[0].state
        i = 0
        while (state != "Running"):
            vm_list = list_virtual_machines(self.apiclient, id=self.vm.id)

            time.sleep(10)
            i = i + 1
            state = vm_list[0].state
            if i >= 10:
                self.fail("Restore VM Failed")
                break

        return
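The state-polling loop above is a common pattern in these tests; a hedged sketch of it as a standalone helper, assuming marvin's list_virtual_machines and arbitrarily chosen retry count and sleep interval:

import time
from marvin.lib.common import list_virtual_machines

# Sketch: poll until the VM reaches the expected state or the retries run out.
def wait_for_vm_state(apiclient, vmid, expected="Running", retries=10, interval=10):
    for _ in range(retries):
        vm_list = list_virtual_machines(apiclient, id=vmid)
        if vm_list and vm_list[0].state == expected:
            return vm_list[0]
        time.sleep(interval)
    return None  # the caller decides whether to fail the test

With such a helper the loop in the test reduces to a single call, followed by self.fail("Restore VM Failed") when None comes back.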
Code example #25
    def setUpCloudStack(cls):
        cls.spapi = spapi.Api.fromConfig(multiCluster=True)
        testClient = super(TestStoragePool, cls).getClsTestClient()
        cls.apiclient = testClient.getApiClient()

        cls._cleanup = []

        cls.unsupportedHypervisor = False
        cls.hypervisor = testClient.getHypervisorInfo()
        if cls.hypervisor.lower() in ("hyperv", "lxc"):
            cls.unsupportedHypervisor = True
            return

        cls.services = testClient.getParsedTestDataConfig()
        # Get Zone, Domain and templates
        cls.domain = get_domain(cls.apiclient)
        cls.zone = None

        zones = list_zones(cls.apiclient)

        for z in zones:
            if z.internaldns1 == cls.getClsConfig().mgtSvr[0].mgtSvrIp:
                cls.zone = z

        td = TestData()
        cls.testdata = td.testdata
        cls.helper = StorPoolHelper()
        storpool_primary_storage = cls.testdata[TestData.primaryStorage]
        cls.template_name = storpool_primary_storage.get("name")
        storpool_service_offerings = cls.testdata[TestData.serviceOffering]

        nfs_service_offerings = cls.testdata[TestData.serviceOfferingsPrimary]
        ceph_service_offerings = cls.testdata[TestData.serviceOfferingsCeph]

        storage_pool = list_storage_pools(cls.apiclient,
                                          name=cls.template_name)

        nfs_storage_pool = list_storage_pools(cls.apiclient, name='primary')

        ceph_primary_storage = cls.testdata[TestData.primaryStorage4]

        cls.ceph_storage_pool = list_storage_pools(
            cls.apiclient, name=ceph_primary_storage.get("name"))[0]

        service_offerings = list_service_offering(cls.apiclient,
                                                  name=cls.template_name)
        nfs_service_offering = list_service_offering(cls.apiclient, name='nfs')

        ceph_service_offering = list_service_offering(
            cls.apiclient, name=ceph_primary_storage.get("name"))

        disk_offerings = list_disk_offering(cls.apiclient, name="Small")

        cls.disk_offerings = disk_offerings[0]
        if storage_pool is None:
            storage_pool = StoragePool.create(cls.apiclient,
                                              storpool_primary_storage)
        else:
            storage_pool = storage_pool[0]
        cls.storage_pool = storage_pool
        cls.debug(pprint.pformat(storage_pool))
        if service_offerings is None:
            service_offerings = ServiceOffering.create(
                cls.apiclient, storpool_service_offerings)
        else:
            service_offerings = service_offerings[0]
        if nfs_service_offering is None:
            nfs_service_offering = ServiceOffering.create(
                cls.apiclient, nfs_service_offerings)
        else:
            nfs_service_offering = nfs_service_offering[0]

        if ceph_service_offering is None:
            ceph_service_offering = ServiceOffering.create(
                cls.apiclient, ceph_service_offerings)
        else:
            ceph_service_offering = ceph_service_offering[0]
        # The version of CentOS in the template has to be supported
        template = get_template(cls.apiclient, cls.zone.id, account="system")
        if template == FAILED:
            assert False, "get_template() failed to return template\
                    with description %s" % cls.services["ostype"]

        cls.nfs_storage_pool = nfs_storage_pool[0]
        if cls.nfs_storage_pool.state == "Maintenance":
            cls.nfs_storage_pool = StoragePool.cancelMaintenance(
                cls.apiclient, cls.nfs_storage_pool.id)

        if cls.ceph_storage_pool.state == "Maintenance":
            cls.ceph_storage_pool = StoragePool.cancelMaintenance(
                cls.apiclient, cls.ceph_storage_pool.id)

        cls.account = cls.helper.create_account(cls.apiclient,
                                                cls.services["account"],
                                                accounttype=1,
                                                domainid=cls.domain.id,
                                                roleid=1)
        cls._cleanup.append(cls.account)

        securitygroup = SecurityGroup.list(cls.apiclient,
                                           account=cls.account.name,
                                           domainid=cls.account.domainid)[0]
        cls.helper.set_securityGroups(cls.apiclient,
                                      account=cls.account.name,
                                      domainid=cls.account.domainid,
                                      id=securitygroup.id)

        cls.vm = VirtualMachine.create(
            cls.apiclient, {"name": "StorPool-%s" % uuid.uuid4()},
            zoneid=cls.zone.id,
            templateid=template.id,
            accountid=cls.account.name,
            domainid=cls.account.domainid,
            serviceofferingid=nfs_service_offering.id,
            hypervisor=cls.hypervisor,
            rootdisksize=10)
        cls.vm2 = VirtualMachine.create(
            cls.apiclient, {"name": "StorPool-%s" % uuid.uuid4()},
            zoneid=cls.zone.id,
            templateid=template.id,
            accountid=cls.account.name,
            domainid=cls.account.domainid,
            serviceofferingid=nfs_service_offering.id,
            hypervisor=cls.hypervisor,
            rootdisksize=10)
        cls.vm3 = VirtualMachine.create(
            cls.apiclient, {"name": "StorPool-%s" % uuid.uuid4()},
            zoneid=cls.zone.id,
            templateid=template.id,
            accountid=cls.account.name,
            domainid=cls.account.domainid,
            serviceofferingid=nfs_service_offering.id,
            hypervisor=cls.hypervisor,
            rootdisksize=10)
        cls.vm4 = VirtualMachine.create(
            cls.apiclient, {"name": "StorPool-%s" % uuid.uuid4()},
            zoneid=cls.zone.id,
            templateid=template.id,
            accountid=cls.account.name,
            domainid=cls.account.domainid,
            serviceofferingid=ceph_service_offering.id,
            hypervisor=cls.hypervisor,
            rootdisksize=10)
        cls.vm5 = VirtualMachine.create(
            cls.apiclient, {"name": "StorPool-%s" % uuid.uuid4()},
            zoneid=cls.zone.id,
            templateid=template.id,
            accountid=cls.account.name,
            domainid=cls.account.domainid,
            serviceofferingid=ceph_service_offering.id,
            hypervisor=cls.hypervisor,
            rootdisksize=10)
        cls.storage_pool = StoragePool.update(cls.apiclient,
                                              id=cls.storage_pool.id,
                                              tags=["ssd, nfs"])

        cls.debug(pprint.pformat(template))
        cls.debug(pprint.pformat(cls.hypervisor))

        cls.services["domainid"] = cls.domain.id
        cls.services["small"]["zoneid"] = cls.zone.id
        cls.services["templates"]["ostypeid"] = template.ostypeid
        cls.services["zoneid"] = cls.zone.id

        cls.service_offering = service_offerings
        cls.nfs_service_offering = nfs_service_offering
        cls.debug(pprint.pformat(cls.service_offering))

        cls.template = template
        cls.random_data_0 = random_gen(size=100)
        cls.test_dir = "/tmp"
        cls.random_data = "random.data"
        return
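The repeated list-then-create blocks in this setup follow a get-or-create pattern; a hedged sketch of one generic helper, reusing the marvin listers and creators already imported by the example (the helper name and signature are illustrative):

# Sketch: return the first existing object matching `name`, otherwise create it.
# The lister/creator callables are the marvin helpers used in the setup above.
def get_or_create(lister, creator, apiclient, name, spec):
    existing = lister(apiclient, name=name)
    if existing:
        return existing[0]
    return creator(apiclient, spec)

# Hypothetical usage mirroring the setup above:
# storage_pool = get_or_create(list_storage_pools, StoragePool.create,
#                              cls.apiclient, cls.template_name,
#                              storpool_primary_storage)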
Code example #26
    def test_01_multiple_snapshot_in_zwps(self):
        """ Test multiple volume snapshot in zwps

        # 1. Verify the setup has a ZWPS and 2 CWPS
        # 2. Deploy a VM with a data disk in ZWPS
        # 3. Verify the ROOT and DATA disks of the VM are in ZWPS.
        # 4. Take a snapshot of the VM's data volume.
        # 5. Create multiple snapshots till the operation fails.
        """
        try:
            self.pools = StoragePool.list(self.apiclient, zoneid=self.zone.id)
            status = validateList(self.pools)

            self.assertEqual(
                status[0], PASS,
                "Check: Failed to list storage pools due to %s" % status[2])

            zonepoolList = list(storagePool for storagePool in self.pools
                                if storagePool.scope == "ZONE")

            if len(zonepoolList) < 1:
                self.skipTest("There must be at least one zone wide\
                storage pool available in the setup")
            if len(
                    list(storagePool for storagePool in self.pools
                         if storagePool.scope == "CLUSTER")) < 2:
                self.skipTest("There must be at atleast two cluster wide\
                storage pools available in the setup")
        except Exception as e:
            self.skipTest(e)

        # Adding tags to Storage Pools
        zone_no = 1
        StoragePool.update(self.apiclient,
                           id=zonepoolList[0].id,
                           tags=[ZONETAG1[:-1] + repr(zone_no)])

        self.vm_zwps = VirtualMachine.create(
            self.apiclient,
            self.testdata["small"],
            templateid=self.template.id,
            accountid=self.account.name,
            domainid=self.account.domainid,
            serviceofferingid=self.service_offering_zwps.id,
            diskofferingid=self.disk_offering_zwps.id,
            zoneid=self.zone.id,
        )

        self.cleanup.append(self.vm_zwps)

        # Step 1
        volumes_root_list = list_volumes(self.apiclient,
                                         virtualmachineid=self.vm_zwps.id,
                                         type=ROOT,
                                         listall=True)
        status = validateList(volumes_root_list)

        self.assertEqual(
            status[0], PASS,
            "Check: Failed to list root volume due to %s" % status[2])

        root_volume = volumes_root_list[0]

        if root_volume.storage != zonepoolList[0].name:
            self.fail("Root Volume not in Zone-Wide Storage Pool!")

        volumes_data_list = list_volumes(self.apiclient,
                                         virtualmachineid=self.vm_zwps.id,
                                         type=DATA,
                                         listall=True)
        status = validateList(volumes_data_list)

        self.assertEqual(
            status[0], PASS,
            "Check: Failed to list data volume due to %s" % status[2])

        data_volume = volumes_data_list[0]

        if data_volume.storage != zonepoolList[0].name:
            self.fail("Data Volume not in Zone-Wide Storage Pool!")

        # Step 2
        self.vm_zwps.stop(self.apiclient)

        self.debug(
            "Creation of Snapshot of Data Volume after VM is stopped.....")

        Snapshot.create(self.apiclient, data_volume.id)

        snapshots_list = Snapshot.list(self.apiclient,
                                       volumeid=data_volume.id,
                                       listall=True)

        snap_list_validation_result = validateList(snapshots_list)

        self.assertEqual(
            snap_list_validation_result[0], PASS,
            "snapshot list validation failed due to %s" %
            snap_list_validation_result[2])

        snap_count = len(snapshots_list)

        # Step 3
        self.debug(
            "Creating multiple snapshots (should create more than 10).....")
        try:
            while snap_count <= 12:
                Snapshot.create(self.apiclient, data_volume.id)

                snapshots_list = Snapshot.list(self.apiclient,
                                               volumeid=data_volume.id,
                                               listall=True)

                snap_list_validation_result = validateList(snapshots_list)

                self.assertEqual(
                    snap_list_validation_result[0], PASS,
                    "snapshot list validation failed due to %s" %
                    snap_list_validation_result[2])

                snap_count = len(snapshots_list)
        except Exception as e:
            snapshots_list = Snapshot.list(self.apiclient,
                                           volumeid=data_volume.id,
                                           listall=True)

            snap_list_validation_result = validateList(snapshots_list)

            self.assertEqual(
                snap_list_validation_result[0], PASS,
                "snapshot list validation failed due to %s" %
                snap_list_validation_result[2])

            assert len(snapshots_list) >= 10,\
                "Less than 10 snapshots created...."
            raise Exception("Snapshot creation failed !: %s" % e)

        return
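The pool-scope checks at the top of this test recur throughout the suite; a hedged sketch of a small filter helper, assuming marvin's StoragePool.list plus the validateList/PASS utilities already used above (the helper name is illustrative):

from marvin.codes import PASS
from marvin.lib.base import StoragePool
from marvin.lib.utils import validateList

# Sketch: return the pools of a given scope, or None when the listing is invalid.
def pools_by_scope(apiclient, zoneid, scope):
    pools = StoragePool.list(apiclient, zoneid=zoneid)
    if validateList(pools)[0] != PASS:
        return None
    return [p for p in pools if p.scope == scope]

The skip conditions then become simple length checks, e.g. fewer than one "ZONE" pool or fewer than two "CLUSTER" pools.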
Code example #27
    def test_01_attach_datadisk_to_vm_on_zwps(self):
        """ Attach Data Disk on CWPS To VM 
            1.  Check if zwps storage pool exists.
            2.  Adding tag to zone wide primary storage
            3.  Launch a VM
            4.  Attach data disk to vm.
            5.  Verify disk is attached and in correct storage pool.
        """

        # Step 1
        if len(list(self.pools)) < 1:
            self.skipTest("There must be at least one zone wide \
                storage pool available in the setup")

        # Step 2
        # Adding tags to Storage Pools
        StoragePool.update(
            self.apiclient,
            id=self.pools[0].id,
            tags=[CLUSTERTAG1])

        # Step 3: Launch VM
        self.vm = VirtualMachine.create(
            self.apiclient,
            self.testdata["small"],
            templateid=self.template.id,
            accountid=self.account.name,
            domainid=self.account.domainid,
            serviceofferingid=self.service_offering_zone1.id,
            zoneid=self.zone.id
        )

        self.testdata["volume"]["zoneid"] = self.zone.id
        self.testdata["volume"]["customdisksize"] = 1
        self.data_volume_created = Volume.create_custom_disk(
            self.userapiclient,
            self.testdata["volume"],
            account=self.account.name,
            domainid=self.account.domainid,
            diskofferingid=self.disk_offering.id,
        )

        self.cleanup.append(self.data_volume_created)

        # Step 4
        self.vm.attach_volume(
            self.userapiclient,
            self.data_volume_created
        )

        data_volumes_list = Volume.list(
            self.userapiclient,
            virtualmachineid=self.vm.id,
            type="DATA",
            listall=True
        )

        self.debug("list volumes using vm id %s" % dir(data_volumes_list[0]))

        data_volumes_list = Volume.list(self.apiclient,
                                        id=self.data_volume_created.id,
                                        listall=True)
        data_volume = data_volumes_list[0]
        status = validateList(data_volume)
        # Step 5
        self.assertEqual(
            status[0],
            PASS,
            "Check: volume list is valid")

        self.assertEqual(
            data_volume.state,
            "Ready",
            "Check: Data volume is attached to VM")

        if data_volume.storage != self.pools[0].name:
            self.fail("Volume was not created in the expected storage pool")
        return
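The final placement check can be expressed as one reusable assertion; a hedged sketch assuming marvin's Volume.list and a unittest-style test case, with the helper name and message text being illustrative:

from marvin.lib.base import Volume

# Sketch: assert that a volume reports the expected primary storage by name.
def assert_volume_on_pool(testcase, apiclient, volume_id, pool_name):
    vol = Volume.list(apiclient, id=volume_id, listall=True)[0]
    testcase.assertEqual(
        vol.storage, pool_name,
        "Volume %s is not on expected pool %s" % (volume_id, pool_name))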
Code example #28
File: test_volumes.py Project: PCextreme/cloudstack
    def test_11_migrate_volume_and_change_offering(self):
        """ Validates the following:
            1. Creates a new Volume with a small disk offering
            2. Migrates the Volume to another primary storage and changes the offering
            3. Verifies the Volume has the new offering once migrated to the new storage
        """

        small_offering = list_disk_offering(
            self.apiclient,
            name = "Small"
        )[0]

        large_offering = list_disk_offering(
            self.apiclient,
            name = "Large"
        )[0]
        volume = Volume.create(
            self.apiclient,
            self.services,
            zoneid = self.zone.id,
            account = self.account.name,
            domainid = self.account.domainid,
            diskofferingid = small_offering.id
        )
        self.debug("Created a small volume: %s" % volume.id)

        self.virtual_machine.attach_volume(self.apiclient, volume=volume)

        if self.virtual_machine.hypervisor == "KVM":
            self.virtual_machine.stop(self.apiclient)

        pools = StoragePool.listForMigration(
            self.apiclient,
            id=volume.id
            )

        pool = None

        if pools and len(pools) > 0:
            pool = pools[0]
        else:
            self.skipTest("Not enough storage pools found, skipping test")

        if hasattr(pool, 'tags'):
            StoragePool.update(self.apiclient, id=pool.id, tags="")

        self.debug("Migrating Volume-ID: %s to Pool: %s" % (volume.id, pool.id))
        Volume.migrate(
            self.apiclient,
            volumeid = volume.id,
            storageid = pool.id,
            newdiskofferingid = large_offering.id
        )
        if self.virtual_machine.hypervisor == "KVM":
            self.virtual_machine.start(self.apiclient)
        migrated_vol = Volume.list(
            self.apiclient,
            id = volume.id
        )[0]
        self.assertEqual(
            migrated_vol.diskofferingname,
            large_offering.name,
            "Offering name did not match with the new one "
        )
        return
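Since KVM requires the VM to be stopped around this kind of volume migration (as the test does inline), a hedged sketch of the same requirement wrapped in a context manager; the hypervisor check mirrors the test, everything else (name, lower-casing) is illustrative:

from contextlib import contextmanager

# Sketch: stop a VM on KVM for the duration of a storage operation, then restart it.
# Assumes the marvin VirtualMachine wrapper used in the example above.
@contextmanager
def stopped_if_kvm(vm, apiclient):
    needs_stop = vm.hypervisor and vm.hypervisor.lower() == "kvm"
    if needs_stop:
        vm.stop(apiclient)
    try:
        yield vm
    finally:
        if needs_stop:
            vm.start(apiclient)

Usage would be along the lines of: with stopped_if_kvm(self.virtual_machine, self.apiclient): Volume.migrate(...).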
Code example #29
    def test_01_attach_datadisk_to_vm_on_zwps(self):
        """ Attach Data Disk To VM on ZWPS
            1.  Check if zwps storage pool exists.
            2.  Adding tag to zone wide primary storage
            3.  Launch a VM on ZWPS
            4.  Attach data disk to vm which is on zwps.
            5.  Verify disk is attached.
        """

        # Step 1
        if len(list(storagePool for storagePool in self.pools
                    if storagePool.scope == "ZONE")) < 1:
            self.skipTest("There must be at least one zone wide \
                storage pools available in the setup")

        # Adding tags to Storage Pools
        zone_no = 1
        for storagePool in self.pools:
            if storagePool.scope == "ZONE":
                StoragePool.update(
                    self.apiclient,
                    id=storagePool.id,
                    tags=[ZONETAG1[:-1] + repr(zone_no)])
                zone_no += 1

        self.vm = VirtualMachine.create(
            self.apiclient,
            self.testdata["small"],
            templateid=self.template.id,
            accountid=self.account.name,
            domainid=self.account.domainid,
            serviceofferingid=self.service_offering_zone1.id,
            zoneid=self.zone.id
        )

        self.data_volume_created = Volume.create(
            self.userapiclient,
            self.testdata["volume"],
            zoneid=self.zone.id,
            account=self.account.name,
            domainid=self.account.domainid,
            diskofferingid=self.disk_offering.id
        )

        self.cleanup.append(self.data_volume_created)

        # Step 2
        self.vm.attach_volume(
            self.userapiclient,
            self.data_volume_created
        )

        data_volumes_list = Volume.list(
            self.userapiclient,
            id=self.data_volume_created.id,
            virtualmachineid=self.vm.id
        )

        data_volume = data_volumes_list[0]

        status = validateList(data_volume)

        # Step 3
        self.assertEqual(
            status[0],
            PASS,
            "Check: Data Disk is attached to VM")

        return
Code example #30
    def test_01_attach_datadisk_to_vm_on_zwps(self):
        """ Attach Data Disk on CWPS To VM
            1.  Check if zwps storage pool exists.
            2.  Adding tag to zone wide primary storage
            3.  Launch a VM
            4.  Attach data disk to vm.
            5.  Verify disk is attached and in correct storage pool.
        """

        # Step 1
        if len(list(self.pools)) < 1:
            self.skipTest("There must be at least one zone wide \
                storage pool available in the setup")

        # Step 2
        # Adding tags to Storage Pools
        StoragePool.update(self.apiclient,
                           id=self.pools[0].id,
                           tags=[CLUSTERTAG1])

        # Launch VM
        self.vm = VirtualMachine.create(
            self.apiclient,
            self.testdata["small"],
            templateid=self.template.id,
            accountid=self.account.name,
            domainid=self.account.domainid,
            serviceofferingid=self.service_offering_zone1.id,
            zoneid=self.zone.id)

        self.testdata["volume"]["zoneid"] = self.zone.id
        self.testdata["volume"]["customdisksize"] = 1
        self.data_volume_created = Volume.create_custom_disk(
            self.userapiclient,
            self.testdata["volume"],
            account=self.account.name,
            domainid=self.account.domainid,
            diskofferingid=self.disk_offering.id,
        )

        self.cleanup.append(self.data_volume_created)

        # Step 4
        self.vm.attach_volume(self.userapiclient, self.data_volume_created)

        data_volumes_list = Volume.list(self.userapiclient,
                                        virtualmachineid=self.vm.id,
                                        type="DATA",
                                        listall=True)

        self.debug("list volumes using vm id %s" % dir(data_volumes_list[0]))

        data_volumes_list = Volume.list(self.apiclient,
                                        id=self.data_volume_created.id,
                                        listall=True)
        data_volume = data_volumes_list[0]
        status = validateList(data_volume)
        # Step 5
        self.assertEqual(status[0], PASS, "Check: volume list is valid")

        self.assertEqual(data_volume.state, "Ready",
                         "Check: Data volume is attached to VM")

        if data_volume.storage != self.pools[0].name:
            self.fail("Volume was not created in the expected storage pool")
        return
Code example #31
    def test_01_recover_VM(self):
        """ Test Restore VM on VMWare
            1. Deploy a VM without datadisk
            2. Restore the VM
            3. Verify that VM comes up in Running state
        """
        try:
            self.pools = StoragePool.list(
                self.apiclient,
                zoneid=self.zone.id,
                scope="CLUSTER")

            status = validateList(self.pools)

            # Step 3
            self.assertEqual(
                status[0],
                PASS,
                "Check: Failed to list cluster wide storage pools")

            if len(self.pools) < 2:
                self.skipTest("There must be at least two cluster wide\
                storage pools available in the setup")

        except Exception as e:
            self.skipTest(e)

        # Adding tags to Storage Pools
        cluster_no = 1
        StoragePool.update(
            self.apiclient,
            id=self.pools[0].id,
            tags=[CLUSTERTAG1[:-1] + repr(cluster_no)])

        self.vm = VirtualMachine.create(
            self.apiclient,
            self.testdata["small"],
            accountid=self.account.name,
            templateid=self.template.id,
            domainid=self.account.domainid,
            serviceofferingid=self.service_offering_cwps.id,
            zoneid=self.zone.id,
        )
        # Step 2

        volumes_root_list = list_volumes(
            self.apiclient,
            virtualmachineid=self.vm.id,
            type=ROOT,
            listall=True
        )

        root_volume = volumes_root_list[0]

        # Restore the VM until its ROOT disk is recreated on another Primary Storage
        while True:
            self.vm.restore(self.apiclient)
            volumes_root_list = list_volumes(
                self.apiclient,
                virtualmachineid=self.vm.id,
                type=ROOT,
                listall=True
            )

            root_volume = volumes_root_list[0]

            if root_volume.storage != self.pools[0].name:
                break

        # Step 3
        vm_list = list_virtual_machines(
            self.apiclient,
            id=self.vm.id)

        state = vm_list[0].state
        i = 0
        while(state != "Running"):
            vm_list = list_virtual_machines(
                self.apiclient,
                id=self.vm.id)

            time.sleep(10)
            i = i + 1
            state = vm_list[0].state
            if i >= 10:
                self.fail("Restore VM Failed")
                break

        return
Code example #32
    def setUpClass(cls):
        cls.testClient = super(TestResizeVolume, cls).getClsTestClient()
        cls.api_client = cls.testClient.getApiClient()
        cls.hypervisor = (cls.testClient.getHypervisorInfo()).lower()
        cls.storageID = None
        # Fill services from the external config file
        cls.services = cls.testClient.getParsedTestDataConfig()
        # Get Zone, Domain and templates
        cls.domain = get_domain(cls.api_client)
        cls.zone = get_zone(cls.api_client, cls.testClient.getZoneForTests())
        cls.services["mode"] = cls.zone.networktype
        cls._cleanup = []
        cls.unsupportedStorageType = False
        cls.unsupportedHypervisorType = False
        cls.updateclone = False
        if cls.hypervisor not in ['xenserver', "kvm", "vmware"]:
            cls.unsupportedHypervisorType = True
            return
        cls.template = get_template(cls.api_client, cls.zone.id)
        cls.services["virtual_machine"]["zoneid"] = cls.zone.id
        cls.services["virtual_machine"]["template"] = cls.template.id
        cls.services["volume"]["zoneid"] = cls.zone.id
        try:
            cls.parent_domain = Domain.create(cls.api_client,
                                              services=cls.services["domain"],
                                              parentdomainid=cls.domain.id)
            cls.parentd_admin = Account.create(cls.api_client,
                                               cls.services["account"],
                                               admin=True,
                                               domainid=cls.parent_domain.id)
            cls._cleanup.append(cls.parentd_admin)
            cls._cleanup.append(cls.parent_domain)
            list_pool_resp = list_storage_pools(cls.api_client,
                                                account=cls.parentd_admin.name,
                                                domainid=cls.parent_domain.id)
            res = validateList(list_pool_resp)
            if res[2] == INVALID_INPUT:
                raise Exception(
                    "Failed to list storage pools - no storage pools found")
            # Identify the storage pool type and set vmware full clone to true if the storage is VMFS
            if cls.hypervisor == 'vmware':
                for strpool in list_pool_resp:
                    if strpool.type.lower() == "vmfs" or strpool.type.lower(
                    ) == "networkfilesystem":
                        list_config_storage_response = list_configurations(
                            cls.api_client,
                            name="vmware.create.full.clone",
                            storageid=strpool.id)
                        res = validateList(list_config_storage_response)
                        if res[2] == INVALID_INPUT:
                            raise Exception("Failed to list configurations")
                        if list_config_storage_response[0].value == "false":
                            Configurations.update(cls.api_client,
                                                  "vmware.create.full.clone",
                                                  value="true",
                                                  storageid=strpool.id)
                            cls.updateclone = True
                            StoragePool.update(cls.api_client,
                                               id=strpool.id,
                                               tags="scsi")
                            cls.storageID = strpool.id
                            cls.unsupportedStorageType = False
                            break
                    else:
                        cls.unsupportedStorageType = True
            # Creating service offering with normal config
            cls.service_offering = ServiceOffering.create(
                cls.api_client, cls.services["service_offering"])
            cls.services_offering_vmware = ServiceOffering.create(
                cls.api_client, cls.services["service_offering"], tags="scsi")
            cls._cleanup.extend(
                [cls.service_offering, cls.services_offering_vmware])

        except Exception as e:
            cls.tearDownClass()
            raise Exception("Warning: Exception during setUpClass: %s" % e)
        return