def cleanUpCloudStack(cls):
        cfg.logger.info("Cleaning up after the whole test run")
        try:
            cls.nfs_storage_pool = StoragePool.enableMaintenance(
                cls.apiclient, cls.nfs_storage_pool.id)
            cls.storage_pool = StoragePool.update(cls.apiclient,
                                                  id=cls.primary_storage.id,
                                                  tags=["ssd"])
            cls.storage_pool2 = StoragePool.update(cls.apiclient,
                                                   id=cls.primary_storage2.id,
                                                   tags=["ssd2"])
            # Cleanup resources used
            cleanup_resources(cls.apiclient, cls._cleanup)
        except Exception as e:
            cfg.logger.info("cleanup_resources failed: %s", e)
            os.killpg(cls.mvn_proc_grp, signal.SIGTERM)

            raise Exception("Warning: Exception during cleanup : %s" % e)

        cfg.logger.info("Stopping CloudStack")
        os.killpg(cls.mvn_proc_grp, signal.SIGTERM)

        time.sleep(30)

        return
Example #2
    def test_01_attach_datadisk_to_vm_on_zwps(self):
        """ Attach Data Disk To VM on ZWPS
            1.  Check that a ZWPS storage pool exists.
            2.  Add a tag to the zone-wide primary storage.
            3.  Launch a VM on ZWPS.
            4.  Attach a data disk to the VM on ZWPS.
            5.  Verify the disk is attached.
        """

        # Step 1
        if len(
                list(storagePool for storagePool in self.pools
                     if storagePool.scope == "ZONE")) < 1:
            self.skipTest("There must be at least one zone-wide "
                          "storage pool available in the setup")

        # Adding tags to Storage Pools
        zone_no = 1
        for storagePool in self.pools:
            if storagePool.scope == "ZONE":
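                # Give each zone-wide pool its own tag: ZONETAG1 with its
                # trailing character replaced by a running counter (this
                # assumes ZONETAG1 ends in a digit).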
                StoragePool.update(self.apiclient,
                                   id=storagePool.id,
                                   tags=[ZONETAG1[:-1] + repr(zone_no)])
                zone_no += 1

        self.vm = VirtualMachine.create(
            self.apiclient,
            self.testdata["small"],
            templateid=self.template.id,
            accountid=self.account.name,
            domainid=self.account.domainid,
            serviceofferingid=self.service_offering_zone1.id,
            zoneid=self.zone.id)

        self.data_volume_created = Volume.create(
            self.userapiclient,
            self.testdata["volume"],
            zoneid=self.zone.id,
            account=self.account.name,
            domainid=self.account.domainid,
            diskofferingid=self.disk_offering.id)

        self.cleanup.append(self.data_volume_created)

        # Step 2
        self.vm.attach_volume(self.userapiclient, self.data_volume_created)

        data_volumes_list = Volume.list(self.userapiclient,
                                        id=self.data_volume_created.id,
                                        virtualmachineid=self.vm.id)

        data_volume = data_volumes_list[0]

        status = validateList(data_volumes_list)

        # Step 3
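        # validateList returns a list whose first element is PASS when it was
        # given a valid, non-empty list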
        self.assertEqual(status[0], PASS,
                         "Check if the data disk is attached to the VM")

        return
    def test_02_migrate_vm_from_ceph_to_storpool_live(self):
        """
        Migrate VMs/Volumes live
        """
        self.storage_pool = StoragePool.update(self.apiclient,
                                               id=self.storage_pool.id,
                                               tags=["ssd, ceph"])
        random_data = self.writeToFile(self.vm2)
        cmd = listHosts.listHostsCmd()
        cmd.type = "Routing"
        cmd.state = "Up"
        cmd.zoneid = self.zone.id
        hosts = self.apiclient.listHosts(cmd)
        destinationHost = self.helper.getDestinationHost(
            self.vm2.hostid, hosts)
        vol_pool_map = {}
        volumes = list_volumes(self.apiclient,
                               virtualmachineid=self.vm2.id,
                               listall=True)
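        # Map every volume of the VM to the retagged pool so all disks migrate together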
        for v in volumes:
            vol_pool_map[v.id] = self.storage_pool.id

        # Migrate the vm2
        print(vol_pool_map)
        vm2 = self.vm2.migrate_vm_with_volume(self.apiclient,
                                              hostid=destinationHost.id,
                                              migrateto=vol_pool_map)
        self.checkFileAndContentExists(self.vm2, random_data)

        self.storage_pool = StoragePool.update(self.apiclient,
                                               id=self.storage_pool.id,
                                               tags=["ssd"])
    def test04_delete_primary_storage(self):
        #cleanup_resources(self.apiClient, self._primary_storage)
        StoragePool.delete(self.primary_storage, self.apiClient)
        self.cleanup = []

        # Verify in Cloudstack
        storage_pools_response = list_storage_pools(self.apiClient,
                                                    clusterid=self.cluster.id)
        for storage in storage_pools_response:
            self.assertNotEqual(storage.id, self.primary_storage_id,
                                "Primary storage not deleted")

        # Verify in Datera
        flag = 0
        datera_primary_storage_name = "cloudstack-" + self.primary_storage_id
        for item in self.datera_api.app_instances.list():
            if item['name'] == datera_primary_storage_name:
                flag = 1
        self.assertEqual(flag, 0, "app instance not deleted.")

        # Verify in xenserver
        for key, value in self.xen_session.xenapi.SR.get_all_records().items():
            self.assertNotEqual(value['name_description'],
                                self.primary_storage_id,
                                "SR not deleted in xenserver")

        # Verify in sql database
        command = "select uuid from storage_pool"
        sql_result = self.dbConnection.execute(command)
        key = 0
        for uuid in sql_result:
            if uuid[0] == self.primary_storage_id:
                key = 1
        self.assertEqual(key, 0, "Primary storage not deleted in database")
    def test13_update_primary_storage_capacityIops_to_zero(self):
        updatedIops = 0
        StoragePool.update(self.apiClient,
                           id=self.primary_storage_id,
                           capacityiops=updatedIops,
                           tags=self.primary_tag)

        # Verify in CloudStack
        storage_pools_response = list_storage_pools(self.apiClient,
                                                    clusterid=self.cluster.id)
        for data in storage_pools_response:
            if data.id == self.primary_storage_id:
                storage_pool = data

        self.assertEqual(storage_pool.capacityiops, updatedIops,
                         "Primary storage capacityiops not updated")

        # Verify in datera
        datera_primary_storage_name = "cloudstack-" + self.primary_storage_id
        for instance in self.datera_api.app_instances.list():
            if instance['name'] == datera_primary_storage_name:
                datera_instance = instance
        app_instance_response_iops = (
            datera_instance['storage_instances']['storage-1']['volumes']
            ['volume-1']['performance_policy']['total_iops_max'])

        self.assertEqual(app_instance_response_iops, updatedIops,
                         "app-instance capacityiops not updated")

        StoragePool.delete(self.primary_storage, self.apiClient)
        self.cleanup = []
Example #6
    def tearDown(self):
        try:
            for storagePool in self.pools:
                StoragePool.update(self.apiclient, id=storagePool.id, tags="")

            if hasattr(self, "data_volume_created"):
                data_volumes_list = Volume.list(
                    self.userapiclient,
                    id=self.data_volume_created.id,
                    virtualmachineid=self.vm.id
                )
                if data_volumes_list:
                    self.vm.detach_volume(
                        self.userapiclient,
                        data_volumes_list[0]
                    )

                status = validateList(data_volumes_list)
                self.assertEqual(
                    status[0],
                    PASS,
                    "DATA Volume List Validation Failed")

            cleanup_resources(self.apiclient, self.cleanup)
        except Exception as e:
            raise Exception("Warning: Exception during cleanup : %s" % e)
        return
    def test_02_storage_migrate_root_and_data_disks(self):
        primarystorage2 = self.testdata[TestData.primaryStorage2]

        primary_storage_2 = StoragePool.create(self.apiClient,
                                               primarystorage2,
                                               clusterid=self.cluster_1.id)

        primary_storage_3 = StoragePool.create(self.apiClient,
                                               primarystorage2,
                                               clusterid=self.cluster_2.id)

        src_host, dest_host = self._get_source_and_dest_hosts()

        virtual_machine = VirtualMachine.create(
            self.apiClient,
            self.testdata[TestData.virtualMachine],
            accountid=self.account.name,
            zoneid=self.zone.id,
            serviceofferingid=self.compute_offering_3.id,
            templateid=self.template.id,
            domainid=self.domain.id,
            hostid=src_host.id,
            startvm=True)

        cs_data_volume = Volume.create(self.apiClient,
                                       self.testdata[TestData.volume_1],
                                       account=self.account.name,
                                       domainid=self.domain.id,
                                       zoneid=self.zone.id,
                                       diskofferingid=self.disk_offering_1.id)

        self.cleanup = [
            virtual_machine, cs_data_volume, primary_storage_2,
            primary_storage_3
        ]

        cs_data_volume = virtual_machine.attach_volume(self.apiClient,
                                                       cs_data_volume)

        sf_account_id = sf_util.get_sf_account_id(
            self.cs_api, self.account.id, self.primary_storage.id, self,
            TestVMMigrationWithStorage.
            _sf_account_id_should_be_non_zero_int_err_msg)

        sf_volumes = sf_util.get_active_sf_volumes(self.sfe, sf_account_id)

        sf_data_volume = sf_util.check_and_get_sf_volume(
            sf_volumes, cs_data_volume.name, self)

        sf_data_volume = self._migrate_and_verify_one_disk_only(
            virtual_machine, dest_host, cs_data_volume, sf_account_id,
            sf_data_volume, self.xen_session_1, self.xen_session_2)

        src_host, dest_host = dest_host, src_host

        self._migrate_and_verify_one_disk_only(virtual_machine, dest_host,
                                               cs_data_volume, sf_account_id,
                                               sf_data_volume,
                                               self.xen_session_2,
                                               self.xen_session_1)
Example #8
 def updateVmwareCreateFullCloneSetting(self, tearDown):
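     # When tearDown is False, force vmware.create.full.clone to "true" globally
     # and on every pool; otherwise restore the previously saved values.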
     if not tearDown:
         Configurations.update(self.apiclient,
                               "vmware.create.full.clone",
                               "true")
         allStoragePools = StoragePool.list(
             self.apiclient
         )
         for pool in allStoragePools:
             Configurations.update(self.apiclient,
                                   storageid=pool.id,
                                   name="vmware.create.full.clone",
                                   value="true")
     else:
         Configurations.update(self.apiclient,
                               "vmware.create.full.clone",
                               self.fullClone[0].value.lower())
         allStoragePools = StoragePool.list(
             self.apiclient
         )
         for pool in allStoragePools:
             Configurations.update(self.apiclient,
                                   storageid=pool.id,
                                   name="vmware.create.full.clone",
                                   value=self.storeCloneValues[pool.id])
    def cleanUpCloudStack(cls):
        cfg.logger.info("Cleaning up after the whole test run")
        try:
            # Cleanup resources used
            cleanup_resources(cls.apiclient, cls._cleanup)

            primary_storage = list_storage_pools(
                cls.apiclient, name=cls.primary_storage.name)[0]
            primary_storage2 = list_storage_pools(
                cls.apiclient, name=cls.primary_storage2.name)[0]
            storage_pool1 = StoragePool.enableMaintenance(
                cls.apiclient, primary_storage.id)
            storage_pool2 = StoragePool.enableMaintenance(
                cls.apiclient, primary_storage2.id)

            cls.delete_storage_pool(id=primary_storage.id)
            cls.delete_storage_pool(id=primary_storage2.id)

            cls.spapi.volumeTemplateDelete(templateName=cls.sp_template_1)
            cls.spapi.volumeTemplateDelete(templateName=cls.sp_template_2)
        except Exception as e:
            cfg.logger.info("cleanup_resources failed: %s", e)
            os.killpg(cls.mvn_proc_grp, signal.SIGTERM)

            time.sleep(30)

            raise Exception("Warning: Exception during cleanup : %s" % e)

        cfg.logger.info("Stopping CloudStack")
        os.killpg(cls.mvn_proc_grp, signal.SIGTERM)

        time.sleep(30)

        return
    def tearDownClass(cls):
        try:
            # Cleanup resources used

            if cls.updateclone:
                Configurations.update(cls.api_client,
                                      "vmware.create.full.clone",
                                      value="false", storageid=cls.storageID)
                Configurations.update(cls.api_client,
                                      "vmware.create.full.clone",
                                      value="false")
                Configurations.update(cls.api_client,
                                      "vmware.root.disk.controller",
                                      value=cls.defaultdiskcontroller)
                StoragePool.update(cls.api_client, id=cls.storageID,
                                   tags="")
                cls.restartServer()

                # Give the management server 30 seconds to warm up;
                # deployments have failed when a VM was deployed exactly as management came up
                time.sleep(30)

            cleanup_resources(cls.api_client, cls._cleanup)
        except Exception as e:
            raise Exception("Warning: Exception during cleanup : %s" % e)
        return
    def test13_update_primary_storage_capacityIops_to_zero(self):
        updatedIops = 0
        StoragePool.update(self.apiClient,
                           id=self.primary_storage_id,
                           capacityiops=updatedIops,
                           tags=self.primary_tag)

        # Verify in CloudStack
        storage_pools_response = list_storage_pools(
            self.apiClient, clusterid=self.cluster.id)
        for data in storage_pools_response:
            if data.id == self.primary_storage_id:
                storage_pool = data

        self.assertEqual(
            storage_pool.capacityiops, updatedIops,
            "Primary storage capacityiops not updated")

        # Verify in datera
        datera_primary_storage_name = "cloudstack-" + self.primary_storage_id
        for instance in self.datera_api.app_instances.list():
            if instance['name'] == datera_primary_storage_name:
                datera_instance = instance
        app_instance_response_iops = (
            datera_instance['storage_instances']
            ['storage-1']['volumes']['volume-1']['performance_policy']
            ['total_iops_max'])

        self.assertEqual(
            app_instance_response_iops, updatedIops,
            "app-instance capacityiops not updated")

        StoragePool.delete(self.primary_storage, self.apiClient)
        self.cleanup = []
Example #12
    def test_11_migrate_volume_and_change_offering(self):

        # Validates the following
        #
        # 1. Creates a new Volume with a small disk offering
        #
        # 2. Migrates the Volume to another primary storage and changes the offering
        #
        # 3. Verifies the Volume has new offering when migrated to the new storage.

        small_offering = list_disk_offering(self.apiclient, name="Small")[0]

        large_offering = list_disk_offering(self.apiclient, name="Large")[0]
        volume = Volume.create(self.apiclient,
                               self.services,
                               zoneid=self.zone.id,
                               account=self.account.name,
                               domainid=self.account.domainid,
                               diskofferingid=small_offering.id)
        self.debug("Created a small volume: %s" % volume.id)

        self.virtual_machine.attach_volume(self.apiclient, volume=volume)

        if self.virtual_machine.hypervisor == "KVM":
            self.virtual_machine.stop(self.apiclient)

        pools = StoragePool.listForMigration(self.apiclient, id=volume.id)

        pool = None

        if pools and len(pools) > 0:
            pool = pools[0]
        else:
            self.skipTest(
                "Not enough storage pools found, skipping test")

        if hasattr(pool, 'tags'):
            StoragePool.update(self.apiclient, id=pool.id, tags="")

        self.debug("Migrating Volume-ID: %s to Pool: %s" %
                   (volume.id, pool.id))
        livemigrate = False
        if self.virtual_machine.hypervisor.lower(
        ) == "vmware" or self.virtual_machine.hypervisor.lower(
        ) == 'xenserver':
            livemigrate = True

        Volume.migrate(self.apiclient,
                       volumeid=volume.id,
                       storageid=pool.id,
                       newdiskofferingid=large_offering.id,
                       livemigrate=livemigrate)
        if self.virtual_machine.hypervisor == "KVM":
            self.virtual_machine.start(self.apiclient)
        migrated_vol = Volume.list(self.apiclient, id=volume.id)[0]
        self.assertEqual(migrated_vol.diskofferingname, large_offering.name,
                         "Offering name did not match the new one")
        return
Example #13
    def test_validateState_fails_after_retry_limit(self):
        retries = 3
        timeout = 2
        api_client = MockApiClient(retries, 'initial state', 'final state')
        storage_pool = StoragePool({'id': 'snapshot_id'})
        state = storage_pool.validateState(api_client, 'final state', timeout=timeout, interval=1)

        self.assertEqual(state, [FAIL, 'StoragePool state not transited to final state, operation timed out'])
        self.assertEqual(retries, api_client.retry_counter)
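
The MockApiClient used by these validateState tests is not shown on this page. Below is a minimal sketch of what they appear to assume (illustrative only, not the actual CloudStack test double): it reports 'initial state' until it has been polled `retries` times, then reports 'final state'.

class _MockPool(object):
    # Bare-bones response object exposing only what validateState inspects.
    def __init__(self, pool_id, state):
        self.id = pool_id
        self.state = state


class MockApiClient(object):
    # Hypothetical stand-in for the mock used above.
    def __init__(self, retries, initial_state, final_state):
        self.retries = retries
        self.initial_state = initial_state
        self.final_state = final_state
        self.retry_counter = 0

    def listStoragePools(self, cmd):
        # validateState() polls the pool and reads the `state` attribute of the
        # first result; count every poll so the tests can assert on retry_counter.
        self.retry_counter += 1
        state = (self.final_state if self.retry_counter >= self.retries
                 else self.initial_state)
        return [_MockPool(getattr(cmd, "id", None), state)]
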
    def tearDown(self):
        try:
            for storagePool in self.pools:
                StoragePool.update(self.apiclient, id=storagePool.id, tags="")

            cleanup_resources(self.apiclient, self.cleanup)
        except Exception as e:
            raise Exception("Warning: Exception during cleanup : %s" % e)
        return
Example #15
    def test_validateState_succeeds_at_retry_limit(self):
        retries = 3
        timeout = 3
        api_client = MockApiClient(retries, 'initial state', 'final state')
        storage_pool = StoragePool({'id': 'snapshot_id'})
        state = storage_pool.validateState(api_client, 'final state', timeout=timeout, interval=1)

        self.assertEqual(state, [PASS, None])
        self.assertEqual(retries, api_client.retry_counter)
Example #16
    def tearDown(self):
        try:
            for storagePool in self.pools:
                StoragePool.update(self.apiclient, id=storagePool.id, tags="")

            cleanup_resources(self.apiclient, self.cleanup)
        except Exception as e:
            raise Exception("Warning: Exception during cleanup : %s" % e)
        return
    def test_add_remove_host_with_solidfire_plugin_3(self):
        if TestData.hypervisor_type != TestData.xenServer:
            return

        primarystorage = self.testdata[TestData.primaryStorage]

        primary_storage = StoragePool.create(
            self.apiClient,
            primarystorage,
            scope=primarystorage[TestData.scope],
            zoneid=self.zone.id,
            provider=primarystorage[TestData.provider],
            tags=primarystorage[TestData.tags],
            capacityiops=primarystorage[TestData.capacityIops],
            capacitybytes=primarystorage[TestData.capacityBytes],
            hypervisor=primarystorage[TestData.hypervisor]
        )

        self.cleanup.append(primary_storage)

        self.virtual_machine = VirtualMachine.create(
            self.apiClient,
            self.testdata[TestData.virtualMachine],
            accountid=self.account.name,
            zoneid=self.zone.id,
            serviceofferingid=self.compute_offering.id,
            templateid=self.template.id,
            domainid=self.domain.id,
            startvm=True
        )

        root_volume = self._get_root_volume(self.virtual_machine)

        sf_iscsi_name = sf_util.get_iqn(self.cs_api, root_volume, self)

        primarystorage2 = self.testdata[TestData.primaryStorage2]

        primary_storage_2 = StoragePool.create(
            self.apiClient,
            primarystorage2,
            scope=primarystorage2[TestData.scope],
            zoneid=self.zone.id,
            clusterid=self.cluster.id,
            provider=primarystorage2[TestData.provider],
            tags=primarystorage2[TestData.tags],
            capacityiops=primarystorage2[TestData.capacityIops],
            capacitybytes=primarystorage2[TestData.capacityBytes],
            hypervisor=primarystorage2[TestData.hypervisor]
        )

        self.cleanup.append(primary_storage_2)

        self._perform_add_remove_xenserver_host(primary_storage.id, sf_iscsi_name)
Example #18
    def test_validateState_succeeds_at_retry_limit(self):
        retries = 3
        timeout = 3
        api_client = MockApiClient(retries, 'initial state', 'final state')
        storage_pool = StoragePool({'id': 'snapshot_id'})
        state = storage_pool.validateState(api_client,
                                           'final state',
                                           timeout=timeout,
                                           interval=1)

        self.assertEqual(state, [PASS, None])
        self.assertEqual(retries, api_client.retry_counter)
    def test_add_remove_host_with_solidfire_plugin_3(self):
        if TestData.hypervisor_type != TestData.xenServer:
            return

        primarystorage = self.testdata[TestData.primaryStorage]

        primary_storage = StoragePool.create(
            self.apiClient,
            primarystorage,
            scope=primarystorage[TestData.scope],
            zoneid=self.zone.id,
            provider=primarystorage[TestData.provider],
            tags=primarystorage[TestData.tags],
            capacityiops=primarystorage[TestData.capacityIops],
            capacitybytes=primarystorage[TestData.capacityBytes],
            hypervisor=primarystorage[TestData.hypervisor])

        self.cleanup.append(primary_storage)

        self.virtual_machine = VirtualMachine.create(
            self.apiClient,
            self.testdata[TestData.virtualMachine],
            accountid=self.account.name,
            zoneid=self.zone.id,
            serviceofferingid=self.compute_offering.id,
            templateid=self.template.id,
            domainid=self.domain.id,
            startvm=True)

        root_volume = self._get_root_volume(self.virtual_machine)

        sf_iscsi_name = sf_util.get_iqn(self.cs_api, root_volume, self)

        primarystorage2 = self.testdata[TestData.primaryStorage2]

        primary_storage_2 = StoragePool.create(
            self.apiClient,
            primarystorage2,
            scope=primarystorage2[TestData.scope],
            zoneid=self.zone.id,
            clusterid=self.cluster.id,
            provider=primarystorage2[TestData.provider],
            tags=primarystorage2[TestData.tags],
            capacityiops=primarystorage2[TestData.capacityIops],
            capacitybytes=primarystorage2[TestData.capacityBytes],
            hypervisor=primarystorage2[TestData.hypervisor])

        self.cleanup.append(primary_storage_2)

        self._perform_add_remove_xenserver_host(primary_storage.id,
                                                sf_iscsi_name)
    def cleanUpCloudStack(cls):
        try:
            if cls.nfs_storage_pool.state != "Maintenance":
                cls.nfs_storage_pool = StoragePool.enableMaintenance(
                    cls.apiclient, cls.nfs_storage_pool.id)

            if cls.ceph_storage_pool.state != "Maintenance":
                cls.ceph_storage_pool = StoragePool.enableMaintenance(
                    cls.apiclient, cls.ceph_storage_pool.id)

            # Cleanup resources used
            cleanup_resources(cls.apiclient, cls._cleanup)
        except Exception as e:
            raise Exception("Warning: Exception during cleanup : %s" % e)
        return
Example #21
    def test_validateState_fails_after_retry_limit(self):
        retries = 3
        timeout = 2
        api_client = MockApiClient(retries, 'initial state', 'final state')
        storage_pool = StoragePool({'id': 'snapshot_id'})
        state = storage_pool.validateState(api_client,
                                           'final state',
                                           timeout=timeout,
                                           interval=1)

        self.assertEqual(state, [
            FAIL,
            'StoragePool state not transited to final state, operation timed out'
        ])
        self.assertEqual(retries, api_client.retry_counter)
    def setUpClass(cls):
        testClient = super(TestAttachDataDiskOnCWPS, cls).getClsTestClient()
        cls.apiclient = testClient.getApiClient()
        cls.testdata = testClient.getParsedTestDataConfig()
        cls.hypervisor = cls.testClient.getHypervisorInfo()

        # Get Zone, Domain and templates
        cls.domain = get_domain(cls.apiclient)
        cls.zone = get_zone(cls.apiclient, testClient.getZoneForTests())
        cls._cleanup = []
        cls.template = get_template(
            cls.apiclient,
            cls.zone.id,
            cls.testdata["ostype"])
        cls.skiptest = False

        try:
            cls.pools = StoragePool.list(
                cls.apiclient,
                zoneid=cls.zone.id,
                scope="CLUSTER")
        except Exception as e:
            cls.skiptest = True
            return
        try:

            # Create an account
            cls.account = Account.create(
                cls.apiclient,
                cls.testdata["account"],
                domainid=cls.domain.id
            )
            cls._cleanup.append(cls.account)

            # Create user api client of the account
            cls.userapiclient = testClient.getUserApiClient(
                UserName=cls.account.name,
                DomainName=cls.account.domain
            )
            # Create Service offering
            cls.service_offering = ServiceOffering.create(
                cls.apiclient,
                cls.testdata["service_offering"],
            )
            cls._cleanup.append(cls.service_offering)

            # Create Disk offering
            cls.disk_offering = DiskOffering.create(
                cls.apiclient,
                cls.testdata["disk_offering"],
                custom=True,
                tags=CLUSTERTAG1,
            )

            cls._cleanup.append(cls.disk_offering)

        except Exception as e:
            cls.tearDownClass()
            raise e
        return
Example #23
    def create_sp_template_and_storage_pool(self, apiclient, template_name,
                                            primary_storage, zoneid):
        spapiRemote = spapi.Api.fromConfig()
        logging.debug("================ %s" % spapiRemote)

        sp_api = spapi.Api.fromConfig(multiCluster=True)
        logging.debug("================ %s" % sp_api)

        remote_cluster = self.get_remote_storpool_cluster()
        logging.debug("================ %s" % remote_cluster)

        newTemplate = sptypes.VolumeTemplateCreateDesc(name=template_name,
                                                       placeAll="ssd",
                                                       placeTail="ssd",
                                                       placeHead="ssd",
                                                       replication=1)
        template_on_remote = spapiRemote.volumeTemplateCreate(
            newTemplate, clusterName=remote_cluster)
        template_on_local = spapiRemote.volumeTemplateCreate(newTemplate)
        storage_pool = StoragePool.create(
            apiclient,
            primary_storage,
            zoneid=zoneid,
        )
        return storage_pool, spapiRemote, sp_api
    def test12_primary_storage_with_zero_iops(self):
        primarystorage5 = self.testdata[TestData.primaryStorage5]

        primary_storage5 = StoragePool.create(
            self.apiClient,
            primarystorage5,
            scope=primarystorage5[TestData.scope],
            zoneid=self.zone.id,
            clusterid=self.cluster.id,
            provider=primarystorage5[TestData.provider],
            tags=primarystorage5[TestData.tags],
            capacityiops=primarystorage5[TestData.capacityIops],
            capacitybytes=primarystorage5[TestData.capacityBytes],
            hypervisor=primarystorage5[TestData.hypervisor]
        )

        self.cleanup.append(primary_storage5)
        primary_storage_name = "cloudstack-" + primary_storage5.id
        self.assertEqual(
            any(primary_storage_name == app_instance['name']
                for app_instance in self.datera_api.app_instances.list()),
            True, "app instance not created")

        primary_storage_url = primarystorage5[TestData.url]

        self._verify_attributes(
            primary_storage5.id, primary_storage_url)
Example #25
    def get_target_pool(self, volid):
        target_pools = StoragePool.listForMigration(self.apiclient, id=volid)

        if len(target_pools) < 1:
            self.skipTest("Not enough storage pools found")

        return target_pools[0]
    def test12_primary_storage_with_zero_iops(self):
        primarystorage5 = self.testdata[TestData.primaryStorage5]

        primary_storage5 = StoragePool.create(
            self.apiClient,
            primarystorage5,
            scope=primarystorage5[TestData.scope],
            zoneid=self.zone.id,
            clusterid=self.cluster.id,
            provider=primarystorage5[TestData.provider],
            tags=primarystorage5[TestData.tags],
            capacityiops=primarystorage5[TestData.capacityIops],
            capacitybytes=primarystorage5[TestData.capacityBytes],
            hypervisor=primarystorage5[TestData.hypervisor])

        self.cleanup.append(primary_storage5)
        primary_storage_name = "cloudstack-" + primary_storage5.id
        self.assertEqual(
            any(primary_storage_name == app_instance['name']
                for app_instance in self.datera_api.app_instances.list()),
            True, "app instance not created")

        primary_storage_url = primarystorage5[TestData.url]

        self._verify_attributes(primary_storage5.id, primary_storage_url)
Example #27
    def test09_add_vm_with_datera_storage(self):
        primarystorage = self.testdata[TestData.primaryStorage]

        primary_storage = StoragePool.create(
            self.apiClient,
            primarystorage,
            scope=primarystorage[TestData.scope],
            zoneid=self.zone.id,
            clusterid=self.cluster.id,
            provider=primarystorage[TestData.provider],
            tags=primarystorage[TestData.tags],
            capacityiops=primarystorage[TestData.capacityIops],
            capacitybytes=primarystorage[TestData.capacityBytes],
            hypervisor=primarystorage[TestData.hypervisor]
        )

        primary_storage_url = primarystorage[TestData.url]
        self._verify_attributes(
            primary_storage.id, primary_storage_url)

        self.cleanup.append(primary_storage)
        self.virtual_machine = VirtualMachine.create(
            self.apiClient,
            self.testdata[TestData.virtualMachine],
            accountid=self.account.name,
            zoneid=self.zone.id,
            serviceofferingid=self.compute_offering.id,
            templateid=self.template.id,
            domainid=self.domain.id,
            startvm=True
        )

        self._validate_storage(primary_storage, self.virtual_machine)
Example #28
    def test09_add_vm_with_datera_storage(self):
        primarystorage = self.testdata[TestData.primaryStorage]

        primary_storage = StoragePool.create(
            self.apiClient,
            primarystorage,
            scope=primarystorage[TestData.scope],
            zoneid=self.zone.id,
            clusterid=self.cluster.id,
            provider=primarystorage[TestData.provider],
            tags=primarystorage[TestData.tags],
            capacityiops=primarystorage[TestData.capacityIops],
            capacitybytes=primarystorage[TestData.capacityBytes],
            hypervisor=primarystorage[TestData.hypervisor])

        primary_storage_url = primarystorage[TestData.url]
        self._verify_attributes(primary_storage.id, primary_storage_url)

        self.cleanup.append(primary_storage)
        self.virtual_machine = VirtualMachine.create(
            self.apiClient,
            self.testdata[TestData.virtualMachine],
            accountid=self.account.name,
            zoneid=self.zone.id,
            serviceofferingid=self.compute_offering.id,
            templateid=self.template.id,
            domainid=self.domain.id,
            startvm=True)

        self._validate_storage(primary_storage, self.virtual_machine)
Example #29
    def setUpClass(cls):
        testClient = super(TestVolumes, cls).getClsTestClient()
        cls.apiclient = testClient.getApiClient()
        cls.services = testClient.getParsedTestDataConfig()
        cls._cleanup = []
        # Get Zone, Domain and templates
        cls.domain = get_domain(cls.apiclient)
        cls.zone = get_zone(cls.apiclient, testClient.getZoneForTests())
        cls.services['mode'] = cls.zone.networktype
        cls.hypervisor = testClient.getHypervisorInfo()
        cls.invalidStoragePoolType = False
        cls.disk_offering = DiskOffering.create(cls.apiclient,
                                                cls.services["disk_offering"])
        cls.resized_disk_offering = DiskOffering.create(
            cls.apiclient, cls.services["resized_disk_offering"])
        cls.custom_resized_disk_offering = DiskOffering.create(
            cls.apiclient, cls.services["resized_disk_offering"], custom=True)

        template = get_template(cls.apiclient, cls.zone.id,
                                cls.services["ostype"])
        if template == FAILED:
            assert False, "get_template() failed to return template with description %s" % cls.services[
                "ostype"]

        cls.services["domainid"] = cls.domain.id
        cls.services["zoneid"] = cls.zone.id
        cls.services["template"] = template.id
        cls.services["diskofferingid"] = cls.disk_offering.id
        cls.services['resizeddiskofferingid'] = cls.resized_disk_offering.id
        cls.services[
            'customresizeddiskofferingid'] = cls.custom_resized_disk_offering.id

        # Create VMs, VMs etc
        cls.account = Account.create(cls.apiclient,
                                     cls.services["account"],
                                     domainid=cls.domain.id)
        cls.service_offering = ServiceOffering.create(
            cls.apiclient, cls.services["service_offerings"]["tiny"])
        cls.virtual_machine = VirtualMachine.create(
            cls.apiclient,
            cls.services,
            accountid=cls.account.name,
            domainid=cls.account.domainid,
            serviceofferingid=cls.service_offering.id,
            mode=cls.services["mode"])
        pools = StoragePool.list(cls.apiclient)
        # cls.assertEqual(
        #         validateList(pools)[0],
        #         PASS,
        #         "storage pool list validation failed")

        cls.volume = Volume.create(cls.apiclient,
                                   cls.services,
                                   account=cls.account.name,
                                   domainid=cls.account.domainid)
        cls._cleanup = [
            cls.resized_disk_offering, cls.custom_resized_disk_offering,
            cls.service_offering, cls.disk_offering, cls.volume, cls.account
        ]
    def test_03_try_delete_primary_with_snapshots(self):
        virtual_machine = VirtualMachine.create(
            self.apiclient, {"name": "StorPool-%s" % uuid.uuid4()},
            zoneid=self.zone.id,
            templateid=self.template.id,
            serviceofferingid=self.serviceOfferings.id,
            hypervisor=self.hypervisor,
            rootdisksize=10)

        volume = list_volumes(self.apiclient,
                              virtualmachineid=virtual_machine.id,
                              type="ROOT")

        volume = volume[0]

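        # Derive the StorPool volume name from the volume path reported by
        # CloudStack (fourth path component)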
        name = volume.path.split("/")[3]
        try:
            spvolume = self.spapi.volumeList(volumeName="~" + name)
            if spvolume[0].templateName != self.template_name:
                raise Exception(
                    "StorPool volume's template %s does not match template %s"
                    % (spvolume[0].templateName, self.template_name))
        except spapi.ApiError as err:
            raise Exception(err)

        snapshot = Snapshot.create(
            self.apiclient,
            volume_id=volume.id,
        )
        id = self.helper.get_snapshot_template_id(self.apiclient, snapshot,
                                                  self.storage_pool_id)
        if id is None:
            raise Exception("Could not find the primary storage id")
        virtual_machine.delete(self.apiclient, expunge=True)
        pool = list_storage_pools(self.apiclient, id=id)
        if pool[0].name == self.template_name:
            try:
                StoragePool.delete(self.sp_primary_storage, self.apiclient)
            except Exception as err:
                StoragePool.cancelMaintenance(self.apiclient,
                                              id=self.sp_primary_storage.id)
                self.debug("Storage pool could not be deleted due to %s" % err)
        else:
            self.cleanup.append(snapshot)
            raise Exception("Snapshot is not on the same pool")
        Snapshot.delete(snapshot, self.apiclient)
    def cleanUpCloudStack(cls):
        spapiRemote = spapi.Api.fromConfig()
        remote_cluster = cls.helper.get_remote_storpool_cluster()
        try:

            # Cleanup resources used
            cleanup_resources(cls.apiclient, cls.cleanup)

            StoragePool.delete(cls.sp_primary_storage, cls.apiclient)
            ServiceOffering.delete(cls.serviceOfferings, cls.apiclient)
            spapiRemote.volumeTemplateDelete(templateName=cls.template_name,
                                             clusterName=remote_cluster)
            spapiRemote.volumeTemplateDelete(templateName=cls.template_name)
        except Exception as e:
            raise Exception("Warning: Exception during cleanup : %s" % e)

        return
Example #32
    def test10_add_vm_with_datera_storage_and_volume(self):
        primarystorage = self.testdata[TestData.primaryStorage]

        primary_storage = StoragePool.create(
            self.apiClient,
            primarystorage,
            scope=primarystorage[TestData.scope],
            zoneid=self.zone.id,
            clusterid=self.cluster.id,
            provider=primarystorage[TestData.provider],
            tags=primarystorage[TestData.tags],
            capacityiops=primarystorage[TestData.capacityIops],
            capacitybytes=primarystorage[TestData.capacityBytes],
            hypervisor=primarystorage[TestData.hypervisor]
        )

        primary_storage_url = primarystorage[TestData.url]
        self._verify_attributes(
            primary_storage.id, primary_storage_url)

        self.cleanup.append(primary_storage)
        self.virtual_machine = VirtualMachine.create(
            self.apiClient,
            self.testdata[TestData.virtualMachine],
            accountid=self.account.name,
            zoneid=self.zone.id,
            serviceofferingid=self.compute_offering.id,
            templateid=self.template.id,
            domainid=self.domain.id,
            startvm=True
        )

        self._validate_storage(primary_storage, self.virtual_machine)

        volume = Volume.create(
            self.apiClient,
            self.testdata[TestData.volume_1],
            account=self.account.name,
            domainid=self.domain.id,
            zoneid=self.zone.id,
            diskofferingid=self.disk_offering.id
        )

        self.virtual_machine.attach_volume(
            self.apiClient,
            volume
        )
        storage_pools_response = list_storage_pools(
            self.apiClient, id=primary_storage.id)

        for key, value in self.xen_session.xenapi.SR.get_all_records().items():
            if value['name_description'] == primary_storage.id:
                xen_server_response = value

        self.assertNotEqual(
            int(storage_pools_response[0].disksizeused),
            int(xen_server_response['physical_utilisation']))
Example #33
    def setUpClass(cls):
        try:
            cls._cleanup = []
            cls.testClient = super(testHaPoolMaintenance,
                                   cls).getClsTestClient()
            cls.api_client = cls.testClient.getApiClient()
            cls.services = cls.testClient.getParsedTestDataConfig()
            # Get Domain, Zone, Template
            cls.domain = get_domain(cls.api_client)
            cls.zone = get_zone(cls.api_client,
                                cls.testClient.getZoneForTests())
            cls.template = get_template(cls.api_client, cls.zone.id,
                                        cls.services["ostype"])
            cls.hypervisor = cls.testClient.getHypervisorInfo()
            cls.services['mode'] = cls.zone.networktype
            cls.hypervisor = cls.testClient.getHypervisorInfo()
            cls.services["virtual_machine"]["zoneid"] = cls.zone.id
            cls.services["virtual_machine"]["template"] = cls.template.id
            cls.clusterWithSufficientPool = None
            cls.listResponse = None
            clusters = Cluster.list(cls.api_client, zoneid=cls.zone.id)

            if not validateList(clusters)[0]:

                cls.debug("check list cluster response for zone id %s" %
                          cls.zone.id)
                cls.listResponse = True
                return

            for cluster in clusters:
                cls.pool = StoragePool.list(cls.api_client,
                                            clusterid=cluster.id,
                                            keyword="NetworkFilesystem")

                if not validateList(cls.pool)[0]:

                    cls.debug("check list cluster response for zone id %s" %
                              cls.zone.id)
                    cls.listResponse = True
                    return

                if len(cls.pool) >= 2:
                    cls.clusterWithSufficientPool = cluster
                    break
            if not cls.clusterWithSufficientPool:
                return

            cls.services["service_offerings"]["tiny"]["offerha"] = "True"

            cls.services_off = ServiceOffering.create(
                cls.api_client, cls.services["service_offerings"]["tiny"])
            cls._cleanup.append(cls.services_off)

        except Exception as e:
            cls.tearDownClass()
            raise Exception("Warning: Exception in setup : %s" % e)
        return
    def test07_update_primary_storage_capacityBytes(self):
        updatedDiskSize = self.testdata[TestData.newCapacityBytes]
        StoragePool.update(self.apiClient,
                           id=self.primary_storage_id,
                           capacitybytes=updatedDiskSize,
                           tags=self.primary_tag)

        # Verify in CloudStack
        storage_pools_response = list_storage_pools(
            self.apiClient, clusterid=self.cluster.id)
        for data in storage_pools_response:
            if data.id == self.primary_storage_id:
                storage_pool = data

        self.assertEqual(
            storage_pool.disksizetotal, updatedDiskSize,
            "Primary storage not updated")

        # Verify in datera
        datera_primary_storage_name = "cloudstack-" + self.primary_storage_id
        for instance in self.datera_api.app_instances.list():
            if instance['name'] == datera_primary_storage_name:
                datera_instance = instance
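        # Datera reports the volume size in GiB; multiply by 1073741824 (2**30)
        # to compare in bytes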
        app_instance_response_disk_size = (
            datera_instance['storage_instances']
            ['storage-1']['volumes']['volume-1']['size'] * 1073741824)

        self.assertEqual(
            app_instance_response_disk_size, updatedDiskSize,
            "app-instance not updated")

        # Verify in xenserver
        #for key, value in self.xen_session.xenapi.SR.get_all_records().items():
        #    if value['name_description'] == self.primary_storage_id:
        #        xen_sr = value
        #Uncomment after xen fix
        #print xen_sr
        #print xen_sr['physical_size'], updatedDiskSize
        #self.assertEqual(
        #    int(xen_sr['physical_size']) + 12582912, updatedDiskSize,
        #    "Xen server physical storage not updated")

        StoragePool.delete(self.primary_storage, self.apiClient)
        self.cleanup = []
Example #35
    def setUpClass(cls):
        testClient = super(TestAttachDataDisk, cls).getClsTestClient()
        cls.apiclient = testClient.getApiClient()
        cls.testdata = testClient.getParsedTestDataConfig()
        cls.hypervisor = cls.testClient.getHypervisorInfo()

        # Get Zone, Domain and templates
        cls.domain = get_domain(cls.apiclient)
        cls.zone = get_zone(cls.apiclient, testClient.getZoneForTests())
        cls._cleanup = []
        cls.template = get_template(
            cls.apiclient,
            cls.zone.id,
            cls.testdata["ostype"])
        cls.skiptest = False

        try:
            cls.pools = StoragePool.list(cls.apiclient, zoneid=cls.zone.id)
        except Exception as e:
            cls.skiptest = True
            return
        try:

            # Create an account
            cls.account = Account.create(
                cls.apiclient,
                cls.testdata["account"],
                domainid=cls.domain.id
            )
            cls._cleanup.append(cls.account)

            # Create user api client of the account
            cls.userapiclient = testClient.getUserApiClient(
                UserName=cls.account.name,
                DomainName=cls.account.domain
            )
            # Create Service offering
            cls.service_offering_zone1 = ServiceOffering.create(
                cls.apiclient,
                cls.testdata["service_offering"],
                tags=ZONETAG1
            )
            cls._cleanup.append(cls.service_offering_zone1)

            # Create Disk offering
            cls.disk_offering = DiskOffering.create(
                cls.apiclient,
                cls.testdata["disk_offering"]
            )

            cls._cleanup.append(cls.disk_offering)

        except Exception as e:
            cls.tearDownClass()
            raise e
        return
    def test07_update_primary_storage_capacityBytes(self):
        updatedDiskSize = self.testdata[TestData.newCapacityBytes]
        StoragePool.update(self.apiClient,
                           id=self.primary_storage_id,
                           capacitybytes=updatedDiskSize,
                           tags=self.primary_tag)

        # Verify in CloudStack
        storage_pools_response = list_storage_pools(self.apiClient,
                                                    clusterid=self.cluster.id)
        for data in storage_pools_response:
            if data.id == self.primary_storage_id:
                storage_pool = data

        self.assertEqual(storage_pool.disksizetotal, updatedDiskSize,
                         "Primary storage not updated")

        # Verify in datera
        datera_primary_storage_name = "cloudstack-" + self.primary_storage_id
        for instance in self.datera_api.app_instances.list():
            if instance['name'] == datera_primary_storage_name:
                datera_instance = instance
        app_instance_response_disk_size = (
            datera_instance['storage_instances']['storage-1']['volumes']
            ['volume-1']['size'] * 1073741824)

        self.assertEqual(app_instance_response_disk_size, updatedDiskSize,
                         "app-instance not updated")

        # Verify in xenserver
        #for key, value in self.xen_session.xenapi.SR.get_all_records().items():
        #    if value['name_description'] == self.primary_storage_id:
        #        xen_sr = value
        #Uncomment after xen fix
        #print xen_sr
        #print xen_sr['physical_size'], updatedDiskSize
        #self.assertEqual(
        #    int(xen_sr['physical_size']) + 12582912, updatedDiskSize,
        #    "Xen server physical storage not updated")

        StoragePool.delete(self.primary_storage, self.apiClient)
        self.cleanup = []
Example #37
 def test_01_migrateVolume(self):
     """
     @Desc:Volume is not retaining same uuid when migrating from one
           storage to another.
     Step1:Create a volume/data disk
     Step2:Verify UUID of the volume
     Step3:Migrate the volume to another primary storage within
           the cluster
     Step4:Migrating volume to new primary storage should succeed
     Step5:volume UUID should not change even after migration
     """
     vol = Volume.create(
         self.apiclient,
         self.services["volume"],
         diskofferingid=self.disk_offering.id,
         zoneid=self.zone.id,
         account=self.account.name,
         domainid=self.account.domainid,
     )
     self.assertIsNotNone(vol, "Failed to create volume")
     vol_res = Volume.list(self.apiclient, id=vol.id)
     self.assertEqual(validateList(vol_res)[0], PASS, "Invalid response returned for list volumes")
     vol_uuid = vol_res[0].id
     try:
         self.virtual_machine.attach_volume(self.apiclient, vol)
     except Exception as e:
         self.fail("Attaching data disk to vm failed with error %s" % e)
     pools = StoragePool.listForMigration(self.apiclient, id=vol.id)
     if not pools:
          self.skipTest(
              "No suitable storage pools found for volume migration. "
              "Skipping"
          )
     self.assertEqual(validateList(pools)[0], PASS, "invalid pool response from findStoragePoolsForMigration")
     pool = pools[0]
     self.debug("Migrating Volume-ID: %s to Pool: %s" % (vol.id, pool.id))
     try:
         Volume.migrate(self.apiclient, volumeid=vol.id, storageid=pool.id, livemigrate="true")
     except Exception as e:
         self.fail("Volume migration failed with error %s" % e)
     migrated_vols = Volume.list(
         self.apiclient, virtualmachineid=self.virtual_machine.id, listall="true", type="DATADISK"
     )
     self.assertEqual(validateList(migrated_vols)[0], PASS, "invalid volumes response after migration")
     migrated_vol_uuid = migrated_vols[0].id
     self.assertEqual(
         vol_uuid,
          migrated_vol_uuid,
          "Volume is not retaining same uuid when migrating from one "
          "storage to another",
     )
     self.virtual_machine.detach_volume(self.apiclient, vol)
     self.cleanup.append(vol)
     return
    def setUpClass(cls):
        testClient = super(TestVolumes, cls).getClsTestClient()
        cls.apiclient = testClient.getApiClient()
        cls.services = testClient.getParsedTestDataConfig()
        cls._cleanup = []
        # Get Zone, Domain and templates
        cls.domain = get_domain(cls.apiclient)
        cls.zone = get_zone(cls.apiclient, testClient.getZoneForTests())
        cls.services["mode"] = cls.zone.networktype
        cls.hypervisor = testClient.getHypervisorInfo()
        cls.invalidStoragePoolType = False
        cls.disk_offering = DiskOffering.create(cls.apiclient, cls.services["disk_offering"])
        cls.resized_disk_offering = DiskOffering.create(cls.apiclient, cls.services["resized_disk_offering"])
        cls.custom_resized_disk_offering = DiskOffering.create(
            cls.apiclient, cls.services["resized_disk_offering"], custom=True
        )

        template = get_template(cls.apiclient, cls.zone.id, cls.services["ostype"])
        if template == FAILED:
            assert False, "get_template() failed to return template with description %s" % cls.services["ostype"]

        cls.services["domainid"] = cls.domain.id
        cls.services["zoneid"] = cls.zone.id
        cls.services["template"] = template.id
        cls.services["diskofferingid"] = cls.disk_offering.id
        cls.services["resizeddiskofferingid"] = cls.resized_disk_offering.id
        cls.services["customresizeddiskofferingid"] = cls.custom_resized_disk_offering.id

        # Create VMs, VMs etc
        cls.account = Account.create(cls.apiclient, cls.services["account"], domainid=cls.domain.id)
        cls.service_offering = ServiceOffering.create(cls.apiclient, cls.services["service_offerings"]["tiny"])
        cls.virtual_machine = VirtualMachine.create(
            cls.apiclient,
            cls.services,
            accountid=cls.account.name,
            domainid=cls.account.domainid,
            serviceofferingid=cls.service_offering.id,
            mode=cls.services["mode"],
        )
        pools = StoragePool.list(cls.apiclient)
        # cls.assertEqual(
        #         validateList(pools)[0],
        #         PASS,
        #         "storage pool list validation failed")

        cls.volume = Volume.create(cls.apiclient, cls.services, account=cls.account.name, domainid=cls.account.domainid)
        cls._cleanup = [
            cls.resized_disk_offering,
            cls.custom_resized_disk_offering,
            cls.service_offering,
            cls.disk_offering,
            cls.volume,
            cls.account,
        ]
    def test06_primary_storage_cancel_maintenance_mode(self):
        StoragePool.enableMaintenance(self.apiClient,
                                      id=self.primary_storage_id)
        StoragePool.cancelMaintenance(self.apiClient,
                                      id=self.primary_storage_id)

        # Verify in CloudStack
        storage_pools_response = list_storage_pools(self.apiClient,
                                                    clusterid=self.cluster.id)
        for storage in storage_pools_response:
            if storage.id == self.primary_storage_id:
                storage_pool = storage
        self.assertEqual(storage_pool.state, "Up",
                         "Primary storage not in up mode")

        # Verify in datera
        datera_primary_storage_name = "cloudstack-" + self.primary_storage_id
        for instance in self.datera_api.app_instances.list():
            if instance['name'] == datera_primary_storage_name:
                datera_instance = instance
        self.assertEqual(datera_instance["admin_state"], "online",
                         "app-instance not in online mode")

        # Verify in xenserver
        for key, value in self.xen_session.xenapi.SR.get_all_records().items():
            if value['name_description'] == self.primary_storage_id:
                xen_sr = value
        self.assertEqual(
            set(["forget", "destroy"]).issubset(xen_sr["allowed_operations"]),
            False, "Xenserver SR in offline mode")

        StoragePool.delete(self.primary_storage, self.apiClient)
        self.cleanup = []
    def test04_delete_primary_storage(self):
        #cleanup_resources(self.apiClient, self._primary_storage)
        StoragePool.delete(self.primary_storage, self.apiClient)
        self.cleanup = []

        # Verify in Cloudstack
        storage_pools_response = list_storage_pools(
            self.apiClient, clusterid=self.cluster.id)
        if len(storage_pools_response) > 0:
            for storage in storage_pools_response:
                self.assertNotEqual(
                    storage.id,
                    self.primary_storage_id,
                    "Primary storage not deleted")

        # Verify in Datera
        flag = 0
        datera_primary_storage_name = "cloudstack-" + self.primary_storage_id
        for item in self.datera_api.app_instances.list():
            if item['name'] == datera_primary_storage_name:
                flag = 1
        self.assertEqual(flag, 0, "app instance not deleted.")

        # Verify in xenserver
        for key, value in self.xen_session.xenapi.SR.get_all_records().items():
            self.assertNotEqual(
                value['name_description'],
                self.primary_storage_id,
                "SR not deleted in xenserver")

        # Verify in sql database
        command = "select uuid from storage_pool"
        sql_result = self.dbConnection.execute(command)
        key = 0
        for uuid in sql_result:
            if uuid[0] == self.primary_storage_id:
                key = 1
        self.assertEqual(
            key, 0, "Primary storage not deleted in database")
    def tearDownClass(cls):
        try:
            # Cleanup resources used

            if cls.updateclone:
                Configurations.update(cls.api_client,
                                              "vmware.root.disk.controller",
                                              value=cls.defaultdiskcontroller)
                Configurations.update(cls.api_client,
                                              "vmware.create.full.clone",
                                              value="false")
                Configurations.update(cls.api_client,
                                      "vmware.create.full.clone",
                                      value="false", storageid=cls.storageID)
                if cls.storageID:
                    StoragePool.update(cls.api_client, id=cls.storageID,
                                    tags="")

            cleanup_resources(cls.api_client, cls._cleanup)
        except Exception as e:
            raise Exception("Warning: Exception during cleanup : %s" % e)
        return
Example #44
    def updateVmwareSettings(cls, tearDown):
        value = "false"
        if not tearDown:
            value = "true"
        if cls.hypervisor.lower() == 'vmware':
            Configurations.update(cls.apiclient, "vmware.create.full.clone",
                                  value)
            allStoragePools = StoragePool.list(cls.apiclient)
            for pool in allStoragePools:
                Configurations.update(cls.apiclient,
                                      storageid=pool.id,
                                      name="vmware.create.full.clone",
                                      value=value)
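    # A brief usage note (an assumption, not shown in the original example):
    # the tearDown flag flips vmware.create.full.clone back to "false", so
    # the helper is typically called once from setUpClass and once from
    # tearDownClass, e.g.
    #
    #     cls.updateVmwareSettings(False)   # setUpClass: force full clones
    #     ...
    #     cls.updateVmwareSettings(True)    # tearDownClass: restore default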
Example #45
    def check_storage_pools(self, virtualmachineid):
        """
        List storage pools available to the VM
        """
        vm = VirtualMachine.list(self.apiClient, id=virtualmachineid)[0]
        hostid = vm.hostid
        host = Host.list(self.apiClient, id=hostid)[0]
        clusterid = host.clusterid
        storage_pools = StoragePool.list(self.apiClient, clusterid=clusterid)
        if len(storage_pools) < 2:
            self.skipTest(
                "at least two accessible primary storage pools needed for the vm to perform this test"
            )
        return storage_pools
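    # A minimal usage sketch (an assumption, not part of the original
    # example): once check_storage_pools() has confirmed that at least two
    # pools are reachable from the VM's cluster, a migration test could pick
    # the pool that does not currently hold the ROOT volume as the
    # destination. Volume is assumed to be imported from marvin.lib.base,
    # and storageid follows the listVolumes response field used elsewhere in
    # this listing.
    def pick_destination_pool(self, virtualmachineid):
        pools = self.check_storage_pools(virtualmachineid)
        root_volume = Volume.list(self.apiClient,
                                  virtualmachineid=virtualmachineid,
                                  type='ROOT')[0]
        # Return the first pool the ROOT volume is not on, or None.
        for pool in pools:
            if pool.id != root_volume.storageid:
                return pool
        return None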
Example #46
    def test10_add_vm_with_datera_storage_and_volume(self):
        primarystorage = self.testdata[TestData.primaryStorage]

        primary_storage = StoragePool.create(
            self.apiClient,
            primarystorage,
            scope=primarystorage[TestData.scope],
            zoneid=self.zone.id,
            clusterid=self.cluster.id,
            provider=primarystorage[TestData.provider],
            tags=primarystorage[TestData.tags],
            capacityiops=primarystorage[TestData.capacityIops],
            capacitybytes=primarystorage[TestData.capacityBytes],
            hypervisor=primarystorage[TestData.hypervisor])

        primary_storage_url = primarystorage[TestData.url]
        self._verify_attributes(primary_storage.id, primary_storage_url)

        self.cleanup.append(primary_storage)
        self.virtual_machine = VirtualMachine.create(
            self.apiClient,
            self.testdata[TestData.virtualMachine],
            accountid=self.account.name,
            zoneid=self.zone.id,
            serviceofferingid=self.compute_offering.id,
            templateid=self.template.id,
            domainid=self.domain.id,
            startvm=True)

        self._validate_storage(primary_storage, self.virtual_machine)

        volume = Volume.create(self.apiClient,
                               self.testdata[TestData.volume_1],
                               account=self.account.name,
                               domainid=self.domain.id,
                               zoneid=self.zone.id,
                               diskofferingid=self.disk_offering.id)

        self.virtual_machine.attach_volume(self.apiClient, volume)
        storage_pools_response = list_storage_pools(self.apiClient,
                                                    id=primary_storage.id)

        for key, value in self.xen_session.xenapi.SR.get_all_records().items():
            if value['name_description'] == primary_storage.id:
                xen_server_response = value

        self.assertNotEqual(int(storage_pools_response[0].disksizeused),
                            int(xen_server_response['physical_utilisation']))
    def test_add_remove_host_with_solidfire_plugin_2(self):
        primarystorage2 = self.testdata[TestData.primaryStorage2]

        primary_storage_2 = StoragePool.create(
            self.apiClient,
            primarystorage2,
            scope=primarystorage2[TestData.scope],
            zoneid=self.zone.id,
            clusterid=self.cluster.id,
            provider=primarystorage2[TestData.provider],
            tags=primarystorage2[TestData.tags],
            capacityiops=primarystorage2[TestData.capacityIops],
            capacitybytes=primarystorage2[TestData.capacityBytes],
            hypervisor=primarystorage2[TestData.hypervisor]
        )

        self.cleanup.append(primary_storage_2)

        sf_iscsi_name = self._get_iqn_2(primary_storage_2)

        self._perform_add_remove_host(primary_storage_2.id, sf_iscsi_name)
    def setUp(self):
        primarystorage = self.testdata[TestData.primaryStorage]

        self.primary_storage = StoragePool.create(
            self.apiClient,
            primarystorage,
            scope=primarystorage[TestData.scope],
            zoneid=self.zone.id,
            clusterid=self.cluster.id,
            provider=primarystorage[TestData.provider],
            tags=primarystorage[TestData.tags],
            capacityiops=primarystorage[TestData.capacityIops],
            capacitybytes=primarystorage[TestData.capacityBytes],
            hypervisor=primarystorage[TestData.hypervisor]
        )
        self.primary_storage_id = self.primary_storage.id
        self._primary_storage = [self.primary_storage]
        self.primary_tag = primarystorage[TestData.tags]
        self.cleanup = [self.primary_storage]
        primary_storage_url = primarystorage[TestData.url]
        self._verify_priamry_storage(
            self.primary_storage_id, primary_storage_url)
    def test_05_storage_pools(self):
        """Check the status of Storage pools"""

        # Validate the following
        # 1. List storage pools for the zone
        # 2. Check state is "enabled" or not

        storage_pools = StoragePool.list(
                          self.apiclient,
                          zoneid=self.zone.id,
                          listall=True
                          )
        self.assertEqual(
                         isinstance(storage_pools, list),
                         True,
                         "Check if listStoragePools returns a valid response"
                         )
        for storage_pool in storage_pools:
            self.assertEqual(
                             storage_pool.state,
                             'Up',
                             "storage pool should be in Up state and running"
                             )
        return
    def test_01_recover_VM(self):
        """ Test Restore VM on VMWare
            1. Deploy a VM without datadisk
            2. Restore the VM
            3. Verify that VM comes up in Running state
        """
        try:
            self.pools = StoragePool.list(
                self.apiclient,
                zoneid=self.zone.id,
                scope="CLUSTER")

            status = validateList(self.pools)

            # Step 3
            self.assertEqual(
                status[0],
                PASS,
                "Check: Failed to list  cluster wide storage pools")

            if len(self.pools) < 2:
                self.skipTest("There must be at atleast two cluster wide\
                storage pools available in the setup")

        except Exception as e:
            self.skipTest(e)

        # Adding tags to Storage Pools
        cluster_no = 1
        StoragePool.update(
            self.apiclient,
            id=self.pools[0].id,
            tags=[CLUSTERTAG1[:-1] + repr(cluster_no)])

        self.vm = VirtualMachine.create(
            self.apiclient,
            self.testdata["small"],
            accountid=self.account.name,
            templateid=self.template.id,
            domainid=self.account.domainid,
            serviceofferingid=self.service_offering_cwps.id,
            zoneid=self.zone.id,
        )
        # Step 2

        volumes_root_list = list_volumes(
            self.apiclient,
            virtualmachineid=self.vm.id,
            type=ROOT,
            listall=True
        )

        root_volume = volumes_root_list[0]

        # Restore VM till its ROOT disk is recreated on another Primary Storage
        while True:
            self.vm.restore(self.apiclient)
            volumes_root_list = list_volumes(
                self.apiclient,
                virtualmachineid=self.vm.id,
                type=ROOT,
                listall=True
            )

            root_volume = volumes_root_list[0]

            if root_volume.storage != self.pools[0].name:
                break

        # Step 3
        vm_list = list_virtual_machines(
            self.apiclient,
            id=self.vm.id)

        state = vm_list[0].state
        i = 0
        while(state != "Running"):
            vm_list = list_virtual_machines(
                self.apiclient,
                id=self.vm.id)

            time.sleep(10)
            i = i + 1
            state = vm_list[0].state
            if i >= 10:
                self.fail("Restore VM Failed")

        return
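    # A minimal sketch (an assumption, not part of the original test): the
    # "while True" restore loop above retries until the ROOT disk lands on a
    # different pool and could, in principle, spin forever. A bounded variant
    # using the same list_volumes/ROOT helpers might look like this;
    # max_attempts is an arbitrary illustrative value.
    def restore_until_pool_changes(self, vm, original_pool_name, max_attempts=5):
        for _ in range(max_attempts):
            vm.restore(self.apiclient)
            root_volume = list_volumes(
                self.apiclient,
                virtualmachineid=vm.id,
                type=ROOT,
                listall=True)[0]
            if root_volume.storage != original_pool_name:
                return root_volume
        self.fail("ROOT disk was not recreated on another primary storage "
                  "after %d restore attempts" % max_attempts)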
    def setUpClass(cls):
        # Set up API client
        testclient = super(TestVMSnapshots, cls).getClsTestClient()
        cls.apiClient = testclient.getApiClient()

        cls.testdata = TestData().testdata

        # Set up XenAPI connection
        host_ip = "https://" + \
                  list_hosts(cls.apiClient, clusterid=cls.testdata[TestData.clusterId], name="XenServer-6.5-1")[0].ipaddress

        cls.xen_session = XenAPI.Session(host_ip)

        xenserver = cls.testdata[TestData.xenServer]

        cls.xen_session.xenapi.login_with_password(xenserver[TestData.username], xenserver[TestData.password])

        # Set up SolidFire connection
        cls.sf_client = sf_api.SolidFireAPI(endpoint_dict=cls.testdata[TestData.solidFire])

        # Get Resources from Cloud Infrastructure
        cls.zone = get_zone(cls.apiClient, zone_id=cls.testdata[TestData.zoneId])
        template = get_template(cls.apiClient, cls.zone.id, template_name=cls.testdata[TestData.templateName])
        cls.domain = get_domain(cls.apiClient, cls.testdata[TestData.domainId])

        # Create test account
        cls.account = Account.create(
            cls.apiClient,
            cls.testdata[TestData.account],
            admin=1
        )

        # Set up connection to make customized API calls
        user = User.create(
            cls.apiClient,
            cls.testdata[TestData.user],
            account=cls.account.name,
            domainid=cls.domain.id
        )

        url = cls.testdata[TestData.url]

        api_url = "http://" + url + ":8080/client/api"
        userkeys = User.registerUserKeys(cls.apiClient, user.id)

        cls.cs_api = SignedAPICall.CloudStack(api_url, userkeys.apikey, userkeys.secretkey)

        primarystorage = cls.testdata[TestData.primaryStorage]

        cls.primary_storage = StoragePool.create(
            cls.apiClient,
            primarystorage,
            scope=primarystorage[TestData.scope],
            zoneid=cls.zone.id,
            provider=primarystorage[TestData.provider],
            tags=primarystorage[TestData.tags],
            capacityiops=primarystorage[TestData.capacityIops],
            capacitybytes=primarystorage[TestData.capacityBytes],
            hypervisor=primarystorage[TestData.hypervisor]
        )

        compute_offering = ServiceOffering.create(
            cls.apiClient,
            cls.testdata[TestData.computeOffering]
        )

        cls.disk_offering = DiskOffering.create(
            cls.apiClient,
            cls.testdata[TestData.diskOffering]
        )

        # Create VM and volume for tests
        cls.virtual_machine = VirtualMachine.create(
            cls.apiClient,
            cls.testdata[TestData.virtualMachine],
            accountid=cls.account.name,
            zoneid=cls.zone.id,
            serviceofferingid=compute_offering.id,
            templateid=template.id,
            domainid=cls.domain.id,
            startvm=True
        )

        cls._cleanup = [
            cls.virtual_machine,
            compute_offering,
            cls.disk_offering,
            user,
            cls.account
        ]
Example #52
    def test_08_resize_volume(self):
        """Test resize a volume"""
        # Verify the new size is what we wanted it to be.
        self.debug(
                "Attaching volume (ID: %s) to VM (ID: %s)" % (
                                                    self.volume.id,
                                                    self.virtual_machine.id
                                                    ))

        self.virtual_machine.attach_volume(self.apiClient, self.volume)
        self.attached = True
        hosts = Host.list(self.apiClient, id=self.virtual_machine.hostid)
        self.assertTrue(isinstance(hosts, list))
        self.assertTrue(len(hosts) > 0)
        self.debug("Found %s host" % hosts[0].hypervisor)

        if hosts[0].hypervisor == "XenServer":
            self.virtual_machine.stop(self.apiClient)
        elif hosts[0].hypervisor.lower() in ("vmware", "hyperv"):
            self.skipTest("Resize Volume is unsupported on VmWare and Hyper-V")

        # resize the data disk
        self.debug("Resize Volume ID: %s" % self.volume.id)

        self.services["disk_offering"]["disksize"] = 20
        disk_offering_20_GB = DiskOffering.create(
                                    self.apiclient,
                                    self.services["disk_offering"]
                                    )
        self.cleanup.append(disk_offering_20_GB)

        cmd                = resizeVolume.resizeVolumeCmd()
        cmd.id             = self.volume.id
        cmd.diskofferingid = disk_offering_20_GB.id

        self.apiClient.resizeVolume(cmd)

        count = 0
        success = False
        while count < 3:
            list_volume_response = Volume.list(
                                                self.apiClient,
                                                id=self.volume.id,
                                                type='DATADISK'
                                                )
            for vol in list_volume_response:
                if vol.id == self.volume.id and int(vol.size) == (int(disk_offering_20_GB.disksize) * (1024** 3)) and vol.state == 'Ready':
                    success = True
            if success:
                break
            else:
                time.sleep(10)
                count += 1

        self.assertEqual(
                         success,
                         True,
                         "Check if the data volume resized appropriately"
                         )

        can_shrink = False

        list_volume_response = Volume.list(
                                            self.apiClient,
                                            id=self.volume.id,
                                            type='DATADISK'
                                            )
        storage_pool_id = [x.storageid for x in list_volume_response if x.id == self.volume.id][0]
        storage = StoragePool.list(self.apiclient, id=storage_pool_id)[0]
        # At present only CLVM supports shrinking volumes
        if storage.type.lower() == "clvm":
            can_shrink = True

        if can_shrink:
            self.services["disk_offering"]["disksize"] = 10
            disk_offering_10_GB = DiskOffering.create(
                                        self.apiclient,
                                        self.services["disk_offering"]
                                        )
            self.cleanup.append(disk_offering_10_GB)

            cmd                = resizeVolume.resizeVolumeCmd()
            cmd.id             = self.volume.id
            cmd.diskofferingid = disk_offering_10_GB.id
            cmd.shrinkok       = "true"

            self.apiClient.resizeVolume(cmd)

            count = 0
            success = False
            while count < 3:
                list_volume_response = Volume.list(
                                                    self.apiClient,
                                                    id=self.volume.id
                                                    )
                for vol in list_volume_response:
                    if vol.id == self.volume.id and int(vol.size) == (int(disk_offering_10_GB.disksize) * (1024 ** 3)) and vol.state == 'Ready':
                        success = True
                if success:
                    break
                else:
                    time.sleep(10)
                    count += 1

            self.assertEqual(
                             success,
                             True,
                             "Check if the root volume resized appropriately"
                             )

        #start the vm if it is on xenserver

        if hosts[0].hypervisor == "XenServer":
            self.virtual_machine.start(self.apiClient)
            time.sleep(30)
        return
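    # A minimal sketch (an assumption, not shown in the original example):
    # the two polling loops above repeat the same wait-for-resize logic, so a
    # shared helper could poll Volume.list until the volume reports the
    # expected size in the Ready state, returning False on timeout.
    def wait_for_volume_size(self, volume_id, expected_size_gb,
                             retries=3, interval=10):
        expected_bytes = int(expected_size_gb) * (1024 ** 3)
        for _ in range(retries):
            for vol in Volume.list(self.apiClient, id=volume_id):
                if (vol.id == volume_id and int(vol.size) == expected_bytes
                        and vol.state == 'Ready'):
                    return True
            time.sleep(interval)
        return False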
    def test_01_attach_datadisk_to_vm_on_zwps(self):
        """ Attach Data Disk To VM on ZWPS
            1.  Check if zwps storage pool exists.
            2.  Adding tag to zone wide primary storage
            3.  Launch a VM on ZWPS
            4.  Attach data disk to vm which is on zwps.
            5.  Verify disk is attached.
        """

        # Step 1
        if len(list(storagePool for storagePool in self.pools
                    if storagePool.scope == "ZONE")) < 1:
            self.skipTest("There must be at least one zone wide \
                storage pools available in the setup")

        # Adding tags to Storage Pools
        zone_no = 1
        for storagePool in self.pools:
            if storagePool.scope == "ZONE":
                StoragePool.update(
                    self.apiclient,
                    id=storagePool.id,
                    tags=[ZONETAG1[:-1] + repr(zone_no)])
                zone_no += 1

        self.vm = VirtualMachine.create(
            self.apiclient,
            self.testdata["small"],
            templateid=self.template.id,
            accountid=self.account.name,
            domainid=self.account.domainid,
            serviceofferingid=self.service_offering_zone1.id,
            zoneid=self.zone.id
        )

        self.data_volume_created = Volume.create(
            self.userapiclient,
            self.testdata["volume"],
            zoneid=self.zone.id,
            account=self.account.name,
            domainid=self.account.domainid,
            diskofferingid=self.disk_offering.id
        )

        self.cleanup.append(self.data_volume_created)

        # Step 2
        self.vm.attach_volume(
            self.userapiclient,
            self.data_volume_created
        )

        data_volumes_list = Volume.list(
            self.userapiclient,
            id=self.data_volume_created.id,
            virtualmachineid=self.vm.id
        )

        data_volume = data_volumes_list[0]

        status = validateList(data_volume)

        # Step 3
        self.assertEqual(
            status[0],
            PASS,
            "Check: Data if Disk is attached to VM")

        return
    def test_02_storage_migrate_root_and_data_disks(self):
        primarystorage2 = self.testdata[TestData.primaryStorage2]

        primary_storage_2 = StoragePool.create(
            self.apiClient,
            primarystorage2,
            clusterid=self.cluster_1.id
        )

        primary_storage_3 = StoragePool.create(
            self.apiClient,
            primarystorage2,
            clusterid=self.cluster_2.id
        )

        src_host, dest_host = self._get_source_and_dest_hosts()

        virtual_machine = VirtualMachine.create(
            self.apiClient,
            self.testdata[TestData.virtualMachine],
            accountid=self.account.name,
            zoneid=self.zone.id,
            serviceofferingid=self.compute_offering_3.id,
            templateid=self.template.id,
            domainid=self.domain.id,
            hostid=src_host.id,
            startvm=True
        )

        cs_data_volume = Volume.create(
            self.apiClient,
            self.testdata[TestData.volume_1],
            account=self.account.name,
            domainid=self.domain.id,
            zoneid=self.zone.id,
            diskofferingid=self.disk_offering_1.id
        )

        self.cleanup = [
            virtual_machine,
            cs_data_volume,
            primary_storage_2,
            primary_storage_3
        ]

        cs_data_volume = virtual_machine.attach_volume(
            self.apiClient,
            cs_data_volume
        )

        sf_account_id = sf_util.get_sf_account_id(self.cs_api, self.account.id, self.primary_storage.id, self,
                                                  TestVMMigrationWithStorage._sf_account_id_should_be_non_zero_int_err_msg)

        sf_volumes = sf_util.get_active_sf_volumes(self.sfe, sf_account_id)

        sf_data_volume = sf_util.check_and_get_sf_volume(sf_volumes, cs_data_volume.name, self)

        sf_data_volume = self._migrate_and_verify_one_disk_only(virtual_machine, dest_host, cs_data_volume, sf_account_id,
                                                                sf_data_volume, self.xen_session_1, self.xen_session_2)

        src_host, dest_host = dest_host, src_host

        self._migrate_and_verify_one_disk_only(virtual_machine, dest_host, cs_data_volume, sf_account_id, sf_data_volume,
                                               self.xen_session_2, self.xen_session_1)
    def setUpClass(cls):
        # Set up API client
        testclient = super(TestVMMigrationWithStorage, cls).getClsTestClient()

        cls.apiClient = testclient.getApiClient()
        cls.configData = testclient.getParsedTestDataConfig()
        cls.dbConnection = testclient.getDbConnection()

        cls.testdata = TestData().testdata

        xenserver = cls.testdata[TestData.xenServer]

        # Set up xenAPI connection
        host_ip = "https://" + \
                  list_hosts(cls.apiClient, clusterid=cls.testdata[TestData.clusterId1], name="XenServer-6.5-1")[0].ipaddress

        # Set up XenAPI connection
        cls.xen_session_1 = XenAPI.Session(host_ip)

        cls.xen_session_1.xenapi.login_with_password(xenserver[TestData.username], xenserver[TestData.password])

        # Set up xenAPI connection
        host_ip = "https://" + \
                  list_hosts(cls.apiClient, clusterid=cls.testdata[TestData.clusterId2], name="XenServer-6.5-3")[0].ipaddress

        # Set up XenAPI connection
        cls.xen_session_2 = XenAPI.Session(host_ip)

        cls.xen_session_2.xenapi.login_with_password(xenserver[TestData.username], xenserver[TestData.password])

        # Set up SolidFire connection
        solidfire = cls.testdata[TestData.solidFire]

        cls.sfe = ElementFactory.create(solidfire[TestData.mvip], solidfire[TestData.username], solidfire[TestData.password])

        # Get Resources from Cloud Infrastructure
        cls.zone = get_zone(cls.apiClient, zone_id=cls.testdata[TestData.zoneId])
        cls.cluster_1 = list_clusters(cls.apiClient, id=cls.testdata[TestData.clusterId1])[0]
        cls.cluster_2 = list_clusters(cls.apiClient, id=cls.testdata[TestData.clusterId2])[0]
        cls.template = get_template(cls.apiClient, cls.zone.id, cls.configData["ostype"])
        cls.domain = get_domain(cls.apiClient, cls.testdata[TestData.domainId])

        # Create test account
        cls.account = Account.create(
            cls.apiClient,
            cls.testdata["account"],
            admin=1
        )

        # Set up connection to make customized API calls
        cls.user = User.create(
            cls.apiClient,
            cls.testdata["user"],
            account=cls.account.name,
            domainid=cls.domain.id
        )

        url = cls.testdata[TestData.url]

        api_url = "http://" + url + ":8080/client/api"
        userkeys = User.registerUserKeys(cls.apiClient, cls.user.id)

        cls.cs_api = SignedAPICall.CloudStack(api_url, userkeys.apikey, userkeys.secretkey)

        primarystorage = cls.testdata[TestData.primaryStorage]

        cls.primary_storage = StoragePool.create(
            cls.apiClient,
            primarystorage
        )

        cls.compute_offering_1 = ServiceOffering.create(
            cls.apiClient,
            cls.testdata[TestData.computeOffering1]
        )

        cls.compute_offering_2 = ServiceOffering.create(
            cls.apiClient,
            cls.testdata[TestData.computeOffering2]
        )

        cls.compute_offering_3 = ServiceOffering.create(
            cls.apiClient,
            cls.testdata[TestData.computeOffering3]
        )

        cls.disk_offering_1 = DiskOffering.create(
            cls.apiClient,
            cls.testdata[TestData.diskOffering1]
        )

        cls.disk_offering_2 = DiskOffering.create(
            cls.apiClient,
            cls.testdata[TestData.diskOffering2]
        )

        # Resources that are to be destroyed
        cls._cleanup = [
            cls.compute_offering_1,
            cls.compute_offering_2,
            cls.compute_offering_3,
            cls.disk_offering_1,
            cls.disk_offering_2,
            cls.user,
            cls.account
        ]
    def setUpClass(cls):
        try:
            cls._cleanup = []
            cls.testClient = super(
                testHaPoolMaintenance,
                cls).getClsTestClient()
            cls.api_client = cls.testClient.getApiClient()
            cls.services = cls.testClient.getParsedTestDataConfig()
            # Get Domain, Zone, Template
            cls.domain = get_domain(cls.api_client)
            cls.zone = get_zone(
                cls.api_client,
                cls.testClient.getZoneForTests())
            cls.template = get_template(
                cls.api_client,
                cls.zone.id,
                cls.services["ostype"]
            )
            cls.hypervisor = cls.testClient.getHypervisorInfo()
            cls.services['mode'] = cls.zone.networktype
            cls.hypervisor = cls.testClient.getHypervisorInfo()
            cls.services["virtual_machine"]["zoneid"] = cls.zone.id
            cls.services["virtual_machine"]["template"] = cls.template.id
            cls.clusterWithSufficientPool = None
            cls.listResponse = None
            clusters = Cluster.list(cls.api_client, zoneid=cls.zone.id)

            if not validateList(clusters)[0]:

                cls.debug(
                    "check list cluster response for zone id %s" %
                    cls.zone.id)
                cls.listResponse = True
                return

            for cluster in clusters:
                cls.pool = StoragePool.list(cls.api_client,
                                            clusterid=cluster.id,
                                            keyword="NetworkFilesystem"
                                            )

                if not validateList(cls.pool)[0]:

                    cls.debug(
                        "check list storage pool response for cluster id %s" %
                        cluster.id)
                    cls.listResponse = True
                    return

                if len(cls.pool) >= 2:
                    cls.clusterWithSufficientPool = cluster
                    break
            if not cls.clusterWithSufficientPool:
                return

            cls.services["service_offerings"][
                "tiny"]["offerha"] = "True"

            cls.services_off = ServiceOffering.create(
                cls.api_client,
                cls.services["service_offerings"]["tiny"])
            cls._cleanup.append(cls.services_off)

        except Exception as e:
            cls.tearDownClass()
            raise Exception("Warning: Exception in setup : %s" % e)
        return
    def test_ha_with_storage_maintenance(self):
        """put storage in maintenance mode and start ha vm and check usage"""
        # Steps
        # 1. Create a Compute service offering with the 'Offer HA' option
        # selected.
        # 2. Create a Guest VM with the compute service offering created above.
        # 3. put PS into maintenance  mode
        # 4. vm should go in stop state
        # 5. start vm ,vm should come up on another storage
        # 6. check usage events are getting generated for root disk

        host = list_hosts(
            self.api_client,
            clusterid=self.clusterWithSufficientPool.id)
        self.assertEqual(validateList(host)[0],
                         PASS,
                         "check list host response for cluster id %s"
                         % self.clusterWithSufficientPool.id)

        self.virtual_machine_with_ha = VirtualMachine.create(
            self.api_client,
            self.services["virtual_machine"],
            accountid=self.account.name,
            domainid=self.account.domainid,
            serviceofferingid=self.services_off.id,
            hostid=host[0].id
        )

        vms = VirtualMachine.list(
            self.api_client,
            id=self.virtual_machine_with_ha.id,
            listall=True,
        )

        self.assertEqual(
            validateList(vms)[0],
            PASS,
            "List VMs should return valid response for deployed VM"
        )

        vm = vms[0]

        self.debug("Deployed VM on host: %s" % vm.hostid)

        # Put storage in maintenance  mode

        self.list_root_volume = Volume.list(self.api_client,
                                            virtualmachineid=vm.id,
                                            type='ROOT',
                                            account=self.account.name,
                                            domainid=self.account.domainid)

        self.assertEqual(validateList(self.list_root_volume)[0],
                         PASS,
                         "check list voume_response for vm id %s" % vm.id)

        self.pool_id = self.dbclient.execute(
            "select pool_id from volumes where uuid = '%s';"
            % self.list_root_volume[0].id)
        self.storageid = self.dbclient.execute(
            "select uuid from storage_pool where id = '%s';"
            % self.pool_id[0][0])

        StoragePool.enableMaintenance(self.api_client,
                                      id=self.storageid[0][0])

        self.virtual_machine_with_ha.start(self.api_client)
        self.events = self.dbclient.execute(
            "select type from usage_event where resource_name='%s';"
            % self.list_root_volume[0].name
        )
        self.assertEqual(len(self.events),
                         3,
                         "check the usage event table for root disk %s"
                         % self.list_root_volume[0].name
                         )

        self.assertEqual(str(self.events[0][0]),
                         "VOLUME.CREATE",
                         "check volume create events for volume %s"
                         % self.list_root_volume[0].name)
        self.assertEqual(str(self.events[1][0]),
                         "VOLUME.DELETE",
                         "check fvolume delete events for volume%s"
                         % self.list_root_volume[0].name)
        self.assertEqual(str(self.events[2][0]),
                         "VOLUME.CREATE",
                         "check volume create events for volume %s"
                         % self.list_root_volume[0].name)
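    # A minimal sketch (an assumption, not part of the original test): the
    # three assertions above check the usage_event rows one at a time; the
    # same verification can be expressed as a single comparison against the
    # expected CREATE/DELETE/CREATE sequence produced when the ROOT disk is
    # recreated on another pool, e.g.
    #     self.assert_usage_event_sequence(
    #         self.list_root_volume[0].name,
    #         ["VOLUME.CREATE", "VOLUME.DELETE", "VOLUME.CREATE"])
    def assert_usage_event_sequence(self, volume_name, expected):
        events = self.dbclient.execute(
            "select type from usage_event where resource_name='%s';"
            % volume_name)
        actual = [str(row[0]) for row in events]
        self.assertEqual(actual, expected,
                         "unexpected usage events for root disk %s" % volume_name)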
    def tearDown(self):
        # Clean up, terminate the created resources
        StoragePool.cancelMaintenance(self.api_client,
                                      id=self.storageid[0][0])
        cleanup_resources(self.api_client, self.cleanup)
        return
    def setUpClass(cls):
        cls.testClient = super(TestResizeVolume, cls).getClsTestClient()
        cls.api_client = cls.testClient.getApiClient()
        cls.hypervisor = (cls.testClient.getHypervisorInfo()).lower()
        cls.storageID = None
        # Fill services from the external config file
        cls.services = cls.testClient.getParsedTestDataConfig()
        # Get Zone, Domain and templates
        cls.domain = get_domain(cls.api_client)
        cls.zone = get_zone(
            cls.api_client,
            cls.testClient.getZoneForTests())
        cls.services["mode"] = cls.zone.networktype
        cls._cleanup = []
        cls.unsupportedStorageType = False
        cls.unsupportedHypervisorType = False
        cls.updateclone = False
        if cls.hypervisor not in ['xenserver', 'kvm', 'vmware']:
            cls.unsupportedHypervisorType = True
            return
        cls.template = get_template(
            cls.api_client,
            cls.zone.id
        )
        cls.services["virtual_machine"]["zoneid"] = cls.zone.id
        cls.services["virtual_machine"]["template"] = cls.template.id
        cls.services["volume"]["zoneid"] = cls.zone.id
        try:
            cls.parent_domain = Domain.create(cls.api_client,
                                              services=cls.services[
                                                  "domain"],
                                              parentdomainid=cls.domain.id)
            cls.parentd_admin = Account.create(cls.api_client,
                                               cls.services["account"],
                                               admin=True,
                                               domainid=cls.parent_domain.id)
            cls._cleanup.append(cls.parentd_admin)
            cls._cleanup.append(cls.parent_domain)
            list_pool_resp = list_storage_pools(
                cls.api_client,
                account=cls.parentd_admin.name,
                domainid=cls.parent_domain.id)
            res = validateList(list_pool_resp)
            if res[2] == INVALID_INPUT:
                raise Exception("Failed to list storage pools - no storage pools found")
            # Identify the storage pool type and set vmware.create.full.clone
            # to true if the storage is VMFS or NFS
            if cls.hypervisor == 'vmware':
                for strpool in list_pool_resp:
                    if strpool.type.lower() in ("vmfs", "networkfilesystem"):
                        list_config_storage_response = list_configurations(
                            cls.api_client,
                            name="vmware.create.full.clone",
                            storageid=strpool.id)
                        res = validateList(list_config_storage_response)
                        if res[2] == INVALID_INPUT:
                            raise Exception("Failed to list configurations")
                        if list_config_storage_response[0].value == "false":
                            Configurations.update(
                                cls.api_client,
                                "vmware.create.full.clone",
                                value="true",
                                storageid=strpool.id)
                            cls.updateclone = True
                            StoragePool.update(cls.api_client,
                                               id=strpool.id,
                                               tags="scsi")
                            cls.storageID = strpool.id
                            cls.unsupportedStorageType = False
                            break
                    else:
                        cls.unsupportedStorageType = True
            # Creating service offering with normal config
            cls.service_offering = ServiceOffering.create(
                cls.api_client,
                cls.services["service_offering"])
            cls.services_offering_vmware = ServiceOffering.create(
                cls.api_client, cls.services["service_offering"], tags="scsi")
            cls._cleanup.extend([cls.service_offering, cls.services_offering_vmware])

        except Exception as e:
            cls.tearDownClass()
            raise Exception("Warning: Exception in setup : %s" % e)
        return