def test_online_migrate_volume_from_nfs_storage_to_managed_storage(self):
        """Live-migrate a VM's root disk from NFS to managed primary storage.

        Only applicable on XenServer; on any other hypervisor the test is a
        no-op.
        """
        if TestData.hypervisor_type != TestData.xenServer:
            return

        vm = VirtualMachine.create(
            self.apiClient,
            self._get_vm_name(),
            accountid=self.account.name,
            zoneid=self.zone.id,
            serviceofferingid=self.compute_offering.id,
            templateid=self.template.id,
            domainid=self.domain.id,
            startvm=True)
        self.cleanup.append(vm)

        # The running VM's only volume is its root disk; confirm it starts
        # out on the NFS-backed pool.
        root_volume = self._get_only_volume(vm.id)
        self._verify_volume_on_primary_storage(root_volume,
                                               self.primary_storage)

        # Live-migrate the root disk to the managed pool, then re-fetch the
        # volume and verify it landed there.
        Volume.migrate(self.apiClient,
                       livemigrate=True,
                       volumeid=root_volume.id,
                       storageid=self.primary_storage_2.id)

        root_volume = self._get_only_volume(vm.id)
        self._verify_volume_on_primary_storage(root_volume,
                                               self.primary_storage_2)
# ---- Example 2 ----
    def test_03_migrate_detached_volume(self):
        """Migrate a detached data volume to another storage pool.

        Steps:
          1. Deploy a VM and create a data disk.
          2. Attach, then detach the disk.
          3. Find a suitable target storage pool for the disk.
          4. Migrate the volume and assert it moved to the target pool.
        """
        virtual_machine = self.deploy_vm()
        data_volume = self.create_volume()

        # Attach/detach so the volume ends up detached but initialized.
        virtual_machine.attach_volume(self.apiclient, data_volume)
        virtual_machine.detach_volume(self.apiclient, data_volume)

        destination_pool = self.get_target_pool(data_volume.id)

        Volume.migrate(self.apiclient,
                       volumeid=data_volume.id,
                       storageid=destination_pool.id)

        migrated = Volume.list(self.apiclient, volume=data_volume.id)[0]
        self.assertEqual(migrated.storageid, destination_pool.id,
                         "Storage pool was not the same as expected")
# ---- Example 3 ----
    def test_11_migrate_volume_and_change_offering(self):
        """Migrate a volume to another primary storage with a new disk offering.

        Steps:
          1. Create a volume with the "Small" disk offering.
          2. Migrate it to another primary storage, switching to "Large".
          3. Verify the migrated volume carries the new offering.
        """
        small_offering = list_disk_offering(self.apiclient, name="Small")[0]
        large_offering = list_disk_offering(self.apiclient, name="Large")[0]

        # BUGFIX: the original used self.apiClient here while every other
        # call in this method uses self.apiclient — TODO confirm against the
        # class fixture, but the inconsistency would normally raise
        # AttributeError.
        volume = Volume.create(self.apiclient,
                               self.services,
                               zoneid=self.zone.id,
                               account=self.account.name,
                               domainid=self.account.domainid,
                               diskofferingid=small_offering.id)
        self.debug("Created a small volume: %s" % volume.id)

        self.virtual_machine.attach_volume(self.apiclient, volume=volume)

        # On KVM the volume is migrated offline, so stop the VM first.
        if self.virtual_machine.hypervisor == "KVM":
            self.virtual_machine.stop(self.apiclient)

        pools = StoragePool.listForMigration(self.apiclient, id=volume.id)
        if pools:
            pool = pools[0]
        else:
            # skipTest raises SkipTest itself; no need to re-raise its
            # (None) return value as the original did.
            self.skipTest(
                "Not enough storage pools found, skipping test")

        # Clear the pool's storage tags so the new offering can land on it.
        if hasattr(pool, 'tags'):
            StoragePool.update(self.apiclient, id=pool.id, tags="")

        self.debug("Migrating Volume-ID: %s to Pool: %s" %
                   (volume.id, pool.id))

        # VMware and XenServer support live volume migration; elsewhere the
        # migration is done offline.
        livemigrate = self.virtual_machine.hypervisor.lower() in (
            "vmware", "xenserver")

        Volume.migrate(self.apiclient,
                       volumeid=volume.id,
                       storageid=pool.id,
                       newdiskofferingid=large_offering.id,
                       livemigrate=livemigrate)

        if self.virtual_machine.hypervisor == "KVM":
            self.virtual_machine.start(self.apiclient)

        migrated_vol = Volume.list(self.apiclient, id=volume.id)[0]
        self.assertEqual(migrated_vol.diskofferingname, large_offering.name,
                         "Offering name did not match with the new one ")
        return
# ---- Example 4 ----
 def test_01_migrateVolume(self):
     """
     @Desc:Volume is not retaining same uuid when migrating from one
           storage to another.
     Step1:Create a volume/data disk
     Step2:Verify UUID of the volume
     Step3:Migrate the volume to another primary storage within
           the cluster
     Step4:Migrating volume to new primary storage should succeed
     Step5:volume UUID should not change even after migration
     """
     # Step 1: create a data disk in the test account.
     vol = Volume.create(
         self.apiclient,
         self.services["volume"],
         diskofferingid=self.disk_offering.id,
         zoneid=self.zone.id,
         account=self.account.name,
         domainid=self.account.domainid,
     )
     self.assertIsNotNone(vol, "Failed to create volume")
     # Step 2: record the volume's UUID before migration.
     vol_res = Volume.list(self.apiclient, id=vol.id)
     self.assertEqual(validateList(vol_res)[0], PASS, "Invalid response returned for list volumes")
     vol_uuid = vol_res[0].id
     try:
         self.virtual_machine.attach_volume(self.apiclient, vol)
     except Exception as e:
         self.fail("Attaching data disk to vm failed with error %s" % e)
     # Step 3: ask the server for pools this volume can migrate to.
     pools = StoragePool.listForMigration(self.apiclient, id=vol.id)
     if not pools:
         self.skipTest(
             "No suitable storage pools found for volume migration.\
                     Skipping"
         )
     self.assertEqual(validateList(pools)[0], PASS, "invalid pool response from findStoragePoolsForMigration")
     pool = pools[0]
     self.debug("Migrating Volume-ID: %s to Pool: %s" % (vol.id, pool.id))
     # Step 4: live-migrate while the disk is attached to the running VM.
     try:
         Volume.migrate(self.apiclient, volumeid=vol.id, storageid=pool.id, livemigrate="true")
     except Exception as e:
         self.fail("Volume migration failed with error %s" % e)
     # Step 5: the UUID must be unchanged after the migration.
     migrated_vols = Volume.list(
         self.apiclient, virtualmachineid=self.virtual_machine.id, listall="true", type="DATADISK"
     )
     self.assertEqual(validateList(migrated_vols)[0], PASS, "invalid volumes response after migration")
     migrated_vol_uuid = migrated_vols[0].id
     self.assertEqual(
         vol_uuid,
         migrated_vol_uuid,
         "Volume is not retaining same uuid when migrating from one\
                 storage to another",
     )
     # Detach so the volume can be cleaned up independently of the VM.
     self.virtual_machine.detach_volume(self.apiclient, vol)
     self.cleanup.append(vol)
     return
    def test_06_migrate_volume_between_pools(self):
        '''Migrate volume between two StorPool primary storages'''
        # Locate the VM's ROOT volume and refresh the pool record so the
        # storage ids compared below are current.
        vol = list_volumes(self.apiclient,
                           virtualmachineid=self.virtual_machine4.id,
                           type="ROOT")
        self.primary_storage2 = list_storage_pools(
            self.apiclient, name=self.primary_storage2.name)[0]

        # Migrate the root volume to the second primary storage.
        vol = Volume.migrate(self.apiclient,
                             volumeid=vol[0].id,
                             storageid=self.primary_storage2.id)
        self.assertEqual(vol.storageid, self.primary_storage2.id,
                         "Volume was not migrated to primary storage 2")

        self.primary_storage = list_storage_pools(
            self.apiclient, name=self.primary_storage.name)[0]

        # Migrate it back to the first primary storage.
        vol = Volume.migrate(self.apiclient,
                             volumeid=vol.id,
                             storageid=self.primary_storage.id)

        # BUGFIX: the original message said "primary storage 2" here, but
        # this step migrates back to the first primary storage.
        self.assertEqual(vol.storageid, self.primary_storage.id,
                         "Volume was not migrated to primary storage 1")
# ---- Example 6 ----
    def test_04_migrate_volume_to_another_storage(self):
        ''' Migrate Volume To Another Primary Storage
        '''
        # A cold migration requires a fully detached volume: no VM and no
        # current storage pool recorded on it.
        self.assertFalse(hasattr(self.volume, 'virtualmachineid'),
                         "Volume is not detached")
        self.assertFalse(hasattr(self.volume, 'storageid'),
                         "Volume is not detached")

        migrated = Volume.migrate(self.apiClient,
                                  volumeid=self.volume.id,
                                  storageid=self.primary_storage.id)

        self.assertIsNotNone(migrated, "Volume is None")
        self.assertEqual(migrated.storageid, self.primary_storage.id,
                         "Storage is the same")
# ---- Example 7 ----
    def test_12_migrate_volume_to_another_storage(self):
        ''' Migrate Volume To Another Primary Storage
        '''
        # The volume must not be attached to a VM for a cold migration.
        self.assertFalse(hasattr(self.volume, 'virtualmachineid'), "Volume is not detached")

        vol = list_volumes(
            self.apiclient,
            id=self.volume.id,
            listall=True
            )[0]

        self.debug("################# storage id is %s " % vol.storageid)
        self.debug("################# primary_storage2 id is %s " % self.primary_storage2.id)
        self.debug("################# primary_storage id is %s " % self.primary_storage.id)

        # Both branches of the original performed identical steps, differing
        # only in the destination pool; pick the pool the volume is NOT on.
        if vol.storageid == self.primary_storage2.id:
            target_pool = self.primary_storage
        else:
            target_pool = self.primary_storage2

        self.assertFalse(hasattr(self.volume, 'storageid'), "Volume is not detached")

        volume = Volume.migrate(
            self.apiclient,
            volumeid=self.volume.id,
            storageid=target_pool.id
            )

        self.assertIsNotNone(volume, "Volume is None")
        self.assertEqual(volume.storageid, target_pool.id, "Storage is the same")
# ---- Example 8 ----
    def test_09_migrate_volume_to_same_instance_pool(self):
        """Migrate volume to the same instance pool

        Creates and starts a VM, attaches a new data volume, stops the VM,
        migrates the volume to a pool suggested by
        findStoragePoolsForMigration, then detaches/deletes the volume and
        finally deletes the VM.
        """

        # Migration tests are optional; bail out unless explicitly enabled
        # in the test data.
        if not self.testdata[TestData.migrationTests]:
            self.skipTest("Volume migration tests not enabled, skipping test")

        #######################################
        # STEP 1: Create VM and Start VM      #
        #######################################

        test_virtual_machine = VirtualMachine.create(
            self.apiClient,
            self.testdata[TestData.virtualMachine3],
            accountid=self.account.name,
            zoneid=self.zone.id,
            serviceofferingid=self.compute_offering.id,
            templateid=self.template.id,
            domainid=self.domain.id,
            startvm=False
        )

        TestLinstorVolumes._start_vm(test_virtual_machine)

        #######################################
        # STEP 2: Create vol and attach to VM #
        #######################################

        new_volume = Volume.create(
            self.apiClient,
            self.testdata[TestData.volume_3],
            account=self.account.name,
            domainid=self.domain.id,
            zoneid=self.zone.id,
            diskofferingid=self.disk_offering_same_inst.id
        )

        # Keep the original Volume object: new_volume is rebound to the
        # attach response below, but delete() is called on this one later.
        volume_to_delete_later = new_volume

        new_volume = test_virtual_machine.attach_volume(
            self.apiClient,
            new_volume
        )

        vm = self._get_vm(test_virtual_machine.id)

        self.assertEqual(
            new_volume.virtualmachineid,
            vm.id,
            "Check if attached to virtual machine"
        )

        self.assertEqual(
            vm.state.lower(),
            'running',
            str(vm.state)
        )

        #######################################
        # STEP 3: Stop VM and Migrate volume  #
        #######################################

        test_virtual_machine.stop(self.apiClient)

        vm = self._get_vm(test_virtual_machine.id)

        self.assertEqual(
            vm.state.lower(),
            'stopped',
            str(vm.state)
        )

        # Ask the management server which pools this volume may migrate to.
        pools = StoragePool.listForMigration(
            self.apiClient,
            id=new_volume.id
        )

        if not pools:
            self.skipTest("No suitable storage pools found for volume migration, skipping test")

        self.assertEqual(
            validateList(pools)[0],
            PASS,
            "Invalid pool response from findStoragePoolsForMigration API"
        )

        pool = pools[0]
        self.debug("Migrating Volume-ID: {} to Same Instance Pool: {}".format(new_volume.id, pool.id))

        try:
            Volume.migrate(
                self.apiClient,
                volumeid=new_volume.id,
                storageid=pool.id
            )
        except Exception as e:
            self.fail("Volume migration failed with error %s" % e)

        #######################################
        #  STEP 4: Detach and delete volume   #
        #######################################

        new_volume = test_virtual_machine.detach_volume(
            self.apiClient,
            new_volume
        )

        # After detaching, virtualmachineid must be cleared.
        self.assertEqual(
            new_volume.virtualmachineid,
            None,
            "Check if attached to virtual machine"
        )

        volume_to_delete_later.delete(self.apiClient)

        list_volumes_response = list_volumes(
            self.apiClient,
            id=new_volume.id
        )

        self.assertEqual(
            list_volumes_response,
            None,
            "Check volume was deleted"
        )

        #######################################
        #  STEP 5: Delete VM                  #
        #######################################

        test_virtual_machine.delete(self.apiClient, True)
# ---- Example 9 ----
 def test_01_migrateVolume(self):
     """
     @Desc:Volume is not retaining same uuid when migrating from one
           storage to another.
     Step1:Create a volume/data disk
     Step2:Verify UUID of the volume
     Step3:Migrate the volume to another primary storage within
           the cluster
     Step4:Migrating volume to new primary storage should succeed
     Step5:volume UUID should not change even after migration
     """
     # Step 1: create a data disk in the test account.
     vol = Volume.create(
         self.apiclient,
         self.services["volume"],
         diskofferingid=self.disk_offering.id,
         zoneid=self.zone.id,
         account=self.account.name,
         domainid=self.account.domainid,
     )
     self.assertIsNotNone(vol, "Failed to create volume")
     # Step 2: record the volume's UUID before migration.
     vol_res = Volume.list(
         self.apiclient,
         id=vol.id
     )
     self.assertEqual(
         validateList(vol_res)[0],
         PASS,
         "Invalid response returned for list volumes")
     vol_uuid = vol_res[0].id
     try:
         self.virtual_machine.attach_volume(
             self.apiclient,
             vol
         )
     except Exception as e:
         self.fail("Attaching data disk to vm failed with error %s" % e)
     # Step 3: ask the server for pools this volume can migrate to.
     pools = StoragePool.listForMigration(
         self.apiclient,
         id=vol.id
     )
     if not pools:
         self.skipTest(
             "No suitable storage pools found for volume migration.\
                     Skipping")
     self.assertEqual(
         validateList(pools)[0],
         PASS,
         "invalid pool response from findStoragePoolsForMigration")
     pool = pools[0]
     self.debug("Migrating Volume-ID: %s to Pool: %s" % (vol.id, pool.id))
     # Step 4: live-migrate while the disk is attached to the running VM.
     try:
         Volume.migrate(
             self.apiclient,
             volumeid=vol.id,
             storageid=pool.id,
             livemigrate='true'
         )
     except Exception as e:
         self.fail("Volume migration failed with error %s" % e)
     # Step 5: the UUID must be unchanged after the migration.
     migrated_vols = Volume.list(
         self.apiclient,
         virtualmachineid=self.virtual_machine.id,
         listall='true',
         type='DATADISK'
     )
     self.assertEqual(
         validateList(migrated_vols)[0],
         PASS,
         "invalid volumes response after migration")
     migrated_vol_uuid = migrated_vols[0].id
     self.assertEqual(
         vol_uuid,
         migrated_vol_uuid,
         "Volume is not retaining same uuid when migrating from one\
                 storage to another"
     )
     # Detach so the volume can be cleaned up independently of the VM.
     self.virtual_machine.detach_volume(
         self.apiclient,
         vol
     )
     self.cleanup.append(vol)
     return
# ---- Example 10 ----
    def test_01_positive_test_1(self):
        """
        positive test for volume life cycle
        # 1. Deploy a vm [vm1] with shared storage and data disk
        # 2. Deploy a vm [vm2]with shared storage without data disk
        # 3.
        # 4. Create a new volume and attache to vm2
        # 5. Detach data disk from vm1 and download it
        #  Variance(1-9)
        # 6. Upload volume by providing url of downloaded volume in step 5
        # 7. Attach the volume to a different vm - vm2
        # 8. Try to delete an attached volume
        # 9. Create template from root volume of VM1
        # 10. Create new VM using the template created in step 9
        # 11. Delete the template
        # 12. Detach the disk from VM2 and re-attach the disk to VM1
        # 13.
        # 14.
        # 15.Migrate volume(detached) and then attach to a vm and live-migrate
        # 16.Upload volume of size smaller  than storage.max.volume.upload.size(leaving the negative case)
        # 17.NA
        # 18.
        # 19.NA
        # 20.Detach data disks from VM2 and delete volume

        """
        # 1. Deploy a vm [vm1] with shared storage and data disk
        self.virtual_machine_1 = VirtualMachine.create(self.userapiclient,
                                                       self.testdata["small"],
                                                       templateid=self.template.id,
                                                       accountid=self.account.name,
                                                       domainid=self.account.domainid,
                                                       serviceofferingid=self.service_offering_1.id,
                                                       zoneid=self.zone.id,
                                                       diskofferingid=self.disk_offering_1.id,
                                                       mode=self.testdata["mode"]
                                                       )
        verify_vm(self, self.virtual_machine_1.id)
        # List data volume for vm1
        list_volume = Volume.list(self.userapiclient,
                                  virtualmachineid=self.virtual_machine_1.id,
                                  type='DATADISK'
                                  )
        self.assertEqual(validateList(list_volume)[0], PASS, "Check List volume response for vm id  %s" % self.virtual_machine_1.id)
        list_data_volume_for_vm1 = list_volume[0]
        self.assertEqual(len(list_volume), 1, "There is no data disk attached to vm id:%s" % self.virtual_machine_1.id)
        self.assertEqual(list_data_volume_for_vm1.virtualmachineid, str(self.virtual_machine_1.id), "Check if volume state (attached) is reflected")
        # 2. Deploy a vm [vm2]with shared storage without data disk
        self.virtual_machine_2 = VirtualMachine.create(self.userapiclient,
                                                       self.testdata["small"],
                                                       templateid=self.template.id,
                                                       accountid=self.account.name,
                                                       domainid=self.account.domainid,
                                                       serviceofferingid=self.service_offering_1.id,
                                                       zoneid=self.zone.id,
                                                       mode=self.testdata["mode"]
                                                       )
        verify_vm(self, self.virtual_machine_2.id)

        #4. Create a new volume and attache to vm2
        self.volume = Volume.create(self.userapiclient,
                                    services=self.testdata["volume"],
                                    diskofferingid=self.disk_offering_1.id,
                                    zoneid=self.zone.id
                                    )

        list_data_volume = Volume.list(self.userapiclient,
                                       id=self.volume.id
                                       )
        self.assertEqual(validateList(list_data_volume)[0], PASS, "Check List volume response for volume %s" % self.volume.id)
        self.assertEqual(list_data_volume[0].id, self.volume.id, "check list volume response for volume id:  %s" % self.volume.id)
        self.debug("volume id %s got created successfully" % list_data_volume[0].id)
        # Attach volume to vm2
        self.virtual_machine_2.attach_volume(self.userapiclient,
                                             self.volume
                                             )
        verify_attach_volume(self, self.virtual_machine_2.id, self.volume.id)

        #Variance
        if self.zone.localstorageenabled:
            # V1.Create vm3 with local storage offering
            self.virtual_machine_local_3=VirtualMachine.create(self.userapiclient,
                                                               self.testdata["small"],
                                                               templateid=self.template.id,
                                                               accountid=self.account.name,
                                                               domainid=self.account.domainid,
                                                               serviceofferingid=self.service_offering_2.id,
                                                               zoneid=self.zone.id,
                                                               mode=self.testdata["mode"]
                                                               )
            verify_vm(self, self.virtual_machine_local_3.id)

            # V2.create two data disk on local storage
            self.local_volumes = []
            for i in range(2):

                    local_volume = Volume.create(self.userapiclient,
                                                 services=self.testdata["volume"],
                                                 diskofferingid=self.disk_offering_local.id,
                                                 zoneid=self.zone.id
                                                 )

                    list_local_data_volume = Volume.list(self.userapiclient,
                                                         id=local_volume.id
                                                         )
                    self.assertEqual(validateList(list_local_data_volume)[0], PASS, "Check List volume response for volume %s" % local_volume.id)
                    self.assertEqual(list_local_data_volume[0].id, local_volume.id, "check list volume response for volume id:  %s" % local_volume.id)
                    self.debug("volume id %s got created successfully" % list_local_data_volume[0].id)
                    self.local_volumes.append(local_volume)
            # V3.Attach local disk to vm1
            self.virtual_machine_1.attach_volume(self.userapiclient,
                                                 self.local_volumes[0]
                                                 )
            verify_attach_volume(self, self.virtual_machine_1.id, self.local_volumes[0].id)
        if self.list_storage:
            # V4.create vm4 with zone wide storage
            self.virtual_machine_zone_4 = VirtualMachine.create(self.userapiclient,
                                                                self.testdata["small"],
                                                                templateid=self.template.id,
                                                                accountid=self.account.name,
                                                                domainid=self.account.domainid,
                                                                serviceofferingid=self.tagged_so.id,
                                                                zoneid=self.zone.id,
                                                                mode=self.testdata["mode"]
                                                                )
            verify_vm(self, self.virtual_machine_zone_4.id)

            # V5.Create two data disk on zone  wide storage
            self.zone_volumes = []
            for i in range(2):

                    zone_volume = Volume.create(self.userapiclient,
                                                services=self.testdata["volume"],
                                                diskofferingid=self.disk_offering_tagged.id,
                                                zoneid=self.zone.id
                                                )

                    list_zone_data_volume = Volume.list(self.userapiclient,
                                                        id=zone_volume.id
                                                        )
                    self.assertEqual(validateList(list_zone_data_volume)[0], PASS, "Check List volume response for volume %s" % zone_volume.id)
                    self.assertEqual(list_zone_data_volume[0].id, zone_volume.id, "check list volume response for volume id:  %s" % zone_volume.id)
                    self.debug("volume id:%s got created successfully" % list_zone_data_volume[0].id)
                    self.zone_volumes.append(zone_volume)

            # V6.Attach data disk running on ZWPS to VM1 (root disk on shared)
            self.virtual_machine_1.attach_volume(self.userapiclient,
                                                 self.zone_volumes[0]
                                                 )
            verify_attach_volume(self, self.virtual_machine_1.id, self.zone_volumes[0].id)
            # V7. Create a cluster wide volume and attach to vm running on zone wide storage
            self.cluster_volume = Volume.create(self.userapiclient,
                                                services=self.testdata["volume"],
                                                diskofferingid=self.disk_offering_1.id,
                                                zoneid=self.zone.id
                                                )
            list_cluster_volume = Volume.list(self.userapiclient,
                                              id=self.cluster_volume.id
                                              )
            self.assertEqual(validateList(list_cluster_volume)[0], PASS, "Check List volume response for volume %s" % self.cluster_volume.id)
            self.assertEqual(list_cluster_volume[0].id, str(self.cluster_volume.id), "volume does not exist %s" % self.cluster_volume.id)
            self.debug("volume id %s got created successfuly" % list_cluster_volume[0].id)
            self.virtual_machine_zone_4.attach_volume(self.userapiclient,
                                                      self.cluster_volume
                                                      )
            verify_attach_volume(self, self.virtual_machine_zone_4.id, self.cluster_volume.id)
        if self.list_storage and self.zone.localstorageenabled:
            #V8.Attach zone wide volume to vm running on local storage
            self.virtual_machine_local_3.attach_volume(self.userapiclient,
                                                       self.zone_volumes[1]
                                                       )
            verify_attach_volume(self, self.virtual_machine_local_3.id, self.zone_volumes[1].id)
            # V9.Attach local volume to a vm running on zone wide storage
            self.virtual_machine_zone_4.attach_volume(self.userapiclient,
                                                      self.local_volumes[1]
                                                      )
            verify_attach_volume(self, self.virtual_machine_zone_4.id, self.local_volumes[1].id)
        # 5. Detach data disk from vm1 and download it
        self.virtual_machine_1.detach_volume(self.userapiclient,
                                             volume=list_data_volume_for_vm1
                                             )
        verify_detach_volume(self, self.virtual_machine_1.id, list_data_volume_for_vm1.id)
        # download detached volume
        self.extract_volume = Volume.extract(self.userapiclient,
                                             volume_id=list_data_volume_for_vm1.id,
                                             zoneid=self.zone.id,
                                             mode='HTTP_DOWNLOAD'
                                             )

        self.debug("extracted url is%s  :" % self.extract_volume.url)
        try:

            formatted_url = urllib.unquote_plus(self.extract_volume.url)
            self.debug("Attempting to download volume at url %s" % formatted_url)
            response = urllib.urlopen(formatted_url)
            self.debug("response from volume url %s" % response.getcode())
            fd, path = tempfile.mkstemp()
            self.debug("Saving volume %s to path %s" % (list_data_volume_for_vm1.id, path))
            os.close(fd)
            with open(path, 'wb') as fd:
                fd.write(response.read())
            self.debug("Saved volume successfully")
        except Exception:
            self.fail("Extract Volume Failed with invalid URL %s (vol id: %s)" % (self.extract_volume, list_data_volume_for_vm1.id))
        #Need to get format for downloaded volume ,for now using default format VHD
        if "OVA" in self.extract_volume.url.upper():
            self.testdata["upload_volume"]["format"] = "OVA"
        if "QCOW2" in self.extract_volume.url.upper():
            self.testdata["upload_volume"]["format"] = "QCOW2"
        # 6. Upload volume by providing url of downloaded volume in step 5
        self.upload_response = Volume.upload(self.userapiclient,
                                             zoneid=self.zone.id,
                                             url=self.extract_volume.url,
                                             services=self.testdata["upload_volume"]
                                             )
        self.upload_response.wait_for_upload(self.userapiclient
                                             )
        self.debug("uploaded volume id is %s" % self.upload_response.id)
        # 7. Attach the volume to a different vm - vm2
        self.virtual_machine_2.attach_volume(self.userapiclient,
                                             volume=self.upload_response
                                             )
        verify_attach_volume(self, self.virtual_machine_2.id, self.upload_response.id)
        # 8. Try to delete an attached volume
        try:
            self.volume.delete(self.userapiclient
                               )
            self.fail("Volume got deleted in attached state %s " % self.volume.id)
        except Exception as e:
            self.debug("Attached volume deletion failed because  %s" % e)
        #9. Create template from root volume of VM1(stop VM->create template -> start vm)

        self.virtual_machine_1.stop(self.userapiclient
                                    )

        self.list_root_disk_for_vm1 = Volume.list(self.userapiclient,
                                                  virtualmachineid=self.virtual_machine_1.id,
                                                  type='ROOT'
                                                  )
        self.assertEqual(validateList(self.list_root_disk_for_vm1)[0], PASS, "Check List volume response for vm %s" % self.virtual_machine_1.id)
        self.assertEqual(len(self.list_root_disk_for_vm1), 1, "list root disk for vm1 is empty : %s" % self.virtual_machine_1.id)
        self.template_from_vm1_root_disk = Template.create(self.userapiclient,
                                                           self.testdata["template"],
                                                           self.list_root_disk_for_vm1[0].id,
                                                           account=self.account.name,
                                                           domainid=self.account.domainid
                                                           )
        list_template = Template.list(self.userapiclient,
                                      templatefilter=self.testdata["templatefilter"],
                                      id=self.template_from_vm1_root_disk.id
                                      )
        self.assertEqual(validateList(list_template)[0], PASS, "Check List template response for template id %s" % self.template_from_vm1_root_disk.id)
        self.assertEqual(len(list_template), 1, "list template response is empty for template id  : %s" % list_template[0].id)
        self.assertEqual(list_template[0].id, self.template_from_vm1_root_disk.id, "list template id is not same as created template")
        self.debug("Template id:%s got created successfully" % self.template_from_vm1_root_disk.id)
        self.virtual_machine_1.start(self.userapiclient
                                     )
        # 10. Deploy a vm using template ,created  from vm1's root disk

        self.virtual_machine_3 = VirtualMachine.create(self.userapiclient,
                                                       self.testdata["small"],
                                                       templateid=self.template_from_vm1_root_disk.id,
                                                       accountid=self.account.name,
                                                       domainid=self.account.domainid,
                                                       serviceofferingid=self.service_offering_1.id,
                                                       zoneid=self.zone.id,
                                                       mode=self.testdata["mode"]
                                                       )
        verify_vm(self, self.virtual_machine_3.id)

        # 11.delete the template created from root disk of vm1
        try:
            self.template_from_vm1_root_disk.delete(self.userapiclient
                                                    )
            self.debug("Template id: %s got deleted successfuly" % self.template_from_vm1_root_disk.id)
        except Exception as e:
            raise Exception("Template deletion failed with error %s" % e)
        list_template = Template.list(self.userapiclient,
                                      templatefilter=self.testdata["templatefilter"],
                                      id=self.template_from_vm1_root_disk.id
                                      )
        self.assertEqual(list_template, None, "Template is not deleted, id %s:" % self.template_from_vm1_root_disk.id)
        self.debug("Template id%s got deleted successfully" % self.template_from_vm1_root_disk.id)

        # List vm and check the state of vm
        verify_vm(self, self.virtual_machine_3.id)

        #12.Detach the disk from VM2 and re-attach the disk to VM1
        self.virtual_machine_2.detach_volume(self.userapiclient,
                                             volume=self.upload_response
                                             )
        verify_detach_volume(self, self.virtual_machine_2.id, self.upload_response.id)

        self.virtual_machine_1.attach_volume(self.userapiclient,
                                             volume=self.upload_response
                                             )

        verify_attach_volume(self, self.virtual_machine_1.id, self.upload_response.id)

        # 15.Migrate volume(detached) and then attach to a vm and live-migrate
        self.migrate_volume = Volume.create(self.userapiclient,
                                            services=self.testdata["volume"],
                                            diskofferingid=self.disk_offering_1.id,
                                            zoneid=self.zone.id
                                            )
        list_volume = Volume.list(self.apiclient,
                                  id=self.migrate_volume.id
                                  )
        self.assertEqual(validateList(list_volume)[0], PASS, "Check List volume response for volume %s" % self.migrate_volume.id)
        self.assertEqual(list_volume[0].id, str(self.migrate_volume.id), "volume does not exist %s" % self.migrate_volume.id)
        self.debug("volume id %s got created successfuly" % list_volume[0].id)

        self.virtual_machine_1.attach_volume(self.userapiclient,
                                             self.migrate_volume
                                             )
        verify_attach_volume(self, self.virtual_machine_1.id, self.migrate_volume.id)

        self.virtual_machine_1.detach_volume(self.userapiclient,
                                             volume=self.migrate_volume
                                             )
        verify_detach_volume(self, self.virtual_machine_1.id, self.migrate_volume.id)

        list_volume = Volume.list(self.apiclient,
                                  id=self.migrate_volume.id
                                  )
        self.assertEqual(validateList(list_volume)[0], PASS, "Check List volume response for volume %s" % self.migrate_volume.id)
        self.assertEqual(list_volume[0].id, str(self.migrate_volume.id), "volume does not exist %s" % self.migrate_volume.id)
        self.debug("volume id %s got created successfuly" % list_volume[0].id)
        list_pool = StoragePool.list(self.apiclient,
                                     id=list_volume[0].storageid
                                     )
        self.assertEqual(validateList(list_pool)[0], PASS, "Check List pool response for storage id %s" % list_volume[0].storageid)
        self.assertGreater(len(list_pool), 0, "Check the list list storagepoolresponse for vm id:  %s" % list_volume[0].storageid)
        list_pools = StoragePool.list(self.apiclient,
                                      scope=list_pool[0].scope
                                      )
        self.assertEqual(validateList(list_pools)[0], PASS, "Check List pool response for scope %s" % list_pool[0].scope)
        self.assertGreater(len(list_pools), 0, "Check the list vm response for scope :%s" % list_volume[0].scope)
        storagepoolid = None
        for i in range(len(list_pools)):
            if list_volume[0].storageid != list_pools[i].id:
                storagepoolid = list_pools[i].id
                break
            else:
                self.debug("No pool available for volume migration ")

        if storagepoolid is not None:
            try:
                volume_migrate = Volume.migrate(self.apiclient,
                                                storageid=storagepoolid,
                                                volumeid=self.migrate_volume.id
                                                )
            except Exception as e:
                raise Exception("Volume migration failed with error %s" % e)

            self.virtual_machine_2.attach_volume(self.userapiclient,
                                                 self.migrate_volume
                                                 )
            verify_attach_volume(self, self.virtual_machine_2.id, self.migrate_volume.id)

            pool_for_migration = StoragePool.listForMigration(self.apiclient,
                                                              id=self.migrate_volume.id
                                                              )
            self.assertEqual(validateList(pool_for_migration)[0], PASS, "Check list pool For Migration response for volume %s" % self.migrate_volume.id)
            self.assertGreater(len(pool_for_migration), 0, "Check the listForMigration response for volume :%s" % self.migrate_volume.id)
            try:
                volume_migrate = Volume.migrate(self.apiclient,
                                                storageid=pool_for_migration[0].id,
                                                volumeid=self.migrate_volume.id,
                                                livemigrate=True
                                                )
            except Exception as e:
                raise Exception("Volume migration failed with error %s" % e)
        else:
            try:
                self.migrate_volume.delete(self.userapiclient
                                           )
                self.debug("volume id:%s got deleted successfully " % self.migrate_volume.id)
            except Exception as e:
                raise Exception("Volume deletion failed with error %s" % e)
        # 16.Upload volume of size smaller  than storage.max.volume.upload.size(leaving the negative case)
        self.testdata["upload_volume"]["format"] = "VHD"
        volume_upload = Volume.upload(self.userapiclient,
                                      self.testdata["upload_volume"],
                                      zoneid=self.zone.id
                                      )
        volume_upload.wait_for_upload(self.userapiclient
                                      )
        self.debug("volume id :%s got uploaded successfully is " % volume_upload.id)

        # 20.Detach data disk from vm 2 and delete the volume
        self.virtual_machine_2.detach_volume(self.userapiclient,
                                             volume=self.volume
                                             )
        verify_detach_volume(self, self.virtual_machine_2.id, self.volume.id)

        try:
            self.volume.delete(self.userapiclient
                               )
            self.debug("volume id:%s got deleted successfully " % self.volume.id)
        except Exception as e:
            raise Exception("Volume deletion failed with error %s" % e)
예제 #11
0
    def test_02_list_snapshots_with_removed_data_store(self):
        """Test listing volume snapshots with removed data stores

        Steps:
        1. Create a new volume -> V
        2. Create a new Primary Storage -> PS
        3. Attach and detach volume V from a vm (a data volume must be
           detached before a cold migration)
        4. Migrate volume V to PS
        5. Take a snapshot S of volume V
        6. Remove all resources (including PS) and verify S still gets
           properly listed although its Primary Storage was removed
        7. Delete S and verify it is no longer listed
        """

        # Create new volume
        vol = Volume.create(
            self.apiclient,
            self.services["volume"],
            diskofferingid=self.disk_offering.id,
            zoneid=self.zone.id,
            account=self.account.name,
            domainid=self.account.domainid,
        )
        self.cleanup.append(vol)
        self.assertIsNotNone(vol, "Failed to create volume")
        # The list call is kept purely to validate the create response.
        vol_res = Volume.list(self.apiclient, id=vol.id)
        self.assertEqual(
            validateList(vol_res)[0], PASS,
            "Invalid response returned for list volumes")
        clusters = list_clusters(self.apiclient, zoneid=self.zone.id)
        assert isinstance(clusters, list) and len(clusters) > 0

        # Attach created volume to vm, then detach it to be able to migrate it
        self.virtual_machine_with_disk.stop(self.apiclient)
        self.virtual_machine_with_disk.attach_volume(self.apiclient, vol)

        # Create new Primary Storage
        storage = StoragePool.create(self.apiclient,
                                     self.services["nfs2"],
                                     clusterid=clusters[0].id,
                                     zoneid=self.zone.id,
                                     podid=self.pod.id)

        self.cleanup.append(storage)
        self.assertEqual(storage.state, 'Up', "Check primary storage state")
        self.assertEqual(storage.type, 'NetworkFilesystem',
                         "Check storage pool type")
        storage_pools_response = list_storage_pools(self.apiclient,
                                                    id=storage.id)
        self.assertEqual(isinstance(storage_pools_response, list), True,
                         "Check list response returns a valid list")
        self.assertNotEqual(len(storage_pools_response), 0,
                            "Check list Hosts response")
        storage_response = storage_pools_response[0]
        self.assertEqual(storage_response.id, storage.id,
                         "Check storage pool ID")
        self.assertEqual(storage.type, storage_response.type,
                         "Check storage pool type ")

        self.virtual_machine_with_disk.detach_volume(self.apiclient, vol)

        # Migrate volume to new Primary Storage
        Volume.migrate(self.apiclient, storageid=storage.id, volumeid=vol.id)

        volume_response = list_volumes(
            self.apiclient,
            id=vol.id,
        )
        self.assertNotEqual(len(volume_response), 0,
                            "Check list Volumes response")
        volume_migrated = volume_response[0]
        self.assertEqual(volume_migrated.storageid, storage.id,
                         "Check volume storage id")

        # Take snapshot of new volume
        snapshot = Snapshot.create(self.apiclient,
                                   volume_migrated.id,
                                   account=self.account.name,
                                   domainid=self.account.domainid)

        self.debug("Snapshot created: ID - %s" % snapshot.id)

        # Delete volume, VM and created Primary Storage
        cleanup_resources(self.apiclient, self.cleanup)

        # List snapshot and verify it gets properly listed although Primary Storage was removed
        snapshot_response = Snapshot.list(self.apiclient, id=snapshot.id)
        self.assertNotEqual(len(snapshot_response), 0,
                            "Check list Snapshot response")
        self.assertEqual(snapshot_response[0].id, snapshot.id,
                         "Check snapshot id")

        # Delete snapshot and verify it gets properly deleted (should not be listed)
        self.cleanup = [snapshot]
        cleanup_resources(self.apiclient, self.cleanup)

        self.cleanup = []
        snapshot_response_2 = Snapshot.list(self.apiclient, id=snapshot.id)
        self.assertEqual(snapshot_response_2, None,
                         "Check list Snapshot response")

        return
예제 #12
0
    def test_02_list_snapshots_with_removed_data_store(self):
        """Test listing volume snapshots with removed data stores

        Steps:
        1. Create a new volume -> V
        2. Create a new Primary Storage -> PS
        3. Attach and detach volume V from a vm (a data volume must be
           detached before a cold migration)
        4. Migrate volume V to PS
        5. Take a snapshot S of volume V
        6. Remove all resources (including PS) and verify S still gets
           properly listed although its Primary Storage was removed
        7. Delete S and verify it is no longer listed
        """

        # Create new volume
        vol = Volume.create(
            self.apiclient,
            self.services["volume"],
            diskofferingid=self.disk_offering.id,
            zoneid=self.zone.id,
            account=self.account.name,
            domainid=self.account.domainid,
        )
        self.cleanup.append(vol)
        self.assertIsNotNone(vol, "Failed to create volume")
        # The list call is kept purely to validate the create response.
        vol_res = Volume.list(
            self.apiclient,
            id=vol.id
        )
        self.assertEqual(
            validateList(vol_res)[0],
            PASS,
            "Invalid response returned for list volumes")

        # Create new Primary Storage
        clusters = list_clusters(
            self.apiclient,
            zoneid=self.zone.id
        )
        assert isinstance(clusters, list) and len(clusters) > 0

        storage = StoragePool.create(self.apiclient,
                                     self.services["nfs2"],
                                     clusterid=clusters[0].id,
                                     zoneid=self.zone.id,
                                     podid=self.pod.id
                                     )
        self.cleanup.append(self.virtual_machine_with_disk)
        self.cleanup.append(storage)

        self.assertEqual(
            storage.state,
            'Up',
            "Check primary storage state"
        )
        self.assertEqual(
            storage.type,
            'NetworkFilesystem',
            "Check storage pool type"
        )
        storage_pools_response = list_storage_pools(self.apiclient,
                                                    id=storage.id)
        self.assertEqual(
            isinstance(storage_pools_response, list),
            True,
            "Check list response returns a valid list"
        )
        self.assertNotEqual(
            len(storage_pools_response),
            0,
            "Check list Hosts response"
        )
        storage_response = storage_pools_response[0]
        self.assertEqual(
            storage_response.id,
            storage.id,
            "Check storage pool ID"
        )
        self.assertEqual(
            storage.type,
            storage_response.type,
            "Check storage pool type "
        )

        # Attach created volume to vm, then detach it to be able to migrate it
        self.virtual_machine_with_disk.stop(self.apiclient)
        self.virtual_machine_with_disk.attach_volume(
            self.apiclient,
            vol
        )
        self.virtual_machine_with_disk.detach_volume(
            self.apiclient,
            vol
        )

        # Migrate volume to new Primary Storage
        Volume.migrate(self.apiclient,
            storageid=storage.id,
            volumeid=vol.id
        )

        volume_response = list_volumes(
            self.apiclient,
            id=vol.id,
        )
        self.assertNotEqual(
            len(volume_response),
            0,
            "Check list Volumes response"
        )
        volume_migrated = volume_response[0]
        self.assertEqual(
            volume_migrated.storageid,
            storage.id,
            "Check volume storage id"
        )

        # Take snapshot of new volume
        snapshot = Snapshot.create(
            self.apiclient,
            volume_migrated.id,
            account=self.account.name,
            domainid=self.account.domainid
        )

        self.debug("Snapshot created: ID - %s" % snapshot.id)

        # Delete volume, VM and created Primary Storage
        cleanup_resources(self.apiclient, self.cleanup)

        # List snapshot and verify it gets properly listed although Primary Storage was removed
        snapshot_response = Snapshot.list(
            self.apiclient,
            id=snapshot.id
        )
        self.assertNotEqual(
            len(snapshot_response),
            0,
            "Check list Snapshot response"
        )
        self.assertEqual(
            snapshot_response[0].id,
            snapshot.id,
            "Check snapshot id"
        )

        # Delete snapshot and verify it gets properly deleted (should not be listed)
        self.cleanup = [snapshot]
        cleanup_resources(self.apiclient, self.cleanup)

        self.cleanup = []
        snapshot_response_2 = Snapshot.list(
            self.apiclient,
            id=snapshot.id
        )
        self.assertEqual(
            snapshot_response_2,
            None,
            "Check list Snapshot response"
        )

        return
예제 #13
0
    def test_02_list_snapshots_with_removed_data_store(self):
        """Test listing volume snapshots with removed data stores

        Steps:
        1. Create a new Primary Storage pool.
        2. Live-migrate the VM's ROOT volume to the new pool (the VM
           stays running, hence livemigrate).
        3. Take a snapshot of the ROOT volume.
        4. Delete the VM and the new Primary Storage.
        5. Verify the snapshot is still listed although the Primary
           Storage it was taken on has been removed.
        6. Delete the snapshot and verify it is no longer listed.
        """

        # 1) Create new Primary Storage
        clusters = list_clusters(self.apiclient, zoneid=self.zone.id)
        assert isinstance(clusters, list) and len(clusters) > 0

        storage = StoragePool.create(self.apiclient,
                                     self.services["nfs"],
                                     clusterid=clusters[0].id,
                                     zoneid=self.zone.id,
                                     podid=self.pod.id)
        self.assertEqual(storage.state, 'Up', "Check primary storage state")
        self.assertEqual(storage.type, 'NetworkFilesystem',
                         "Check storage pool type")
        storage_pools_response = list_storage_pools(self.apiclient,
                                                    id=storage.id)
        self.assertEqual(isinstance(storage_pools_response, list), True,
                         "Check list response returns a valid list")
        self.assertNotEqual(len(storage_pools_response), 0,
                            "Check list Hosts response")
        storage_response = storage_pools_response[0]
        self.assertEqual(storage_response.id, storage.id,
                         "Check storage pool ID")
        self.assertEqual(storage.type, storage_response.type,
                         "Check storage pool type ")

        # 2) Migrate VM ROOT volume to new Primary Storage
        volumes = list_volumes(
            self.apiclient,
            virtualmachineid=self.virtual_machine_with_disk.id,
            type='ROOT',
            listall=True)
        Volume.migrate(self.apiclient,
                       storageid=storage.id,
                       volumeid=volumes[0].id,
                       livemigrate="true")

        volume_response = list_volumes(
            self.apiclient,
            id=volumes[0].id,
        )
        self.assertNotEqual(len(volume_response), 0,
                            "Check list Volumes response")
        volume_migrated = volume_response[0]
        self.assertEqual(volume_migrated.storageid, storage.id,
                         "Check volume storage id")
        # NOTE(review): VM is appended before the storage pool — presumably
        # cleanup happens in list order so the VM (and its volume) is gone
        # before the pool is removed; confirm against cleanup_resources.
        self.cleanup.append(self.virtual_machine_with_disk)
        self.cleanup.append(storage)

        # 3) Take snapshot of VM ROOT volume
        snapshot = Snapshot.create(self.apiclient,
                                   volume_migrated.id,
                                   account=self.account.name,
                                   domainid=self.account.domainid)
        self.debug("Snapshot created: ID - %s" % snapshot.id)

        # 4) Delete VM and created Primary Storage
        cleanup_resources(self.apiclient, self.cleanup)

        # 5) List snapshot and verify it gets properly listed although Primary Storage was removed
        snapshot_response = Snapshot.list(self.apiclient, id=snapshot.id)
        self.assertNotEqual(len(snapshot_response), 0,
                            "Check list Snapshot response")
        self.assertEqual(snapshot_response[0].id, snapshot.id,
                         "Check snapshot id")

        # 6) Delete snapshot and verify it gets properly deleted (should not be listed)
        self.cleanup = [snapshot]
        cleanup_resources(self.apiclient, self.cleanup)

        snapshot_response_2 = Snapshot.list(self.apiclient, id=snapshot.id)
        self.assertEqual(snapshot_response_2, None,
                         "Check list Snapshot response")

        return
예제 #14
0
    def test_11_migrate_volume_and_change_offering(self):
        """Test migrating a volume and changing its disk offering in one call.

        Validates the following:
        1. Creates a new volume with the "Small" disk offering.
        2. Migrates the volume to another primary storage while changing
           the offering to "Large" (newdiskofferingid).
        3. Verifies the volume reports the new offering after migration.

        On KVM the VM is stopped before migration (live storage migration
        of attached volumes is not supported there) and restarted after.
        """
        small_offering = list_disk_offering(
            self.apiclient,
            name="Small"
        )[0]

        large_offering = list_disk_offering(
            self.apiclient,
            name="Large"
        )[0]
        # Was self.apiClient (capital C) in the original, inconsistent with
        # every other call in this test; use the same client throughout.
        volume = Volume.create(
            self.apiclient,
            self.services,
            zoneid=self.zone.id,
            account=self.account.name,
            domainid=self.account.domainid,
            diskofferingid=small_offering.id
        )
        self.debug("Created a small volume: %s" % volume.id)

        self.virtual_machine.attach_volume(self.apiclient, volume=volume)

        if self.virtual_machine.hypervisor == "KVM":
            self.virtual_machine.stop(self.apiclient)

        pools = StoragePool.listForMigration(
            self.apiclient,
            id=volume.id
            )

        pool = None

        if pools and len(pools) > 0:
            pool = pools[0]
        else:
            # skipTest raises unittest.SkipTest itself; no 'raise' needed.
            self.skipTest("Not enough storage pools found, skipping test")

        # Clear storage tags so the untagged "Large" offering is accepted
        # on the target pool.
        if hasattr(pool, 'tags'):
            StoragePool.update(self.apiclient, id=pool.id, tags="")

        self.debug("Migrating Volume-ID: %s to Pool: %s" % (volume.id, pool.id))
        Volume.migrate(
            self.apiclient,
            volumeid=volume.id,
            storageid=pool.id,
            newdiskofferingid=large_offering.id
        )
        if self.virtual_machine.hypervisor == "KVM":
            self.virtual_machine.start(self.apiclient)
        migrated_vol = Volume.list(
            self.apiclient,
            id=volume.id
        )[0]
        self.assertEqual(
            migrated_vol.diskofferingname,
            large_offering.name,
            "Offering name did not match with the new one "
        )
        return
예제 #15
0
    def test_09_stop_vm_migrate_vol(self):
        """Test Stopped Virtual Machine's ROOT volume migration

        Validates the following:
        1. Find a cluster with more than one primary storage pool; skip
           the test if there is none.
        2. Deploy a VM (startvm defaults to true) on a host in that
           cluster; it should reach the Running state.
        3. Stop the VM.
        4. Migrate its ROOT volume to another primary storage pool in
           the same cluster and verify the volume reports the new pool.
        """
        clusters = Cluster.list(self.apiclient, zoneid=self.zone.id)
        self.assertEqual(isinstance(clusters, list), True, "Check list response returns a valid list")
        for cluster in clusters:
            storage_pools = StoragePool.list(self.apiclient, clusterid=cluster.id)
            # StoragePool.list may return None for a cluster with no pools;
            # guard before taking len().
            if storage_pools and len(storage_pools) > 1:
                self.cluster_id = cluster.id
                break
        else:
            # No suitable cluster found in any iteration.
            self.skipTest("No cluster with more than one primary storage pool to perform migrate volume test")

        hosts = Host.list(self.apiclient, clusterid=self.cluster_id)
        self.assertEqual(isinstance(hosts, list), True, "Check list response returns a valid list")
        host = hosts[0]
        self.debug("Deploying instance on host: %s" % host.id)
        self.debug("Deploying instance in the account: %s" % self.account.name)
        self.virtual_machine = VirtualMachine.create(
            self.apiclient,
            self.services["virtual_machine"],
            accountid=self.account.name,
            domainid=self.account.domainid,
            serviceofferingid=self.service_offering.id,
            diskofferingid=self.disk_offering.id,
            hostid=host.id,
            mode=self.zone.networktype,
        )

        response = self.virtual_machine.getState(self.apiclient, VirtualMachine.RUNNING)
        self.assertEqual(response[0], PASS, response[1])
        try:
            self.virtual_machine.stop(self.apiclient)
        except Exception as e:
            self.fail("failed to stop instance: %s" % e)
        volumes = Volume.list(self.apiclient, virtualmachineid=self.virtual_machine.id, type="ROOT", listall=True)
        self.assertEqual(isinstance(volumes, list), True, "Check volume list response returns a valid list")
        vol_response = volumes[0]
        # get the storage name in which the volume is currently stored
        storage_name = vol_response.storage
        storage_pools = StoragePool.list(self.apiclient, clusterid=self.cluster_id)
        # Pick any pool in the cluster other than the volume's current one.
        for spool in storage_pools:
            if spool.name == storage_name:
                continue
            self.storage_id = spool.id
            self.storage_name = spool.name
            break
        self.debug("Migrating volume to storage pool: %s" % self.storage_name)
        Volume.migrate(self.apiclient, storageid=self.storage_id, volumeid=vol_response.id)
        volume = Volume.list(self.apiclient, virtualmachineid=self.virtual_machine.id, type="ROOT", listall=True)
        self.assertEqual(volume[0].storage, self.storage_name, "Check volume migration response")

        return