Example #1
    def create_cloned_volume(self, volume, src_vref):
        """
        Creates a clone of the specified volume.

        :param volume: new volume object
        :param src_vref: source volume object
        """
        source_volume_name = self.VOLUME_PREFIX + str(src_vref.id)
        new_volume_name = self.VOLUME_PREFIX + str(volume.id)
        LOG.debug('libovsvolumedriver.ovs_clone new={0} source={1}'.format(
            volume.id, src_vref.id))

        api = self._setup_ovs_client()
        # Pick a random storagerouter to deploy the new clone on.
        # NOTE: get_storagerouters is assumed to return a mapping keyed by IP;
        # select an (ip, info) pair and keep only the IP.
        storagerouter_ip = random.choice([
            sr_ip
            for sr_ip, sr in StoragerouterHelper.get_storagerouters(api=api).items()
            if self.vpool_guid in sr['vpools_guids']
        ])
        VDiskSetup.create_clone(vdisk_name=source_volume_name,
                                new_vdisk_name=new_volume_name,
                                storagerouter_ip=storagerouter_ip,
                                api=api,
                                vpool_guid=self.vpool_guid)

        # return volume location
        storage_ip = VDiskHelper.get_vdisk_location_by_name(
            vdisk_name=new_volume_name, vpool_guid=self.vpool_guid,
            api=api)['storage_ip']
        location = self._get_volume_location(volume_name=new_volume_name,
                                             storage_ip=storage_ip)
        volume['provider_location'] = location
        return {'provider_location': volume['provider_location']}
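
For context, a minimal sketch of how such a call fits into a Cinder flow. The driver class and volume objects below are illustrative stand-ins, not names taken from this listing; only create_cloned_volume and its returned model update come from the example above.

    # Hypothetical wiring -- OVSVolumeDriver and the volumes are stand-ins.
    driver = OVSVolumeDriver(configuration=configuration)
    model_update = driver.create_cloned_volume(volume=new_volume, src_vref=source_volume)
    # Cinder merges the returned dict into the volume model, so the edge
    # location computed by _get_volume_location ends up persisted.
    print(model_update['provider_location'])
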
Example #2
    def create_volume_from_snapshot(self, volume, snapshot):
        """
        Creates a volume from a snapshot.

        :param volume: new volume object
        :param snapshot: existing snapshot object
        """

        new_volume_name = self.VOLUME_PREFIX + str(volume.id)

        api = self._setup_ovs_client()
        # Pick a random storagerouter to deploy the new volume on.
        # NOTE: get_storagerouters is assumed to return a mapping keyed by IP;
        # select an (ip, info) pair and keep only the IP.
        storagerouter_ip = random.choice([
            sr_ip
            for sr_ip, sr in StoragerouterHelper.get_storagerouters(api=api).items()
            if self.vpool_guid in sr['vpools_guids']
        ])
        VDiskSetup.create_clone(new_vdisk_name=new_volume_name,
                                storagerouter_ip=storagerouter_ip,
                                api=api,
                                vpool_guid=self.vpool_guid,
                                snapshot_name=snapshot['name'])

        # return volume location
        storage_ip = VDiskHelper.get_vdisk_location_by_name(
            vdisk_name=new_volume_name, vpool_guid=self.vpool_guid,
            api=api)['storage_ip']
        location = self._get_volume_location(volume_name=new_volume_name,
                                             storage_ip=storage_ip)
        volume['provider_location'] = location

        LOG.debug('libovsvolumedriver.ovs_clone_from_snapshot {0} '.format(
            volume.id))
        return {'provider_location': volume['provider_location']}
Example #3
 def _prepare_for_scrubbing(cls, vdisks, storagedriver, fio_bin_location,
                            is_ee):
     """
     Writes data to the vdisks
     :param vdisks: list of vdisks
     :return: 
     """
     client = SSHClient(storagedriver.storagerouter, username='******')
      edge_configuration = {'fio_bin_location': fio_bin_location,
                            'hostname': storagedriver.storage_ip,
                            'port': storagedriver.ports['edge'],
                            'protocol': storagedriver.cluster_node_config['network_server_uri'].split(':')[0],
                            'volumenames': []}
     if is_ee is True:
         edge_configuration.update(cls.get_shell_user())
     for vdisk in vdisks:
         edge_configuration['volumenames'].append(
             vdisk.devicename.rsplit('.', 1)[0].split('/', 1)[1])
      for _ in xrange(2):  # A single fio run writes at most the volume size; run twice to write enough data
         DataWriter.write_data_fio(client=client,
                                   fio_configuration={
                                       'io_size': cls.SIZE_VDISK,
                                       'configuration': (0, 100)
                                   },
                                   edge_configuration=edge_configuration,
                                   screen=False,
                                   loop_screen=False)
      for vdisk in vdisks:  # Snapshot to give the volumedriver a point of reference to scrub towards
          VDiskSetup.create_snapshot(snapshot_name='{0}_snapshot01'.format(vdisk.name),
                                     vdisk_name=vdisk.name,
                                     vpool_name=vdisk.vpool.name,
                                     consistent=False,
                                     sticky=False)
     stored_map = {}
     for vdisk in vdisks:
         stored_map[vdisk.guid] = vdisk.statistics['stored']
          cls.LOGGER.info("Logged {0} stored data for VDisk {1} in mapper".format(vdisk.statistics['stored'], vdisk.name))
     return stored_map
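
A hedged sketch of how the returned mapping can be consumed to verify that scrubbing reclaimed space. The class name ScrubbingChecks is assumed, the scrub trigger itself is elided, and invalidate_dynamics is assumed to refresh the DAL statistics.

    stored_before = ScrubbingChecks._prepare_for_scrubbing(vdisks, storagedriver, fio_bin_location, is_ee)
    # ... trigger a scrub run here ...
    for vdisk in vdisks:
        vdisk.invalidate_dynamics(['statistics'])  # assumed DAL refresh call
        assert vdisk.statistics['stored'] < stored_before[vdisk.guid], \
            'No space was reclaimed for {0}'.format(vdisk.name)
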
Example #4
 def create_vdisks(cls,
                   storagedriver,
                   amount_vdisks=AMOUNT_VDISKS_TO_SCRUB,
                   size=SIZE_VDISK,
                   cloned=False):
      """
      Create the vdisks that will be scrubbed
      :param storagedriver: storagedriver to deploy the vdisks on
      :param amount_vdisks: amount of vdisks (or clones) to create
      :param size: size of a single vdisk in bytes
      :param cloned: create clones of a single parent vdisk instead of plain vdisks
      :return: dict with the created 'parents' and 'clones' vdisk objects
      """
      vpool = storagedriver.vpool
      cls.LOGGER.info("Start deploying vdisks for scrubbing with clone status: {0}".format(cloned))
      vdisks = {'parents': [],  # Non-cloned vdisks
                'clones': []}  # Clones of the parent vdisk
     if cloned is True:
          parent_vdisk_name = '{0}_clone_parent_{1}'.format(cls.PREFIX, str(0).zfill(3))
          parent_vdisk_guid = VDiskSetup.create_vdisk(
              vdisk_name=parent_vdisk_name,
              vpool_name=vpool.name,
              size=size,
              storagerouter_ip=storagedriver.storagerouter.ip)
          parent_vdisk = VDiskHelper.get_vdisk_by_guid(parent_vdisk_guid)
          vdisks['parents'].append(parent_vdisk)
          for vdisk_nr in xrange(amount_vdisks):
              clone_vdisk_name = '{0}_clone{1}'.format(parent_vdisk.name,
                                                       str(len(vdisks['clones']) + 1).zfill(3))
              cloned_vdisk = VDiskHelper.get_vdisk_by_guid(
                  VDiskSetup.create_clone(vdisk_name=parent_vdisk_name,
                                          vpool_name=vpool.name,
                                          new_vdisk_name=clone_vdisk_name,
                                          storagerouter_ip=storagedriver.storagerouter.ip)['vdisk_guid'])
              vdisks['clones'].append(cloned_vdisk)
     else:
         for vdisk_nr in xrange(amount_vdisks):
              vdisk_name = '{0}_{1}'.format(cls.PREFIX, str(vdisk_nr).zfill(3))
             vdisk_guid = VDiskSetup.create_vdisk(
                 vdisk_name=vdisk_name,
                 vpool_name=vpool.name,
                 size=size,
                 storagerouter_ip=storagedriver.storagerouter.ip)
             vdisk = VDiskHelper.get_vdisk_by_guid(vdisk_guid)
             vdisks['parents'].append(vdisk)
     return vdisks
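
A short usage sketch of the dict this returns; the owning class name (ScrubbingChecks) and the parent_vdisk_guid attribute are assumptions.

    vdisks = ScrubbingChecks.create_vdisks(storagedriver, amount_vdisks=5, cloned=True)
    parent = vdisks['parents'][0]  # cloned=True creates exactly one parent
    for clone in vdisks['clones']:
        assert clone.parent_vdisk_guid == parent.guid  # assumed DAL attribute
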
Example #5
    def create_snapshot(self, snapshot):
        """
        Creates a snapshot

        The snapshot will always be marked as inconsistent.
        """
        volume_name = self.VOLUME_PREFIX + snapshot['volume_id']
        api = self._setup_ovs_client()
        VDiskSetup.create_snapshot(snapshot_name=snapshot['name'],
                                   vdisk_name=volume_name,
                                   vpool_guid=self.vpool_guid,
                                   api=api,
                                   consistent=False,
                                   sticky=True)

        LOG.debug('libovsvolumedriver.ovs_snapshot_create: {0} {1}'.format(
            volume_name, str(snapshot['name'])))
Example #6
 def _trigger_mds_issue(cls, vpool, volume_bundle, target_storagerouter_guid, logger=LOGGER):
     """
      voldrv A, voldrv B, volume X on A
      ensure_safety runs on X, giving it a master on A and a slave on B -> done on create
      then you manually tell the voldrv that X only has A as MDS
      B then stays slave and keeps catching up
      then you scrub X, and those scrub results do not get applied on B
      because B effectively does not know about them
      and then - this is somewhat of an educated guess here - you manually reconfigure B as slave
      and do a move, or rather a failover (see above for how to do that)
      then it will suddenly use B as master, but B has no scrub results applied
     """
     logger.debug('Starting the mds triggering.')
     cls._set_mds_safety(vpool, 2, checkup=True)  # Will trigger mds checkup which should create a slave again
     # Move the volume to set the slave as the master
      for vdisk_object in volume_bundle.itervalues():  # Only the vdisk objects are needed here
         VDiskSetup.move_vdisk(vdisk_guid=vdisk_object.guid, target_storagerouter_guid=target_storagerouter_guid)
Example #7
    def backup_volume(self, context, backup, backup_service):
        """
        Create a new backup from an existing volume.

        A backup is always marked as consistent
        """
        volume_name = self.VOLUME_PREFIX + str(backup['volume_id'])
        backup_snapshot_name = self.SNAPSHOT_BACKUP_PREFIX + str(backup['id'])
        LOG.debug('libovsvolumedriver.ovs_backup_create {0} {1}'.format(
            volume_name, backup_snapshot_name))

        api = self._setup_ovs_client()
        VDiskSetup.create_snapshot(snapshot_name=backup_snapshot_name,
                                   vdisk_name=volume_name,
                                   vpool_guid=self.vpool_guid,
                                   api=api,
                                   consistent=True,
                                   sticky=True)
Example #8
    def restore_backup(self, context, backup, volume, backup_service):
        """
        Restore an existing backup to a new or existing volume.
        """

        volume_name = self.VOLUME_PREFIX + str(volume.id)
        backup_snapshot_name = self.SNAPSHOT_BACKUP_PREFIX + str(backup['id'])
        LOG.debug('libovsvolumedriver.ovs_backup_restore {0} {1}'.format(
            volume_name, backup_snapshot_name))

        api = self._setup_ovs_client()
        snapshot, vdisk_guid, vdisk_name = VDiskHelper.get_snapshot_by_name(
            snapshot_name=backup_snapshot_name,
            vpool_guid=self.vpool_guid,
            api=api)
        if vdisk_name == volume_name:
            # rollback
            VDiskSetup.rollback_to_snapshot(vpool_guid=self.vpool_guid,
                                            snapshot_name=backup_snapshot_name,
                                            api=api)
        else:
            # Restore to a new volume.
            # NOTE: get_storagerouters is assumed to return a mapping keyed by IP.
            storagerouter_ip = random.choice([
                sr_ip for sr_ip, sr in StoragerouterHelper.get_storagerouters(
                    api=api).items() if self.vpool_guid in sr['vpools_guids']
            ])
            VDiskSetup.create_clone(vpool_guid=self.vpool_guid,
                                    new_vdisk_name=volume_name,
                                    storagerouter_ip=storagerouter_ip,
                                    api=api,
                                    snapshot_name=backup_snapshot_name)

            # return volume location
            storage_ip = VDiskHelper.get_vdisk_location_by_name(
                vdisk_name=volume_name, vpool_guid=self.vpool_guid,
                api=api)['storage_ip']
            location = self._get_volume_location(volume_name=volume_name,
                                                 storage_ip=storage_ip)
            volume['provider_location'] = location
            return {'provider_location': volume['provider_location']}
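
For context, a hedged sketch of the two restore paths; the driver instance and volume objects are illustrative stand-ins. Restoring onto the snapshot's own volume triggers a rollback and returns None, while any other target volume receives a fresh clone plus a model update.

    driver.restore_backup(context, backup, original_volume, backup_service)  # rollback path, returns None
    update = driver.restore_backup(context, backup, new_volume, backup_service)  # clone path
    print(update['provider_location'])  # edge location of the freshly cloned volume
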
Example #9
 def _start_snapshots(cls, vdisks, event, interval=60):
     """
      Threading code that creates snapshots every x seconds
      :param vdisks: vdisks to snapshot
      :type vdisks: list(ovs.dal.hybrids.vdisk.VDisk)
      :param event: threading event that will stop the while loop
      :type event: threading._Event
      :param interval: time in seconds between taking the snapshots
      :type interval: int
     :return: None
     :rtype: NoneType
     """
     while not event.is_set():
         start = time.time()
          for vdisk in vdisks:
              VDiskSetup.create_snapshot(snapshot_name='{0}_{1}'.format(vdisk.name, datetime.today().strftime('%Y-%m-%d %H:%M:%S')),
                                         vdisk_name=vdisk.devicename,
                                         vpool_name=vdisk.vpool.name,
                                         consistent=False,
                                         sticky=False)
          duration = time.time() - start
          time.sleep(max(0, interval - duration))  # Keep a steady cadence; skip sleeping when a pass overran the interval
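
A minimal wiring sketch for this helper, assuming it lives on a test class (here called SnapshotChecks for illustration):

    import threading

    stop_event = threading.Event()
    snapshotter = threading.Thread(target=SnapshotChecks._start_snapshots,
                                   args=(vdisks, stop_event),
                                   kwargs={'interval': 60})
    snapshotter.start()
    # ... run the scenario that needs background snapshots ...
    stop_event.set()  # ends the while-loop after the current pass
    snapshotter.join()
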
Example #10
    def prepare_vm_disks(self,
                         source_storagedriver,
                         cloud_image_path,
                         cloud_init_loc,
                         vm_name,
                         data_disk_size,
                         edge_user_info=None,
                         logger=LOGGER):
        """
        Create all vdisks necessary to deploy the bulk of VMs
        :param source_storagedriver: storagedriver to create the disks on
        :param cloud_image_path: path to the cloud image
        :param cloud_init_loc: path to the cloud-init script
        :param vm_name: name prefix for the vms
        :param data_disk_size: size of the data disk
        :param edge_user_info: user information for the edge. Optional
        :param logger: logging instance
        :return: None
        """
        logger.info('Starting with preparing vm disk(s)')
        vm_amount = self.amount_of_vms
        if isinstance(edge_user_info, dict):
            required_edge_params = {
                'username': (str, None, False),
                'password': (str, None, False)
            }
            ExtensionsToolbox.verify_required_params(required_edge_params,
                                                     edge_user_info)
        if edge_user_info is None:
            edge_user_info = {}

        protocol = source_storagedriver.cluster_node_config[
            'network_server_uri'].split(':')[0]
        vpool = source_storagedriver.vpool
        client = SSHClient(source_storagedriver.storagerouter, username='******')

        edge_configuration = {
            'ip': source_storagedriver.storage_ip,
            'port': source_storagedriver.ports['edge'],
            'protocol': protocol
        }
        edge_configuration.update(edge_user_info)

        original_boot_disk_name = None  # Cloning purposes
        original_data_disk_name = None  # Cloning purposes

        connection_messages = []
        vm_info = {}
        volume_amount = 0
        base_vm_name = vm_name  # Keep the original prefix; vm_name is rebuilt per VM below

        for vm_number in xrange(0, vm_amount):
            filled_number = str(vm_number).zfill(3)
            vm_name = '{0}-{1}'.format(base_vm_name, filled_number)  # Avoids accumulating suffixes across iterations
            create_msg = '{0}_{1}'.format(str(uuid.uuid4()), vm_name)
            boot_vdisk_name = '{0}_vdisk_boot_{1}'.format(
                vm_name, filled_number)
            data_vdisk_name = '{0}_vdisk_data_{1}'.format(
                vm_name, filled_number)
            cd_vdisk_name = '{0}_vdisk_cd_{1}'.format(vm_name, filled_number)
            boot_vdisk_path = '/mnt/{0}/{1}.raw'.format(
                vpool.name, boot_vdisk_name)
            data_vdisk_path = '/mnt/{0}/{1}.raw'.format(
                vpool.name, data_vdisk_name)
            cd_vdisk_path = '/mnt/{0}/{1}.raw'.format(vpool.name,
                                                      cd_vdisk_name)
            if vm_number == 0:
                try:
                    # Create VDISKs
                    self.convert_image(client, cloud_image_path,
                                       boot_vdisk_name, edge_configuration)
                except RuntimeError as ex:
                    logger.error('Could not convert the image. Got {0}'.format(str(ex)))
                    raise
                boot_vdisk = VDiskHelper.get_vdisk_by_name(
                    '{0}.raw'.format(boot_vdisk_name), vpool.name)
                original_boot_disk_name = boot_vdisk_name
                logger.info('Boot VDisk successfully created.')
                try:
                    data_vdisk = VDiskHelper.get_vdisk_by_guid(
                        VDiskSetup.create_vdisk(
                            data_vdisk_name, vpool.name, data_disk_size,
                            source_storagedriver.storage_ip))
                    logger.info('VDisk data_vdisk successfully created!')
                except TimeOutError:
                    logger.error(
                        'The creation of the data vdisk has timed out.')
                    raise
                except RuntimeError as ex:
                    logger.error(
                        'Could not create the data vdisk. Got {0}'.format(
                            str(ex)))
                    raise
                original_data_disk_name = data_vdisk_name
            else:
                # Rely on cloning to speed up the process
                boot_vdisk_info = VDiskSetup.create_clone(
                    vdisk_name=original_boot_disk_name,
                    vpool_name=vpool.name,
                    new_vdisk_name=boot_vdisk_name,
                    storagerouter_ip=source_storagedriver.storage_ip)
                boot_vdisk = VDiskHelper.get_vdisk_by_guid(
                    boot_vdisk_info['vdisk_guid'])
                data_vdisk_info = VDiskSetup.create_clone(
                    vdisk_name=original_data_disk_name,
                    vpool_name=vpool.name,
                    new_vdisk_name=data_vdisk_name,
                    storagerouter_ip=source_storagedriver.storage_ip)
                data_vdisk = VDiskHelper.get_vdisk_by_guid(
                    data_vdisk_info['vdisk_guid'])
            #######################
            # GENERATE CLOUD INIT #
            #######################
            iso_loc = self._generate_cloud_init(
                client=client,
                convert_script_loc=cloud_init_loc,
                create_msg=create_msg)
            self.convert_image(client, iso_loc, cd_vdisk_name,
                               edge_configuration)
            cd_creation_time = time.time()
            cd_vdisk = None
            while cd_vdisk is None:
                if time.time() - cd_creation_time > 60:
                    raise RuntimeError('Could not fetch the cd vdisk after {0}s'.format(time.time() - cd_creation_time))
                try:
                    cd_vdisk = VDiskHelper.get_vdisk_by_name(
                        cd_vdisk_name, vpool.name)
                except VDiskNotFoundError:
                    logger.warning(
                        'Could not fetch the cd vdisk after {0}s.'.format(
                            time.time() - cd_creation_time))
                time.sleep(0.5)

            # Take snapshot to revert back to after every migrate scenario
            data_snapshot_guid = VDiskSetup.create_snapshot(
                '{0}_data'.format(vm_name),
                data_vdisk.devicename,
                vpool.name,
                consistent=False)
            vm_info[vm_name] = {'data_snapshot_guid': data_snapshot_guid,
                                'vdisks': [boot_vdisk, data_vdisk, cd_vdisk],
                                'cd_path': cd_vdisk_path,
                                'disks': [{'mountpoint': boot_vdisk_path},
                                          {'mountpoint': data_vdisk_path}],
                                'networks': [{'network': 'default', 'mac': 'RANDOM', 'model': 'e1000'}],
                                'created': False,
                                'ip': '',
                                'create_msg': create_msg}
            connection_messages.append(create_msg)
            volume_amount += len(vm_info[vm_name]['vdisks'])
            logger.info('Prepped everything for VM {0}.'.format(vm_name))

        self.vm_info = vm_info
        self.connection_messages = connection_messages
        self.volume_amount = volume_amount
Example #11
    def _rollback_vdisks(cls,
                         stored_vdisks,
                         vpool,
                         amount_checks=MAX_ROLLBACK_CHECKS,
                         timeout=ROLLBACK_TIMEOUT):
        """
        Rollback the given mapped vdisks

        :param stored_vdisks: dict with stored vdisks, snapshot, location, ...
        :type stored_vdisks: dict
        :param vpool: a valid vpool object
        :type vpool: ovs.model.hybrids.vpool
        :param amount_checks: amount of checks to perform after a vdisk has been rolled back
        :type amount_checks: int
        :param timeout: timeout between checks
        :type timeout: int
        :return: None
        """

        for stored_vdisk in stored_vdisks:
            # fetch vdisk
            vdisk = VDiskHelper.get_vdisk_by_guid(
                vdisk_guid=stored_vdisk['vdisk_guid'])

            # Commencing rollback
            cls.LOGGER.info("Starting rollback on vdisk `{0}` to first snapshot `{1}`".format(vdisk.name, stored_vdisk['snapshots'][0]))

            VDiskSetup.rollback_to_snapshot(
                vdisk_name=vdisk.name + '.raw',
                vpool_name=vpool.name,
                snapshot_id=stored_vdisk['snapshots'][0]['snapshot_guid'])

            # Poll until the rollback is reflected in the volumedriver's stored data
            tries = 0
            while tries < amount_checks:
                current_statistics = vdisk.storagedriver_client.info_volume(str(vdisk.volume_id)).stored
                if current_statistics < stored_vdisk['snapshots'][1]['stored_data']:
                    cls.LOGGER.info("VDisk `{0}` matched the requirements for rollback with {1} < {2}"
                                    .format(stored_vdisk['vdisk_guid'], current_statistics,
                                            stored_vdisk['snapshots'][1]['stored_data']))
                    break
                else:
                    tries += 1
                    cls.LOGGER.warning(
                        "Try `{0}` when checking stored data on volumedriver for VDisk "
                        "`{1}`, with currently `{2}` but it should be less than `{3}`. "
                        "Now sleeping for `{4}` seconds ...".format(
                            tries, stored_vdisk['vdisk_guid'],
                            current_statistics,
                            stored_vdisk['snapshots'][1]['stored_data'],
                            timeout))
                    time.sleep(timeout)

            # Check whether the maximum amount of checks was exceeded
            if tries == amount_checks:
                error_msg = "VDisk `{0}` should have been rolled back, but the maximum amount of checks was exceeded!".format(vdisk.name)
                cls.LOGGER.error(error_msg)
                raise RuntimeError(error_msg)
            else:
                cls.LOGGER.info("Successfully finished rolling back vdisk `{0}`".format(vdisk.name))

                # Commence deleting volumes
                cls.LOGGER.info("Starting to remove VDisk `{0}`".format(vdisk.name))
            VDiskRemover.remove_vdisk(stored_vdisk['vdisk_guid'])
            cls.LOGGER.info("Finished removing VDisk `{0}`".format(vdisk.name))
Example #12
    def _deploy_vdisks(cls,
                       vpool,
                       storagedriver,
                       size=SIZE_VDISK,
                       amount_vdisks=AMOUNT_VDISKS,
                       cloned=False):
        """
        Deploy X amount of vdisks, write some data to it & snapshot
        :param vpool: a valid vpool object
        :type vpool: ovs.model.hybrids.vpool
        :param storagedriver: a valid storagedriver object
        :type storagedriver: ovs.mode.hybrids.storagedriver
        :param size: size of a single vdisk in bytes
        :type size: int
        :return: tuple[0]: stored vdisks, snapshot, location; tuple[1]: base_vdisks_guids that are used for clones
        [{
            'vdisk_guid': u'b789b23e-1077-4d96-9ec2-a7cc3785686c',
            'snapshots': {
                0: {
                    'snapshot_name': 'integration-tests-rollback0-snapshot0',
                    'snapshot_guid': u'fbd1c961-7d33-4bd3-8c92-c8a3c52eb74f',
                    'stored_data': 52428800
                },
                1: {
                    'snapshot_name': 'integration-tests-rollback0-snapshot1',
                    'snapshot_guid': u'15eb7119-d984-4c84-985c-1fb1cc44a95e',
                    'stored_data': 104857600
                }
            }
        }, {
            'vdisk_guid': u'9c2cd023-d15b-4994-8a62-07edc36d748c',
            'snapshots': {
                0: {
                    'snapshot_name': 'integration-tests-rollback1-snapshot0',
                    'snapshot_guid': u'c7500fec-cc5a-4593-89dc-fca78dcb2783',
                    'stored_data': 52428800
                },
                1: {
                    'snapshot_name': 'integration-tests-rollback1-snapshot1',
                    'snapshot_guid': u'e46bd42b-516d-4636-9d15-d9c1a8f489e4',
                    'stored_data': 104857600
                }
            }
        }], ['8858717a-e6d2-11e6-831d-00249b133798', '8c644a82-e6d2-11e6-8efe-00249b133798']
        :rtype: tuple
        """

        cls.LOGGER.info("Starting deploying {0} vdisks with clone status: {1}".format(amount_vdisks, cloned))

        client = SSHClient(storagedriver.storagerouter.ip, username='******')
        vdisks = []
        base_vdisks = []
        for vdisk_nr in xrange(amount_vdisks):
            # create a vdisk & collect results
            vdisk_name = cls.VDISK_NAME + str(vdisk_nr)

            vdisk_guid = VDiskSetup.create_vdisk(
                vdisk_name=vdisk_name + '.raw',
                vpool_name=vpool.name,
                size=size,
                storagerouter_ip=storagedriver.storagerouter.ip)
            # clone
            if cloned:
                clone_vdisk_name = vdisk_name + '_clone'
                cls.LOGGER.info("Creating clone from vdisk `{0}` with new name `{1}`".format(vdisk_name, clone_vdisk_name))
                base_vdisks.append(str(vdisk_guid))
                cls.LOGGER.info("Stored old base vdisk guid in list: {0}".format(vdisk_guid))
                vdisk_guid = VDiskSetup.create_clone(
                    vdisk_name=vdisk_name + '.raw',
                    vpool_name=vpool.name,
                    new_vdisk_name=clone_vdisk_name,
                    storagerouter_ip=storagedriver.storagerouter.ip
                )['vdisk_guid']
                vdisk_name = clone_vdisk_name

            vdisk = VDiskHelper.get_vdisk_by_guid(vdisk_guid)
            results = {'vdisk_guid': vdisk_guid, 'snapshots': {}}
            cls.LOGGER.info("Finished deploying vdisk `{0}`".format(vdisk_name))

            cls.LOGGER.info("Starting writing & snapshotting vdisk `{0}`".format(vdisk_name))
            for i in xrange(cls.WRITE_AMOUNT_OF_TIMES):
                # write some data
                try:
                    cls.LOGGER.info("Starting FIO on vdisk `{0}`".format(vdisk_name))
                    client.run(["fio", "--name=test",
                                "--filename=/mnt/{0}/{1}.raw".format(vpool.name, vdisk_name),
                                "--ioengine=libaio", "--iodepth=4", "--rw=write",
                                "--bs=4k", "--direct=1", "--size={0}b".format(size)])
                    cls.LOGGER.info("Finished FIO on vdisk `{0}`".format(vdisk_name))

                except subprocess.CalledProcessError as ex:
                    raise VDiskNotFoundError(
                        "VDisk `/mnt/{0}/{1}.raw` does not seem to be present "
                        "or has problems on storagerouter `{2}`: {3}".format(
                            vpool.name, vdisk_name,
                            storagedriver.storagerouter.ip, str(ex)))
                # Create snapshot
                cls.LOGGER.info("Starting snapshot creation on vdisk `{0}`".format(vdisk_name))
                snapshot_guid = VDiskSetup.create_snapshot(
                    snapshot_name=vdisk_name + '-snapshot{0}'.format(i),
                    vdisk_name=vdisk_name + '.raw',
                    vpool_name=vpool.name,
                    consistent=False,
                    sticky=False)
                # Save the current stored_data for comparison
                stored_data = vdisk.storagedriver_client.info_volume(str(vdisk.volume_id)).stored
                cls.LOGGER.info("Logged `{0}` stored data for VDisk `{1}` in mapper".format(stored_data, vdisk_name))
                # add details to snapshot mapper
                results['snapshots'][i] = {
                    'snapshot_guid': snapshot_guid,
                    'snapshot_name': vdisk_name + '-snapshot{0}'.format(i),
                    'stored_data': stored_data
                }
                cls.LOGGER.info("Snapshot creation finished on vdisk `{0}`".format(vdisk_name))
            vdisks.append(results)
            cls.LOGGER.info("Finished writing & snapshotting vdisk `{0}`. Results: {1}".format(vdisk_name, results))
        return vdisks, base_vdisks
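
For context, a hedged sketch of how this deployment pairs with _rollback_vdisks from Example #11; the wiring is illustrative, though RollbackChecks and VDiskRemover do appear in this listing.

    stored_vdisks, base_vdisk_guids = RollbackChecks._deploy_vdisks(vpool=vpool,
                                                                    storagedriver=storagedriver,
                                                                    cloned=True)
    # Each entry carries two snapshots; rolling back to snapshot 0 should push
    # the stored data below the level recorded for snapshot 1.
    RollbackChecks._rollback_vdisks(stored_vdisks=stored_vdisks, vpool=vpool)
    # The base vdisks that served as clone sources still need cleanup.
    for base_guid in base_vdisk_guids:
        VDiskRemover.remove_vdisk(base_guid)
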
Example #13
 def _execute_test(cls, amount_vdisks=AMOUNT_VDISKS):
     """
      Executes an offline migration
     :param amount_vdisks: amount of vdisks to test
     :type amount_vdisks: int
     :return:
     """
     cls.LOGGER.info("Starting offline migrate test.")
     vpool = None  # Get a suitable vpool
     for vp in VPoolHelper.get_vpools():
         if len(vp.storagedrivers) >= 2:
             vpool = vp
             break
     assert vpool is not None, "Not enough vPools to test. Requires 1 with at least 2 storagedrivers and found 0."
     ##########################
     # Setup base information #
     ##########################
     # Executor storagedriver_1 is current system
     std_1 = random.choice([st for st in vpool.storagedrivers])
     # Get a random other storagedriver to migrate to
     std_2 = random.choice([st for st in vpool.storagedrivers if st != std_1])
     # Cache to validate properties
     values_to_check = {
         'source_std': std_1.serialize(),
         'target_std': std_2.serialize()
     }
     ###############################
     # start deploying & migrating #
     ###############################
     created_vdisks = []
     try:
         for i in xrange(amount_vdisks):
             ################
             # create vdisk #
             ################
             vdisk_name = "{0}_{1}".format(cls.TEST_NAME, i)
             try:
                 vdisk_guid = VDiskSetup.create_vdisk(vdisk_name=vdisk_name + '.raw',
                                                      vpool_name=vpool.name,
                                                      size=cls.AMOUNT_TO_WRITE * 5,
                                                      storagerouter_ip=std_1.storagerouter.ip)
                 vdisk = VDiskHelper.get_vdisk_by_guid(vdisk_guid)  # Fetch to validate if it was properly created
                 created_vdisks.append(vdisk)
                 values_to_check['vdisk'] = vdisk.serialize()
             except TimeOutError:
                 cls.LOGGER.error("Creation of the vdisk has timed out.")
                 raise
              except RuntimeError as ex:  # TimeOutError is already handled above
                  cls.LOGGER.error("Creation of vdisk failed: {0}".format(ex))
                 raise
             else:
                 time.sleep(cls.SLEEP_TIME)
                 try:
                     cls.LOGGER.info("Moving vdisk {0} from {1} to {2}".format(vdisk_guid, std_1.storage_ip, std_2.storage_ip))
                     VDiskSetup.move_vdisk(vdisk_guid=vdisk_guid, target_storagerouter_guid=std_2.storagerouter_guid)
                     time.sleep(cls.SLEEP_TIME)
                     cls.LOGGER.info("Validating move...")
                     cls._validate_move(values_to_check)
                 except Exception as ex:
                      cls.LOGGER.exception('Failed during migration: {0}'.format(ex))
                     raise
     finally:
         for vdisk in created_vdisks:
             VDiskRemover.remove_vdisk(vdisk.guid)
     cls.LOGGER.info("Finished offline migrate test.")
Example #14
    def validate_add_extend_remove_vpool(cls, timeout=ADD_EXTEND_REMOVE_VPOOL_TIMEOUT):
        """
        Validate if we can add, extend and/or remove a vPool, testing the following scenarios:
            * Normal with no accelerated backend
            * Accelerated vPool with hdd_backend & ssd_backend

        INFO:
            * at least 2 storagerouters should be available
            * at least 2 backends should be available with default preset

        :param timeout: specify a timeout
        :type timeout: int
        :return:
        """
        cls.LOGGER.info("Starting to validate add-extend-remove vpool")


        storagerouter_ips = []
        for storagerouter_ip in StoragerouterHelper.get_storagerouter_ips():
            try:
                RoleValidation.check_required_roles(VPoolSetup.REQUIRED_VPOOL_ROLES, storagerouter_ip, "LOCAL")
                storagerouter_ips.append(storagerouter_ip)
                cls.LOGGER.info("Added `{0}` to list of eligible storagerouters".format(storagerouter_ip))
            except RuntimeError as ex:
                cls.LOGGER.warning("Did not add `{0}` to list of eligible "
                                              "storagerouters because: {1}".format(storagerouter_ip, ex))
                pass

        # Filter storagerouters without required roles
        assert len(storagerouter_ips) > 1, "We need at least 2 storagerouters with valid roles: {0}"\
            .format(storagerouter_ips)
        alba_backends = BackendHelper.get_alba_backends()
        assert len(alba_backends) >= 2, "We need at least 2 or more backends!"

        # Global vdisk details
        vdisk_deployment_ip = storagerouter_ips[0]

        # Determine backends (2)
        hdd_backend = alba_backends[0]
        ssd_backend = alba_backends[1]

        # Add preset to all alba_backends (we only use the first two as seen above)
        for alba_backend in alba_backends[0:2]:
            cls.LOGGER.info("Adding custom preset to backend {0}".format(alba_backend.name))
            preset_result = BackendSetup.add_preset(albabackend_name=alba_backend.name,
                                                    preset_details=cls.PRESET,
                                                    timeout=cls.PRESET_CREATE_TIMEOUT)
            assert preset_result is True, 'Failed to add preset to backend {0}'.format(alba_backend.name)
            cls.LOGGER.info("Finished adding custom preset to backend {0}".format(alba_backend.name))

        # Vpool configs, regressing https://github.com/openvstorage/alba/issues/560 & more
        vpool_configs = {
            "no_fragment_cache_on_disk": {
                "strategy": {"cache_on_read": False, "cache_on_write": False},
                "location": "disk"
            },
            "no_fragment_cache_on_accel": {
                "strategy": {"cache_on_read": False, "cache_on_write": False},
                "location": "backend",
                "backend": {
                    "name": ssd_backend.name,
                    "preset": cls.PRESET['name']
                }
            }
        }

        for cfg_name, cfg in vpool_configs.iteritems():
            # Create vpool
            block_cache_cfg = None
            if SystemHelper.get_ovs_version().lower() == 'ee':
                block_cache_cfg = cfg
            for storagerouter_ip in storagerouter_ips:
                cls.LOGGER.info("Add/extend vPool `{0}` on storagerouter `{1}`".format(cls.VPOOL_NAME, storagerouter_ip))
                start = time.time()
                try:
                    cls._add_vpool(vpool_name=cls.VPOOL_NAME, fragment_cache_cfg=cfg,
                                   block_cache_cfg=block_cache_cfg, albabackend_name=hdd_backend.name,
                                   timeout=timeout, preset_name=cls.PRESET['name'],
                                   storagerouter_ip=storagerouter_ip)
                except TimeOutError:
                    cls.LOGGER.warning('Adding/extending the vpool has timed out after {0}s. Polling for another {1}s.'
                                       .format(timeout, cls.ADD_EXTEND_REMOVE_VPOOL_TIMEOUT_FORGIVING - timeout))
                    # Let's be a bit forgiving and give the fwk 5 mins to actually complete the task
                    vpool = VPoolHelper.get_vpool_by_name(cls.VPOOL_NAME)
                    while vpool.status != 'RUNNING':
                        if time.time() - start > cls.ADD_EXTEND_REMOVE_VPOOL_TIMEOUT_FORGIVING:
                            raise RuntimeError('The vpool was not added or extended after {0}s'.format(cls.ADD_EXTEND_REMOVE_VPOOL_TIMEOUT_FORGIVING))
                        cls.LOGGER.warning('Vpool status is still {0} after {1}s.'.format(vpool.status, time.time() - start))
                        time.sleep(1)
                        vpool.discard()
                    cls.LOGGER.warning('The vpool was added or extended after {0}s.'.format(time.time() - start))
                except RuntimeError as ex:
                    cls.LOGGER.error('Adding/extending the vpool has failed with {0}.'.format(str(ex)))
                    raise
                # Check #proxies
                vpool = VPoolHelper.get_vpool_by_name(cls.VPOOL_NAME)
                for storagedriver in vpool.storagedrivers:
                    assert len(storagedriver.alba_proxies) == 2, 'The vpool did not get set up with 2 proxies. Found {0} instead.'.format(len(storagedriver.alba_proxies))
            # Deploy a vdisk
            vdisk_name = cls.PREFIX + cfg_name
            cls.LOGGER.info("Starting to create vdisk `{0}` on vPool `{1}` with size `{2}` on node `{3}`"
                                       .format(vdisk_name, cls.VPOOL_NAME, cls.VDISK_SIZE, vdisk_deployment_ip))
            VDiskSetup.create_vdisk(vdisk_name=vdisk_name + '.raw',
                                    vpool_name=cls.VPOOL_NAME,
                                    size=cls.VDISK_SIZE,
                                    storagerouter_ip=vdisk_deployment_ip,
                                    timeout=cls.VDISK_CREATE_TIMEOUT)
            cls.LOGGER.info("Finished creating vdisk `{0}`".format(vdisk_name))
            cls.LOGGER.info("Starting to delete vdisk `{0}`".format(vdisk_name))
            VDiskRemover.remove_vdisk_by_name(vdisk_name, cls.VPOOL_NAME)
            cls.LOGGER.info("Finished deleting vdisk `{0}`".format(vdisk_name))

            # Delete vpool
            for storagerouter_ip in storagerouter_ips:
                storagedrivers_to_delete = len(vpool.storagedrivers)
                cls.LOGGER.info("Deleting vpool `{0}` on storagerouter `{1}`".format(cls.VPOOL_NAME, storagerouter_ip))
                start = time.time()  # Restart the clock; the forgiving-timeout check below should not reuse the add/extend start time
                try:
                    VPoolRemover.remove_vpool(vpool_name=cls.VPOOL_NAME, storagerouter_ip=storagerouter_ip, timeout=timeout)
                except TimeOutError:
                    try:
                        vpool.discard()  # Discard is needed to update the vpool status as it was running before
                        while vpool.status != 'RUNNING':
                            cls.LOGGER.warning('Removal/shrinking the vpool has timed out after {0}s. Polling for another {1}s.'
                                               .format(timeout, cls.ADD_EXTEND_REMOVE_VPOOL_TIMEOUT_FORGIVING - timeout))
                            if time.time() - start > cls.ADD_EXTEND_REMOVE_VPOOL_TIMEOUT_FORGIVING:
                                raise RuntimeError('The vpool was not removed or extended after {0}s'.format(cls.ADD_EXTEND_REMOVE_VPOOL_TIMEOUT_FORGIVING))
                            cls.LOGGER.warning('Vpool status is still {0} after {1}s.'.format(vpool.status, time.time() - start))
                            time.sleep(1)
                            vpool.discard()
                    except ObjectNotFoundException:
                        if storagedrivers_to_delete != 1:  # Should be last one
                            raise
                except RuntimeError as ex:
                    cls.LOGGER.error('Shrinking/removing the vpool has failed with {0}.'.format(str(ex)))
                    raise
            cls.LOGGER.info('Vpool has been fully removed.')
        # Delete presets
        for alba_backend in alba_backends[0:2]:
            cls.LOGGER.info("Removing custom preset from backend {0}".format(alba_backend.name))
            remove_preset_result = BackendRemover.remove_preset(albabackend_name=alba_backend.name,
                                                                preset_name=cls.PRESET['name'],
                                                                timeout=cls.PRESET_REMOVE_TIMEOUT)
            assert remove_preset_result is True, 'Failed to remove preset from backend {0}'.format(alba_backend.name)
            cls.LOGGER.info("Finshed removing custom preset from backend {0}".format(alba_backend.name))

        cls.LOGGER.info("Finished to validate add-extend-remove vpool")
Example #15
    def validate_vdisk_clone(cls,
                             amount_vdisks=AMOUNT_VDISKS,
                             amount_to_write=AMOUNT_TO_WRITE):
        """
        Validate if vdisk deployment works via various ways
        INFO: 1 vPool should be available on 2 storagerouters

        :return:
        """

        cls.LOGGER.info("Starting to regress template memleak vdisks")

        vpools = VPoolHelper.get_vpools()
        assert len(vpools) >= 1, "Not enough vPools to test"

        try:
            vpool = next(
                (vpool for vpool in vpools if len(vpool.storagedrivers) >= 2))
        except StopIteration:
            assert False, "Not enough Storagedrivers to test"

        # setup base information
        storagedriver_source = vpool.storagedrivers[0]
        client = SSHClient(storagedriver_source.storage_ip, username='******')

        # create required vdisk for test
        vdisk_name = VDiskTemplateChecks.PREFIX + '1'
        assert VDiskSetup.create_vdisk(
            vdisk_name=vdisk_name + '.raw',
            vpool_name=vpool.name,
            size=VDiskTemplateChecks.VDISK_SIZE,
            storagerouter_ip=storagedriver_source.storagerouter.ip) is not None
        time.sleep(VDiskTemplateChecks.TEMPLATE_SLEEP_AFTER_CREATE)

        ##################
        # template vdisk #
        ##################

        VDiskSetup.set_vdisk_as_template(vdisk_name=vdisk_name + '.raw',
                                         vpool_name=vpool.name)
        time.sleep(VDiskTemplateChecks.TEMPLATE_SLEEP_AFTER_CREATE)

        ######################
        # log current memory #
        ######################

        memory_usage_beginning = StatisticsHelper.get_current_memory_usage(
            storagedriver_source.storage_ip)
        cls.LOGGER.info("Starting memory usage monitor: {0}/{1}".format(
            memory_usage_beginning[0], memory_usage_beginning[1]))
        pid = int(
            client.run(
                "pgrep -a volumedriver | grep {0} | cut -d ' ' -f 1".format(
                    vpool.name),
                allow_insecure=True))
        cls.LOGGER.info(
            "Starting extended memory monitor on pid {0}: \n{1}".format(
                pid,
                StatisticsHelper.get_current_memory_usage_of_process(
                    storagedriver_source.storage_ip, pid)))

        ##################################################################
        # create vdisks from template, perform fio and delete them again #
        ##################################################################

        for vdisk in xrange(amount_vdisks):
            # create vdisk from template
            clone_vdisk_name = vdisk_name + '-template-' + str(vdisk)
            VDiskSetup.create_from_template(
                vdisk_name=vdisk_name + '.raw',
                vpool_name=vpool.name,
                new_vdisk_name=clone_vdisk_name + '.raw',
                storagerouter_ip=storagedriver_source.storagerouter.ip)
            # perform fio test
            client.run([
                "fio", "--name=test",
                "--filename=/mnt/{0}/{1}.raw".format(vpool.name,
                                                     clone_vdisk_name),
                "--ioengine=libaio", "--iodepth=4", "--rw=write", "--bs=4k",
                "--direct=1", "--size={0}M".format(amount_to_write),
                "--output-format=json", "--output={0}.json".format(vdisk_name)
            ])
            # delete vdisk
            time.sleep(cls.TEMPLATE_SLEEP_BEFORE_DELETE)
            VDiskRemover.remove_vdisk_by_name(vdisk_name=clone_vdisk_name,
                                              vpool_name=vpool.name)

        ###################
        # remove template #
        ###################

        time.sleep(cls.TEMPLATE_SLEEP_BEFORE_DELETE)
        VDiskRemover.remove_vtemplate_by_name(vdisk_name=vdisk_name,
                                              vpool_name=vpool.name)

        ######################
        # log current memory #
        ######################

        memory_usage_ending = StatisticsHelper.get_current_memory_usage(
            storagedriver_source.storage_ip)
        cls.LOGGER.info("Finished memory usage monitor: {0}/{1}".format(
            memory_usage_ending[0], memory_usage_ending[1]))
        pid = int(
            client.run(
                "pgrep -a volumedriver | grep {0} | cut -d ' ' -f 1".format(
                    vpool.name),
                allow_insecure=True))
        cls.LOGGER.info(
            "Finished extended memory monitor on pid {0}: \n{1}".format(
                pid,
                StatisticsHelper.get_current_memory_usage_of_process(
                    storagedriver_source.storage_ip, pid)))

        cls.LOGGER.info("Finished to regress template memleak vdisks")
Example #16
    def validate_vdisk_clone(cls):
        """
        Validate if vdisk deployment works via various ways
        INFO: 1 vPool should be available on 2 storagerouters
        :return:
        """
        cls.LOGGER.info("Starting to validate clone vdisks")
        vpools = VPoolHelper.get_vpools()
        assert len(vpools) >= 1, "Not enough vPools to test"
        try:
            vpool = next(
                (vpool for vpool in vpools if len(vpool.storagedrivers) >= 2))
        except StopIteration:
            assert False, "Not enough Storagedrivers to test"
        # Setup base information
        storagedriver_source = vpool.storagedrivers[0]
        storagedriver_destination = vpool.storagedrivers[1]

        vdisks = []
        try:
            # Create required vdisk for test
            original_vdisk_name = '{0}_{1}'.format(cls.PREFIX, str(1).zfill(3))
            cls.LOGGER.info(
                "Creating the vdisk: {0} to clone".format(original_vdisk_name))
            original_vdisk = VDiskHelper.get_vdisk_by_guid(
                VDiskSetup.create_vdisk(
                    vdisk_name=original_vdisk_name,
                    vpool_name=vpool.name,
                    size=cls.VDISK_SIZE,
                    storagerouter_ip=storagedriver_source.storagerouter.ip))
            vdisks.append(original_vdisk)
            time.sleep(cls.CLONE_SLEEP_AFTER_CREATE)
            ###############
            # Clone vdisk #
            ###############
            cloned_vdisk_name = original_vdisk_name + '-clone-nosnapshot'
            cloned_vdisk = VDiskHelper.get_vdisk_by_guid(
                VDiskSetup.create_clone(
                    vdisk_name=original_vdisk_name,
                    vpool_name=vpool.name,
                    new_vdisk_name=cloned_vdisk_name,
                    storagerouter_ip=storagedriver_destination.storagerouter.ip
                )['vdisk_guid'])
            vdisks.append(cloned_vdisk)
            time.sleep(cls.CLONE_SLEEP_BEFORE_CHECK)
            ######################################
            # clone vdisk from existing snapshot #
            ######################################
            cloned_vdisk_name = original_vdisk_name + '-clone-snapshot'
            snapshot_id = VDiskSetup.create_snapshot(
                vdisk_name=original_vdisk_name,
                vpool_name=vpool.name,
                snapshot_name=cls.PREFIX + 'snapshot')
            cloned_vdisk = VDiskHelper.get_vdisk_by_guid(
                VDiskSetup.create_clone(
                    vdisk_name=original_vdisk_name,
                    vpool_name=vpool.name,
                    new_vdisk_name=cloned_vdisk_name,
                    storagerouter_ip=storagedriver_destination.storagerouter.ip,
                    snapshot_id=snapshot_id)['vdisk_guid'])
            vdisks.append(cloned_vdisk)
        finally:
            VDiskRemover.remove_vdisks_with_structure(vdisks)
        cls.LOGGER.info("Finished validating clone vdisks")
Example #17
    def test_reroute_fio(cls, fio_bin_path, cluster_info, disk_amount=1, timeout=CIConstants.HA_TIMEOUT, is_ee=False, logger=LOGGER):
        """
        Uses a modified fio to work with the openvstorage protocol
        :param fio_bin_path: path of the fio binary
        :type fio_bin_path: str
        :param cluster_info: information about the cluster, contains all dal objects
        :type cluster_info: dict
        :param disk_amount: amount of disks to test fail over with
        :type disk_amount: int
        :param timeout: timeout in seconds
        :type timeout: int
        :param is_ee: is it the enterprise edition
        :type is_ee: bool
        :param logger: logger instance
        :type logger: ovs.log.log_handler.LogHandler
        :return: None
        :rtype: NoneType
        """
        compute_client = SSHClient(cluster_info['storagerouters']['compute'], username='******')

        destination_std = cluster_info['storagedrivers']['destination']
        source_std = cluster_info['storagedrivers']['source']  # will be downed
        vpool = source_std.vpool

        values_to_check = {
            'source_std': source_std.serialize(),
            'target_std': destination_std.serialize(),
            'vdisks': []
        }
        # Create vdisks
        protocol = source_std.cluster_node_config['network_server_uri'].split(':')[0]
        edge_configuration = {'fio_bin_location': fio_bin_path, 'hostname': source_std.storage_ip,
                              'port': source_std.ports['edge'],
                              'protocol': protocol,
                              'volumenames': []}
        vdisk_info = {}
        failed_configurations = []

        if is_ee is True:
            edge_configuration.update(cls.get_shell_user())

        for index in xrange(0, disk_amount):
            try:
                vdisk_name = '{0}_vdisk{1}'.format(EdgeTester.TEST_NAME, str(index).zfill(4))
                data_vdisk = VDiskHelper.get_vdisk_by_guid(VDiskSetup.create_vdisk(vdisk_name, vpool.name, EdgeTester.AMOUNT_TO_WRITE * 2, source_std.storage_ip))
                vdisk_info[vdisk_name] = data_vdisk
                edge_configuration['volumenames'].append(data_vdisk.devicename.rsplit('.', 1)[0].split('/', 1)[1])
                values_to_check['vdisks'].append(data_vdisk.serialize())
            except RuntimeError as ex:
                logger.error('Could not create the vdisk. Got {0}'.format(str(ex)))
                raise
        configuration = random.choice(cls.DATA_TEST_CASES)
        threads = {'evented': {'io': {'pairs': [], 'r_semaphore': None}}}
        screen_names = []
        adjusted = False
        try:
            io_thread_pairs, monitoring_data, io_r_semaphore = ThreadingHandler.start_io_polling_threads(volume_bundle=vdisk_info)
            threads['evented']['io']['pairs'] = io_thread_pairs
            threads['evented']['io']['r_semaphore'] = io_r_semaphore
            screen_names, output_files = DataWriter.write_data_fio(client=compute_client,
                                                                   fio_configuration={'io_size': cls.AMOUNT_TO_WRITE,
                                                                                      'configuration': configuration},
                                                                   edge_configuration=edge_configuration)
            logger.info('Doing IO for {0}s before bringing down the node.'.format(cls.IO_TIME))
            ThreadingHandler.keep_threads_running(r_semaphore=io_r_semaphore,
                                                  threads=io_thread_pairs,
                                                  shared_resource=monitoring_data,
                                                  duration=cls.IO_TIME)
            # Threads ready for monitoring at this point, they are waiting to resume
            EdgeTester.adjust_for_reroute(source_std.storagerouter, trigger_rerout=True, ip_to_block=compute_client.ip, additional_ports=[edge_configuration['port']])
            adjusted = True
            downed_time = time.time()
            logger.info('Now waiting two refresh-rate intervals to avoid caching. In total {0}s'.format(EdgeTester.IO_REFRESH_RATE * 2))
            time.sleep(cls.IO_REFRESH_RATE * 2)
            ThreadingHandler.poll_io(r_semaphore=io_r_semaphore,
                                     required_thread_amount=len(io_thread_pairs),
                                     shared_resource=monitoring_data,
                                     downed_time=downed_time,
                                     timeout=timeout,
                                     output_files=output_files,
                                     client=compute_client,
                                     disk_amount=disk_amount)
            EdgeTester._validate_dal(values_to_check)  # Validate
        except Exception as ex:
            logger.error('Got an exception while running configuration {0}. Namely: {1}'.format(configuration, str(ex)))
            failed_configurations.append({'configuration': configuration, 'reason': str(ex)})
        finally:
            if adjusted is True:
                EdgeTester.adjust_for_reroute(source_std.storagerouter, trigger_rerout=False, ip_to_block=compute_client.ip, additional_ports=[edge_configuration['port']])
            for screen_name in screen_names:
                compute_client.run(['screen', '-S', screen_name, '-X', 'quit'])
            for thread_category, thread_collection in threads['evented'].iteritems():
                ThreadHelper.stop_evented_threads(thread_collection['pairs'], thread_collection['r_semaphore'])
            for vdisk in vdisk_info.values():
                VDiskRemover.remove_vdisk(vdisk.guid)
        assert len(failed_configurations) == 0, 'Certain configuration failed: {0}'.format(failed_configurations)
Example #18
    def test_ha_fio(cls,
                    fio_bin_path,
                    cluster_info,
                    is_ee,
                    disk_amount=1,
                    timeout=CIConstants.HA_TIMEOUT,
                    logger=LOGGER):
        """
        Uses a modified fio to work with the openvstorage protocol
        :param fio_bin_path: path of the fio binary
        :type fio_bin_path: str
        :param cluster_info: information about the cluster, contains all dal objects
        :type cluster_info: dict
        :param is_ee: is it an ee version or not
        :type is_ee: bool
        :param disk_amount: amount of disks to test fail over with
        :type disk_amount: int
        :param timeout: timeout in seconds
        :type timeout: int
        :param logger: logging instance
        :return: None
        :rtype: NoneType
        """
        destination_storagedriver = cluster_info['storagedrivers'][
            'destination']
        source_storagedriver = cluster_info['storagedrivers']['source']
        vpool = destination_storagedriver.vpool

        compute_client = SSHClient(cluster_info['storagerouters']['compute'],
                                   username='******')

        vm_to_stop = cls.HYPERVISOR_INFO['vms'][
            source_storagedriver.storage_ip]['name']
        parent_hypervisor = HypervisorFactory().get()
        values_to_check = {
            'source_std': source_storagedriver.serialize(),
            'target_std': destination_storagedriver.serialize(),
            'vdisks': []
        }
        # Create vdisks
        protocol = source_storagedriver.cluster_node_config[
            'network_server_uri'].split(':')[0]
        edge_configuration = {
            'fio_bin_location': fio_bin_path,
            'hostname': source_storagedriver.storage_ip,
            'port': source_storagedriver.ports['edge'],
            'protocol': protocol,
            'volumenames': []
        }
        if is_ee is True:
            edge_configuration.update(cls.get_shell_user())

        vdisk_info = {}
        failed_configurations = []

        for index in xrange(0, disk_amount):
            try:
                vdisk_name = '{0}_vdisk{1}'.format(cls.TEST_NAME,
                                                   str(index).zfill(3))
                data_vdisk = VDiskHelper.get_vdisk_by_guid(
                    VDiskSetup.create_vdisk(vdisk_name, vpool.name,
                                            cls.AMOUNT_TO_WRITE,
                                            source_storagedriver.storage_ip))
                vdisk_info[vdisk_name] = data_vdisk
                edge_configuration['volumenames'].append(
                    data_vdisk.devicename.rsplit('.', 1)[0].split('/', 1)[1])
                values_to_check['vdisks'].append(data_vdisk.serialize())
            except TimeOutError:
                logger.error('Creating the vdisk has timed out.')
                raise
            except RuntimeError as ex:
                logger.error('Could not create the vdisk. Got {0}'.format(
                    str(ex)))
                raise
        configuration = random.choice(cls.DATA_TEST_CASES)
        threads = {'evented': {'io': {'pairs': [], 'r_semaphore': None}}}
        vm_downed = False
        screen_names = []
        try:
            logger.info('Starting threads.')  # Logged separately: creating the vdisks takes a while, starting the threads does not

            io_thread_pairs, monitoring_data, io_r_semaphore = ThreadingHandler.start_io_polling_threads(
                volume_bundle=vdisk_info)
            threads['evented']['io']['pairs'] = io_thread_pairs
            threads['evented']['io']['r_semaphore'] = io_r_semaphore
            screen_names, output_files = DataWriter.write_data_fio(
                client=compute_client,
                fio_configuration={
                    'io_size': cls.AMOUNT_TO_WRITE,
                    'configuration': configuration
                },
                edge_configuration=edge_configuration)
            logger.info(
                'Doing IO for {0}s before bringing down the node.'.format(
                    cls.IO_TIME))
            ThreadingHandler.keep_threads_running(
                r_semaphore=io_r_semaphore,
                threads=io_thread_pairs,
                shared_resource=monitoring_data,
                duration=cls.IO_TIME)
            # Threads ready for monitoring at this point
            #########################
            # Bringing original owner of the volume down
            #########################
            try:
                logger.info('Stopping {0}.'.format(vm_to_stop))
                VMHandler.stop_vm(hypervisor=parent_hypervisor,
                                  vmid=vm_to_stop)
                downed_time = time.time()
                vm_downed = True
            except Exception as ex:
                logger.error('Failed to stop the VM. Got {0}'.format(str(ex)))
                raise
            time.sleep(cls.IO_REFRESH_RATE * 2)
            # Start IO polling to verify nothing went down
            ThreadingHandler.poll_io(
                r_semaphore=io_r_semaphore,
                required_thread_amount=len(io_thread_pairs),
                shared_resource=monitoring_data,
                downed_time=downed_time,
                timeout=timeout,
                output_files=output_files,
                client=compute_client,
                disk_amount=disk_amount)
            cls._validate(values_to_check, monitoring_data)
        except Exception as ex:
            failed_configurations.append({
                'configuration': configuration,
                'reason': str(ex)
            })
        finally:
            for thread_category, thread_collection in threads[
                    'evented'].iteritems():
                ThreadHelper.stop_evented_threads(
                    thread_collection['pairs'],
                    thread_collection['r_semaphore'])
            if vm_downed is True:
                VMHandler.start_vm(parent_hypervisor, vm_to_stop)
                SystemHelper.idle_till_ovs_is_up(
                    source_storagedriver.storage_ip, **cls.get_shell_user())
                # @TODO: Remove when https://github.com/openvstorage/integrationtests/issues/540 is fixed
                FwkHandler.restart_all()
            if screen_names:
                for screen_name in screen_names:
                    compute_client.run(
                        ['screen', '-S', screen_name, '-X', 'quit'])
            for vdisk in vdisk_info.values():
                VDiskRemover.remove_vdisk(vdisk.guid)
        assert len(failed_configurations) == 0, \
            'Certain configurations failed: {0}'.format(failed_configurations)
Example #19
    def validate_vdisk_clone(cls):
        """
        Validate if vdisk cloning and templating behave as expected
        INFO: 1 vPool should be available on 2 storagerouters
        :return:
        """
        cls.LOGGER.info("Starting to validate template vdisks")
        vpools = VPoolHelper.get_vpools()
        assert len(vpools) >= 1, "Not enough vPools to test"
        try:
            vpool = next(
                (vpool for vpool in vpools if len(vpool.storagedrivers) >= 2))
        except StopIteration:
            assert False, "Not enough Storagedrivers to test"
        # setup base information
        storagedriver_source = vpool.storagedrivers[0]
        vdisks = []
        try:
            # create required vdisk for test
            parent_vdisk_name = '{0}_{1}'.format(cls.PREFIX, str(1).zfill(3))
            parent_vdisk = VDiskHelper.get_vdisk_by_guid(
                VDiskSetup.create_vdisk(
                    vdisk_name=parent_vdisk_name,
                    vpool_name=vpool.name,
                    size=cls.VDISK_SIZE,
                    storagerouter_ip=storagedriver_source.storagerouter.ip))
            vdisks.append(parent_vdisk)
            time.sleep(cls.TEMPLATE_SLEEP_AFTER_CREATE)
            # Create vdisk template #
            VDiskSetup.set_vdisk_as_template(vdisk_name=parent_vdisk_name,
                                             vpool_name=vpool.name)
            time.sleep(cls.TEMPLATE_SLEEP_AFTER_CREATE)
            clone_vdisk_name = '{0}_from-template'.format(parent_vdisk_name)
            clone_vdisk = VDiskHelper.get_vdisk_by_guid(
                VDiskSetup.create_from_template(
                    vdisk_name=parent_vdisk_name,
                    vpool_name=vpool.name,
                    new_vdisk_name=clone_vdisk_name,
                    storagerouter_ip=storagedriver_source.storagerouter.ip)
                ['vdisk_guid'])
            vdisks.append(clone_vdisk)
            time.sleep(cls.TEMPLATE_SLEEP_BEFORE_DELETE)
            try:
                # try to delete template with clones (should fail) #
                VDiskRemover.remove_vtemplate_by_name(
                    vdisk_name=parent_vdisk_name, vpool_name=vpool.name)
                error_msg = "Removing vtemplate `{0}` should have failed!".format(
                    parent_vdisk_name)
                cls.LOGGER.error(error_msg)
                raise RuntimeError(error_msg)
            except HttpException:
                cls.LOGGER.info(
                    "Removing vtemplate `{0}` has failed as expected (because of leftover clones)!"
                    .format(parent_vdisk_name))
        finally:
            while len(vdisks) > 0:
                vdisk = vdisks.pop()
                VDiskRemover.remove_vdisk(vdisk.guid)
        try:
            # template vdisk from clone (should fail) #
            parent_vdisk = VDiskHelper.get_vdisk_by_guid(
                VDiskSetup.create_vdisk(
                    vdisk_name=parent_vdisk_name,
                    vpool_name=vpool.name,
                    size=cls.VDISK_SIZE,
                    storagerouter_ip=storagedriver_source.storagerouter.ip))
            vdisks.append(parent_vdisk)
            # create a clone from the vdisk
            clone_vdisk_name = '{0}_clone'.format(parent_vdisk_name)
            cloned_vdisk = VDiskHelper.get_vdisk_by_guid(
                VDiskSetup.create_clone(
                    vdisk_name=parent_vdisk_name,
                    vpool_name=vpool.name,
                    new_vdisk_name=clone_vdisk_name,
                    storagerouter_ip=storagedriver_source.storagerouter.ip)
                ['vdisk_guid'])
            vdisks.append(cloned_vdisk)
            # try to create a vTemplate from a clone (should fail)
            try:
                VDiskSetup.set_vdisk_as_template(vdisk_name=clone_vdisk_name,
                                                 vpool_name=vpool.name)
                error_msg = "Setting vdisk `{0}` as template should have failed!".format(
                    clone_vdisk_name)
                cls.LOGGER.error(error_msg)
                raise Exception(error_msg)  # plain Exception so the except clause below does not swallow it
            except RuntimeError:
                cls.LOGGER.info(
                    "Setting vdisk `{0}` as template failed as expected (because the vdisk is a clone)!"
                    .format(clone_vdisk_name))
        finally:
            parent_vdisks = []
            while len(vdisks) > 0:  # Remove clones first
                vdisk = vdisks.pop()
                if vdisk.parent_vdisk_guid is None:
                    parent_vdisks.append(vdisk)
                    continue
                VDiskRemover.remove_vdisk(vdisk.guid)
            for parent_vdisk in parent_vdisks:
                VDiskRemover.remove_vdisk(parent_vdisk.guid)
        cls.LOGGER.info("Finished validating template vdisks")
Example #20
    def validate_vdisk_deployment(cls):
        """
        Validate if vdisk deployment works via various ways
        INFO: 1 vPool should be available on 1 storagerouter
        :return:
        """
        cls.LOGGER.info("Starting to validate the vdisk deployment")

        vpools = VPoolHelper.get_vpools()
        assert len(vpools) >= 1, "Not enough vPools to test"

        vpool = vpools[0]  # just pick the first vpool you find
        assert len(
            vpool.storagedrivers) >= 1, "Not enough Storagedrivers to test"

        # setup base information
        storagedriver = vpool.storagedrivers[0]
        protocol = storagedriver.cluster_node_config[
            'network_server_uri'].split(':')[0]
        storage_ip = storagedriver.storage_ip
        edge_port = storagedriver.ports['edge']
        client = SSHClient(storagedriver.storage_ip, username='******')
        # =======
        # VIA API
        # =======
        for size in cls.VDISK_SIZES:
            api_disk_name = cls.PREFIX + str(size) + '-api'
            cls.LOGGER.info(
                "Starting to create vdisk `{0}` on vPool `{1}` with size `{2}` "
                "on node `{3}`".format(api_disk_name, vpool.name, size,
                                       storagedriver.storagerouter.ip))
            VDiskSetup.create_vdisk(
                vdisk_name=api_disk_name + '.raw',
                vpool_name=vpool.name,
                size=size,
                storagerouter_ip=storagedriver.storagerouter.ip,
                timeout=cls.VDISK_CREATE_TIMEOUT)
            cls.LOGGER.info(
                "Finished creating vdisk `{0}`".format(api_disk_name))
            cls._check_vdisk(vdisk_name=api_disk_name, vpool_name=vpool.name)
            cls.LOGGER.info(
                "Starting to delete vdisk `{0}`".format(api_disk_name))
            VDiskRemover.remove_vdisk_by_name(api_disk_name, vpool.name)
            cls.LOGGER.info(
                "Finished deleting vdisk `{0}`".format(api_disk_name))

        # ========
        # VIA QEMU
        # ========
        for size in cls.VDISK_SIZES:
            qemu_disk_name = cls.PREFIX + str(size) + '-qemu'
            edge_info = {
                'port': edge_port,
                'protocol': protocol,
                'ip': storage_ip,
            }
            if SystemHelper.get_ovs_version(
                    storagedriver.storagerouter) == 'ee':
                edge_info.update(cls.get_shell_user())
            VMHandler.create_image(client, qemu_disk_name, size, edge_info)
            cls.LOGGER.info(
                "Finished creating vdisk `{0}`".format(qemu_disk_name))
            cls._check_vdisk(vdisk_name=qemu_disk_name, vpool_name=vpool.name)
            cls.LOGGER.info(
                "Starting to delete vdisk `{0}`".format(qemu_disk_name))
            VDiskRemover.remove_vdisk_by_name(qemu_disk_name, vpool.name)
            cls.LOGGER.info(
                "Finished deleting vdisk `{0}`".format(qemu_disk_name))

        # ============
        # VIA TRUNCATE
        # ============
        for size in cls.VDISK_SIZES:
            truncate_disk_name = cls.PREFIX + str(size) + '-trunc'
            cls.LOGGER.info(
                "Starting to create vdisk `{0}` on vPool `{1}` on node `{2}` "
                "with size `{3}`".format(truncate_disk_name, vpool.name,
                                         storagedriver.storage_ip, size))
            client.run([
                "truncate", "-s",
                str(size), "/mnt/{0}/{1}.raw".format(vpool.name,
                                                     truncate_disk_name)
            ])
            cls.LOGGER.info(
                "Finished creating vdisk `{0}`".format(truncate_disk_name))
            cls._check_vdisk(vdisk_name=truncate_disk_name,
                             vpool_name=vpool.name)
            cls.LOGGER.info(
                "Starting to delete vdisk `{0}`".format(truncate_disk_name))
            VDiskRemover.remove_vdisk_by_name(truncate_disk_name, vpool.name)
            cls.LOGGER.info(
                "Finished deleting vdisk `{0}`".format(truncate_disk_name))
        cls.LOGGER.info("Finished to validate the vdisk deployment")
Example #21
    def _execute_test(cls):
        """
        Validate if DTL is configured as desired
        REQUIREMENTS:
        * 1 vPool should be available with 1 storagedriver
        * 1 vPool should be available with 2 or more storagedrivers in 2 separate domains
        OPTIONAL:
        * 1 vPool with 1 storagedriver with disabled DTL
        :return:
        """
        cls.LOGGER.info("Starting to validate the basic DTL")
        ##########################
        # get deployment details #
        ##########################
        vpools = VPoolHelper.get_vpools()
        assert len(vpools) >= 1, "Not enough vPools to test"

        # Get suitable vPools
        vpool_single_sd = None
        vpool_multi_sd = None
        vpool_dtl_disabled = None
        for vp in vpools:
            if vp.configuration['dtl_mode'] != VPoolHelper.DtlStatus.DISABLED:
                if len(vp.storagedrivers) == 1 and vpool_single_sd is None:
                    vpool_single_sd = vp
                    cls.LOGGER.info(
                        "vPool `{0}` has been chosen for SINGLE vPool DTL tests"
                        .format(vp.name))
                elif len(vp.storagedrivers) >= 2 and vpool_multi_sd is None:
                    vpool_multi_sd = vp
                    cls.LOGGER.info(
                        "vPool `{0}` has been chosen for MULTI vPool DTL tests"
                        .format(vp.name))
                else:
                    cls.LOGGER.info(
                        "vPool `{0}` is not suited for tests".format(vp.name))
            else:
                cls.LOGGER.info(
                    "vPool `{0}` with DISABLED DTL is available and will be tested!"
                    .format(vp.name))
                vpool_dtl_disabled = vp

        assert vpool_single_sd is not None, "A vPool should be available with 1 storagedriver"
        assert vpool_multi_sd is not None, "A vPool should be available with 2 or more storagedrivers"

        # pick a random storagedriver
        storagedriver_single = vpool_single_sd.storagedrivers[0]
        storagedriver_multi = random.choice(vpool_multi_sd.storagedrivers)
        storagedrivers = [storagedriver_single, storagedriver_multi]

        # check disabled DTL
        storagedriver_disabled_dtl = None
        if vpool_dtl_disabled is not None:
            storagedriver_disabled_dtl = random.choice(
                vpool_dtl_disabled.storagedrivers)
            storagedrivers.append(storagedriver_disabled_dtl)

        # key = amount of storagedrivers or a_s
        # value = list with the vpool & storagedriver to test
        vpools_to_test = {
            1: [{
                "vpool": vpool_single_sd,
                "storagedriver": storagedriver_single
            }],
            2: [{
                "vpool": vpool_multi_sd,
                "storagedriver": storagedriver_multi
            }]
        }

        # check if disabled DTL vpool needs to be added
        if vpool_dtl_disabled is not None:
            a_s = len(vpool_dtl_disabled.storagedrivers)
            v_s = {
                "vpool": vpool_dtl_disabled,
                "storagedriver": storagedriver_disabled_dtl
            }
            if a_s in vpools_to_test:
                vpools_to_test[a_s].append(v_s)
            else:
                vpools_to_test[a_s] = [v_s]

        ##############
        # start test #
        ##############

        for a_s, vpools in vpools_to_test.iteritems():
            start = time.time()
            for vpool in vpools:

                cls.LOGGER.info(
                    "Starting DTL test with vPool {0} and {1} storagedrivers".
                    format(vpool['vpool'].name,
                           len(vpool['vpool'].storagedrivers)))
                vdisk_name = "{0}-{1}-{2}".format(
                    cls.VDISK_NAME, vpool['vpool'].name,
                    str(len(vpool['vpool'].storagedrivers)))
                try:
                    vdisk_guid = VDiskSetup.create_vdisk(
                        vdisk_name=vdisk_name + '.raw',
                        vpool_name=vpool['vpool'].name,
                        size=cls.SIZE_VDISK,
                        storagerouter_ip=vpool['storagedriver'].storagerouter.
                        ip)
                    # Fetch to validate if it was properly created
                    vdisk = VDiskHelper.get_vdisk_by_guid(
                        vdisk_guid=vdisk_guid)
                except TimeOutError:
                    cls.LOGGER.error("Creation of the vDisk has timed out.")
                    raise
                except RuntimeError as ex:
                    cls.LOGGER.info("Creation of vDisk failed: {0}".format(ex))
                    raise
                else:
                    #####################################
                    # check DTL status after deployment #
                    #####################################

                    correct_msg = "vDisk {0} with {1} storagedriver(s) has correct DTL status: ".format(
                        vdisk_name, a_s)
                    if a_s == 1 and vdisk.dtl_status == VDiskHelper.DtlStatus.STANDALONE:
                        cls.LOGGER.info(correct_msg + vdisk.dtl_status)
                    elif a_s >= 2 and vdisk.dtl_status == VDiskHelper.DtlStatus.SYNC:
                        cls.LOGGER.info(correct_msg + vdisk.dtl_status)
                    elif vdisk.dtl_status == VDiskHelper.DtlStatus.DISABLED and vpool[
                            'vpool'].configuration[
                                'dtl_mode'] == VPoolHelper.DtlStatus.DISABLED:
                        cls.LOGGER.info(
                            correct_msg + vdisk.dtl_status +
                            " (DTL is disabled on the vPool as well)")
                    else:
                        error_msg = "vDisk {0} with {1} storagedriver(s) has WRONG DTL status: {2}".format(
                            vdisk_name, a_s, vdisk.dtl_status)
                        cls.LOGGER.error(error_msg)
                        raise RuntimeError(error_msg)

                    ################################
                    # try to change the DTL config #
                    ################################

                    base_config = {
                        "sco_size": 4,
                        "dtl_mode": VPoolHelper.DtlStatus.SYNC,
                        "write_buffer": 512
                    }
                    if a_s == 1:
                        ########################################################################################
                        # change config to domain with non existing storagedrivers of this vpool (should fail) #
                        ########################################################################################
                        cls.LOGGER.info(
                            "Starting test: change config to domain with non existing storagedrivers "
                            "of this vpool (should fail)")
                        base_config['dtl_target'] = [
                            random.choice([
                                domain_guid for domain_guid in
                                DomainHelper.get_domain_guids()
                                if domain_guid not in vpool['storagedriver'].
                                storagerouter.regular_domains
                            ])
                        ]
                        cls.LOGGER.info("Changing dtl_target to: {0}".format(
                            DomainHelper.get_domain_by_guid(
                                domain_guid=base_config['dtl_target']
                                [0]).name))
                        try:
                            cls.LOGGER.info(base_config)
                            VDiskSetup.set_config_params(
                                vdisk_name=vdisk_name + '.raw',
                                vpool_name=vpool['vpool'].name,
                                config=base_config)
                            error_msg = "Changing config to a domain with non existing storagedrivers should have failed with vdisk: {0}!".format(
                                vdisk_name)
                            cls.LOGGER.error(error_msg)
                            raise Exception(error_msg)
                        except TimeOutError:
                            cls.LOGGER.error(
                                "Changing config to a same domain with only 1 storagedriver has timed out."
                            )
                            raise
                        except RuntimeError:
                            cls.LOGGER.info(
                                "Changing config to a domain with non existing storagedrivers has failed as expected!"
                            )

                        ##############################################################################################
                        # change config to domain where there are other storagedrivers but not of ours (should fail) #
                        ##############################################################################################
                        cls.LOGGER.info(
                            "Starting test: change config to domain where there are other storagedrivers but not of ours (should fail)"
                        )

                        filtered_domains = list(
                            set(DomainHelper.get_domain_guids()) -
                            set(vpool['storagedriver'].storagerouter.
                                regular_domains))
                        base_config['dtl_target'] = [filtered_domains[0]]
                        cls.LOGGER.info(
                            "Current vdisk domain location: {0}".format(
                                DomainHelper.get_domain_by_guid(
                                    domain_guid=vpool['storagedriver'].
                                    storagerouter.regular_domains[0]).name))
                        cls.LOGGER.info("Changing dtl_target to: {0}".format(
                            DomainHelper.get_domain_by_guid(
                                domain_guid=base_config['dtl_target']
                                [0]).name))
                        try:
                            VDiskSetup.set_config_params(
                                vdisk_name=vdisk_name + '.raw',
                                vpool_name=vpool['vpool'].name,
                                config=base_config)
                            error_msg = "Changing config to a same domain with only 1 storagedriver should have failed with vdisk: {0}!".format(
                                vdisk_name)
                            cls.LOGGER.error(error_msg)
                            raise Exception(error_msg)
                        except TimeOutError:
                            cls.LOGGER.error(
                                "Changing config to a domain with non existing storagedrivers has timed out."
                            )
                            raise
                        except RuntimeError:
                            cls.LOGGER.info(
                                "Changing config to a same domain with only 1 storagedriver has failed as expected!"
                            )
                    elif a_s >= 2:
                        #######################################################################
                        # change config to domain with active storagedrivers (should succeed) #
                        #######################################################################
                        cls.LOGGER.info(
                            "Starting test: change config to domain with active storagedrivers (should succeed)"
                        )

                        # change current target domain to other target domain
                        current_vdisk_domains = StoragedriverHelper.get_storagedriver_by_id(
                            storagedriver_id=vdisk.storagedriver_id
                        ).storagerouter.regular_domains
                        cls.LOGGER.info(
                            "Currently the vdisk is living in: {0}".format(
                                current_vdisk_domains))
                        vpool_domains = VPoolHelper.get_domains_by_vpool(
                            vpool_name=vdisk.vpool.name)
                        cls.LOGGER.info(
                            "Currently the vpool {0} is available in: {1}".
                            format(vdisk.vpool.name, vpool_domains))
                        future_domains = list(
                            set(vpool_domains) - set(current_vdisk_domains))
                        cls.LOGGER.info(
                            "DTL will be moved to other domain: {0}".format(
                                future_domains))
                        base_config['dtl_target'] = future_domains

                        # change settings
                        try:
                            VDiskSetup.set_config_params(
                                vdisk_name=vdisk_name + '.raw',
                                vpool_name=vpool['vpool'].name,
                                config=base_config)
                        except TimeOutError:
                            cls.LOGGER.error(
                                "Changing config to a same domain with only 1 storagedriver has timed out."
                            )
                            raise
                        except RuntimeError:
                            cls.LOGGER.error(
                                "Changing config to a same domain with only 1 storagedriver was unsuccesful!"
                            )
                            raise
                        cls.LOGGER.info(
                            "Changing config to a same domain with only 1 storagedriver was successful!"
                        )

                    cls.LOGGER.info("Removing vDisk {0}".format(vdisk.name))
                    VDiskRemover.remove_vdisk(vdisk_guid=vdisk.guid)
                    cls.LOGGER.info("Finished removing vDisk {0}".format(
                        vdisk.name))

            end = time.time()

            # display run time
            cls.LOGGER.info("Run testing the DTL took {0} seconds".format(
                int(end - start)))

        cls.LOGGER.info("Finished to validate the basic DTL")