Example #1
 def run_test_fuse(cls,
                   storagedriver,
                   disk_amount,
                   write_amount,
                   logger=LOGGER):
     """
     Deploy and run a small I/O test using the FUSE interface
     :param storagedriver: chosen storagedriver for testing
     :param disk_amount: amount of disks to deploy and write/read to
     :param write_amount: amount of data to write and read
     :param logger: logging instance
     :return: None
     """
     vpool = storagedriver.vpool
     client = SSHClient(storagedriver.storagerouter, username='******')
     vdisk_info = {}
     try:
         for vdisk_number in xrange(disk_amount):
             vdisk_name = '{0}{1}-fuse'.format(cls.PREFIX, vdisk_number)
             disk_location = "/mnt/{0}/{1}.raw".format(
                 vpool.name, vdisk_name)
             logger.info("Truncating vdisk {0} on {1}:{2}".format(
                 vdisk_name, storagedriver.storage_ip, vpool.name))
             client.run(
                 ["truncate", "-s",
                  str(cls.VDISK_SIZE), disk_location])
             vdisk = cls._get_vdisk('{0}.raw'.format(vdisk_name),
                                    vpool.name)
             vdisk_info[disk_location] = vdisk
         fio_configuration = {
             'io_size': write_amount,
             'configuration': (0, 100)
         }
         DataWriter.write_data_fio(client,
                                   fio_configuration,
                                   file_locations=vdisk_info.keys(),
                                   screen=False,
                                   loop_screen=False)
         fio_configuration = {
             'io_size': write_amount,
             'configuration': (100, 0)
         }
         DataWriter.write_data_fio(client,
                                   fio_configuration,
                                   file_locations=vdisk_info.keys(),
                                   screen=False,
                                   loop_screen=False)
     except Exception as ex:
         logger.error(
             'An exception occurred while testing the FUSE interface: {0}'.format(
                 str(ex)))
         raise
     finally:
         for vdisk in vdisk_info.values():
             VDiskRemover.remove_vdisk_by_name(vdisk.devicename,
                                               vdisk.vpool.name)
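
A minimal invocation sketch for the test above; the call site, the disk count and the write size are illustrative assumptions, not part of the example:

    # Hypothetical call site; any DAL storagedriver object that exposes
    # a vpool and a storagerouter will do.
    storagedriver = vpool.storagedrivers[0]
    cls.run_test_fuse(storagedriver=storagedriver,
                      disk_amount=2,           # two vdisks get truncated and exercised
                      write_amount=1024 ** 3)  # 1 GiB of fio I/O per run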
Example #2
    def delete_snapshot(self, snapshot):
        """
        Deletes a snapshot.
        """
        volume_name = self.VOLUME_PREFIX + snapshot['volume_id']
        api = self._setup_ovs_client()
        VDiskRemover.remove_snapshot(snapshot_name=snapshot['name'],
                                     vpool_guid=self.vpool_guid,
                                     api=api)

        LOG.debug('libovsvolumedriver.ovs_snapshot_remove: {0} {1}'.format(
            volume_name, snapshot['name']))
Example #3
    def delete_volume(self, volume):
        """
        Deletes a logical volume.
        Called by "cinder delete ... "

        :param volume: volume reference (sqlalchemy Model)
        """
        volume_name = self.VOLUME_PREFIX + str(volume.id)
        api = self._setup_ovs_client()
        VDiskRemover.remove_vdisk(vdisk_name=volume_name,
                                  vpool_guid=self.vpool_guid,
                                  api=api)
        LOG.debug('libovsvolumedriver.delete_volume: {0}'.format(volume.id))
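
Note that two different remove_vdisk call styles appear across these examples; both are summarized below, with the keyword signatures inferred from the calls rather than from VDiskRemover itself:

    # Test-suite style (Examples #4, #10, #11): address the vdisk by guid.
    VDiskRemover.remove_vdisk(vdisk_guid)
    # Cinder-driver style (Example #3): address it by name and vpool guid,
    # routed through an OVS API client.
    VDiskRemover.remove_vdisk(vdisk_name=volume_name,
                              vpool_guid=self.vpool_guid,
                              api=api)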
Example #4
    def _delete_remaining_vdisks(cls, base_vdisks):
        """
        Delete the remaining base vdisks (when cloned=True was used)
        :param base_vdisks: vdisk guids of the base vdisks, e.g. ['a15908c0-f7f0-402e-ad20-2be97e401cd3', ...]
        :type base_vdisks: list
        :return: None
        """

        for vdisk_guid in base_vdisks:
            cls.LOGGER.info(
                "Starting to remove base vDisk `{0}`".format(vdisk_guid))
            VDiskRemover.remove_vdisk(vdisk_guid)
            cls.LOGGER.info(
                "Finished removing base vDisk `{0}`".format(vdisk_guid))
Example #5
 def start_test(cls):
     storagedriver, fio_bin_loc, is_ee = cls.setup()
     for test_run_type in cls.TYPE_TEST_RUN:
         cloned = test_run_type == 'clones'
         created_vdisks = cls.create_vdisks(storagedriver, cloned=cloned)
         try:
             if cloned is True:
                 vdisks = created_vdisks['clones']
             else:
                 vdisks = created_vdisks['parents']
             stored_map = cls._prepare_for_scrubbing(
                 vdisks, storagedriver, fio_bin_loc, is_ee)
             cls._validate_scrubbing(stored_map)
         finally:
             for vdisk_type, vdisk_list in created_vdisks.iteritems():
                 VDiskRemover.remove_vdisks_with_structure(vdisk_list)
Example #6
 def _cleanup_vdisk(cls, vdisk_name, vpool_name, fail=True):
     """
     Attempt to clean up a vdisk
     :param vdisk_name: name of the vdisk
     :param vpool_name: name of the vpool
     :param fail: boolean to determine whether errors should raise or not
     :return:
     """
     # Cleanup vdisk using the controller
     try:
         VDiskRemover.remove_vdisk_by_name(vdisk_name, vpool_name)
     except Exception as ex:
         cls.LOGGER.error(str(ex))
         if fail is True:
             raise
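
A sketch of how a helper like this is typically called from a teardown path, so that cleanup errors are logged without masking the test outcome; the test body and names are hypothetical:

    try:
        run_vdisk_scenario()  # hypothetical test body
    finally:
        # fail=False: log cleanup errors, never raise over the test result
        cls._cleanup_vdisk('ci_vdisk_001', 'myvpool', fail=False)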
Example #7
 def _delete_snapshots(cls, volume_bundle, amount_to_delete=3, logger=LOGGER):
     """
     Delete up to ``amount_to_delete`` randomly chosen snapshots per vdisk, never the first or last one
     :param volume_bundle: mapping of vdisk name to vdisk object
     :param amount_to_delete: amount of snapshots to remove per vdisk
     :param logger: logging instance
     :return: None
     :rtype: NoneType
     """
     for index, (vdisk_name, vdisk_object) in enumerate(volume_bundle.iteritems(), 1):
         snapshot_list = vdisk_object.snapshots
         if len(snapshot_list) < 3:
             raise RuntimeError('Need at least 3 snapshots to be able to leave the first and last snapshots.')
         snapshots_allowed_to_remove = snapshot_list[1:-1]  # Do not remove first or last
         while amount_to_delete > 0:
             if len(snapshots_allowed_to_remove) == 0:
                 logger.warning('No snapshots left to remove. Needed to remove at least {} more.'.format(amount_to_delete))
                 break
             snapshot = snapshots_allowed_to_remove.pop(random.randrange(0, len(snapshots_allowed_to_remove)))
             logger.debug('Removing snapshot with guid {0}'.format(snapshot['guid']))
             VDiskRemover.remove_snapshot(snapshot['guid'], vdisk_object.name, vdisk_object.vpool.name)
             amount_to_delete -= 1
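
The selection logic above amounts to popping random elements from the slice that excludes the first and last snapshot. The same pattern as a self-contained sketch:

    import random

    def pick_snapshots_to_remove(snapshots, amount):
        """Return up to `amount` randomly chosen snapshots, never the first or last one."""
        candidates = snapshots[1:-1]  # protect the oldest and the newest snapshot
        picked = []
        while amount > 0 and len(candidates) > 0:
            picked.append(candidates.pop(random.randrange(len(candidates))))
            amount -= 1
        return picked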
Example #8
    def validate_vdisk_clone(cls):
        """
        Validate whether vdisk cloning works in various ways
        INFO: 1 vPool should be available on 2 storagerouters
        :return:
        """
        cls.LOGGER.info("Starting to validate clone vdisks")
        vpools = VPoolHelper.get_vpools()
        assert len(vpools) >= 1, "Not enough vPools to test"
        try:
            vpool = next(
                (vpool for vpool in vpools if len(vpool.storagedrivers) >= 2))
        except StopIteration:
            assert False, "Not enough Storagedrivers to test"
        # Setup base information
        storagedriver_source = vpool.storagedrivers[0]
        storagedriver_destination = vpool.storagedrivers[1]

        vdisks = []
        try:
            # Create required vdisk for test
            original_vdisk_name = '{0}_{1}'.format(cls.PREFIX, str(1).zfill(3))
            cls.LOGGER.info(
                "Creating the vdisk: {0} to clone".format(original_vdisk_name))
            original_vdisk = VDiskHelper.get_vdisk_by_guid(
                VDiskSetup.create_vdisk(
                    vdisk_name=original_vdisk_name,
                    vpool_name=vpool.name,
                    size=cls.VDISK_SIZE,
                    storagerouter_ip=storagedriver_source.storagerouter.ip))
            vdisks.append(original_vdisk)
            time.sleep(cls.CLONE_SLEEP_AFTER_CREATE)
            ###############
            # Clone vdisk #
            ###############
            cloned_vdisk_name = original_vdisk_name + '-clone-nosnapshot'
            cloned_vdisk = VDiskHelper.get_vdisk_by_guid(
                VDiskSetup.create_clone(
                    vdisk_name=original_vdisk_name,
                    vpool_name=vpool.name,
                    new_vdisk_name=cloned_vdisk_name,
                    storagerouter_ip=storagedriver_destination.storagerouter.ip
                )['vdisk_guid'])
            vdisks.append(cloned_vdisk)
            time.sleep(cls.CLONE_SLEEP_BEFORE_CHECK)
            ######################################
            # clone vdisk from existing snapshot #
            ######################################
            cloned_vdisk_name = original_vdisk_name + '-clone-snapshot'
            snapshot_id = VDiskSetup.create_snapshot(
                vdisk_name=original_vdisk_name,
                vpool_name=vpool.name,
                snapshot_name=cls.PREFIX + 'snapshot')
            cloned_vdisk = VDiskHelper.get_vdisk_by_guid(
                VDiskSetup.create_clone(
                    vdisk_name=original_vdisk_name,
                    vpool_name=vpool.name,
                    new_vdisk_name=cloned_vdisk_name,
                    storagerouter_ip=storagedriver_destination.storagerouter.ip,
                    snapshot_id=snapshot_id)['vdisk_guid'])
            vdisks.append(cloned_vdisk)
        finally:
            VDiskRemover.remove_vdisks_with_structure(vdisks)
        cls.LOGGER.info("Finished validating clone vdisks")
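
Side by side, the two clone flavours exercised above, with the keyword signatures taken from the calls in the example (inferred, not authoritative):

    # Clone without an explicit snapshot ...
    VDiskSetup.create_clone(vdisk_name=name, vpool_name=vpool.name,
                            new_vdisk_name=name + '-clone-nosnapshot',
                            storagerouter_ip=ip)
    # ... and clone from an existing snapshot.
    VDiskSetup.create_clone(vdisk_name=name, vpool_name=vpool.name,
                            new_vdisk_name=name + '-clone-snapshot',
                            storagerouter_ip=ip, snapshot_id=snapshot_id)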
Example #9
 def destroy_vms(self, vm_info):
     for vm_name, vm_object in vm_info.iteritems():
         self.hypervisor_client.sdk.destroy(vm_name)
         VDiskRemover.remove_vdisks_with_structure(vm_object['vdisks'])
         self.hypervisor_client.sdk.undefine(vm_name)
Example #10
    def _rollback_vdisks(cls,
                         stored_vdisks,
                         vpool,
                         amount_checks=MAX_ROLLBACK_CHECKS,
                         timeout=ROLLBACK_TIMEOUT):
        """
        Rollback the given mapped vdisks

        :param stored_vdisks: dict with stored vdisks, snapshot, location, ...
        :type stored_vdisks: dict
        :param vpool: a valid vpool object
        :type vpool: ovs.model.hybrids.vpool
        :param amount_checks: amount of checks to perform after a vdisk has been rolled back
        :type amount_checks: int
        :param timeout: timeout between checks
        :type timeout: int
        :return: None
        """

        for stored_vdisk in stored_vdisks:
            # fetch vdisk
            vdisk = VDiskHelper.get_vdisk_by_guid(
                vdisk_guid=stored_vdisk['vdisk_guid'])

            # Commencing rollback
            cls.LOGGER.info(
                "Starting rollback on vdisk `{0}` to first snapshot `{1}`".
                format(vdisk.name, stored_vdisk['snapshots'][0]))

            VDiskSetup.rollback_to_snapshot(
                vdisk_name=vdisk.name + '.raw',
                vpool_name=vpool.name,
                snapshot_id=stored_vdisk['snapshots'][0]['snapshot_guid'])

            # Start checking whether the disk has been rolled back
            tries = 0
            while tries < amount_checks:
                current_statistics = vdisk.storagedriver_client.info_volume(
                    str(vdisk.volume_id)).stored
                if current_statistics < stored_vdisk['snapshots'][1][
                        'stored_data']:
                    cls.LOGGER.info(
                        "VDisk `{0}` matched the requirements for rollback with {1} < {2}"
                        .format(stored_vdisk['vdisk_guid'], current_statistics,
                                stored_vdisk['snapshots'][1]['stored_data']))
                    break
                else:
                    tries += 1
                    cls.LOGGER.warning(
                        "Try `{0}` when checking stored data on volumedriver for VDisk "
                        "`{1}`, with currently `{2}` but it should be less than `{3}`. "
                        "Now sleeping for `{4}` seconds ...".format(
                            tries, stored_vdisk['vdisk_guid'],
                            current_statistics,
                            stored_vdisk['snapshots'][1]['stored_data'],
                            timeout))
                    time.sleep(timeout)

            # check if the maximum amount of checks has been exceeded
            if tries == amount_checks:
                error_msg = "VDisk `{0}` should have been rolled back, but the maximum amount of checks was exceeded!"\
                            .format(vdisk.name)
                cls.LOGGER.error(error_msg)
                raise RuntimeError(error_msg)
            else:
                cls.LOGGER.info(
                    "Successfully rolled back vdisk `{0}`".format(vdisk.name))

                # commence deleting the volume
                cls.LOGGER.info("Starting to remove VDisk `{0}`".format(
                    vdisk.name))
            VDiskRemover.remove_vdisk(stored_vdisk['vdisk_guid'])
            cls.LOGGER.info("Finished removing VDisk `{0}`".format(vdisk.name))
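
The stored-data check above is a bounded poll loop. Extracted into a generic sketch (an illustration, not an existing helper in this code base):

    import time

    def wait_until(condition, amount_checks, timeout):
        """Poll `condition` up to `amount_checks` times, sleeping `timeout` seconds between tries."""
        for _ in xrange(amount_checks):
            if condition():
                return True
            time.sleep(timeout)
        return False

    # e.g. wait_until(lambda: current_stored() < snapshot_stored_data,
    #                 amount_checks=MAX_ROLLBACK_CHECKS, timeout=ROLLBACK_TIMEOUT)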
Example #11
    def test_reroute_fio(cls, fio_bin_path, cluster_info, disk_amount=1, timeout=CIConstants.HA_TIMEOUT, is_ee=False, logger=LOGGER):
        """
        Uses a modified fio to work with the openvstorage protocol
        :param fio_bin_path: path of the fio binary
        :type fio_bin_path: str
        :param cluster_info: information about the cluster, contains all dal objects
        :type cluster_info: dict
        :param disk_amount: amount of disks to test fail over with
        :type disk_amount: int
        :param timeout: timeout in seconds
        :type timeout: int
        :param is_ee: is it the enterprise edition
        :type is_ee: bool
        :param logger: logger instance
        :type logger: ovs.log.log_handler.LogHandler
        :return: None
        :rtype: NoneType
        """
        compute_client = SSHClient(cluster_info['storagerouters']['compute'], username='******')

        destination_std = cluster_info['storagedrivers']['destination']
        source_std = cluster_info['storagedrivers']['source']  # will be downed
        vpool = source_std.vpool

        values_to_check = {
            'source_std': source_std.serialize(),
            'target_std': destination_std.serialize(),
            'vdisks': []
        }
        # Create vdisks
        protocol = source_std.cluster_node_config['network_server_uri'].split(':')[0]
        edge_configuration = {'fio_bin_location': fio_bin_path, 'hostname': source_std.storage_ip,
                              'port': source_std.ports['edge'],
                              'protocol': protocol,
                              'volumenames': []}
        vdisk_info = {}
        failed_configurations = []

        if is_ee is True:
            edge_configuration.update(cls.get_shell_user())

        for index in xrange(0, disk_amount):
            try:
                vdisk_name = '{0}_vdisk{1}'.format(EdgeTester.TEST_NAME, str(index).zfill(4))
                data_vdisk = VDiskHelper.get_vdisk_by_guid(VDiskSetup.create_vdisk(vdisk_name, vpool.name, EdgeTester.AMOUNT_TO_WRITE * 2, source_std.storage_ip))
                vdisk_info[vdisk_name] = data_vdisk
                edge_configuration['volumenames'].append(data_vdisk.devicename.rsplit('.', 1)[0].split('/', 1)[1])
                values_to_check['vdisks'].append(data_vdisk.serialize())
            except RuntimeError as ex:
                logger.error('Could not create the vdisk. Got {0}'.format(str(ex)))
                raise
        configuration = random.choice(cls.DATA_TEST_CASES)
        threads = {'evented': {'io': {'pairs': [], 'r_semaphore': None}}}
        screen_names = []
        adjusted = False
        try:
            io_thread_pairs, monitoring_data, io_r_semaphore = ThreadingHandler.start_io_polling_threads(volume_bundle=vdisk_info)
            threads['evented']['io']['pairs'] = io_thread_pairs
            threads['evented']['io']['r_semaphore'] = io_r_semaphore
            screen_names, output_files = DataWriter.write_data_fio(client=compute_client,
                                                                   fio_configuration={'io_size': cls.AMOUNT_TO_WRITE,
                                                                                      'configuration': configuration},
                                                                   edge_configuration=edge_configuration)
            logger.info('Doing IO for {0}s before bringing down the node.'.format(cls.IO_TIME))
            ThreadingHandler.keep_threads_running(r_semaphore=io_r_semaphore,
                                                  threads=io_thread_pairs,
                                                  shared_resource=monitoring_data,
                                                  duration=cls.IO_TIME)
            # Threads ready for monitoring at this point, they are waiting to resume
            EdgeTester.adjust_for_reroute(source_std.storagerouter, trigger_rerout=True, ip_to_block=compute_client.ip, additional_ports=[edge_configuration['port']])
            adjusted = True
            downed_time = time.time()
            logger.info('Now waiting two refresh rate intervals to avoid caching. In total {0}s.'.format(EdgeTester.IO_REFRESH_RATE * 2))
            time.sleep(cls.IO_REFRESH_RATE * 2)
            ThreadingHandler.poll_io(r_semaphore=io_r_semaphore,
                                     required_thread_amount=len(io_thread_pairs),
                                     shared_resource=monitoring_data,
                                     downed_time=downed_time,
                                     timeout=timeout,
                                     output_files=output_files,
                                     client=compute_client,
                                     disk_amount=disk_amount)
            EdgeTester._validate_dal(values_to_check)  # Validate
        except Exception as ex:
            logger.error('Got an exception while running configuration {0}. Namely: {1}'.format(configuration, str(ex)))
            failed_configurations.append({'configuration': configuration, 'reason': str(ex)})
        finally:
            if adjusted is True:
                EdgeTester.adjust_for_reroute(source_std.storagerouter, trigger_rerout=False, ip_to_block=compute_client.ip, additional_ports=[edge_configuration['port']])
            for screen_name in screen_names:
                compute_client.run(['screen', '-S', screen_name, '-X', 'quit'])
            for thread_category, thread_collection in threads['evented'].iteritems():
                ThreadHelper.stop_evented_threads(thread_collection['pairs'], thread_collection['r_semaphore'])
            for vdisk in vdisk_info.values():
                VDiskRemover.remove_vdisk(vdisk.guid)
        assert len(failed_configurations) == 0, 'Certain configuration failed: {0}'.format(failed_configurations)
Example #12
 def run_test_edge_blktap(cls,
                          storagedriver,
                          image_path,
                          disk_amount,
                          write_amount,
                          logger=LOGGER):
     """
     Runs the fio deployment using the edge and blktap combination:
     creates the disks via the edge (using qemu convert) and
     writes data to them through blktap
     :param storagedriver: chosen storagedriver
     :param image_path: Path to the image to convert
     :param disk_amount: Amount of disks to deploy
     :param write_amount: Amount of data to write
     :param logger: logging instance
     :return: None
     """
     client = SSHClient(storagedriver.storagerouter, username='******')
     vpool = storagedriver.vpool
     edge_info = {
         'port': storagedriver.ports['edge'],
         'protocol': storagedriver.cluster_node_config['network_server_uri'].split(':')[0],
         'ip': storagedriver.storage_ip
     }
     if SystemHelper.get_ovs_version(storagedriver.storagerouter) == 'ee':
         edge_info.update(cls.get_shell_user())
     vdisk_info = {}
     try:
         for vdisk_number in xrange(disk_amount):  # Create all images first
             vdisk_name = '{0}_{1}-blktap'.format(cls.PREFIX, vdisk_number)
             logger.info("Converting image {0} to {1}:{2}".format(
                 image_path, edge_info['ip'], vdisk_name))
             VMHandler.convert_image(client, image_path, vdisk_name,
                                     edge_info)
             logger.info(
                 "Creating a tap blk device for image {0} at {1}:{2}".format(
                     image_path, edge_info['ip'], vdisk_name))
             tap_dir = VMHandler.create_blktap_device(
                 client, vdisk_name, edge_info)
             vdisk_info[vdisk_name] = tap_dir
         fio_configuration = {
             'io_size': write_amount,
             'configuration': (0, 100)
         }
         DataWriter.write_data_fio(client,
                                   fio_configuration,
                                   file_locations=vdisk_info.values(),
                                   screen=False,
                                   loop_screen=False)
         fio_configuration = {
             'io_size': write_amount,
             'configuration': (100, 0)
         }
         DataWriter.write_data_fio(client,
                                   fio_configuration,
                                   file_locations=vdisk_info.values(),
                                   screen=False,
                                   loop_screen=False)
     except Exception as ex:
         logger.error(
             'An exception occurred while testing edge+blktap: {0}'.format(
                 str(ex)))
         raise
     finally:
         for tap_conn in client.run(['tap-ctl', 'list']).splitlines():
             if not tap_conn.endswith(tuple(vdisk_info.keys())):
                 continue
             logger.info("Deleting tapctl connection {0}".format(tap_conn))
             tap_conn_pid = None
             tap_conn_minor = None
             for tap_conn_section in tap_conn.split():
                 if tap_conn_section.startswith('pid='):
                     tap_conn_pid = tap_conn_section.replace('pid=', '')
                 elif tap_conn_section.startswith('minor='):
                     tap_conn_minor = tap_conn_section.replace('minor=', '')
             if tap_conn_pid is None or tap_conn_minor is None:
                 raise ValueError(
                     'Unable to destroy the blocktap connection because its output format has changed.'
                 )
             client.run([
                 "tap-ctl", "destroy", "-p", tap_conn_pid, "-m",
                 tap_conn_minor
             ])
         for vdisk_name in vdisk_info.keys():
             VDiskRemover.remove_vdisk_by_name(vdisk_name, vpool.name)
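
The teardown above scrapes the pid= and minor= tokens out of `tap-ctl list` output. The same parsing step as a standalone sketch; the sample line reflects the format the example assumes, not a verified tap-ctl contract:

    def parse_tap_conn(line):
        """Extract (pid, minor) from one `tap-ctl list` line, e.g. 'pid=1234 minor=0 state=0 args=...'."""
        fields = dict(section.split('=', 1) for section in line.split() if '=' in section)
        return fields.get('pid'), fields.get('minor')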
Example #13
 def validate_vdisk_clone(cls):
     """
     Validate whether vdisk templating works in various ways
     INFO: 1 vPool should be available on 2 storagerouters
     :return:
     """
     cls.LOGGER.info("Starting to validate template vdisks")
     vpools = VPoolHelper.get_vpools()
     assert len(vpools) >= 1, "Not enough vPools to test"
     try:
         vpool = next(
             (vpool for vpool in vpools if len(vpool.storagedrivers) >= 2))
     except StopIteration:
         assert False, "Not enough Storagedrivers to test"
     # setup base information
     storagedriver_source = vpool.storagedrivers[0]
     vdisks = []
     try:
         # create required vdisk for test
         parent_vdisk_name = '{0}_{1}'.format(cls.PREFIX, str(1).zfill(3))
         parent_vdisk = VDiskHelper.get_vdisk_by_guid(
             VDiskSetup.create_vdisk(
                 vdisk_name=parent_vdisk_name,
                 vpool_name=vpool.name,
                 size=cls.VDISK_SIZE,
                 storagerouter_ip=storagedriver_source.storagerouter.ip))
         vdisks.append(parent_vdisk)
         time.sleep(cls.TEMPLATE_SLEEP_AFTER_CREATE)
         # Create vdisk template  #
         VDiskSetup.set_vdisk_as_template(vdisk_name=parent_vdisk_name,
                                          vpool_name=vpool.name)
         time.sleep(cls.TEMPLATE_SLEEP_AFTER_CREATE)
         clone_vdisk_name = '{0}_from-template'.format(parent_vdisk_name)
         clone_vdisk = VDiskHelper.get_vdisk_by_guid(
             VDiskSetup.create_from_template(
                 vdisk_name=parent_vdisk_name,
                 vpool_name=vpool.name,
                 new_vdisk_name=clone_vdisk_name,
                 storagerouter_ip=storagedriver_source.storagerouter.ip)
             ['vdisk_guid'])
         vdisks.append(clone_vdisk)
         time.sleep(cls.TEMPLATE_SLEEP_BEFORE_DELETE)
         try:
             # try to delete template with clones (should fail) #
             VDiskRemover.remove_vtemplate_by_name(
                 vdisk_name=parent_vdisk_name, vpool_name=vpool.name)
             error_msg = "Removing vtemplate `{0}` should have failed!".format(
                 parent_vdisk_name)
             cls.LOGGER.error(error_msg)
             raise RuntimeError(error_msg)
         except HttpException:
             cls.LOGGER.info(
                 "Removing vtemplate `{0}` has failed as expected (because of leftover clones)!"
                 .format(parent_vdisk_name))
     finally:
         while len(vdisks) > 0:
             vdisk = vdisks.pop()
             VDiskRemover.remove_vdisk(vdisk.guid)
     try:
         # template vdisk from clone (should fail) #
         parent_vdisk = VDiskHelper.get_vdisk_by_guid(
             VDiskSetup.create_vdisk(
                 vdisk_name=parent_vdisk_name,
                 vpool_name=vpool.name,
                 size=cls.VDISK_SIZE,
                 storagerouter_ip=storagedriver_source.storagerouter.ip))
         vdisks.append(parent_vdisk)
         # create a clone from the vdisk
         clone_vdisk_name = '{0}_clone'.format(parent_vdisk_name)
         cloned_vdisk = VDiskHelper.get_vdisk_by_guid(
             VDiskSetup.create_clone(
                 vdisk_name=parent_vdisk_name,
                 vpool_name=vpool.name,
                 new_vdisk_name=clone_vdisk_name,
                 storagerouter_ip=storagedriver_source.storagerouter.ip)
             ['vdisk_guid'])
         vdisks.append(cloned_vdisk)
         # try to create a vTemplate from a clone
         try:
             VDiskSetup.set_vdisk_as_template(vdisk_name=clone_vdisk_name,
                                              vpool_name=vpool.name)
             error_msg = "Setting vdisk `{0}` as template should have failed!".format(
                 clone_vdisk_name)
             cls.LOGGER.error(error_msg)
             raise RuntimeError(error_msg)
         except RuntimeError:
             cls.LOGGER.info(
                 "Setting vdisk `{0}` as template failed as expected (because vdisk is clone)!"
                 .format(clone_vdisk_name))
     finally:
         parent_vdisks = []
         while len(vdisks) > 0:  # Remove clones first
             vdisk = vdisks.pop()
             if vdisk.parent_vdisk_guid is None:
                 parent_vdisks.append(vdisk)
                 continue
             VDiskRemover.remove_vdisk(vdisk.guid)
         for parent_vdisk in parent_vdisks:
             VDiskRemover.remove_vdisk(parent_vdisk.guid)
     cls.LOGGER.info("Finished validating template vdisks")
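
The finally blocks above remove clones before their parents by checking parent_vdisk_guid, since a vdisk that still has clones cannot be removed. Distilled into a sketch:

    def remove_clones_first(vdisks):
        """Delete clones before parents so no parent is still referenced on removal."""
        parents = [vd for vd in vdisks if vd.parent_vdisk_guid is None]
        clones = [vd for vd in vdisks if vd.parent_vdisk_guid is not None]
        for vd in clones + parents:
            VDiskRemover.remove_vdisk(vd.guid)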
Example #14
    def _execute_test(cls):
        """
        Validate if DTL is configured as desired
        REQUIREMENTS:
        * 1 vPool should be available with 1 storagedriver
        * 1 vPool should be available with 2 or more storagedrivers in 2 separate domains
        OPTIONAL:
        * 1 vPool with 1 storagedriver with disabled DTL
        :return:
        """
        cls.LOGGER.info("Starting to validate the basic DTL")
        ##########################
        # get deployment details #
        ##########################
        vpools = VPoolHelper.get_vpools()
        assert len(vpools) >= 1, "Not enough vPools to test"

        # Get suitable vpools
        vpool_single_sd = None
        vpool_multi_sd = None
        vpool_dtl_disabled = None
        for vp in VPoolHelper.get_vpools():
            if vp.configuration['dtl_mode'] != VPoolHelper.DtlStatus.DISABLED:
                if len(vp.storagedrivers) == 1 and vpool_single_sd is None:
                    vpool_single_sd = vp
                    cls.LOGGER.info(
                        "vPool `{0}` has been chosen for SINGLE vPool DTL tests"
                        .format(vp.name))
                elif len(vp.storagedrivers) >= 2 and vpool_multi_sd is None:
                    vpool_multi_sd = vp
                    cls.LOGGER.info(
                        "vPool `{0}` has been chosen for MULTI vPool DTL tests"
                        .format(vp.name))
                else:
                    cls.LOGGER.info(
                        "vPool `{0}` is not suited for tests".format(vp.name))
            else:
                cls.LOGGER.info(
                    "vPool `{0}` with DISABLED DTL is available and will be tested!"
                    .format(vp.name))
                vpool_dtl_disabled = vp

        assert vpool_single_sd is not None, "A vPool should be available with 1 storagedriver"
        assert vpool_multi_sd is not None, "A vPool should be available with 2 or more storagedrivers"

        # pick a random storagedriver
        storagedriver_single = vpool_single_sd.storagedrivers[0]
        storagedriver_multi = random.choice(vpool_multi_sd.storagedrivers)
        storagedrivers = [storagedriver_single, storagedriver_multi]

        # check disabled DTL
        storagedriver_disabled_dtl = None
        if vpool_dtl_disabled is not None:
            storagedriver_disabled_dtl = random.choice(
                vpool_dtl_disabled.storagedrivers)
            storagedrivers.append(storagedriver_disabled_dtl)

        # key = amount of storagedrivers or a_s
        # value = list with the vpool & storagedriver to test
        vpools_to_test = {
            1: [{
                "vpool": vpool_single_sd,
                "storagedriver": storagedriver_single
            }],
            2: [{
                "vpool": vpool_multi_sd,
                "storagedriver": storagedriver_multi
            }]
        }

        # check if disabled DTL vpool needs to be added
        if vpool_dtl_disabled is not None:
            a_s = len(vpool_dtl_disabled.storagedrivers)
            v_s = {
                "vpool": vpool_dtl_disabled,
                "storagedriver": storagedriver_disabled_dtl
            }
            if a_s in vpools_to_test:
                vpools_to_test[a_s].append(v_s)
            else:
                vpools_to_test[a_s] = [v_s]

        ##############
        # start test #
        ##############

        for a_s, vpools in vpools_to_test.iteritems():
            start = time.time()
            for vpool in vpools:

                cls.LOGGER.info(
                    "Starting DTL test with vPool {0} and {1} storagedrivers".
                    format(vpool['vpool'].name,
                           len(vpool['vpool'].storagedrivers)))
                vdisk_name = "{0}-{1}-{2}".format(
                    cls.VDISK_NAME, vpool['vpool'].name,
                    str(len(vpool['vpool'].storagedrivers)))
                try:
                    vdisk_guid = VDiskSetup.create_vdisk(
                        vdisk_name=vdisk_name + '.raw',
                        vpool_name=vpool['vpool'].name,
                        size=cls.SIZE_VDISK,
                        storagerouter_ip=vpool['storagedriver'].storagerouter.ip)
                    # Fetch to validate if it was properly created
                    vdisk = VDiskHelper.get_vdisk_by_guid(
                        vdisk_guid=vdisk_guid)
                except TimeOutError:
                    cls.LOGGER.error("Creation of the vDisk has timed out.")
                    raise
                except RuntimeError as ex:
                    cls.LOGGER.info("Creation of vDisk failed: {0}".format(ex))
                    raise
                else:
                    #####################################
                    # check DTL status after deployment #
                    #####################################

                    correct_msg = "vDisk {0} with {1} storagedriver(s) has correct DTL status: ".format(
                        vdisk_name, a_s)
                    if a_s == 1 and vdisk.dtl_status == VDiskHelper.DtlStatus.STANDALONE:
                        cls.LOGGER.info(correct_msg + vdisk.dtl_status)
                    elif a_s >= 2 and vdisk.dtl_status == VDiskHelper.DtlStatus.SYNC:
                        cls.LOGGER.info(correct_msg + vdisk.dtl_status)
                    elif vdisk.dtl_status == VDiskHelper.DtlStatus.DISABLED and vpool[
                            'vpool'].configuration[
                                'dtl_mode'] == VPoolHelper.DtlStatus.DISABLED:
                        cls.LOGGER.info(
                            correct_msg +
                            " Note: vdisk DTL is disabled but vPool DTL is also disabled!"
                        )
                    else:
                        error_msg = "vDisk {0} with {1} storagedriver(s) has WRONG DTL status: {2}".format(
                            vdisk_name, a_s, vdisk.dtl_status)
                        cls.LOGGER.error(error_msg)
                        raise RuntimeError(error_msg)

                    ################################
                    # try to change the DTL config #
                    ################################

                    base_config = {
                        "sco_size": 4,
                        "dtl_mode": VPoolHelper.DtlStatus.SYNC,
                        "write_buffer": 512
                    }
                    if a_s == 1:
                        ########################################################################################
                        # change config to domain with non existing storagedrivers of this vpool (should fail) #
                        ########################################################################################
                        cls.LOGGER.info(
                            "Starting test: change config to domain with non existing storagedrivers "
                            "of this vpool (should fail)")
                        base_config['dtl_target'] = [
                            random.choice([
                                domain_guid for domain_guid in
                                DomainHelper.get_domain_guids()
                                if domain_guid not in vpool['storagedriver'].
                                storagerouter.regular_domains
                            ])
                        ]
                        cls.LOGGER.info("Changing dtl_target to: {0}".format(
                            DomainHelper.get_domain_by_guid(
                                domain_guid=base_config['dtl_target']
                                [0]).name))
                        try:
                            cls.LOGGER.info(base_config)
                            VDiskSetup.set_config_params(
                                vdisk_name=vdisk_name + '.raw',
                                vpool_name=vpool['vpool'].name,
                                config=base_config)
                            error_msg = "Changing config to a domain with non existing storagedrivers should have failed with vdisk: {0}!".format(
                                vdisk_name)
                            cls.LOGGER.error(error_msg)
                            raise Exception(error_msg)
                        except TimeOutError:
                            cls.LOGGER.error(
                                "Changing config to a domain with non existing storagedrivers has timed out."
                            )
                            raise
                        except RuntimeError:
                            cls.LOGGER.info(
                                "Changing config to a domain with non existing storagedrivers has failed as expected!"
                            )

                        ##############################################################################################
                        # change config to domain where there are other storagedrivers but not of ours (should fail) #
                        ##############################################################################################
                        cls.LOGGER.info(
                            "Starting test: change config to domain where there are other storagedrivers but not of ours (should fail)"
                        )

                        filtered_domains = list(
                            set(DomainHelper.get_domain_guids()) -
                            set(vpool['storagedriver'].storagerouter.
                                regular_domains))
                        base_config['dtl_target'] = [filtered_domains[0]]
                        cls.LOGGER.info(
                            "Current vdisk domain location: {0}".format(
                                DomainHelper.get_domain_by_guid(
                                    domain_guid=vpool['storagedriver'].
                                    storagerouter.regular_domains[0]).name))
                        cls.LOGGER.info("Changing dtl_target to: {0}".format(
                            DomainHelper.get_domain_by_guid(
                                domain_guid=base_config['dtl_target']
                                [0]).name))
                        try:
                            VDiskSetup.set_config_params(
                                vdisk_name=vdisk_name + '.raw',
                                vpool_name=vpool['vpool'].name,
                                config=base_config)
                            error_msg = "Changing config to a domain without storagedrivers of this vpool should have failed with vdisk: {0}!".format(
                                vdisk_name)
                            cls.LOGGER.error(error_msg)
                            raise Exception(error_msg)
                        except TimeOutError:
                            cls.LOGGER.error(
                                "Changing config to a domain without storagedrivers of this vpool has timed out."
                            )
                            raise
                        except RuntimeError:
                            cls.LOGGER.info(
                                "Changing config to a domain without storagedrivers of this vpool has failed as expected!"
                            )
                    elif a_s >= 2:
                        #######################################################################
                        # change config to domain with active storagedrivers (should succeed) #
                        #######################################################################
                        cls.LOGGER.info(
                            "Starting test: change config to domain with active storagedrivers (should succeed)"
                        )

                        # change current target domain to other target domain
                        current_vdisk_domains = StoragedriverHelper.get_storagedriver_by_id(
                            storagedriver_id=vdisk.storagedriver_id
                        ).storagerouter.regular_domains
                        cls.LOGGER.info(
                            "Currently the vdisk is living in: {0}".format(
                                current_vdisk_domains))
                        vpool_domains = VPoolHelper.get_domains_by_vpool(
                            vpool_name=vdisk.vpool.name)
                        cls.LOGGER.info(
                            "Currently the vpool {0} is available in: {1}".
                            format(vdisk.vpool.name, vpool_domains))
                        future_domains = list(
                            set(vpool_domains) - set(current_vdisk_domains))
                        cls.LOGGER.info(
                            "DTL will be moved to other domain: {0}".format(
                                future_domains))
                        base_config['dtl_target'] = future_domains

                        # change settings
                        try:
                            VDiskSetup.set_config_params(
                                vdisk_name=vdisk_name + '.raw',
                                vpool_name=vpool['vpool'].name,
                                config=base_config)
                        except TimeOutError:
                            cls.LOGGER.error(
                                "Changing config to a domain with active storagedrivers has timed out."
                            )
                            raise
                        except RuntimeError:
                            cls.LOGGER.error(
                                "Changing config to a domain with active storagedrivers was unsuccessful!"
                            )
                            raise
                        cls.LOGGER.info(
                            "Changing config to a domain with active storagedrivers was successful!"
                        )

                    cls.LOGGER.info("Removing vDisk {0}".format(vdisk.name))
                    VDiskRemover.remove_vdisk(vdisk_guid=vdisk.guid)
                    cls.LOGGER.info("Finished removing vDisk {0}".format(
                        vdisk.name))

            end = time.time()

            # display run time
            cls.LOGGER.info("Testing the DTL took {0} seconds".format(
                int(end - start)))

        cls.LOGGER.info("Finished validating the basic DTL")
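
The status checks in this example reduce to a small decision table. A sketch of that mapping, using the same DtlStatus constants the example asserts against:

    def expected_dtl_status(storagedriver_amount, vpool_dtl_disabled):
        """Expected vdisk DTL status as asserted in the example above."""
        if vpool_dtl_disabled:
            return VDiskHelper.DtlStatus.DISABLED    # vPool DTL off, so vdisk DTL is disabled
        if storagedriver_amount == 1:
            return VDiskHelper.DtlStatus.STANDALONE  # no second storagedriver to sync to
        return VDiskHelper.DtlStatus.SYNC            # 2 or more storagedrivers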
Example #15
    def test_ha_fio(cls,
                    fio_bin_path,
                    cluster_info,
                    is_ee,
                    disk_amount=1,
                    timeout=CIConstants.HA_TIMEOUT,
                    logger=LOGGER):
        """
        Uses a modified fio to work with the openvstorage protocol
        :param fio_bin_path: path of the fio binary
        :type fio_bin_path: str
        :param cluster_info: information about the cluster, contains all dal objects
        :type cluster_info: dict
        :param is_ee: is it an ee version or not
        :type is_ee: bool
        :param disk_amount: amount of disks to test fail over with
        :type disk_amount: int
        :param timeout: timeout in seconds
        :type timeout: int
        :param logger: logging instance
        :return: None
        :rtype: NoneType
        """
        destination_storagedriver = cluster_info['storagedrivers'][
            'destination']
        source_storagedriver = cluster_info['storagedrivers']['source']
        vpool = destination_storagedriver.vpool

        compute_client = SSHClient(cluster_info['storagerouters']['compute'],
                                   username='******')

        vm_to_stop = cls.HYPERVISOR_INFO['vms'][
            source_storagedriver.storage_ip]['name']
        parent_hypervisor = HypervisorFactory().get()
        values_to_check = {
            'source_std': source_storagedriver.serialize(),
            'target_std': destination_storagedriver.serialize(),
            'vdisks': []
        }
        # Create vdisks
        protocol = source_storagedriver.cluster_node_config[
            'network_server_uri'].split(':')[0]
        edge_configuration = {
            'fio_bin_location': fio_bin_path,
            'hostname': source_storagedriver.storage_ip,
            'port': source_storagedriver.ports['edge'],
            'protocol': protocol,
            'volumenames': []
        }
        if is_ee is True:
            edge_configuration.update(cls.get_shell_user())

        vdisk_info = {}
        failed_configurations = []

        for index in xrange(0, disk_amount):
            try:
                vdisk_name = '{0}_vdisk{1}'.format(cls.TEST_NAME,
                                                   str(index).zfill(3))
                data_vdisk = VDiskHelper.get_vdisk_by_guid(
                    VDiskSetup.create_vdisk(vdisk_name, vpool.name,
                                            cls.AMOUNT_TO_WRITE,
                                            source_storagedriver.storage_ip))
                vdisk_info[vdisk_name] = data_vdisk
                edge_configuration['volumenames'].append(
                    data_vdisk.devicename.rsplit('.', 1)[0].split('/', 1)[1])
                values_to_check['vdisks'].append(data_vdisk.serialize())
            except TimeOutError:
                logger.error('Creating the vdisk has timed out.')
                raise
            except RuntimeError as ex:
                logger.error('Could not create the vdisk. Got {0}'.format(
                    str(ex)))
                raise
        configuration = random.choice(cls.DATA_TEST_CASES)
        threads = {'evented': {'io': {'pairs': [], 'r_semaphore': None}}}
        vm_downed = False
        screen_names = []
        try:
            logger.info(
                'Starting threads.'
            )  # Separate because creating vdisks takes a while, while creating the threads does not

            io_thread_pairs, monitoring_data, io_r_semaphore = ThreadingHandler.start_io_polling_threads(
                volume_bundle=vdisk_info)
            threads['evented']['io']['pairs'] = io_thread_pairs
            threads['evented']['io']['r_semaphore'] = io_r_semaphore
            screen_names, output_files = DataWriter.write_data_fio(
                client=compute_client,
                fio_configuration={
                    'io_size': cls.AMOUNT_TO_WRITE,
                    'configuration': configuration
                },
                edge_configuration=edge_configuration)
            logger.info(
                'Doing IO for {0}s before bringing down the node.'.format(
                    cls.IO_TIME))
            ThreadingHandler.keep_threads_running(
                r_semaphore=io_r_semaphore,
                threads=io_thread_pairs,
                shared_resource=monitoring_data,
                duration=cls.IO_TIME)
            # Threads ready for monitoring at this point
            #########################
            # Bringing original owner of the volume down
            #########################
            try:
                logger.info('Stopping {0}.'.format(vm_to_stop))
                VMHandler.stop_vm(hypervisor=parent_hypervisor,
                                  vmid=vm_to_stop)
                downed_time = time.time()
                vm_downed = True
            except Exception as ex:
                logger.error('Failed to stop. Got {0}'.format(str(ex)))
                raise
            time.sleep(cls.IO_REFRESH_RATE * 2)
            # Start IO polling to verify nothing went down
            ThreadingHandler.poll_io(
                r_semaphore=io_r_semaphore,
                required_thread_amount=len(io_thread_pairs),
                shared_resource=monitoring_data,
                downed_time=downed_time,
                timeout=timeout,
                output_files=output_files,
                client=compute_client,
                disk_amount=disk_amount)
            cls._validate(values_to_check, monitoring_data)
        except Exception as ex:
            failed_configurations.append({
                'configuration': configuration,
                'reason': str(ex)
            })
        finally:
            for thread_category, thread_collection in threads[
                    'evented'].iteritems():
                ThreadHelper.stop_evented_threads(
                    thread_collection['pairs'],
                    thread_collection['r_semaphore'])
            if vm_downed is True:
                VMHandler.start_vm(parent_hypervisor, vm_to_stop)
                SystemHelper.idle_till_ovs_is_up(
                    source_storagedriver.storage_ip, **cls.get_shell_user())
                # @TODO: Remove when https://github.com/openvstorage/integrationtests/issues/540 is fixed
                FwkHandler.restart_all()
            if screen_names:
                for screen_name in screen_names:
                    compute_client.run(
                        ['screen', '-S', screen_name, '-X', 'quit'])
            for vdisk in vdisk_info.values():
                VDiskRemover.remove_vdisk(vdisk.guid)
        assert len(failed_configurations) == 0, \
            'Certain configuration failed: {0}'.format(failed_configurations)
Example #16
    def validate_vdisk_clone(cls,
                             amount_vdisks=AMOUNT_VDISKS,
                             amount_to_write=AMOUNT_TO_WRITE):
        """
        Regress the template memleak: repeatedly create vdisks from a template, run fio on them and remove them
        INFO: 1 vPool should be available on 2 storagerouters

        :return:
        """

        cls.LOGGER.info("Starting to regress template memleak vdisks")

        vpools = VPoolHelper.get_vpools()
        assert len(vpools) >= 1, "Not enough vPools to test"

        try:
            vpool = next(
                (vpool for vpool in vpools if len(vpool.storagedrivers) >= 2))
        except StopIteration:
            assert False, "Not enough Storagedrivers to test"

        # setup base information
        storagedriver_source = vpool.storagedrivers[0]
        client = SSHClient(storagedriver_source.storage_ip, username='******')

        # create required vdisk for test
        vdisk_name = VDiskTemplateChecks.PREFIX + '1'
        assert VDiskSetup.create_vdisk(
            vdisk_name=vdisk_name + '.raw',
            vpool_name=vpool.name,
            size=VDiskTemplateChecks.VDISK_SIZE,
            storagerouter_ip=storagedriver_source.storagerouter.ip) is not None
        time.sleep(VDiskTemplateChecks.TEMPLATE_SLEEP_AFTER_CREATE)

        ##################
        # template vdisk #
        ##################

        VDiskSetup.set_vdisk_as_template(vdisk_name=vdisk_name + '.raw',
                                         vpool_name=vpool.name)
        time.sleep(VDiskTemplateChecks.TEMPLATE_SLEEP_AFTER_CREATE)

        ######################
        # log current memory #
        ######################

        memory_usage_beginning = StatisticsHelper.get_current_memory_usage(
            storagedriver_source.storage_ip)
        cls.LOGGER.info("Starting memory usage monitor: {0}/{1}".format(
            memory_usage_beginning[0], memory_usage_beginning[1]))
        pid = int(
            client.run(
                "pgrep -a volumedriver | grep {0} | cut -d ' ' -f 1".format(
                    vpool.name),
                allow_insecure=True))
        cls.LOGGER.info(
            "Starting extended memory monitor on pid {0}: \n{1}".format(
                pid,
                StatisticsHelper.get_current_memory_usage_of_process(
                    storagedriver_source.storage_ip, pid)))

        ##################################################################
        # create vdisks from template, perform fio and delete them again #
        ##################################################################

        for vdisk in xrange(amount_vdisks):
            # create vdisk from template
            clone_vdisk_name = vdisk_name + '-template-' + str(vdisk)
            VDiskSetup.create_from_template(
                vdisk_name=vdisk_name + '.raw',
                vpool_name=vpool.name,
                new_vdisk_name=clone_vdisk_name + '.raw',
                storagerouter_ip=storagedriver_source.storagerouter.ip)
            # perform fio test
            client.run([
                "fio", "--name=test",
                "--filename=/mnt/{0}/{1}.raw".format(vpool.name,
                                                     clone_vdisk_name),
                "--ioengine=libaio", "--iodepth=4", "--rw=write", "--bs=4k",
                "--direct=1", "--size={0}M".format(amount_to_write),
                "--output-format=json", "--output={0}.json".format(vdisk_name)
            ])
            # delete vdisk
            time.sleep(cls.TEMPLATE_SLEEP_BEFORE_DELETE)
            VDiskRemover.remove_vdisk_by_name(vdisk_name=clone_vdisk_name,
                                              vpool_name=vpool.name)

        ###################
        # remove template #
        ###################

        time.sleep(cls.TEMPLATE_SLEEP_BEFORE_DELETE)
        VDiskRemover.remove_vtemplate_by_name(vdisk_name=vdisk_name,
                                              vpool_name=vpool.name)

        ######################
        # log current memory #
        ######################

        memory_usage_ending = StatisticsHelper.get_current_memory_usage(
            storagedriver_source.storage_ip)
        cls.LOGGER.info("Finished memory usage monitor: {0}/{1}".format(
            memory_usage_ending[0], memory_usage_ending[1]))
        pid = int(
            client.run(
                "pgrep -a volumedriver | grep {0} | cut -d ' ' -f 1".format(
                    vpool.name),
                allow_insecure=True))
        cls.LOGGER.info(
            "Finished extended memory monitor on pid {0}: \n{1}".format(
                pid,
                StatisticsHelper.get_current_memory_usage_of_process(
                    storagedriver_source.storage_ip, pid)))

        cls.LOGGER.info("Finished regressing template memleak vdisks")
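
Note that the example only logs memory usage before and after the clone loop; an actual regression check would compare the two samples. A hedged sketch of such a check, where the 256 MiB threshold and the (used, total) shape of get_current_memory_usage's return value are assumptions:

    # Hypothetical leak assertion on top of the monitoring above.
    ALLOWED_GROWTH = 256 * 1024 ** 2  # illustrative slack, in bytes
    growth = memory_usage_ending[0] - memory_usage_beginning[0]
    assert growth < ALLOWED_GROWTH, \
        'volumedriver memory grew by {0} bytes, possible leak'.format(growth)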
Example #17
    def validate_add_extend_remove_vpool(cls, timeout=ADD_EXTEND_REMOVE_VPOOL_TIMEOUT):
        """
        Validate if we can add, extend and/or remove a vPool, testing the following scenarios:
            * Normal with no accelerated backend
            * Accelerated vPool with hdd_backend & ssd_backend

        INFO:
            * at least 2 storagerouters should be available
            * at least 2 backends should be available with default preset

        :param timeout: specify a timeout
        :type timeout: int
        :return:
        """
        cls.LOGGER.info("Starting to validate add-extend-remove vpool")


        storagerouter_ips = []
        for storagerouter_ip in StoragerouterHelper.get_storagerouter_ips():
            try:
                RoleValidation.check_required_roles(VPoolSetup.REQUIRED_VPOOL_ROLES, storagerouter_ip, "LOCAL")
                storagerouter_ips.append(storagerouter_ip)
                cls.LOGGER.info("Added `{0}` to list of eligible storagerouters".format(storagerouter_ip))
            except RuntimeError as ex:
                cls.LOGGER.warning("Did not add `{0}` to list of eligible "
                                   "storagerouters because: {1}".format(storagerouter_ip, ex))

        # At least 2 storagerouters with the required roles are needed
        assert len(storagerouter_ips) > 1, "We need at least 2 storagerouters with valid roles: {0}"\
            .format(storagerouter_ips)
        alba_backends = BackendHelper.get_alba_backends()
        assert len(alba_backends) >= 2, "We need at least 2 or more backends!"

        # Global vdisk details
        vdisk_deployment_ip = storagerouter_ips[0]

        # Determine backends (2)
        hdd_backend = alba_backends[0]
        ssd_backend = alba_backends[1]

        # Add preset to all alba_backends (we only use the first two as seen above)
        for alba_backend in alba_backends[0:2]:
            cls.LOGGER.info("Adding custom preset to backend {0}".format(alba_backend.name))
            preset_result = BackendSetup.add_preset(albabackend_name=alba_backend.name,
                                                    preset_details=cls.PRESET,
                                                    timeout=cls.PRESET_CREATE_TIMEOUT)
            assert preset_result is True, 'Failed to add preset to backend {0}'.format(alba_backend.name)
            cls.LOGGER.info("Finished adding custom preset to backend {0}".format(alba_backend.name))

        # Vpool configs, regressing https://github.com/openvstorage/alba/issues/560 & more
        vpool_configs = {
            "no_fragment_cache_on_disk": {
                "strategy": {"cache_on_read": False, "cache_on_write": False},
                "location": "disk"
            },
            "no_fragment_cache_on_accel": {
                "strategy": {"cache_on_read": False, "cache_on_write": False},
                "location": "backend",
                "backend": {
                    "name": ssd_backend.name,
                    "preset": cls.PRESET['name']
                }
            }
        }

        for cfg_name, cfg in vpool_configs.iteritems():
            # Create vpool
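            # On the Enterprise Edition, the same cache settings are reused for the block cache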
            block_cache_cfg = None
            if SystemHelper.get_ovs_version().lower() == 'ee':
                block_cache_cfg = cfg
            for storagerouter_ip in storagerouter_ips:
                cls.LOGGER.info("Add/extend vPool `{0}` on storagerouter `{1}`".format(cls.VPOOL_NAME, storagerouter_ip))
                start = time.time()
                try:
                    cls._add_vpool(vpool_name=cls.VPOOL_NAME, fragment_cache_cfg=cfg,
                                   block_cache_cfg=block_cache_cfg, albabackend_name=hdd_backend.name,
                                   timeout=timeout, preset_name=cls.PRESET['name'],
                                   storagerouter_ip=storagerouter_ip)
                except TimeOutError:
                    cls.LOGGER.warning('Adding/extending the vpool has timed out after {0}s. Polling for another {1}s.'
                                       .format(timeout, cls.ADD_EXTEND_REMOVE_VPOOL_TIMEOUT_FORGIVING - timeout))
                    # Let's be a bit forgiving and give the fwk 5 mins to actually complete the task
                    vpool = VPoolHelper.get_vpool_by_name(cls.VPOOL_NAME)
                    while vpool.status != 'RUNNING':
                        if time.time() - start > cls.ADD_EXTEND_REMOVE_VPOOL_TIMEOUT_FORGIVING:
                            raise RuntimeError('The vpool was not added or extended after {0}s'.format(cls.ADD_EXTEND_REMOVE_VPOOL_TIMEOUT_FORGIVING))
                        cls.LOGGER.warning('Vpool status is still {0} after {1}s.'.format(vpool.status, time.time() - start))
                        time.sleep(1)
                        vpool.discard()
                    cls.LOGGER.warning('The vpool was added or extended after {0}s.'.format(time.time() - start))
                except RuntimeError as ex:
                    cls.LOGGER.error('Adding/extending the vpool has failed with {0}.'.format(str(ex)))
                    raise
                # Check #proxies
                vpool = VPoolHelper.get_vpool_by_name(cls.VPOOL_NAME)
                for storagedriver in vpool.storagedrivers:
                    assert len(storagedriver.alba_proxies) == 2, 'The vpool was not set up with 2 proxies. Found {0} instead.'.format(len(storagedriver.alba_proxies))
            # Deploy a vdisk
            vdisk_name = cls.PREFIX + cfg_name
            cls.LOGGER.info("Starting to create vdisk `{0}` on vPool `{1}` with size `{2}` on node `{3}`"
                                       .format(vdisk_name, cls.VPOOL_NAME, cls.VDISK_SIZE, vdisk_deployment_ip))
            VDiskSetup.create_vdisk(vdisk_name=vdisk_name + '.raw',
                                    vpool_name=cls.VPOOL_NAME,
                                    size=cls.VDISK_SIZE,
                                    storagerouter_ip=vdisk_deployment_ip,
                                    timeout=cls.VDISK_CREATE_TIMEOUT)
            cls.LOGGER.info("Finished creating vdisk `{0}`".format(vdisk_name))
            cls.LOGGER.info("Starting to delete vdisk `{0}`".format(vdisk_name))
            VDiskRemover.remove_vdisk_by_name(vdisk_name, cls.VPOOL_NAME)
            cls.LOGGER.info("Finished deleting vdisk `{0}`".format(vdisk_name))

            # Delete vpool
            for storagerouter_ip in storagerouter_ips:
                storagedrivers_to_delete = len(vpool.storagedrivers)
                start = time.time()  # Reset the timer; `start` would otherwise still hold the add/extend timestamp
                cls.LOGGER.info("Deleting vpool `{0}` on storagerouter `{1}`".format(cls.VPOOL_NAME, storagerouter_ip))
                try:
                    VPoolRemover.remove_vpool(vpool_name=cls.VPOOL_NAME, storagerouter_ip=storagerouter_ip, timeout=timeout)
                except TimeOutError:
                    try:
                        vpool.discard()  # Discard is needed to update the vpool status as it was running before
                        cls.LOGGER.warning('Removing/shrinking the vpool has timed out after {0}s. Polling for another {1}s.'
                                           .format(timeout, cls.ADD_EXTEND_REMOVE_VPOOL_TIMEOUT_FORGIVING - timeout))
                        while vpool.status != 'RUNNING':
                            if time.time() - start > cls.ADD_EXTEND_REMOVE_VPOOL_TIMEOUT_FORGIVING:
                                raise RuntimeError('The vpool was not removed or shrunk after {0}s'.format(cls.ADD_EXTEND_REMOVE_VPOOL_TIMEOUT_FORGIVING))
                            cls.LOGGER.warning('Vpool status is still {0} after {1}s.'.format(vpool.status, time.time() - start))
                            time.sleep(1)
                            vpool.discard()
                    except ObjectNotFoundException:
                        if storagedrivers_to_delete != 1:  # Should be last one
                            raise
                except RuntimeError as ex:
                    cls.LOGGER.error('Shrinking/removing the vpool has failed with {0}.'.format(str(ex)))
                    raise
            cls.LOGGER.info('Vpool has been fully removed.')
        # Delete presets
        for alba_backend in alba_backends[0:2]:
            cls.LOGGER.info("Removing custom preset from backend {0}".format(alba_backend.name))
            remove_preset_result = BackendRemover.remove_preset(albabackend_name=alba_backend.name,
                                                                preset_name=cls.PRESET['name'],
                                                                timeout=cls.PRESET_REMOVE_TIMEOUT)
            assert remove_preset_result is True, 'Failed to remove preset from backend {0}'.format(alba_backend.name)
            cls.LOGGER.info("Finshed removing custom preset from backend {0}".format(alba_backend.name))

        cls.LOGGER.info("Finished to validate add-extend-remove vpool")
Example #18
0
    def validate_vdisk_deployment(cls):
        """
        Validate if vdisk deployment works via various ways
        INFO: 1 vPool should be available on 1 storagerouter
        :return:
        """
        cls.LOGGER.info("Starting to validate the vdisk deployment")

        vpools = VPoolHelper.get_vpools()
        assert len(vpools) >= 1, "Not enough vPools to test"

        vpool = vpools[0]  # just pick the first vpool you find
        assert len(
            vpool.storagedrivers) >= 1, "Not enough Storagedrivers to test"

        # setup base information
        storagedriver = vpool.storagedrivers[0]
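        # cluster_node_config['network_server_uri'] has the form '<protocol>://<ip>:<port>',
        # so the first ':'-separated token is the edge protocol (e.g. tcp)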
        protocol = storagedriver.cluster_node_config[
            'network_server_uri'].split(':')[0]
        storage_ip = storagedriver.storage_ip
        edge_port = storagedriver.ports['edge']
        client = SSHClient(storagedriver.storage_ip, username='******')
        # =======
        # VIA API
        # =======
        for size in cls.VDISK_SIZES:
            api_disk_name = cls.PREFIX + str(size) + '-api'
            cls.LOGGER.info(
                "Starting to create vdisk `{0}` on vPool `{1}` with size `{2}` "
                "on node `{3}`".format(api_disk_name, vpool.name, size,
                                       storagedriver.storagerouter.ip))
            VDiskSetup.create_vdisk(
                vdisk_name=api_disk_name + '.raw',
                vpool_name=vpool.name,
                size=size,
                storagerouter_ip=storagedriver.storagerouter.ip,
                timeout=cls.VDISK_CREATE_TIMEOUT)
            cls.LOGGER.info(
                "Finished creating vdisk `{0}`".format(api_disk_name))
            cls._check_vdisk(vdisk_name=api_disk_name, vpool_name=vpool.name)
            cls.LOGGER.info(
                "Starting to delete vdisk `{0}`".format(api_disk_name))
            VDiskRemover.remove_vdisk_by_name(api_disk_name, vpool.name)
            cls.LOGGER.info(
                "Finished deleting vdisk `{0}`".format(api_disk_name))

        # ========
        # VIA QEMU
        # ========
        for size in cls.VDISK_SIZES:
            qemu_disk_name = cls.PREFIX + str(size) + '-qemu'
            edge_info = {
                'port': edge_port,
                'protocol': protocol,
                'ip': storage_ip,
            }
            if SystemHelper.get_ovs_version(
                    storagedriver.storagerouter) == 'ee':
                edge_info.update(cls.get_shell_user())
            VMHandler.create_image(client, qemu_disk_name, size, edge_info)
            cls.LOGGER.info(
                "Finished creating vdisk `{0}`".format(qemu_disk_name))
            cls._check_vdisk(vdisk_name=qemu_disk_name, vpool_name=vpool.name)
            cls.LOGGER.info(
                "Starting to delete vdisk `{0}`".format(qemu_disk_name))
            VDiskRemover.remove_vdisk_by_name(qemu_disk_name, vpool.name)
            cls.LOGGER.info(
                "Finished deleting vdisk `{0}`".format(qemu_disk_name))

        # ============
        # VIA TRUNCATE
        # ============
        for size in cls.VDISK_SIZES:
            truncate_disk_name = cls.PREFIX + str(size) + '-trunc'
            cls.LOGGER.info(
                "Starting to create vdisk `{0}` on vPool `{1}` on node `{2}` "
                "with size `{3}`".format(truncate_disk_name, vpool.name,
                                         storagedriver.storage_ip, size))
            client.run([
                "truncate", "-s",
                str(size), "/mnt/{0}/{1}.raw".format(vpool.name,
                                                     truncate_disk_name)
            ])
            cls.LOGGER.info(
                "Finished creating vdisk `{0}`".format(truncate_disk_name))
            cls._check_vdisk(vdisk_name=truncate_disk_name,
                             vpool_name=vpool.name)
            cls.LOGGER.info(
                "Starting to delete vdisk `{0}`".format(truncate_disk_name))
            VDiskRemover.remove_vdisk_by_name(truncate_disk_name, vpool.name)
            cls.LOGGER.info(
                "Finished deleting vdisk `{0}`".format(truncate_disk_name))
        cls.LOGGER.info("Finished to validate the vdisk deployment")
Example #19
0
 def _execute_test(cls, amount_vdisks=AMOUNT_VDISKS):
     """
     Executes an offline migration
     :param amount_vdisks: amount of vdisks to test
     :type amount_vdisks: int
     :return:
     """
     cls.LOGGER.info("Starting offline migrate test.")
     vpool = None  # Get a suitable vpool
     for vp in VPoolHelper.get_vpools():
         if len(vp.storagedrivers) >= 2:
             vpool = vp
             break
     assert vpool is not None, "Not enough vPools to test. Requires 1 with at least 2 storagedrivers and found 0."
     ##########################
     # Setup base information #
     ##########################
     # Executor storagedriver_1 is current system
     std_1 = random.choice(list(vpool.storagedrivers))
     # Get a random other storagedriver to migrate to
     std_2 = random.choice([st for st in vpool.storagedrivers if st != std_1])
     # Cache to validate properties
     values_to_check = {
         'source_std': std_1.serialize(),
         'target_std': std_2.serialize()
     }
     ###############################
     # start deploying & migrating #
     ###############################
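     # Keep a handle on every created vdisk so the finally-block can clean them up even on failure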
     created_vdisks = []
     try:
         for i in xrange(amount_vdisks):
             ################
             # create vdisk #
             ################
             vdisk_name = "{0}_{1}".format(cls.TEST_NAME, i)
             try:
                 vdisk_guid = VDiskSetup.create_vdisk(vdisk_name=vdisk_name + '.raw',
                                                      vpool_name=vpool.name,
                                                      size=cls.AMOUNT_TO_WRITE * 5,
                                                      storagerouter_ip=std_1.storagerouter.ip)
                 vdisk = VDiskHelper.get_vdisk_by_guid(vdisk_guid)  # Fetch to validate if it was properly created
                 created_vdisks.append(vdisk)
                 values_to_check['vdisk'] = vdisk.serialize()
             except TimeOutError:
                 cls.LOGGER.error("Creation of the vdisk has timed out.")
                 raise
             except RuntimeError as ex:
                 cls.LOGGER.error("Creation of vdisk failed: {0}".format(ex))
                 raise
             else:
                 time.sleep(cls.SLEEP_TIME)
                 try:
                     cls.LOGGER.info("Moving vdisk {0} from {1} to {2}".format(vdisk_guid, std_1.storage_ip, std_2.storage_ip))
                     VDiskSetup.move_vdisk(vdisk_guid=vdisk_guid, target_storagerouter_guid=std_2.storagerouter_guid)
                     time.sleep(cls.SLEEP_TIME)
                     cls.LOGGER.info("Validating move...")
                     cls._validate_move(values_to_check)
                 except Exception as ex:
                      cls.LOGGER.exception('Failed during migration: {0}'.format(ex))
                     raise
     finally:
         for vdisk in created_vdisks:
             VDiskRemover.remove_vdisk(vdisk.guid)
     cls.LOGGER.info("Finished offline migrate test.")