Example #1
def setup():
    """
    Setup for the Arakoon package; executed when any test in this package is run
    Makes the necessary changes required to run the tests
    :return: None
    """
    autotest_config = General.get_config()
    backend_name = autotest_config.get('backend', 'name')
    assert backend_name, 'Please fill out a backend name in the autotest.cfg file'
    backend = GeneralBackend.get_by_name(backend_name)
    if backend is not None:
        GeneralAlba.remove_alba_backend(backend.alba_backend)

    for storagerouter in GeneralStorageRouter.get_masters():
        root_client = SSHClient(storagerouter, username='******')
        if GeneralService.get_service_status(name='ovs-scheduled-tasks',
                                             client=root_client) is True:
            GeneralService.stop_service(name='ovs-scheduled-tasks',
                                        client=root_client)

    storagerouters = GeneralStorageRouter.get_storage_routers()
    for sr in storagerouters:
        root_client = SSHClient(sr, username='******')
        GeneralDisk.add_db_role(sr)

        for location in TEST_CLEANUP:
            root_client.run('rm -rf {0}'.format(location))

    GeneralAlba.add_alba_backend(backend_name)
    GeneralArakoon.voldrv_arakoon_checkup()
Example #2
    def prepare_alba_backend(name=None):
        """
        Create an ALBA backend and claim disks
        :param name: Name for the backend
        :return: None
        """
        # @TODO: Fix this, because backend_type should not be configurable if you always create an ALBA backend
        # @TODO 2: Get rid of these asserts, any test (or testsuite) should verify the required params first before starting execution
        autotest_config = General.get_config()
        if name is None:
            name = autotest_config.get('backend', 'name')
        nr_of_disks_to_claim = autotest_config.getint('backend', 'nr_of_disks_to_claim')
        type_of_disks_to_claim = autotest_config.get('backend', 'type_of_disks_to_claim')
        assert name,\
            "Please fill out a valid backend name in autotest.cfg file"

        storage_routers = GeneralStorageRouter.get_storage_routers()
        for sr in storage_routers:
            if GeneralStorageRouter.has_roles(storagerouter=sr, roles='DB') is False:
                GeneralDisk.add_db_role(sr)
            if GeneralStorageRouter.has_roles(storagerouter=sr, roles=['SCRUB', 'WRITE']) is False:
                GeneralDisk.add_write_scrub_roles(sr)
        backend = GeneralBackend.get_by_name(name)
        if not backend:
            alba_backend = GeneralAlba.add_alba_backend(name)
        else:
            alba_backend = backend.alba_backend
        GeneralAlba.claim_asds(alba_backend, nr_of_disks_to_claim, type_of_disks_to_claim)
        if GeneralAlba.has_preset(alba_backend=alba_backend,
                                  preset_name=GeneralAlba.ONE_DISK_PRESET) is False:
            GeneralAlba.add_preset(alba_backend=alba_backend,
                                   name=GeneralAlba.ONE_DISK_PRESET,
                                   policies=[[1, 1, 1, 2]])
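For reference, the configuration keys read above (and 'main'/'grid_ip' used further below) suggest a ConfigParser-style autotest.cfg along these lines; the values here are made-up placeholders, not defaults from the framework:

    [main]
    grid_ip = 10.100.1.1

    [backend]
    name = mybackend
    nr_of_disks_to_claim = 3
    type_of_disks_to_claim = SATA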
    def remove_roles_from_config(config, number_of_roles_to_remain=0):
        """

        :param config: Configuration file containing all of the required information
        :type config: dict

        :param number_of_roles_to_remain: how roles may still be defined on the partition. The first 'number_of_roles_to
        _remain' will remain.
        :type number_of_roles_to_remain: int

        :return: Returns a object with the partition guid and its roles
        :type: Object
        """
        collection = {}
        # Remove disk roles
        logger.info("Starting removal of disk roles")
        for key, value in config.iteritems():
            for disk_info in value['disks']:
                disk_name = disk_info['disk_name']
                logger.info("Fetching disk with diskname '{0}' for ip '{1}'".format(disk_name, key))
                disk = GeneralStorageRouter().get_disk_by_ip(disk_name, key)
                logger.info("Fetching partition of disk '{0}'".format(disk.guid))
                partition = GeneralDisk.partition_disk(disk)
                if number_of_roles_to_remain == 0:
                    # When number_of_roles_to_remain == 0, everything should have been removed
                    remaining_roles = []
                    logger.info("Removing roles '{0}' from partition '{1}'".format(remaining_roles, partition.guid))
                    GeneralDisk.adjust_disk_role(partition, remaining_roles, 'SET')
                    # Set back to pristine condition:
                    # Unmount partition
                    logger.info("Umounting disk {2}".format(partition.roles, partition.guid, disk_name))
                    TestDiskRoles._umount(partition.mountpoint)
                    # Remove from fstab
                    logger.info("Removing {0} from fstab".format(partition.mountpoint))
                    FstabHelper().remove_by_mountpoint(partition.mountpoint)
                    # Remove filesystem
                    logger.info(
                        "Removing filesystem on partition {0} on disk {1}".format(partition.guid, disk_name))
                    alias = partition.aliases[0]
                    device = '/dev/{0}'.format(disk_name)
                    TestDiskRoles._remove_filesystem(device, alias)
                    # Remove partition from model
                    logger.info("Removing partition {0} on disk {1} from model".format(partition.guid, disk_name))
                    partition.delete()
                else:
                    if len(partition.roles) < number_of_roles_to_remain:
                        logger.warning("Number of roles that should remain exceeds the number of roles that are present!"
                                       " Keeping all roles instead!")
                        roles_list = []  # remove nothing so that all current roles remain
                    else:
                        roles_list = partition.roles[number_of_roles_to_remain:]
                    remaining_roles = General.remove_list_from_list(partition.roles, roles_list)
                    logger.info("Removing roles '{0}' from partition '{1}'".format(remaining_roles, partition.guid))
                    GeneralDisk.adjust_disk_role(partition, remaining_roles, 'SET')
                # Store the remaining roles (possibly an empty list) for later validation
                collection[partition.guid] = remaining_roles
        return collection
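The lookups above imply that config is a dict keyed on node IP, each value carrying a 'disks' list with at least a 'disk_name' entry. A minimal hypothetical call (IP and disk name are invented; the method is assumed to live on TestDiskRoles, as its use of TestDiskRoles._umount suggests):

    config = {'10.100.1.1': {'disks': [{'disk_name': 'sdb'}]}}
    # Strip all roles and wipe the backing partitions
    collection = TestDiskRoles.remove_roles_from_config(config, number_of_roles_to_remain=0)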
Example #4
def setup():
    """
    Setup for the Backend package; executed when any test in this package is run
    Makes the necessary changes required to run the tests
    :return: None
    """
    autotest_config = General.get_config()
    backend_name = autotest_config.get('backend', 'name')
    assert backend_name, "Please fill out a valid backend name in autotest.cfg file"

    my_sr = GeneralStorageRouter.get_local_storagerouter()
    if GeneralStorageRouter.has_roles(storagerouter=my_sr, roles='DB') is False:
        GeneralDisk.add_db_role(my_sr)
    def fdl_0001_match_model_with_reality_test():
        """
        FDL-0001 - disks in ovs model should match actual physical disk configuration
        """
        if TestFlexibleDiskLayout.continue_testing.state is False:
            logger.info('Test suite signaled to stop')
            return
        GeneralStorageRouter.sync_with_reality()

        physical_disks = dict()
        modelled_disks = dict()
        loops = dict()

        storagerouters = GeneralStorageRouter.get_storage_routers()
        for storagerouter in storagerouters:
            root_client = SSHClient(storagerouter, username='******')
            hdds, ssds = GeneralDisk.get_physical_disks(client=root_client)
            physical_disks[storagerouter.guid] = hdds
            physical_disks[storagerouter.guid].update(ssds)
            loop_devices = General.get_loop_devices(client=root_client)
            loops[storagerouter.guid] = loop_devices

        disks = GeneralDisk.get_disks()
        for disk in disks:
            if disk.storagerouter_guid not in modelled_disks:
                modelled_disks[disk.storagerouter_guid] = dict()
            if disk.name not in loops[disk.storagerouter_guid]:
                modelled_disks[disk.storagerouter_guid][disk.name] = {'is_ssd': disk.is_ssd}

        logger.info('PDISKS: {0}'.format(physical_disks))
        logger.info('MDISKS: {0}'.format(modelled_disks))

        assert len(modelled_disks.keys()) == len(physical_disks.keys()),\
            "Nr of modelled/physical disks is NOT equal!:\n PDISKS: {0}\nMDISKS: {1}".format(modelled_disks,
                                                                                             physical_disks)

        for guid in physical_disks.keys():
            assert len(physical_disks[guid]) == len(modelled_disks[guid]),\
                "Nr of modelled/physical disks differs for storagerouter {0}:\n{1}\n{2}".format(guid,
                                                                                                physical_disks[guid],
                                                                                                modelled_disks[guid])

        # basic check on hdd/ssd
        for guid in physical_disks.keys():
            mdisks = modelled_disks[guid]
            pdisks = physical_disks[guid]
            for key in mdisks.iterkeys():
                assert mdisks[key]['is_ssd'] == pdisks[key]['is_ssd'],\
                    "Disk incorrectly modelled for storagerouter {0}\nmdisk: {1}\npdisk: {2}".format(guid,
                                                                                                     mdisks[key],
                                                                                                     pdisks[key])
Example #6
def setup():
    """
    Setup for the Backend package; executed when any test in this package is run
    Makes the necessary changes required to run the tests
    :return: None
    """
    General.validate_required_config_settings(settings={'backend': ['name']})
    my_sr = GeneralStorageRouter.get_local_storagerouter()
    if GeneralStorageRouter.has_roles(storagerouter=my_sr, roles='DB') is False:
        GeneralDisk.add_db_role(my_sr)

    alba_backend_name = General.get_config().get('backend', 'name')
    alba_backend = GeneralAlba.get_by_name(alba_backend_name)
    if alba_backend is None:
        GeneralAlba.add_alba_backend(alba_backend_name)
    def add_vpool(vpool_parameters=None, storagerouters=None):
        """
        Create a vPool based on the kwargs provided or default parameters found in the autotest.cfg
        :param vpool_parameters: Parameters to be used for vPool creation
        :type vpool_parameters: dict

        :param storagerouters: Guids of the Storage Routers on which to create and extend this vPool
        :type storagerouters: list

        :return: Created or extended vPool
        :rtype: VPool
        """
        if storagerouters is None:
            storagerouters = list(GeneralStorageRouter.get_storage_routers())
        if vpool_parameters is None:
            vpool_parameters = {}
        if not isinstance(storagerouters, list) or len(storagerouters) == 0:
            raise ValueError("Storage Routers should be a list and contain at least 1 element to add a vPool on")

        vpool_name = None
        storagerouter_param_map = dict(
            (sr, GeneralVPool.get_add_vpool_params(storagerouter=sr, **vpool_parameters)) for sr in storagerouters
        )
        for index, sr in enumerate(storagerouters):
            vpool_name = storagerouter_param_map[sr]["vpool_name"]
            if GeneralStorageRouter.has_roles(storagerouter=sr, roles="DB") is False and sr.node_type == "MASTER":
                GeneralDisk.add_db_role(sr)
            if GeneralStorageRouter.has_roles(storagerouter=sr, roles=["SCRUB", "WRITE"]) is False:
                GeneralDisk.add_write_scrub_roles(sr)

            print storagerouter_param_map[sr]
            task_result = GeneralVPool.api.execute_post_action(
                component="storagerouters",
                guid=sr.guid,
                action="add_vpool",
                data={"call_parameters": storagerouter_param_map[sr]},
                wait=True,
                timeout=GeneralVPool.TIMEOUT_ADD_VPOOL,
            )
            if task_result[0] is not True:
                raise RuntimeError(
                    "vPool was not {0} successfully: {1}".format("extended" if index > 0 else "created", task_result[1])
                )

        vpool = GeneralVPool.get_vpool_by_name(vpool_name)
        if vpool is None:
            raise RuntimeError("vPool with name {0} could not be found in model".format(vpool_name))
        return vpool, storagerouter_param_map
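A usage sketch, mirroring the call made in the distributed-vPool test in Example #11 below (the vPool name is arbitrary):

    vpool, vpool_params = GeneralVPool.add_vpool(vpool_parameters={'vpool_name': 'my-vpool'})
    assert vpool is not None, 'vPool was not created'
    GeneralVPool.validate_vpool_sanity(expected_settings=vpool_params)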
    def validate_roles(collection):
        """

        :param collection: object containing the partition guid as key and the list of roles as value
        :type collection: Object

        :return: returns true or false based on the condition
        :type: boolean
        """
        # Start validation
        # Check if roles are the same as specified
        iterations = 0
        successful_iterations = 0
        logger.info("Starting validation of disk roles")
        for key, value in collection.iteritems():
            iterations += 1
            # Fetch partition matching key
            try:
                partition = GeneralDisk.get_disk_partition(key)
                # Check if roles are the same
                logger.info("Comparing roles on the partition '{0}'...".format(key))
                logger.info("Found '{0}' on partition and predefined roles: '{1}'".format(partition.roles, value))
                if sorted(partition.roles) == sorted(value):
                    successful_iterations += 1
                else:
                    logger.error("The role '{0}' for partition '{1}' was not set correctly!".format(value, key))
                    logger.error("Found '{0}' and expected '{1}'!".format(partition.roles, value))
            except ObjectNotFoundException:
                logger.info('Partition was removed meaning that all roles are deleted.')
                successful_iterations += 1
        return successful_iterations == iterations
Example #9
    def filter_disks(disk_names, amount, disk_type):
        """
        Filter the available disks
        :param disk_names: Disks to filter
        :param amount: Amount to retrieve
        :param disk_type: Type of disk
        :return: Filtered disks
        """
        grid_ip = General.get_config().get('main', 'grid_ip')
        storagerouter = GeneralStorageRouter.get_storage_router_by_ip(ip=grid_ip)
        root_client = SSHClient(storagerouter, username='******')
        hdds, ssds = GeneralDisk.get_physical_disks(client=root_client)
        count = 0
        filtered_disks = list()

        if disk_type == 'SATA':
            list_to_check = hdds.values()
        elif disk_type == 'SSD':
            list_to_check = ssds.values()
        else:
            hdds.update(ssds)
            list_to_check = hdds.values()

        for disk_name in disk_names:
            for disk in list_to_check:
                if disk_name == disk['name']:
                    filtered_disks.append(disk['name'])
                    count += 1
            if count == amount:
                break

        return filtered_disks
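A hypothetical call against this variant (disk names invented, and the function assumed to be exposed as a static helper); it returns at most amount names whose type matches:

    ssds = filter_disks(['sdb', 'sdc', 'sdd'], amount=2, disk_type='SSD')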
    def set_roles_from_config(config, operation_type='SET'):
        """
        Set or append disk roles as defined in the given configuration
        :param config: Configuration containing all of the required information
        :type config: dict

        :param operation_type: type of action that will be executed (Example: 'APPEND' or 'SET')
        :type operation_type: str

        :return: Returns a dict with the partition guid and its roles
        :rtype: dict
        """
        # Validate input
        if operation_type not in ('APPEND', 'SET'):
            raise ValueError('The specified type is not supported. Use "APPEND" or "SET"')
        # End validate input

        logger.info("Starting {0} of disk roles".format(operation_type))
        collection = {}

        for key, value in config.iteritems():
            # key = ip address
            # value = disks

            for disk_info in value['disks']:
                roles_list = disk_info['roles']
                disk_name = disk_info['disk_name']
                logger.info("Fetching disk with diskname '{0}' for ip '{1}'".format(disk_name, key))
                disk = GeneralStorageRouter().get_disk_by_ip(disk_name, key)
                logger.info("Fetching or creating new partitions for disk '{0}'".format(disk.guid))
                partition = GeneralDisk.partition_disk(disk)
                new_roles_list = None
                if len(partition.roles) > 0:
                    original_roles = partition.roles
                    logger.info("Found roles '{0}' on partition '{1}'".format(original_roles, partition.guid))
                    new_roles_list = list(roles_list)
                    for role in original_roles:
                        if role not in new_roles_list:
                            new_roles_list.append(role)
                logger.info("Adding roles '{0}' to partition '{1}'".format(roles_list, partition.guid))
                collection[partition.guid] = roles_list
                if operation_type == "APPEND":
                    GeneralDisk.adjust_disk_role(partition, roles_list, 'APPEND')
                    if new_roles_list:
                        collection[partition.guid] = new_roles_list
                elif operation_type == "SET":
                    GeneralDisk.adjust_disk_role(partition, roles_list, 'SET')
        return collection
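Together with remove_roles_from_config and validate_roles above, this supports a set/verify/remove/verify round trip; a hypothetical flow (IP and disk name invented, methods again assumed to be static helpers on TestDiskRoles):

    config = {'10.100.1.1': {'disks': [{'disk_name': 'sdb', 'roles': ['WRITE', 'SCRUB']}]}}
    collection = TestDiskRoles.set_roles_from_config(config, operation_type='SET')
    assert TestDiskRoles.validate_roles(collection), 'Roles were not set as configured'
    collection = TestDiskRoles.remove_roles_from_config(config)
    assert TestDiskRoles.validate_roles(collection), 'Roles were not removed'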
Example #11
    def add_remove_distributed_vpool_test():
        """
        Create a vPool with 'distributed' BackendType and remove it
        Related ticket: http://jira.cloudfounders.com/browse/OVS-4050
        """
        # Verify if an unused disk is available to mount
        unused_disks = GeneralDisk.get_unused_disks()
        if len(unused_disks) == 0:
            logger.info('No available disks found to mount locally for the distributed backend')
            return

        # Raise if vPool already exists
        vpool_name = 'add-remove-distr-vpool'
        vpool = GeneralVPool.get_vpool_by_name(vpool_name=vpool_name)
        if vpool is not None:
            raise RuntimeError('vPool with name "{0}" still exists'.format(vpool_name))

        unused_disk = unused_disks[0]
        if not unused_disk.startswith('/dev/'):
            raise ValueError('Unused disk must be an absolute path')

        # Create a partition on the disk
        local_sr = GeneralStorageRouter.get_local_storagerouter()
        disk = GeneralDisk.get_disk_by_devicename(storagerouter=local_sr,
                                                  device_name=unused_disk)
        partition = GeneralDisk.partition_disk(disk=disk)

        # Mount the unused disk
        if partition.mountpoint is None:
            GeneralDisk.configure_disk(storagerouter=local_sr, disk=disk, offset=0, size=disk.size, roles=[], partition=partition)
            partition.discard()  # Re-initializes the object

        # Add vPool and validate health
        vpool, vpool_params = GeneralVPool.add_vpool(vpool_parameters={'vpool_name': vpool_name,
                                                                       'type': 'distributed',
                                                                       'distributed_mountpoint': partition.mountpoint})
        assert vpool is not None, 'vPool {0} was not created'.format(vpool_name)
        GeneralVPool.validate_vpool_sanity(expected_settings=vpool_params)

        # Retrieve vPool information before removal
        guid = vpool.guid
        name = vpool.name
        backend_type = vpool.backend_type.code
        files = GeneralVPool.get_related_files(vpool)
        directories = GeneralVPool.get_related_directories(vpool)
        storagerouters = [sd.storagerouter for sd in vpool.storagedrivers]

        # Remove vPool and validate removal
        GeneralVPool.remove_vpool(vpool=vpool)
        vpool = GeneralVPool.get_vpool_by_name(vpool_name=vpool_name)
        assert vpool is None, 'vPool {0} was not deleted'.format(vpool_name)
        GeneralVPool.check_vpool_cleanup(vpool_info={'guid': guid,
                                                     'name': name,
                                                     'type': backend_type,
                                                     'files': files,
                                                     'directories': directories},
                                         storagerouters=storagerouters)
        GeneralDisk.unpartition_disk(disk)
Example #12
    def filter_disks(disk_names, amount, disk_type):
        """
        Filter the available disks
        :param disk_names: Disks to filter
        :param amount: Amount to retrieve
        :param disk_type: Type of disk
        :return: Filtered disks
        """
        node_ids = []
        list_of_available_disks = {}
        filtered_disks = {}
        disk_count = 0
        # disk_names = dictionary with node_ids as keys and values as a list of uninitialised disk names
        # {u'InA44YDJTKxFGvIKqD3CxYMlK7XxryZ0': [u'ata-TOSHIBA_MK2002TSKB_52Q2KSOTF',
        #                                        u'ata-TOSHIBA_MK2002TSKB_52Q3KR6TF',
        #                                        u'ata-TOSHIBA_MK2002TSKB_52Q2KSORF',
        #                                        u'ata-TOSHIBA_MK2002TSKB_52Q2KSOVF',
        #                                        u'ata-TOSHIBA_MK2002TSKB_52Q2KSOUF']}
        for node_id in disk_names.iterkeys():
            node_ids.append(node_id)
            list_of_available_disks[node_id] = []
            filtered_disks[node_id] = []
            alba_node = AlbaNodeList.get_albanode_by_node_id(node_id)
            storagerouter = GeneralStorageRouter.get_storage_router_by_ip(ip=alba_node.ip)
            root_client = SSHClient(storagerouter, username='******')
            hdds, ssds = GeneralDisk.get_physical_disks(client=root_client)
            if disk_type == 'SATA':
                for hdd in hdds.values():
                    # add it to list_of_available_disks only if it's found in the uninitialised list for that node
                    if hdd['name'] in disk_names[node_id]:
                        list_of_available_disks[node_id].append(hdd)
            if disk_type == 'SSD':
                for ssd in ssds.values():
                    # add it to list_of_available_disks only if it's found in the uninitialised list for that node
                    if ssd['name'] in disk_names[node_id]:
                        list_of_available_disks[node_id].append(ssd)
            disk_count += len(list_of_available_disks[node_id])

        count = 0
        # all disks might be on a single node, so check up to the maximum amount of disks we need
        for disk_index in range(amount):
            for node_id in node_ids:
                # while more disks are needed, add the disk at position 'disk_index' from each node's available list
                if count < amount:
                    if disk_index < len(list_of_available_disks[node_id]):
                        filtered_disks[node_id].append('/dev/disk/by-id/' + list_of_available_disks[node_id][disk_index]['name'])
                        count += 1
        # this should run through the whole list even if we haven't reached the amount of disks needed
        return filtered_disks
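The in-code comment above already shows the expected disk_names shape; a hypothetical call (node id and disk ids copied from that comment) would return full /dev/disk/by-id paths grouped per node, roughly:

    disk_names = {u'InA44YDJTKxFGvIKqD3CxYMlK7XxryZ0': [u'ata-TOSHIBA_MK2002TSKB_52Q2KSOTF',
                                                        u'ata-TOSHIBA_MK2002TSKB_52Q3KR6TF']}
    filtered = filter_disks(disk_names, amount=1, disk_type='SATA')
    # e.g. {u'InA44YDJTKxFGvIKqD3CxYMlK7XxryZ0': [u'/dev/disk/by-id/ata-TOSHIBA_MK2002TSKB_52Q2KSOTF']}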
    def fdl_0002_add_remove_partition_with_role_and_crosscheck_model_test():
        """
        FDL-0002 - create/remove disk partition using full disk and verify ovs model
            - look for an unused disk
            - add a partition using full disk and assign a DB role to the partition
            - validate ovs model is correctly updated with DB role
            - cleanup that partition
            - verify ovs model is correctly updated
        """
        if TestFlexibleDiskLayout.continue_testing.state is False:
            logger.info('Test suite signaled to stop')
            return

        my_sr = GeneralStorageRouter.get_local_storagerouter()

        unused_disks = GeneralDisk.get_unused_disks()
        if not unused_disks:
            logger.info("At least one unused disk should be available for partition testing")
            return

        hdds = dict()
        ssds = dict()
        mdisks = GeneralDisk.get_disks()
        for disk in mdisks:
            if disk.storagerouter_guid == my_sr.guid:
                if disk.is_ssd:
                    ssds['/dev/' + disk.name] = disk
                else:
                    hdds['/dev/' + disk.name] = disk

        all_disks = dict(ssds)
        all_disks.update(hdds)

        # check no partitions are modelled for unused disks
        partitions = GeneralDisk.get_disk_partitions()
        partitions_detected = False
        disk_guid = ''
        for path in unused_disks:
            # @TODO: remove the if when ticket OVS-4503 is solved
            if path in all_disks:
                disk_guid = all_disks[path].guid
                for partition in partitions:
                    if partition.disk_guid == disk_guid:
                        partitions_detected = True
        assert partitions_detected is False, 'Existing partitions detected on unused disks!'

        # try to partition a disk using its full reported size
        disk = all_disks[unused_disks[0]]
        GeneralDisk.configure_disk(storagerouter=my_sr,
                                   disk=disk,
                                   offset=0,
                                   size=int(disk.size),
                                   roles=['WRITE'])

        # lookup partition in model
        mountpoint = None
        partitions = GeneralDisk.get_disk_partitions()
        for partition in partitions:
            if partition.disk_guid == disk.guid and 'WRITE' in partition.roles:
                mountpoint = partition.mountpoint
                break

        GeneralDisk.configure_disk(storagerouter=my_sr,
                                   disk=disk,
                                   offset=0,
                                   partition=partition,
                                   size=int(disk.size),
                                   roles=[])

        # cleanup disk partition
        cmd = 'umount {0}; rmdir {0}; echo 0'.format(mountpoint)
        General.execute_command_on_node(my_sr.ip, cmd, allow_insecure=True)

        cmd = ['parted', '-s', '/dev/' + disk.name, 'rm', '1']
        General.execute_command_on_node(my_sr.ip, cmd, allow_nonzero=True)

        # wipe partition table to be able to reuse this disk in another test
        GeneralVDisk.write_to_volume(location=disk.aliases[0],
                                     count=64,
                                     bs='1M',
                                     input_type='zero')

        GeneralStorageRouter.sync_with_reality()

        # verify partition no longer exists in ovs model
        is_partition_removed = True
        partitions = GeneralDisk.get_disk_partitions()
        for partition in partitions:
            if partition.disk_guid == disk_guid and 'WRITE' in partition.roles:
                is_partition_removed = False
                break

        assert is_partition_removed is True,\
            'New partition was not deleted successfully from system/model!'

        assert mountpoint, 'New partition was not detected in model'
    def get_first_unused_disk():
        """
        Return the name of the first unused disk
        :return: Disk name
        """
        disks = GeneralDisk.get_unused_disks()
        if disks:
            return TestDiskRoles.extract_disk_name(disks[0])
        raise ValueError('No available disks, setup is invalid for testing!')