def unpartition_disk(disk, partitions=None, wait=True):
        """
        Return disk to RAW state
        :param disk: Disk DAL object
        :param partitions: Partitions DAL object list
        :param wait: Currently unused
        :return: None
        """
        if partitions is None:
            partitions = disk.partitions
        else:
            for partition in partitions:
                if partition not in disk.partitions:
                    raise RuntimeError('Partition {0} does not belong to disk {1}'.format(partition.mountpoint, disk.name))
        if len(disk.partitions) == 0:
            return

        root_client = SSHClient(disk.storagerouter, username='******')
        for partition in partitions:
            General.unmount_partition(root_client, partition)
        root_client.run(['parted', '-s', '/dev/' + disk.name, 'mklabel', 'gpt'])
        GeneralStorageRouter.sync_with_reality(disk.storagerouter)
        counter = 0
        timeout = 60
        while counter < timeout:
            time.sleep(1)
            disk = GeneralDisk.get_disk(guid=disk.guid)
            if len(disk.partitions) == 0:
                break
            counter += 1
        if counter == timeout:
            raise RuntimeError('Removing partitions failed for disk {0}'.format(disk.name))
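
# A minimal usage sketch, assuming unpartition_disk is exposed as a static method
# on GeneralDisk and the ci.tests.general.general_disk import path; the device
# name '/dev/sdb' is a placeholder:
from ci.tests.general.general_disk import GeneralDisk
from ci.tests.general.general_storagerouter import GeneralStorageRouter

local_sr = GeneralStorageRouter.get_local_storagerouter()
disk = GeneralDisk.get_disk_by_devicename(storagerouter=local_sr, device_name='/dev/sdb')
GeneralDisk.unpartition_disk(disk)  # unmounts all partitions and relabels the disk as GPT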
Example #2
def setup():
    """
    Setup for the Arakoon package; executed before any test in this package runs
    Makes the necessary changes required to run the tests
    :return: None
    """
    autotest_config = General.get_config()
    backend_name = autotest_config.get('backend', 'name')
    assert backend_name, 'Please fill out a backend name in the autotest.cfg file'
    backend = GeneralBackend.get_by_name(backend_name)
    if backend is not None:
        GeneralAlba.remove_alba_backend(backend.alba_backend)

    for storagerouter in GeneralStorageRouter.get_masters():
        root_client = SSHClient(storagerouter, username='******')
        if GeneralService.get_service_status(name='ovs-scheduled-tasks',
                                             client=root_client) is True:
            GeneralService.stop_service(name='ovs-scheduled-tasks',
                                        client=root_client)

    storagerouters = GeneralStorageRouter.get_storage_routers()
    for sr in storagerouters:
        root_client = SSHClient(sr, username='******')
        GeneralDisk.add_db_role(sr)

        for location in TEST_CLEANUP:
            root_client.run('rm -rf {0}'.format(location))

    GeneralAlba.add_alba_backend(backend_name)
    GeneralArakoon.voldrv_arakoon_checkup()
def _get_env_info():
    """
    Retrieve the number of env nodes and the last two octets of the local IP to add to the testrail title
    """
    number_of_nodes = len(GeneralStorageRouter.get_storage_routers())
    split_ip = GeneralStorageRouter.get_local_storagerouter().ip.split('.')
    return str(number_of_nodes) + 'N-' + split_ip[2] + '.' + split_ip[3]
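
# Worked example: with 3 storagerouters and a local IP of 10.100.12.34, split_ip
# becomes ['10', '100', '12', '34'] and _get_env_info() returns '3N-12.34'.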
    def prepare_alba_backend(name=None):
        """
        Create an ALBA backend and claim disks
        :param name: Name for the backend
        :return: None
        """
        # @TODO: Fix this, because backend_type should not be configurable if you always create an ALBA backend
        # @TODO 2: Get rid of these asserts, any test (or testsuite) should verify the required params first before starting execution
        autotest_config = General.get_config()
        if name is None:
            name = autotest_config.get('backend', 'name')
        nr_of_disks_to_claim = autotest_config.getint('backend', 'nr_of_disks_to_claim')
        type_of_disks_to_claim = autotest_config.get('backend', 'type_of_disks_to_claim')
        assert name,\
            "Please fill out a valid backend name in autotest.cfg file"

        storage_routers = GeneralStorageRouter.get_storage_routers()
        for sr in storage_routers:
            if GeneralStorageRouter.has_roles(storagerouter=sr, roles='DB') is False:
                GeneralDisk.add_db_role(sr)
            if GeneralStorageRouter.has_roles(storagerouter=sr, roles=['SCRUB', 'WRITE']) is False:
                GeneralDisk.add_write_scrub_roles(sr)
        backend = GeneralBackend.get_by_name(name)
        if not backend:
            alba_backend = GeneralAlba.add_alba_backend(name)
        else:
            alba_backend = backend.alba_backend
        GeneralAlba.claim_asds(alba_backend, nr_of_disks_to_claim, type_of_disks_to_claim)
        if GeneralAlba.has_preset(alba_backend=alba_backend,
                                  preset_name=GeneralAlba.ONE_DISK_PRESET) is False:
            GeneralAlba.add_preset(alba_backend=alba_backend,
                                   name=GeneralAlba.ONE_DISK_PRESET,
                                   policies=[[1, 1, 1, 2]])
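
# prepare_alba_backend reads its defaults from autotest.cfg; a sketch of the
# expected section, inferred from the get()/getint() calls above (values are
# placeholders):
#
# [backend]
# name = mybackend
# nr_of_disks_to_claim = 3
# type_of_disks_to_claim = SATA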
Example #5
def setup():
    """
    Setup for the Backend package; executed before any test in this package runs
    Makes the necessary changes required to run the tests
    :return: None
    """
    autotest_config = General.get_config()
    backend_name = autotest_config.get('backend', 'name')
    assert backend_name, "Please fill out a valid backend name in the autotest.cfg file"

    my_sr = GeneralStorageRouter.get_local_storagerouter()
    if GeneralStorageRouter.has_roles(storagerouter=my_sr, roles='DB') is False:
        GeneralDisk.add_db_role(my_sr)
    def fdl_0001_match_model_with_reality_test():
        """
        FDL-0001 - disks in ovs model should match actual physical disk configuration
        """
        if TestFlexibleDiskLayout.continue_testing.state is False:
            logger.info('Test suite signaled to stop')
            return
        GeneralStorageRouter.sync_with_reality()

        physical_disks = dict()
        modelled_disks = dict()
        loops = dict()

        storagerouters = GeneralStorageRouter.get_storage_routers()
        for storagerouter in storagerouters:
            root_client = SSHClient(storagerouter, username='******')
            hdds, ssds = GeneralDisk.get_physical_disks(client=root_client)
            physical_disks[storagerouter.guid] = hdds
            physical_disks[storagerouter.guid].update(ssds)
            loop_devices = General.get_loop_devices(client=root_client)
            loops[storagerouter.guid] = loop_devices

        disks = GeneralDisk.get_disks()
        for disk in disks:
            if disk.storagerouter_guid not in modelled_disks:
                modelled_disks[disk.storagerouter_guid] = dict()
            if disk.name not in loops[disk.storagerouter_guid]:
                modelled_disks[disk.storagerouter_guid][disk.name] = {'is_ssd': disk.is_ssd}

        logger.info('PDISKS: {0}'.format(physical_disks))
        logger.info('MDISKS: {0}'.format(modelled_disks))

        assert len(modelled_disks.keys()) == len(physical_disks.keys()),\
            "Nr of modelled/physical disks is NOT equal!:\n PDISKS: {0}\nMDISKS: {1}".format(modelled_disks,
                                                                                             physical_disks)

        for guid in physical_disks.keys():
            assert len(physical_disks[guid]) == len(modelled_disks[guid]),\
                "Nr of modelled/physical disks differs for storagerouter {0}:\n{1}\n{2}".format(guid,
                                                                                                physical_disks[guid],
                                                                                                modelled_disks[guid])

        # basic check on hdd/ssd
        for guid in physical_disks.keys():
            mdisks = modelled_disks[guid]
            pdisks = physical_disks[guid]
            for key in mdisks.iterkeys():
                assert mdisks[key]['is_ssd'] == pdisks[key]['is_ssd'],\
                    "Disk incorrectly modelled for storagerouter {0}\n,mdisk:{1}\n,pdisk:{2}".format(guid,
                                                                                                     mdisks[key],
                                                                                                     pdisks[key])
def setup():
    """
    Setup for the Backend package; executed before any test in this package runs
    Makes the necessary changes required to run the tests
    :return: None
    """
    General.validate_required_config_settings(settings={'backend': ['name']})
    my_sr = GeneralStorageRouter.get_local_storagerouter()
    if GeneralStorageRouter.has_roles(storagerouter=my_sr, roles='DB') is False:
        GeneralDisk.add_db_role(my_sr)

    alba_backend_name = General.get_config().get('backend', 'name')
    alba_backend = GeneralAlba.get_by_name(alba_backend_name)
    if alba_backend is None:
        GeneralAlba.add_alba_backend(alba_backend_name)
    def add_vpool(vpool_parameters=None, storagerouters=None):
        """
        Create a vPool based on the kwargs provided or default parameters found in the autotest.cfg
        :param vpool_parameters: Parameters to be used for vPool creation
        :type vpool_parameters: dict

        :param storagerouters: Storage Routers on which to create and extend this vPool
        :type storagerouters: list

        :return: Created or extended vPool
        :rtype: VPool
        """
        if storagerouters is None:
            storagerouters = list(GeneralStorageRouter.get_storage_routers())
        if vpool_parameters is None:
            vpool_parameters = {}
        if not isinstance(storagerouters, list) or len(storagerouters) == 0:
            raise ValueError("Storage Routers should be a list and contain at least 1 element to add a vPool on")

        vpool_name = None
        storagerouter_param_map = dict(
            (sr, GeneralVPool.get_add_vpool_params(storagerouter=sr, **vpool_parameters)) for sr in storagerouters
        )
        for index, sr in enumerate(storagerouters):
            vpool_name = storagerouter_param_map[sr]["vpool_name"]
            if GeneralStorageRouter.has_roles(storagerouter=sr, roles="DB") is False and sr.node_type == "MASTER":
                GeneralDisk.add_db_role(sr)
            if GeneralStorageRouter.has_roles(storagerouter=sr, roles=["SCRUB", "WRITE"]) is False:
                GeneralDisk.add_write_scrub_roles(sr)

            print storagerouter_param_map[sr]
            task_result = GeneralVPool.api.execute_post_action(
                component="storagerouters",
                guid=sr.guid,
                action="add_vpool",
                data={"call_parameters": storagerouter_param_map[sr]},
                wait=True,
                timeout=GeneralVPool.TIMEOUT_ADD_VPOOL,
            )
            if task_result[0] is not True:
                raise RuntimeError(
                    "vPool was not {0} successfully: {1}".format("extended" if index > 0 else "created", task_result[1])
                )

        vpool = GeneralVPool.get_vpool_by_name(vpool_name)
        if vpool is None:
            raise RuntimeError("vPool with name {0} could not be found in model".format(vpool_name))
        return vpool, storagerouter_param_map
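
# A minimal usage sketch, assuming add_vpool is exposed as a static method on
# GeneralVPool and the ci.tests.general.general_vpool import path; the vPool
# name is a placeholder and every other parameter falls back to autotest.cfg:
from ci.tests.general.general_vpool import GeneralVPool

vpool, param_map = GeneralVPool.add_vpool(vpool_parameters={'vpool_name': 'myvpool01'})
print vpool.name  # 'myvpool01'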
Example #9
def teardown():
    """
    Teardown for the Arakoon package; executed after all started tests in this package have ended
    Cleans up anything the test-run may have left behind
    :return: None
    """
    autotest_config = General.get_config()
    backend_name = autotest_config.get('backend', 'name')
    backend = GeneralBackend.get_by_name(backend_name)
    if backend is not None:
        GeneralAlba.remove_alba_backend(backend.alba_backend)

    for storagerouter in GeneralStorageRouter.get_masters():
        root_client = SSHClient(storagerouter, username='******')
        if GeneralService.get_service_status(name='ovs-scheduled-tasks',
                                             client=root_client) is False:
            GeneralService.start_service(name='ovs-scheduled-tasks',
                                         client=root_client)

        for location in TEST_CLEANUP:
            root_client.run('rm -rf {0}'.format(location))

    for key in KEY_CLEANUP:
        if EtcdConfiguration.exists('{0}/{1}'.format(GeneralArakoon.ETCD_CONFIG_ROOT, key), raw=True):
            EtcdConfiguration.delete('{0}/{1}'.format(GeneralArakoon.ETCD_CONFIG_ROOT, key))
    def filter_disks(disk_names, amount, disk_type):
        """
        Filter the available disks
        :param disk_names: Disks to filter
        :param amount: Amount to retrieve
        :param disk_type: Type of disk
        :return: Filtered disks
        """
        grid_ip = General.get_config().get('main', 'grid_ip')
        storagerouter = GeneralStorageRouter.get_storage_router_by_ip(ip=grid_ip)
        root_client = SSHClient(storagerouter, username='******')
        hdds, ssds = GeneralDisk.get_physical_disks(client=root_client)
        count = 0
        filtered_disks = list()

        if disk_type == 'SATA':
            list_to_check = hdds.values()
        elif disk_type == 'SSD':
            list_to_check = ssds.values()
        else:
            hdds.update(ssds)
            list_to_check = hdds.values()

        for disk_name in disk_names:
            for disk in list_to_check:
                if disk_name == disk['name']:
                    filtered_disks.append(disk['name'])
                    count += 1
            if count == amount:
                break

        return filtered_disks
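
# Usage sketch: pick at most two SSDs from a list of candidate disk names (the
# names are placeholders); matches are returned in candidate order:
ssds_to_use = filter_disks(disk_names=['sda', 'sdb', 'sdc'], amount=2, disk_type='SSD')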
Example #11
def setup():
    """
    Setup for the Arakoon package; executed before any test in this package runs
    Makes the necessary changes required to run the tests
    :return: None
    """
    for storagerouter in GeneralStorageRouter.get_masters():
        root_client = SSHClient(storagerouter, username='******')
        if GeneralService.get_service_status(name='ovs-scheduled-tasks',
                                             client=root_client) is True:
            GeneralService.stop_service(name='ovs-scheduled-tasks',
                                        client=root_client)

    for sr in GeneralStorageRouter.get_storage_routers():
        root_client = SSHClient(sr, username='******')
        for location in TEST_CLEANUP:
            root_client.run(['rm', '-rf', location])
    def ovs_2263_verify_alba_namespace_cleanup_test():
        """
        Verify ALBA namespace cleanup
        Create an amount of namespaces in ALBA
        Create a vPool and create some volumes
        Verify the amount of namespaces before and after vPool creation
        Remove the vPool and the manually created namespaces
        Verify the amount of namespaces before and after vPool deletion
        """

        # Create some namespaces in alba
        no_namespaces = 3
        backend_name = General.get_config().get('backend', 'name')
        backend = GeneralBackend.get_by_name(name=backend_name)
        namespace_name = 'autotest-ns_'
        namespace_name_regex = re.compile(r'^autotest-ns_\d$')
        for nmspc_index in range(no_namespaces):
            GeneralAlba.execute_alba_cli_action(backend.alba_backend, 'create-namespace', ['{0}{1}'.format(namespace_name, nmspc_index), 'default'], False)
        result = GeneralAlba.list_alba_namespaces(alba_backend=backend.alba_backend,
                                                  name=namespace_name_regex)
        assert len(result) == no_namespaces, "Expected {0} namespaces present on the {1} backend, found {2}".format(no_namespaces, backend_name, len(result))

        # Create a vPool and create volumes on it
        vpool, _ = GeneralVPool.add_vpool()
        root_client = SSHClient(GeneralStorageRouter.get_local_storagerouter(), username='******')
        if vpool.storagedrivers[0].storagerouter.pmachine.hvtype == 'VMWARE':
            GeneralVPool.mount_vpool(vpool=vpool,
                                     root_client=root_client)

        vdisks = []
        for disk_index in range(no_namespaces):
            vdisks.append(GeneralVDisk.create_volume(size=10,
                                                     vpool=vpool,
                                                     root_client=root_client))
        result = GeneralAlba.list_alba_namespaces(alba_backend=backend.alba_backend)
        assert len(result) == 2 * no_namespaces + 1, "Expected {0} namespaces present on the {1} backend, found {2}".format(2 * no_namespaces + 1, backend_name, len(result))

        # Remove files and vPool
        for vdisk in vdisks:
            GeneralVDisk.delete_volume(vdisk=vdisk,
                                       vpool=vpool,
                                       root_client=root_client)

        if vpool.storagedrivers[0].storagerouter.pmachine.hvtype == 'VMWARE':
            GeneralVPool.unmount_vpool(vpool=vpool,
                                       root_client=root_client)

        GeneralVPool.remove_vpool(vpool)

        # Verify amount of namespaces
        result = GeneralAlba.list_alba_namespaces(alba_backend=backend.alba_backend,
                                                  name=namespace_name_regex)
        assert len(result) == no_namespaces, "Expected {0} namespaces present on the {1} backend, found {2}".format(no_namespaces, backend_name, len(result))
        for namespace in result:
            GeneralAlba.execute_alba_cli_action(backend.alba_backend, 'delete-namespace', [namespace['name']], False)
        result = GeneralAlba.list_alba_namespaces(alba_backend=backend.alba_backend,
                                                  name=namespace_name_regex)
        assert len(result) == 0, "Expected no namespaces present on the {0} backend, found {1}".format(backend_name, len(result))
    def add_remove_distributed_vpool_test():
        """
        Create a vPool with 'distributed' BackendType and remove it
        Related ticket: http://jira.cloudfounders.com/browse/OVS-4050
        """
        # Verify if an unused disk is available to mount
        unused_disks = GeneralDisk.get_unused_disks()
        if len(unused_disks) == 0:
            logger.info('No available disks found to mount locally for the distributed backend')
            return

        # Raise if vPool already exists
        vpool_name = 'add-remove-distr-vpool'
        vpool = GeneralVPool.get_vpool_by_name(vpool_name=vpool_name)
        if vpool is not None:
            raise RuntimeError('vPool with name "{0}" still exists'.format(vpool_name))

        unused_disk = unused_disks[0]
        if not unused_disk.startswith('/dev/'):
            raise ValueError('Unused disk must be absolute path')

        # Create a partition on the disk
        local_sr = GeneralStorageRouter.get_local_storagerouter()
        disk = GeneralDisk.get_disk_by_devicename(storagerouter=local_sr,
                                                  device_name=unused_disk)
        partition = GeneralDisk.partition_disk(disk=disk)

        # Mount the unused disk
        if partition.mountpoint is None:
            GeneralDisk.configure_disk(storagerouter=local_sr, disk=disk, offset=0, size=disk.size, roles=[], partition=partition)
            partition.discard()  # Re-initializes the object

        # Add vPool and validate health
        vpool, vpool_params = GeneralVPool.add_vpool(vpool_parameters={'vpool_name': vpool_name,
                                                                       'type': 'distributed',
                                                                       'distributed_mountpoint': partition.mountpoint})
        assert vpool is not None, 'vPool {0} was not created'.format(vpool_name)
        GeneralVPool.validate_vpool_sanity(expected_settings=vpool_params)

        # Retrieve vPool information before removal
        guid = vpool.guid
        name = vpool.name
        backend_type = vpool.backend_type.code
        files = GeneralVPool.get_related_files(vpool)
        directories = GeneralVPool.get_related_directories(vpool)
        storagerouters = [sd.storagerouter for sd in vpool.storagedrivers]

        # Remove vPool and validate removal
        GeneralVPool.remove_vpool(vpool=vpool)
        vpool = GeneralVPool.get_vpool_by_name(vpool_name=vpool_name)
        assert vpool is None, 'vPool {0} was not deleted'.format(vpool_name)
        GeneralVPool.check_vpool_cleanup(vpool_info={'guid': guid,
                                                     'name': name,
                                                     'type': backend_type,
                                                     'files': files,
                                                     'directories': directories},
                                         storagerouters=storagerouters)
        GeneralDisk.unpartition_disk(disk)
    def json_files_check_test():
        """
        Verify some configuration files in json format
        """
        issues_found = ''

        srs = GeneralStorageRouter.get_storage_routers()
        for sr in srs:
            config_contents = EtcdConfiguration.get('/ovs/framework/hosts/{0}/setupcompleted'.format(sr.machine_id), raw=True)
            if "true" not in config_contents:
                issues_found += "Setup not completed for node {0}\n".format(sr.name)

        assert issues_found == '', "Found the following issues while checking for the setupcompleted:{0}\n".format(issues_found)
    def services_check_test():
        """
        Verify some services
        """
        env_ips = GeneralStorageRouter.get_all_ips()
        issues_found = []

        for env_ip in env_ips:
            non_running_services = GeneralSystem.list_non_running_ovs_services(env_ip)
            if len(non_running_services) > 0:
                issues_found.append([env_ip, non_running_services])

        assert len(issues_found) == 0, "Found non running services on {0}".format(issues_found)
    def validate_alba_backend_removal(alba_backend_info):
        """
        Validate whether the backend has been deleted properly
        alba_backend_info should be a dictionary containing:
            - guid
            - name
            - maintenance_service_names
        :param alba_backend_info: Information about the backend
        :return: None
        """
        Toolbox.verify_required_params(actual_params=alba_backend_info,
                                       required_params={'name': (str, None),
                                                        'guid': (str, Toolbox.regex_guid),
                                                        'maintenance_service_names': (list, None)},
                                       exact_match=True)

        alba_backend_guid = alba_backend_info['guid']
        alba_backend_name = alba_backend_info['name']
        backend = GeneralBackend.get_by_name(alba_backend_name)
        assert backend is None,\
            'Still found a backend in the model with name {0}'.format(alba_backend_name)

        # Validate services removed from model
        for service in GeneralService.get_services_by_name(ServiceType.SERVICE_TYPES.ALBA_MGR):
            assert service.name != '{0}-abm'.format(alba_backend_name),\
                'An AlbaManager service has been found with name {0}'.format(alba_backend_name)
        for service in GeneralService.get_services_by_name(ServiceType.SERVICE_TYPES.NS_MGR):
            assert service.name.startswith('{0}-nsm_'.format(alba_backend_name)) is False,\
                'A NamespaceManager service has been found with name {0}'.format(alba_backend_name)

        # Validate ALBA backend configuration structure
        alba_backend_key = '/ovs/alba/backends'
        actual_configuration_keys = [key for key in Configuration.list(alba_backend_key)]
        assert alba_backend_guid not in actual_configuration_keys,\
            'Configuration still contains an entry in {0} with guid {1}'.format(alba_backend_key, alba_backend_guid)

        # Validate Arakoon configuration structure
        arakoon_keys = [key for key in Configuration.list('/ovs/arakoon') if key.startswith(alba_backend_name)]
        assert len(arakoon_keys) == 0,\
            'Configuration still contains configurations for clusters: {0}'.format(', '.join(arakoon_keys))

        # Validate services
        for storagerouter in GeneralStorageRouter.get_storage_routers():
            root_client = SSHClient(endpoint=storagerouter, username='******')
            maintenance_services = alba_backend_info['maintenance_service_names']
            abm_arakoon_service_name = 'ovs-arakoon-{0}-abm'.format(alba_backend_name)
            nsm_arakoon_service_name = 'ovs-arakoon-{0}-nsm_0'.format(alba_backend_name)
            for service_name in [abm_arakoon_service_name, nsm_arakoon_service_name] + maintenance_services:
                assert GeneralService.has_service(name=service_name, client=root_client) is False,\
                    'Service {0} still deployed on Storage Router {1}'.format(service_name, storagerouter.name)
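
    # Usage sketch: capture the backend info before removing it, then validate
    # afterwards; the maintenance service name below is a placeholder:
    # alba_backend_info = {'guid': alba_backend.guid,
    #                      'name': alba_backend.name,
    #                      'maintenance_service_names': ['alba-maintenance_mybackend-xyz']}
    # GeneralAlba.remove_alba_backend(alba_backend)
    # validate_alba_backend_removal(alba_backend_info)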
    def ovs_2493_detect_could_not_acquire_lock_events_test():
        """
        Verify lock errors
        """
        errorlist = ""
        command = "grep -C 1 'Could not acquire lock' /var/log/ovs/lib.log"
        gridips = GeneralStorageRouter.get_all_ips()

        for gridip in gridips:
            out = General.execute_command_on_node(gridip, command + " | wc -l", allow_insecure=True)
            if out != '0':
                errorlist += "node {0}:\n{1}\n\n".format(gridip, General.execute_command_on_node(gridip, command).splitlines())

        assert len(errorlist) == 0, "Lock errors detected in lib logs on \n" + errorlist
    def filter_disks(disk_names, amount, disk_type):
        """
        Filter the available disks
        :param disk_names: Disks to filter
        :param amount: Amount to retrieve
        :param disk_type: Type of disk
        :return: Filtered disks
        """
        node_ids = []
        list_of_available_disks = {}
        filtered_disks = {}
        disk_count = 0
        # disk_names = dictionary mapping node_ids to lists of uninitialised disk names, e.g.:
        # {u'InA44YDJTKxFGvIKqD3CxYMlK7XxryZ0': [u'ata-TOSHIBA_MK2002TSKB_52Q2KSOTF',
        #                                        u'ata-TOSHIBA_MK2002TSKB_52Q3KR6TF',
        #                                        u'ata-TOSHIBA_MK2002TSKB_52Q2KSORF',
        #                                        u'ata-TOSHIBA_MK2002TSKB_52Q2KSOVF',
        #                                        u'ata-TOSHIBA_MK2002TSKB_52Q2KSOUF']}
        for node_id in disk_names.iterkeys():
            node_ids.append(node_id)
            list_of_available_disks[node_id] = []
            filtered_disks[node_id] = []
            alba_node = AlbaNodeList.get_albanode_by_node_id(node_id)
            storagerouter = GeneralStorageRouter.get_storage_router_by_ip(ip=alba_node.ip)
            root_client = SSHClient(storagerouter, username='******')
            hdds, ssds = GeneralDisk.get_physical_disks(client=root_client)
            if disk_type == 'SATA':
                for hdd in hdds.values():
                    # add it to list_of_available_disks only if it's found in the uninitialised list for that node
                    if hdd['name'] in disk_names[node_id]:
                        list_of_available_disks[node_id].append(hdd)
            if disk_type == 'SSD':
                for ssd in ssds.values():
                    # add it to list_of_available_disks only if it's found in the uninitialised list for that node
                    if ssd['name'] in disk_names[node_id]:
                        list_of_available_disks[node_id].append(ssd)
            disk_count += len(list_of_available_disks[node_id])

        count = 0
        # all disks might be on a single node, so iterate disk indexes up to the maximum amount needed
        for disk_index in range(amount):
            for node_id in node_ids:
                # while more disks are needed, take the disk at this index from each node's list of available disks
                if count < amount:
                    if disk_index < len(list_of_available_disks[node_id]):
                        filtered_disks[node_id].append('/dev/disk/by-id/' + list_of_available_disks[node_id][disk_index]['name'])
                        count += 1
        # runs through the whole list even if the requested amount was not reached
        return filtered_disks
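
    # Usage sketch reusing the node id and disk name from the docstring example;
    # assuming that disk is reported as an HDD, the result maps the node id to
    # /dev/disk/by-id paths:
    # filter_disks(disk_names={u'InA44YDJTKxFGvIKqD3CxYMlK7XxryZ0': [u'ata-TOSHIBA_MK2002TSKB_52Q2KSOTF']},
    #              amount=1, disk_type='SATA')
    # -> {u'InA44YDJTKxFGvIKqD3CxYMlK7XxryZ0': [u'/dev/disk/by-id/ata-TOSHIBA_MK2002TSKB_52Q2KSOTF']}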
    def ssh_check_test():
        """
        Verify SSH keys
        """
        issues_found = []
        env_ips = GeneralStorageRouter.get_all_ips()
        for env_ip_from in env_ips:
            out = General.execute_command_on_node(env_ip_from, "cat ~/.ssh/known_hosts", allow_insecure=True)
            for env_ip_to in env_ips:
                if env_ip_from != env_ip_to:
                    if env_ip_to not in out:
                        issues_found.append('Host key verification not found between {0} and {1}'.format(env_ip_from,
                                                                                                         env_ip_to))

        assert len(issues_found) == 0, 'Following issues were found:\n - {0}'.format('\n - '.join(issues_found))
    def ovs_3671_validate_archiving_of_existing_arakoon_data_on_create_test():
        """
        Validate arakoon archiving on extending a cluster with already existing data
        """
        first_sr = GeneralStorageRouter.get_storage_routers()[0]

        cluster_name = 'OVS_3671-single-node-cluster'
        cluster_basedir = '/var/tmp'

        root_client = SSHClient(first_sr, username='******')
        for directory in ['/'.join([cluster_basedir, 'arakoon']), '/var/log/arakoon']:
            root_client.dir_create(os.path.dirname(directory))
            root_client.dir_chmod(os.path.dirname(directory), 0755, recursive=True)
            root_client.dir_chown(os.path.dirname(directory), 'ovs', 'ovs', recursive=True)

        files_to_create = ['/'.join([cluster_basedir, 'arakoon', cluster_name, 'db', 'one.db']),
                           '/'.join([cluster_basedir, 'arakoon', cluster_name, 'tlogs', 'one.tlog'])]

        client = SSHClient(first_sr, username='******')
        for filename in files_to_create:
            client.dir_create(os.path.dirname(filename))
            client.dir_chmod(os.path.dirname(filename), 0755, recursive=True)
            client.dir_chown(os.path.dirname(filename), 'ovs', 'ovs', recursive=True)

        client.file_create(files_to_create)
        for filename in files_to_create:
            assert client.file_exists(filename) is True, 'File {0} not present'.format(filename)

        TestArakoon.logger.info('===================================================')
        TestArakoon.logger.info('setup and validate single node cluster')
        create_info = ArakoonInstaller.create_cluster(cluster_name, ServiceType.ARAKOON_CLUSTER_TYPES.FWK, first_sr.ip,
                                                      cluster_basedir, filesystem=False)
        TestArakoon.logger.info('create_info: \n{0}'.format(create_info))
        ArakoonInstaller.start_cluster(cluster_name, first_sr.ip, False)
        ArakoonInstaller.claim_cluster(cluster_name, first_sr, False, metadata=create_info['metadata'])
        TestArakoon.validate_arakoon_config_files([first_sr], cluster_name)
        TestArakoon.verify_arakoon_structure(root_client, cluster_name, True, True)
        for filename in files_to_create:
            assert client.file_exists(filename) is False, 'File {0} still exists'.format(filename)

        TestArakoon.logger.info('===================================================')
        TestArakoon.logger.info('remove cluster')
        ArakoonInstaller.delete_cluster(cluster_name, first_sr.ip, False)
        for filename in files_to_create:
            assert client.file_exists(filename) is False, 'File {0} still exists'.format(filename)
        TestArakoon.verify_arakoon_structure(root_client, cluster_name, False, False)
    def ovs_2703_kill_various_services_test():
        """
        Kill various services and see if they recover
        """

        # @TODO 1: This test does not belong in the vPool tests, its a service test which happens to create a vPool
        # @TODO 2: Make test smarter to test all required services on all node types
        vpool = GeneralVPool.get_vpool_by_name(General.get_config().get('vpool', 'name'))
        if vpool is None:
            vpool, _ = GeneralVPool.add_vpool()

        errors = []
        root_client = SSHClient(GeneralStorageRouter.get_local_storagerouter(), username='******')
        for service_name in GeneralService.get_all_service_templates():
            if GeneralService.has_service(name=service_name,
                                          client=root_client) is False:
                continue

            if GeneralService.get_service_status(name=service_name,
                                                 client=root_client) is False:
                errors.append('Service {0} not found in running state'.format(service_name))
                continue

            pid_before = GeneralService.get_service_pid(name=service_name,
                                                        client=root_client)
            if pid_before == -1:
                errors.append('Service {0} has unknown PID before being killed'.format(service_name))
                continue
            GeneralService.kill_service(name=service_name,
                                        client=root_client)
            time.sleep(5)
            if GeneralService.get_service_status(name=service_name,
                                                 client=root_client) is False:
                errors.append('Service {0} not found in running state after killing it'.format(service_name))
                continue
            pid_after = GeneralService.get_service_pid(name=service_name,
                                                       client=root_client)
            if pid_after == -1:
                errors.append('Service {0} has unknown PID after being killed'.format(service_name))
                continue
            if pid_before == pid_after:
                errors.append('Kill command did not work on service {0}'.format(service_name))

        GeneralVPool.remove_vpool(vpool)

        assert len(errors) == 0, "Following issues were found with the services:\n - {0}".format('\n - '.join(errors))
    def ovs_4509_validate_arakoon_collapse_test():
        """
        Validate arakoon collapse
        """
        node_ips = [sr.ip for sr in GeneralStorageRouter.get_storage_routers()]
        node_ips.sort()
        for node_ip in node_ips:
            root_client = SSHClient(node_ip, username='******')
            arakoon_clusters = []
            for service in ServiceList.get_services():
                if service.is_internal is True and service.storagerouter.ip == node_ip and \
                    service.type.name in (ServiceType.SERVICE_TYPES.ARAKOON,
                                          ServiceType.SERVICE_TYPES.NS_MGR,
                                          ServiceType.SERVICE_TYPES.ALBA_MGR):
                    arakoon_clusters.append(service.name.replace('arakoon-', ''))

            for arakoon_cluster in arakoon_clusters:
                arakoon_config_path = Configuration.get_configuration_path('/ovs/arakoon/{0}/config'.format(arakoon_cluster))
                tlog_location = '/opt/OpenvStorage/db/arakoon/{0}/tlogs'.format(arakoon_cluster)

                # read_tlog_dir
                with remote(node_ip, [Configuration]) as rem:
                    config_contents = rem.Configuration.get('/ovs/arakoon/{0}/config'.format(arakoon_cluster), raw=True)
                for line in config_contents.splitlines():
                    if 'tlog_dir' in line:
                        tlog_location = line.split()[-1]

                nr_of_tlogs = TestArakoon.get_nr_of_tlogs_in_folder(root_client, tlog_location)
                old_headdb_timestamp = 0
                if root_client.file_exists('/'.join([tlog_location, 'head.db'])):
                    old_headdb_timestamp = root_client.run(['stat', '--format=%Y', tlog_location + '/head.db'])
                if nr_of_tlogs <= 2:
                    benchmark_command = ['arakoon', '--benchmark', '-n_clients', '1', '-max_n', '5_000', '-config', arakoon_config_path]
                    root_client.run(benchmark_command)

                GenericController.collapse_arakoon()

                nr_of_tlogs = TestArakoon.get_nr_of_tlogs_in_folder(root_client, tlog_location)
                new_headdb_timestamp = root_client.run(['stat', '--format=%Y', tlog_location + '/head.db'])
                assert nr_of_tlogs <= 2,\
                    'Arakoon collapse left {0} tlogs on the environment, expecting less than 2'.format(nr_of_tlogs)
                assert old_headdb_timestamp != new_headdb_timestamp,\
                    'Timestamp of the head_db file was not changed in the process of collapsing tlogs'
Example #23
def teardown():
    """
    Teardown for the Arakoon package; executed after all started tests in this package have ended
    Cleans up anything the test-run may have left behind
    :return: None
    """
    for storagerouter in GeneralStorageRouter.get_masters():
        root_client = SSHClient(storagerouter, username='******')
        if GeneralService.get_service_status(name='ovs-scheduled-tasks',
                                             client=root_client) is False:
            GeneralService.start_service(name='ovs-scheduled-tasks',
                                         client=root_client)

        for location in TEST_CLEANUP:
            root_client.run(['rm', '-rf', location])

    for key in KEY_CLEANUP:
        if Configuration.exists('{0}/{1}'.format(GeneralArakoon.CONFIG_ROOT, key), raw=True):
            Configuration.delete('{0}/{1}'.format(GeneralArakoon.CONFIG_ROOT, key))
    def add_vpool(vpool_parameters=None, storagerouters=None):
        """
        Create a vPool based on the kwargs provided or default parameters found in the autotest.cfg
        :param vpool_parameters: Parameters to be used for vPool creation
        :type vpool_parameters: dict

        :param storagerouters: Storage Routers on which to create and extend this vPool
        :type storagerouters: list

        :return: Created or extended vPool
        :rtype: VPool
        """
        if storagerouters is None:
            storagerouters = [GeneralStorageRouter.get_local_storagerouter()]
        if vpool_parameters is None:
            vpool_parameters = {}
        if not isinstance(storagerouters, list) or len(storagerouters) == 0:
            raise ValueError('Storage Routers should be a list and contain at least 1 element to add a vPool on')

        vpool_name = None
        storagerouter_param_map = dict((sr, GeneralVPool.get_add_vpool_params(storagerouter=sr, **vpool_parameters)) for sr in storagerouters)
        for index, sr in enumerate(storagerouters):
            vpool_name = storagerouter_param_map[sr]['vpool_name']
            task_result = GeneralVPool.api.execute_post_action(component='storagerouters',
                                                               guid=sr.guid,
                                                               action='add_vpool',
                                                               data={'call_parameters': storagerouter_param_map[sr]},
                                                               wait=True,
                                                               timeout=500)
            if task_result[0] is not True:
                raise RuntimeError('vPool was not {0} successfully: {1}'.format('extended' if index > 0 else 'created', task_result[1]))

        vpool = GeneralVPool.get_vpool_by_name(vpool_name)
        if vpool is None:
            raise RuntimeError('vPool with name {0} could not be found in model'.format(vpool_name))
        return vpool, storagerouter_param_map
    def ar_0002_arakoon_cluster_validation_test():
        """
        Arakoon cluster validation
        """
        TestArakoon.validate_arakoon_config_files(GeneralStorageRouter.get_storage_routers())
# Open vStorage is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY of any kind.

"""
A general class dedicated to OpenStack and DevStack logic
"""

import os
from ci.tests.general.general import General
from ci.tests.general.general_storagerouter import GeneralStorageRouter


# Setup environment
os.environ["OS_USERNAME"] = "******"
os.environ["OS_PASSWORD"] = "******"
os.environ["OS_TENANT_NAME"] = "admin"
os.environ["OS_AUTH_URL"] = "http://{0}:35357/v2.0".format(GeneralStorageRouter.get_local_storagerouter().ip)


class GeneralOpenStack(object):
    """
    A general class dedicated to OpenStack and DevStack logic
    """
    @staticmethod
    def is_openstack_present():
        """
        Check if OpenStack is installed
        :return: Return True if OpenStack is installed
        """
        return bool(General.execute_command("ps aux | awk '/keystone/ && !/awk/'")[0])
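
    # A minimal usage sketch: guard OpenStack/DevStack-specific test steps; the
    # helper below is hypothetical:
    # if GeneralOpenStack.is_openstack_present():
    #     configure_devstack_integration()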
    def check_vpool_cleanup(vpool_info, storagerouters=None):
        """
        Check if everything related to a vPool has been cleaned up on the storagerouters provided
        vpool_info should be a dictionary containing:
            - type
            - guid
            - files
            - directories
            - name (optional)
            - vpool (optional)
            If vpool is provided:
                - storagerouters need to be provided, because on these Storage Routers, we check whether the vPool has been cleaned up
            If name is provided:
                - If storagerouters is NOT provided, all Storage Routers will be checked for a correct vPool removal
                - If storagerouters is provided, only these Storage Routers will be checked for a correct vPool removal

        :param vpool_info: Information about the vPool
        :param storagerouters: Storage Routers to check if vPool has been cleaned up
        :return: None
        """
        for required_param in ['type', 'guid', 'files', 'directories']:
            if required_param not in vpool_info:
                raise ValueError('Incorrect vpool_info provided')
        if 'vpool' in vpool_info and 'name' in vpool_info:
            raise ValueError('vpool and name are mutually exclusive')
        if 'vpool' not in vpool_info and 'name' not in vpool_info:
            raise ValueError('Either vpool or name needs to be provided')

        vpool = vpool_info.get('vpool')
        vpool_name = vpool_info.get('name')
        vpool_guid = vpool_info['guid']
        vpool_type = vpool_info['type']
        files = vpool_info['files']
        directories = vpool_info['directories']

        supported_backend_types = GeneralBackend.get_valid_backendtypes()
        if vpool_type not in supported_backend_types:
            raise ValueError('Unsupported Backend Type provided. Please choose from: {0}'.format(', '.join(supported_backend_types)))
        if storagerouters is None:
            storagerouters = GeneralStorageRouter.get_storage_routers()

        if vpool_name is not None:
            assert GeneralVPool.get_vpool_by_name(vpool_name=vpool_name) is None, 'A vPool with name {0} still exists'.format(vpool_name)

        # Prepare some fields to check
        vpool_name = vpool.name if vpool else vpool_name
        vpool_services = ['ovs-dtl_{0}'.format(vpool_name),
                          'ovs-volumedriver_{0}'.format(vpool_name)]
        if vpool_type == 'alba':
            vpool_services.append('ovs-albaproxy_{0}'.format(vpool_name))

        # Check etcd
        if vpool is None:
            assert EtcdConfiguration.exists('/ovs/vpools/{0}'.format(vpool_guid), raw=True) is False, 'vPool config still found in etcd'
        else:
            remaining_sd_ids = set([storagedriver.storagedriver_id for storagedriver in vpool.storagedrivers])
            current_sd_ids = set([item for item in EtcdConfiguration.list('/ovs/vpools/{0}/hosts'.format(vpool_guid))])
            assert not remaining_sd_ids.difference(current_sd_ids), 'There are more storagedrivers modelled than present in etcd'
            assert not current_sd_ids.difference(remaining_sd_ids), 'There are more storagedrivers in etcd than present in model'

        # Perform checks on all storagerouters where vpool was removed
        for storagerouter in storagerouters:
            # Check management center
            mgmt_center = GeneralManagementCenter.get_mgmt_center(pmachine=storagerouter.pmachine)
            if mgmt_center is not None:
                assert GeneralManagementCenter.is_host_configured(pmachine=storagerouter.pmachine) is False, 'Management Center is still configured on Storage Router {0}'.format(storagerouter.ip)

            # Check MDS services
            mds_services = GeneralService.get_services_by_name(ServiceType.SERVICE_TYPES.MD_SERVER)
            assert len([mds_service for mds_service in mds_services if mds_service.storagerouter_guid == storagerouter.guid]) == 0, 'There are still MDS services present for Storage Router {0}'.format(storagerouter.ip)

            # Check services
            root_client = SSHClient(storagerouter, username='******')
            for service in vpool_services:
                if ServiceManager.has_service(service, client=root_client):
                    raise RuntimeError('Service {0} is still configured on Storage Router {1}'.format(service, storagerouter.ip))

            # Check KVM vpool
            if storagerouter.pmachine.hvtype == 'KVM':
                vpool_overview = root_client.run('virsh pool-list --all').splitlines()
                vpool_overview.pop(1)
                vpool_overview.pop(0)
                for vpool_info in vpool_overview:
                    kvm_vpool_name = vpool_info.split()[0].strip()
                    if vpool_name == kvm_vpool_name:
                        raise ValueError('vPool {0} is still defined on Storage Router {1}'.format(vpool_name, storagerouter.ip))

            # Check file and directory existence
            if storagerouter.guid not in directories:
                raise ValueError('Could not find directory information for Storage Router {0}'.format(storagerouter.ip))
            if storagerouter.guid not in files:
                raise ValueError('Could not find file information for Storage Router {0}'.format(storagerouter.ip))

            for directory in directories[storagerouter.guid]:
                assert root_client.dir_exists(directory) is False, 'Directory {0} still exists on Storage Router {1}'.format(directory, storagerouter.ip)
            for file_name in files[storagerouter.guid]:
                assert root_client.file_exists(file_name) is False, 'File {0} still exists on Storage Router {1}'.format(file_name, storagerouter.ip)

            # Look for errors in storagedriver log
            for error_type in ['error', 'fatal']:
                cmd = "cat -vet /var/log/ovs/volumedriver/{0}.log | tail -1000 | grep ' {1} '; echo true > /dev/null".format(vpool_name, error_type)
                errors = []
                for line in root_client.run(cmd).splitlines():
                    if "HierarchicalArakoon" in line:
                        continue
                    errors.append(line)
                if len(errors) > 0:
                    if error_type == 'error':
                        print 'Volumedriver log file contains errors on Storage Router {0}\n - {1}'.format(storagerouter.ip, '\n - '.join(errors))
                    else:
                        raise RuntimeError('Fatal errors found in volumedriver log file on Storage Router {0}\n - {1}'.format(storagerouter.ip, '\n - '.join(errors)))
    def ovs_3671_validate_archiving_of_existing_arakoon_data_on_create_and_extend_test():
        """
        Validate arakoon archiving when creating and extending an arakoon cluster
        """
        storagerouters = GeneralStorageRouter.get_storage_routers()
        storagerouters.sort(key=lambda k: k.ip)
        if len(storagerouters) < 2:
            TestArakoon.logger.info('Environment has only {0} node(s)'.format(len(storagerouters)))
            return

        cluster_name = 'OVS_3671-multi-node-cluster'
        cluster_basedir = '/var/tmp'

        archived_files = []
        files_to_create = []
        for index, sr in enumerate(storagerouters):
            root_client = SSHClient(sr, username='******')
            for directory in ['/'.join([cluster_basedir, 'arakoon']), '/var/log/arakoon']:
                root_client.dir_create(os.path.dirname(directory))
                root_client.dir_chmod(os.path.dirname(directory), 0755, recursive=True)
                root_client.dir_chown(os.path.dirname(directory), 'ovs', 'ovs', recursive=True)

            files_to_create = ['/'.join([cluster_basedir, 'arakoon', cluster_name, 'db', 'one.db']),
                               '/'.join([cluster_basedir, 'arakoon', cluster_name, 'tlogs', 'one.tlog'])]

            client = SSHClient(sr, username='******')
            for filename in files_to_create:
                client.dir_create(os.path.dirname(filename))
                client.dir_chmod(os.path.dirname(filename), 0755, recursive=True)
                client.dir_chown(os.path.dirname(filename), 'ovs', 'ovs', recursive=True)

            client.file_create(files_to_create)
            for filename in files_to_create:
                assert client.file_exists(filename) is True, 'File {0} not present'.format(filename)

            archived_files = ['/'.join(['/var/log/arakoon', cluster_name, 'archive', 'one.log'])]

            TestArakoon.logger.info('===================================================')
            TestArakoon.logger.info('setup and validate {0} node cluster'.format(index + 1))
            if index == 0:
                create_info = ArakoonInstaller.create_cluster(cluster_name, ServiceType.ARAKOON_CLUSTER_TYPES.FWK,
                                                              sr.ip, cluster_basedir, filesystem=False)
                TestArakoon.logger.info('create_info: \n{0}'.format(create_info))
                ArakoonInstaller.start_cluster(cluster_name, sr.ip, False)
                ArakoonInstaller.claim_cluster(cluster_name, sr, False, metadata=create_info['metadata'])
            else:
                ArakoonInstaller.extend_cluster(storagerouters[0].ip, sr.ip, cluster_name, cluster_basedir)
            TestArakoon.validate_arakoon_config_files(storagerouters[:index + 1], cluster_name)
            TestArakoon.verify_arakoon_structure(root_client, cluster_name, True, True)
            TestArakoon.check_archived_directory(client, archived_files)
            for filename in files_to_create:
                assert client.file_exists(filename) is False, 'File {0} still exists'.format(filename)

        TestArakoon.logger.info('===================================================')
        TestArakoon.logger.info('remove cluster')
        ArakoonInstaller.delete_cluster(cluster_name, storagerouters[0].ip, False)

        for sr in storagerouters:
            client = SSHClient(sr, username='******')
            TestArakoon.check_archived_directory(client, archived_files)
            for filename in files_to_create:
                assert client.file_exists(filename) is False, 'File {0} still exists'.format(filename)
            TestArakoon.verify_arakoon_structure(client, cluster_name, False, False)
    def ovs_3554_4_node_cluster_config_validation_test():
        """
        Arakoon config validation of a 4 node cluster
        """
        TestArakoon.validate_arakoon_config_files(GeneralStorageRouter.get_storage_routers())
    def ar_0001_validate_create_extend_shrink_delete_cluster_test():
        """
        Validate extending and shrinking of arakoon clusters
        """
        storagerouters = GeneralStorageRouter.get_storage_routers()
        if len(storagerouters) < 3:
            TestArakoon.logger.info('Environment has only {0} node(s)'.format(len(storagerouters)))
            return

        cluster_name = 'ar_0001'
        cluster_basedir = '/var/tmp/'
        first_sr = storagerouters[0]
        second_sr = storagerouters[1]
        third_sr = storagerouters[2]
        first_root_client = SSHClient(first_sr, username='******')
        second_root_client = SSHClient(second_sr, username='******')
        third_root_client = SSHClient(third_sr, username='******')

        TestArakoon.logger.info('===================================================')
        TestArakoon.logger.info('setup and validate single node cluster')
        create_info = ArakoonInstaller.create_cluster(cluster_name, ServiceType.ARAKOON_CLUSTER_TYPES.FWK, first_sr.ip,
                                                      cluster_basedir, filesystem=False)
        TestArakoon.logger.info('create_info: \n{0}'.format(create_info))
        ArakoonInstaller.start_cluster(cluster_name, first_sr.ip, False)
        ArakoonInstaller.claim_cluster(cluster_name, first_sr, False, metadata=create_info['metadata'])
        TestArakoon.validate_arakoon_config_files([first_sr], cluster_name)
        TestArakoon.verify_arakoon_structure(first_root_client, cluster_name, True, True)

        TestArakoon.logger.info('===================================================')
        TestArakoon.logger.info('setup and validate two node cluster')
        ArakoonInstaller.extend_cluster(first_sr.ip, second_sr.ip, cluster_name, cluster_basedir)
        TestArakoon.validate_arakoon_config_files([first_sr, second_sr], cluster_name)
        TestArakoon.verify_arakoon_structure(first_root_client, cluster_name, True, True)
        TestArakoon.verify_arakoon_structure(second_root_client, cluster_name, True, True)

        TestArakoon.logger.info('===================================================')
        TestArakoon.logger.info('setup and validate three node cluster')
        ArakoonInstaller.extend_cluster(first_sr.ip, third_sr.ip, cluster_name, cluster_basedir)
        TestArakoon.validate_arakoon_config_files([first_sr, second_sr, third_sr], cluster_name)

        for client in [first_root_client, second_root_client, third_root_client]:
            TestArakoon.verify_arakoon_structure(client, cluster_name, True, True)

        TestArakoon.logger.info('===================================================')
        TestArakoon.logger.info('reduce and validate three node to two node cluster')
        ArakoonInstaller.shrink_cluster(second_sr.ip, first_sr.ip, cluster_name)
        TestArakoon.validate_arakoon_config_files([first_sr, third_sr], cluster_name)
        TestArakoon.verify_arakoon_structure(first_root_client, cluster_name, True, True)
        TestArakoon.verify_arakoon_structure(second_root_client, cluster_name, True, False)
        TestArakoon.verify_arakoon_structure(third_root_client, cluster_name, True, True)

        TestArakoon.logger.info('===================================================')
        TestArakoon.logger.info('reduce and validate two node to one node cluster')
        ArakoonInstaller.shrink_cluster(first_sr.ip, third_sr.ip, cluster_name)
        TestArakoon.validate_arakoon_config_files([third_sr], cluster_name)

        TestArakoon.verify_arakoon_structure(first_root_client, cluster_name, True, False)
        TestArakoon.verify_arakoon_structure(second_root_client, cluster_name, True, False)
        TestArakoon.verify_arakoon_structure(third_root_client, cluster_name, True, True)

        TestArakoon.logger.info('===================================================')
        TestArakoon.logger.info('remove cluster')
        ArakoonInstaller.delete_cluster(cluster_name, third_sr.ip, False)

        for client in [first_root_client, second_root_client, third_root_client]:
            TestArakoon.verify_arakoon_structure(client, cluster_name, False, False)

        GeneralArakoon.delete_config(cluster_name)