Example #1
    def ovs_2263_verify_alba_namespace_cleanup_test():
        """
        Verify ALBA namespace cleanup
        Create a number of namespaces in ALBA
        Create a vPool and create some volumes
        Verify the number of namespaces before and after vPool creation
        Remove the vPool and the manually created namespaces
        Verify the number of namespaces before and after vPool deletion
        """

        # Create some namespaces in alba
        no_namespaces = 3
        backend_name = General.get_config().get('backend', 'name')
        backend = GeneralBackend.get_by_name(name=backend_name)
        namespace_name = 'autotest-ns_'
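        # matches only the single-digit namespace names created below (autotest-ns_0 ... autotest-ns_9)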
        namespace_name_regex = re.compile(r'^autotest-ns_\d$')
        for nmspc_index in range(no_namespaces):
            GeneralAlba.execute_alba_cli_action(backend.alba_backend, 'create-namespace', ['{0}{1}'.format(namespace_name, nmspc_index), 'default'], False)
        result = GeneralAlba.list_alba_namespaces(alba_backend=backend.alba_backend,
                                                  name=namespace_name_regex)
        assert len(result) == no_namespaces, "Expected {0} namespaces present on the {1} backend, found {2}".format(no_namespaces, backend_name, len(result))

        # Create a vPool and create volumes on it
        vpool, _ = GeneralVPool.add_vpool()
        root_client = SSHClient(GeneralStorageRouter.get_local_storagerouter(), username='******')
        if vpool.storagedrivers[0].storagerouter.pmachine.hvtype == 'VMWARE':
            GeneralVPool.mount_vpool(vpool=vpool,
                                     root_client=root_client)

        vdisks = []
        for disk_index in range(no_namespaces):
            vdisks.append(GeneralVDisk.create_volume(size=10,
                                                     vpool=vpool,
                                                     root_client=root_client))
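        # Each volume gets its own backend namespace; the one extra namespace counted
        # below is presumably the vPool's own file-driver namespace.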
        result = GeneralAlba.list_alba_namespaces(alba_backend=backend.alba_backend)
        assert len(result) == 2 * no_namespaces + 1, "Expected {0} namespaces present on the {1} backend, found {2}".format(2 * no_namespaces + 1, backend_name, len(result))

        # Remove files and vPool
        for vdisk in vdisks:
            GeneralVDisk.delete_volume(vdisk=vdisk,
                                       vpool=vpool,
                                       root_client=root_client)

        if vpool.storagedrivers[0].storagerouter.pmachine.hvtype == 'VMWARE':
            GeneralVPool.unmount_vpool(vpool=vpool,
                                       root_client=root_client)

        GeneralVPool.remove_vpool(vpool)

        # Verify amount of namespaces
        result = GeneralAlba.list_alba_namespaces(alba_backend=backend.alba_backend,
                                                  name=namespace_name_regex)
        assert len(result) == no_namespaces, "Expected {0} namespaces present on the {1} backend, found {2}".format(no_namespaces, backend_name, len(result))
        for namespace in result:
            GeneralAlba.execute_alba_cli_action(backend.alba_backend, 'delete-namespace', [namespace['name']], False)
        result = GeneralAlba.list_alba_namespaces(alba_backend=backend.alba_backend,
                                                  name=namespace_name_regex)
        assert len(result) == 0, "Expected no namespaces left on the {0} backend, found {1}".format(backend_name, len(result))
Example #2
        def _validate_setting_cache_value(value_to_verify):
            disk_config_params = GeneralVDisk.get_config_params(vdisk)
            disk_config_params['metadata_cache_size'] = value_to_verify

            GeneralVDisk.set_config_params(vdisk, {'new_config_params': disk_config_params})
            disk_config_params = GeneralVDisk.get_config_params(vdisk)
            actual_value = disk_config_params['metadata_cache_size']
            assert actual_value == value_to_verify,\
                'Value after set/get differs, actual: {0}, expected: {1}'.format(actual_value, value_to_verify)
Example #3
    def ovs_3756_metadata_size_test():
        """
        Validate get/set metadata cache size for a vdisk
        """
        disk_name = 'ovs-3756-disk'
        loop = 'loop0'
        vpool = GeneralVPool.get_vpool_by_name(TestVDisk.vpool_name)
        # vdisk_size in GiB
        vdisk_size = 2
        vdisk = GeneralVDisk.create_volume(size=vdisk_size, vpool=vpool, name=disk_name, loop_device=loop, wait=True)
        storagedriver_config = StorageDriverConfiguration('storagedriver', vdisk.vpool_guid, vdisk.storagedriver_id)

        # "size" of a page = amount of entries in a page (addressable by 6 bits)
        metadata_page_capacity = 64
        cluster_size = storagedriver_config.configuration.get('volume_manager', {}).get('default_cluster_size', 4096)
        cache_capacity = int(min(vdisk.size, 2 * 1024 ** 4) / float(metadata_page_capacity * cluster_size))
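        # Worked example (assuming vdisk.size is in bytes and the default 4096-byte cluster size):
        # for the 2 GiB vdisk above, cache_capacity = 2 GiB / (64 entries * 4096 B) = 8192 pages.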

        default_metadata_cache_size = StorageDriverClient.DEFAULT_METADATA_CACHE_SIZE

        def _validate_setting_cache_value(value_to_verify):
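            # Values above the default appear to be capped by the storage driver,
            # so clamp the expected value before comparing.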
            if value_to_verify > default_metadata_cache_size:
                value_to_verify = default_metadata_cache_size
            disk_config_params = GeneralVDisk.get_config_params(vdisk)
            disk_config_params['metadata_cache_size'] = value_to_verify

            GeneralVDisk.set_config_params(vdisk, {'new_config_params': disk_config_params})
            disk_config_params = GeneralVDisk.get_config_params(vdisk)
            actual_value = disk_config_params['metadata_cache_size']
            assert actual_value == value_to_verify,\
                'Value after set/get differs, actual: {0}, expected: {1}'.format(actual_value, value_to_verify)

        config_params = GeneralVDisk.get_config_params(vdisk)

        # validate default metadata cache as it was not set explicitly
        default_implicit_value = config_params['metadata_cache_size']
        assert default_implicit_value == default_metadata_cache_size,\
            'Expected default cache size: {0}, got {1}'.format(default_metadata_cache_size, default_implicit_value)

        # verify set/get of specific value - larger than default
        _validate_setting_cache_value(10000 * cache_capacity)

        # verify set/get of specific value - default value
        _validate_setting_cache_value(default_metadata_cache_size)

        GeneralVDisk.delete_volume(vdisk=vdisk, vpool=vpool, loop_device=loop, wait=True)
Example #4
def teardown():
    """
    Teardown for the VirtualDisk package; executed once all tests in this package have finished
    Cleans up anything the test run may have left behind
    :return: None
    """
    vpool_name = General.get_config().get("vpool", "name")
    vpool = GeneralVPool.get_vpool_by_name(vpool_name)

    for vd in VDiskList.get_vdisks():
        GeneralVDisk.delete_volume(vd, vpool, loop_device='loop0')

    if vpool is not None:
        GeneralVPool.remove_vpool(vpool)

    alba_backend = GeneralAlba.get_by_name(General.get_config().get('backend', 'name'))
    if alba_backend is not None:
        GeneralAlba.unclaim_disks_and_remove_alba_backend(alba_backend=alba_backend)
Example #5
    def ovs_3756_metadata_size_test():
        """
        Validate get/set metadata cache size for a vdisk
        """
        metadata_cache_page_size = 256 * 24
        default_metadata_cache_size = 8192 * metadata_cache_page_size
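        # i.e. 8192 pages of 256 * 24 bytes each (~48 MiB), assuming both values are expressed in bytes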

        disk_name = 'ovs-3756-disk'
        loop = 'loop0'
        vpool = GeneralVPool.get_vpool_by_name(TestVDisk.vpool_name)
        vdisk = GeneralVDisk.create_volume(size=2, vpool=vpool, name=disk_name, loop_device=loop, wait=True)

        def _validate_setting_cache_value(value_to_verify):
            disk_config_params = GeneralVDisk.get_config_params(vdisk)
            disk_config_params['metadata_cache_size'] = value_to_verify

            GeneralVDisk.set_config_params(vdisk, {'new_config_params': disk_config_params})
            disk_config_params = GeneralVDisk.get_config_params(vdisk)
            actual_value = disk_config_params['metadata_cache_size']
            assert actual_value == value_to_verify,\
                'Value after set/get differs, actual: {0}, expected: {1}'.format(actual_value, value_to_verify)

        config_params = GeneralVDisk.get_config_params(vdisk)

        # validate default metadata cache as it was not set explicitly
        default_implicit_value = config_params['metadata_cache_size']
        assert default_implicit_value == default_metadata_cache_size,\
            'Expected default cache size: {0}, got {1}'.format(default_metadata_cache_size, default_implicit_value)

        # verify set/get of specific value - larger than default
        _validate_setting_cache_value(10000 * metadata_cache_page_size)

        # verify set/get of specific value - default value
        _validate_setting_cache_value(default_metadata_cache_size)

        # verify set/get of specific value - smaller than default value
        _validate_setting_cache_value(100 * metadata_cache_page_size)

        GeneralVDisk.delete_volume(vdisk=vdisk, vpool=vpool, loop_device=loop, wait=True)
Example #6
    def validate_vpool_sanity(expected_settings):
        """
        Check if all requirements are met for a healthy vPool
        :param expected_settings: Parameters used to create a vPool, which will be verified
        :type expected_settings: dict

        :return: None
        """
        if not isinstance(expected_settings, dict) or len(expected_settings) == 0:
            raise ValueError('Cannot validate vpool when no settings are passed')

        generic_settings = expected_settings.values()[0]
        vpool_name = generic_settings['vpool_name']
        mountpoint = '/mnt/{0}'.format(vpool_name)
        backend_type = generic_settings['type']
        rdma_enabled = generic_settings['config_params']['dtl_transport'] == StorageDriverClient.FRAMEWORK_DTL_TRANSPORT_RSOCKET

        vpool = GeneralVPool.get_vpool_by_name(vpool_name=vpool_name)
        assert vpool is not None, 'Could not find vPool with name {0}'.format(vpool_name)
        vpool_config = GeneralVPool.get_configuration(vpool)

        # Verify some basic vPool attributes
        assert vpool.name == vpool_name, 'Expected name {0} for vPool'.format(vpool_name)
        assert vpool.backend_type.code == backend_type, 'Expected backend type {0}'.format(backend_type)
        assert vpool.status == VPool.STATUSES.RUNNING, 'vPool does not have RUNNING status'
        assert vpool.rdma_enabled == rdma_enabled, 'RDMA enabled setting is incorrect'
        assert set(expected_settings.keys()) == set([sd.storagerouter for sd in vpool.storagedrivers]), "vPool storagerouters don't match the expected Storage Routers"

        # Verify vPool Storage Driver configuration
        expected_vpool_config = copy.deepcopy(generic_settings['config_params'])
        for key, value in vpool_config.iteritems():
            if key == 'dtl_enabled' or key == 'tlog_multiplier':
                continue
            if key not in expected_vpool_config:
                raise ValueError('Expected settings do not contain key {0}'.format(key))

            if value != expected_vpool_config[key]:
                raise ValueError('vPool does not have expected configuration {0} for key {1}'.format(expected_vpool_config[key], key))
            expected_vpool_config.pop(key)

        if len(expected_vpool_config) > 0:
            raise ValueError('Actual vPool configuration does not contain keys: {0}'.format(', '.join(expected_vpool_config.keys())))

        # Prepare some fields to check
        config = generic_settings['config_params']
        dtl_mode = config['dtl_mode']
        sco_size = config['sco_size']
        dedupe_mode = config['dedupe_mode']
        cluster_size = config['cluster_size']
        write_buffer = config['write_buffer']
        dtl_transport = config['dtl_transport']
        cache_strategy = config['cache_strategy']
        # @TODO: Add more validations for other expected settings (instead of None)
        expected_config = {'backend_connection_manager': {'backend_interface_retries_on_error': 5,
                                                          'backend_interface_retry_interval_secs': 1,
                                                          'backend_interface_retry_backoff_multiplier': 2.0},
                           'content_addressed_cache': {'clustercache_mount_points': None,
                                                       'read_cache_serialization_path': u'/var/rsp/{0}'.format(vpool.name)},
                           'distributed_lock_store': {'dls_arakoon_cluster_id': None,
                                                      'dls_arakoon_cluster_nodes': None,
                                                      'dls_type': u'Arakoon'},
                           'distributed_transaction_log': {'dtl_path': None,
                                                           'dtl_transport': dtl_transport.upper()},
                           'event_publisher': {'events_amqp_routing_key': u'volumerouter',
                                               'events_amqp_uris': None},
                           'file_driver': {'fd_cache_path': None,
                                           'fd_extent_cache_capacity': u'1024',
                                           'fd_namespace': None},
                           'filesystem': {'fs_dtl_config_mode': u'Automatic',
                                          'fs_dtl_mode': u'{0}'.format(StorageDriverClient.VPOOL_DTL_MODE_MAP[dtl_mode]),
                                          'fs_enable_shm_interface': 1,
                                          'fs_file_event_rules': None,
                                          'fs_metadata_backend_arakoon_cluster_nodes': None,
                                          'fs_metadata_backend_mds_nodes': None,
                                          'fs_metadata_backend_type': u'MDS',
                                          'fs_raw_disk_suffix': None,
                                          'fs_virtual_disk_format': None},
                           'metadata_server': {'mds_nodes': None},
                           'scocache': {'backoff_gap': u'2GB',
                                        'scocache_mount_points': None,
                                        'trigger_gap': u'1GB'},
                           'threadpool_component': {'num_threads': 16},
                           'volume_manager': {'clean_interval': 1,
                                              'default_cluster_size': 1024 * cluster_size,
                                              'dtl_throttle_usecs': 4000,
                                              'metadata_path': None,
                                              'non_disposable_scos_factor': float(write_buffer) / StorageDriverClient.TLOG_MULTIPLIER_MAP[sco_size] / sco_size,
                                              'number_of_scos_in_tlog': StorageDriverClient.TLOG_MULTIPLIER_MAP[sco_size],
                                              'read_cache_default_behaviour': StorageDriverClient.VPOOL_CACHE_MAP[cache_strategy],
                                              'read_cache_default_mode': StorageDriverClient.VPOOL_DEDUPE_MAP[dedupe_mode],
                                              'tlog_path': None},
                           'volume_registry': {'vregistry_arakoon_cluster_id': u'voldrv',
                                               'vregistry_arakoon_cluster_nodes': None},
                           'volume_router': {'vrouter_backend_sync_timeout_ms': 5000,
                                             'vrouter_file_read_threshold': 1024,
                                             'vrouter_file_write_threshold': 1024,
                                             'vrouter_id': None,
                                             'vrouter_max_workers': 16,
                                             'vrouter_migrate_timeout_ms': 5000,
                                             'vrouter_min_workers': 4,
                                             'vrouter_redirect_timeout_ms': u'5000',
                                             'vrouter_routing_retries': 10,
                                             'vrouter_sco_multiplier': 1024,
                                             'vrouter_volume_read_threshold': 1024,
                                             'vrouter_volume_write_threshold': 1024},
                           'volume_router_cluster': {'vrouter_cluster_id': None}}
        vpool_services = {'all': ['ovs-watcher-volumedriver',
                                  'ovs-dtl_{0}'.format(vpool.name),
                                  'ovs-volumedriver_{0}'.format(vpool.name),
                                  'ovs-volumerouter-consumer'],
                          'extra': [],
                          'master': ['ovs-arakoon-voldrv']}
        sd_partitions = {'DB': ['MD', 'MDS', 'TLOG'],
                         'READ': ['None'],
                         'WRITE': ['FD', 'DTL', 'SCO'],
                         'SCRUB': ['None']}
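        # The string 'None' is a placeholder matched against partitions whose sub_role
        # attribute is None; entries are removed from these lists as partitions are found below.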

        if backend_type == 'alba':
            backend_metadata = {'name': (str, None),
                                'preset': (str, Toolbox.regex_preset),
                                'backend_guid': (str, Toolbox.regex_guid),
                                'arakoon_config': (dict, None),
                                'connection': (dict, {'host': (str, Toolbox.regex_ip, False),
                                                      'port': (int, {'min': 1, 'max': 65535}),
                                                      'client_id': (str, Toolbox.regex_guid),
                                                      'client_secret': (str, None),
                                                      'local': (bool, None)}),
                                'backend_info': (dict, {'policies': (list, None),
                                                        'sco_size': (float, None),
                                                        'frag_size': (float, None),
                                                        'total_size': (float, None),
                                                        'nsm_partition_guids': (list, Toolbox.regex_guid)})}
            required = {'backend': (dict, backend_metadata),
                        'backend_aa': (dict, backend_metadata, False)}
            Toolbox.verify_required_params(required_params=required,
                                           actual_params=vpool.metadata)
            vpool_services['all'].append("ovs-albaproxy_{0}".format(vpool.name))
            sd_partitions['WRITE'].append('FCACHE')
            expected_config['backend_connection_manager'].update({'alba_connection_host': None,
                                                                  'alba_connection_port': None,
                                                                  'alba_connection_preset': None,
                                                                  'alba_connection_timeout': 15,
                                                                  'backend_type': u'{0}'.format(vpool.backend_type.code.upper())})
        elif backend_type == 'distributed':
            expected_config['backend_connection_manager'].update({'backend_type': u'LOCAL',
                                                                  'local_connection_path': u'{0}'.format(generic_settings['distributed_mountpoint'])})

        assert EtcdConfiguration.exists('/ovs/arakoon/voldrv/config', raw=True), 'Volumedriver arakoon does not exist'

        # Do some verifications for all SDs
        storage_ip = None
        voldrv_config = GeneralArakoon.get_config('voldrv')
        all_files = GeneralVPool.get_related_files(vpool=vpool)
        all_directories = GeneralVPool.get_related_directories(vpool=vpool)

        for storagedriver in vpool.storagedrivers:
            storagerouter = storagedriver.storagerouter
            root_client = SSHClient(storagerouter, username='******')

            assert EtcdConfiguration.exists('/ovs/vpools/{0}/hosts/{1}/config'.format(vpool.guid, storagedriver.storagedriver_id), raw=True), 'vPool config not found in etcd'
            current_config_sections = set([item for item in EtcdConfiguration.list('/ovs/vpools/{0}/hosts/{1}/config'.format(vpool.guid, storagedriver.storagedriver_id))])
            assert not current_config_sections.difference(set(expected_config.keys())), 'New section appeared in the storage driver config in etcd'
            assert not set(expected_config.keys()).difference(current_config_sections), 'Config section expected for storage driver, but not found in etcd'

            for key, values in expected_config.iteritems():
                current_config = EtcdConfiguration.get('/ovs/vpools/{0}/hosts/{1}/config/{2}'.format(vpool.guid, storagedriver.storagedriver_id, key))
                assert set(current_config.keys()).union(set(values.keys())) == set(values.keys()), 'Not all expected keys match for key "{0}" on Storage Driver {1}'.format(key, storagedriver.name)

                for sub_key, value in current_config.iteritems():
                    expected_value = values[sub_key]
                    if expected_value is None:
                        continue
                    assert value == expected_value, 'Key: {0} - Sub key: {1} - Value: {2} - Expected value: {3}'.format(key, sub_key, value, expected_value)

            # Check services
            if storagerouter.node_type == 'MASTER':
                for service_name in vpool_services['all'] + vpool_services['master']:
                    if service_name == 'ovs-arakoon-voldrv' and GeneralStorageDriver.has_role(storagedriver, 'DB') is False:
                        continue
                    if ServiceManager.get_service_status(name=service_name,
                                                         client=root_client) is not True:
                        raise ValueError('Service {0} is not running on node {1}'.format(service_name, storagerouter.ip))
            else:
                for service_name in vpool_services['all'] + vpool_services['extra']:
                    if ServiceManager.get_service_status(name=service_name,
                                                         client=root_client) is not True:
                        raise ValueError('Service {0} is not running on node {1}'.format(service_name, storagerouter.ip))

            # Check arakoon config
            if not voldrv_config.has_section(storagerouter.machine_id):
                raise ValueError('Voldrv arakoon cluster does not have section {0}'.format(storagerouter.machine_id))

            # Basic SD checks
            assert storagedriver.cluster_ip == storagerouter.ip, 'Incorrect cluster IP. Expected: {0}  -  Actual: {1}'.format(storagerouter.ip, storagedriver.cluster_ip)
            assert storagedriver.mountpoint == '/mnt/{0}'.format(vpool.name), 'Incorrect mountpoint. Expected: {0}  -  Actual: {1}'.format(mountpoint, storagedriver.mountpoint)
            if storage_ip is not None:
                assert storagedriver.storage_ip == storage_ip, 'Incorrect storage IP. Expected: {0}  -  Actual: {1}'.format(storage_ip, storagedriver.storage_ip)
            storage_ip = storagedriver.storage_ip

            # Check required directories and files
            if storagerouter.guid not in all_directories:
                raise ValueError('Could not find directory information for Storage Router {0}'.format(storagerouter.ip))
            if storagerouter.guid not in all_files:
                raise ValueError('Could not find file information for Storage Router {0}'.format(storagerouter.ip))

            for directory in all_directories[storagerouter.guid]:
                if root_client.dir_exists(directory) is False:
                    raise ValueError('Directory {0} does not exist on Storage Router {1}'.format(directory, storagerouter.ip))
            for file_name in all_files[storagerouter.guid]:
                if root_client.file_exists(file_name) is False:
                    raise ValueError('File {0} does not exist on Storage Router {1}'.format(file_name, storagerouter.ip))

            for partition in storagedriver.partitions:
                if partition.role in sd_partitions and partition.sub_role in sd_partitions[partition.role]:
                    sd_partitions[partition.role].remove(partition.sub_role)
                elif partition.role in sd_partitions and partition.sub_role is None and 'None' in sd_partitions[partition.role]:
                    sd_partitions[partition.role].remove('None')

            # Verify vPool writeable
            if storagerouter.pmachine.hvtype == 'VMWARE':
                GeneralVPool.mount_vpool(vpool=vpool,
                                         root_client=root_client)

            vdisk = GeneralVDisk.create_volume(size=10,
                                               vpool=vpool,
                                               root_client=root_client)
            GeneralVDisk.write_to_volume(vdisk=vdisk,
                                         vpool=vpool,
                                         root_client=root_client,
                                         count=10,
                                         bs='1M',
                                         input_type='random')
            GeneralVDisk.delete_volume(vdisk=vdisk,
                                       vpool=vpool,
                                       root_client=root_client)

        for role, sub_roles in sd_partitions.iteritems():
            for sub_role in sub_roles:
                raise ValueError('Not a single Storage Driver found with partition role {0} and sub-role {1}'.format(role, sub_role))
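
    A minimal usage sketch for the validator above, assuming it is exposed as a static method on GeneralVPool; the expected_settings layout is inferred from the accesses in the function, and every name and value here is illustrative:

        expected_settings = {storagerouter: {'vpool_name': 'myvpool',  # hypothetical vPool name
                                             'type': 'alba',
                                             'config_params': {'dtl_mode': 'a_sync',
                                                               'dtl_transport': 'tcp',
                                                               'sco_size': 4,
                                                               'dedupe_mode': 'dedupe',
                                                               'cluster_size': 4,
                                                               'write_buffer': 128,
                                                               'cache_strategy': 'on_read'}}}
        GeneralVPool.validate_vpool_sanity(expected_settings)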
Example #7
    def cleanup():
        """
        Do some cleanup actions
        :return: None
        """
        from ci.tests.general.general_pmachine import GeneralPMachine
        from ci.tests.general.general_vdisk import GeneralVDisk
        from ci.tests.general.general_vmachine import GeneralVMachine

        def _get_remote_ssh_connection(ip_address, username, password):
            import paramiko
            ssh_connection = paramiko.SSHClient()
            ssh_connection.set_missing_host_key_policy(paramiko.AutoAddPolicy())
            ssh_connection.connect(ip_address, username=username, password=password, timeout=2)
            sftp = ssh_connection.open_sftp()
            return ssh_connection, sftp

        # @TODO: Split this cleanup function up in relevant parts and put them in the correct general files
        machine_name = "AT_"

        from ci.tests.general import general_hypervisor
        from ci.tests.general.general_vpool import GeneralVPool
        for vpool in GeneralVPool.get_vpools():
            if vpool:
                hpv = general_hypervisor.Hypervisor.get(vpool)
                vm_names = [vm.name for vm in GeneralVMachine.get_vmachines()]
                for name in vm_names:
                    vm = GeneralVMachine.get_vmachine_by_name(name)
                    if not vm:
                        continue
                    vm = vm[0]
                    if not vm.name.startswith(machine_name):
                        continue
                    if vm.is_vtemplate:
                        hpv.delete_clones(vm)
                    logging.log(1, "Deleting {0} on hypervisor".format(vm.name))
                    hpv.poweroff(vm.name)
                    hpv.delete(vm.name)

                env_macs = General.execute_command("""ip a | awk '/link\/ether/ {gsub(":","",$2);print $2;}'""")[0].splitlines()
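                # Collect this node's MAC addresses (colons stripped); leftover files
                # under <mountpoint>/<mac> are cleaned up further down.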
                if vpool.storagedrivers:
                    mountpoint = vpool.storagedrivers[0].mountpoint
                    if os.path.exists(mountpoint):
                        for d in os.listdir(mountpoint):
                            if d.startswith(machine_name):
                                p = '/'.join([mountpoint, d])
                                if os.path.isdir(p):
                                    logging.log(1, "removing tree: {0}".format(p))
                                    shutil.rmtree(p)
                                else:
                                    logging.log(1, "removing file: {0}".format(p))
                                    if os.path.isfile(p):
                                        os.remove(p)
                        for mac in env_macs:
                            mac_path = '/'.join([mountpoint, mac])
                            if os.path.exists(mac_path):
                                for f in os.listdir(mac_path):
                                    logging.log(1, "removing file: {0}".format(f))
                                    os.remove('/'.join([mac_path, f]))

                # remove existing disks
                vdisks = GeneralVDisk.get_vdisks()
                for vdisk in vdisks:
                    if vdisk:
                        for junction in vdisk.mds_services:
                            if junction:
                                junction.delete()
                        vdisk.delete()
                        logging.log(1, 'WARNING: Removed leftover disk: {0}'.format(vdisk.name))

                GeneralVPool.remove_vpool(vpool)

                if GeneralPMachine.get_hypervisor_type() == 'VMWARE':
                    from ci.tests.general.general_hypervisor import GeneralHypervisor
                    hypervisor_info = GeneralHypervisor.get_hypervisor_info()
                    ssh_con = _get_remote_ssh_connection(*hypervisor_info)[0]
                    cmd = "esxcli storage nfs remove -v {0}".format(vpool.name)
                    ssh_con.exec_command(cmd)

                vmachines = GeneralVMachine.get_vmachines()
                for vmachine in vmachines:
                    logging.log(1, 'WARNING: Removing leftover vmachine: {0}'.format(vmachine.name))
                    vmachine.delete()
Example #8
    def fdl_0002_add_remove_partition_with_role_and_crosscheck_model_test():
        """
        FDL-0002 - create/remove disk partition using full disk and verify ovs model
            - look for an unused disk
            - add a partition using full disk and assign a DB role to the partition
            - validate ovs model is correctly updated with DB role
            - cleanup that partition
            - verify ovs model is correctly updated
        """
        if TestFlexibleDiskLayout.continue_testing.state is False:
            logger.info('Test suite signaled to stop')
            return

        my_sr = GeneralStorageRouter.get_local_storagerouter()

        unused_disks = GeneralDisk.get_unused_disks()
        if not unused_disks:
            logger.info("At least one unused disk should be available for partition testing")
            return

        hdds = dict()
        ssds = dict()
        mdisks = GeneralDisk.get_disks()
        for disk in mdisks:
            if disk.storagerouter_guid == my_sr.guid:
                if disk.is_ssd:
                    ssds['/dev/' + disk.name] = disk
                else:
                    hdds['/dev/' + disk.name] = disk

        all_disks = dict(ssds)
        all_disks.update(hdds)

        # check no partitions are modelled for unused disks
        partitions = GeneralDisk.get_disk_partitions()
        partitions_detected = False
        disk_guid = ''
        for path in unused_disks:
            # @TODO: remove the if when ticket OVS-4503 is solved
            if path in all_disks:
                disk_guid = all_disks[path].guid
                for partition in partitions:
                    if partition.disk_guid == disk_guid:
                        partitions_detected = True
        assert partitions_detected is False, 'Existing partitions detected on unused disks!'

        # try to partition a disk using its full reported size
        disk = all_disks[unused_disks[0]]
        GeneralDisk.configure_disk(storagerouter=my_sr,
                                   disk=disk,
                                   offset=0,
                                   size=int(disk.size),
                                   roles=['WRITE'])

        # lookup partition in model
        mountpoint = None
        partitions = GeneralDisk.get_disk_partitions()
        for partition in partitions:
            if partition.disk_guid == disk.guid and 'WRITE' in partition.roles:
                mountpoint = partition.mountpoint
                break
        assert mountpoint, 'New partition was not detected in model'

        GeneralDisk.configure_disk(storagerouter=my_sr,
                                   disk=disk,
                                   offset=0,
                                   partition=partition,
                                   size=int(disk.size),
                                   roles=[])

        # cleanup disk partition
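        # 'echo 0' forces a zero exit status even when umount/rmdir fail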
        cmd = 'umount {0}; rmdir {0}; echo 0'.format(mountpoint)
        General.execute_command_on_node(my_sr.ip, cmd, allow_insecure=True)

        cmd = ['parted', '-s', '/dev/' + disk.name, 'rm', '1']
        General.execute_command_on_node(my_sr.ip, cmd, allow_nonzero=True)

        # wipe partition table to be able to reuse this disk in another test
        GeneralVDisk.write_to_volume(location=disk.aliases[0],
                                     count=64,
                                     bs='1M',
                                     input_type='zero')

        GeneralStorageRouter.sync_with_reality()

        # verify partition no longer exists in ovs model
        is_partition_removed = True
        partitions = GeneralDisk.get_disk_partitions()
        for partition in partitions:
            if partition.disk_guid == disk_guid and 'WRITE' in partition.roles:
                is_partition_removed = False
                break

        assert is_partition_removed is True,\
            'New partition was not deleted successfully from system/model!'

Example #9
    def test_basic_logrotate():
        """
        Verify the current openvstorage logrotate configuration
        Apply the openvstorage logrotate configuration to a custom logfile and verify it rotates as predicted
        Update ownership of the custom file and verify logrotate raises an issue
        """
        storagerouters = GeneralStorageRouter.get_storage_routers()
        logrotate_content = """{0} {{
    rotate 5
    size 20M
    compress
    copytruncate
    notifempty
}}

{1} {{
    su ovs ovs
    rotate 10
    size 19M
    compress
    delaycompress
    notifempty
    create 666 ovs ovs
    postrotate
        /usr/bin/pkill -SIGUSR1 arakoon
    endscript
}}"""
        if len(storagerouters) == 0:
            raise ValueError('No Storage Routers found in the model')

        logrotate_include_dir = '/etc/logrotate.d'
        logrotate_cfg_file = '/etc/logrotate.conf'
        logrotate_cron_file = '/etc/cron.daily/logrotate'
        logrotate_ovs_file = '{0}/openvstorage-logs'.format(logrotate_include_dir)
        expected_logrotate_content = logrotate_content.format('/var/log/ovs/*.log', '/var/log/arakoon/*/*.log')

        # Verify basic logrotate configurations
        for storagerouter in storagerouters:
            root_client = SSHClient(endpoint=storagerouter, username='******')
            assert_true(expr=root_client.file_exists(filename=logrotate_cfg_file),
                        msg='Logrotate config {0} does not exist on Storage Router {1}'.format(logrotate_cfg_file, storagerouter.name))
            assert_true(expr=root_client.file_exists(filename=logrotate_ovs_file),
                        msg='Logrotate file {0} does not exist on Storage Router {1}'.format(logrotate_ovs_file, storagerouter.name))
            assert_true(expr=root_client.file_exists(filename=logrotate_cron_file),
                        msg='Logrotate file {0} does not exist on Storage Router {1}'.format(logrotate_cron_file, storagerouter.name))
            assert_true(expr='include {0}'.format(logrotate_include_dir) in root_client.file_read(filename=logrotate_cfg_file).splitlines(),
                        msg='Logrotate on Storage Router {0} does not include {1}'.format(storagerouter.name, logrotate_include_dir))
            assert_true(expr='/usr/sbin/logrotate /etc/logrotate.conf' in root_client.file_read(filename=logrotate_cron_file).splitlines(),
                        msg='Logrotate will not be executed on Storage Router {0}'.format(storagerouter.name))
            actual_file_contents = root_client.file_read(filename=logrotate_ovs_file).rstrip('\n')
            assert_equal(first=expected_logrotate_content,
                         second=actual_file_contents,
                         msg='Logrotate contents do not match the expected contents on Storage Router {0}'.format(storagerouter.name))

        # Create custom logrotate file for testing purposes
        custom_logrotate_cfg_file = '/opt/OpenvStorage/ci/logrotate-conf'
        custom_logrotate_dir = '/opt/OpenvStorage/ci/logrotate'
        custom_logrotate_file1 = '{0}/logrotate_test_file1.log'.format(custom_logrotate_dir)
        custom_logrotate_file2 = '{0}/logrotate_test_file2.log'.format(custom_logrotate_dir)
        custom_logrotate_content = logrotate_content.format(custom_logrotate_file1, custom_logrotate_file2)
        local_sr = GeneralStorageRouter.get_local_storagerouter()
        root_client = SSHClient(endpoint=local_sr, username='******')
        root_client.file_write(filename=custom_logrotate_cfg_file, contents=custom_logrotate_content)

        # No logfile present --> logrotate should fail
        assert_raises(excClass=CalledProcessError,
                      callableObj=root_client.run,
                      command='logrotate {0}'.format(custom_logrotate_cfg_file))

        ##########################################
        # Test 1st logrotate configuration entry #
        ##########################################
        root_client.dir_create(directories=custom_logrotate_dir)
        root_client.dir_chown(directories=custom_logrotate_dir,
                              user='******',
                              group='ovs',
                              recursive=True)
        root_client.run(command='touch {0}'.format(custom_logrotate_file1))
        root_client.run(command='touch {0}'.format(custom_logrotate_file2))
        root_client.file_chmod(filename=custom_logrotate_file1, mode=666)
        root_client.file_chmod(filename=custom_logrotate_file2, mode=666)

        # Write less data to the file than the rotation size and verify no rotation occurs
        GeneralVDisk.write_to_volume(location=custom_logrotate_file1,
                                     count=15,
                                     bs='1M',
                                     input_type='zero',
                                     root_client=root_client)
        root_client.run('logrotate {0}'.format(custom_logrotate_cfg_file))
        assert_equal(first=len(root_client.file_list(directory=custom_logrotate_dir)),
                     second=2,
                     msg='More files than expected present in {0}'.format(custom_logrotate_dir))

        # Write more data to the file than the configured rotation size and verify the number of rotations
        files_to_delete = []
        for counter in range(7):
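            # With 'rotate 5' the archives are named <file>.1.gz (newest) through
            # <file>.5.gz (oldest), so the expected suffix caps at 5.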
            expected_file = '{0}.{1}.gz'.format(custom_logrotate_file1, counter + 1 if counter < 5 else 5)
            GeneralVDisk.write_to_volume(location=custom_logrotate_file1,
                                         count=30,
                                         bs='1M',
                                         input_type='zero',
                                         root_client=root_client)
            root_client.run('logrotate {0}'.format(custom_logrotate_cfg_file))
            assert_equal(first=len(root_client.file_list(directory=custom_logrotate_dir)),
                         second=counter + 3 if counter < 5 else 7,
                         msg='Not the expected amount of files present in {0}'.format(custom_logrotate_dir))
            assert_true(expr=root_client.file_exists(filename=expected_file),
                        msg='Logrotate did not create the expected file {0}'.format(expected_file))
            user_info = General.get_owner_group_for_path(path=expected_file,
                                                         root_client=root_client)
            assert_equal(first='root',
                         second=user_info['user']['name'],
                         msg='Expected file to be owned by user "root", but instead it is owned by "{0}"'.format(user_info['user']['name']))
            assert_equal(first='root',
                         second=user_info['group']['name'],
                         msg='Expected file to be owned by group "root", but instead it is owned by "{0}"'.format(user_info['group']['name']))
            files_to_delete.append(expected_file)
        root_client.file_delete(filenames=files_to_delete)

        ##########################################
        # Test 2nd logrotate configuration entry #
        ##########################################
        root_client.file_chown(filenames=custom_logrotate_file2,
                               user='******',
                               group='ovs')

        # Write less data to the file than the rotation size and verify no rotation occurs
        GeneralVDisk.write_to_volume(location=custom_logrotate_file2,
                                     count=15,
                                     bs='1M',
                                     input_type='zero',
                                     root_client=root_client)
        root_client.run('logrotate {0}'.format(custom_logrotate_cfg_file))
        assert_equal(first=len(root_client.file_list(directory=custom_logrotate_dir)),
                     second=2,
                     msg='More files than expected present in {0}'.format(custom_logrotate_dir))

        # Write more data to the file than the configured rotation size and verify the number of rotations
        for counter in range(12):
            if counter == 0:  # Delaycompress --> file is not compressed during initial cycle
                expected_file = '{0}.1'.format(custom_logrotate_file2)
            else:
                expected_file = '{0}.{1}.gz'.format(custom_logrotate_file2, counter + 1 if counter < 10 else 10)
            GeneralVDisk.write_to_volume(location=custom_logrotate_file2,
                                         count=30,
                                         bs='1M',
                                         input_type='zero',
                                         root_client=root_client)
            root_client.run('logrotate {0}'.format(custom_logrotate_cfg_file))
            assert_equal(first=len(root_client.file_list(directory=custom_logrotate_dir)),
                         second=counter + 3 if counter < 10 else 12,
                         msg='Not the expected amount of files present in {0}'.format(custom_logrotate_dir))
            assert_true(expr=root_client.file_exists(filename=expected_file),
                        msg='Logrotate did not create the expected file {0}'.format(expected_file))
            user_info = General.get_owner_group_for_path(path=expected_file,
                                                         root_client=root_client)
            assert_equal(first='ovs',
                         second=user_info['user']['name'],
                         msg='Expected file to be owned by user "ovs", but instead it is owned by "{0}"'.format(user_info['user']['name']))
            assert_equal(first='ovs',
                         second=user_info['group']['name'],
                         msg='Expected file to be owned by group "ovs", but instead it is owned by "{0}"'.format(user_info['group']['name']))

        root_client.dir_delete(directories=custom_logrotate_dir)
        root_client.file_delete(filenames=custom_logrotate_cfg_file)
Example #10
    def validate_vpool_sanity(expected_settings):
        """
        Check if all requirements are met for a healthy vPool
        :param expected_settings: Parameters used to create a vPool, which will be verified
        :type expected_settings: dict

        :return: None
        """
        if not isinstance(expected_settings, dict) or len(expected_settings) == 0:
            raise ValueError("Cannot validate vpool when no settings are passed")

        generic_settings = expected_settings.values()[0]
        vpool_name = generic_settings["vpool_name"]
        mountpoint = "/mnt/{0}".format(vpool_name)
        backend_type = generic_settings["type"]
        rdma_enabled = (
            generic_settings["config_params"]["dtl_transport"] == StorageDriverClient.FRAMEWORK_DTL_TRANSPORT_RSOCKET
        )

        vpool = GeneralVPool.get_vpool_by_name(vpool_name=vpool_name)
        assert vpool is not None, "Could not find vPool with name {0}".format(vpool_name)
        vpool_config = GeneralVPool.get_configuration(vpool)

        # Verify some basic vPool attributes
        assert vpool.name == vpool_name, "Expected name {0} for vPool".format(vpool_name)
        assert vpool.status == VPool.STATUSES.RUNNING, "vPool does not have RUNNING status"
        assert vpool.rdma_enabled == rdma_enabled, "RDMA enabled setting is incorrect"
        assert set(expected_settings.keys()) == set(
            [sd.storagerouter for sd in vpool.storagedrivers]
        ), "vPool storagerouters don't match the expected Storage Routers"

        # Verify vPool Storage Driver configuration
        expected_vpool_config = copy.deepcopy(generic_settings["config_params"])
        for key, value in vpool_config.iteritems():
            if key == "dtl_enabled" or key == "tlog_multiplier" or key == "dtl_config_mode":
                continue
            if key not in expected_vpool_config:
                raise ValueError("Expected settings does not contain key {0}".format(key))

            if value != expected_vpool_config[key]:
                raise ValueError(
                    "vPool does not have expected configuration {0} for key {1}".format(expected_vpool_config[key], key)
                )
            expected_vpool_config.pop(key)

        if len(expected_vpool_config) > 0:
            raise ValueError(
                "Actual vPool configuration does not contain keys: {0}".format(", ".join(expected_vpool_config.keys()))
            )

        # Prepare some fields to check
        config = generic_settings["config_params"]
        dtl_mode = config["dtl_mode"]
        sco_size = config["sco_size"]
        cluster_size = config["cluster_size"]
        write_buffer = config["write_buffer"]
        dtl_transport = config["dtl_transport"]
        # @TODO: Add more validations for other expected settings (instead of None)
        expected_config = {
            "backend_connection_manager": {
                "backend_interface_retries_on_error": 5,
                "backend_interface_retry_interval_secs": 1,
                "backend_interface_retry_backoff_multiplier": 2.0,
            },
            "content_addressed_cache": {
                "clustercache_mount_points": None,
                "read_cache_serialization_path": u"/var/rsp/{0}".format(vpool.name),
            },
            "distributed_lock_store": {
                "dls_arakoon_cluster_id": None,
                "dls_arakoon_cluster_nodes": None,
                "dls_type": u"Arakoon",
            },
            "distributed_transaction_log": {"dtl_path": None, "dtl_transport": dtl_transport.upper()},
            "event_publisher": {"events_amqp_routing_key": u"volumerouter", "events_amqp_uris": None},
            "file_driver": {"fd_cache_path": None, "fd_extent_cache_capacity": u"1024", "fd_namespace": None},
            "filesystem": {
                "fs_dtl_config_mode": u"Automatic",
                "fs_dtl_mode": u"{0}".format(StorageDriverClient.VPOOL_DTL_MODE_MAP[dtl_mode]),
                "fs_enable_shm_interface": 1,
                "fs_file_event_rules": None,
                "fs_metadata_backend_arakoon_cluster_nodes": None,
                "fs_metadata_backend_mds_nodes": None,
                "fs_metadata_backend_type": u"MDS",
                "fs_raw_disk_suffix": None,
                "fs_virtual_disk_format": None,
            },
            "metadata_server": {"mds_nodes": None},
            "scocache": {"backoff_gap": u"2GB", "scocache_mount_points": None, "trigger_gap": u"1GB"},
            "threadpool_component": {"num_threads": 16},
            "volume_manager": {
                "clean_interval": 1,
                "default_cluster_size": 1024 * cluster_size,
                "dtl_throttle_usecs": 4000,
                "metadata_path": None,
                "non_disposable_scos_factor": float(write_buffer)
                / StorageDriverClient.TLOG_MULTIPLIER_MAP[sco_size]
                / sco_size,
                "number_of_scos_in_tlog": StorageDriverClient.TLOG_MULTIPLIER_MAP[sco_size],
                "tlog_path": None,
            },
            "volume_registry": {"vregistry_arakoon_cluster_id": u"voldrv", "vregistry_arakoon_cluster_nodes": None},
            "volume_router": {
                "vrouter_backend_sync_timeout_ms": 5000,
                "vrouter_file_read_threshold": 1024,
                "vrouter_file_write_threshold": 1024,
                "vrouter_id": None,
                "vrouter_max_workers": 16,
                "vrouter_migrate_timeout_ms": 5000,
                "vrouter_min_workers": 4,
                "vrouter_redirect_timeout_ms": u"5000",
                "vrouter_routing_retries": 10,
                "vrouter_sco_multiplier": 1024,
                "vrouter_volume_read_threshold": 1024,
                "vrouter_volume_write_threshold": 1024,
            },
            "volume_router_cluster": {"vrouter_cluster_id": None},
        }
        vpool_services = {
            "all": [
                "ovs-watcher-volumedriver",
                "ovs-dtl_{0}".format(vpool.name),
                "ovs-volumedriver_{0}".format(vpool.name),
                "ovs-volumerouter-consumer",
            ],
            "extra": [],
            "master": ["ovs-arakoon-voldrv"],
        }
        sd_partitions = {"DB": ["MD", "MDS", "TLOG"], "WRITE": ["FD", "DTL", "SCO"]}

        assert Configuration.exists("/ovs/arakoon/voldrv/config", raw=True), "Volumedriver arakoon does not exist"

        # Do some verifications for all SDs
        storage_ip = None
        voldrv_config = GeneralArakoon.get_config("voldrv")
        all_files = GeneralVPool.get_related_files(vpool=vpool)
        all_directories = GeneralVPool.get_related_directories(vpool=vpool)

        for storagedriver in vpool.storagedrivers:
            storagerouter = storagedriver.storagerouter
            root_client = SSHClient(storagerouter, username="******")

            assert Configuration.exists(
                "/ovs/vpools/{0}/hosts/{1}/config".format(vpool.guid, storagedriver.storagedriver_id), raw=True
            ), "vPool config not found in configuration"
            # @todo: replace next lines with implementation defined in: http://jira.openvstorage.com/browse/OVS-4577
            # current_config_sections = set([item for item in Configuration.list('/ovs/vpools/{0}/hosts/{1}/config'.format(vpool.guid, storagedriver.storagedriver_id))])
            # assert not current_config_sections.difference(set(expected_config.keys())), 'New section appeared in the storage driver config in configuration'
            # assert not set(expected_config.keys()).difference(current_config_sections), 'Config section expected for storage driver, but not found in configuration'
            #
            # for key, values in expected_config.iteritems():
            #     current_config = Configuration.get('/ovs/vpools/{0}/hosts/{1}/config/{2}'.format(vpool.guid, storagedriver.storagedriver_id, key))
            #     assert set(current_config.keys()).union(set(values.keys())) == set(values.keys()), 'Not all expected keys match for key "{0}" on Storage Driver {1}'.format(key, storagedriver.name)
            #
            #     for sub_key, value in current_config.iteritems():
            #         expected_value = values[sub_key]
            #         if expected_value is None:
            #             continue
            #         assert value == expected_value, 'Key: {0} - Sub key: {1} - Value: {2} - Expected value: {3}'.format(key, sub_key, value, expected_value)

            # Check services
            if storagerouter.node_type == "MASTER":
                for service_name in vpool_services["all"] + vpool_services["master"]:
                    if (
                        service_name == "ovs-arakoon-voldrv"
                        and GeneralStorageDriver.has_role(storagedriver, "DB") is False
                    ):
                        continue
                    exitcode, output = ServiceManager.get_service_status(name=service_name, client=root_client)
                    if exitcode is not True:
                        raise ValueError(
                            "Service {0} is not running on node {1} - {2}".format(
                                service_name, storagerouter.ip, output
                            )
                        )
            else:
                for service_name in vpool_services["all"] + vpool_services["extra"]:
                    exitcode, output = ServiceManager.get_service_status(name=service_name, client=root_client)
                    if exitcode is not True:
                        raise ValueError(
                            "Service {0} is not running on node {1} - {2}".format(
                                service_name, storagerouter.ip, output
                            )
                        )

            # Check arakoon config
            if not voldrv_config.has_section(storagerouter.machine_id):
                raise ValueError("Voldrv arakoon cluster does not have section {0}".format(storagerouter.machine_id))

            # Basic SD checks
            assert (
                storagedriver.cluster_ip == storagerouter.ip
            ), "Incorrect cluster IP. Expected: {0}  -  Actual: {1}".format(storagerouter.ip, storagedriver.cluster_ip)
            assert storagedriver.mountpoint == "/mnt/{0}".format(
                vpool.name
            ), "Incorrect mountpoint. Expected: {0}  -  Actual: {1}".format(mountpoint, storagedriver.mountpoint)
            if storage_ip is not None:
                assert (
                    storagedriver.storage_ip == storage_ip
                ), "Incorrect storage IP. Expected: {0}  -  Actual: {1}".format(storage_ip, storagedriver.storage_ip)
            storage_ip = storagedriver.storage_ip

            # Check required directories and files
            if storagerouter.guid not in all_directories:
                raise ValueError("Could not find directory information for Storage Router {0}".format(storagerouter.ip))
            if storagerouter.guid not in all_files:
                raise ValueError("Could not find file information for Storage Router {0}".format(storagerouter.ip))

            for directory in all_directories[storagerouter.guid]:
                if root_client.dir_exists(directory) is False:
                    raise ValueError(
                        "Directory {0} does not exist on Storage Router {1}".format(directory, storagerouter.ip)
                    )
            for file_name in all_files[storagerouter.guid]:
                if root_client.file_exists(file_name) is False:
                    raise ValueError(
                        "File {0} does not exist on Storage Router {1}".format(file_name, storagerouter.ip)
                    )

            # @TODO: check roles and sub_roles for all storagedrivers and not just once
            for partition in storagedriver.partitions:
                if partition.role in sd_partitions and partition.sub_role in sd_partitions[partition.role]:
                    sd_partitions[partition.role].remove(partition.sub_role)
                elif (
                    partition.role in sd_partitions
                    and partition.sub_role is None
                    and "None" in sd_partitions[partition.role]
                ):
                    sd_partitions[partition.role].remove("None")

            # Verify vPool writeable
            if GeneralHypervisor.get_hypervisor_type() == "VMWARE":
                GeneralVPool.mount_vpool(vpool=vpool, root_client=root_client)

            vdisk = GeneralVDisk.create_volume(size=10, vpool=vpool, root_client=root_client)
            GeneralVDisk.write_to_volume(
                vdisk=vdisk, vpool=vpool, root_client=root_client, count=10, bs="1M", input_type="random"
            )
            GeneralVDisk.delete_volume(vdisk=vdisk, vpool=vpool, root_client=root_client)

        for role, sub_roles in sd_partitions.iteritems():
            for sub_role in sub_roles:
                raise ValueError(
                    "Not a single Storage Driver found with partition role {0} and sub-role {1}".format(role, sub_role)
                )
Example #11
    def ovs_3791_validate_backend_sync_test():
        """
        Validate vdisk backend sync method
        """
        disk_name = 'ovs-3791-disk'
        loop = 'loop0'
        vpool = GeneralVPool.get_vpool_by_name(TestVDisk.vpool_name)
        vdisk = GeneralVDisk.create_volume(size=2, vpool=vpool, name=disk_name, loop_device=loop, wait=True)

        _, snap_id1 = GeneralVDisk.create_snapshot(vdisk=vdisk, snapshot_name='snap0')
        GeneralVDisk.generate_hash_file(full_name='/mnt/{0}/{1}_{2}.txt'.format(loop, vdisk.name, '1'), size=512)
        _, snap_id2 = GeneralVDisk.create_snapshot(vdisk=vdisk, snapshot_name='snap1')
        GeneralVDisk.generate_hash_file(full_name='/mnt/{0}/{1}_{2}.txt'.format(loop, vdisk.name, '2'), size=512)

        tlog_name = GeneralVDisk.schedule_backend_sync(vdisk)
        assert tlog_name[:5] == 'tlog_' and len(tlog_name) == 41,\
            'Unexpected result: {0} does not match tlog type'.format(tlog_name)
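        # presumably 'tlog_' (5 characters) followed by a 36-character UUID, hence the 41-character check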

        timeout = 300
        status = False
        while timeout > 0:
            status = GeneralVDisk.is_volume_synced_up_to_snapshot(vdisk=vdisk, snapshot_id=snap_id2)
            print 'sync up to snapshot: {0}'.format(status)
            if status is True:
                break
            time.sleep(1)  # poll once per second so 300 iterations span ~5 minutes (assumes `time` is imported)
            timeout -= 1
        assert status is True, 'Snapshot not synced to backend within 5 minutes'

        status = False
        timeout = 300
        while timeout > 0:
            status = GeneralVDisk.is_volume_synced_up_to_tlog(vdisk=vdisk, tlog_name=tlog_name)
            print 'sync up to tlog: {0}'.format(status)
            if status is True:
                break
            time.sleep(1)  # poll once per second, matching the 5-minute assertion below
            timeout -= 1
        assert status is True, 'Tlog not synced to backend within 5 minutes'

        GeneralVDisk.delete_volume(vdisk, vpool, loop)
Example #12
    def validate_clone_disk_test():
        """
        Validate vdisk clone method
        """
        disk_name = 'clone-disk'
        clone_disk_name = 'new-cloned-disk'
        test_file_name = 'file-contents'
        test_file_size = 5000
        loop = 'loop0'
        clone_loop = 'loop1'

        vpool = GeneralVPool.get_vpool_by_name(TestVDisk.vpool_name)
        vdisk = GeneralVDisk.create_volume(size=50, vpool=vpool, name=disk_name, loop_device=loop, wait=True)

        TestVDisk.logger.info('clone_disk_test - create initial snapshot')
        GeneralVDisk.create_snapshot(vdisk=vdisk, snapshot_name='snap0')

        TestVDisk.logger.info('clone_disk_test - create 1st {0} GB test file'.format(test_file_size / 1000.0))
        GeneralVDisk.generate_hash_file(full_name='/mnt/{0}/{1}_{2}.txt'.format(loop, test_file_name, '1'), size=test_file_size)

        TestVDisk.logger.info('clone_disk_test - create 2nd {0} GB test file'.format(test_file_size / 1000.0))
        GeneralVDisk.generate_hash_file(full_name='/mnt/{0}/{1}_{2}.txt'.format(loop, test_file_name, '2'), size=test_file_size)

        GeneralVDisk.logger.info(General.execute_command('sync'))

        TestVDisk.logger.info('clone_disk_test - cloning disk')
        cloned_vdisk = GeneralVDisk.clone_volume(vdisk, clone_disk_name)
        TestVDisk.logger.info('clone_disk_test - cloned disk')

        GeneralVDisk.connect_volume(vpool, name=clone_disk_name, loop_device=clone_loop)

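        # md5sum prints '<digest>  <filename>'; keep only the digest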
        md5_sum_1 = General.execute_command('md5sum /mnt/{0}/{1}_{2}.txt'.format(loop, test_file_name, '1'))[0].split('  ')[0]
        md5_sum_2 = General.execute_command('md5sum /mnt/{0}/{1}_{2}.txt'.format(loop, test_file_name, '2'))[0].split('  ')[0]
        md5_clone_1 = General.execute_command('md5sum /mnt/{0}/{1}_{2}.txt'.format(clone_loop, test_file_name, '1'))[0].split('  ')[0]
        md5_clone_2 = General.execute_command('md5sum /mnt/{0}/{1}_{2}.txt'.format(clone_loop, test_file_name, '2'))[0].split('  ')[0]

        GeneralVDisk.disconnect_volume(loop_device=clone_loop)
        GeneralVDisk.delete_volume(VDisk(cloned_vdisk['vdisk_guid']), vpool, wait=True)
        GeneralVDisk.delete_volume(vdisk, vpool, loop, wait=True)

        assert md5_sum_1 == md5_clone_1,\
            'file contents for /mnt/{0}/{1}_{2}.txt are not identical on source and clone!'.format(loop, test_file_name, '1')
        assert md5_sum_2 == md5_clone_2,\
            'file contents for /mnt/{0}/{1}_{2}.txt are not identical on source and clone!'.format(loop, test_file_name, '2')
Example #13
    def ovs_3700_validate_test():
        """
        Validate that gathering scrub work after deleting a snapshot produces scrubber log activity (OVS-3700)
        """
        def _get_scrubber_log_size():
            scrubber_log_name = '/var/log/upstart/ovs-scrubber.log'
            if os.path.exists(scrubber_log_name):
                return os.stat(scrubber_log_name).st_size
            return 0

        loop = 'loop0'
        vpool = GeneralVPool.get_vpool_by_name(TestVDisk.vpool_name)
        vdisk = GeneralVDisk.create_volume(size=2, vpool=vpool, name='ovs-3700-disk', loop_device=loop, wait=True)

        GeneralVDisk.create_snapshot(vdisk=vdisk, snapshot_name='snap0')
        GeneralVDisk.generate_hash_file(full_name='/mnt/{0}/{1}_{2}.txt'.format(loop, vdisk.name, '1'), size=512)

        GeneralVDisk.create_snapshot(vdisk=vdisk, snapshot_name='snap1')
        GeneralVDisk.generate_hash_file(full_name='/mnt/{0}/{1}_{2}.txt'.format(loop, vdisk.name, '2'), size=512)

        GeneralVDisk.delete_snapshot(disk=vdisk, snapshot_name='snap1')

        GeneralVDisk.generate_hash_file(full_name='/mnt/{0}/{1}_{2}.txt'.format(loop, vdisk.name, '3'), size=512)
        GeneralVDisk.create_snapshot(vdisk=vdisk, snapshot_name='snap2')

        pre_scrubber_logsize = _get_scrubber_log_size()
        ScheduledTaskController.gather_scrub_work()
        post_scrubber_logsize = _get_scrubber_log_size()

        GeneralVDisk.delete_volume(vdisk=vdisk, vpool=vpool, loop_device=loop)

        assert post_scrubber_logsize > pre_scrubber_logsize, "Scrubber actions were not logged!"