def _get_env_info():
    """
    Build an environment descriptor for the testrail title: the number of
    Storage Router nodes followed by the last two octets of the local
    Storage Router's IP, e.g. '3N-12.34'.
    """
    node_count = len(GeneralStorageRouter.get_storage_routers())
    ip_octets = GeneralStorageRouter.get_local_storagerouter().ip.split('.')
    return '{0}N-{1}.{2}'.format(node_count, ip_octets[2], ip_octets[3])
def prepare_alba_backend(name=None):
    """
    Create an ALBA backend (re-using an existing one with the same name) and claim disks for it
    :param name: Name for the backend; falls back to the 'backend' section of autotest.cfg
    :return: None
    """
    # @TODO: Fix this, because backend_type should not be configurable if you always create an ALBA backend
    # @TODO 2: Get rid of these asserts, any test (or testsuite) should verify the required params first before starting execution
    config = General.get_config()
    if name is None:
        name = config.get('backend', 'name')
    disks_to_claim = config.getint('backend', 'nr_of_disks_to_claim')
    disk_type = config.get('backend', 'type_of_disks_to_claim')
    assert name, "Please fill out a valid backend name in autotest.cfg file"

    # Make sure the local Storage Router carries the roles a backend requires
    local_sr = GeneralStorageRouter.get_local_storagerouter()
    if GeneralStorageRouter.has_roles(storagerouter=local_sr, roles='DB') is False:
        GeneralDisk.add_db_role(local_sr)
    if GeneralStorageRouter.has_roles(storagerouter=local_sr, roles=['READ', 'SCRUB', 'WRITE']) is False:
        GeneralDisk.add_read_write_scrub_roles(local_sr)

    # Re-use the backend when one with this name is already modelled, otherwise create it
    existing = GeneralBackend.get_by_name(name)
    alba_backend = existing.alba_backend if existing else GeneralAlba.add_alba_backend(name)
    GeneralAlba.claim_asds(alba_backend, disks_to_claim, disk_type)
def add_remove_distributed_vpool_test():
    """
    Create a vPool with 'distributed' BackendType and remove it
    Related ticket: http://jira.cloudfounders.com/browse/OVS-4050

    Flow: find an unused disk, partition and mount it, create a distributed
    vPool on that mountpoint, validate it, then remove the vPool and the
    partition again and verify cleanup.
    """
    # Verify if an unused disk is available to mount; without one the test is a no-op
    unused_disks = GeneralDisk.get_unused_disks()
    if len(unused_disks) == 0:
        logger.info('No available disks found to mount locally for the distributed backend')
        return

    # Raise if vPool already exists (would indicate leftovers from a previous run)
    vpool_name = 'add-remove-distr-vpool'
    vpool = GeneralVPool.get_vpool_by_name(vpool_name=vpool_name)
    if vpool is not None:
        raise RuntimeError('vPool with name "{0}" still exists'.format(vpool_name))

    unused_disk = unused_disks[0]
    if not unused_disk.startswith('/dev/'):
        raise ValueError('Unused disk must be absolute path')

    # Create a partition spanning the disk
    local_sr = GeneralStorageRouter.get_local_storagerouter()
    disk = GeneralDisk.get_disk_by_devicename(storagerouter=local_sr, device_name=unused_disk)
    partition = GeneralDisk.partition_disk(disk=disk)

    # Mount the unused disk if the partition is not mounted yet
    if partition.mountpoint is None:
        GeneralDisk.configure_disk(storagerouter=local_sr, disk=disk, offset=0, size=disk.size, roles=[], partition=partition)
        partition.discard()  # Re-initializes the object so the fresh mountpoint is read from the model

    # Add vPool on the new mountpoint and validate health
    vpool, vpool_params = GeneralVPool.add_vpool(vpool_parameters={'vpool_name': vpool_name,
                                                                   'type': 'distributed',
                                                                   'distributed_mountpoint': partition.mountpoint})
    assert vpool is not None, 'vPool {0} was not created'.format(vpool_name)
    GeneralVPool.validate_vpool_sanity(expected_settings=vpool_params)

    # Retrieve vPool information before removal; needed for the cleanup check afterwards
    guid = vpool.guid
    name = vpool.name
    backend_type = vpool.backend_type.code
    files = GeneralVPool.get_related_files(vpool)
    directories = GeneralVPool.get_related_directories(vpool)
    storagerouters = [sd.storagerouter for sd in vpool.storagedrivers]

    # Remove vPool and validate removal on all involved Storage Routers
    GeneralVPool.remove_vpool(vpool=vpool)
    vpool = GeneralVPool.get_vpool_by_name(vpool_name=vpool_name)
    assert vpool is None, 'vPool {0} was not deleted'.format(vpool_name)
    GeneralVPool.check_vpool_cleanup(vpool_info={'guid': guid,
                                                 'name': name,
                                                 'type': backend_type,
                                                 'files': files,
                                                 'directories': directories},
                                     storagerouters=storagerouters)
    GeneralDisk.unpartition_disk(disk)
def ovs_2263_verify_alba_namespace_cleanup_test(): """ Verify ALBA namespace cleanup Create an amount of namespaces in ALBA Create a vPool and create some volumes Verify the amount of namespaces before and after vPool creation Remove the vPool and the manually created namespaces Verify the amount of namespaces before and after vPool deletion """ # Create some namespaces in alba no_namespaces = 3 backend_name = General.get_config().get('backend', 'name') backend = GeneralBackend.get_by_name(name=backend_name) namespace_name = 'autotest-ns_' namespace_name_regex = re.compile('^autotest-ns_\d$') for nmspc_index in range(no_namespaces): GeneralAlba.execute_alba_cli_action(backend.alba_backend, 'create-namespace', ['{0}{1}'.format(namespace_name, nmspc_index), 'default'], False) result = GeneralAlba.list_alba_namespaces(alba_backend=backend.alba_backend, name=namespace_name_regex) assert len(result) == no_namespaces, "Expected {0} namespaces present on the {1} backend, found {2}".format(no_namespaces, backend_name, len(result)) # Create a vPool and create volumes on it vpool, _ = GeneralVPool.add_vpool() root_client = SSHClient(GeneralStorageRouter.get_local_storagerouter(), username='******') if vpool.storagedrivers[0].storagerouter.pmachine.hvtype == 'VMWARE': GeneralVPool.mount_vpool(vpool=vpool, root_client=root_client) vdisks = [] for disk_index in range(no_namespaces): vdisks.append(GeneralVDisk.create_volume(size=10, vpool=vpool, root_client=root_client)) result = GeneralAlba.list_alba_namespaces(alba_backend=backend.alba_backend) assert len(result) == 2 * no_namespaces + 1, "Expected {0} namespaces present on the {1} backend, found {2}".format(2 * no_namespaces + 1, backend_name, len(result)) # Remove files and vPool for vdisk in vdisks: GeneralVDisk.delete_volume(vdisk=vdisk, vpool=vpool, root_client=root_client) if vpool.storagedrivers[0].storagerouter.pmachine.hvtype == 'VMWARE': GeneralVPool.unmount_vpool(vpool=vpool, root_client=root_client) 
GeneralVPool.remove_vpool(vpool) # Verify amount of namespaces result = GeneralAlba.list_alba_namespaces(alba_backend=backend.alba_backend, name=namespace_name_regex) assert len(result) == no_namespaces, "Expected {0} namespaces present on the {1} backend, found {2}".format(no_namespaces, backend_name, len(result)) for namespace in result: GeneralAlba.execute_alba_cli_action(backend.alba_backend, 'delete-namespace', [namespace['name']], False) result = GeneralAlba.list_alba_namespaces(alba_backend=backend.alba_backend, name=namespace_name_regex) assert len(result) == 0, "Expected no namespaces present on the {1} backend, found {2}".format(no_namespaces, backend_name, len(result))
def setup():
    """
    Setup for Backend package, will be executed when any test in this package is being executed
    Make necessary changes before being able to run the tests
    :return: None
    """
    config = General.get_config()
    name = config.get('backend', 'name')
    assert name, "Please fill out a valid backend name in autotest.cfg file"

    # A DB role on the local Storage Router is a prerequisite for these tests
    local_sr = GeneralStorageRouter.get_local_storagerouter()
    if GeneralStorageRouter.has_roles(storagerouter=local_sr, roles='DB') is False:
        GeneralDisk.add_db_role(local_sr)
def setup():
    """
    Setup for Backend package, will be executed when any test in this package is being executed
    Make necessary changes before being able to run the tests
    :return: None
    """
    General.validate_required_config_settings(settings={'backend': ['name']})

    # A DB role on the local Storage Router is a prerequisite for these tests
    local_sr = GeneralStorageRouter.get_local_storagerouter()
    if GeneralStorageRouter.has_roles(storagerouter=local_sr, roles='DB') is False:
        GeneralDisk.add_db_role(local_sr)

    # Create the configured ALBA backend when it is not modelled yet
    backend_name = General.get_config().get('backend', 'name')
    if GeneralAlba.get_by_name(backend_name) is None:
        GeneralAlba.add_alba_backend(backend_name)
def ovs_2703_kill_various_services_test():
    """
    Kill various services and see if they recover
    """
    # @TODO 1: This test does not belong in the vPool tests, its a service test which happens to create a vPool
    # @TODO 2: Make test smarter to test all required services on all node types
    vpool = GeneralVPool.get_vpool_by_name(General.get_config().get('vpool', 'name'))
    if vpool is None:
        vpool, _ = GeneralVPool.add_vpool()

    issues = []
    client = SSHClient(GeneralStorageRouter.get_local_storagerouter(), username='******')
    for svc in GeneralService.get_all_service_templates():
        # Skip services which are not installed on this node
        if GeneralService.has_service(name=svc, client=client) is False:
            continue
        if GeneralService.get_service_status(name=svc, client=client) is False:
            issues.append('Service {0} not found in running state'.format(svc))
            continue

        old_pid = GeneralService.get_service_pid(name=svc, client=client)
        if old_pid == -1:
            issues.append('Service {0} has unknown PID before being killed'.format(svc))
            continue

        GeneralService.kill_service(name=svc, client=client)
        time.sleep(5)  # Give the service manager a chance to restart the service
        if GeneralService.get_service_status(name=svc, client=client) is False:
            issues.append('Service {0} not found in running state after killing it'.format(svc))
            continue

        new_pid = GeneralService.get_service_pid(name=svc, client=client)
        if new_pid == -1:
            issues.append('Service {0} has unknown PID after being killed'.format(svc))
            continue
        # An unchanged PID means the kill never reached the process
        if old_pid == new_pid:
            issues.append('Kill command did not work on service {0}'.format(svc))

    GeneralVPool.remove_vpool(vpool)
    assert len(issues) == 0, "Following issues were found with the services:\n - {0}".format('\n - '.join(issues))
def add_vpool(vpool_parameters=None, storagerouters=None):
    """
    Create a vPool based on the kwargs provided or default parameters found in the autotest.cfg
    :param vpool_parameters: Parameters to be used for vPool creation
    :type vpool_parameters: dict
    :param storagerouters: Guids of the Storage Routers on which to create and extend this vPool
    :type storagerouters: list
    :return: Created or extended vPool
    :rtype: VPool
    """
    if storagerouters is None:
        storagerouters = [GeneralStorageRouter.get_local_storagerouter()]
    if vpool_parameters is None:
        vpool_parameters = {}
    if not isinstance(storagerouters, list) or len(storagerouters) == 0:
        raise ValueError('Storage Routers should be a list and contain at least 1 element to add a vPool on')

    # Pre-compute the API payload for every Storage Router before firing any call
    params_per_sr = {}
    for sr in storagerouters:
        params_per_sr[sr] = GeneralVPool.get_add_vpool_params(storagerouter=sr, **vpool_parameters)

    vpool_name = None
    for index, sr in enumerate(storagerouters):
        vpool_name = params_per_sr[sr]['vpool_name']
        # The first call creates the vPool, every subsequent call extends it to another node
        task_result = GeneralVPool.api.execute_post_action(component='storagerouters',
                                                           guid=sr.guid,
                                                           action='add_vpool',
                                                           data={'call_parameters': params_per_sr[sr]},
                                                           wait=True,
                                                           timeout=500)
        if task_result[0] is not True:
            raise RuntimeError('vPool was not {0} successfully: {1}'.format('extended' if index > 0 else 'created', task_result[1]))

    vpool = GeneralVPool.get_vpool_by_name(vpool_name)
    if vpool is None:
        raise RuntimeError('vPool with name {0} could not be found in model'.format(vpool_name))
    return vpool, params_per_sr
# Open vStorage is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY of any kind. """ A general class dedicated to OpenStack and DevStack logic """ import os from ci.tests.general.general import General from ci.tests.general.general_storagerouter import GeneralStorageRouter # Setup environment os.environ["OS_USERNAME"] = "******" os.environ["OS_PASSWORD"] = "******" os.environ["OS_TENANT_NAME"] = "admin" os.environ["OS_AUTH_URL"] = "http://{0}:35357/v2.0".format(GeneralStorageRouter.get_local_storagerouter().ip) class GeneralOpenStack(object): """ A general class dedicated to OpenStack and DevStack logic """ @staticmethod def is_openstack_present(): """ Check if OpenStack is installed :return: Return True if OpenStack is installed """ return bool(General.execute_command("ps aux | awk '/keystone/ && !/awk/'")[0])
def fdl_0002_add_remove_partition_with_role_and_crosscheck_model_test():
    """
    FDL-0002 - create/remove disk partition using full disk and verify ovs model
    - look for an unused disk
    - add a partition using full disk and assign a DB role to the partition
    - validate ovs model is correctly updated with DB role
    - cleanup that partition
    - verify ovs model is correctly updated
    """
    if TestFlexibleDiskLayout.continue_testing.state is False:
        logger.info('Test suite signaled to stop')
        return
    my_sr = GeneralStorageRouter.get_local_storagerouter()

    unused_disks = GeneralDisk.get_unused_disks()
    if not unused_disks:
        logger.info("At least one unused disk should be available for partition testing")
        return

    # Map modelled local disks by their /dev path, SSDs first
    hdds = dict()
    ssds = dict()
    mdisks = GeneralDisk.get_disks()
    for disk in mdisks:
        if disk.storagerouter_guid == my_sr.guid:
            if disk.is_ssd:
                ssds['/dev/' + disk.name] = disk
            else:
                hdds['/dev/' + disk.name] = disk
    all_disks = dict(ssds)
    all_disks.update(hdds)

    # check no partitions are modelled for unused disks
    partitions = GeneralDisk.get_disk_partitions()
    partitions_detected = False
    disk_guid = ''
    for path in unused_disks:
        # @TODO: remove the if when ticket OVS-4503 is solved
        if path in all_disks:
            disk_guid = all_disks[path].guid
            for partition in partitions:
                if partition.disk_guid == disk_guid:
                    partitions_detected = True
    assert partitions_detected is False, 'Existing partitions detected on unused disks!'

    # try partition a disk using it's full reported size
    disk = all_disks[unused_disks[0]]
    GeneralDisk.configure_disk(storagerouter=my_sr, disk=disk, offset=0, size=int(disk.size), roles=['WRITE'])

    # lookup partition in model
    mountpoint = None
    partitions = GeneralDisk.get_disk_partitions()
    for partition in partitions:
        if partition.disk_guid == disk.guid and 'WRITE' in partition.roles:
            mountpoint = partition.mountpoint
            break

    # Strip the role again; 'partition' is the one found above (loop exits via break)
    GeneralDisk.configure_disk(storagerouter=my_sr, disk=disk, offset=0, partition=partition, size=int(disk.size), roles=[])

    # cleanup disk partition
    # NOTE(review): if no matching partition was found, mountpoint is still None
    # here and the umount command would be built with 'None'; the assert on
    # mountpoint only runs at the very end — consider asserting earlier
    cmd = 'umount {0}; rmdir {0}; echo 0'.format(mountpoint)
    General.execute_command_on_node(my_sr.ip, cmd, allow_insecure=True)
    cmd = ['parted', '-s', '/dev/' + disk.name, 'rm', '1']
    General.execute_command_on_node(my_sr.ip, cmd, allow_nonzero=True)

    # wipe partition table to be able to reuse this disk in another test
    GeneralVDisk.write_to_volume(location=disk.aliases[0], count=64, bs='1M', input_type='zero')
    GeneralStorageRouter.sync_with_reality()

    # verify partition no longer exists in ovs model
    is_partition_removed = True
    partitions = GeneralDisk.get_disk_partitions()
    for partition in partitions:
        if partition.disk_guid == disk_guid and 'WRITE' in partition.roles:
            is_partition_removed = False
            break

    assert is_partition_removed is True,\
        'New partition was not deleted successfully from system/model!'
    assert mountpoint, 'New partition was not detected in model'
def test_basic_logrotate():
    """
    Verify current openvstorage logrotate configuration
    Apply the openvstorage logrotate on custom logfile and see if it rotates as predicted
    Update ownership of custom file and verify logrotate raises issue

    Fix: the assertion messages in the 2nd configuration-entry section claimed
    the files should be owned by "root" while the assertions actually check for
    "ovs" (the 2nd logrotate entry uses 'create 666 ovs ovs'); the messages now
    match the asserted owner/group.
    """
    storagerouters = GeneralStorageRouter.get_storage_routers()
    # Template with two entries: {0} rotates as root with copytruncate,
    # {1} rotates as user/group ovs with delaycompress and an arakoon postrotate hook
    logrotate_content = """{0} {{
    rotate 5
    size 20M
    compress
    copytruncate
    notifempty
}}

{1} {{
    su ovs ovs
    rotate 10
    size 19M
    compress
    delaycompress
    notifempty
    create 666 ovs ovs
    postrotate
        /usr/bin/pkill -SIGUSR1 arakoon
    endscript
}}"""
    if len(storagerouters) == 0:
        raise ValueError('No Storage Routers found in the model')

    logrotate_include_dir = '/etc/logrotate.d'
    logrotate_cfg_file = '/etc/logrotate.conf'
    logrotate_cron_file = '/etc/cron.daily/logrotate'
    logrotate_ovs_file = '{0}/openvstorage-logs'.format(logrotate_include_dir)
    expected_logrotate_content = logrotate_content.format('/var/log/ovs/*.log', '/var/log/arakoon/*/*.log')

    # Verify basic logrotate configurations on every Storage Router
    for storagerouter in storagerouters:
        root_client = SSHClient(endpoint=storagerouter, username='******')
        assert_true(expr=root_client.file_exists(filename=logrotate_cfg_file),
                    msg='Logrotate config {0} does not exist on Storage Router {1}'.format(logrotate_cfg_file, storagerouter.name))
        assert_true(expr=root_client.file_exists(filename=logrotate_ovs_file),
                    msg='Logrotate file {0} does not exist on Storage Router {1}'.format(logrotate_ovs_file, storagerouter.name))
        assert_true(expr=root_client.file_exists(filename=logrotate_cron_file),
                    msg='Logrotate file {0} does not exist on Storage Router {1}'.format(logrotate_cron_file, storagerouter.name))
        assert_true(expr='include {0}'.format(logrotate_include_dir) in root_client.file_read(filename=logrotate_cfg_file).splitlines(),
                    msg='Logrotate on Storage Router {0} does not include {1}'.format(storagerouter.name, logrotate_include_dir))
        assert_true(expr='/usr/sbin/logrotate /etc/logrotate.conf' in root_client.file_read(filename=logrotate_cron_file).splitlines(),
                    msg='Logrotate will not be executed on Storage Router {0}'.format(storagerouter.name))
        actual_file_contents = root_client.file_read(filename=logrotate_ovs_file).rstrip('\n')
        assert_equal(first=expected_logrotate_content,
                     second=actual_file_contents,
                     msg='Logrotate contents does not match expected contents on Storage Router {0}'.format(storagerouter.name))

    # Create custom logrotate file for testing purposes
    custom_logrotate_cfg_file = '/opt/OpenvStorage/ci/logrotate-conf'
    custom_logrotate_dir = '/opt/OpenvStorage/ci/logrotate'
    custom_logrotate_file1 = '{0}/logrotate_test_file1.log'.format(custom_logrotate_dir)
    custom_logrotate_file2 = '{0}/logrotate_test_file2.log'.format(custom_logrotate_dir)
    custom_logrotate_content = logrotate_content.format(custom_logrotate_file1, custom_logrotate_file2)
    local_sr = GeneralStorageRouter.get_local_storagerouter()
    root_client = SSHClient(endpoint=local_sr, username='******')
    root_client.file_write(filename=custom_logrotate_cfg_file, contents=custom_logrotate_content)

    # No logfile present --> logrotate should fail
    assert_raises(excClass=CalledProcessError,
                  callableObj=root_client.run,
                  command='logrotate {0}'.format(custom_logrotate_cfg_file))

    ##########################################
    # Test 1st logrotate configuration entry #
    ##########################################
    root_client.dir_create(directories=custom_logrotate_dir)
    root_client.dir_chown(directories=custom_logrotate_dir, user='******', group='ovs', recursive=True)
    root_client.run(command='touch {0}'.format(custom_logrotate_file1))
    root_client.run(command='touch {0}'.format(custom_logrotate_file2))
    root_client.file_chmod(filename=custom_logrotate_file1, mode=666)
    root_client.file_chmod(filename=custom_logrotate_file2, mode=666)

    # Write data to the file less than size for rotation and verify no rotation occurs
    GeneralVDisk.write_to_volume(location=custom_logrotate_file1, count=15, bs='1M', input_type='zero', root_client=root_client)
    root_client.run('logrotate {0}'.format(custom_logrotate_cfg_file))
    assert_equal(first=len(root_client.file_list(directory=custom_logrotate_dir)),
                 second=2,
                 msg='More files than expected present in {0}'.format(custom_logrotate_dir))

    # Write data to file larger than size in configuration and verify amount of rotations
    # 'rotate 5' caps the rotated files at .5.gz, hence the conditional expressions
    files_to_delete = []
    for counter in range(7):
        expected_file = '{0}.{1}.gz'.format(custom_logrotate_file1, counter + 1 if counter < 5 else 5)
        GeneralVDisk.write_to_volume(location=custom_logrotate_file1, count=30, bs='1M', input_type='zero', root_client=root_client)
        root_client.run('logrotate {0}'.format(custom_logrotate_cfg_file))
        assert_equal(first=len(root_client.file_list(directory=custom_logrotate_dir)),
                     second=counter + 3 if counter < 5 else 7,
                     msg='Not the expected amount of files present in {0}'.format(custom_logrotate_dir))
        assert_true(expr=root_client.file_exists(filename=expected_file),
                    msg='Logrotate did not create the expected file {0}'.format(expected_file))
        user_info = General.get_owner_group_for_path(path=expected_file, root_client=root_client)
        assert_equal(first='root',
                     second=user_info['user']['name'],
                     msg='Expected file to be owned by user "root", but instead its owned by "{0}"'.format(user_info['user']['name']))
        assert_equal(first='root',
                     second=user_info['group']['name'],
                     msg='Expected file to be owned by group "root", but instead its owned by "{0}"'.format(user_info['group']['name']))
        files_to_delete.append(expected_file)
    root_client.file_delete(filenames=files_to_delete)

    ##########################################
    # Test 2nd logrotate configuration entry #
    ##########################################
    root_client.file_chown(filenames=custom_logrotate_file2, user='******', group='ovs')

    # Write data to the file less than size for rotation and verify no rotation occurs
    GeneralVDisk.write_to_volume(location=custom_logrotate_file2, count=15, bs='1M', input_type='zero', root_client=root_client)
    root_client.run('logrotate {0}'.format(custom_logrotate_cfg_file))
    assert_equal(first=len(root_client.file_list(directory=custom_logrotate_dir)),
                 second=2,
                 msg='More files than expected present in {0}'.format(custom_logrotate_dir))

    # Write data to file larger than size in configuration and verify amount of rotations
    # 'rotate 10' caps the rotated files at .10.gz
    for counter in range(12):
        if counter == 0:  # Delaycompress --> file is not compressed during initial cycle
            expected_file = '{0}.1'.format(custom_logrotate_file2)
        else:
            expected_file = '{0}.{1}.gz'.format(custom_logrotate_file2, counter + 1 if counter < 10 else 10)
        GeneralVDisk.write_to_volume(location=custom_logrotate_file2, count=30, bs='1M', input_type='zero', root_client=root_client)
        root_client.run('logrotate {0}'.format(custom_logrotate_cfg_file))
        assert_equal(first=len(root_client.file_list(directory=custom_logrotate_dir)),
                     second=counter + 3 if counter < 10 else 12,
                     msg='Not the expected amount of files present in {0}'.format(custom_logrotate_dir))
        assert_true(expr=root_client.file_exists(filename=expected_file),
                    msg='Logrotate did not create the expected file {0}'.format(expected_file))
        user_info = General.get_owner_group_for_path(path=expected_file, root_client=root_client)
        # Fixed messages: the 2nd entry creates files as ovs:ovs, not root:root
        assert_equal(first='ovs',
                     second=user_info['user']['name'],
                     msg='Expected file to be owned by user "ovs", but instead its owned by "{0}"'.format(user_info['user']['name']))
        assert_equal(first='ovs',
                     second=user_info['group']['name'],
                     msg='Expected file to be owned by group "ovs", but instead its owned by "{0}"'.format(user_info['group']['name']))

    root_client.dir_delete(directories=custom_logrotate_dir)
    root_client.file_delete(filenames=custom_logrotate_cfg_file)