    def config_files_check_test():
        """
        Verify some configuration files
        """
        issues_found = ''

        config_keys = {
            "/ovs/framework/memcache",
            "/ovs/arakoon/ovsdb/config"
        }

        for key_to_check in config_keys:
            if not Configuration.exists(key_to_check, raw=True):
                issues_found += "Couldn't find {0}\n".format(key_to_check)

        config_files = {
            "rabbitmq.config": "/etc/rabbitmq/rabbitmq.config",
        }
        grid_ip = General.get_config().get('main', 'grid_ip')
        ssh_pass = General.get_config().get('mgmtcenter', 'password')
        client = SSHClient(grid_ip, username='******', password=ssh_pass)
        for config_file_to_check in config_files.iterkeys():
            if not client.file_exists(config_files[config_file_to_check]):
                issues_found += "Couldn't find {0}\n".format(config_file_to_check)

        assert issues_found == '',\
            "Found the following issues while checking for the config files:{0}\n".format(issues_found)
Example #2
    def system_services_check_test():
        """
        Verify some system services
        """
        services_to_commands = {
            "nginx": """ps -efx|grep nginx|grep -v grep""",
            "rabbitmq-server": """ps -ef|grep rabbitmq-|grep -v grep""",
            "memcached": """ps -ef|grep memcached|grep -v grep""",
            "ovs-arakoon-ovsdb": """initctl list| grep ovsdb""",
            "ovs-snmp": """initctl list| grep ovs-snmp""",
            "ovs-support-agent": """initctl list| grep support""",
            "ovs-volumerouter-consumer": """initctl list| grep volumerou""",
            "ovs-watcher-framework": """initctl list| grep watcher-fr"""
        }

        errors = ''
        services_checked = 'Following services found running:\n'
        grid_ip = General.get_config().get('main', 'grid_ip')
        ssh_pass = General.get_config().get('mgmtcenter', 'password')
        client = SSHClient(grid_ip, username='******', password=ssh_pass)

        for service_to_check in services_to_commands.iterkeys():
            out, err = client.run(services_to_commands[service_to_check], debug=True)
            if len(err):
                errors += "Error when trying to run {0}:\n{1}".format(services_to_commands[service_to_check], err)
            else:
                if len(out):
                    services_checked += "{0}\n".format(service_to_check)
                else:
                    errors += "Couldn't find {0} running process\n".format(service_to_check)

        print services_checked
        assert len(errors) == 0, "Found the following errors while checking for the system services:{0}\n".format(errors)
Example #3
    def system_services_check_test():
        """
        Verify some system services
        """
        services_to_commands = {
            "nginx": """ps -efx|grep nginx|grep -v grep""",
            "rabbitmq-server": """ps -ef|grep rabbitmq-|grep -v grep""",
            "memcached": """ps -ef|grep memcached|grep -v grep""",
        }

        errors = ''
        services_checked = 'Following services found running:\n'
        grid_ip = General.get_config().get('main', 'grid_ip')
        ssh_pass = General.get_config().get('mgmtcenter', 'password')
        client = SSHClient(grid_ip, username='******', password=ssh_pass)

        for service_to_check in services_to_commands.iterkeys():
            out, err = client.run(services_to_commands[service_to_check], debug=True, allow_insecure=True,
                                  return_stderr=True)
            if len(err):
                errors += "Error when trying to run {0}:\n{1}".format(services_to_commands[service_to_check], err)
            else:
                if len(out):
                    services_checked += "{0}\n".format(service_to_check)
                else:
                    errors += "Couldn't find {0} running process\n".format(service_to_check)

        for non_running_service in GeneralSystem.list_non_running_ovs_services(grid_ip):
            errors += str(non_running_service)

        assert len(errors) == 0,\
            "Found the following errors while checking for the system services:{0}\n".format(errors)
Example #4
    def remove_alba_namespaces(alba_backend, namespaces=None):
        """
        Remove ALBA namespaces
        :param alba_backend: ALBA backend
        :param namespaces: Name of namespaces to remove
        :return: None
        """
        ns_to_delete = namespaces
        if namespaces is None:
            ns_to_delete = GeneralAlba.list_alba_namespaces(alba_backend=alba_backend)

        cmd_delete = "alba delete-namespace {0} ".format(GeneralAlba.get_abm_config(alba_backend))
        fd_namespaces = []
        for ns in ns_to_delete:
            namespace_name = str(ns['name'])
            if 'fd-' in namespace_name:
                fd_namespaces.append(ns)
                GeneralAlba.logger.info("Skipping vpool namespace: {0}".format(namespace_name))
                continue
            GeneralAlba.logger.info("WARNING: Deleting leftover namespace: {0}".format(str(ns)))
            GeneralAlba.logger.info(General.execute_command(cmd_delete + namespace_name)[0].replace('true', 'True'))

        if namespaces is None:
            for ns in fd_namespaces:
                GeneralAlba.logger.info("WARNING: Deleting leftover vpool namespace: {0}".format(str(ns)))
                GeneralAlba.logger.info(General.execute_command(cmd_delete + str(ns['name']))[0].replace('true', 'True'))
            assert len(fd_namespaces) == 0,\
                "Removing Alba namespaces should not be necessary!"
Example #5
    def list_ovs_services(host=IP):
        """
        List all ovs-* services on the given host, using the init system in use on that host
        :param host: IP of the host to list the services on
        :return: List of service status lines
        """
        if GeneralSystem.INIT_SYSTEM == "init":
            return General.execute_command_on_node(host, "initctl list | grep ovs-*", allow_insecure=True).splitlines()
        elif GeneralSystem.INIT_SYSTEM == "systemd":
            return General.execute_command_on_node(
                host, ["systemctl", "-l", "--no-legend", "--no-pager", "list-units", "ovs-*"]
            ).splitlines()
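The helper above switches on GeneralSystem.INIT_SYSTEM to pick between initctl and systemctl; that flag is defined elsewhere in the framework. A minimal, self-contained sketch (an assumption, not the suite's actual detection code) of how such a value could be derived on Linux from the name of PID 1:

def detect_init_system():
    # PID 1 is named "systemd" on systemd hosts and typically "init" on upstart/sysvinit hosts
    with open('/proc/1/comm', 'r') as pid1:
        name = pid1.read().strip()
    return 'systemd' if name == 'systemd' else 'init'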
Example #6
def setup():
    """
    Setup for DiskLayout package, will be executed when any test in this package is being executed
    Make necessary changes before being able to run the tests
    :return: None
    """
    General.validate_required_config_settings()
Example #7
    def unpartition_disk(disk, partitions=None, wait=True):
        """
        Return disk to RAW state
        :param disk: Disk DAL object
        :param partitions: Partitions DAL object list
        :return: None
        """
        if partitions is None:
            partitions = disk.partitions
        else:
            for partition in partitions:
                if partition not in disk.partitions:
                    raise RuntimeError('Partition {0} does not belong to disk {1}'.format(partition.mountpoint, disk.name))
        if len(disk.partitions) == 0:
            return

        root_client = SSHClient(disk.storagerouter, username='******')
        for partition in partitions:
            General.unmount_partition(root_client, partition)
        root_client.run(['parted', '-s', '/dev/' + disk.name, 'mklabel', 'gpt'])
        GeneralStorageRouter.sync_with_reality(disk.storagerouter)
        counter = 0
        timeout = 60
        while counter < timeout:
            time.sleep(1)
            disk = GeneralDisk.get_disk(guid=disk.guid)
            if len(disk.partitions) == 0:
                break
            counter += 1
        if counter == timeout:
            raise RuntimeError('Removing partitions failed for disk:\n {0} '.format(disk.name))
Example #8
    def __init__(self, ip=None, username=None, password=None, verify=False):
        if ip is None:
            ip = General.get_config().get('main', 'grid_ip')
            assert ip, "Please specify a valid ip in autotests.cfg for grid_ip"
        if username is None:
            username = General.get_config().get('main', 'username')
            assert username, "Please specify a valid username in autotests.cfg"
        if password is None:
            password = General.get_config().get('main', 'password')
            assert password, "Please specify a valid password in autotests.cfg"

        self.ip = ip
        self.username = username
        self.password = password
        self.verify = verify

        self.headers = {'Accept': 'application/json; version=3'}
        if os.path.exists(self.TOKEN_CACHE_FILENAME) \
                and (time.time() - os.path.getmtime(self.TOKEN_CACHE_FILENAME) > 3600.0):
            os.remove(self.TOKEN_CACHE_FILENAME)
        if os.path.exists(self.TOKEN_CACHE_FILENAME):
            with open(self.TOKEN_CACHE_FILENAME, 'r') as token_cache_file:
                self.token = token_cache_file.read()
                self.headers['Authorization'] = 'Bearer {0}'.format(self.token)
        else:
            self.token = ''
            self.authenticate()

        if 'Authorization' not in self.headers.keys():
            self.authenticate()
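The constructor above discards the cached token file once it is older than an hour and re-authenticates when no usable token remains. A minimal, self-contained sketch of that cache check, using a hypothetical helper name (load_cached_token is not part of the suite):

import os
import time


def load_cached_token(cache_path, max_age=3600.0):
    # Hypothetical helper mirroring the logic above: drop a stale cache file,
    # then return the cached token, or None when re-authentication is needed.
    if os.path.exists(cache_path) and (time.time() - os.path.getmtime(cache_path) > max_age):
        os.remove(cache_path)
    if not os.path.exists(cache_path):
        return None
    with open(cache_path, 'r') as token_cache_file:
        return token_cache_file.read()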
Example #9
def setup():
    """
    Setup for Virtual Machine package, will be executed when any test in this package is being executed
    Make necessary changes before being able to run the tests
    :return: None
    """
    General.validate_required_config_settings(settings={'vpool': ['name'],
                                                        'backend': ['name']})

    # Download the template
    cmd = '[ -d {0} ] && echo "Dir exists" || echo "Dir does not exist"'.format(GeneralVMachine.template_target_folder)
    out, err, _ = General.execute_command(cmd)
    if err:
        GeneralVMachine.logger.error("Error while executing command {1}: {0}".format(err, cmd))
    if 'not' not in out:
        General.execute_command('rm -rf {0}'.format(GeneralVMachine.template_target_folder))
        General.execute_command('mkdir {0}'.format(GeneralVMachine.template_target_folder))
    grid_ip = General.get_config().get('main', 'grid_ip')

    if grid_ip.split('.')[0] == '172' and grid_ip.split('.')[1] == '20':
        server_location = 'http://172.20.3.8/templates/openvstorage'
    else:
        server_location = 'http://sso-qpackages-loch.cloudfounders.com/templates/openvstorage'

    GeneralVMachine.logger.info("Getting template from {0}".format(server_location))
    out, err, _ = General.execute_command('wget -P {0} {1}{2}{3}'.format(GeneralVMachine.template_target_folder, server_location, '/fio_debian/', GeneralVMachine.template_image))
    if err:
        GeneralVMachine.logger.error("Error while downloading template: {0}".format(err))
    out, err, _ = General.execute_command('chown root {0}{1}'.format(GeneralVMachine.template_target_folder, GeneralVMachine.template_image))
    if err:
        GeneralVMachine.logger.error("Error while changing user owner to root for template: {0}".format(err))

    GeneralAlba.prepare_alba_backend()
    _, vpool_params = GeneralVPool.add_vpool(vpool_parameters={'preset': GeneralAlba.ONE_DISK_PRESET})
    GeneralVPool.validate_vpool_sanity(expected_settings=vpool_params)
Example #10
def setup():
    """
    Setup for System package, will be executed when any test in this package is being executed
    Make necessary changes before being able to run the tests
    :return: None
    """
    print "setup called " + __name__
    General.cleanup()
Example #11
def setup():
    """
    Setup for Sanity package, will be executed when any test in this package is being executed
    Make necessary changes before being able to run the tests
    :return: None
    """
    print "Setup called " + __name__
    General.validate_required_config_settings()
Example #12
def setup():
    """
    Setup for vPool package, will be executed when any test in this package is being executed
    Make necessary changes before being able to run the tests
    :return: None
    """
    General.validate_required_config_settings(settings={'vpool': ['name', 'type', 'readcache_size', 'writecache_size', 'integrate_mgmt',
                                                                  'storage_ip', 'config_params', 'fragment_cache_on_read', 'fragment_cache_on_write'],
                                                        'backend': ['name']})
    GeneralAlba.prepare_alba_backend()
Example #13
    def get_unused_disks():
        """
        Retrieve all disks not in use
        :return: List of disks not being used
        """
        # @TODO: Make this call possible on all nodes, not only on node executing the tests
        all_disks = General.execute_command("""fdisk -l 2>/dev/null| awk '/Disk \/.*:/ {gsub(":","",$s);print $2}' | grep -v ram""")[0].splitlines()
        out = General.execute_command("df -h | awk '{print $1}'")[0]

        return [d for d in all_disks if d not in out and 'mapper' not in d and not General.execute_command("fuser {0}".format(d))[0]]
Example #14
def setup():
    """
    Setup for VirtualDisk package, will be executed when any test in this package is being executed
    Make necessary changes before being able to run the tests
    :return: None
    """
    General.validate_required_config_settings(settings={'vpool': ['name'],
                                                        'backend': ['name']})
    GeneralAlba.prepare_alba_backend()
    _, vpool_params = GeneralVPool.add_vpool(vpool_parameters={'preset': GeneralAlba.ONE_DISK_PRESET})
    GeneralVPool.validate_vpool_sanity(expected_settings=vpool_params)
Example #15
    def ovs_2493_detect_could_not_acquire_lock_events_test():
        """
        Verify lock errors
        """
        errorlist = ""
        command = "grep -C 1 'Could not acquire lock' /var/log/ovs/lib.log"
        gridips = GeneralPMachine.get_all_ips()

        for gridip in gridips:
            out = General.execute_command_on_node(gridip, command + " | wc -l")
            if out != '0':
                errorlist += "node {0}:\n{1}\n\n".format(gridip, General.execute_command_on_node(gridip, command).splitlines())

        assert len(errorlist) == 0, "Lock errors detected in lib logs on \n" + errorlist
Example #16
def setup():
    """
    Setup for Backend package, will be executed when any test in this package is being executed
    Make necessary changes before being able to run the tests
    :return: None
    """
    General.validate_required_config_settings(settings={'backend': ['name']})
    my_sr = GeneralStorageRouter.get_local_storagerouter()
    if GeneralStorageRouter.has_roles(storagerouter=my_sr, roles='DB') is False:
        GeneralDisk.add_db_role(my_sr)

    alba_backend_name = General.get_config().get('backend', 'name')
    alba_backend = GeneralAlba.get_by_name(alba_backend_name)
    if alba_backend is None:
        GeneralAlba.add_alba_backend(alba_backend_name)
Example #17
def teardown():
    """
    Teardown for VirtualMachine package, will be executed when all started tests in this package have ended
    Removal actions of possible things left over after the test-run
    :return: None
    """
    vpool_name = General.get_config().get('vpool', 'name')
    vpool = GeneralVPool.get_vpool_by_name(vpool_name)
    assert vpool is not None, "No vpool found where one was expected"
    GeneralVMachine.logger.info("Cleaning vpool")
    GeneralVPool.remove_vpool(vpool)

    alba_backend = GeneralAlba.get_by_name(General.get_config().get('backend', 'name'))
    if alba_backend is not None:
        GeneralAlba.unclaim_disks_and_remove_alba_backend(alba_backend=alba_backend)
Example #18
    def load_path(source):
        """
        Retrieve the absolute path for the logfile
        :param source: Source for the logfile
        :return: Absolute path to logfile
        """
        log_path = General.get_config().get('logger', 'path')
        if not os.path.exists(log_path):
            os.mkdir(log_path)
        file_name = LogHandler.targets[source] if source in LogHandler.targets else General.get_config().get('logger', 'default_file')
        log_filename = '{0}/{1}.log'.format(log_path, file_name)
        if not os.path.exists(log_filename):
            open(log_filename, 'a').close()
            os.chmod(log_filename, 0o666)
        return log_filename
Example #19
    def get_hypervisor_type():
        """
        Retrieve type of hypervisor
        :return: Hypervisor type ['KVM'|'VMWARE']
        """
        config = General.get_config()
        return config.get('hypervisor', 'type')
Example #20
    def post_reboot_checks_test():
        """
        Perform service checks after reboot
        """
        rebooted_host = os.environ.get('POST_REBOOT_HOST')
        if not rebooted_host:
            logger.info('Test not setup to run')
            return

        logger.info('Post reboot check node {0}\n'.format(rebooted_host))

        wait_time = 5 * 60
        sleep_time = 5

        non_running_services = ''
        while wait_time > 0:
            out = General.execute_command_on_node(rebooted_host, "initctl list | grep ovs-*")
            statuses = out.splitlines()

            non_running_services = [s for s in statuses if 'start/running' not in s]
            if len(non_running_services) == 0:
                break

            wait_time -= sleep_time
            time.sleep(sleep_time)

        assert len(non_running_services) == 0,\
            "Found non running services after reboot on node {0}\n{1}".format(rebooted_host, non_running_services)
Example #21
    def initialise_disks(alba_backend, nr_of_disks, disk_type):
        """
        Initialize disks
        :param alba_backend: ALBA backend
        :param nr_of_disks: Amount of disks to initialize
        :param disk_type: Type of disks
        :return: None
        """
        # Assume no disks are claimed by a remote environment
        alba_backend.invalidate_dynamics(['storage_stack'])
        storage_stack = alba_backend.storage_stack

        initialised_disks = 0
        uninitialized_disk_names = []
        for disks in storage_stack.values():
            for disk_id, disk in disks.iteritems():
                if disk['status'] == 'initialized':
                    initialised_disks += 1
                elif disk['status'] == 'uninitialized':
                    uninitialized_disk_names.append(disk_id)
        nr_of_disks_to_init = nr_of_disks - initialised_disks
        if nr_of_disks_to_init <= 0:
            return True

        assert len(uninitialized_disk_names) >= nr_of_disks_to_init, "Not enough disks to initialize!"

        disks_to_init = GeneralAlba.filter_disks(uninitialized_disk_names, nr_of_disks_to_init, disk_type)
        assert len(disks_to_init) >= nr_of_disks_to_init, "Not enough disks to initialize!"

        grid_ip = General.get_config().get('main', 'grid_ip')
        alba_node = AlbaNodeList.get_albanode_by_ip(grid_ip)
        failures = AlbaNodeController.initialize_disks(alba_node.guid, dict((disk_id, 1) for disk_id in disks_to_init))
        assert not failures, 'Alba disk initialization failed for (some) disks: {0}'.format(failures)
Example #22
    def prepare_alba_backend(name=None):
        """
        Create an ALBA backend and claim disks
        :param name: Name for the backend
        :return: None
        """
        # @TODO: Fix this, because backend_type should not be configurable if you always create an ALBA backend
        # @TODO 2: Get rid of these asserts, any test (or testsuite) should verify the required params first before starting execution
        autotest_config = General.get_config()
        if name is None:
            name = autotest_config.get('backend', 'name')
        nr_of_disks_to_claim = autotest_config.getint('backend', 'nr_of_disks_to_claim')
        type_of_disks_to_claim = autotest_config.get('backend', 'type_of_disks_to_claim')
        assert name,\
            "Please fill out a valid backend name in autotest.cfg file"

        storage_routers = GeneralStorageRouter.get_storage_routers()
        for sr in storage_routers:
            if GeneralStorageRouter.has_roles(storagerouter=sr, roles='DB') is False:
                GeneralDisk.add_db_role(sr)
            if GeneralStorageRouter.has_roles(storagerouter=sr, roles=['SCRUB', 'WRITE']) is False:
                GeneralDisk.add_write_scrub_roles(sr)
        backend = GeneralBackend.get_by_name(name)
        if not backend:
            alba_backend = GeneralAlba.add_alba_backend(name)
        else:
            alba_backend = backend.alba_backend
        GeneralAlba.claim_asds(alba_backend, nr_of_disks_to_claim, type_of_disks_to_claim)
        if GeneralAlba.has_preset(alba_backend=alba_backend,
                                  preset_name=GeneralAlba.ONE_DISK_PRESET) is False:
            GeneralAlba.add_preset(alba_backend=alba_backend,
                                   name=GeneralAlba.ONE_DISK_PRESET,
                                   policies=[[1, 1, 1, 2]])
Example #23
    def write_to_volume(vdisk=None, vpool=None, location=None, count=1024, bs='1M', input_type='random',
                        root_client=None):
        """
        Write some data to a file
        :param vdisk: Virtual disk to write on
        :param vpool: vPool which hosts the Virtual Disk
        :param location: Absolute path to file
        :param count: amount of blocks to write
        :param bs: Size of the blocks to write
        :param input_type: Type of input (null, zero, random)
        :param root_client: SSHClient object
        :return: None
        """
        if location is None and (vdisk is None or vpool is None):
            raise ValueError('vDisk and vPool must be provided if no location has been provided')

        if location is None:
            location = GeneralVDisk.get_filesystem_location(vpool=vpool,
                                                            vdisk_name=vdisk.name)
        if root_client is None:
            root_client = SSHClient('127.0.0.1', username='******')

        if input_type not in ('null', 'zero', 'random'):
            raise ValueError('Invalid input type provided')
        if General.check_file_is_link(location, root_client.ip, root_client.username, root_client.password):
            print "Writing to {0}".format(root_client.file_read_link(location))
        else:
            if not root_client.file_exists(location):
                raise ValueError('File {0} does not exist on Storage Router {1}'.format(location, root_client.ip))
        if not isinstance(count, int) or count < 1:
            raise ValueError('Count must be an integer > 0')
        root_client.run('dd conv=notrunc if=/dev/{0} of={1} bs={2} count={3}'.format(input_type, location, bs, count))
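A hypothetical call to the method above (the import path and the target file name are assumptions, not taken from the suite): writing 10 blocks of 1M random data to an explicit location, so no vDisk/vPool lookup is needed.

from ci.tests.general.general_vdisk import GeneralVDisk

# Assumed location on an already mounted vPool filesystem
GeneralVDisk.write_to_volume(location='/mnt/testvpool/testdisk01.raw',
                             count=10,
                             bs='1M',
                             input_type='random')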
Example #24
    def ovs_2053_check_for_alba_warnings_test():
        """
        Check ALBA warning presence
        """
        out = General.execute_command_on_node('127.0.0.1', 'grep "warning: syncfs" /var/log/upstart/*-asd-*.log | wc -l')
        assert out == '0', \
            "syncfs warnings detected in asd logs:\n{0}".format(out.splitlines())
Example #25
    def shrink_vpool(storage_driver):
        """
        Remove a Storage Driver from a vPool
        :param storage_driver: Storage Driver to remove from the vPool
        :return: The vPool, re-fetched by name after the Storage Driver has been removed
        """
        vpool = storage_driver.vpool
        if GeneralHypervisor.get_hypervisor_type() == "VMWARE":
            root_client = SSHClient(storage_driver.storagerouter, username="******")
            if storage_driver.mountpoint in General.get_mountpoints(root_client):
                root_client.run(["umount", storage_driver.mountpoint])
        task_result = GeneralVPool.api.execute_post_action(
            component="vpools",
            guid=vpool.guid,
            action="shrink_vpool",
            data={"storagerouter_guid": storage_driver.storagerouter.guid},
            wait=True,
            timeout=GeneralVPool.TIMEOUT_ADD_VPOOL,
        )
        if task_result[0] is not True:
            raise RuntimeError(
                "Storage Driver with ID {0} was not successfully removed from vPool {1}".format(
                    storage_driver.storagedriver_id, vpool.name
                ),
                task_result,
            )
        return GeneralVPool.get_vpool_by_name(vpool_name=vpool.name)
Example #26
def list_tests(args=None, with_plugin=False):
    """
    Lists all the tests that nose detects under TESTS_DIR
    :param args: Extra arguments for listing tests
    :param with_plugin: Use the --with-testEnum plugin
    """
    if not args:
        arguments = ['--where', General.TESTS_DIR, '--verbosity', '3', '--collect-only']
    else:
        arguments = args + ['--collect-only']

    if with_plugin is True:
        arguments.append('--with-testEnum')

        fake_stdout = StringIO.StringIO()
        old_stdout = sys.stdout
        sys.stdout = fake_stdout

        try:
            nose.run(argv=arguments, addplugins=[testEnum.TestEnum()])
        except Exception:
            raise
        finally:
            sys.stdout = old_stdout

        return fake_stdout.getvalue().split()

    testcases = []
    for line in General.execute_command(command='nosetests {0}'.format(' '.join(arguments)))[1].splitlines():
        if line.startswith('ci.tests'):
            testcases.append(line.split(' ... ')[0])
    return testcases
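A hypothetical invocation of list_tests (values are illustrative): collecting the test names once by parsing plain nosetests output and once via the testEnum plugin, which returns them from the captured stdout.

parsed_tests = list_tests()
plugin_tests = list_tests(with_plugin=True)
print(sorted(plugin_tests))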
Example #27
    def execute_alba_cli_action(alba_backend, action, params=None, json_output=True):
        """
        Execute an ALBA CLI command
        :param alba_backend: ALBA backend
        :param action: Action to execute
        :param params: Parameters to pass to the action
        :param json_output: Return JSON output
        :return: Output of the action
        """
        config = GeneralAlba.get_abm_config(alba_backend)
        cmd = ['alba', action]
        cmd.extend(config)
        if json_output:
            cmd.append('--to-json')
        if params is None:
            params = []
        cmd.extend(params)

        GeneralAlba.logger.info('Running alba cli command: {0}'.format(cmd))
        output = ''
        try:
            output, error, exit_code = General.execute_command(' '.join(cmd))
            if exit_code != 0:
                GeneralAlba.logger.error('Exit code: {0}'.format(exit_code))
                GeneralAlba.logger.error('Error thrown: {0}'.format(error))
                raise RuntimeError('ALBA command failed with exitcode {0} and error {1}'.format(exit_code, error))
            if json_output is True:
                return json.loads(output)['result']
            return output
        except (ValueError, RuntimeError):
            GeneralAlba.logger.error("Command {0} failed:\nOutput: {1}".format(cmd, output))
            raise RuntimeError("Command {0} failed:\nOutput: {1}".format(cmd, output))
Example #28
    def filter_disks(disk_names, amount, disk_type):
        """
        Filter the available disks
        :param disk_names: Disks to filter
        :param amount: Amount to retrieve
        :param disk_type: Type of disk
        :return: Filtered disks
        """
        grid_ip = General.get_config().get('main', 'grid_ip')
        storagerouter = GeneralStorageRouter.get_storage_router_by_ip(ip=grid_ip)
        root_client = SSHClient(storagerouter, username='******')
        hdds, ssds = GeneralDisk.get_physical_disks(client=root_client)
        count = 0
        filtered_disks = list()

        if disk_type == 'SATA':
            list_to_check = hdds.values()
        elif disk_type == 'SSD':
            list_to_check = ssds.values()
        else:
            hdds.update(ssds)
            list_to_check = hdds.values()

        for disk_name in disk_names:
            for disk in list_to_check:
                if disk_name == disk['name']:
                    filtered_disks.append(disk['name'])
                    count += 1
            if count == amount:
                break

        return filtered_disks
Example #29
def setup():
    """
    Setup for Arakoon package, will be executed when any test in this package is being executed
    Make necessary changes before being able to run the tests
    :return: None
    """
    autotest_config = General.get_config()
    backend_name = autotest_config.get('backend', 'name')
    assert backend_name, 'Please fill out a backend name in the autotest.cfg file'
    backend = GeneralBackend.get_by_name(backend_name)
    if backend is not None:
        GeneralAlba.remove_alba_backend(backend.alba_backend)

    for storagerouter in GeneralStorageRouter.get_masters():
        root_client = SSHClient(storagerouter, username='******')
        if GeneralService.get_service_status(name='ovs-scheduled-tasks',
                                             client=root_client) is True:
            GeneralService.stop_service(name='ovs-scheduled-tasks',
                                        client=root_client)

    storagerouters = GeneralStorageRouter.get_storage_routers()
    for sr in storagerouters:
        root_client = SSHClient(sr, username='******')
        GeneralDisk.add_db_role(sr)

        for location in TEST_CLEANUP:
            root_client.run('rm -rf {0}'.format(location))

    GeneralAlba.add_alba_backend(backend_name)
    GeneralArakoon.voldrv_arakoon_checkup()
Example #30
def teardown():
    """
    Teardown for Arakoon package, will be executed when all started tests in this package have ended
    Removal actions of possible things left over after the test-run
    :return: None
    """
    autotest_config = General.get_config()
    backend_name = autotest_config.get('backend', 'name')
    backend = GeneralBackend.get_by_name(backend_name)
    if backend is not None:
        GeneralAlba.remove_alba_backend(backend.alba_backend)

    for storagerouter in GeneralStorageRouter.get_masters():
        root_client = SSHClient(storagerouter, username='******')
        if GeneralService.get_service_status(name='ovs-scheduled-tasks',
                                             client=root_client) is False:
            GeneralService.start_service(name='ovs-scheduled-tasks',
                                         client=root_client)

        for location in TEST_CLEANUP:
            root_client.run('rm -rf {0}'.format(location))

    for key in KEY_CLEANUP:
        if EtcdConfiguration.exists('{0}/{1}'.format(GeneralArakoon.ETCD_CONFIG_ROOT, key), raw=True):
            EtcdConfiguration.delete('{0}/{1}'.format(GeneralArakoon.ETCD_CONFIG_ROOT, key))