    def config_files_check_test():
        """
        Verify some configuration files
        """
        issues_found = ''

        config_keys = {
            "/ovs/framework/memcache",
            "/ovs/arakoon/ovsdb/config"
        }

        for key_to_check in config_keys:
            if not Configuration.exists(key_to_check, raw=True):
                issues_found += "Couldn't find {0}\n".format(key_to_check)

        config_files = {
            "rabbitmq.config": "/etc/rabbitmq/rabbitmq.config",
        }
        grid_ip = General.get_config().get('main', 'grid_ip')
        ssh_pass = General.get_config().get('mgmtcenter', 'password')
        client = SSHClient(grid_ip, username='******', password=ssh_pass)
        for config_file_to_check in config_files.iterkeys():
            if not client.file_exists(config_files[config_file_to_check]):
                issues_found += "Couldn't find {0}\n".format(config_file_to_check)

        assert issues_found == '',\
            "Found the following issues while checking for the config files:{0}\n".format(issues_found)
    def system_services_check_test():
        """
        Verify some system services
        """
        services_to_commands = {
            "nginx": """ps -efx|grep nginx|grep -v grep""",
            "rabbitmq-server": """ps -ef|grep rabbitmq-|grep -v grep""",
            "memcached": """ps -ef|grep memcached|grep -v grep""",
        }

        errors = ''
        services_checked = 'Following services found running:\n'
        grid_ip = General.get_config().get('main', 'grid_ip')
        ssh_pass = General.get_config().get('mgmtcenter', 'password')
        client = SSHClient(grid_ip, username='******', password=ssh_pass)

        for service_to_check in services_to_commands.iterkeys():
            out, err = client.run(services_to_commands[service_to_check], debug=True, allow_insecure=True,
                                  return_stderr=True)
            if len(err):
                errors += "Error when trying to run {0}:\n{1}".format(services_to_commands[service_to_check], err)
            else:
                if len(out):
                    services_checked += "{0}\n".format(service_to_check)
                else:
                    errors += "Couldn't find {0} running process\n".format(service_to_check)

        for non_running_service in GeneralSystem.list_non_running_ovs_services(grid_ip):
            errors += str(non_running_service)

        assert len(errors) == 0,\
            "Found the following errors while checking for the system services:{0}\n".format(errors)
Example No. 3
    def __init__(self, ip=None, username=None, password=None, verify=False):
        if ip is None:
            ip = General.get_config().get('main', 'grid_ip')
            assert ip, "Please specify a valid ip in autotests.cfg for grid_ip"
        if username is None:
            username = General.get_config().get('main', 'username')
            assert username, "Please specify a valid username in autotests.cfg"
        if password is None:
            password = General.get_config().get('main', 'password')
            assert password, "Please specify a valid password in autotests.cfg"

        self.ip = ip
        self.username = username
        self.password = password
        self.verify = verify

        self.headers = {'Accept': 'application/json; version=3'}
        if os.path.exists(self.TOKEN_CACHE_FILENAME) \
                and (time.time() - os.path.getmtime(self.TOKEN_CACHE_FILENAME) > 3600.0):
            os.remove(self.TOKEN_CACHE_FILENAME)
        if os.path.exists(self.TOKEN_CACHE_FILENAME):
            with open(self.TOKEN_CACHE_FILENAME, 'r') as token_cache_file:
                self.token = token_cache_file.read()
                self.headers['Authorization'] = 'Bearer {0}'.format(self.token)
        else:
            self.token = ''
            self.authenticate()

        if 'Authorization' not in self.headers.keys():
            self.authenticate()
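The constructor above keeps the bearer token in a small on-disk cache and throws it away once the file is older than an hour, forcing a fresh authenticate() call. A minimal standalone sketch of that expiry check; the cache path and function name below are my own choices for illustration, not part of the snippet:

import os
import time

TOKEN_CACHE_FILENAME = '/tmp/ovs_api_token.cache'  # hypothetical path, for illustration only

def read_cached_token(max_age=3600.0):
    """Return the cached token, or None when the cache is missing or older than max_age seconds."""
    if not os.path.exists(TOKEN_CACHE_FILENAME):
        return None
    if time.time() - os.path.getmtime(TOKEN_CACHE_FILENAME) > max_age:
        os.remove(TOKEN_CACHE_FILENAME)  # stale cache: caller should re-authenticate
        return None
    with open(TOKEN_CACHE_FILENAME, 'r') as token_cache_file:
        return token_cache_file.read()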
    def system_services_check_test():
        """
        Verify some system services
        """
        services_to_commands = {
            "nginx": """ps -efx|grep nginx|grep -v grep""",
            "rabbitmq-server": """ps -ef|grep rabbitmq-|grep -v grep""",
            "memcached": """ps -ef|grep memcached|grep -v grep""",
            "ovs-arakoon-ovsdb": """initctl list| grep ovsdb""",
            "ovs-snmp": """initctl list| grep ovs-snmp""",
            "ovs-support-agent": """initctl list| grep support""",
            "ovs-volumerouter-consumer": """initctl list| grep volumerou""",
            "ovs-watcher-framework": """initctl list| grep watcher-fr"""
        }

        errors = ''
        services_checked = 'Following services found running:\n'
        grid_ip = General.get_config().get('main', 'grid_ip')
        ssh_pass = General.get_config().get('mgmtcenter', 'password')
        client = SSHClient(grid_ip, username='******', password=ssh_pass)

        for service_to_check in services_to_commands.iterkeys():
            out, err = client.run(services_to_commands[service_to_check], debug=True)
            if len(err):
                errors += "Error when trying to run {0}:\n{1}".format(services_to_commands[service_to_check], err)
            else:
                if len(out):
                    services_checked += "{0}\n".format(service_to_check)
                else:
                    errors += "Couldn't find {0} running process\n".format(service_to_check)

        print services_checked
        assert len(errors) == 0, "Found the following errors while checking for the system services:\n{0}".format(errors)
Example No. 5
    def load_path(source):
        """
        Retrieve the absolute path for the logfile
        :param source: Source for the logfile
        :return: Absolute path to logfile
        """
        log_path = General.get_config().get('logger', 'path')
        if not os.path.exists(log_path):
            os.mkdir(log_path)
        file_name = LogHandler.targets[source] if source in LogHandler.targets else General.get_config().get('logger', 'default_file')
        log_filename = '{0}/{1}.log'.format(log_path, file_name)
        if not os.path.exists(log_filename):
            open(log_filename, 'a').close()
            os.chmod(log_filename, 0o666)
        return log_filename
Example No. 6
def teardown():
    """
    Teardown for VirtualMachine package, will be executed when all started tests in this package have ended
    Removal actions of possible things left over after the test-run
    :return: None
    """
    vpool_name = General.get_config().get('vpool', 'name')
    vpool = GeneralVPool.get_vpool_by_name(vpool_name)
    assert vpool is not None, "No vpool found where one was expected"
    GeneralVMachine.logger.info("Cleaning vpool")
    GeneralVPool.remove_vpool(vpool)

    alba_backend = GeneralAlba.get_by_name(General.get_config().get('backend', 'name'))
    if alba_backend is not None:
        GeneralAlba.unclaim_disks_and_remove_alba_backend(alba_backend=alba_backend)
Example No. 7
    def initialise_disks(alba_backend, nr_of_disks, disk_type):
        """
        Initialize disks
        :param alba_backend: ALBA backend
        :param nr_of_disks: Amount of disks to initialize
        :param disk_type: Type of disks
        :return: True if the requested number of disks is already initialized, None otherwise
        """
        # Assume no disks are claimed by a remote environment
        alba_backend.invalidate_dynamics(['storage_stack'])
        storage_stack = alba_backend.storage_stack

        initialised_disks = 0
        uninitialized_disk_names = []
        for disks in storage_stack.values():
            for disk_id, disk in disks.iteritems():
                if disk['status'] == 'initialized':
                    initialised_disks += 1
                elif disk['status'] == 'uninitialized':
                    uninitialized_disk_names.append(disk_id)
        nr_of_disks_to_init = nr_of_disks - initialised_disks
        if nr_of_disks_to_init <= 0:
            return True

        assert len(uninitialized_disk_names) >= nr_of_disks_to_init, "Not enough disks to initialize!"

        disks_to_init = GeneralAlba.filter_disks(uninitialized_disk_names, nr_of_disks_to_init, disk_type)
        assert len(disks_to_init) >= nr_of_disks_to_init, "Not enough disks to initialize!"

        grid_ip = General.get_config().get('main', 'grid_ip')
        alba_node = AlbaNodeList.get_albanode_by_ip(grid_ip)
        failures = AlbaNodeController.initialize_disks(alba_node.guid, dict((disk_id, 1) for disk_id in disks_to_init))
        assert not failures, 'Alba disk initialization failed for (some) disks: {0}'.format(failures)
Example No. 8
    def filter_disks(disk_names, amount, disk_type):
        """
        Filter the available disks
        :param disk_names: Disks to filter
        :param amount: Amount to retrieve
        :param disk_type: Type of disk
        :return: Filtered disks
        """
        grid_ip = General.get_config().get('main', 'grid_ip')
        storagerouter = GeneralStorageRouter.get_storage_router_by_ip(ip=grid_ip)
        root_client = SSHClient(storagerouter, username='******')
        hdds, ssds = GeneralDisk.get_physical_disks(client=root_client)
        count = 0
        filtered_disks = list()

        if disk_type == 'SATA':
            list_to_check = hdds.values()
        elif disk_type == 'SSD':
            list_to_check = ssds.values()
        else:
            hdds.update(ssds)
            list_to_check = hdds.values()

        for disk_name in disk_names:
            for disk in list_to_check:
                if disk_name == disk['name']:
                    filtered_disks.append(disk['name'])
                    count += 1
            if count == amount:
                break

        return filtered_disks
Example No. 9
def setup():
    """
    Setup for Arakoon package, will be executed when any test in this package is being executed
    Make necessary changes before being able to run the tests
    :return: None
    """
    autotest_config = General.get_config()
    backend_name = autotest_config.get('backend', 'name')
    assert backend_name, 'Please fill out a backend name in the autotest.cfg file'
    backend = GeneralBackend.get_by_name(backend_name)
    if backend is not None:
        GeneralAlba.remove_alba_backend(backend.alba_backend)

    for storagerouter in GeneralStorageRouter.get_masters():
        root_client = SSHClient(storagerouter, username='******')
        if GeneralService.get_service_status(name='ovs-scheduled-tasks',
                                             client=root_client) is True:
            GeneralService.stop_service(name='ovs-scheduled-tasks',
                                        client=root_client)

    storagerouters = GeneralStorageRouter.get_storage_routers()
    for sr in storagerouters:
        root_client = SSHClient(sr, username='******')
        GeneralDisk.add_db_role(sr)

        for location in TEST_CLEANUP:
            root_client.run('rm -rf {0}'.format(location))

    GeneralAlba.add_alba_backend(backend_name)
    GeneralArakoon.voldrv_arakoon_checkup()
Example No. 10
def teardown():
    """
    Teardown for Arakoon package, will be executed when all started tests in this package have ended
    Removal actions of possible things left over after the test-run
    :return: None
    """
    autotest_config = General.get_config()
    backend_name = autotest_config.get('backend', 'name')
    backend = GeneralBackend.get_by_name(backend_name)
    if backend is not None:
        GeneralAlba.remove_alba_backend(backend.alba_backend)

    for storagerouter in GeneralStorageRouter.get_masters():
        root_client = SSHClient(storagerouter, username='******')
        if GeneralService.get_service_status(name='ovs-scheduled-tasks',
                                             client=root_client) is False:
            GeneralService.start_service(name='ovs-scheduled-tasks',
                                         client=root_client)

        for location in TEST_CLEANUP:
            root_client.run('rm -rf {0}'.format(location))

    for key in KEY_CLEANUP:
        if EtcdConfiguration.exists('{0}/{1}'.format(GeneralArakoon.ETCD_CONFIG_ROOT, key), raw=True):
            EtcdConfiguration.delete('{0}/{1}'.format(GeneralArakoon.ETCD_CONFIG_ROOT, key))
Example No. 11
    def prepare_alba_backend(name=None):
        """
        Create an ALBA backend and claim disks
        :param name: Name for the backend
        :return: None
        """
        # @TODO: Fix this, because backend_type should not be configurable if you always create an ALBA backend
        # @TODO 2: Get rid of these asserts, any test (or testsuite) should verify the required params first before starting execution
        autotest_config = General.get_config()
        if name is None:
            name = autotest_config.get('backend', 'name')
        nr_of_disks_to_claim = autotest_config.getint('backend', 'nr_of_disks_to_claim')
        type_of_disks_to_claim = autotest_config.get('backend', 'type_of_disks_to_claim')
        assert name,\
            "Please fill out a valid backend name in autotest.cfg file"

        storage_routers = GeneralStorageRouter.get_storage_routers()
        for sr in storage_routers:
            if GeneralStorageRouter.has_roles(storagerouter=sr, roles='DB') is False:
                GeneralDisk.add_db_role(sr)
            if GeneralStorageRouter.has_roles(storagerouter=sr, roles=['SCRUB', 'WRITE']) is False:
                GeneralDisk.add_write_scrub_roles(sr)
        backend = GeneralBackend.get_by_name(name)
        if not backend:
            alba_backend = GeneralAlba.add_alba_backend(name)
        else:
            alba_backend = backend.alba_backend
        GeneralAlba.claim_asds(alba_backend, nr_of_disks_to_claim, type_of_disks_to_claim)
        if GeneralAlba.has_preset(alba_backend=alba_backend,
                                  preset_name=GeneralAlba.ONE_DISK_PRESET) is False:
            GeneralAlba.add_preset(alba_backend=alba_backend,
                                   name=GeneralAlba.ONE_DISK_PRESET,
                                   policies=[[1, 1, 1, 2]])
Example No. 12
def setup():
    """
    Setup for Virtual Machine package, will be executed when any test in this package is being executed
    Make necessary changes before being able to run the tests
    :return: None
    """
    General.validate_required_config_settings(settings={'vpool': ['name'],
                                                        'backend': ['name']})

    # Download the template
    cmd = '[ -d {0} ] && echo "Dir exists" || echo "Dir does not exist"'.format(GeneralVMachine.template_target_folder)
    out, err, _ = General.execute_command(cmd)
    if err:
        GeneralVMachine.logger.error("Error while executing command {0}: {1}".format(cmd, err))
    if 'not' not in out:
        General.execute_command('rm -rf {0}'.format(GeneralVMachine.template_target_folder))
        General.execute_command('mkdir {0}'.format(GeneralVMachine.template_target_folder))
    grid_ip = General.get_config().get('main', 'grid_ip')

    if grid_ip.split('.')[0] == '172' and grid_ip.split('.')[1] == '20':
        server_location = 'http://172.20.3.8/templates/openvstorage'
    else:
        server_location = 'http://sso-qpackages-loch.cloudfounders.com/templates/openvstorage'

    GeneralVMachine.logger.info("Getting template from {0}".format(server_location))
    out, err, _ = General.execute_command('wget -P {0} {1}{2}{3}'.format(GeneralVMachine.template_target_folder, server_location, '/fio_debian/', GeneralVMachine.template_image))
    if err:
        GeneralVMachine.logger.error("Error while downloading template: {0}".format(err))
    out, err, _ = General.execute_command('chown root {0}{1}'.format(GeneralVMachine.template_target_folder, GeneralVMachine.template_image))
    if err:
        GeneralVMachine.logger.error("Error while changing user owner to root for template: {0}".format(err))

    GeneralAlba.prepare_alba_backend()
    _, vpool_params = GeneralVPool.add_vpool(vpool_parameters={'preset': GeneralAlba.ONE_DISK_PRESET})
    GeneralVPool.validate_vpool_sanity(expected_settings=vpool_params)
    def get_hypervisor_type():
        """
        Retrieve the type of hypervisor
        :return: Hypervisor type ('KVM'|'VMWARE')
        """
        config = General.get_config()
        return config.get('hypervisor', 'type')
Example No. 14
    def ovs_2263_verify_alba_namespace_cleanup_test():
        """
        Verify ALBA namespace cleanup
        Create an amount of namespaces in ALBA
        Create a vPool and create some volumes
        Verify the amount of namespaces before and after vPool creation
        Remove the vPool and the manually created namespaces
        Verify the amount of namespaces before and after vPool deletion
        """

        # Create some namespaces in alba
        no_namespaces = 3
        backend_name = General.get_config().get('backend', 'name')
        backend = GeneralBackend.get_by_name(name=backend_name)
        namespace_name = 'autotest-ns_'
        namespace_name_regex = re.compile(r'^autotest-ns_\d$')
        for nmspc_index in range(no_namespaces):
            GeneralAlba.execute_alba_cli_action(backend.alba_backend, 'create-namespace', ['{0}{1}'.format(namespace_name, nmspc_index), 'default'], False)
        result = GeneralAlba.list_alba_namespaces(alba_backend=backend.alba_backend,
                                                  name=namespace_name_regex)
        assert len(result) == no_namespaces, "Expected {0} namespaces present on the {1} backend, found {2}".format(no_namespaces, backend_name, len(result))

        # Create a vPool and create volumes on it
        vpool, _ = GeneralVPool.add_vpool()
        root_client = SSHClient(GeneralStorageRouter.get_local_storagerouter(), username='******')
        if vpool.storagedrivers[0].storagerouter.pmachine.hvtype == 'VMWARE':
            GeneralVPool.mount_vpool(vpool=vpool,
                                     root_client=root_client)

        vdisks = []
        for disk_index in range(no_namespaces):
            vdisks.append(GeneralVDisk.create_volume(size=10,
                                                     vpool=vpool,
                                                     root_client=root_client))
        result = GeneralAlba.list_alba_namespaces(alba_backend=backend.alba_backend)
        assert len(result) == 2 * no_namespaces + 1, "Expected {0} namespaces present on the {1} backend, found {2}".format(2 * no_namespaces + 1, backend_name, len(result))

        # Remove files and vPool
        for vdisk in vdisks:
            GeneralVDisk.delete_volume(vdisk=vdisk,
                                       vpool=vpool,
                                       root_client=root_client)

        if vpool.storagedrivers[0].storagerouter.pmachine.hvtype == 'VMWARE':
            GeneralVPool.unmount_vpool(vpool=vpool,
                                       root_client=root_client)

        GeneralVPool.remove_vpool(vpool)

        # Verify amount of namespaces
        result = GeneralAlba.list_alba_namespaces(alba_backend=backend.alba_backend,
                                                  name=namespace_name_regex)
        assert len(result) == no_namespaces, "Expected {0} namespaces present on the {1} backend, found {2}".format(no_namespaces, backend_name, len(result))
        for namespace in result:
            GeneralAlba.execute_alba_cli_action(backend.alba_backend, 'delete-namespace', [namespace['name']], False)
        result = GeneralAlba.list_alba_namespaces(alba_backend=backend.alba_backend,
                                                  name=namespace_name_regex)
        assert len(result) == 0, "Expected no namespaces present on the {0} backend, found {1}".format(backend_name, len(result))
Example No. 15
def teardown():
    """
    Teardown for VirtualDisk package, will be executed when all started tests in this package have ended
    Removal actions of possible things left over after the test-run
    :return: None
    """
    vpool_name = General.get_config().get("vpool", "name")
    vpool = GeneralVPool.get_vpool_by_name(vpool_name)

    for vd in VDiskList.get_vdisks():
        GeneralVDisk.delete_volume(vd, vpool, loop_device='loop0')

    if vpool is not None:
        GeneralVPool.remove_vpool(vpool)

    alba_backend = GeneralAlba.get_by_name(General.get_config().get('backend', 'name'))
    if alba_backend is not None:
        GeneralAlba.unclaim_disks_and_remove_alba_backend(alba_backend=alba_backend)
Example No. 16
def teardown():
    """
    Teardown for vPool package, will be executed when all started tests in this package have ended
    Removal actions of possible things left over after the test-run
    :return: None
    """
    alba_backend = GeneralAlba.get_by_name(General.get_config().get('backend', 'name'))
    if alba_backend:
        GeneralAlba.unclaim_disks_and_remove_alba_backend(alba_backend=alba_backend)
    def get_hypervisor_info():
        """
        Retrieve info about the hypervisor (ip, username, password)
        :return: List with the hypervisor's ip, username and password
        """
        config = General.get_config()
        hv_ip = config.get(section='hypervisor', option='ip')
        hv_user = config.get(section='hypervisor', option='username')
        hv_pass = config.get(section='hypervisor', option='password')
        if not hv_ip or not hv_user or not hv_pass:
            raise RuntimeError('Not all hypervisor information present in config')
        return [hv_ip, hv_user, hv_pass]
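The hypervisor helpers above read their values from the [hypervisor] section of autotests.cfg. Inferred from the get() calls in the two snippets, such a section would look roughly like the excerpt below; the keys match the code, but every value is a placeholder of my own:

[hypervisor]
type = KVM
ip = 10.100.1.2
username = root
password = secret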
Example No. 18
def setup():
    """
    Setup for vPool package, will be executed when any test in this package is being executed
    Make necessary changes before being able to run the tests
    :return: None
    """
    autotest_config = General.get_config()
    backend_name = autotest_config.get('backend', 'name')
    assert backend_name, "Please fill out a valid backend name in autotest.cfg file"

    GeneralAlba.prepare_alba_backend()
Example No. 19
    def __init__(self, source, name=None, propagate=False):
        """
        Initializes the logger
        """
        parent_invoker = inspect.stack()[1]
        if not __file__.startswith(parent_invoker[1]) or parent_invoker[3] != 'get':
            raise RuntimeError('Cannot invoke instance from outside this class. Please use LogHandler.get(source, name=None) instead')

        if name is None:
            name = General.get_config().get('logger', 'default_name')

        log_filename = LogHandler.load_path(source)

        formatter = logging.Formatter('%(asctime)s - [%(process)s] - [%(levelname)s] - [{0}] - [%(name)s] - %(message)s'.format(source))
        handler = logging.FileHandler(log_filename)
        handler.setFormatter(formatter)

        self.logger = logging.getLogger(name)
        self.logger.propagate = propagate
        self.logger.setLevel(getattr(logging, General.get_config().get('logger', 'level')))
        self.logger.addHandler(handler)
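The guard at the top of this constructor only allows construction from inside the class itself, so callers are expected to go through LogHandler.get(source, name=None), as the RuntimeError message says. A small usage sketch under that assumption; only LogHandler.get() and the .logger attribute are confirmed by the snippet, the 'backend' source key and the return type of get() are assumptions:

handler = LogHandler.get('backend')            # source selects the logfile via LogHandler.load_path()
handler.logger.info('Backend setup finished')  # assumes get() returns the wrapper exposing the logging.Logger as .logger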
Example No. 20
def teardown():
    """
    Teardown for Backend package, will be executed when all started tests in this package have ended
    Removal actions of possible things left over after the test-run
    :return: None
    """
    autotest_config = General.get_config()
    backend_name = autotest_config.get('backend', 'name')
    backend = GeneralBackend.get_by_name(backend_name)
    if backend:
        GeneralAlba.unclaim_disks(backend.alba_backend)
        GeneralAlba.remove_alba_backend(backend.alba_backend)
Example No. 21
    def get_add_vpool_params(storagerouter, **kwargs):
        """
        Retrieve the default configuration settings to create a vPool
        :param storagerouter: Storage Router on which to add or extend the vPool
        :type storagerouter: StorageRouter

        :return: Dictionary with default settings
        :rtype: dict
        """
        test_config = General.get_config()
        config_params = json.loads(test_config.get("vpool", "config_params"))
        vpool_type = kwargs.get("type", test_config.get("vpool", "type"))
        vpool_params = {
            "type": vpool_type,
            "vpool_name": kwargs.get("vpool_name", test_config.get("vpool", "name")),
            "storage_ip": kwargs.get("storage_ip", test_config.get("vpool", "storage_ip")),
            "integratemgmt": kwargs.get("integrate_mgmt", test_config.getboolean("vpool", "integrate_mgmt")),
            "readcache_size": kwargs.get("readcache_size", test_config.getint("vpool", "readcache_size")),
            "writecache_size": kwargs.get("writecache_size", test_config.getint("vpool", "writecache_size")),
            "storagerouter_ip": storagerouter.ip,
            "config_params": {
                "dtl_mode": kwargs.get("dtl_mode", config_params.get("dtl_mode", "a_sync")),
                "sco_size": kwargs.get("sco_size", config_params.get("sco_size", 4)),
                "cluster_size": kwargs.get("cluster_size", config_params.get("cluster_size", 4)),
                "write_buffer": kwargs.get("write_buffer", config_params.get("write_buffer", 128)),
                "dtl_transport": kwargs.get("dtl_transport", config_params.get("dtl_transport", "tcp")),
            },
            "backend_connection_info": {"host": ""},
        }
        if vpool_type not in ["local", "distributed"]:
            vpool_params["backend_connection_info"] = {
                "host": kwargs.get("alba_connection_host", test_config.get("vpool", "alba_connection_host")),
                "port": kwargs.get("alba_connection_port", test_config.getint("vpool", "alba_connection_port")),
                "username": kwargs.get("alba_connection_user", test_config.get("vpool", "alba_connection_user")),
                "password": kwargs.get("alba_connection_pass", test_config.get("vpool", "alba_connection_pass")),
            }
            if vpool_type == "alba":
                backend = BackendList.get_by_name(kwargs.get("backend_name", test_config.get("backend", "name")))
                if backend is not None:
                    vpool_params["fragment_cache_on_read"] = kwargs.get(
                        "fragment_cache_on_read", test_config.getboolean("vpool", "fragment_cache_on_read")
                    )
                    vpool_params["fragment_cache_on_write"] = kwargs.get(
                        "fragment_cache_on_write", test_config.getboolean("vpool", "fragment_cache_on_write")
                    )
                    vpool_params["backend_connection_info"]["backend"] = {
                        "backend": backend.alba_backend_guid,
                        "metadata": "default",
                    }
        elif vpool_type == "distributed":
            vpool_params["distributed_mountpoint"] = kwargs.get("distributed_mountpoint", "/tmp")
        return vpool_params
Example No. 22
def setup():
    """
    Setup for Backend package, will be executed when any test in this package is being executed
    Make necessary changes before being able to run the tests
    :return: None
    """
    autotest_config = General.get_config()
    backend_name = autotest_config.get('backend', 'name')
    assert backend_name, "Please fill out a valid backend name in autotest.cfg file"

    my_sr = GeneralStorageRouter.get_local_storagerouter()
    if GeneralStorageRouter.has_roles(storagerouter=my_sr, roles='DB') is False:
        GeneralDisk.add_db_role(my_sr)
Example No. 23
def setup():
    """
    Setup for Virtual Machine package, will be executed when any test in this package is being executed
    Make necessary changes before being able to run the tests
    :return: None
    """
    autotest_config = General.get_config()
    backend_name = autotest_config.get('backend', 'name')
    assert backend_name, "Please fill out a valid backend name in autotest.cfg file"

    # Download the template
    cmd = '[ -d {0} ] && echo "Dir exists" || echo "Dir does not exist"'.format(GeneralVMachine.template_target_folder)
    out, err, _ = General.execute_command(cmd)
    if err:
        GeneralVMachine.logger.error("Error while executing command {0}: {1}".format(cmd, err))
    if 'not' not in out:
        General.execute_command('rm -rf {0}'.format(GeneralVMachine.template_target_folder))
        General.execute_command('mkdir {0}'.format(GeneralVMachine.template_target_folder))
    grid_ip = General.get_config().get('main', 'grid_ip')

    if grid_ip.split('.')[0] == '172' and grid_ip.split('.')[1] == '20':
        server_location = 'http://172.20.3.8/templates/openvstorage'
    else:
        server_location = 'http://sso-qpackages-loch.cloudfounders.com/templates/openvstorage'

    GeneralVMachine.logger.info("Getting template from {0}".format(server_location))
    out, err, _ = General.execute_command('wget -P {0} {1}{2}{3}'.format(GeneralVMachine.template_target_folder, server_location, '/fio_debian/', GeneralVMachine.template_image))
    if err:
        GeneralVMachine.logger.error("Error while downloading template: {0}".format(err))
    out, err, _ = General.execute_command('chown root {0}{1}'.format(GeneralVMachine.template_target_folder, GeneralVMachine.template_image))
    if err:
        GeneralVMachine.logger.error("Error while changing user owner to root for template: {0}".format(err))

    GeneralAlba.prepare_alba_backend()
    GeneralManagementCenter.create_generic_mgmt_center()
    GeneralVPool.add_vpool()
Example No. 24
def setup():
    """
    Setup for Backend package, will be executed when any test in this package is being executed
    Make necessary changes before being able to run the tests
    :return: None
    """
    General.validate_required_config_settings(settings={'backend': ['name']})
    my_sr = GeneralStorageRouter.get_local_storagerouter()
    if GeneralStorageRouter.has_roles(storagerouter=my_sr, roles='DB') is False:
        GeneralDisk.add_db_role(my_sr)

    alba_backend_name = General.get_config().get('backend', 'name')
    alba_backend = GeneralAlba.get_by_name(alba_backend_name)
    if alba_backend is None:
        GeneralAlba.add_alba_backend(alba_backend_name)
Example No. 25
    def ovs_2703_kill_various_services_test():
        """
        Kill various services and see if they recover
        """

        # @TODO 1: This test does not belong in the vPool tests, it's a service test that happens to create a vPool
        # @TODO 2: Make test smarter to test all required services on all node types
        vpool = GeneralVPool.get_vpool_by_name(General.get_config().get('vpool', 'name'))
        if vpool is None:
            vpool, _ = GeneralVPool.add_vpool()

        errors = []
        root_client = SSHClient(GeneralStorageRouter.get_local_storagerouter(), username='******')
        for service_name in GeneralService.get_all_service_templates():
            if GeneralService.has_service(name=service_name,
                                          client=root_client) is False:
                continue

            if GeneralService.get_service_status(name=service_name,
                                                 client=root_client) is False:
                errors.append('Service {0} not found in running state'.format(service_name))
                continue

            pid_before = GeneralService.get_service_pid(name=service_name,
                                                        client=root_client)
            if pid_before == -1:
                errors.append('Service {0} has unknown PID before being killed'.format(service_name))
                continue
            GeneralService.kill_service(name=service_name,
                                        client=root_client)
            time.sleep(5)
            if GeneralService.get_service_status(name=service_name,
                                                 client=root_client) is False:
                errors.append('Service {0} not found in running state after killing it'.format(service_name))
                continue
            pid_after = GeneralService.get_service_pid(name=service_name,
                                                       client=root_client)
            if pid_after == -1:
                errors.append('Service {0} has unknown PID after being killed'.format(service_name))
                continue
            if pid_before == pid_after:
                errors.append('Kill command did not work on service {0}'.format(service_name))

        GeneralVPool.remove_vpool(vpool)

        assert len(errors) == 0, "Following issues were found with the services:\n - {0}".format('\n - '.join(errors))
Example No. 26
    def get_add_vpool_params(storagerouter, **kwargs):
        """
        Retrieve the default configuration settings to create a vPool
        :param storagerouter: Storage Router on which to add or extend the vPool
        :type storagerouter: StorageRouter

        :return: Dictionary with default settings
        :rtype: dict
        """
        test_config = General.get_config()
        config_params = json.loads(test_config.get('vpool', 'config_params'))
        vpool_type = kwargs.get('type', test_config.get('vpool', 'type'))
        vpool_params = {'type': vpool_type,
                        'vpool_name': kwargs.get('vpool_name', test_config.get('vpool', 'name')),
                        'storage_ip': kwargs.get('storage_ip', test_config.get('vpool', 'storage_ip')),
                        'integratemgmt': kwargs.get('integrate_mgmt', test_config.getboolean('vpool', 'integrate_mgmt')),
                        'readcache_size': kwargs.get('readcache_size', test_config.getint('vpool', 'readcache_size')),
                        'writecache_size': kwargs.get('writecache_size', test_config.getint('vpool', 'writecache_size')),
                        'storagerouter_ip': storagerouter.ip,
                        'config_params': {'dtl_mode': kwargs.get('dtl_mode', config_params.get('dtl_mode', 'a_sync')),
                                          'sco_size': kwargs.get('sco_size', config_params.get('sco_size', 4)),
                                          'dedupe_mode': kwargs.get('dedupe_mode', config_params.get('dedupe_mode', 'dedupe')),
                                          'cluster_size': kwargs.get('cluster_size', config_params.get('cluster_size', 4)),
                                          'write_buffer': kwargs.get('write_buffer', config_params.get('write_buffer', 128)),
                                          'dtl_transport': kwargs.get('dtl_transport', config_params.get('dtl_transport', 'tcp')),
                                          'cache_strategy': kwargs.get('cache_strategy', config_params.get('cache_strategy', 'on_read'))}}
        if vpool_type not in ['local', 'distributed']:
            vpool_params['backend_connection_info'] = {'host': kwargs.get('alba_connection_host', test_config.get('vpool', 'alba_connection_host')),
                                                       'port': kwargs.get('alba_connection_port', test_config.getint('vpool', 'alba_connection_port')),
                                                       'username': kwargs.get('alba_connection_user', test_config.get('vpool', 'alba_connection_user')),
                                                       'password': kwargs.get('alba_connection_pass', test_config.get('vpool', 'alba_connection_pass'))}
            if vpool_type == 'alba':
                backend = BackendList.get_by_name(kwargs.get('backend_name', test_config.get('backend', 'name')))
                if backend is not None:
                    vpool_params['fragment_cache_on_read'] = kwargs.get('fragment_cache_on_read', test_config.getboolean('vpool', 'fragment_cache_on_read'))
                    vpool_params['fragment_cache_on_write'] = kwargs.get('fragment_cache_on_write', test_config.getboolean('vpool', 'fragment_cache_on_write'))
                    vpool_params['backend_connection_info']['backend'] = {'backend': backend.alba_backend_guid,
                                                                          'metadata': 'default'}
        elif vpool_type == 'distributed':
            vpool_params['distributed_mountpoint'] = kwargs.get('distributed_mountpoint', '/tmp')
        return vpool_params
Example No. 27
    def _wait_for_asd_count_with_status(_alba_backend, _nr_of_asds, status):
        grid_ip = General.get_config().get('main', 'grid_ip')
        alba_node = AlbaNodeList.get_albanode_by_ip(grid_ip)
        counter = GeneralAlba.ALBA_TIMER / GeneralAlba.ALBA_TIMER_STEP
        asds_with_status = {}
        while counter > 0:
            GeneralAlba.logger.info('counter: {0}'.format(counter))
            _alba_backend.invalidate_dynamics(['storage_stack'])
            if alba_node.node_id in _alba_backend.storage_stack:
                for _disk in _alba_backend.storage_stack[alba_node.node_id].values():
                    for _asd_id, _asd in _disk['asds'].iteritems():
                        if _asd['status'] == status:
                            asds_with_status[_asd_id] = _disk.get('guid')
            GeneralAlba.logger.info('looking for {0} asds with status {1}: {2}'.format(_nr_of_asds, status, asds_with_status))
            if len(asds_with_status) >= _nr_of_asds:
                break
            counter -= 1
            time.sleep(GeneralAlba.ALBA_TIMER_STEP)
        assert len(asds_with_status) >= _nr_of_asds,\
            "Unable to find {0} asds, only found {1} asds with status: {2}.\n".format(_nr_of_asds, len(asds_with_status), status)
        return asds_with_status
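The helper above follows the poll-with-timeout pattern the suite uses in several places: derive an iteration count from a timer and a step (here GeneralAlba.ALBA_TIMER / GeneralAlba.ALBA_TIMER_STEP), re-check, sleep, and give up when the counter runs out. A generic, self-contained sketch of that pattern, with names and defaults of my own choosing:

import time

def wait_until(predicate, timeout=300, step=5):
    """Poll predicate() every `step` seconds; return its truthy result, or None once `timeout` seconds have passed."""
    counter = timeout / step
    while counter > 0:
        result = predicate()
        if result:
            return result
        counter -= 1
        time.sleep(step)
    return None

# Example (hypothetical, reusing a call from the snippets above):
# wait_until(lambda: len(GeneralVMachine.get_vmachines()) == 0, timeout=30, step=5)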
Example No. 28
import os
import re
import sys
import nose
import time
import datetime
import StringIO
import subprocess
import ConfigParser
from xml.dom import minidom
from ci.tests.general.general import General
from ci.tests.general.general_pmachine import GeneralPMachine
from ci.scripts import testrailapi, testEnum
from ci.scripts import xunit_testrail

at_config = General.get_config()
TESTRAIL_STATUS_ID_PASSED = '1'
TESTRAIL_STATUS_ID_BLOCKED = '2'
TESTRAIL_STATUS_ID_FAILED = '5'
TESTRAIL_STATUS_ID_SKIPPED = '11'
TESTRAIL_FOLDER = at_config.get(section="main", option="output_folder")
TESTRAIL_KEY = at_config.get(section="testrail", option="key")
TESTRAIL_PROJECT = at_config.get(section="testrail", option="test_project")
TESTRAIL_QUALITYLEVEL = at_config.get(section="main", option="qualitylevel")
TESTRAIL_SERVER = at_config.get(section="testrail", option="server")

BLOCKED_MESSAGE = "BLOCKED"


class TestRunnerOutputFormat(object):
    """
Example No. 29
    def vms_with_fio_test():
        """
        Test virtual machines with FIO
        """
        timeout = 30
        timer_step = 5
        nr_of_disks = 10
        vpool_name = General.get_config().get('vpool', 'name')
        vpool = GeneralVPool.get_vpool_by_name(vpool_name=vpool_name)
        assert vpool, "No vpool found where one was expected"
        for disk_number in range(nr_of_disks):
            disk_name = "disk-{0}".format(disk_number)
            GeneralVMachine.logger.info("Starting RAW disk creation")
            template_folder = GeneralVMachine.template_target_folder
            image_name = GeneralVMachine.template_image
            out, err, _ = General.execute_command('qemu-img convert -O raw {0}{1} /mnt/{2}/{3}.raw'.format(template_folder, image_name, vpool_name, disk_name))
            if err:
                GeneralVMachine.logger.error("Error while creating raw disk: {0}".format(err))

        vpool = GeneralVPool.get_vpool_by_name(vpool_name=vpool_name)
        assert len(vpool.vdisks) == nr_of_disks, "Only {0} out of {1} VDisks have been created".format(len(vpool.vdisks), nr_of_disks)

        for vm_number in range(nr_of_disks):
            machine_name = "machine-{0}".format(vm_number)
            disk_name = "disk-{0}".format(vm_number)
            GeneralVMachine.logger.info("Starting vmachine creation from RAW disk")
            out, err, _ = General.execute_command('virt-install --connect qemu:///system -n {0} -r 512 --disk /mnt/{1}/{2}.raw,'
                                                  'device=disk --noautoconsole --graphics vnc,listen=0.0.0.0 --vcpus=1 --network network=default,mac=RANDOM,'
                                                  'model=e1000 --import'.format(machine_name, vpool_name, disk_name))
            if err:
                GeneralVMachine.logger.error("Error while creating vmachine: {0}".format(err))

        counter = timeout / timer_step
        while counter > 0:
            vms = GeneralVMachine.get_vmachines()
            if len(vms) == nr_of_disks:
                counter = 0
            else:
                counter -= 1
                time.sleep(timer_step)
        vms = GeneralVMachine.get_vmachines()
        assert len(vms) == nr_of_disks, "Only {0} out of {1} VMachines have been created after {2} seconds".format(len(vms), nr_of_disks, timeout)

        # Waiting for 5 minutes of FIO activity on the vmachines
        time.sleep(300)
        vms = GeneralVMachine.get_vmachines()
        for vm in vms:
            assert vm.hypervisor_status == 'RUNNING', "Machine {0} has wrong status on the hypervisor: {1}".format(vm.name, vm.hypervisor_status)

        for vm_number in range(nr_of_disks):
            vmachine_name = "machine-{0}".format(vm_number)
            GeneralVMachine.logger.info("Removing {0} vmachine".format(vmachine_name))
            out, err, _ = General.execute_command('virsh destroy {0}'.format(vmachine_name))
            if err:
                GeneralVMachine.logger.error("Error while stopping vmachine: {0}".format(err))
            out, err, _ = General.execute_command('virsh undefine {0}'.format(vmachine_name))
            if err:
                GeneralVMachine.logger.error("Error while removing vmachine: {0}".format(err))

        counter = timeout / timer_step
        while counter > 0:
            vms = GeneralVMachine.get_vmachines()
            if len(vms):
                counter -= 1
                time.sleep(timer_step)
            else:
                counter = 0
        vms = GeneralVMachine.get_vmachines()
        assert len(vms) == 0, "Still some machines left on the vpool after waiting for {0} seconds: {1}".format(timeout, [vm.name for vm in vms])

        GeneralVMachine.logger.info("Removing vpool vdisks from {0} vpool".format(vpool_name))
        out, err, _ = General.execute_command("rm -rf /mnt/{0}/*.raw".format(vpool_name))
        if err:
            GeneralVMachine.logger.error("Error while removing vdisks: {0}".format(err))

        counter = timeout / timer_step
        while counter > 0:
            vpool = GeneralVPool.get_vpool_by_name(vpool_name=vpool_name)
            if len(vpool.vdisks):
                counter -= 1
                time.sleep(timer_step)
            else:
                counter = 0
        vpool = GeneralVPool.get_vpool_by_name(vpool_name=vpool_name)
        assert len(vpool.vdisks) == 0, "Still some disks left on the vpool after waiting {0} seconds: {1}".format(timeout, vpool.vdisks_guids)
Example No. 30
    def check_scrubbing_test():
        """
        Check scrubbing of vdisks test
        """
        initial_counter = 100
        step = 5
        vdisk = None
        vpool_name = General.get_config().get('vpool', 'name')
        vpool = GeneralVPool.get_vpool_by_name(vpool_name=vpool_name)
        assert vpool, "No vpool found where one was expected"

        template_folder = GeneralVMachine.template_target_folder
        image_name = GeneralVMachine.template_image

        disk_name = "scrubdisk"
        GeneralVMachine.logger.info("Starting RAW disk creation")
        out, err, _ = General.execute_command('qemu-img convert -O raw {0}{1} /mnt/{2}/{3}.raw'.format(template_folder, image_name, vpool_name, disk_name))
        if err:
            GeneralVMachine.logger.error("Error while creating raw disk: {0}".format(err))

        def snapshot_vdisk(vdisk):
            metadata = {'label': 'snap-' + vdisk.name,
                        'is_consistent': True,
                        'timestamp': time.time(),
                        'is_automatic': False,
                        'is_sticky': False}
            VDiskController.create_snapshot(vdisk.guid, metadata)

        counter = initial_counter
        while counter and vdisk is None:
            time.sleep(step)
            vdisk = VDiskList.get_by_devicename_and_vpool('/' + disk_name + '.raw', vpool)
            counter -= step
        assert counter > 0, "Vdisk with name {0} didn't appear in the model within {1} seconds".format(disk_name, initial_counter)
        # snapshot disks for the first time
        snapshot_vdisk(vdisk)
        counter = initial_counter
        while counter > 0:
            time.sleep(step)
            out, err, _ = General.execute_command('dd if=/dev/zero of=/mnt/{0}/{1}.raw bs=10K count=1000 conv=notrunc'.format(vpool_name, disk_name))
            counter -= step
            snapshot_vdisk(vdisk)

        # save the disk's 'stored' statistic; it is the only attribute that drops after scrubbing
        vdisk.invalidate_dynamics(['statistics'])
        disk_backend_data = vdisk.statistics['stored']

        # deleting middle snapshots
        for snapshot in vdisk.snapshots[1:-1]:
            VDiskController.delete_snapshot(vdisk.guid, snapshot['guid'])

        # starting scrubber
        try:
            GenericController.execute_scrub()
            # waiting for model to catch up
            counter = initial_counter
            while counter > 0:
                time.sleep(step)
                vdisk.invalidate_dynamics(['statistics'])
                # checking result of scrub work
                if vdisk.statistics['stored'] < disk_backend_data:
                    GeneralVMachine.logger.info("It took {0} seconds for the value to change from {1} to {2}\n".format((initial_counter - counter) * step,
                                                                                                                       disk_backend_data,
                                                                                                                       vdisk.statistics['stored']))
                    break
                counter -= step
        finally:
            # removing vdisk
            GeneralVMachine.logger.info("Removing vpool vdisks from {0} vpool".format(vpool_name))
            out, err, _ = General.execute_command("rm -rf /mnt/{0}/*.raw".format(vpool_name))
            if err:
                GeneralVMachine.logger.error("Error while removing vdisk: {0}".format(err))

        assert counter > 0, "Scrubbing didn't run as expected, backend size of vdisk remained at {0}:\n".format(disk_backend_data)