Example #1
    def remove_alba_namespaces(alba_backend, namespaces=None):
        """
        Remove ALBA namespaces
        :param alba_backend: ALBA backend
        :param namespaces: Name of namespaces to remove
        :return: None
        """
        ns_to_delete = namespaces
        if namespaces is None:
            ns_to_delete = GeneralAlba.list_alba_namespaces(alba_backend=alba_backend)

        cmd_delete = "alba delete-namespace {0} ".format(GeneralAlba.get_abm_config(alba_backend))
        fd_namespaces = []
        for ns in ns_to_delete:
            namespace_name = str(ns['name'])
            if 'fd-' in namespace_name:
                fd_namespaces.append(ns)
                GeneralAlba.logger.info("Skipping vpool namespace: {0}".format(namespace_name))
                continue
            GeneralAlba.logger.info("WARNING: Deleting leftover namespace: {0}".format(str(ns)))
            GeneralAlba.logger.info(General.execute_command(cmd_delete + namespace_name)[0].replace('true', 'True'))

        if namespaces is None:
            for ns in fd_namespaces:
                GeneralAlba.logger.info("WARNING: Deleting leftover vpool namespace: {0}".format(str(ns)))
                GeneralAlba.logger.info(General.execute_command(cmd_delete + str(ns['name']))[0].replace('true', 'True'))
            assert len(fd_namespaces) == 0,\
                "Removing Alba namespaces should not be necessary!"
Example #2
    def get_unused_disks():
        """
        Retrieve all disks not in use
        :return: List of disks not being used
        """
        # @TODO: Make this call possible on all nodes, not only on the node executing the tests
        all_disks = General.execute_command("""fdisk -l 2>/dev/null| awk '/Disk \/.*:/ {gsub(":","",$s);print $2}' | grep -v ram""")[0].splitlines()
        out = General.execute_command("df -h | awk '{print $1}'")[0]

        return [d for d in all_disks if d not in out and 'mapper' not in d and not General.execute_command("fuser {0}".format(d))[0]]
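A short sketch of how this helper might be called; it assumes get_unused_disks is exposed on the same General class used throughout these examples.

# Hypothetical usage, import path assumed
from ci.tests.general.general import General

unused_disks = General.get_unused_disks()  # e.g. ['/dev/sdb', '/dev/sdc']
if unused_disks:
    print('First unused disk: {0}'.format(unused_disks[0]))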
Example #3
def list_tests(args=None, with_plugin=False):
    """
    Lists all the tests that nose detects under TESTS_DIR
    :param args: Extra arguments for listing tests
    :param with_plugin: Use the --with-testEnum plugin
    """
    if not args:
        arguments = ['--where', General.TESTS_DIR, '--verbosity', '3', '--collect-only']
    else:
        arguments = args + ['--collect-only']

    if with_plugin is True:
        arguments.append('--with-testEnum')

        fake_stdout = StringIO.StringIO()
        old_stdout = sys.stdout
        sys.stdout = fake_stdout

        try:
            nose.run(argv=arguments, addplugins=[testEnum.TestEnum()])
        finally:
            sys.stdout = old_stdout

        return fake_stdout.getvalue().split()

    testcases = []
    for line in General.execute_command(command='nosetests {0}'.format(' '.join(arguments)))[1].splitlines():
        if line.startswith('ci.tests'):
            testcases.append(line.split(' ... ')[0])
    return testcases
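A usage sketch; it assumes list_tests is imported from whichever helper module defines it, which this page does not show.

# Collect test names by parsing plain nosetests output
all_tests = list_tests()
# Or collect them through the testEnum nose plugin instead
plugin_tests = list_tests(with_plugin=True)
print('\n'.join(all_tests))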
Example #4
    def execute_alba_cli_action(alba_backend, action, params=None, json_output=True):
        """
        Execute an ALBA CLI command
        :param alba_backend: ALBA backend
        :param action: Action to execute
        :param params: Parameters to pass to the action
        :param json_output: Return JSON output
        :return: Output of the action
        """
        config = GeneralAlba.get_abm_config(alba_backend)
        cmd = ['alba', action]
        cmd.extend(config)
        if json_output:
            cmd.append('--to-json')
        if params is None:
            params = []
        cmd.extend(params)

        GeneralAlba.logger.info('Running alba cli command: {0}'.format(cmd))
        output = ''
        try:
            output, error, exit_code = General.execute_command(' '.join(cmd))
            if exit_code != 0:
                GeneralAlba.logger.error('Exit code: {0}'.format(exit_code))
                GeneralAlba.logger.error('Error thrown: {0}'.format(error))
                raise RuntimeError('ALBA command failed with exitcode {0} and error {1}'.format(exit_code, error))
            if json_output is True:
                return json.loads(output)['result']
            return output
        except (ValueError, RuntimeError):
            GeneralAlba.logger.error("Command {0} failed:\nOutput: {1}".format(cmd, output))
            raise RuntimeError("Command {0} failed:\nOutput: {1}".format(cmd, output))
Example #5
def setup():
    """
    Setup for Virtual Machine package, will be executed when any test in this package is being executed
    Make necessary changes before being able to run the tests
    :return: None
    """
    General.validate_required_config_settings(settings={'vpool': ['name'],
                                                        'backend': ['name']})

    # Download the template
    cmd = '[ -d {0} ] && echo "Dir exists" || echo "Dir does not exist"'.format(GeneralVMachine.template_target_folder)
    out, err, _ = General.execute_command(cmd)
    if err:
        GeneralVMachine.logger.error("Error while executing command {0}: {1}".format(cmd, err))
    if 'not' not in out:
        General.execute_command('rm -rf {0}'.format(GeneralVMachine.template_target_folder))
        General.execute_command('mkdir {0}'.format(GeneralVMachine.template_target_folder))
    grid_ip = General.get_config().get('main', 'grid_ip')

    if grid_ip.split('.')[0] == '172' and grid_ip.split('.')[1] == '20':
        server_location = 'http://172.20.3.8/templates/openvstorage'
    else:
        server_location = 'http://sso-qpackages-loch.cloudfounders.com/templates/openvstorage'

    GeneralVMachine.logger.info("Getting template from {0}".format(server_location))
    out, err, _ = General.execute_command('wget -P {0} {1}{2}{3}'.format(GeneralVMachine.template_target_folder, server_location, '/fio_debian/', GeneralVMachine.template_image))
    if err:
        GeneralVMachine.logger.error("Error while downloading template: {0}".format(err))
    out, err, _ = General.execute_command('chown root {0}{1}'.format(GeneralVMachine.template_target_folder, GeneralVMachine.template_image))
    if err:
        GeneralVMachine.logger.error("Error while changing user owner to root for template: {0}".format(err))

    GeneralAlba.prepare_alba_backend()
    _, vpool_params = GeneralVPool.add_vpool(vpool_parameters={'preset': GeneralAlba.ONE_DISK_PRESET})
    GeneralVPool.validate_vpool_sanity(expected_settings=vpool_params)
Example #6
    def validate_clone_disk_test():
        """
        Validate vdisk clone method
        """
        disk_name = 'clone-disk'
        clone_disk_name = 'new-cloned-disk'
        test_file_name = 'file-contents'
        test_file_size = 5000
        loop = 'loop0'
        clone_loop = 'loop1'

        vpool = GeneralVPool.get_vpool_by_name(TestVDisk.vpool_name)
        vdisk = GeneralVDisk.create_volume(size=50, vpool=vpool, name=disk_name, loop_device=loop, wait=True)

        TestVDisk.logger.info('clone_disk_test - create initial snapshot')
        GeneralVDisk.create_snapshot(vdisk=vdisk, snapshot_name='snap0')

        TestVDisk.logger.info('clone_disk_test - create 1st {0} GB test file'.format(test_file_size / 1000.0))
        GeneralVDisk.generate_hash_file(full_name='/mnt/{0}/{1}_{2}.txt'.format(loop, test_file_name, '1'), size=test_file_size)

        TestVDisk.logger.info('clone_disk_test - create 2nd {0} GB test file'.format(test_file_size / 1000.0))
        GeneralVDisk.generate_hash_file(full_name='/mnt/{0}/{1}_{2}.txt'.format(loop, test_file_name, '2'), size=test_file_size)

        GeneralVDisk.logger.info(General.execute_command('sync'))

        TestVDisk.logger.info('clone_disk_test - cloning disk')
        cloned_vdisk = GeneralVDisk.clone_volume(vdisk, clone_disk_name)
        TestVDisk.logger.info('clone_disk_test - cloned disk')

        GeneralVDisk.connect_volume(vpool, name=clone_disk_name, loop_device=clone_loop)

        md5_sum_1 = General.execute_command('md5sum /mnt/{0}/{1}_{2}.txt'.format(loop, test_file_name, '1'))[0].split('  ')[0]
        md5_sum_2 = General.execute_command('md5sum /mnt/{0}/{1}_{2}.txt'.format(loop, test_file_name, '2'))[0].split('  ')[0]
        md5_clone_1 = General.execute_command('md5sum /mnt/{0}/{1}_{2}.txt'.format(clone_loop, test_file_name, '1'))[0].split('  ')[0]
        md5_clone_2 = General.execute_command('md5sum /mnt/{0}/{1}_{2}.txt'.format(clone_loop, test_file_name, '2'))[0].split('  ')[0]

        GeneralVDisk.disconnect_volume(loop_device=clone_loop)
        GeneralVDisk.delete_volume(VDisk(cloned_vdisk['vdisk_guid']), vpool, wait=True)
        GeneralVDisk.delete_volume(vdisk, vpool, loop, wait=True)

        assert md5_sum_1 == md5_clone_1,\
            'File contents of /mnt/{0}/{1}_{2}.txt are not identical on source and clone!'.format(loop, test_file_name, '1')
        assert md5_sum_2 == md5_clone_2,\
            'File contents of /mnt/{0}/{1}_{2}.txt are not identical on source and clone!'.format(loop, test_file_name, '2')
Example #7
    def create_vm(self, name, ram=1024, small=False):
        """
        Create a Virtual Machine
        :param name: Name of the Virtual Machine
        :param ram: Amount of RAM in MiB
        :param small: Use the small bootdisk image
        :return: None
        """
        os_name = General.get_os()
        bootdisk_path_remote = General.get_os_info(os_name + '_small' if small else os_name)['bootdisk_location']

        vm_path = '/'.join([self.mountpoint, name])
        if not os.path.exists(vm_path):
            os.mkdir(vm_path)

        if small:
            bootdisk_path = '/'.join([self.mountpoint, name, "bootdiskfast.raw"])
        else:
            bootdisk_path = '/'.join([self.mountpoint, name, "bootdisk.raw"])
        if not os.path.exists(bootdisk_path):
            template_server = General.get_template_server()
            bootdisk_url = urlparse.urljoin(template_server, bootdisk_path_remote)
            logging.log(1, 'Template url: {0}'.format(bootdisk_url))
            logging.log(1, 'Bootdisk path: {0}'.format(bootdisk_path))

            GeneralHypervisor.download_to_vpool(bootdisk_url, bootdisk_path)

            # When running with devstack need to set kvm group
            if GeneralOpenStack.is_openstack_present():
                General.execute_command("chgrp kvm {0}".format(bootdisk_path))

            cmd = "virt-install --connect qemu:///system -n {name} -r {ram} --disk {bootdisk_path},device=disk,format=raw,bus=virtio --import --graphics vnc,listen=0.0.0.0 --vcpus=1 --network network=default,mac=RANDOM,model=e1000 --boot hd"
            cmd = cmd.format(name=name,
                             bootdisk_path=bootdisk_path,
                             ram=ram)
            out, error, _ = General.execute_command(cmd)
            logging.log(1, 'cmd: ---')
            logging.log(1, cmd)
            logging.log(1, 'stdout: ---')
            logging.log(1, out)
            logging.log(1, 'stderr: ---')
            logging.log(1, error)
Example #8
    def ovs_2468_verify_no_mds_files_left_after_remove_vpool_test():
        """
        Verify MDS presence after vpool removal
        """
        vpools = GeneralVPool.get_vpools()
        vpool_names = [vpool.name for vpool in vpools]
        command = "find /mnt -name '*mds*'"
        mdsvpoolnames = []

        out = General.execute_command(command + " | wc -l")[0].strip()
        if out != '0':
            mdsvpoolnames = [line.split('/')[-1] for line in General.execute_command(command)[0].splitlines()]

        mds_files_still_in_filesystem = ""

        for mdsvpoolname in mdsvpoolnames:
            if mdsvpoolname.split('_')[1] not in vpool_names:
                mds_files_still_in_filesystem += mdsvpoolname + "\n"

        assert len(mds_files_still_in_filesystem) == 0,\
            "MDS files still present in filesystem after remove vpool test:\n %s" % mds_files_still_in_filesystem
Example #9
    def check_reachability_test():
        """
        Verify the management center is reachable
        """
        if GeneralManagementCenter.is_devstack_installed() is False:
            logger.info('No devstack/openstack present')
            return

        management_centers = GeneralManagementCenter.get_mgmt_centers()
        issues_found = ""

        for mgmtcenter in management_centers:
            out, err, _ = General.execute_command("ping {0} -c 1".format(mgmtcenter.ip))
            if "Destination Host Unreachable" in out:
                issues_found += "Management center {0} with ip {1}\n".format(mgmtcenter.name, mgmtcenter.ip)

        assert issues_found == "", "Following management centers could not be reached:\n{0}".format(issues_found)
Example #10
    def add_alba_backend(name, wait=True):
        """
        Put an ALBA backend in the model
        :param name: Name of the backend
        :param wait: Wait for backend to enter RUNNING state
        :return: Newly created ALBA backend
        """
        backend = GeneralBackend.get_by_name(name)
        if backend is None:
            backend = GeneralBackend.add_backend(name, 'alba')
            alba_backend = AlbaBackend(GeneralAlba.api.add('alba/backends', {'backend_guid': backend.guid})['guid'])
            if wait is True:
                GeneralAlba.wait_for_alba_backend_status(alba_backend)

        out, err, _ = General.execute_command('etcdctl ls /ovs/alba/asdnodes')
        if err == '' and len(out):
            AlbaNodeController.model_local_albanode()

        return GeneralBackend.get_by_name(name).alba_backend
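A small usage sketch (import path assumed):

from ci.tests.general.general_alba import GeneralAlba

# Creates the backend if it does not exist yet and, with wait=True, blocks until it reports RUNNING
alba_backend = GeneralAlba.add_alba_backend('mybackend', wait=True)  # 'mybackend' is a placeholder
print(alba_backend.backend.status)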
Example #11
def setup():
    """
    Setup for Virtual Machine package, will be executed when any test in this package is being executed
    Make necessary changes before being able to run the tests
    :return: None
    """
    autotest_config = General.get_config()
    backend_name = autotest_config.get('backend', 'name')
    assert backend_name, "Please fill out a valid backend name in autotest.cfg file"

    # Download the template
    cmd = '[ -d {0} ] && echo "Dir exists" || echo "Dir does not exist"'.format(GeneralVMachine.template_target_folder)
    out, err, _ = General.execute_command(cmd)
    if err:
        GeneralVMachine.logger.error("Error while executing command {0}: {1}".format(cmd, err))
    if 'not' not in out:
        General.execute_command('rm -rf {0}'.format(GeneralVMachine.template_target_folder))
        General.execute_command('mkdir {0}'.format(GeneralVMachine.template_target_folder))
    grid_ip = General.get_config().get('main', 'grid_ip')

    if grid_ip.split('.')[0] == '172' and grid_ip.split('.')[1] == '20':
        server_location = 'http://172.20.3.8/templates/openvstorage'
    else:
        server_location = 'http://sso-qpackages-loch.cloudfounders.com/templates/openvstorage'

    GeneralVMachine.logger.info("Getting template from {0}".format(server_location))
    out, err, _ = General.execute_command('wget -P {0} {1}{2}{3}'.format(GeneralVMachine.template_target_folder, server_location, '/fio_debian/', GeneralVMachine.template_image))
    if err:
        GeneralVMachine.logger.error("Error while downloading template: {0}".format(err))
    out, err, _ = General.execute_command('chown root {0}{1}'.format(GeneralVMachine.template_target_folder, GeneralVMachine.template_image))
    if err:
        GeneralVMachine.logger.error("Error while changing user owner to root for template: {0}".format(err))

    GeneralAlba.prepare_alba_backend()
    GeneralManagementCenter.create_generic_mgmt_center()
    GeneralVPool.add_vpool()
Example #12
    def is_openstack_present():
        """
        Check if OpenStack is installed
        :return: True if OpenStack is installed
        """
        return bool(General.execute_command("ps aux | awk '/keystone/ && !/awk/'")[0])
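A usage sketch; the module path for GeneralOpenStack is an assumption.

# Hypothetical usage
from ci.tests.general.general_openstack import GeneralOpenStack

if GeneralOpenStack.is_openstack_present():
    print('devstack/OpenStack detected; kvm group ownership will be needed on boot disks')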
Example #13
    def vms_with_fio_test():
        """
        Test virtual machines with FIO
        """
        timeout = 30
        timer_step = 5
        nr_of_disks = 10
        vpool_name = General.get_config().get('vpool', 'name')
        vpool = GeneralVPool.get_vpool_by_name(vpool_name=vpool_name)
        assert vpool, "No vpool found where one was expected"
        for disk_number in range(nr_of_disks):
            disk_name = "disk-{0}".format(disk_number)
            GeneralVMachine.logger.info("Starting RAW disk creation")
            template_folder = GeneralVMachine.template_target_folder
            image_name = GeneralVMachine.template_image
            out, err, _ = General.execute_command('qemu-img convert -O raw {0}{1} /mnt/{2}/{3}.raw'.format(template_folder, image_name, vpool_name, disk_name))
            if err:
                GeneralVMachine.logger.error("Error while creating raw disk: {0}".format(err))

        vpool = GeneralVPool.get_vpool_by_name(vpool_name=vpool_name)
        assert len(vpool.vdisks) == nr_of_disks, "Only {0} out of {1} VDisks have been created".format(len(vpool.vdisks), nr_of_disks)

        for vm_number in range(nr_of_disks):
            machine_name = "machine-{0}".format(vm_number)
            disk_name = "disk-{0}".format(vm_number)
            GeneralVMachine.logger.info("Starting vmachine creation from RAW disk")
            out, err, _ = General.execute_command('virt-install --connect qemu:///system -n {0} -r 512 --disk /mnt/{1}/{2}.raw,'
                                                  'device=disk --noautoconsole --graphics vnc,listen=0.0.0.0 --vcpus=1 --network network=default,mac=RANDOM,'
                                                  'model=e1000 --import'.format(machine_name, vpool_name, disk_name))
            if err:
                GeneralVMachine.logger.error("Error while creating vmachine: {0}".format(err))

        counter = timeout / timer_step
        while counter > 0:
            vms = GeneralVMachine.get_vmachines()
            if len(vms) == nr_of_disks:
                counter = 0
            else:
                counter -= 1
                time.sleep(timer_step)
        vms = GeneralVMachine.get_vmachines()
        assert len(vms) == nr_of_disks, "Only {0} out of {1} VMachines have been created after {2} seconds".format(len(vms), nr_of_disks, timeout)

        # Waiting for 5 minutes of FIO activity on the vmachines
        time.sleep(300)
        vms = GeneralVMachine.get_vmachines()
        for vm in vms:
            assert vm.hypervisor_status == 'RUNNING', "Machine {0} has wrong status on the hypervisor: {1}".format(vm.name, vm.hypervisor_status)

        for vm_number in range(nr_of_disks):
            vmachine_name = "machine-{0}".format(vm_number)
            GeneralVMachine.logger.info("Removing {0} vmachine".format(vmachine_name))
            out, err, _ = General.execute_command('virsh destroy {0}'.format(vmachine_name))
            if err:
                GeneralVMachine.logger.error("Error while stopping vmachine: {0}".format(err))
            out, err, _ = General.execute_command('virsh undefine {0}'.format(vmachine_name))
            if err:
                GeneralVMachine.logger.error("Error while removing vmachine: {0}".format(err))

        counter = timeout / timer_step
        while counter > 0:
            vms = GeneralVMachine.get_vmachines()
            if len(vms):
                counter -= 1
                time.sleep(timer_step)
            else:
                counter = 0
        vms = GeneralVMachine.get_vmachines()
        assert len(vms) == 0, "Still some machines left on the vpool after waiting for {0} seconds: {1}".format(timeout, [vm.name for vm in vms])

        GeneralVMachine.logger.info("Removing vpool vdisks from {0} vpool".format(vpool_name))
        out, err, _ = General.execute_command("rm -rf /mnt/{0}/*.raw".format(vpool_name))
        if err:
            GeneralVMachine.logger.error("Error while removing vdisks: {0}".format(err))

        counter = timeout / timer_step
        while counter > 0:
            vpool = GeneralVPool.get_vpool_by_name(vpool_name=vpool_name)
            if len(vpool.vdisks):
                counter -= 1
                time.sleep(timer_step)
            else:
                counter = 0
        vpool = GeneralVPool.get_vpool_by_name(vpool_name=vpool_name)
        assert len(vpool.vdisks) == 0, "Still some disks left on the vpool after waiting {0} seconds: {1}".format(timeout, vpool.vdisks_guids)
Example #14
    def validate_alba_backend_sanity_without_claimed_disks(alba_backend):
        """
        Validate whether the ALBA backend is configured correctly
        :param alba_backend: ALBA backend
        :return: None
        """
        # Attribute validation
        assert alba_backend.available is True,\
            'ALBA backend {0} is not available'.format(alba_backend.backend.name)
        assert len(alba_backend.presets) >= 1,\
            'No preset found for ALBA backend {0}'.format(alba_backend.backend.name)
        assert len([default for default in alba_backend.presets if default['is_default'] is True]) == 1,\
            'Could not find default preset for backend {0}'.format(alba_backend.backend.name)
        assert alba_backend.backend.backend_type.code == 'alba',\
            'Backend type for ALBA backend is {0}'.format(alba_backend.backend.backend_type.code)
        assert alba_backend.backend.status == 'RUNNING',\
            'Status for ALBA backend is {0}'.format(alba_backend.backend.status)

        # Validate ABM and NSM services
        storagerouters = GeneralStorageRouter.get_storage_routers()
        storagerouters_with_db_role = [sr for sr in storagerouters if GeneralStorageRouter.has_roles(storagerouter=sr, roles='DB') is True and sr.node_type == 'MASTER']

        assert len(alba_backend.abm_services) == len(storagerouters_with_db_role),\
            'Not enough ABM services found'
        assert len(alba_backend.nsm_services) == len(storagerouters_with_db_role),\
            'Not enough NSM services found'

        # Validate ALBA backend configuration structure
        alba_backend_key = '/ovs/alba/backends'
        assert Configuration.dir_exists(key=alba_backend_key) is True,\
            'Configuration does not contain key {0}'.format(alba_backend_key)

        actual_config_keys = [key for key in Configuration.list(alba_backend_key)]
        expected_config_keys = ['global_gui_error_interval', alba_backend.guid, 'default_nsm_hosts']
        optional_config_keys = ['verification_factor']

        expected_keys_amount = 0
        for optional_key in optional_config_keys:
            if optional_key in actual_config_keys:
                expected_keys_amount += 1

        for expected_key in expected_config_keys:
            if not re.match(Toolbox.regex_guid, expected_key):
                expected_keys_amount += 1
            assert expected_key in actual_config_keys,\
                'Key {0} was not found in tree {1}'.format(expected_key, alba_backend_key)

        for actual_key in list(actual_config_keys):
            if re.match(Toolbox.regex_guid, actual_key):
                actual_config_keys.remove(actual_key)  # Remove all alba backend keys
        assert len(actual_config_keys) == expected_keys_amount,\
            'Another key was added to the {0} tree'.format(alba_backend_key)

        this_alba_backend_key = '{0}/{1}'.format(alba_backend_key, alba_backend.guid)
        actual_keys = [key for key in Configuration.list(this_alba_backend_key)]
        expected_keys = ['maintenance']
        assert actual_keys == expected_keys,\
            'Actual keys: {0} - Expected keys: {1}'.format(actual_keys, expected_keys)

        maintenance_key = '{0}/maintenance'.format(this_alba_backend_key)
        actual_keys = [key for key in Configuration.list(maintenance_key)]
        expected_keys = ['nr_of_agents', 'config']
        assert set(actual_keys) == set(expected_keys),\
            'Actual keys: {0} - Expected keys: {1}'.format(actual_keys, expected_keys)
        # @TODO: Add validation for config values

        # Validate ASD node configuration structure
        alba_nodes = GeneralAlba.get_alba_nodes()
        assert len(alba_nodes) > 0,\
            'Could not find any ALBA nodes in the model'
        alba_node_key = '/ovs/alba/asdnodes'
        actual_keys = [key for key in Configuration.list(alba_node_key)]
        assert len(alba_nodes) == len(actual_keys),\
            'Amount of ALBA nodes in model: {0} >< amount of ALBA nodes in configuration: {1}.'.format(len(alba_nodes),
                                                                                                       len(actual_keys))
        for alba_node in alba_nodes:
            assert alba_node.node_id in actual_keys,\
                'ALBA node with ID {0} not present in configuration'.format(alba_node.node_id)

            actual_asdnode_keys = [key for key in Configuration.list('{0}/{1}'.format(alba_node_key, alba_node.node_id))]
            expected_asdnode_keys = ['config', 'services']
            assert actual_asdnode_keys == expected_asdnode_keys,\
                'Actual keys: {0} - Expected keys: {1}'.format(actual_asdnode_keys, expected_asdnode_keys)

            actual_config_keys = [key for key in Configuration.list('{0}/{1}/config'.format(alba_node_key, alba_node.node_id))]
            expected_config_keys = ['main', 'network']
            assert set(actual_config_keys) == set(expected_config_keys),\
                'Actual keys: {0} - Expected keys: {1}'.format(actual_config_keys, expected_config_keys)
            # @TODO: Add validation for main and network values

        # Validate Arakoon configuration structure
        arakoon_abm_key = '/ovs/arakoon/{0}/config'.format(alba_backend.abm_services[0].service.name).replace('arakoon-', '')
        arakoon_nsm_key = '/ovs/arakoon/{0}/config'.format(alba_backend.nsm_services[0].service.name).replace('arakoon-', '')
        assert Configuration.exists(key=arakoon_abm_key, raw=True) is True,\
            'Configuration key {0} does not exist'.format(arakoon_abm_key)
        assert Configuration.exists(key=arakoon_nsm_key, raw=True) is True,\
            'Configuration key {0} does not exist'.format(arakoon_nsm_key)
        # @TODO: Add validation for config values

        # Validate maintenance agents
        actual_amount_agents = len([service for node_services in [alba_node.client.list_maintenance_services() for alba_node in alba_nodes] for service in node_services])
        expected_amount_agents = 1
        assert actual_amount_agents == expected_amount_agents,\
            'Amount of maintenance agents is incorrect. Found {0} - Expected {1}'.format(actual_amount_agents,
                                                                                         expected_amount_agents)

        # Validate arakoon services
        machine_ids = [sr.machine_id for sr in storagerouters_with_db_role]
        abm_service_name = alba_backend.abm_services[0].service.name
        nsm_service_name = alba_backend.nsm_services[0].service.name
        for storagerouter in storagerouters_with_db_role:
            root_client = SSHClient(endpoint=storagerouter, username='******')
            for service_name in [abm_service_name, nsm_service_name]:
                assert GeneralService.has_service(name=service_name, client=root_client) is True,\
                    'Service {0} not deployed on Storage Router {1}'.format(service_name, storagerouter.name)
                exitcode, output = GeneralService.get_service_status(name=service_name, client=root_client)
                assert exitcode is True,\
                    'Service {0} not running on Storage Router {1} - {2}'.format(service_name, storagerouter.name,
                                                                                 output)
                out, err, _ = General.execute_command('arakoon --who-master -config {0}'.format(Configuration.get_configuration_path('/ovs/arakoon/{0}/config'.format(abm_service_name.replace('arakoon-', '')))))
                assert out.strip() in machine_ids,\
                    'Arakoon master is {0}, but should be 1 of "{1}"'.format(out.strip(), ', '.join(machine_ids))
Example #15
    def check_scrubbing_test():
        """
        Check scrubbing of vdisks test
        """
        initial_counter = 100
        step = 5
        vdisk = None
        vpool_name = General.get_config().get('vpool', 'name')
        vpool = GeneralVPool.get_vpool_by_name(vpool_name=vpool_name)
        assert vpool, "No vpool found where one was expected"

        template_folder = GeneralVMachine.template_target_folder
        image_name = GeneralVMachine.template_image

        disk_name = "scrubdisk"
        GeneralVMachine.logger.info("Starting RAW disk creation")
        out, err, _ = General.execute_command('qemu-img convert -O raw {0}{1} /mnt/{2}/{3}.raw'.format(template_folder, image_name, vpool_name, disk_name))
        if err:
            GeneralVMachine.logger.error("Error while creating raw disk: {0}".format(err))

        def snapshot_vdisk(vdisk):
            metadata = {'label': 'snap-' + vdisk.name,
                        'is_consistent': True,
                        'timestamp': time.time(),
                        'is_automatic': False,
                        'is_sticky': False}
            VDiskController.create_snapshot(vdisk.guid, metadata)

        counter = initial_counter
        while counter and vdisk is None:
            time.sleep(step)
            vdisk = VDiskList.get_by_devicename_and_vpool('/' + disk_name + '.raw', vpool)
            counter -= step
        assert counter > 0, "Vdisk with name {0} didn't appear in the model after {1} seconds".format(disk_name, initial_counter)
        # snapshot disks for the first time
        snapshot_vdisk(vdisk)
        counter = initial_counter
        while counter > 0:
            time.sleep(step)
            out, err, _ = General.execute_command('dd if=/dev/zero of=/mnt/{0}/{1}.raw bs=10K count=1000 conv=notrunc'.format(vpool_name, disk_name))
            counter -= step
            snapshot_vdisk(vdisk)

        # saving disk 'stored' info / the only attribute that is lowered after scrubbing
        vdisk.invalidate_dynamics(['statistics'])
        disk_backend_data = vdisk.statistics['stored']

        # deleting middle snapshots
        for snapshot in vdisk.snapshots[1:-1]:
            VDiskController.delete_snapshot(vdisk.guid, snapshot['guid'])

        # starting scrubber
        try:
            GenericController.execute_scrub()
            # waiting for model to catch up
            counter = initial_counter
            while counter > 0:
                time.sleep(step)
                vdisk.invalidate_dynamics(['statistics'])
                # checking result of scrub work
                if vdisk.statistics['stored'] < disk_backend_data:
                    GeneralVMachine.logger.info("It took {0} seconds for the value to change from {1} to {2}\n".format((initial_counter - counter) * step,
                                                                                                                       disk_backend_data,
                                                                                                                       vdisk.statistics['stored']))
                    break
                counter -= step
        finally:
            # removing vdisk
            GeneralVMachine.logger.info("Removing vpool vdisks from {0} vpool".format(vpool_name))
            out, err, _ = General.execute_command("rm -rf /mnt/{0}/*.raw".format(vpool_name))
            if err:
                GeneralVMachine.logger.error("Error while removing vdisk: {0}".format(err))

        assert counter > 0, "Scrubbing didn't run as expected, backend size of vdisk remained at {0}:\n".format(disk_backend_data)