Example 1
def modify_system_backing(request):
    """
    Issues in this fixture:
    - Hardcoded compute name
        - the compute could be in a bad state
        - does not work for a CPE lab
    - Locks/unlocks 6 times (could be only 3 times)
    - Does not check the original storage backing (the lab could be configured with local_lvm by default)

    """
    hostname, storage_backing = request.param

    LOG.fixture_step("Modify {} storage backing to {}".format(
        hostname, storage_backing))
    host_helper.lock_host(hostname)
    host_helper.set_host_storage_backing(hostname,
                                         inst_backing=storage_backing,
                                         lock=False)
    host_helper.unlock_hosts(hostname)

    def revert_host():
        LOG.fixture_step(
            "Revert {} storage backing to local_image".format(hostname))
        host_helper.lock_host(hostname)
        host_helper.set_host_storage_backing(hostname,
                                             inst_backing='local_image',
                                             lock=False)
        host_helper.unlock_hosts(hostname)

    request.addfinalizer(revert_host)

    return storage_backing
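
The issues listed in the docstring above point to a leaner variant: look up the host's current backing instead of assuming local_image, skip the modification when the host already has the requested backing, and let set_host_storage_backing() drive the lock/unlock cycle. A minimal sketch, assuming the check_first, lock and unlock keyword arguments behave as shown in Examples 4 and 6 (fixture decorator and imports omitted, as in the surrounding examples):

def modify_system_backing(request):
    """Hypothetical rework of the fixture above."""
    hostname, storage_backing = request.param
    original_backing = host_helper.get_host_instance_backing(hostname)

    LOG.fixture_step("Modify {} storage backing to {} if needed".format(
        hostname, storage_backing))
    host_helper.set_host_storage_backing(hostname,
                                         inst_backing=storage_backing,
                                         check_first=True,
                                         lock=True,
                                         unlock=True)

    def revert_host():
        LOG.fixture_step("Revert {} storage backing to {} if needed".format(
            hostname, original_backing))
        host_helper.set_host_storage_backing(hostname,
                                             inst_backing=original_backing,
                                             check_first=True,
                                             lock=True,
                                             unlock=True)

    request.addfinalizer(revert_host)

    return storage_backing
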
def test_set_hosts_storage_backing_min(instance_backing, number_of_hosts):
    """
    Modify hosts storage backing if needed so that system has minimal number of hosts in given instance backing

    Args:
        instance_backing:
        number_of_hosts:

    Test Steps:
        - Calculate the hosts to be configured based on test params
        - Configure hosts to meet given criteria
        - Check number of hosts in given instance backing is as specified

    """
    LOG.tc_step("Determine the hosts to configure")
    hosts = host_helper.get_up_hypervisors()
    hosts_len = len(hosts)
    host_num_mapping = {'all': hosts_len, 'two': 2, 'one': 1}
    number_of_hosts = host_num_mapping[number_of_hosts]

    hosts_with_backing = host_helper.get_hosts_in_storage_backing(
        instance_backing)
    if len(hosts_with_backing) >= number_of_hosts:
        LOG.info("Already have {} hosts in {} backing. Do nothing".format(
            len(hosts_with_backing), instance_backing))
        return

    candidate_hosts = get_candidate_hosts(number_of_hosts=number_of_hosts)

    number_to_config = number_of_hosts - len(hosts_with_backing)
    hosts_to_config = list(set(candidate_hosts) -
                           set(hosts_with_backing))[0:number_to_config]

    LOG.tc_step(
        "Delete vms if any to prepare for system configuration change with best effort"
    )
    vm_helper.delete_vms(fail_ok=True)

    LOG.tc_step("Configure following hosts to {} backing: {}".format(
        hosts_to_config, instance_backing))
    for host in hosts_to_config:
        HostsToRecover.add(host)
        host_helper.set_host_storage_backing(host=host,
                                             inst_backing=instance_backing,
                                             unlock=False,
                                             wait_for_configured=False)

    host_helper.unlock_hosts(hosts_to_config,
                             check_hypervisor_up=True,
                             fail_ok=False)

    LOG.tc_step("Waiting for hosts in {} aggregate".format(instance_backing))
    for host in hosts_to_config:
        host_helper.wait_for_host_in_instance_backing(
            host, storage_backing=instance_backing)

    LOG.tc_step("Check number of {} hosts is at least {}".format(
        instance_backing, number_of_hosts))
    assert number_of_hosts <= len(host_helper.get_hosts_in_storage_backing(instance_backing)), \
        "Number of {} hosts is less than {} after configuration".format(instance_backing, number_of_hosts)
Example 3
 def revert_host():
     LOG.fixture_step(
         "Revert {} storage backing to local_image".format(hostname))
     host_helper.lock_host(hostname)
     host_helper.set_host_storage_backing(hostname,
                                          inst_backing='local_image',
                                          lock=False)
     host_helper.unlock_hosts(hostname)
Example 4
 def revert_host():
     LOG.fixture_step("Revert {} storage backing to {} if needed".format(
         target_host, original_backing))
     host_helper.set_host_storage_backing(target_host,
                                          inst_backing=original_backing,
                                          check_first=True,
                                          lock=True,
                                          unlock=True)
Example 5
        def cleanup():

            if not system_helper.is_storage_system():
                skip("This test requires a storage system")

            profiles_created = self._pop_cleanup_list('profile')
            old_new_types = self._pop_cleanup_list('local_storage_type')

            # Add hosts to module level recovery fixture in case of modify or unlock fail in following class level
            # recovery attempt.
            for item in old_new_types:
                HostsToRecover.add(item[0], scope='module')

            exceptions = []
            try:
                LOG.fixture_step("(class) Delete created storage profiles")
                while profiles_created:
                    storage_helper.delete_storage_profile(
                        profile=profiles_created.pop())

            except Exception as e:
                LOG.exception(e)
                exceptions.append(e)

            try:
                LOG.fixture_step(
                    "(class) Revert local storage backing for {}".format(
                        old_new_types))
                while old_new_types:
                    host_to_revert, old_type, _ = old_new_types.pop()
                    LOG.info("Revert {} local storage to {}".format(
                        host_to_revert, old_type))
                    host_helper.set_host_storage_backing(host=host_to_revert,
                                                         inst_backing=old_type,
                                                         unlock=True)

            except Exception as e:
                LOG.exception(e)
                exceptions.append(e)

            assert not exceptions, "Failure occurred. Errors: {}".format(
                exceptions)
Example 6
def test_something(host_to_modify, storage_backing):
    """
    Test parametrize the test function instead of the test fixture.

    Args:
        host_to_modify (str): fixture that returns the hostname under test
        storage_backing (str): storage backing to configure

    Setups:
        - Select a host and record the storage backing before test starts    (module)

    Test Steps:
        - Modify host storage backing to given storage backing if not already on it
        - Create a flavor with specified storage backing
        - Boot vm from above flavor

    Teardown:
        - Delete created vm, volume, flavor
        - Modify host storage backing to its original config if not already on it     (module)

    """
    # Modify host storage backing with check_first=True, so it will not modify if the host is already in that backing.
    # If lock_host() has to be done inside the test case, set swact=True so it handles the CPE case.
    LOG.tc_step(
        "Modify {} storage backing to {} if not already has the matching storage backing"
        .format(host_to_modify, storage_backing))
    host_helper.set_host_storage_backing(host_to_modify,
                                         inst_backing=storage_backing,
                                         check_first=True,
                                         lock=True,
                                         unlock=True)

    LOG.tc_step("Create a flavor with specified storage backing")
    flv_id = nova_helper.create_flavor(name='test_avoid_flv',
                                       storage_backing=storage_backing)[1]
    ResourceCleanup.add(resource_type='flavor', resource_id=flv_id)

    LOG.tc_step("Boot vm from above flavor")
    vm_id = vm_helper.boot_vm(name='test_avoid_vm', flavor=flv_id)[1]
    ResourceCleanup.add(resource_type='vm', resource_id=vm_id)
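
The Setups/Teardown sections of the docstring above assume a module-scoped host_to_modify fixture that records the host's original backing before the test and reverts it afterwards; that fixture is not shown here. A minimal sketch combining the revert pattern from Example 4 with pytest's finalizer API (the host selection and fixture name are assumptions, and the pytest import is omitted like the other imports in these examples):

@pytest.fixture(scope='module')
def host_to_modify(request):
    # Hypothetical module-level fixture assumed by the docstring of Example 6.
    target_host = host_helper.get_up_hypervisors()[0]
    original_backing = host_helper.get_host_instance_backing(target_host)

    def revert_host():
        LOG.fixture_step("Revert {} storage backing to {} if needed".format(
            target_host, original_backing))
        host_helper.set_host_storage_backing(target_host,
                                             inst_backing=original_backing,
                                             check_first=True,
                                             lock=True,
                                             unlock=True)

    request.addfinalizer(revert_host)

    return target_host
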
Example 7
def prepare_hosts(request):
    """
    Setup:
        Attempt to convert all computes to expected storage backing.
        Skip test if unsuccessful.

    Args:
        request: expected host storage backing to run the test

    Returns: hosts storage backing

    Teardown:
        Restore hosts to original state
    """
    expected_storage_backing = request.param
    avail_hosts = host_helper.get_hosts_in_storage_backing(
        storage_backing=expected_storage_backing)
    all_hosts = host_helper.get_hypervisors()
    modified_hosts = {}
    locked_hosts = []
    avail_num = len(avail_hosts)

    # Try to convert all available hypervisor hosts to the expected storage backing
    for host in all_hosts:
        if host not in avail_hosts:
            original_storage = host_helper.get_host_instance_backing(host)
            return_code, msg = host_helper.set_host_storage_backing(
                host=host, inst_backing=expected_storage_backing, fail_ok=True)
            if return_code == 0:
                avail_num += 1
                modified_hosts[host] = original_storage
            elif return_code == 1:  # Host locked, but cannot modify to the expected storage backing
                locked_hosts.append(host)
            else:
                skip("Host {} cannot be locked. Error: {}".format(host, msg))

    # Skip test if config failed
    if avail_num < 2:
        skip("Less than two hosts are successfully modified to {} backing".
             format(expected_storage_backing))

    # Teardown to restore hosts to original storage backing
    def restore_hosts():
        LOG.debug("Modifying hosts backing to original states..")
        host_helper.unlock_hosts(locked_hosts)
        for host in modified_hosts:
            host_helper.set_host_storage_backing(host, modified_hosts[host])

    request.addfinalizer(restore_hosts)

    return request.param
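
Because prepare_hosts() reads the expected backing from request.param, a test would parametrize it indirectly, assuming the function is registered with @pytest.fixture (the decorator is not shown above). A usage sketch; the test name and parameter values are illustrative only:

@pytest.mark.parametrize('prepare_hosts', ['local_image', 'remote'], indirect=True)
def test_with_prepared_backing(prepare_hosts):
    # prepare_hosts returns the storage backing the hosts were converted to.
    storage_backing = prepare_hosts
    LOG.tc_step("Run the test against {} backed hosts".format(storage_backing))
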
Example 8
 def restore_hosts():
     LOG.debug("Modifying hosts backing to original states..")
     host_helper.unlock_hosts(locked_hosts)
     for host in modified_hosts:
         host_helper.set_host_storage_backing(host, modified_hosts[host])
Example 9
    def setup_local_storage(self, request, get_target_host):
        local_storage = request.param
        host = get_target_host

        def cleanup():

            if not system_helper.is_storage_system():
                skip("This test requires a storage system")

            profiles_created = self._pop_cleanup_list('profile')
            old_new_types = self._pop_cleanup_list('local_storage_type')

            # Add hosts to module level recovery fixture in case of modify or unlock fail in following class level
            # recovery attempt.
            for item in old_new_types:
                HostsToRecover.add(item[0], scope='module')

            exceptions = []
            try:
                LOG.fixture_step("(class) Delete created storage profiles")
                while profiles_created:
                    storage_helper.delete_storage_profile(
                        profile=profiles_created.pop())

            except Exception as e:
                LOG.exception(e)
                exceptions.append(e)

            try:
                LOG.fixture_step(
                    "(class) Revert local storage backing for {}".format(
                        old_new_types))
                while old_new_types:
                    host_to_revert, old_type, _ = old_new_types.pop()
                    LOG.info("Revert {} local storage to {}".format(
                        host_to_revert, old_type))
                    host_helper.set_host_storage_backing(host=host_to_revert,
                                                         inst_backing=old_type,
                                                         unlock=True)

            except Exception as e:
                LOG.exception(e)
                exceptions.append(e)

            assert not exceptions, "Failure occurred. Errors: {}".format(
                exceptions)

        request.addfinalizer(cleanup)

        origin_lvg = host_helper.get_host_instance_backing(host)
        if origin_lvg != local_storage:
            self._add_to_cleanup_list(to_cleanup=(host, origin_lvg,
                                                  local_storage),
                                      cleanup_type='local_storage_type')
            LOG.fixture_step(
                "(class) Set {} local storage backing to {}".format(
                    host, local_storage))
            host_helper.set_host_storage_backing(host,
                                                 inst_backing=local_storage,
                                                 check_first=False)

        return local_storage, host
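
The cleanup code in Examples 5 and 9 depends on the class helpers _add_to_cleanup_list() and _pop_cleanup_list(), which are not shown. A plausible minimal implementation, assuming the class keeps a dict of lists keyed by cleanup type (only the call signatures are taken from the examples; the class name and storage layout are assumptions):

class TestLocalStorage:
    # Hypothetical backing store for the cleanup helpers used above.
    _cleanup_lists = {'profile': [], 'local_storage_type': []}

    def _add_to_cleanup_list(self, to_cleanup, cleanup_type):
        # Record an item (a profile name, or a (host, old_type, new_type)
        # tuple) so the class-level finalizer can undo it later.
        self._cleanup_lists[cleanup_type].append(to_cleanup)

    def _pop_cleanup_list(self, cleanup_type):
        # Hand the accumulated items to the finalizer and reset the list.
        items = list(self._cleanup_lists[cleanup_type])
        self._cleanup_lists[cleanup_type] = []
        return items
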
Example 10
def _test_storage_profile(personality, from_backing, to_backing):
    """
    This test creates a storage profile and then applies it to a node with
    identical hardware, assuming one exists.

    Storage profiles do not apply on controller nodes.  Storage profiles can be
    applied on controller+compute nodes, compute nodes and storage nodes.

    Arguments:
    - personality (string) - controller, compute or storage
    - from_backing (string) - image, remote or None
    - to_backing (string) - image, remote or None

    Test Steps:
    1.  Query system and determine which nodes have compatible hardware.
    2.  Create a storage profile on one of those nodes
    3.  Apply the created storage profile on a compatible node*
    4.  Ensure the storage profiles have been successfully applied.

    * If the node is a compute node or a controller+compute, we will also change
      the backend if required for additional coverage.

    Returns:
    - Nothing
    """

    global PROFILES_TO_DELETE
    PROFILES_TO_DELETE = []

    # Skip if test is not applicable to hardware under test
    if personality == 'controller' and not system_helper.is_aio_system():
        skip("Test does not apply to controller hosts without subtype compute")

    hosts = system_helper.get_hosts(personality=personality)
    if not hosts:
        skip("No hosts of type {} available".format(personality))

    if (from_backing == "remote" or to_backing == "remote") and \
            not system_helper.is_storage_system():
        skip("This test doesn't apply to systems without storage hosts")

    LOG.tc_step("Identify hardware compatible hosts")
    hash_to_hosts = get_hw_compatible_hosts(hosts)

    # Pick the hardware group that has the most compatible hosts
    current_size = 0
    candidate_hosts = []
    for value in hash_to_hosts:
        candidate_size = len(hash_to_hosts[value])
        if candidate_size > current_size:
            current_size = candidate_size
            candidate_hosts = hash_to_hosts[value]
    LOG.info(
        "This is the total set of candidate hosts: {}".format(candidate_hosts))

    if len(candidate_hosts) < 2:
        skip("Insufficient hardware compatible hosts to run test")

    # Rsync lab setup dot files between controllers
    con_ssh = ControllerClient.get_active_controller()
    _rsync_files_to_con1(con_ssh=con_ssh, file_to_check="force.txt")

    # Take the hardware compatible hosts and check if any of them already have
    # the backend that we want.  This will save us test time.
    new_to_backing = None
    if personality == "compute":
        from_hosts = []
        to_hosts = []
        for host in candidate_hosts:
            host_backing = host_helper.get_host_instance_backing(host)
            if host_backing == from_backing:
                from_hosts.append(host)
            elif host_backing == to_backing:
                to_hosts.append(host)
            else:
                pass
        LOG.info(
            "Candidate hosts that already have the right from backing {}: {}".
            format(from_backing, from_hosts))
        LOG.info(
            "Candidate hosts that already have the right to backing {}: {}".
            format(to_backing, to_hosts))

        # Determine what hosts to use
        if not from_hosts and to_hosts:
            to_host = random.choice(to_hosts)
            candidate_hosts.remove(to_host)
            from_host = random.choice(candidate_hosts)
        elif not to_hosts and from_hosts:
            from_host = random.choice(from_hosts)
            candidate_hosts.remove(from_host)
            to_host = random.choice(candidate_hosts)
        elif not to_hosts and not from_hosts:
            to_host = random.choice(candidate_hosts)
            candidate_hosts.remove(to_host)
            from_host = random.choice(candidate_hosts)
        else:
            to_host = random.choice(to_hosts)
            from_host = random.choice(from_hosts)

        LOG.info("From host is: {}".format(from_host))
        LOG.info("To host is: {}".format(to_host))

        LOG.tc_step(
            "Check from host backing and convert to {} if necessary".format(
                from_backing))
        host_helper.set_host_storage_backing(from_host, from_backing)
        system_helper.wait_for_host_values(
            from_host,
            availability=HostAvailState.AVAILABLE,
            timeout=120,
            fail_ok=False)

        LOG.tc_step(
            "Check to host backing and convert to {} if necessary".format(
                to_backing))
        new_to_backing = host_helper.set_host_storage_backing(
            to_host, to_backing)
    elif personality == "controller":
        # For now, we don't want to reinstall controller-0 since it will
        # default to pxeboot, but this could be examined as a possible enhancement.
        from_host = "controller-0"
        to_host = "controller-1"

        LOG.info("From host is: {}".format(from_host))
        LOG.info("To host is: {}".format(to_host))

        LOG.tc_step(
            "Check from host backing and convert to {} if necessary".format(
                from_backing))
        host_helper.set_host_storage_backing(from_host, from_backing)

        LOG.tc_step(
            "Check to host backing and convert to {} if necessary".format(
                to_backing))
        new_to_backing = host_helper.set_host_storage_backing(
            to_host, to_backing)
    else:
        # Backing doesn't apply to storage nodes so just pick from compatible hardware
        from_host = random.choice(candidate_hosts)
        candidate_hosts.remove(from_host)
        to_host = random.choice(candidate_hosts)

    LOG.tc_step(
        "Create storage and interface profiles on the from host {}".format(
            from_host))
    prof_name = 'storprof_{}_{}'.format(
        from_host, time.strftime('%Y%m%d_%H%M%S', time.localtime()))
    storage_helper.create_storage_profile(from_host, profile_name=prof_name)
    PROFILES_TO_DELETE.append(prof_name)

    # Deleting VMs in case the remaining host(s) cannot handle all VMs
    # migrating on lock, particularly important in the case of AIO-DX systems.
    LOG.tc_step(
        "Delete all VMs and lock the host before applying the storage profile")
    vm_helper.delete_vms()
    HostsToRecover.add(to_host, scope='function')
    system_helper.wait_for_host_values(from_host,
                                       availability=HostAvailState.AVAILABLE,
                                       timeout=120,
                                       fail_ok=False)
    system_helper.wait_for_host_values(to_host,
                                       availability=HostAvailState.AVAILABLE,
                                       timeout=120,
                                       fail_ok=False)

    # Negative test #1 - attempt to apply profile on unlocked host (should be rejected)
    LOG.tc_step('Apply the storage-profile {} onto unlocked host:{}'.format(
        prof_name, to_host))
    cmd = 'host-apply-storprofile {} {}'.format(to_host, prof_name)
    rc, msg = cli.system(cmd, fail_ok=True)
    assert rc != 0, msg
    host_helper.lock_host(to_host, swact=True)

    # 3 conditions to watch for: no partitions, ready partitions and in-use
    # partitions on the compute.  If in-use, delete and freshly install host.
    # If ready, delete all ready partitions to make room for potentially new
    # partitions.  If no partitions, just delete nova-local lvg.
    if personality == "compute":

        # Negative test #2 - attempt to apply profile onto host with existing
        # nova-local (should be rejected)
        LOG.tc_step(
            'Apply the storage-profile {} onto host with existing nova-local:{}'
            .format(prof_name, to_host))
        cmd = 'host-apply-storprofile {} {}'.format(to_host, prof_name)
        rc, msg = cli.system(cmd, fail_ok=True)
        assert rc != 0, msg

        # If we were simply switching backing (without applying a storage
        # profile), the nova-local lvg deletion can be omitted according to design
        LOG.tc_step("Delete nova-local lvg on to host {}".format(to_host))
        cli.system("host-lvg-delete {} nova-local".format(to_host))

        in_use = storage_helper.get_host_partitions(to_host, "In-Use")

        if in_use:

            # Negative test #3 - attempt to apply profile onto host with existing
            # in-use partitions (should be rejected)
            LOG.tc_step('Apply the storage-profile {} onto host with existing '
                        'in-use partitions:{}'.format(prof_name, to_host))
            cmd = 'host-apply-storprofile {} {}'.format(to_host, prof_name)
            rc, msg = cli.system(cmd, fail_ok=True)
            assert rc != 0, msg

            LOG.tc_step(
                "In-use partitions found.  Must delete the host and freshly install before proceeding."
            )
            LOG.info("Host {} has in-use partitions {}".format(
                to_host, in_use))
            lab = InstallVars.get_install_var("LAB")
            lab.update(create_node_dict(lab['compute_nodes'], 'compute'))
            lab['boot_device_dict'] = create_node_boot_dict(lab['name'])
            install_helper.open_vlm_console_thread(to_host)

            LOG.tc_step("Delete the host {}".format(to_host))
            cli.system("host-bulk-export")
            cli.system("host-delete {}".format(to_host))
            assert len(
                system_helper.get_controllers()) > 1, "Host deletion failed"

            cli.system("host-bulk-add hosts.xml")
            system_helper.wait_for_host_values(
                to_host, timeout=6000, availability=HostAvailState.ONLINE)

            wait_for_disks(to_host)

        ready = storage_helper.get_host_partitions(to_host, "Ready")
        if ready:
            LOG.tc_step(
                "Ready partitions have been found.  Must delete them before profile application"
            )
            LOG.info("Host {} has Ready partitions {}".format(to_host, ready))
            for uuid in reversed(ready):
                storage_helper.delete_host_partition(to_host, uuid)
            # Don't bother restoring in this case since the system should be
            # functional after profile is applied.

        LOG.tc_step('Apply the storage-profile {} onto host:{}'.format(
            prof_name, to_host))
        cli.system('host-apply-storprofile {} {}'.format(to_host, prof_name))

        LOG.tc_step("Unlock to host")
        host_helper.unlock_host(to_host)

        to_host_backing = host_helper.get_host_instance_backing(to_host)
        LOG.info("To host backing was {} and is now {}".format(
            new_to_backing, to_host_backing))
        assert to_host_backing == from_backing, "Host backing was not changed on storage profile application"

    if personality == "storage":
        if not storage_helper.is_ceph_healthy():
            skip("Cannot run test when ceph is not healthy")

        LOG.tc_step("Delete the host {}".format(to_host))
        cli.system("host-bulk-export")
        cli.system("host-delete {}".format(to_host))
        cli.system("host-bulk-add hosts.xml")
        system_helper.wait_for_host_values(to_host,
                                           timeout=6000,
                                           availability=HostAvailState.ONLINE)

        wait_for_disks(to_host)

        LOG.tc_step('Apply the storage-profile {} onto host:{}'.format(
            prof_name, to_host))
        cli.system('host-apply-storprofile {} {}'.format(to_host, prof_name))

        # Re-provision interfaces through lab_setup.sh
        LOG.tc_step("Reprovision the host as necessary")
        files = ['interfaces']
        con_ssh = ControllerClient.get_active_controller()
        delete_lab_setup_files(con_ssh, to_host, files)

        rc, msg = install_helper.run_lab_setup()
        assert rc == 0, msg

        LOG.tc_step("Unlock to host")
        host_helper.unlock_host(to_host)

    if personality == "controller":

        # Note, install helper doesn't work on all labs.  Some labs don't
        # display BIOS type which causes install helper to fail
        lab = InstallVars.get_install_var("LAB")
        lab.update(create_node_dict(lab['controller_nodes'], 'controller'))
        lab['boot_device_dict'] = create_node_boot_dict(lab['name'])
        install_helper.open_vlm_console_thread(to_host)

        LOG.tc_step("Delete the host {}".format(to_host))
        cli.system("host-bulk-export")
        cli.system("host-delete {}".format(to_host))
        assert len(system_helper.get_controllers()) > 1, "Host deletion failed"

        cli.system("host-bulk-add hosts.xml")
        system_helper.wait_for_host_values(to_host,
                                           timeout=6000,
                                           availability=HostAvailState.ONLINE)

        wait_for_disks(to_host)

        LOG.tc_step("Apply the storage-profile {} onto host:{}".format(
            prof_name, to_host))
        cli.system("host-apply-storprofile {} {}".format(to_host, prof_name))

        # Need to re-provision everything on node through lab_setup (except storage)
        LOG.tc_step("Reprovision the host as necessary")
        files = [
            'interfaces', 'cinder_device', 'vswitch_cpus', 'shared_cpus',
            'extend_cgts_vg', 'addresses'
        ]
        con_ssh = ControllerClient.get_active_controller()
        delete_lab_setup_files(con_ssh, to_host, files)

        rc, msg = install_helper.run_lab_setup()
        assert rc == 0, msg

        LOG.tc_step("Unlock to host")
        host_helper.unlock_host(to_host)

        to_host_backing = host_helper.get_host_instance_backing(to_host)
        LOG.info("To host backing was {} and is now {}".format(
            new_to_backing, to_host_backing))
        assert to_host_backing == from_backing, "Host backing was not changed on storage profile application"
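
get_hw_compatible_hosts() above is assumed to return a dict mapping a hardware fingerprint to the hosts that share it, which is why the test iterates hash_to_hosts and keeps the largest group. A hypothetical sketch of such a grouping; the fingerprint source and the get_host_disks helper name are assumptions, not taken from the examples:

import hashlib


def get_hw_compatible_hosts(hosts):
    # Hypothetical: group hosts by a hash of their disk inventory so that a
    # storage profile taken from one host can be applied to another host in
    # the same group.
    hash_to_hosts = {}
    for host in hosts:
        disks = storage_helper.get_host_disks(host)  # assumed helper name
        fingerprint = hashlib.sha1(
            ','.join(sorted(str(d) for d in disks)).encode()).hexdigest()
        hash_to_hosts.setdefault(fingerprint, []).append(host)
    return hash_to_hosts
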
Example 11
def test_set_hosts_storage_backing_equal(instance_backing, number_of_hosts):
    """
    Modify hosts storage backing if needed so that system has exact number
    of hosts in given instance backing

    Args:
        instance_backing:
        number_of_hosts:

    Test Steps:
        - Calculate the hosts to be configured based on test params
        - Configure hosts to meet given criteria
        - Check number of hosts in given instance backing is as specified

    """
    host_num_mapping = {'zero': 0, 'one': 1, 'two': 2}
    number_of_hosts = host_num_mapping[number_of_hosts]
    LOG.tc_step("Calculate the hosts to be configured based on test params")
    candidate_hosts = get_candidate_hosts(number_of_hosts=number_of_hosts)

    hosts_with_backing = \
        host_helper.get_hosts_in_storage_backing(instance_backing)
    if len(hosts_with_backing) == number_of_hosts:
        LOG.info("Already have {} hosts in {} backing. Do "
                 "nothing".format(number_of_hosts, instance_backing))
        return

    elif len(hosts_with_backing) < number_of_hosts:
        backing_to_config = instance_backing
        number_to_config = number_of_hosts - len(hosts_with_backing)
        hosts_pool = list(set(candidate_hosts) - set(hosts_with_backing))
    else:
        backing_to_config = 'remote' if 'image' in instance_backing else \
            'local_image'
        number_to_config = len(hosts_with_backing) - number_of_hosts
        hosts_pool = hosts_with_backing

    LOG.tc_step("Delete vms if any to prepare for system configuration "
                "change with best effort")
    vm_helper.delete_vms(fail_ok=True)

    hosts_to_config = hosts_pool[0:number_to_config]
    LOG.tc_step("Configure following hosts to {} backing: "
                "{}".format(hosts_to_config, backing_to_config))

    for host in hosts_to_config:
        host_helper.set_host_storage_backing(host=host,
                                             inst_backing=backing_to_config,
                                             unlock=False,
                                             wait_for_configured=False)
        HostsToRecover.add(host)

    host_helper.unlock_hosts(hosts_to_config,
                             check_hypervisor_up=True,
                             fail_ok=False)

    LOG.tc_step("Waiting for hosts in {} aggregate".format(backing_to_config))
    for host in hosts_to_config:
        host_helper.wait_for_host_in_instance_backing(
            host, storage_backing=backing_to_config)

    LOG.tc_step("Check number of {} hosts is {}".format(
        instance_backing, number_of_hosts))
    assert number_of_hosts == \
        len(host_helper.get_hosts_in_storage_backing(instance_backing)), \
        "Number of {} hosts is not {} after " \
        "configuration".format(instance_backing, number_of_hosts)