Example #1
0
def test_heat_vm_scale_after_actions(vm_scaling_stack, actions):
    """
    Test VM auto scaling after disruptive actions:
        Create heat stack for auto scaling using NestedAutoScale.yaml, perform
        the requested actions (swact/migrate/reboot), then vm scale up and down.

    Args:
        vm_scaling_stack: fixture providing (stack_name, vm_id) for the
            auto-scaling heat stack
        actions (str): '-'-separated action names, e.g. "swact-live_migrate"

    Test Steps:
        - Create a heat stack for auto scaling vm ()
        - Verify heat stack is created successfully
        - Verify heat resources are created
        - live migrate the vm if not sx
        - cold migrate the vm if not sx
        - swact if not sx
        - reboot -f vm host
        - trigger auto scale by boosting cpu usage in the vm (using dd)
        - verify it scale up to the max number of vms (3)
        - trigger scale down by killing dd in the vm
        - verify the vm scale down to min number (1)
        - Delete Heat stack and verify resource deletion
    """
    stack_name, vm_id = vm_scaling_stack
    vm_helper.wait_for_vm_pingable_from_natbox(vm_id=vm_id)

    # Parse the action string once so every membership check below is an
    # exact-match list lookup rather than a fragile substring test.
    actions = actions.split('-')
    is_simplex = system_helper.is_aio_simplex()

    if not is_simplex:
        if "swact" in actions:
            LOG.tc_step("Swact before scale in/out")
            host_helper.swact_host()

        if "live_migrate" in actions:
            LOG.tc_step("live migrate vm before scale in/out")
            vm_helper.live_migrate_vm(vm_id)

        if "cold_migrate" in actions:
            LOG.tc_step("cold migrate vm before scale in/out")
            vm_helper.cold_migrate_vm(vm_id)

    if "host_reboot" in actions:
        if is_simplex:
            # On simplex the only host is controller-0; the vm is expected to
            # recover in place after the reboot.
            host_helper.reboot_hosts('controller-0')
            vm_helper.wait_for_vm_status(vm_id,
                                         status=VMStatus.ACTIVE,
                                         timeout=600,
                                         check_interval=10,
                                         fail_ok=False)
            vm_helper.wait_for_vm_pingable_from_natbox(
                vm_id, timeout=VMTimeout.DHCP_RETRY)
        else:
            LOG.tc_step("evacuate vm before scale in/out")
            vm_host = vm_helper.get_vm_host(vm_id=vm_id)
            vm_helper.evacuate_vms(host=vm_host, vms_to_check=vm_id)

    LOG.tc_step(
        "Wait for {} vms to auto scale out to {} after running dd in vm(s)".
        format(stack_name, 3))
    vm_helper.wait_for_auto_vm_scale_out(stack_name, expt_max=3)

    LOG.tc_step(
        "Wait for {} vms to auto scale in to {} after killing dd processes in vms"
        .format(stack_name, 1))
    vm_helper.wait_for_auto_vm_scale_in(stack_name, expt_min=1)
Example #2
0
def test_system_persist_over_host_reboot(host_type, stx_openstack_required):
    """
    Validate Inventory summary over reboot of one of the controller see if data persists over reboot

    Args:
        host_type (str): 'controller', 'compute' or storage-type host to reboot
        stx_openstack_required: fixture ensuring stx-openstack is applied

    Test Steps:
        - capture Inventory summary for list of hosts on system service-list and neutron agent-list
        - reboot the current Controller-Active
        - Wait for reboot to complete
        - Validate key items from inventory persist over reboot

    """
    # Pick the host to reboot; for 'compute' it is deferred until a vm is
    # booted (the vm's host is rebooted), hence host may stay None here.
    if host_type == 'controller':
        host = system_helper.get_active_controller_name()
    elif host_type == 'compute':
        if system_helper.is_aio_system():
            skip("No compute host for AIO system")

        host = None
    else:
        hosts = system_helper.get_hosts(personality='storage')
        if not hosts:
            skip(msg="Lab has no storage nodes. Skip rebooting storage node.")

        host = hosts[0]

    LOG.tc_step("Pre-check for system status")
    system_helper.wait_for_services_enable()
    up_hypervisors = host_helper.get_up_hypervisors()
    network_helper.wait_for_agents_healthy(hosts=up_hypervisors)

    LOG.tc_step("Launch a vm")
    vm_id = vm_helper.boot_vm(cleanup='function')[1]
    vm_helper.wait_for_vm_pingable_from_natbox(vm_id)

    if host is None:
        host = vm_helper.get_vm_host(vm_id)

    LOG.tc_step("Reboot a {} node and wait for reboot completes: {}".format(host_type, host))
    HostsToRecover.add(host)
    host_helper.reboot_hosts(host)
    host_helper.wait_for_hosts_ready(host)

    LOG.tc_step("Check vm is still active and pingable after {} reboot".format(host))
    vm_helper.wait_for_vm_status(vm_id, status=VMStatus.ACTIVE, fail_ok=False)
    vm_helper.wait_for_vm_pingable_from_natbox(vm_id=vm_id, timeout=VMTimeout.DHCP_RETRY)

    LOG.tc_step("Check neutron agents and system services are in good state after {} reboot".format(host))
    network_helper.wait_for_agents_healthy(up_hypervisors)
    system_helper.wait_for_services_enable()

    if host in up_hypervisors:
        LOG.tc_step("Check {} can still host vm after reboot".format(host))
        if vm_helper.get_vm_host(vm_id) != host:
            # Give the freshly rebooted hypervisor a moment to settle before
            # live-migrating the vm back onto it.
            time.sleep(30)
            vm_helper.live_migrate_vm(vm_id, destination_host=host)
Example #3
0
def test_reboot_standby_controller(no_simplex):
    """
    Force-reboot the standby controller, wait for it to come back to the
    'available' state, then verify all kubernetes pods are healthy.
    """
    controllers = system_helper.get_active_standby_controllers()
    standby_controller = controllers[1]

    LOG.tc_step("'sudo reboot -f' from {}".format(standby_controller))
    host_helper.reboot_hosts(standby_controller, wait_for_offline=True,
                             wait_for_reboot_finish=True, force_reboot=True)

    system_helper.wait_for_hosts_states(standby_controller, timeout=360,
                                        check_interval=30,
                                        availability=['available'])
    kube_helper.wait_for_pods_healthy(check_interval=30, all_namespaces=True)
Example #4
0
def sys_uncontrolled_swact(number_of_times=1):
    """
    Trigger an uncontrolled swact by force-rebooting the ACTIVE controller,
    then verify all vms recover and are pingable.

    Args:
        number_of_times (int): number of reboot/verify iterations to run

    :return:
    """
    for i in range(0, number_of_times):
        active, standby = system_helper.get_active_standby_controllers()
        LOG.tc_step("Doing iteration of {} of total iteration {}".format(
            i, number_of_times))
        # Rebooting the active controller forces the uncontrolled swact;
        # log the host actually being rebooted (was wrongly logging standby).
        LOG.tc_step("'sudo reboot -f' from {}".format(active))
        host_helper.reboot_hosts(hostnames=active)

        LOG.tc_step("Check vms status after controller swact")
        vms = get_all_vms()
        vm_helper.wait_for_vms_values(vms, fail_ok=False, timeout=600)

        for vm in vms:
            vm_helper.wait_for_vm_pingable_from_natbox(vm_id=vm)
Example #5
0
def sys_reboot_standby(number_of_times=1):
    """
    Force-reboot the STANDBY controller repeatedly and verify all vms stay
    healthy and pingable after each reboot.

    Args:
        number_of_times (int): number of reboot/verify iterations to run

    :return:
    """
    # AIO systems can need a DHCP retry before the vm answers pings again.
    timeout = VMTimeout.DHCP_RETRY if system_helper.is_aio_system(
    ) else VMTimeout.PING_VM
    for i in range(0, number_of_times):
        active, standby = system_helper.get_active_standby_controllers()
        LOG.tc_step("Doing iteration of {} of total iteration {}".format(
            i, number_of_times))
        LOG.tc_step("'sudo reboot -f' from {}".format(standby))
        host_helper.reboot_hosts(hostnames=standby)

        # Fixed typo in step message ("stanby" -> "standby").
        LOG.tc_step("Check vms status after standby controller reboot")
        vms = get_all_vms()
        vm_helper.wait_for_vms_values(vms, fail_ok=False, timeout=600)

        for vm in vms:
            vm_helper.wait_for_vm_pingable_from_natbox(vm_id=vm,
                                                       timeout=timeout)
Example #6
0
def _test_status_firewall_reboot():
    """
    Test iptables status after reboot of controller

    Test Steps:
        - Stop iptables service
        - Confirm iptables service has stopped
        - Reboot the controller being tested
        - Confirm iptables service is online
        - Repeat for second controller
    """
    LOG.tc_step("Getting the controller(s)")
    controllers = system_helper.get_controllers()
    for controller in controllers:
        with host_helper.ssh_to_host(controller) as con_ssh:
            LOG.tc_step("Stopping iptables service")
            cmd = 'service iptables stop'
            con_ssh.exec_sudo_cmd(cmd)
            LOG.tc_step("checking iptables status")
            cmd = 'service iptables status'
            code, output = con_ssh.exec_sudo_cmd(cmd)
            # Bug fix: the original `'Active: inactive' or 'Active: failed' in
            # output` was always truthy (non-empty string literal), so the
            # assert could never fail. Each candidate must be tested against
            # the command output.
            assert ('Active: inactive' in output
                    or 'Active: failed' in output), \
                "iptables service did not stop running on host {}"\
                .format(controller)

        LOG.tc_step("Rebooting {}".format(controller))
        HostsToRecover.add(controller)
        host_helper.reboot_hosts(controller)

        with host_helper.ssh_to_host(controller) as con_ssh:
            LOG.tc_step(
                "Checking iptables status on host {} after reboot".format(
                    controller))
            cmd = 'service iptables status | grep --color=never Active'
            code, output = con_ssh.exec_sudo_cmd(cmd)
            assert 'active' in output, "iptables service did not start after reboot on host {}".format(
                controller)
Example #7
0
def reboot_hosting_node(vm_type, vm_id, force_reboot=False):
    """
    Reboot the host that currently runs the given vm, then run the
    vm-type-specific rescue procedure against that vm.
    """
    hosting_node = vm_helper.get_vm_host(vm_id)
    host_helper.reboot_hosts(hosting_node, force_reboot=force_reboot)
    rescue_vm(vm_type, vm_id)