Example #1
0
def test_swact_failed_controller_negative(fail_controller):
    """
    TC610_3
    Verify that swacting to a failed controller is rejected

    Test Setup:
        - Reset the standby controller

    Test Steps:
        - Attempt to swact from the active controller
        - Verify that the swact was rejected and the active controller is the same

    Teardown:
        - Wait until the controller is online again

    Returns:

    """
    if not fail_controller:
        skip("Couldn't put controller into failed state.")

    pre_active = system_helper.get_active_controller_name()

    LOG.tc_step("Attempting to swact to failed controller.")
    code, out = host_helper.swact_host(fail_ok=True)

    LOG.tc_step("Verifying that the swact didn't occur.")
    # Return code 1 indicates the swact request was rejected.
    assert code == 1, "FAIL: The swact wasn't rejected"
    post_active = system_helper.get_active_controller_name()
    assert post_active == pre_active, "FAIL: The active controller was changed. " \
                                      "Previous: {} Current: {}".format(pre_active, post_active)
Example #2
0
def test_swact_controller_host():
    """
    SWACT Controller host - it must fail on simplex
    """
    is_simplex = system_helper.is_aio_simplex()

    LOG.info("Active Controller Before SWACT: {}".format(
        system_helper.get_active_controller_name()))
    LOG.info("Standby Controller Before SWACT: {}".format(
        system_helper.get_standby_controller_name()))

    # On simplex swact must fail
    host_helper.swact_host(fail_ok=is_simplex)

    LOG.info("Active Controller After SWACT: {}".format(
        system_helper.get_active_controller_name()))
    LOG.info("Standby Controller After SWACT: {}".format(
        system_helper.get_standby_controller_name()))

    # Re-SWACT only if duplex
    if not is_simplex:
        host_helper.swact_host()
Example #3
0
def upgrade_host_lock_unlock(host, con_ssh=None):
    """
    Swact away from the host if it is the active controller, then lock and
    unlock it before upgrade; swact back afterwards if a swact was done.

    Args:
        host (str): hostname or id in string format
        con_ssh (SSHClient): ssh connection to the active controller

    Returns: (return_code(int), msg(str))
        (0, "Host ... is locked and unlocked successfully") on success
        (1, <error msg>) if the lock, unlock, or swact-back step fails
    """
    LOG.info("Checking if host {} is active ....".format(host))

    active_controller = system_helper.get_active_controller_name()
    swact_back = False
    if active_controller == host:
        # Can't lock the active controller; swact away first and remember to
        # swact back at the end.
        LOG.tc_step("Swact active controller and ensure active controller is changed")
        exit_code, output = host_helper.swact_host(hostname=active_controller)
        assert 0 == exit_code, "{} is not recognized as active controller".format(active_controller)
        active_controller = system_helper.get_active_controller_name()
        swact_back = True

    LOG.info("Host {}; doing lock/unlock to the host ....".format(host))
    rc, output = host_helper.lock_host(host, con_ssh=con_ssh)
    # rc == -1 presumably means the host was already locked — treated as
    # success here; TODO confirm against lock_host's return codes.
    if rc != 0 and rc != -1:
        err_msg = "Lock host {} rejected".format(host)
        LOG.warn(err_msg)
        return 1, err_msg

    rc, output = host_helper.unlock_host(host, available_only=True, con_ssh=con_ssh)
    if rc != 0:
        err_msg = "Unlock host {} failed: {}".format(host, output)
        return 1, err_msg

    if swact_back:
        # Give services time to settle after unlock before checking host state.
        time.sleep(60)

        if not system_helper.wait_for_host_values(host, timeout=360, fail_ok=True,
                                                           operational=HostOperState.ENABLED,
                                                           availability=HostAvailState.AVAILABLE):
            err_msg = " Swacting to standby is not possible because {} is not in available state " \
                  "within  the specified timeout".format(host)
            assert False, err_msg
        LOG.tc_step("Swact active controller back and ensure active controller is changed")
        # NOTE(review): `active_controller` was re-read after the first swact,
        # so this swacts away from the current active controller (back onto
        # `host`) — confirm that is the intent.
        rc, output = host_helper.swact_host(hostname=active_controller)
        if rc != 0:
            err_msg = "Failed to swact back to host {}: {}".format(host, output)
            return 1, err_msg

        LOG.info("Swacted and  {}  has become active......".format(host))

    return 0, "Host {} is  locked and unlocked successfully".format(host)
Example #4
0
def test_reboot_hosts(hostnames):
    """
    Reboot the given host(s) and verify the reboot succeeds.

    Args:
        hostnames (str|list): hostname(s) to reboot. The aliases
            'active_controller' and 'standby_controller' are resolved to the
            actual controller hostnames before rebooting.
    """
    LOG.tc_step("Processing hostnames provided...")
    system_hosts = system_helper.get_hosts()

    is_str = False
    if isinstance(hostnames, str):
        is_str = True
        hostnames = [hostnames]

    # Bug fix: iterate over a snapshot copy. The original aliased the same
    # list (`tmp_hosts = hostnames`) and then removed/appended while
    # iterating it, which silently skips elements.
    for host in list(hostnames):
        if host == 'active_controller':
            hostnames.remove(host)
            host = system_helper.get_active_controller_name()
            hostnames.append(host)
        elif host == 'standby_controller':
            hostnames.remove(host)
            host = system_helper.get_standby_controller_name()
            hostnames.append(host)
        if host not in system_hosts:
            skip("Host(s) not found in system. Host(s) requested: {}."
                 "Hosts in system: {}".format(hostnames, system_hosts))

    if is_str:
        hostnames = hostnames[0]

    LOG.tc_step("Rebooting following host(s): {}".format(hostnames))
    results = host_helper.reboot_hosts(hostnames)
    LOG.tc_step("Results: {}".format(results))
    # reboot_hosts returns a tuple whose first element is the return code.
    assert results[0] == 0
Example #5
0
def setup_host_install(request, get_patch_name):
    """Fixture: lock a non-active hypervisor, apply a test patch, and register teardown to revert it."""
    con_ssh = ControllerClient.get_active_controller()
    hosts = host_helper.get_up_hypervisors()
    # Pick the last hypervisor; fall back to the second-last if the last one
    # happens to be the active controller.
    host = hosts[-1]
    if host == system_helper.get_active_controller_name():
        host = hosts[-2]
    host_helper.lock_host(host)

    patch_name = get_patch_name
    LOG.fixture_step("Applying {} to patching controller".format(patch_name))
    con_ssh.exec_sudo_cmd('sw-patch upload test_patches/{}.patch'.format(patch_name))
    con_ssh.exec_sudo_cmd('sw-patch apply {}'.format(patch_name))

    def delete_patch():
        # Teardown: remove/delete the patch, reinstall on the host, unlock.
        LOG.fixture_step("Removing {} from patching controller".format(patch_name))
        con_ssh.exec_sudo_cmd('sw-patch remove {}'.format(patch_name))
        con_ssh.exec_sudo_cmd('sw-patch delete {}'.format(patch_name))
        LOG.fixture_step("Reinstalling {} to revert the patch".format(patch_name))
        con_ssh.exec_sudo_cmd('sw-patch host-install {}'.format(host),
                              expect_timeout=timeout.CLI_TIMEOUT)
        host_helper.unlock_host(host)

    request.addfinalizer(delete_patch)
    return patch_name, host
Example #6
0
    def test_lock_with_vms_mig_fail(self, target_hosts_negative):
        """
        Test lock host with vms on it - Negative test. i.e., lock should be rejected

        Args:
            target_hosts_negative: target host(s) to perform lock

        Prerequisites: hosts storage backing are pre-configured to storage backing under test.
            ie., only 1 host should support the storage backing under test.
        Test Setups:
            - Set instances quota to 10 if it was less than 8
            - Determine storage backing(s) under test, i.e., storage backings supported by only 1 host on the system
            - Create flavors with storage extra specs set based on storage backings under test
            - Create vms_to_test that can be live migrated using created flavors
            - Determine target host(s) to perform lock based on which host(s) have the most vms_to_test
        Test Steps:
            - Lock target host
            - Verify lock rejected and vms status unchanged
            - Repeat above steps if more than one target host
        Test Teardown:
            - Delete created vms and volumes
            - Delete created flavors
            - Unlock locked target host(s)

        """
        target_hosts, storages_to_test = target_hosts_negative
        LOG.info(
            "Negative test: host-lock attempt on host(s) with {} storage backing(s). \n"
            "Host(s) to attempt lock: {}".format(storages_to_test,
                                                 target_hosts_negative))
        vms_per_host = vm_helper.get_vms_per_host()
        for host in target_hosts:
            # If the target is currently the active controller, swact away so
            # the lock attempt is exercised on a non-active host.
            if system_helper.get_active_controller_name() == host:
                host_helper.swact_host(hostname=host)
                host_helper.wait_for_hypervisors_up(host)
                host_helper.wait_for_webservice_up(host)

            vms_on_host = vms_per_host[host]
            # Snapshot vm statuses before the lock attempt for comparison below.
            pre_vms_status = vm_helper.get_vms_info(vms=vms_on_host,
                                                    fields='Status')

            LOG.tc_step("Lock target host {}...".format(host))
            lock_code, lock_output = host_helper.lock_host(host=host,
                                                           check_first=False,
                                                           fail_ok=True,
                                                           swact=True)

            # Add locked host to cleanup list
            # (codes 0/3 presumably mean the host ended up locked and needs
            # unlocking on teardown — TODO confirm against lock_host codes)
            if lock_code in [0, 3]:
                self.hosts_locked.append(host)

            post_vms_status = vm_helper.get_vms_info(vms=vms_on_host,
                                                     fields='Status')

            LOG.tc_step("Verify lock rejected and vms status unchanged.")
            # Expected outcome of this negative test: lock rejected (one of
            # the non-success codes) and vm statuses untouched.
            assert lock_code in [1, 2, 4, 5
                                 ], "Unexpected result: {}".format(lock_output)
            assert pre_vms_status == post_vms_status, "VM(s) status has changed after host-lock {}".format(
                host)
Example #7
0
def ensure_host_provisioned(host, con_ssh=None):
    """
    Ensure the given host is provisioned, doing a lock/unlock cycle if needed.

    If the host is the active controller, swact away first and swact back once
    the lock/unlock completes.

    Args:
        host (str): hostname or id in string format
        con_ssh (SSHClient): ssh connection to the active controller

    Returns: (return_code(int), msg(str))
        (0, "Host <host> is provisioned...")

    Raises:
        exceptions.HostError: if the lock or unlock is rejected, or the host
            is still unprovisioned after the lock/unlock cycle.
    """
    LOG.info("Checking if host {} is already provisioned ....".format(host))
    # Bug fix: forward the caller's ssh connection (was hard-coded to None).
    if is_host_provisioned(host, con_ssh=con_ssh):
        # Bug fix: message previously returned the literal "{}" placeholder.
        return 0, "Host {} is provisioned".format(host)
    active_controller = system_helper.get_active_controller_name()
    swact_back = False
    if active_controller == host:
        # Can't lock the active controller; swact away first.
        LOG.tc_step("Swact active controller and ensure active controller is changed")
        exit_code, output = swact_host(hostname=active_controller)
        assert 0 == exit_code, "{} is not recognized as active controller".format(active_controller)
        active_controller = system_helper.get_active_controller_name()
        swact_back = True

    LOG.info("Host {} not provisioned ; doing lock/unlock to provision the host ....".format(host))
    rc, output = lock_host(host, con_ssh=con_ssh)
    # -1 indicates the host was already locked; any other non-zero is failure.
    if rc != 0 and rc != -1:
        err_msg = "Lock host {} rejected".format(host)
        raise exceptions.HostError(err_msg)

    rc, output = unlock_host(host, available_only=True, con_ssh=con_ssh)
    if rc != 0:
        err_msg = "Unlock host {} failed: {}".format(host, output)
        raise exceptions.HostError(err_msg)
    if swact_back:
        LOG.tc_step("Swact active controller back and ensure active controller is changed")
        exit_code, output = swact_host(hostname=active_controller)
        assert 0 == exit_code, "{} is not recognized as active controller".format(active_controller)

    LOG.info("Checking if host {} is provisioned after lock/unlock ....".format(host))
    if not is_host_provisioned(host, con_ssh=con_ssh):
        # Bug fix: include the hostname in the error (format was never applied).
        raise exceptions.HostError("Failed to provision host {}".format(host))
    # Delay for the alarm to clear . Could be improved.
    time.sleep(120)
    return 0, "Host {} is provisioned after lock/unlock".format(host)
def check_host(controller):
    """Return the active controller name, or a healthy standby controller when 'standby' is requested."""
    active = system_helper.get_active_controller_name()
    if controller != 'standby':
        return active

    good_states = (HostAvailState.AVAILABLE, HostAvailState.DEGRADED,
                   HostAvailState.ONLINE)
    candidates = system_helper.get_controllers(availability=good_states)
    candidates.remove(active)
    if not candidates:
        skip('Standby controller does not exist or not in good state')
    return candidates[0]
Example #9
0
def get_process_info(name, cmd='', pid_file='', host='', process_type='sm', con_ssh=None,
                     auth_info=Tenant.get('admin_platform')):
    """
    Get the information of the process with the specified name

    Args:
        name (str):     name of the process
        cmd (str):      path of the executable
        pid_file (str): path of the file containing the process id
        host (str):     host on which the process resides; defaults to the
                        active controller
        process_type (str):  type of service/process: 'sm', 'pmon', or
                        anything else for a plain "ps" lookup
        con_ssh:        ssh connection/client to the active controller
        auth_info:      auth info used to query the active controller name

    Returns (tuple):
        'sm':    (pid, name, impact, status, pid_file); pid is -1 when the
                 process is not in 'enabled-active' status
        'pmon':  (pid, name)
        others:  (pid, name), or (-1, '') when the process is not found

    """
    LOG.info('name:{} cmd={} pid_file={} host={} process_type={}'.format(
        name, cmd, pid_file, host, process_type))

    active_controller = system_helper.get_active_controller_name(con_ssh=con_ssh, auth_info=auth_info)
    # Default to the active controller when no host was specified.
    if not host:
        host = active_controller

    if process_type == 'sm':
        LOG.debug('to get_process_info for SM process:{} on host:{}'.format(name, host))

        # SM-managed processes live on the active controller; warn if the
        # caller asked for a different host.
        if host != active_controller:
            LOG.warn('Already swacted? host:{} is not  the active controller now. Active controller is {}'.format(
                host, active_controller))
        pid, name, impact, status, pid_file = get_process_from_sm(name, con_ssh=con_ssh, pid_file=pid_file)
        if status != 'enabled-active':
            LOG.warn('SM process is in status:{}, not "enabled-active"'.format(status))
            if 'disabl' in status:
                # Matches 'disabled'/'disabling' — likely a swact in progress,
                # so wait briefly and retry.
                LOG.warn('Wrong controller? Or controller already swacted, wait and try on the other controller')
                time.sleep(10)
                # NOTE(review): this retry drops con_ssh and returns the raw
                # result even if the status is still not enabled-active —
                # confirm this is intended.
                return get_process_from_sm(name, pid_file=pid_file)

            return -1, name, impact, status, pid_file
        else:
            return pid, name, impact, status, pid_file

    elif process_type == 'pmon':
        pid = get_pmon_process_id(pid_file, host, con_ssh=con_ssh)
        LOG.info('Found: PID={} for PMON process:{}'.format(pid, name))
        return pid, name

    else:
        # Fallback: locate the process via "ps" on the target host.
        LOG.info('Try to find the process:{} using "ps"'.format(name))

        pid = get_ancestor_process(name, host, cmd=cmd, con_ssh=con_ssh)[0]
        if -1 == pid:
            return -1, ''

        return pid, name
Example #10
0
def test_system_persist_over_host_reboot(host_type, stx_openstack_required):
    """
    Validate Inventory summary over reboot of one of the controller see if data persists over reboot

    Args:
        host_type (str): 'controller', 'compute', or anything else for storage
        stx_openstack_required: fixture ensuring stx-openstack app is applied

    Test Steps:
        - capture Inventory summary for list of hosts on system service-list and neutron agent-list
        - reboot the current Controller-Active
        - Wait for reboot to complete
        - Validate key items from inventory persist over reboot

    """
    # Select the host to reboot based on the requested host type.
    if host_type == 'controller':
        host = system_helper.get_active_controller_name()
    elif host_type == 'compute':
        if system_helper.is_aio_system():
            skip("No compute host for AIO system")

        # Defer the choice: reboot whichever compute hosts the test vm.
        host = None
    else:
        hosts = system_helper.get_hosts(personality='storage')
        if not hosts:
            skip(msg="Lab has no storage nodes. Skip rebooting storage node.")

        host = hosts[0]

    LOG.tc_step("Pre-check for system status")
    system_helper.wait_for_services_enable()
    up_hypervisors = host_helper.get_up_hypervisors()
    network_helper.wait_for_agents_healthy(hosts=up_hypervisors)

    LOG.tc_step("Launch a vm")
    vm_id = vm_helper.boot_vm(cleanup='function')[1]
    vm_helper.wait_for_vm_pingable_from_natbox(vm_id)

    if host is None:
        # 'compute' case: reboot the compute hosting the vm just launched.
        host = vm_helper.get_vm_host(vm_id)

    LOG.tc_step("Reboot a {} node and wait for reboot completes: {}".format(host_type, host))
    HostsToRecover.add(host)
    host_helper.reboot_hosts(host)
    host_helper.wait_for_hosts_ready(host)

    LOG.tc_step("Check vm is still active and pingable after {} reboot".format(host))
    vm_helper.wait_for_vm_status(vm_id, status=VMStatus.ACTIVE, fail_ok=False)
    # Longer timeout: the vm may need to re-acquire its address after reboot.
    vm_helper.wait_for_vm_pingable_from_natbox(vm_id=vm_id, timeout=VMTimeout.DHCP_RETRY)

    LOG.tc_step("Check neutron agents and system services are in good state after {} reboot".format(host))
    network_helper.wait_for_agents_healthy(up_hypervisors)
    system_helper.wait_for_services_enable()

    if host in up_hypervisors:
        LOG.tc_step("Check {} can still host vm after reboot".format(host))
        if not vm_helper.get_vm_host(vm_id) == host:
            # Brief settle time before live-migrating the vm back onto the
            # rebooted hypervisor.
            time.sleep(30)
            vm_helper.live_migrate_vm(vm_id, destination_host=host)
Example #11
0
def launch_instances(create_flavour_and_image, create_network_sanity, snapshot_from_instance):
    """Boot a VM from a snapshot on the active controller and record its id in VM_IDS."""
    global VM_IDS
    nics = [{"net-id": create_network_sanity}]
    active_host = system_helper.get_active_controller_name()
    vm_id = vm_helper.boot_vm(flavor=create_flavour_and_image["flavor1"],
                              nics=nics, source="snapshot",
                              source_id=snapshot_from_instance,
                              vm_host=active_host, cleanup="module")[1]
    VM_IDS.append(vm_id)
    return vm_id
Example #12
0
def test_lock_host(hostname, force):
    """Lock a host; expect rejection (code 1) when it is the active controller."""
    if not system_helper.host_exists(hostname):
        skip("{} does not exist".format(hostname))

    is_active = hostname == system_helper.get_active_controller_name()
    expected_codes = [1] if is_active else [-1, 0]

    rtn_code, msg = host_helper.lock_host(hostname, fail_ok=True, force=force)

    assert rtn_code in expected_codes
Example #13
0
def test_host_status():
    """
    System overview
    """
    LOG.info("Active Controller: {}".format(
        system_helper.get_active_controller_name()))
    LOG.info("Standby Controller {}".format(
        system_helper.get_standby_controller_name()))
    # List every host known to the system.
    for host in system_helper.get_hosts():
        LOG.info("Host: {}".format(host))
Example #14
0
def test_unlock_hosts():
    """Lock the standby controller and compute-1, then unlock both together."""
    active = system_helper.get_active_controller_name()
    standby = 'controller-1' if active == 'controller-0' else 'controller-0'
    targets = [standby, 'compute-1']
    system_helper.wait_for_hosts_states(targets, availability='available')

    LOG.tc_step("Lock hosts.")
    for target in targets:
        host_helper.lock_host(target)

    LOG.tc_step("Unlock hosts")
    res = host_helper.unlock_hosts(targets)

    LOG.tc_step("Show results")
    LOG.info("Unlock hosts result: {}".format(res))
Example #15
0
def test_swact_standby_controller_negative():
    """
    TC610_2
    Verify that trying to swact a standby controller is rejected

    Test Steps:
        - Get the standby controller
        - Attempt to swact the controller
        - Verify that the swact doesn't happen

    """
    standby = system_helper.get_standby_controller_name()
    pre_active = system_helper.get_active_controller_name()

    LOG.tc_step("Attempting to swact from standby controller {}".format(standby))
    code, out = host_helper.swact_host(standby, fail_ok=True)

    LOG.tc_step("Verifying that the swact didn't occur.")
    # Any non-zero return code means the swact was rejected.
    assert code != 0, "FAIL: The swact wasn't rejected"
    post_active = system_helper.get_active_controller_name()
    assert post_active == pre_active, "FAIL: The active controller was changed. " \
                                      "Previous: {} Current: {}".format(pre_active, post_active)
Example #16
0
def lock_(request):
    """Fixture: lock a host that is not the active controller (unless simplex); unlock on teardown."""
    hosts = system_helper.get_hosts()
    target = hosts[0]
    # On a non-simplex system, avoid locking the active controller.
    if target == system_helper.get_active_controller_name() and not system_helper.is_aio_simplex():
        target = hosts[1]
    host_helper.lock_host(target)

    request.addfinalizer(lambda: host_helper.unlock_host(target))
    return target
def launch_instances(create_flavors_and_images, create_network_sanity):
    """Boot a VM from image on the active controller and record its id in VM_IDS."""
    nics = [{"net-id": create_network_sanity[0]}]
    active_host = system_helper.get_active_controller_name()
    vm_id = vm_helper.boot_vm(flavor=create_flavors_and_images["flavor1"],
                              nics=nics,
                              source="image",
                              source_id=create_flavors_and_images["image"],
                              vm_host=active_host,
                              cleanup="module")[1]
    # TODO check power state RUNING
    VM_IDS.append(vm_id)
    return vm_id
Example #18
0
def _test_firewall_rules_custom(remove_custom_firewall):
    """
    Verify specified ports from the custom firewall rules are open and non-specified ports are closed.

    Skip Condition:
        - N/A

    Test Setup:
        - SCP iptables.rules from test server to lab

    Test Steps:
        - Install custom firewall rules
        - Check ports that should be both open and closed based on the custom firewall rules
        - Swact and check ports that should be both open and closed based on the custom firewall rules
        - Remove custom firewall rules
        - Check ports that are in the custom firewall rules are no longer open
        - Swact and check ports that are in the custom firewall rules are no longer open
    """
    # The following ports must be in the iptables.rules file or the test will fail
    custom_ports, firewall_rules_path = remove_custom_firewall

    LOG.tc_step("Installing custom firewall rules")
    _modify_firewall_rules(firewall_rules_path)

    active_controller, standby_controller = system_helper.get_active_standby_controllers()
    con_ssh = ControllerClient.get_active_controller()

    def _check_custom_ports(ssh_client):
        for custom_port in custom_ports:
            # Ports listed in the iptables file must be open...
            _verify_port_from_natbox(ssh_client, custom_port, port_expected_open=True)
            # ...while the adjacent, unlisted port must remain closed.
            _verify_port_from_natbox(ssh_client, custom_port + 1, port_expected_open=False)

    LOG.tc_step("Verify custom ports on {}".format(active_controller))
    _check_custom_ports(con_ssh)

    if standby_controller:
        LOG.tc_step("Swact {}".format(active_controller))
        host_helper.swact_host(active_controller)
        active_controller = system_helper.get_active_controller_name()
        con_ssh = ControllerClient.get_active_controller()

        # Repeat the same port checks on the newly active controller.
        LOG.tc_step("Verify custom ports on {}".format(active_controller))
        _check_custom_ports(con_ssh)
Example #19
0
def test_kpi_cyclictest_hypervisor(collect_kpi, prepare_test_session,
                                   get_hypervisor):
    """
    Run cyclictest on a hypervisor host and record latency KPIs.

    Args:
        collect_kpi: local kpi file path; test is skipped when falsy
        prepare_test_session: session-scope setup fixture
        get_hypervisor: chosen hypervisor hostname
    """
    if not collect_kpi:
        skip("KPI only test.  Skip due to kpi collection is not enabled")

    global testable_hypervisors
    chosen_hypervisor = get_hypervisor
    cpu_info = testable_hypervisors[chosen_hypervisor]
    # Mark the hypervisor as in use by a host-level test while cyclictest runs.
    cpu_info['for_host_test'] = True

    LOG.info(
        'Hypervisor chosen to run cyclictest: {}'.format(chosen_hypervisor))
    active_controller_name = system_helper.get_active_controller_name()
    program = os.path.join(os.path.normpath(CYCLICTEST_DIR),
                           os.path.basename(CYCLICTEST_EXE))
    LOG.debug('program={}'.format(program))

    with host_helper.ssh_to_host(chosen_hypervisor) as target_ssh:
        prep_test_on_host(target_ssh, chosen_hypervisor, program,
                          active_controller_name)
        run_log, hist_file = run_cyclictest(target_ssh,
                                            program,
                                            chosen_hypervisor,
                                            cpu_info=cpu_info)

        LOG.info("Process and upload test results")
        local_run_log, local_hist_file = fetch_results_from_target(
            target_ssh=target_ssh,
            target_host=chosen_hypervisor,
            active_con_name=active_controller_name,
            run_log=run_log,
            hist_file=hist_file)

    testable_hypervisors[chosen_hypervisor]['for_host_test'] = False

    avg_val, six_nines_val = calculate_results(run_log=local_run_log,
                                               hist_file=local_hist_file,
                                               cores_to_ignore=None,
                                               num_cores=len(
                                                   cpu_info['vm_cores']))

    # Bug fix: record avg_val under the AVG kpi name. Previously six_nines_val
    # was recorded for both KPIs and avg_val was never used.
    kpi_log_parser.record_kpi(local_kpi_file=collect_kpi,
                              kpi_name=CyclicTest.NAME_HYPERVISOR_AVG,
                              kpi_val=avg_val,
                              uptime=15,
                              unit=CyclicTest.UNIT)
    kpi_log_parser.record_kpi(local_kpi_file=collect_kpi,
                              kpi_name=CyclicTest.NAME_HYPERVISOR_6_NINES,
                              kpi_val=six_nines_val,
                              uptime=15,
                              unit=CyclicTest.UNIT)
Example #20
0
def wait_for_sm_dump_services_active(timeout=60, fail_ok=False, con_ssh=None, auth_info=Tenant.get('admin_platform')):
    """
    Wait until all sm-dump services on the active controller reach their desired states.

    Args:
        timeout (int): max seconds to wait
        fail_ok (bool): whether to return instead of raising on timeout
        con_ssh: ssh connection/client to the active controller
        auth_info: auth info used to query the active controller name

    Returns: result of host_helper.wait_for_sm_dump_desired_states

    """
    controller = system_helper.get_active_controller_name(con_ssh=con_ssh,
                                                          auth_info=auth_info)
    return host_helper.wait_for_sm_dump_desired_states(controller=controller,
                                                       timeout=timeout,
                                                       fail_ok=fail_ok)
Example #21
0
def download_patches(lab, server, patch_dir, conn_ssh=None):
    """
    Rsync patch files from a build server to the active controller.

    Args:
        lab (dict): lab info dict containing '<controller> ip' entries
        server: build server object whose ssh_conn can reach the patches
        patch_dir (str): directory on the server holding *.patch files
        conn_ssh: ssh connection to the active controller (created if None)

    Returns (dict): patch id -> path of the downloaded patch file

    """
    patches = {}

    rc, output = server.ssh_conn.exec_cmd(
        "ls -1 --color=none {}/*.patch".format(patch_dir))
    assert rc == 0, "Failed to list patch files in directory path {}.".format(
        patch_dir)

    if output is not None:
        patch_dest_dir = HostLinuxUser.get_home() + "patches/"
        active_controller = system_helper.get_active_controller_name()
        dest_server = lab[active_controller + ' ip']
        pre_opts = 'sshpass -p "{0}"'.format(HostLinuxUser.get_password())

        # Copy all patch files onto the active controller.
        server.ssh_conn.rsync(patch_dir + "/*.patch",
                              dest_server,
                              patch_dest_dir,
                              ssh_port=None,
                              pre_opts=pre_opts)

        if conn_ssh is None:
            conn_ssh = ControllerClient.get_active_controller()

        rc, output = conn_ssh.exec_cmd(
            "ls -1  {}/*.patch".format(patch_dest_dir))
        assert rc == 0, "Failed to list downloaded patch files in directory path {}.".format(
            patch_dest_dir)

        if output is not None:
            # Map each patch id (filename without extension) to its full path.
            for line in output.splitlines():
                patch_id = os.path.splitext(os.path.basename(line))[0]
                patches[patch_id] = line

            LOG.info("List of patches:\n {}".format(" ".join(patches.keys())))

    return patches
Example #22
0
def test_swact_no_standby_negative(lock_controller):
    """
    TC610_4
    Verify swact without standby controller is rejected

    Test Setup:
        - Lock the standby controller

    Test Steps:
        - Attempt to swact when no standby controller available

    Teardown:
        - Unlock the controller

    """
    LOG.tc_step("Attempting to swact when no standby controller available")
    pre_active = system_helper.get_active_controller_name()
    code, out = host_helper.swact_host(hostname=pre_active, fail_ok=True)

    LOG.tc_step("Verifying that the swact didn't occur.")
    # Return code 1 indicates the swact request was rejected.
    assert code == 1, "FAIL: The swact wasn't rejected"
    post_active = system_helper.get_active_controller_name()
    assert post_active == pre_active, "FAIL: The active controller was changed. " \
                                      "Previous: {} Current: {}".format(pre_active, post_active)
Example #23
0
def upgrade_hosts(hosts, timeout=HostTimeout.UPGRADE, fail_ok=False, con_ssh=None,
                  auth_info=Tenant.get('admin_platform'), lock=False, unlock=False):
    """
    Upgrade given hosts list one by one
    Args:
        hosts (list): list of hostname of hosts to be upgraded; the active
            controller is removed from this list (it must be upgraded first,
            separately)
        timeout (int): MAX seconds to wait for host to become online after upgrading
        fail_ok (bool): return (rc, msg) instead of raising when an upgrade fails
        con_ssh (SSHClient):
        lock (bool): whether to lock each host before upgrading
        auth_info (str):
        unlock (bool): whether to unlock each host after upgrading

    Returns (tuple):
        (0, "Hosts are upgraded and in online state.")
        (1, "Upgrade on host failed. applicable if fail_ok

    """
    LOG.info("Upgrading {}...".format(hosts))
    # The active controller cannot be upgraded here; drop it from the list.
    active_controller = system_helper.get_active_controller_name()
    if active_controller in hosts:
        hosts.remove(active_controller)

    LOG.info("Checking if active controller {} is already upgraded ....".format(active_controller))

    # NOTE(review): this membership test between the active controller's
    # target release and the other hosts' target releases looks fragile —
    # confirm it correctly detects "active controller not yet upgraded".
    if get_hosts_upgrade_target_release(active_controller) in get_hosts_upgrade_target_release(hosts):
        message = " Active controller {} is not upgraded.  Must be upgraded first".format(active_controller)
        LOG.info(message)
        return 1, message
    # keep original host

    # Upgrade order: controllers first, then storage nodes, then computes.
    controllers = sorted([h for h in hosts if "controller" in h])
    storages = sorted([h for h in hosts if "storage" in h])
    computes = sorted([h for h in hosts if h not in storages and h not in controllers])
    hosts_to_upgrade = controllers + storages + computes

    for host in hosts_to_upgrade:
        rc, output = upgrade_host(host, timeout=timeout, fail_ok=fail_ok, con_ssh=con_ssh,
                                  auth_info=auth_info, lock=lock, unlock=unlock)
        if rc != 0:
            if fail_ok:
                return rc, output
            else:
                raise exceptions.HostError(output)
        else:
            LOG.info("Host {} upgrade completed".format(host))

    return 0, "hosts {} upgrade done ".format(hosts_to_upgrade)
Example #24
0
def _test_telnet_ldap_admin_access(user_name):
    """
    Verify an ldap admin user can log in over telnet and list the sysadmin home directory.

    Args:
        user_name: username of the ldap user; should be admin for this test

    Test Steps:
        - telnet to active controller
        - login as admin password admin.
        - verify that it can ls /home/sysadmin

    Teardowns:
        - Disconnect telnet
    """

    if ProjVar.get_var('COLLECT_TELNET'):
        skip(
            'Telnet is in use for collect log. This test which require telnet will be skipped'
        )

    lab = ProjVar.get_var('LAB')
    nodes_info = node.create_node_dict(lab['controller_nodes'], 'controller')
    hostname = system_helper.get_active_controller_name()
    controller_node = nodes_info[hostname]
    # Credentials are masked in-source.
    # NOTE(review): the step log below reports `password` while the login
    # actually uses `new_password` — confirm which credential is intended.
    password = "******"
    new_password = "******"

    telnet = TelnetClient(controller_node.telnet_ip,
                          port=controller_node.telnet_port,
                          hostname=hostname,
                          user=user_name,
                          password=new_password,
                          timeout=10)
    try:
        LOG.tc_step("Telnet to lab as {} user with password {}".format(
            user_name, password))
        telnet.login(expect_prompt_timeout=30, handle_init_login=True)

        code, output = telnet.exec_cmd('ls {}'.format(
            HostLinuxUser.get_home()),
                                       fail_ok=False)
        LOG.info('output from test {}'.format(output))
        assert '*** forbidden' not in output, \
            'not able to ls to {} as admin user'.format(
                HostLinuxUser.get_home())
    finally:
        # Always disconnect the telnet session, even when the test fails.
        telnet.send('exit')
        telnet.close()
Example #25
0
def restore_sysadmin_password(current_password=None, target_password=None):
    """
    Restore the sysadmin password to its original (or given target) value.

    The OS refuses to reuse any of the last MAX_NUM_PASSWORDS_TRACKED
    passwords, so the password is first cycled through that many generated
    throw-away values before being set back to the target.

    Args:
        current_password (str|None): password currently in effect; defaults
            to the most recently recorded password for sysadmin
        target_password (str|None): password to restore to; defaults to the
            hard-coded original

    Returns:
        str|None: the restored password, or None when no restore was needed
    """
    global _host_users
    old_passwords = _host_users[('active-controller', 'sysadmin')]
    LOG.info('Restoring password for sysadmin, old_passwords:{}\n'.format(
        old_passwords))

    # Nothing to restore if the password never changed, or is already the
    # target.
    if not old_passwords or len(
            old_passwords) <= 1 or current_password == target_password:
        LOG.info('Password for sysadmin did not change, no need to restore')
        return

    current_password = old_passwords[
        -1] if current_password is None else current_password
    current_host = system_helper.get_active_controller_name()
    # Bounded history of recent passwords; generated passwords must avoid all
    # of these so the OS password-reuse check never rejects a change.
    exclude_list = deque(old_passwords[0 - MAX_NUM_PASSWORDS_TRACKED:],
                         MAX_NUM_PASSWORDS_TRACKED)

    for n in range(1, MAX_NUM_PASSWORDS_TRACKED + 1):
        new_password = security_helper.gen_linux_password(
            exclude_list=list(exclude_list), length=PASSWORD_LEGNTH)
        # TYPO FIX: log message previously said "chaning".
        LOG.info('changing password {} times: from:{} to:{}\n'.format(
            n, current_password, new_password))

        security_helper.change_linux_user_password(current_password,
                                                   new_password,
                                                   host=current_host)
        current_password = new_password
        exclude_list.append(new_password)

        LOG.info('wait after changing password of sysadmin\n')
        wait_after_change_sysadmin_password()

    original_password = '******' if target_password is None else target_password

    LOG.info('Restore password of sysadmin to:{}'.format(original_password))

    security_helper.change_linux_user_password(current_password,
                                               original_password,
                                               user=HostLinuxUser.get_user(),
                                               host=current_host)
    LOG.info(
        'Password for sysadmin is restored to:{}'.format(original_password))

    return original_password
Example #26
0
def swact_host_after_reset_sysadmin(active_controller):
    """
    Issue a host-swact against the current active controller and wait a fixed
    interval for the swact to take effect.

    Args:
        active_controller: SSH/CLI client connected to the active controller

    Any exception raised while sending the swact command is logged and
    ignored, since the connection is expected to drop during a swact.
    """
    swact_target = system_helper.get_active_controller_name()
    LOG.info('swact host:{}'.format(swact_target))

    swact_cmd = 'system host-swact {}'.format(swact_target)
    try:
        rc, out = active_controller.exec_cmd(swact_cmd)
        LOG.info('after send host-swact, got output:{}, code:{}\n'.format(
            out, rc))
    except Exception as ex:
        # Best-effort: the session may die mid-swact.
        LOG.info('ignore the exception for now, error:{}'.format(ex))

    LOG.info('Close the current connection to the active-controller')

    delay = 180
    LOG.info('wait {} seconds after host-swact {}'.format(
        delay, swact_target))
    time.sleep(delay)
Example #27
0
def check_host_file_for_vm(vm_id, expecting=True, host=None, fail_ok=True):
    """
    Check whether the vTPM backing file for a VM exists on its hosting node.

    Args:
        vm_id (str): id of the VM to check
        expecting (bool): whether the vTPM file is expected to exist
        host (str|None): hosting node name; looked up from the VM when None
        fail_ok (bool): when True, a mismatch between the actual and expected
            presence is logged instead of raising AssertionError

    Returns:
        tuple: (found (bool), expecting (bool))
    """
    LOG.info('Verify the file for vTPM exists on the hosting node for VM:' +
             vm_id)
    if host is None:
        host = vm_helper.get_vm_host(vm_id)

    instance_name = vm_helper.get_vm_instance_name(vm_id)
    vtpm_file = vtpm_base_dir.format(
        vm_id=vm_id, instance_name=instance_name) + '/' + vtpm_file_name

    # BUG FIX: the original compared `host` against the active controller
    # name, but both branches assigned the same value (`hosting_node` was
    # always equal to `host`), so the redundant lookup/branch was removed.
    with host_helper.ssh_to_host(host) as ssh_client:
        found = bool(ssh_client.file_exists(vtpm_file))

        if found:
            LOG.info('OK, found the file for vTPM:{} on host:{}'.format(
                vtpm_file, host))
            assert expecting is True or fail_ok is True, \
                'FAIL, the files supporting vTPM are NOT found on the {} as expected'.format(host)
        else:
            LOG.info('Cannot find the file for vTPM:{} on host:{}'.format(
                vtpm_file, host))
            assert expecting is False or fail_ok is True, \
                'FAIL, the files should be cleared as expected'

        if found is expecting:
            LOG.info('-this is expected')
        else:
            LOG.info('-this is NOT expected')

        return found, expecting
Example #28
0
def _test_firewall_rules_default():
    """
    Verify default ports are open.

    Test Steps:
        - Confirm iptables service is running on active controller
        - Check if lab is http(s), add corresponding port to check
        - Confirm the default ports are open
        - Swact and repeat the above steps
    """
    # Cannot test connecting to the ports as they are in use.

    default_ports = [
        123, 161, 199, 5000, 6080, 6385, 8000, 8003, 8004, 8041, 8774, 8776,
        8778, 9292, 9696, 15491
    ]

    from consts.proj_vars import ProjVar
    region = ProjVar.get_var('REGION')
    # In multi-region deployments keystone (5000) and glance (9292) are only
    # served from the primary region, so skip them elsewhere.
    if region != 'RegionOne' and region in MULTI_REGION_MAP:
        default_ports.remove(5000)
        default_ports.remove(9292)

    # IDIOM FIX: append the port chosen by the conditional expression rather
    # than using a conditional expression purely for its side effects.
    default_ports.append(8443 if CliAuth.get_var('HTTPS') else 8080)

    active_controller = system_helper.get_active_controller_name()
    con_ssh = ControllerClient.get_active_controller()

    _verify_iptables_status(con_ssh, active_controller)
    _check_ports_with_netstat(con_ssh, active_controller, default_ports)

    active_controller, new_active = \
        system_helper.get_active_standby_controllers()
    if new_active:
        LOG.tc_step(
            "Swact {} and verify firewall rules".format(active_controller))
        host_helper.swact_host(active_controller)
        con_ssh = ControllerClient.get_active_controller()

        _verify_iptables_status(con_ssh, new_active)
        _check_ports_with_netstat(con_ssh, new_active, default_ports)
Example #29
0
def test_lock_unlock_active_controller():
    """
    Lock - Unlock an active controller

    On AIO simplex the (only) controller can be locked and is unlocked again
    afterwards; on all other systems locking the active controller must be
    rejected.
    """
    # NAMING FIX: local variable was misspelled 'active_conroller_host';
    # renamed to match the sibling swact test.
    active_controller_host = system_helper.get_active_controller_name()
    LOG.info("Active Controller Host: {}".format(active_controller_host))
    if system_helper.is_aio_simplex():
        host_helper.lock_host(host=active_controller_host, fail_ok=False)
        rc, output = host_helper.unlock_host(host=active_controller_host,
                                             fail_ok=True)
        # "Not patch current" means the host must install the current patch
        # before it can be unlocked; install it and retry the unlock.
        if rc == 1 and "Not patch current" in output:
            con_ssh = ControllerClient.get_active_controller()
            cmd = "sw-patch host-install controller-0"
            con_ssh.exec_sudo_cmd(cmd=cmd)
            host_helper.unlock_host(host=active_controller_host,
                                    fail_ok=False)
    else:
        rc, output = host_helper.lock_host(host=active_controller_host,
                                           fail_ok=True)
        assert rc == 1
        assert "Can not lock an active controller" in output
Example #30
0
def pre_system_clone():

    lab = InstallVars.get_install_var('LAB')

    LOG.info("Preparing lab for system clone....")

    if 'compute_nodes' in lab.keys() or 'storage_nodes' in lab.keys():
        skip(
            "The system {} is not All-in-one; clone is supported only for AIO systems"
            .format(lab))

    assert system_helper.get_active_controller_name(
    ) == 'controller-0', "controller-0 is not the active controller"
    LOG.fixture_step(
        "Checking if a USB flash drive is plugged in controller-0 node... ")
    usb_device = install_helper.get_usb_device_name()
    assert usb_device, "No USB found in controller-0"

    usb_size = install_helper.get_usb_disk_size(usb_device)
    LOG.info("Size of {} = {}".format(usb_device, usb_size))
    if not (usb_size >= 5):
        skip("Insufficient size in {} which is {}; at least 8G is required.".
             format(usb_device, usb_size))