Example #1
def test_upload_charts_via_helm_upload(copy_test_apps):
    """
    Test uploading helm charts via the helm-upload cmd directly, i.e., without
    using the sysinv cmd.
    Args:
        copy_test_apps:

    Setups:
        - Copy test files from test server to tis system (module)

    Test Steps:
        - Upload helm charts from given controller via 'helm-upload <tar_file>'
        - Verify the charts appear at /www/pages/helm_charts/ on both
            controllers (if applicable)

    """
    app_dir = copy_test_apps

    LOG.tc_step(
        "Upload helm charts via helm-upload cmd from active controller "
        "and check charts are in /www/pages/")
    file_path = container_helper.upload_helm_charts(
        tar_file=os.path.join(app_dir, HELM_TAR), delete_first=True)[1]

    if system_helper.get_standby_controller_name():
        LOG.tc_step("Swact active controller and verify uploaded charts "
                    "are synced over")
        host_helper.swact_host()
        con_ssh = ControllerClient.get_active_controller()
        charts_exist = con_ssh.file_exists(file_path)
        assert charts_exist, "{} does not exist after swact to {}".format(
            file_path, con_ssh.get_hostname())
        LOG.info("{} successfully synced after swact".format(file_path))
Example #2
def test_swact_controller_platform(wait_for_con_drbd_sync_complete):
    """
    Verify swact active controller

    Test Steps:
        - Swact active controller
        - Verify standby controller and active controller are swapped
        - Verify nodes are ready in kubectl get nodes

    """
    if system_helper.is_aio_simplex():
        skip("Simplex system detected")

    if not wait_for_con_drbd_sync_complete:
        skip(SkipSysType.LESS_THAN_TWO_CONTROLLERS)

    LOG.tc_step('retrieve active and available controllers')
    pre_active_controller, pre_standby_controller = \
        system_helper.get_active_standby_controllers()
    assert pre_standby_controller, "No standby controller available"

    LOG.tc_step("Swact active controller and ensure active controller "
                "is changed")
    host_helper.swact_host(hostname=pre_active_controller)

    LOG.tc_step("Check hosts are Ready in kubectl get nodes after swact")
    kube_helper.wait_for_nodes_ready(hosts=(pre_active_controller,
                                            pre_standby_controller),
                                     timeout=30)
Example #3
def test_swact_host(hostname, timeout, fail_ok):
    LOG.tc_step("wait for previous swact complete")
    host_helper._wait_for_openstack_cli_enable()
    system_helper.wait_for_host_values('controller-0',
                                       timeout=60,
                                       fail_ok=False,
                                       task='')
    system_helper.wait_for_host_values('controller-1',
                                       timeout=60,
                                       fail_ok=False,
                                       task='')

    LOG.tc_step("swact host")

    if fail_ok:
        code, msg = host_helper.swact_host(hostname=hostname,
                                           swact_start_timeout=timeout,
                                           fail_ok=fail_ok)
        if timeout == 1:
            assert code == 3
            host_helper.wait_for_swact_complete(hostname, fail_ok=False)
        else:
            assert code in [-1, 0, 1, 2]

    else:
        if timeout == 1:
            with raises(exceptions.HostPostCheckFailed):
                host_helper.swact_host(hostname=hostname,
                                       swact_start_timeout=1,
                                       fail_ok=False)
            host_helper.wait_for_swact_complete(hostname, fail_ok=False)
        else:
            host_helper.swact_host(hostname=hostname,
                                   swact_start_timeout=timeout,
                                   fail_ok=False)
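
The parametrization that drives test_swact_host above is not shown. A plausible sketch is below; the concrete hostname/timeout/fail_ok combinations are assumptions chosen only to exercise both branches of the test.

import pytest


# Hypothetical parametrization: cover the fail_ok path and the hard-assert
# path, including the timeout=1 case that should return code 3 or raise
# HostPostCheckFailed.
@pytest.mark.parametrize(('hostname', 'timeout', 'fail_ok'), [
    ('controller-0', 1, True),      # swact start times out, expect code 3
    ('controller-0', 120, True),    # normal swact with fail_ok
    ('controller-0', 1, False),     # expect HostPostCheckFailed to be raised
    ('controller-0', 120, False),   # normal swact, assert on any failure
])
def test_swact_host(hostname, timeout, fail_ok):
    ...  # body as in Example #3 above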
Example #4
    def remove():
        LOG.fixture_step("Removing custom firewall rules")
        user_file_dir = ProjVar.get_var('USER_FILE_DIR')
        empty_path = user_file_dir + "iptables-empty.rules"
        client = get_cli_client(central_region=True)
        client.exec_cmd('touch {}'.format(empty_path))
        _modify_firewall_rules(empty_path)

        active, standby = system_helper.get_active_standby_controllers()
        con_ssh = ControllerClient.get_active_controller()
        LOG.fixture_step("Verify custom ports on {}".format(active))
        for port in custom_ports:
            # Verifying ports that are in the iptables file are closed
            _verify_port_from_natbox(con_ssh, port, port_expected_open=False)

        if standby:
            LOG.fixture_step("Swact {}".format(active))
            host_helper.swact_host(active)

            LOG.fixture_step("Verify custom ports on {}".format(standby))
            for port in custom_ports:
                # Verifying ports that are in the iptables file are closed after swact
                _verify_port_from_natbox(con_ssh,
                                         port,
                                         port_expected_open=False)
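
The private helper _verify_port_from_natbox is not included in these examples. The sketch below shows one way it could probe a port from the NATBox; the NATBoxClient accessor, the LAB floating-ip lookup and the nc-based probe are assumptions, not the actual implementation.

def _verify_port_from_natbox(con_ssh, port, port_expected_open):
    # Hypothetical check: from the NATBox, attempt a TCP connection to the
    # lab OAM floating IP on the given port and compare with the expectation.
    # con_ssh is kept for signature compatibility but unused in this sketch.
    oam_ip = ProjVar.get_var('LAB')['floating ip']        # assumed lab dict key
    natbox_ssh = NATBoxClient.get_natbox_client()         # assumed accessor
    rc = natbox_ssh.exec_cmd('nc -z -w 5 {} {}'.format(oam_ip, port), fail_ok=True)[0]
    port_open = (rc == 0)
    assert port_open == port_expected_open, \
        "Port {}: expected open={}, actual open={}".format(port, port_expected_open, port_open)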
Example #5
def test_swact_controller_host():
    """
    SWACT Controller host - it must fail on simplex
    """
    active_controller_host = system_helper.get_active_controller_name()
    LOG.info(
        "Active Controller Before SWACT: {}".format(active_controller_host))
    standby_controller_host = system_helper.get_standby_controller_name()
    LOG.info(
        "Standby Controller Before SWACT: {}".format(standby_controller_host))

    # On simplex swact must fail
    host_helper.swact_host(fail_ok=system_helper.is_aio_simplex())
    # host_helper.wait_for_swact_complete(before_host=active_controller_host)

    active_controller_host = system_helper.get_active_controller_name()
    LOG.info(
        "Active Controller After SWACT: {}".format(active_controller_host))
    standby_controller_host = system_helper.get_standby_controller_name()
    LOG.info(
        "Standby Controller After SWACT: {}".format(standby_controller_host))

    # Re-SWACT only if duplex
    if not system_helper.is_aio_simplex():
        host_helper.swact_host()
Example #6
def test_heat_vm_scale_after_actions(vm_scaling_stack, actions):
    """
    Test VM auto scaling with swact:
        Create a heat stack for auto scaling using NestedAutoScale.yaml, swact and perform vm scale up and down.

    Test Steps:
        - Create a heat stack for auto scaling vm ()
        - Verify heat stack is created successfully
        - Verify heat resources are created
        - live migrate the vm if not sx
        - cold migrate the vm if not sx
        - swact if not sx
        - reboot -f vm host
        - trigger auto scale by boosting cpu usage in the vm (using dd; see the sketch after this example)
        - verify it scales up to the max number of vms (3)
        - trigger scale down by killing dd in the vm
        - verify the vms scale down to the min number (1)
        - Delete Heat stack and verify resource deletion
    """
    stack_name, vm_id = vm_scaling_stack
    vm_helper.wait_for_vm_pingable_from_natbox(vm_id=vm_id)

    if not system_helper.is_aio_simplex():
        actions = actions.split('-')
        if "swact" in actions:
            LOG.tc_step("Swact before scale in/out")
            host_helper.swact_host()

        if "live_migrate" in actions:
            LOG.tc_step("live migrate vm before scale in/out")
            vm_helper.live_migrate_vm(vm_id)

        if "cold_migrate" in actions:
            LOG.tc_step("cold migrate vm before scale in/out")
            vm_helper.cold_migrate_vm(vm_id)

    if "host_reboot" in actions:
        if system_helper.is_aio_simplex():
            host_helper.reboot_hosts('controller-0')
            vm_helper.wait_for_vm_status(vm_id,
                                         status=VMStatus.ACTIVE,
                                         timeout=600,
                                         check_interval=10,
                                         fail_ok=False)
            vm_helper.wait_for_vm_pingable_from_natbox(
                vm_id, timeout=VMTimeout.DHCP_RETRY)
        else:
            LOG.tc_step("evacuate vm before scale in/out")
            vm_host = vm_helper.get_vm_host(vm_id=vm_id)
            vm_helper.evacuate_vms(host=vm_host, vms_to_check=vm_id)

    LOG.tc_step(
        "Wait for {} vms to auto scale out to {} after running dd in vm(s)".
        format(stack_name, 3))
    vm_helper.wait_for_auto_vm_scale_out(stack_name, expt_max=3)

    LOG.tc_step(
        "Wait for {} vms to auto scale in to {} after killing dd processes in vms"
        .format(stack_name, 1))
    vm_helper.wait_for_auto_vm_scale_in(stack_name, expt_min=1)
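
The dd-based CPU load mentioned in the test steps is presumably handled inside wait_for_auto_vm_scale_out / wait_for_auto_vm_scale_in. A hedged sketch of how such a trigger could be implemented, reusing the ssh_to_vm_from_natbox context manager seen in Example #27, is shown below; both helper names here are hypothetical.

def _start_cpu_load_in_vm(vm_id):
    # Hypothetical trigger: run dd in the background inside the guest to push
    # cpu_util above the heat scaling policy threshold.
    with vm_helper.ssh_to_vm_from_natbox(vm_id) as vm_ssh:
        vm_ssh.exec_cmd('dd if=/dev/zero of=/dev/null &')


def _stop_cpu_load_in_vm(vm_id):
    # Kill the dd processes so the cpu_util alarm clears and the stack scales in.
    with vm_helper.ssh_to_vm_from_natbox(vm_id) as vm_ssh:
        vm_ssh.exec_cmd('pkill -f "dd if=/dev/zero"')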
Example #7
    def test_lock_with_vms_mig_fail(self, target_hosts_negative):
        """
        Test lock host with vms on it - Negative test. i.e., lock should be rejected

        Args:
            target_hosts_negative: target host(s) to perform lock

        Prerequisites: hosts' storage backing is pre-configured to the storage backing under test,
            i.e., only 1 host should support the storage backing under test.
        Test Setups:
            - Set instances quota to 10 if it was less than 8
            - Determine storage backing(s) under test, i.e., storage backings supported by only 1 host on the system
            - Create flavors with storage extra specs set based on storage backings under test
            - Create vms_to_test that can be live migrated using created flavors
            - Determine target host(s) to perform lock based on which host(s) have the most vms_to_test
        Test Steps:
            - Lock target host
            - Verify lock rejected and vms status unchanged
            - Repeat above steps if more than one target host
        Test Teardown:
            - Delete created vms and volumes
            - Delete created flavors
            - Unlock locked target host(s)

        """
        target_hosts, storages_to_test = target_hosts_negative
        LOG.info(
            "Negative test: host-lock attempt on host(s) with {} storage backing(s). \n"
            "Host(s) to attempt lock: {}".format(storages_to_test, target_hosts))
        vms_per_host = vm_helper.get_vms_per_host()
        for host in target_hosts:
            if system_helper.get_active_controller_name() == host:
                host_helper.swact_host(hostname=host)
                host_helper.wait_for_hypervisors_up(host)
                host_helper.wait_for_webservice_up(host)

            vms_on_host = vms_per_host[host]
            pre_vms_status = vm_helper.get_vms_info(vms=vms_on_host,
                                                    fields='Status')

            LOG.tc_step("Lock target host {}...".format(host))
            lock_code, lock_output = host_helper.lock_host(host=host,
                                                           check_first=False,
                                                           fail_ok=True,
                                                           swact=True)

            # Add locked host to cleanup list
            if lock_code in [0, 3]:
                self.hosts_locked.append(host)

            post_vms_status = vm_helper.get_vms_info(vms=vms_on_host,
                                                     fields='Status')

            LOG.tc_step("Verify lock rejected and vms status unchanged.")
            assert lock_code in [1, 2, 4, 5], \
                "Unexpected result: {}".format(lock_output)
            assert pre_vms_status == post_vms_status, "VM(s) status has changed after host-lock {}".format(
                host)
Example #8
def test_enable_tpm(swact_first):
    con_ssh = ControllerClient.get_active_controller()

    LOG.tc_step('Check if TPM is already configured')
    code, cert_id, cert_type = get_tpm_status(con_ssh)

    if code == 0:
        LOG.info('TPM already configured on the lab, cert_id:{}, cert_type:{}'.
                 format(cert_id, cert_type))

        LOG.tc_step('disable TPM first in order to test enabling TPM')
        code, output = remove_cert_from_tpm(con_ssh,
                                            fail_ok=False,
                                            check_first=False)
        assert 0 == code, 'failed to disable TPM'
        time.sleep(30)

        LOG.info('Waiting for the out-of-config alarm to clear')
        system_helper.wait_for_alarm_gone(EventLogID.CONFIG_OUT_OF_DATE)

    else:
        LOG.info('TPM is NOT configured on the lab')
        LOG.info('-code:{}, cert_id:{}, cert_type:{}'.format(
            code, cert_id, cert_type))

    if swact_first:
        LOG.tc_step('Swact the active controller as instructed')

        if len(system_helper.get_controllers()) < 2:
            LOG.info('Less than 2 controllers, skip swact')
        else:
            host_helper.swact_host(fail_ok=False)
            copy_config_from_local(
                con_ssh, local_conf_backup_dir,
                os.path.join(HostLinuxUser.get_home(), conf_backup_dir))

    LOG.tc_step('Install HTTPS Certificate into TPM')
    code, output = store_cert_into_tpm(
        con_ssh,
        check_first=False,
        fail_ok=False,
        pem_password=HostLinuxUser.get_password())
    assert 0 == code, 'Failed to install certificate into TPM'

    LOG.info('OK, certificate is installed into TPM')

    LOG.info('Wait for the out-of-config alarm to clear')
    system_helper.wait_for_alarm_gone(EventLogID.CONFIG_OUT_OF_DATE)

    LOG.tc_step('Verify the configuration changes for impacted components, '
                'expecting all changes to exist')
    verify_configuration_changes(expected=True, connection=con_ssh)
Example #9
def upgrade_host_lock_unlock(host, con_ssh=None):
    """
    Swact (if required), then lock and unlock the host before upgrade.

    Args:
        host (str): hostname or id in string format
        con_ssh (SSHClient):

    Returns: (return_code(int), msg(str))
        (0, "Host is host is locked/unlocked)
    """
    LOG.info("Checking if host {} is active ....".format(host))

    active_controller = system_helper.get_active_controller_name()
    swact_back = False
    if active_controller == host:
        LOG.tc_step("Swact active controller and ensure active controller is changed")
        exit_code, output = host_helper.swact_host(hostname=active_controller)
        assert 0 == exit_code, "{} is not recognized as active controller".format(active_controller)
        active_controller = system_helper.get_active_controller_name()
        swact_back = True

    LOG.info("Host {}; doing lock/unlock to the host ....".format(host))
    rc, output = host_helper.lock_host(host, con_ssh=con_ssh)
    if rc != 0 and rc != -1:
        err_msg = "Lock host {} rejected".format(host)
        LOG.warn(err_msg)
        return 1, err_msg

    rc, output = host_helper.unlock_host(host, available_only=True, con_ssh=con_ssh)
    if rc != 0:
        err_msg = "Unlock host {} failed: {}".format(host, output)
        return 1, err_msg

    if swact_back:
        time.sleep(60)

        if not system_helper.wait_for_host_values(host, timeout=360, fail_ok=True,
                                                  operational=HostOperState.ENABLED,
                                                  availability=HostAvailState.AVAILABLE):
            err_msg = "Swacting to standby is not possible because {} is not in available state " \
                      "within the specified timeout".format(host)
            assert False, err_msg
        LOG.tc_step("Swact active controller back and ensure active controller is changed")
        rc, output = host_helper.swact_host(hostname=active_controller)
        if rc != 0:
            err_msg = "Failed to swact back to host {}: {}".format(host, output)
            return 1, err_msg

        LOG.info("Swacted and  {}  has become active......".format(host))

    return 0, "Host {} is  locked and unlocked successfully".format(host)
Example #10
def test_reboot_active_controller(no_simplex):
    active, standby = system_helper.get_active_standby_controllers()
    LOG.tc_step("'sudo reboot -f' from {}".format(active))
    host_helper.reboot_hosts(active,
                             wait_for_offline=True,
                             wait_for_reboot_finish=True,
                             force_reboot=True)
    system_helper.wait_for_hosts_states(standby,
                                        timeout=360,
                                        check_interval=30,
                                        availability=['available'])
    kube_helper.wait_for_pods_healthy(check_interval=30, all_namespaces=True)
    host_helper.swact_host(hostname=standby)
Example #11
def _test_firewall_rules_custom(remove_custom_firewall):
    """
    Verify specified ports from the custom firewall rules are open and non-specified ports are closed.

    Skip Condition:
        - N/A

    Test Setup:
        - SCP iptables.rules from test server to lab

    Test Steps:
        - Install custom firewall rules
        - Check ports that should be both open and closed based on the custom firewall rules
        - Swact and check ports that should be both open and closed based on the custom firewall rules
        - Remove custom firewall rules
        - Check ports that are in the custom firewall rules are no longer open
        - Swact and check ports that are in the custom firewall rules are no longer open
    """
    # The following ports must be in the iptables.rules file or the test will fail
    custom_ports, firewall_rules_path = remove_custom_firewall

    LOG.tc_step("Installing custom firewall rules")
    _modify_firewall_rules(firewall_rules_path)

    active_controller, standby_controller = \
        system_helper.get_active_standby_controllers()
    con_ssh = ControllerClient.get_active_controller()

    LOG.tc_step("Verify custom ports on {}".format(active_controller))
    for port in custom_ports:
        # Verifying ports that are in the iptables file are open
        _verify_port_from_natbox(con_ssh, port, port_expected_open=True)

        # Verifying ports that are not in the iptables file are still closed
        _verify_port_from_natbox(con_ssh, port + 1, port_expected_open=False)

    if standby_controller:
        LOG.tc_step("Swact {}".format(active_controller))
        host_helper.swact_host(active_controller)
        active_controller = system_helper.get_active_controller_name()
        con_ssh = ControllerClient.get_active_controller()

        LOG.tc_step("Verify custom ports on {}".format(active_controller))
        for port in custom_ports:
            # Verifying ports that are in the iptables file are open after swact
            _verify_port_from_natbox(con_ssh, port, port_expected_open=True)

            # Verifying ports that are not in the iptables file are still closed after swact
            _verify_port_from_natbox(con_ssh,
                                     port + 1,
                                     port_expected_open=False)
Example #12
def test_swact_controller_platform(wait_for_con_drbd_sync_complete,
                                   collect_kpi):
    """
    Verify swact active controller

    Test Steps:
        - Swact active controller
        - Verify standby controller and active controller are swapped
        - Verify nodes are ready in kubectl get nodes

    """
    if system_helper.is_aio_simplex():
        skip("Simplex system detected")

    if not wait_for_con_drbd_sync_complete:
        skip(SkipSysType.LESS_THAN_TWO_CONTROLLERS)

    LOG.tc_step('retrieve active and available controllers')
    pre_active_controller, pre_standby_controller = \
        system_helper.get_active_standby_controllers()
    assert pre_standby_controller, "No standby controller available"

    collect_kpi = None if container_helper.is_stx_openstack_deployed() \
        else collect_kpi
    init_time = None
    if collect_kpi:
        init_time = common.get_date_in_format(date_format=KPI_DATE_FORMAT)

    LOG.tc_step(
        "Swact active controller and ensure active controller is changed")
    host_helper.swact_host(hostname=pre_active_controller)

    LOG.tc_step("Check hosts are Ready in kubectl get nodes after swact")
    kube_helper.wait_for_nodes_ready(hosts=(pre_active_controller,
                                            pre_standby_controller),
                                     timeout=30)

    if collect_kpi:
        kpi_name = SwactPlatform.NAME
        kpi_log_parser.record_kpi(local_kpi_file=collect_kpi,
                                  kpi_name=kpi_name,
                                  init_time=init_time,
                                  log_path=SwactPlatform.LOG_PATH,
                                  end_pattern=SwactPlatform.END,
                                  host=pre_standby_controller,
                                  start_host=pre_active_controller,
                                  start_pattern=SwactPlatform.START,
                                  start_path=SwactPlatform.START_PATH,
                                  uptime=1,
                                  fail_ok=False)
Example #13
def test_modify_timezone_sys_event_timestamp():
    """
    Test alarm timestamps line up with a timezone change

    Prerequisites
        - N/A
    Test Setups
        - Get a random timezone for testing
    Test Steps
        - Get the UUID and timestamp from the most recent event
        - Change the timezone and wait until the change is complete
        - Wait for out_of_date alarms to clear
        - Compare the timestamp from the event using the UUID
        - Verify the timestamp changed with the timezone change
    Test Teardown
        - N/A
    """
    LOG.tc_step("Gathering pre-modify timestamp for last event in system event-list")
    event = system_helper.get_events(fields=('UUID', 'Event Log ID', 'Entity Instance ID', 'State', 'Time Stamp'),
                                     limit=1, combine_entries=False)[0]
    event_uuid, event_log_id, entity_instance_id, event_state, pre_timestamp = event

    # Pick a random timezone to test that is not the current timezone
    timezone_to_test = __select_diff_timezone()

    LOG.tc_step("Modify timezone to {}".format(timezone_to_test))
    system_helper.modify_timezone(timezone=timezone_to_test)

    LOG.tc_step("Waiting for timezone for previous event to change in system event-list")
    timeout = time.time() + 60
    post_timestamp = None
    while time.time() < timeout:
        post_timestamp = system_helper.get_events(fields='Time Stamp', event_id=event_log_id, uuid=event_uuid,
                                                  entity_id=entity_instance_id, state=event_state)[0][0]
        if pre_timestamp != post_timestamp:
            break

        time.sleep(5)
    else:
        assert pre_timestamp != post_timestamp, "Timestamp did not change with timezone change."

    if not system_helper.is_aio_simplex():
        LOG.tc_step("Swact and verify timezone persists")
        host_helper.swact_host()
        post_swact_timezone = system_helper.get_timezone()
        assert post_swact_timezone == timezone_to_test

        post_swact_timestamp = system_helper.get_events(fields='Time Stamp', event_id=event_log_id, uuid=event_uuid,
                                                        entity_id=entity_instance_id, state=event_state)[0][0]
        assert post_swact_timestamp == post_timestamp
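
__select_diff_timezone is a module-private helper not shown in this excerpt. A minimal sketch is below, assuming it simply picks a random zone from a small candidate list that differs from the currently configured one; the candidate list is illustrative.

import random


def __select_diff_timezone(current_zone=None):
    # Hypothetical helper: choose a test timezone different from the current one.
    if current_zone is None:
        current_zone = system_helper.get_timezone()
    candidates = ['Asia/Hong_Kong', 'America/Los_Angeles', 'Europe/Berlin', 'UTC']
    return random.choice([zone for zone in candidates if zone != current_zone])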
Example #14
def test_admin_password(scenario, less_than_two_cons, _revert_admin_pw):
    """
    Test the admin password change

    Test Steps:
        - lock standby controller, change password and unlock
        - change password and swact
        - check alarms

    """
    if 'swact' in scenario and less_than_two_cons:
        skip(SkipSysType.LESS_THAN_TWO_CONTROLLERS)

    host = system_helper.get_standby_controller_name()
    assert host, "No standby controller on system"

    if scenario == "lock_standby_change_pswd":
        # lock the standby
        LOG.tc_step("Attempting to lock {}".format(host))
        res, out = host_helper.lock_host(host=host)
        LOG.tc_step("Result of the lock was: {}".format(res))

    # change password
    prev_pswd = Tenant.get('admin')['password']
    post_pswd = '!{}9'.format(prev_pswd)

    LOG.tc_step('Changing admin password to {}'.format(post_pswd))
    code, output = keystone_helper.set_user('admin', password=post_pswd, auth_info=Tenant.get(
        'admin_platform'))

    # assert "Warning: 'admin' password changed. Please wait 5 minutes before Locking/Unlocking
    # the controllers" in output
    LOG.tc_step("Sleep for 180 seconds after admin password change")
    time.sleep(180)  # CGTS-6928

    LOG.tc_step("Check admin password is updated in keyring")
    assert post_pswd == security_helper.get_admin_password_in_keyring()

    if scenario == "change_pswd_swact":
        LOG.tc_step("Swact active controller")
        host_helper.swact_host()
    else:
        LOG.tc_step("Unlock host {}".format(host))
        res = host_helper.unlock_host(host)
        LOG.info("Unlock hosts result: {}".format(res))

    LOG.tc_step("Check admin password is updated in keyring")
    assert post_pswd == security_helper.get_admin_password_in_keyring()
Example #15
def test_swact_failed_controller_negative(fail_controller):
    """
    TC610_3
    Verify that swacting to a failed controller is rejected

    Test Setup:
        - Reset the standby controller

    Test Steps:
        - Attempt to swact from the active controller
        - Verify that the swact was rejected and the active controller is the same

    Teardown:
        - Wait until the controller is online again

    Returns:

    """
    if not fail_controller:
        skip("Couldn't put controller into failed state.")

    active = system_helper.get_active_controller_name()
    LOG.tc_step("Attempting to swact to failed controller.")
    code, out = host_helper.swact_host(fail_ok=True)
    LOG.tc_step("Verifying that the swact didn't occur.")
    assert 1 == code, "FAIL: The swact wasn't rejected"
    curr_active = system_helper.get_active_controller_name()
    assert curr_active == active, "FAIL: The active controller was changed. " \
                                  "Previous: {} Current: {}".format(active, curr_active)
Example #16
def test_reapply_stx_openstack_no_change(stx_openstack_applied_required, check_nodes, controller):
    """
    Args:
        stx_openstack_applied_required:

    Pre-requisite:
        - stx-openstack application in applied state

    Test Steps:
        - Re-apply stx-openstack application
        - Check openstack pods healthy

    """
    # if controller == 'controller-1':
    #     skip("CGTS-10708")

    if system_helper.is_aio_simplex() and controller != 'controller-0':
        skip('Simplex system only has controller-0')

    active, standby = system_helper.get_active_standby_controllers()
    if active != controller:
        if not standby:
            skip('{} is not ready to take over'.format(controller))

        LOG.tc_step("Swact active controller to test reapply from {}".format(controller))
        host_helper.swact_host()
        time.sleep(60)

    LOG.info("helm list before reapply after swact")
    from utils.clients.ssh import ControllerClient
    con_ssh = ControllerClient.get_active_controller()
    end_time = time.time() + 180
    while time.time() < end_time:
        code = con_ssh.exec_cmd('helm list', expect_timeout=60)[0]
        if code == 0:
            break
        time.sleep(30)

    LOG.tc_step("Re-apply stx-openstack application")
    container_helper.apply_app(app_name='stx-openstack')

    LOG.tc_step("Check openstack pods in good state on all controllers after stx-openstack "
                "re-applied")
    for host in get_valid_controllers():
        check_openstack_pods_healthy(host=host, timeout=120)
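
get_valid_controllers and check_openstack_pods_healthy are module-level helpers outside this excerpt. The sketch below is an assumption of how they could be built on the helpers shown elsewhere in these examples; the availability filter and the wait_for_pods_healthy keyword arguments are guesses, not the framework's actual signatures.

def get_valid_controllers():
    # Hypothetical helper: controllers that are currently usable for the check.
    return system_helper.get_controllers(
        availability=(HostAvailState.AVAILABLE, HostAvailState.DEGRADED))


def check_openstack_pods_healthy(host, timeout):
    # Hypothetical wrapper: wait until pods are healthy, running the kubectl
    # checks from the given controller.
    with host_helper.ssh_to_host(host) as host_ssh:
        kube_helper.wait_for_pods_healthy(all_namespaces=True, con_ssh=host_ssh,
                                          timeout=timeout)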
Example #17
def test_host_operations_with_custom_kubectl_app(deploy_delete_kubectl_app):
    """
    Test create, delete custom app via kubectl run cmd
    Args:
        deploy_delete_kubectl_app: fixture

    Setups:
        - Create kubectl app via kubectl run

    Test Steps:
        - If duplex: swact and verify pod still Running
        - Lock/unlock controller and verify pod still Running

    Teardown:
        - Delete kubectl deployment and service
        - Verify pod is removed

    """
    app_name, pod_name = deploy_delete_kubectl_app
    active, standby = system_helper.get_active_standby_controllers()

    if standby:
        LOG.tc_step("Swact active controller and verify {} test app is "
                    "running ".format(pod_name))
        host_helper.swact_host()
        kube_helper.wait_for_pods_status(pod_names=pod_name,
                                         namespace='default',
                                         fail_ok=False)

    LOG.tc_step("Lock/unlock {} and verify {} test app is "
                "running.".format(active, pod_name))
    HostsToRecover.add(active)
    host_helper.lock_host(active, swact=False)

    # wait for services to stabilize before unlocking
    time.sleep(20)

    host_helper.unlock_host(active)
    pod_name = kube_helper.get_pods(field='NAME',
                                    namespace='default',
                                    name=app_name,
                                    strict=False)[0]
    kube_helper.wait_for_pods_status(pod_names=pod_name,
                                     namespace=None,
                                     fail_ok=False)
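
The deploy_delete_kubectl_app fixture is not shown in this excerpt. Below is a hedged sketch of what it might do: create a deployment and a NodePort service via kubectl, yield the names, and tear both down afterwards. The image, resource names and the wait_for_resources_gone call are illustrative assumptions.

import pytest


@pytest.fixture(scope='module')
def deploy_delete_kubectl_app():
    app_name = 'client-pod1'        # assumed test app name
    con_ssh = ControllerClient.get_active_controller()

    # Hypothetical creation: deploy a small test app and expose it via NodePort.
    con_ssh.exec_cmd('kubectl create deployment {} --image=nginx'.format(app_name),
                     fail_ok=False)
    con_ssh.exec_cmd('kubectl expose deployment {} --port=80 --type=NodePort'.format(app_name),
                     fail_ok=False)
    pod_name = kube_helper.get_pods(field='NAME', namespace='default', name=app_name,
                                    strict=False)[0]

    yield app_name, pod_name

    # Teardown: remove the deployment and service, then make sure the pod is gone.
    con_ssh.exec_cmd('kubectl delete deployment,service {}'.format(app_name))
    kube_helper.wait_for_resources_gone(resource_names=pod_name,      # assumed helper
                                        namespace='default')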
Example #18
def sys_controlled_swact(number_of_times=1):
    """
    Swact the active controller a given number of times and verify VMs after each swact.
    :return:
    """
    for i in range(0, number_of_times):
        active, standby = system_helper.get_active_standby_controllers()
        LOG.tc_step("Doing iteration of {} of total iteration {}".format(
            i, number_of_times))
        LOG.tc_step("'sudo reboot -f' from {}".format(standby))
        host_helper.swact_host(hostname=active)

        LOG.tc_step("Check vms status after controller swact")
        vms = get_all_vms()
        vm_helper.wait_for_vms_values(vms, fail_ok=False, timeout=600)

        for vm in vms:
            vm_helper.wait_for_vm_pingable_from_natbox(vm_id=vm)
Example #19
def ensure_host_provisioned(host, con_ssh=None):
    """
    Ensure host is provisioned; if not, lock and unlock it (swacting first if it is the active controller).

    Args:
        host (str): hostname or id in string format
        con_ssh (SSHClient):

    Returns: (return_code(int), msg(str))
        (0, "Host is provisioned")
    """
    LOG.info("Checking if host {} is already provisioned ....".format(host))
    if is_host_provisioned(host, con_ssh=con_ssh):
        return 0, "Host {} is provisioned".format(host)
    active_controller = system_helper.get_active_controller_name()
    conter_swact_back = False
    if active_controller == host:
        LOG.tc_step("Swact active controller and ensure active controller is changed")
        exit_code, output = swact_host(hostname=active_controller)
        assert 0 == exit_code, "{} is not recognized as active controller".format(active_controller)
        active_controller = system_helper.get_active_controller_name()
        conter_swact_back = True

    LOG.info("Host {} not provisioned ; doing lock/unlock to provision the host ....".format(host))
    rc, output = lock_host(host, con_ssh=con_ssh)
    if rc != 0 and rc != -1:
        err_msg = "Lock host {} rejected".format(host)
        raise exceptions.HostError(err_msg)

    rc, output = unlock_host(host, available_only=True, con_ssh=con_ssh)
    if rc != 0:
        err_msg = "Unlock host {} failed: {}".format(host, output)
        raise exceptions.HostError(err_msg)
    if conter_swact_back:
        LOG.tc_step("Swact active controller back and ensure active controller is changed")
        exit_code, output = swact_host(hostname=active_controller)
        assert 0 == exit_code, "{} is not recognized as active controller".format(active_controller)

    LOG.info("Checking if host {} is provisioned after lock/unlock ....".format(host))
    if not is_host_provisioned(host, con_ssh=con_ssh):
        raise exceptions.HostError("Failed to provision host {}".format(host))
    # Delay for the alarm to clear . Could be improved.
    time.sleep(120)
    return 0, "Host {} is provisioned after lock/unlock".format(host)
Example #20
def _test_firewall_rules_default():
    """
    Verify default ports are open.

    Test Steps:
        - Confirm iptables service is running on active controller
        - Check if lab is http(s), add corresponding port to check
        - Confirm the default ports are open
        - Swact and repeat the above steps
    """
    # Cannot test connecting to the ports as they are in use.

    default_ports = [
        123, 161, 199, 5000, 6080, 6385, 8000, 8003, 8004, 8041, 8774, 8776,
        8778, 9292, 9696, 15491
    ]

    from consts.proj_vars import ProjVar
    region = ProjVar.get_var('REGION')
    if region != 'RegionOne' and region in MULTI_REGION_MAP:
        default_ports.remove(5000)
        default_ports.remove(9292)

    default_ports.append(8443 if CliAuth.get_var('HTTPS') else 8080)

    active_controller = system_helper.get_active_controller_name()
    con_ssh = ControllerClient.get_active_controller()

    _verify_iptables_status(con_ssh, active_controller)
    _check_ports_with_netstat(con_ssh, active_controller, default_ports)

    active_controller, new_active = \
        system_helper.get_active_standby_controllers()
    if new_active:
        LOG.tc_step(
            "Swact {} and verify firewall rules".format(active_controller))
        host_helper.swact_host(active_controller)
        con_ssh = ControllerClient.get_active_controller()

        _verify_iptables_status(con_ssh, new_active)
        _check_ports_with_netstat(con_ssh, new_active, default_ports)
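
_verify_iptables_status and _check_ports_with_netstat are private helpers that do not appear in the excerpt. A rough sketch of the netstat-based check is below; the exec_sudo_cmd call and the simple substring match are assumptions.

def _check_ports_with_netstat(con_ssh, host, ports):
    # Hypothetical check: confirm every expected port has a listener on the host.
    LOG.tc_step("Check ports {} are listening on {}".format(ports, host))
    output = con_ssh.exec_sudo_cmd('netstat -lntu', fail_ok=False)[1]      # assumed sudo exec helper
    not_listening = [port for port in ports if ':{} '.format(port) not in output]
    assert not not_listening, \
        "Ports not listening on {}: {}".format(host, not_listening)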
Example #21
def test_dc_swact_host(swact_precheck, check_central_alarms):
    """
    Test host swact on central region
    Args:
        swact_precheck(fixture): check subclouds managed and online
    Setup:
        - Ensure primary subcloud is managed
    Test Steps:
        - Unmanage primary subcloud
        - Swact the host
        - Verify subclouds are managed
    Teardown:
        - Manage unmanaged subclouds
    """
    primary_subcloud, managed_subclouds = swact_precheck
    ssh_central = ControllerClient.get_active_controller(name="RegionOne")

    LOG.tc_step("Unmanage {}".format(primary_subcloud))
    dc_helper.unmanage_subcloud(subcloud=primary_subcloud, check_first=True)

    LOG.tc_step("Swact host on central region")
    central_auth = Tenant.get('admin_platform', dc_region='RegionOne')
    host_helper.swact_host(auth_info=central_auth)

    LOG.tc_step("Check subclouds after host swact on central region")
    for managed_subcloud in managed_subclouds:
        dc_helper.wait_for_subcloud_status(subcloud=managed_subcloud,
                                           avail=SubcloudStatus.AVAIL_ONLINE,
                                           mgmt=SubcloudStatus.MGMT_MANAGED,
                                           sync=SubcloudStatus.SYNCED,
                                           con_ssh=ssh_central)

    LOG.tc_step("Manage {}".format(primary_subcloud))
    dc_helper.manage_subcloud(subcloud=primary_subcloud, check_first=True)
    dc_helper.wait_for_subcloud_status(subcloud=primary_subcloud,
                                       avail=SubcloudStatus.AVAIL_ONLINE,
                                       mgmt=SubcloudStatus.MGMT_MANAGED,
                                       sync=SubcloudStatus.SYNCED,
                                       con_ssh=ssh_central)
Example #22
def test_disable_tpm(swact_first):
    ssh_client = ControllerClient.get_active_controller()

    LOG.tc_step('Check if TPM is already configured')
    code, cert_id, cert_type = get_tpm_status(ssh_client)

    if code == 0:
        LOG.info('TPM is configured on the lab')

        if swact_first:
            LOG.tc_step('Swact the active controller as instructed')
            if len(system_helper.get_controllers()) < 2:
                LOG.info('Less than 2 controllers, skip swact')
            else:
                host_helper.swact_host(fail_ok=False)
                copy_config_from_local(
                    ssh_client, local_conf_backup_dir,
                    os.path.join(HostLinuxUser.get_home(), conf_backup_dir))

        LOG.tc_step('Disabling TPM')
        code, output = remove_cert_from_tpm(ssh_client,
                                            fail_ok=False,
                                            check_first=False)
        assert 0 == code, 'failed to disable TPM'

        LOG.info('Wait for the out-of-config alarm to clear')
        system_helper.wait_for_alarm_gone(EventLogID.CONFIG_OUT_OF_DATE)

        LOG.tc_step('Verify the configuration changes for impacted components, '
                    'expecting none of the changes to exist')
        verify_configuration_changes(expected=False, connection=ssh_client)

    else:
        LOG.info('TPM is NOT configured on the lab, skip the test')
        skip('TPM is NOT configured on the lab, skip the test')
Example #23
def test_swact_compute_negative():
    """
    TC610_5
    Verify that trying to swact a compute node is rejected

    Test Steps:
        - Attempt to swact from a compute

    """
    computes = system_helper.get_hosts(personality='compute')
    if not computes:
        skip("No compute host in system")

    for compute in computes:
        LOG.tc_step("Attempting to swact from {}".format(compute))
        code, out = host_helper.swact_host(compute, fail_ok=True)
        LOG.tc_step("Verifying that the swact didn't occur.")
        assert 1 == code, "FAIL: Swacting {} wasn't rejected".format(compute)
Example #24
def test_swact_standby_controller_negative():
    """
    TC610_2
    Verify that trying to swact a standby controller is rejected

    Test Steps:
        - Get the standby controller
        - Attempt to swact the controller
        - Verify that the swact doesn't happen

    """
    standby = system_helper.get_standby_controller_name()
    active = system_helper.get_active_controller_name()
    LOG.tc_step(
        "Attempting to swact from standby controller {}".format(standby))
    code, out = host_helper.swact_host(standby, fail_ok=True)
    LOG.tc_step("Verifying that the swact didn't occur.")
    assert 0 != code, "FAIL: The swact wasn't rejected"
    curr_active = system_helper.get_active_controller_name()
    assert curr_active == active, "FAIL: The active controller was changed. " \
                                  "Previous: {} Current: {}".format(active, curr_active)
Example #25
def test_swact_no_standby_negative(lock_controller):
    """
    TC610_4
    Verify swact without standby controller is rejected

    Test Setup:
        - Lock the standby controller

    Test Steps:
        - Attempt to swact when no standby controller available

    Teardown:
        - Unlock the controller

    """
    LOG.tc_step("Attempting to swact when no standby controller available")
    active = system_helper.get_active_controller_name()
    code, out = host_helper.swact_host(hostname=active, fail_ok=True)

    LOG.tc_step("Verifying that the swact didn't occur.")
    assert 1 == code, "FAIL: The swact wasn't rejected"
    curr_active = system_helper.get_active_controller_name()
    assert curr_active == active, "FAIL: The active controller was changed. " \
                                  "Previous: {} Current: {}".format(active, curr_active)
Example #26
def test_dc_modify_timezone(prev_check):
    """
    Test timezone modify on system controller and subcloud. Ensure timezone change is not
    propagated.
    Setups:
        - Ensure both central and subcloud regions are configured with UTC
        - Get the timestamps for host created_at before timezone modify

    Test Steps
        - Change the timezone in central region and wait until the change is applied
        - Change the timezone to a different zone in subcloud and wait until the change is applied
        - Verify host created_at timestamp updated according to the local timezone for the region
        - Swact on subcloud and ensure timezone and host created_at timestamp persists locally
        - Swact central controller and ensure timezone and host created_at timestamp persists
          in central and subcloud

    Teardown
        - Change timezone to UTC in both central and subcloud regions
        - Ensure host created_at timestamp is reverted to original

    """
    prev_central_time, prev_sub_time, central_zone, sub_zone, central_auth, subcloud_auth, \
        subcloud = prev_check

    LOG.tc_step("Modify timezone to {} in central region".format(central_zone))
    system_helper.modify_timezone(timezone=central_zone,
                                  auth_info=central_auth)

    LOG.tc_step(
        "Waiting for timestamp for host created_at to update in central region"
    )
    post_central_time = wait_for_timestamp_update(
        prev_timestamp=prev_central_time, auth_info=central_auth)
    assert post_central_time != prev_central_time, \
        "host created_at timestamp did not update after timezone changed " \
        "to {} in central region".format(central_zone)

    LOG.tc_step("Modify timezone to {} in {}".format(sub_zone, subcloud))
    system_helper.modify_timezone(timezone=sub_zone, auth_info=subcloud_auth)

    LOG.tc_step(
        "Waiting for timestamp for same host created_at to update in {}".
        format(subcloud))
    post_sub_time = wait_for_timestamp_update(prev_timestamp=prev_sub_time,
                                              auth_info=subcloud_auth)
    assert post_sub_time != prev_sub_time, \
        "host created_at timestamp did not update after timezone changed to {} " \
        "in {}".format(sub_zone, subcloud)
    assert post_sub_time != post_central_time, \
        "Host created_at timestamp is the same on central and {} when configured with different " \
        "timezones".format(subcloud)

    LOG.tc_step(
        "Ensure host created_at timestamp does not change after subcloud sync audit"
    )
    dc_helper.wait_for_sync_audit(subclouds=subcloud,
                                  fail_ok=True,
                                  timeout=660)
    post_sync_sub_time = system_helper.get_host_values(
        host='controller-0', fields='created_at', auth_info=subcloud_auth)[0]
    assert post_sub_time == post_sync_sub_time, \
        "Host created_at timestamp changed after sync audit on {}".format(subcloud)

    if not system_helper.is_aio_simplex():
        LOG.tc_step(
            "Swact in {} region and verify timezone persists locally".format(
                subcloud))
        host_helper.swact_host(auth_info=subcloud_auth)
        post_swact_sub_zone = system_helper.get_timezone(
            auth_info=subcloud_auth)
        assert post_swact_sub_zone == sub_zone

        post_swact_sub_time = system_helper.get_host_values(
            host='controller-0', fields='created_at',
            auth_info=subcloud_auth)[0]
        assert post_swact_sub_time == post_sub_time

    if system_helper.get_standby_controller_name(auth_info=central_auth):
        LOG.tc_step(
            "Swact in central region, and ensure timezone persists locally in central"
            " and subcloud")
        host_helper.swact_host(auth_info=central_auth)

        # Verify central timezone persists
        post_swact_central_zone = system_helper.get_timezone(
            auth_info=central_auth)
        assert post_swact_central_zone == central_zone
        post_swact_central_time = system_helper.get_host_values(
            host='controller-0', fields='created_at',
            auth_info=central_auth)[0]
        assert post_swact_central_time == post_central_time

        # Verify subcloud timezone persists
        post_central_swact_sub_zone = system_helper.get_timezone(
            auth_info=subcloud_auth)
        assert post_central_swact_sub_zone == sub_zone
        post_central_swact_sub_time = system_helper.get_host_values(
            host='controller-0', fields='created_at',
            auth_info=subcloud_auth)[0]
        assert post_central_swact_sub_time == post_sub_time
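
wait_for_timestamp_update is a module helper not included in the excerpt. The polling sketch below assumes it simply re-reads the controller-0 created_at field until it differs from the previous value or a timeout expires; the timeout default is a guess.

import time


def wait_for_timestamp_update(prev_timestamp, auth_info=None, timeout=300):
    # Hypothetical poller: wait for host created_at to be re-rendered in the
    # newly configured timezone, returning the latest value either way.
    end_time = time.time() + timeout
    post_timestamp = prev_timestamp
    while time.time() < end_time:
        post_timestamp = system_helper.get_host_values(
            host='controller-0', fields='created_at', auth_info=auth_info)[0]
        if post_timestamp != prev_timestamp:
            break
        time.sleep(10)
    return post_timestamp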
Example #27
def test_ntfs(stx_openstack_required, host_type="controller"):
    """
    This test will test NTFS mount and NTFS formatted device creation on a TiS
    system.

    Arguments:
    - host_type (string) - host type to be tested, e.g. controller, compute,
      storage

    Returns:
    - Nothing

    Test Steps:
    1.  Check if desired host has USB inserted.  If not, skip
    2.  Wipe USB
    3.  Change label of device
    4.  Create partitions on NTFS device
    5.  Format partitions
    6.  Copy large image to NTFS mount point
    7.  Test mount and big file creation on NTFS mounted device
    """

    # Could pass these in through parametrize instead
    mount_type = "ntfs"
    mount_point = "/media/ntfs/"
    guest_os = 'win_2012'
    boot_source = "image"

    host, usb_device = locate_usb(host_type, min_size=13)
    if not host:
        skip("No USB hardware found on {} host type".format(host_type))

    hosts_with_image_backing = host_helper.get_hosts_in_storage_backing(storage_backing='image')
    if len(hosts_with_image_backing) == 0:
        skip("No hosts with image backing present")

    # if the host with the USB is not the active controller, swact controllers
    con_ssh = ControllerClient.get_active_controller()
    active_controller = system_helper.get_active_controller_name(con_ssh)
    if host != active_controller:
        host_helper.swact_host()

    with host_helper.ssh_to_host(host) as host_ssh:
        wipe_usb(host_ssh, usb_device)
        umount_usb(host_ssh, mount_point=mount_point)
        create_usb_label(host_ssh, usb_device, label="msdos")
        create_usb_partition(host_ssh, usb_device, startpt="0", endpt="2048")
        format_usb(host_ssh, usb_device, partition="1")
        create_usb_partition(host_ssh, usb_device, startpt="2049", endpt="100%")
        format_usb(host_ssh, usb_device, partition="2")
        mount_usb(host_ssh, usb_device, partition="2", mount_type=mount_type, mount_point=mount_point)

    LOG.tc_step("Copy the windows guest image to the mount point")
    src_img = glance_helper.scp_guest_image(img_os=guest_os, dest_dir=mount_point, con_ssh=con_ssh)

    LOG.tc_step("Create flavor for windows guest image")
    flv_id = nova_helper.create_flavor(name=guest_os, vcpus=4, ram=8192, storage_backing="local_image",
                                       guest_os=guest_os)[1]
    nova_helper.set_flavor(flv_id, **{FlavorSpec.CPU_POLICY: "dedicated"})
    ResourceCleanup.add("flavor", flv_id)

    LOG.tc_step("Import image into glance")
    glance_helper.create_image(name=guest_os, source_image_file=src_img, disk_format="qcow2",
                               container_format="bare", con_ssh=con_ssh, cleanup="function")

    LOG.tc_step("Boot VM")
    vm_id = vm_helper.boot_vm(name=guest_os, flavor=flv_id, guest_os=guest_os, source=boot_source, cleanup="function")[1]

    LOG.tc_step("Ping vm and ssh to it")
    vm_helper.wait_for_vm_pingable_from_natbox(vm_id)
    with vm_helper.ssh_to_vm_from_natbox(vm_id) as vm_ssh:
        output = vm_ssh.exec_cmd('pwd', fail_ok=False)[1]
        LOG.info(output)
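
Helpers such as wipe_usb, create_usb_partition and mount_usb are not part of this excerpt. A hedged sketch of the mount helper is below, assuming the device is passed as e.g. 'sdb' and the partition as a number; the exec_sudo_cmd call is an assumed SSH helper.

def mount_usb(host_ssh, usb_device, partition, mount_type, mount_point):
    # Hypothetical helper: mount the given partition (e.g. /dev/sdb2) as NTFS.
    host_ssh.exec_sudo_cmd('mkdir -p {}'.format(mount_point))
    host_ssh.exec_sudo_cmd('mount -t {} /dev/{}{} {}'.format(
        mount_type, usb_device, partition, mount_point), fail_ok=False)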
Example #28
def test_swact_100_times():
    """
    Skip Condition:
        - Less than two controllers on system

    Test Steps:
        - Boot a vm and ensure it's pingable
        - Start writing from pre-existed vm before swacting
        - Repeat following steps 100 times:
            - ensure system has standby controller
            - system host-swact
            - ensure all services are active in sudo sm-dump on new active controller
            - ensure pre-existed vm is still pingable from NatBox
            - ensure writing did not stop on pre-existed vm
            - ensure new vm can be launched in 2 minutes
            - ensure newly booted vm is pingable from NatBox
            - delete newly booted vm

    Teardown:
        - delete vms, volumes

    """
    if len(system_helper.get_controllers()) < 2:
        skip("Less than two controllers on system")

    if not system_helper.get_standby_controller_name():
        assert False, "No standby controller on system"

    LOG.tc_step("Boot a vm and ensure it's pingable")
    vm_base = vm_helper.boot_vm(name='pre_swact', cleanup='function')[1]

    LOG.tc_step("Start writing from pre-existed vm before swacting")
    end_event = Events("End write in base vm")
    base_vm_thread = vm_helper.write_in_vm(vm_base, end_event=end_event, expect_timeout=40, thread_timeout=60*100)

    try:
        for i in range(100):
            iter_str = "Swact iter{}/100 - ".format(i+1)

            LOG.tc_step("{}Ensure system has standby controller".format(iter_str))
            standby = system_helper.get_standby_controller_name()
            assert standby

            LOG.tc_step("{}Swact active controller and ensure active controller is changed".format(iter_str))
            host_helper.swact_host()

            LOG.tc_step("{}Check all services are up on active controller via sudo sm-dump".format(iter_str))
            host_helper.wait_for_sm_dump_desired_states(controller=standby, fail_ok=False)

            LOG.tc_step("{}Ensure pre-existed vm still pingable post swact".format(iter_str))
            vm_helper.wait_for_vm_pingable_from_natbox(vm_id=vm_base, timeout=45)

            time.sleep(5)
            LOG.tc_step("{}Ensure writing from pre-existed vm resumes after swact".format(iter_str))
            assert base_vm_thread.res is True, "Writing in pre-existed vm stopped after {}".format(iter_str.lower())

            LOG.tc_step("{}Attempt to boot new vm after 2 minutes of post swact and ensure it's pingable".
                        format(iter_str))
            time.sleep(60)
            for j in range(3):
                code, vm_new, msg = vm_helper.boot_vm(name='post_swact', fail_ok=True, cleanup='function')

                if code == 0:
                    break

                LOG.warning("VM failed to boot - attempt{}".format(j+1))
                vm_helper.delete_vms(vms=vm_new)
                assert j < 2, "No vm can be booted 2+ minutes after swact"

                LOG.tc_step("{}VM{} failed to boot, wait for 30 seconds and retry".format(j+1, iter_str))
                time.sleep(30)

            vm_helper.wait_for_vm_pingable_from_natbox(vm_new)

            LOG.tc_step("{}Delete the vm created".format(iter_str))
            vm_helper.delete_vms(vms=vm_new)
    finally:
        LOG.tc_step("End the base_vm_thread")
        end_event.set()
        base_vm_thread.wait_for_thread_end(timeout=20)

    post_standby = system_helper.get_standby_controller_name()
    assert post_standby, "System does not have standby controller after last swact"
Example #29
def test_dc_modify_https(revert_https):
    """
    Test enable/disable https

    Test Steps:
        - Ensure central region and subcloud admin endpoint are https
        - Ensure central region https to be different than subcloud
        - Wait for subcloud sync audit and ensure subcloud https is not changed
        - Verify cli's in subcloud and central region
        - Modify https on central and subcloud
        - Verify cli's in subcloud and central region
        - swact central and subcloud
        - Ensure central region and subcloud admin endpoint are https

    Teardown:
        - Revert https config on central and subcloud

    """
    origin_https_sub, origin_https_central, central_auth, sub_auth, use_dnsname = revert_https
    subcloud = ProjVar.get_var('PRIMARY_SUBCLOUD')

    LOG.tc_step(
        "Before testing, Ensure central region and subcloud admin internal endpoint are https")
    assert keystone_helper.is_https_enabled(interface='admin', auth_info=central_auth), \
        "Central region admin internal endpoint is not https"
    assert keystone_helper.is_https_enabled(interface='admin', auth_info=sub_auth), \
        "Subcloud admin internal endpoint is not https"

    new_https_sub = not origin_https_sub
    new_https_central = not origin_https_central

    LOG.tc_step("Ensure central region https to be different than {}".format(subcloud))
    security_helper.modify_https(enable_https=new_https_sub, auth_info=central_auth)

    LOG.tc_step('Check public endpoints accessibility for central region')
    security_helper.check_services_access(region='RegionOne', auth_info=central_auth,
                                          use_dnsname=use_dnsname)
    LOG.tc_step('Check platform horizon accessibility')
    security_helper.check_platform_horizon_access(use_dnsname=use_dnsname)

    LOG.tc_step("Wait for subcloud sync audit with best effort and ensure {} https is not "
                "changed".format(subcloud))
    dc_helper.wait_for_sync_audit(subclouds=subcloud, fail_ok=True, timeout=660)
    assert origin_https_sub == keystone_helper.is_https_enabled(auth_info=sub_auth), \
        "HTTPS config changed in subcloud"

    LOG.tc_step("Verify cli's in {} and central region".format(subcloud))
    verify_cli(sub_auth, central_auth)

    if new_https_central != new_https_sub:
        LOG.tc_step("Set central region https to {}".format(new_https_central))
        security_helper.modify_https(enable_https=new_https_central, auth_info=central_auth)
        LOG.tc_step("Ensure central region and subcloud admin internal endpoint are still https")
        assert keystone_helper.is_https_enabled(interface='admin', auth_info=central_auth), \
            "Central region admin internal endpoint is not https"
        assert keystone_helper.is_https_enabled(interface='admin', auth_info=sub_auth), \
            "Subcloud admin internal endpoint is not https"
        LOG.tc_step('Check public endpoints accessibility for central region')
        security_helper.check_services_access(region='RegionOne', auth_info=central_auth,
                                              use_dnsname=use_dnsname)
        LOG.tc_step('Check platform horizon accessibility')
        security_helper.check_platform_horizon_access(use_dnsname=use_dnsname)

    LOG.tc_step("Set {} https to {}".format(subcloud, new_https_sub))
    security_helper.modify_https(enable_https=new_https_sub, auth_info=sub_auth)
    LOG.tc_step('Check public endpoints accessibility for {} region'.format(subcloud))
    security_helper.check_services_access(region=subcloud, auth_info=sub_auth,
                                          use_dnsname=use_dnsname)

    LOG.tc_step("Ensure central region and subcloud admin internal endpoint are still https")
    assert keystone_helper.is_https_enabled(interface='admin', auth_info=central_auth), \
        "Central region admin internal endpoint is not https"
    assert keystone_helper.is_https_enabled(interface='admin', auth_info=sub_auth), \
        "Subcloud admin internal endpoint is not https"

    LOG.tc_step("Verify cli's in {} and central region after https modify on "
                "subcloud".format(subcloud))
    verify_cli(sub_auth, central_auth)

    LOG.tc_step("Swact on central region")
    host_helper.swact_host(auth_info=central_auth)

    LOG.tc_step(
        "Verify cli's in {} and central region after central region swact".format(subcloud))
    verify_cli(sub_auth, central_auth)

    if not system_helper.is_aio_simplex(auth_info=sub_auth):
        LOG.tc_step("Swact on subcloud {}".format(subcloud))
        host_helper.swact_host(auth_info=sub_auth)
        LOG.tc_step("Verify cli's in {} and central region after subcloud swact".format(subcloud))
        verify_cli(sub_auth, central_auth)

    LOG.tc_step("Ensure after swact, central region and subcloud admin internal endpoint are https")
    assert keystone_helper.is_https_enabled(interface='admin', auth_info=central_auth), \
        "Central region admin internal endpoint is not https"
    assert keystone_helper.is_https_enabled(interface='admin', auth_info=sub_auth), \
        "Subcloud admin internal endpoint is not https"
Example #30
def test_swact_controllers(stx_openstack_required,
                           wait_for_con_drbd_sync_complete):
    """
    Verify swact active controller

    Test Steps:
        - Boot a vm on system and check ping works
        - Swact active controller
        - Verify standby controller and active controller are swapped
        - Verify vm is still pingable

    """
    if not wait_for_con_drbd_sync_complete:
        skip(SkipSysType.LESS_THAN_TWO_CONTROLLERS)

    LOG.tc_step('retrieve active and available controllers')
    pre_active_controller, pre_standby_controller = \
        system_helper.get_active_standby_controllers()
    assert pre_standby_controller, "No standby controller available"

    pre_res_sys, pre_msg_sys = system_helper.wait_for_services_enable(
        timeout=20, fail_ok=True)
    up_hypervisors = host_helper.get_up_hypervisors()
    pre_res_neutron, pre_msg_neutron = network_helper.wait_for_agents_healthy(
        up_hypervisors, timeout=20, fail_ok=True)

    LOG.tc_step("Boot a vm from image and ping it")
    vm_id_img = vm_helper.boot_vm(name='swact_img',
                                  source='image',
                                  cleanup='function')[1]
    vm_helper.wait_for_vm_pingable_from_natbox(vm_id_img)

    LOG.tc_step("Boot a vm from volume and ping it")
    vm_id_vol = vm_helper.boot_vm(name='swact', cleanup='function')[1]
    vm_helper.wait_for_vm_pingable_from_natbox(vm_id_vol)

    LOG.tc_step(
        "Swact active controller and ensure active controller is changed")
    host_helper.swact_host(hostname=pre_active_controller)

    LOG.tc_step("Verify standby controller and active controller are swapped")
    post_active_controller = system_helper.get_active_controller_name()
    post_standby_controller = system_helper.get_standby_controller_name()

    assert pre_standby_controller == post_active_controller, \
        "Prev standby: {}; Post active: {}".format(
            pre_standby_controller, post_active_controller)
    assert pre_active_controller == post_standby_controller, \
        "Prev active: {}; Post standby: {}".format(
            pre_active_controller, post_standby_controller)

    LOG.tc_step("Check boot-from-image vm still pingable after swact")
    vm_helper.wait_for_vm_pingable_from_natbox(vm_id_img, timeout=30)
    LOG.tc_step("Check boot-from-volume vm still pingable after swact")
    vm_helper.wait_for_vm_pingable_from_natbox(vm_id_vol, timeout=30)

    LOG.tc_step(
        "Check system services and neutron agents after swact from {}".format(
            pre_active_controller))
    post_res_sys, post_msg_sys = system_helper.wait_for_services_enable(
        fail_ok=True)
    post_res_neutron, post_msg_neutron = network_helper.wait_for_agents_healthy(
        hosts=up_hypervisors, fail_ok=True)

    assert post_res_sys, \
        "\nPost-swact system services stats: {}\nPre-swact system services stats: {}". \
        format(post_msg_sys, pre_msg_sys)
    assert post_res_neutron, \
        "\nPost-swact neutron agents stats: {}\nPre-swact neutron agents stats: {}". \
        format(post_msg_neutron, pre_msg_neutron)

    LOG.tc_step("Check hosts are Ready in kubectl get nodes after swact")
    kube_helper.wait_for_nodes_ready(hosts=(pre_active_controller,
                                            pre_standby_controller),
                                     timeout=30)