Example #1
    def login_as_ldap_user(self,
                           user_name,
                           password,
                           host=None,
                           pre_store=False,
                           disconnect_after=False):
        """
        Log in to the specified host with the specified user name and password

        Args:
            user_name (str):        user name
            password (str):         password
            host (str):             host to login to
            pre_store (bool):
                    True    -       pre-store keystone user credentials for
                    session
                    False   -       choose 'N' (by default) meaning do not
                    pre-store keystone user credentials
            disconnect_after (bool):
                    True    -       disconnect the logged in session
                    False   -       keep the logged in session

        Returns (tuple):
            logged_in (bool)    -   True if successfully logged into the
                                    specified host using the specified
                                    user/password
            password (str)      -   the password used to login
            ssh_con (object)    -   the ssh session logged in
        """
        if not host:
            host = 'controller-1'
            if system_helper.is_aio_simplex():
                host = 'controller-0'

        prompt_keystone_user_name = r'Enter Keystone username \[{}\]: '.format(
            user_name)
        cmd_expected = (
            (
                'ssh -l {} -o UserKnownHostsFile=/dev/null {}'.format(
                    user_name, host),
                (r'Are you sure you want to continue connecting \(yes/no\)\?',
                 ),
                ('ssh: Could not resolve hostname {}: Name or service not '
                 'known'.format(host), ),
            ),
            (
                'yes',
                (r'{}@{}\'s password: '.format(user_name, host), ),
                (),
            ),
            (
                '{}'.format(password),
                (r'Pre-store Keystone user credentials for this session!', ),
                (r'Permission denied, please try again\.', ),
            ),
            (
                # answer the pre-store credentials prompt per the pre_store argument
                'y' if pre_store else 'N',
                (
                    prompt_keystone_user_name,
                    Prompt.CONTROLLER_PROMPT,
                ),
                (r'Permission denied, please try again\.', ),
            ),
        )

        logged_in = False
        self.ssh_con.flush()
        for i in range(len(cmd_expected)):
            cmd, expected, errors = cmd_expected[i]
            LOG.info('cmd={}\nexpected={}\nerrors={}\n'.format(
                cmd, expected, errors))
            self.ssh_con.send(cmd)

            index = self.ssh_con.expect(blob_list=list(expected + errors))
            if len(expected) <= index:
                break
            elif 3 == i:
                if expected[index] == prompt_keystone_user_name:
                    assert pre_store, \
                        'pre_store is False, while selecting "y" to ' \
                        '"Pre-store Keystone user credentials ' \
                        'for this session!"'
                else:
                    logged_in = True
                    break
        else:
            logged_in = True

        if logged_in:
            if disconnect_after:
                self.ssh_con.send('exit')

        return logged_in, password, self.ssh_con
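
# Hypothetical usage sketch for the helper above. 'ldap_manager' stands in for
# whatever object exposes login_as_ldap_user() in this framework, and the
# credentials are placeholders; exec_cmd() mirrors the ssh client API used
# elsewhere in these examples.
def demo_login_as_ldap_user(ldap_manager):
    logged_in, password, ssh_con = ldap_manager.login_as_ldap_user(
        user_name='ldapuser01',
        password='placeholder-password',
        pre_store=True,           # answer 'y' to pre-storing keystone credentials
        disconnect_after=False)   # keep the session open for follow-up commands

    assert logged_in, 'LDAP login failed'
    ssh_con.exec_cmd('whoami')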
Example #2
def test_dc_modify_timezone(prev_check):
    """
    Test timezone modify on system controller and subcloud. Ensure timezone change is not propagated.

    Setups:
        - Ensure both central and subcloud regions are configured with UTC
        - Get the timestamps for host created_at before timezone modify

    Test Steps:
        - Change the timezone in central region and wait until the change is applied
        - Change the timezone to a different zone in subcloud and wait until the change is applied
        - Verify host created_at timestamp updated according to the local timezone for the region
        - Swact on subcloud and ensure timezone and host created_at timestamp persists locally
        - Swact central controller and ensure timezone and host created_at timestamp persists in central and subcloud

    Teardown:
        - Change timezone to UTC in both central and subcloud regions
        - Ensure host created_at timestamp is reverted to original

    """
    prev_central_time, prev_sub_time, central_zone, sub_zone, central_auth, subcloud_auth, subcloud = prev_check

    LOG.tc_step("Modify timezone to {} in central region".format(central_zone))
    system_helper.modify_timezone(timezone=central_zone,
                                  auth_info=central_auth)

    LOG.tc_step(
        "Waiting for timestamp for host created_at to update in central region"
    )
    post_central_time = wait_for_timestamp_update(
        prev_timestamp=prev_central_time, auth_info=central_auth)
    assert post_central_time != prev_central_time, "host created_at timestamp did not update after timezone changed " \
                                                   "to {} in central region".format(central_zone)

    LOG.tc_step("Modify timezone to {} in {}".format(sub_zone, subcloud))
    system_helper.modify_timezone(timezone=sub_zone, auth_info=subcloud_auth)

    LOG.tc_step(
        "Waiting for timestamp for same host created_at to update in {}".
        format(subcloud))
    post_sub_time = wait_for_timestamp_update(prev_timestamp=prev_sub_time,
                                              auth_info=subcloud_auth)
    assert post_sub_time != prev_sub_time, "host created_at timestamp did not update after timezone changed to {} " \
                                           "in {}".format(sub_zone, subcloud)
    assert post_sub_time != post_central_time, \
        "Host created_at timestamp is the same on central and {} when configured with different " \
        "timezones".format(subcloud)

    LOG.tc_step(
        "Ensure host created_at timestamp does not change after subcloud sync audit"
    )
    dc_helper.wait_for_sync_audit(subclouds=subcloud)
    post_sync_sub_time = system_helper.get_host_values(
        host='controller-0', fields='created_at', auth_info=subcloud_auth)[0]
    assert post_sub_time == post_sync_sub_time, "Host created_at timestamp changed after sync audit on {}" \
        .format(subcloud)

    if not system_helper.is_aio_simplex():
        LOG.tc_step(
            "Swact in {} region and verify timezone persists locally".format(
                subcloud))
        host_helper.swact_host(auth_info=subcloud_auth)
        post_swact_sub_zone = system_helper.get_timezone(
            auth_info=subcloud_auth)
        assert post_swact_sub_zone == sub_zone

        post_swact_sub_time = system_helper.get_host_values(
            host='controller-0', fields='created_at',
            auth_info=subcloud_auth)[0]
        assert post_swact_sub_time == post_sub_time

    if system_helper.get_standby_controller_name(auth_info=central_auth):
        LOG.tc_step(
            "Swact in central region, and ensure timezone persists locally in central and subcloud"
        )
        host_helper.swact_host(auth_info=central_auth)

        # Verify central timezone persists
        post_swact_central_zone = system_helper.get_timezone(
            auth_info=central_auth)
        assert post_swact_central_zone == central_zone
        post_swact_central_time = system_helper.get_host_values(
            host='controller-0', fields='created_at',
            auth_info=central_auth)[0]
        assert post_swact_central_time == post_central_time

        # Verify subcloud timezone persists
        post_central_swact_sub_zone = system_helper.get_timezone(
            auth_info=subcloud_auth)
        assert post_central_swact_sub_zone == sub_zone
        post_central_swact_sub_time = system_helper.get_host_values(
            host='controller-0', fields='created_at',
            auth_info=subcloud_auth)[0]
        assert post_central_swact_sub_time == post_sub_time
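
# Hypothetical sketch of the seven-item tuple returned by the prev_check fixture
# unpacked above. The import paths, subcloud name and timezone choices are
# assumptions; only get_host_values() and Tenant.get() appear in these examples.
import pytest
from keywords import system_helper     # assumed import path within the framework
from consts.auth import Tenant          # assumed import path within the framework


@pytest.fixture()
def prev_check():
    subcloud = 'subcloud1'               # assumed subcloud under test
    central_auth = Tenant.get('admin')   # assumed: credentials for the central region
    subcloud_auth = Tenant.get('admin')  # assumed: credentials scoped to the subcloud
    central_zone, sub_zone = 'America/New_York', 'Asia/Hong_Kong'  # any two distinct non-UTC zones

    # created_at timestamps for controller-0 before the timezone changes
    prev_central_time = system_helper.get_host_values(
        host='controller-0', fields='created_at', auth_info=central_auth)[0]
    prev_sub_time = system_helper.get_host_values(
        host='controller-0', fields='created_at', auth_info=subcloud_auth)[0]

    return (prev_central_time, prev_sub_time, central_zone, sub_zone,
            central_auth, subcloud_auth, subcloud)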
Example #3
def test_lock_unlock_host(host_type, collect_kpi):
    """
    Verify lock unlock host

    Test Steps:
        - Select a host per given type. If type is controller, select standby controller.
        - Lock selected host and ensure it is successfully locked
        - Unlock selected host and ensure it is successfully unlocked

    """
    init_time = None
    if collect_kpi:
        init_time = common.get_date_in_format(date_format=KPI_DATE_FORMAT)

    LOG.tc_step("Select a {} node from system if any".format(host_type))
    if host_type == 'controller':
        if system_helper.is_aio_simplex():
            host = 'controller-0'
        else:
            host = system_helper.get_standby_controller_name()
            assert host, "No standby controller available"

    else:
        if host_type == 'compute' and (system_helper.is_aio_duplex()
                                       or system_helper.is_aio_simplex()):
            skip("No compute host on AIO system")
        elif host_type == 'storage' and not system_helper.is_storage_system():
            skip("System does not have storage nodes")

        hosts = system_helper.get_hosts(personality=host_type,
                                        availability=HostAvailState.AVAILABLE,
                                        operational=HostOperState.ENABLED)

        assert hosts, "No good {} host on system".format(host_type)
        host = hosts[0]

    LOG.tc_step(
        "Lock {} host - {} and ensure it is successfully locked".format(
            host_type, host))
    HostsToRecover.add(host)
    host_helper.lock_host(host, swact=False)

    # wait for services to stabilize before unlocking
    time.sleep(20)

    # unlock the host and verify it is successfully unlocked
    LOG.tc_step(
        "Unlock {} host - {} and ensure it is successfully unlocked".format(
            host_type, host))
    host_helper.unlock_host(host)

    LOG.tc_step("Check helm list after host unlocked")
    con_ssh = ControllerClient.get_active_controller()
    con_ssh.exec_cmd('helm list', fail_ok=False)

    if collect_kpi:
        lock_kpi_name = HostLock.NAME.format(host_type)
        unlock_kpi_name = HostUnlock.NAME.format(host_type)
        unlock_host_type = host_type
        if container_helper.is_stx_openstack_deployed():
            if system_helper.is_aio_system():
                unlock_host_type = 'compute'
        else:
            lock_kpi_name += '_platform'
            unlock_kpi_name += '_platform'
            if unlock_host_type == 'compute':
                unlock_host_type = 'compute_platform'

        LOG.info("Collect kpi for lock/unlock {}".format(host_type))
        code_lock, out_lock = kpi_log_parser.record_kpi(
            local_kpi_file=collect_kpi,
            kpi_name=lock_kpi_name,
            host=None,
            log_path=HostLock.LOG_PATH,
            end_pattern=HostLock.END.format(host),
            start_pattern=HostLock.START.format(host),
            start_path=HostLock.START_PATH,
            init_time=init_time)

        time.sleep(30)  # delay in sysinv log vs nova hypervisor list
        code_unlock, out_unlock = kpi_log_parser.record_kpi(
            local_kpi_file=collect_kpi,
            kpi_name=unlock_kpi_name,
            host=None,
            log_path=HostUnlock.LOG_PATH,
            end_pattern=HostUnlock.END[unlock_host_type].format(host),
            init_time=init_time,
            start_pattern=HostUnlock.START.format(host),
            start_path=HostUnlock.START_PATH)

        assert code_lock == 0, 'Failed to collect kpi for host-lock {}. ' \
                               'Error: \n{}'.format(host, out_lock)
        assert code_unlock == 0, 'Failed to collect kpi for host-unlock {}. ' \
                                 'Error: \n{}'.format(host, out_unlock)
Example #4
def test_system_upgrade_simplex(upgrade_setup,
                                check_system_health_query_upgrade):
    """
    This test starts the upgrade by creating a backup file, which wipes the disk at the end of
    the execution. To complete the upgrade, test_upgrade_simplex_restore.py needs to be executed
    with the backup file path.

    Args:
        upgrade_setup: checks the parameters and FTPs the load and patches to the system
        check_system_health_query_upgrade: checks the health of the system for upgrade

    Steps:
        1. FTP the load and patches to the system.
        2. Check the upgrade health.
        3. Start the upgrade.
        4. Check the backup files.
        5. Back up the volumes and images.
        6. Execute host-upgrade.
        7. FTP the backup files.

    Teardown:
        - Flush ssh.

    """
    lab = upgrade_setup['lab']

    current_version = upgrade_setup['current_version']
    upgrade_version = upgrade_setup['upgrade_version']

    if not system_helper.is_aio_simplex():
        assert False, "This lab is not simplex to start upgrade"
    force = False
    controller0 = lab['controller-0']

    backup_dest_path = BackupVars.get_backup_var('BACKUP_DEST_PATH')
    backup_dest_full_path = '{}/{}/'.format(backup_dest_path,
                                            lab['short_name'])
    date = time.strftime(BACKUP_FILE_DATE_STR)
    build_id = system_helper.get_build_info()['BUILD_ID']
    lab_system_name = lab['name']
    backup_file_name = "{}{}_{}_{}".format(PREFIX_BACKUP_FILE, date, build_id,
                                           lab_system_name)
    print('Backup_File_Name', backup_file_name)
    # ssh to test server
    test_server_attr = dict()
    test_server_attr['name'] = TestFileServer.get_hostname().split('.')[0]
    test_server_attr['server_ip'] = TestFileServer.get_server()
    test_server_attr['prompt'] = r'\[{}@{} {}\]\$ ' \
        .format(TestFileServer.get_user(), test_server_attr['name'], TestFileServer.get_user())

    test_server_conn = install_helper.establish_ssh_connection(
        test_server_attr['name'],
        user=TestFileServer.get_user(),
        password=TestFileServer.get_password(),
        initial_prompt=test_server_attr['prompt'])

    test_server_conn.set_prompt(test_server_attr['prompt'])
    test_server_conn.deploy_ssh_key(install_helper.get_ssh_public_key())
    test_server_attr['ssh_conn'] = test_server_conn
    test_server_obj = Server(**test_server_attr)
    dest_server = test_server_obj
    # create the backup path for this lab on the test server if it does not already exist
    if test_server_conn.exec_cmd(
            "test -e {}".format(backup_dest_full_path))[0]:
        test_server_conn.exec_cmd("mkdir -p {}".format(backup_dest_full_path))
    LOG.tc_step("Checking system health for upgrade .....")
    if check_system_health_query_upgrade[0] == 0:
        LOG.info("System health OK for upgrade......")
    if check_system_health_query_upgrade[0] == 1:
        assert False, "System health query upgrade failed: {}".format(
            check_system_health_query_upgrade[1])

    if check_system_health_query_upgrade[
            0] == 3 or check_system_health_query_upgrade[0] == 2:
        LOG.info(
            "System health indicate minor alarms; using --force option to start upgrade......"
        )
        force = True

    vol_ids = cinder_helper.get_volumes(auth_info=Tenant.get('admin'))
    if len(vol_ids) > 0:
        LOG.info("Exporting cinder volumes: {}".format(vol_ids))
        exported = install_helper.export_cinder_volumes(
            backup_dest='local',
            backup_dest_path=backup_dest_full_path,
            dest_server=dest_server)

        assert len(exported) > 0, "Fail to export all volumes"
        assert len(exported) == len(
            vol_ids), "Some volumes failed export: {}".format(
                set(vol_ids) - set(exported))
    else:
        LOG.info(
            "No cinder volumes are available in the system; skipping cinder "
            "volume export...")

    LOG.tc_step("Starting upgrade from release {} to target release {}".format(
        current_version, upgrade_version))
    upgrade_helper.system_upgrade_start(force=force)
    upgrade_helper.wait_for_upgrade_states('started',
                                           timeout=1360,
                                           check_interval=30,
                                           fail_ok=True)

    LOG.info("upgrade started successfully......")

    # scp backup files to test server
    LOG.tc_step("SCP system and image tgz file into test server {} ",
                backup_dest_full_path)

    source_file = '/opt/backups/upgrade_data_*system.tgz '
    backup_dest_full_path_image = backup_dest_full_path
    backup_dest_full_path = backup_dest_full_path + "/" + backup_file_name + "_system.tgz"
    common.scp_from_active_controller_to_test_server(source_file,
                                                     backup_dest_full_path,
                                                     is_dir=False)
    backup_dest_full_path_image = backup_dest_full_path_image + "/" + backup_file_name + "_images.tgz"
    source_file = '/opt/backups/upgrade_data_*images.tgz '
    common.scp_from_active_controller_to_test_server(
        source_file, backup_dest_full_path_image, is_dir=False)
    LOG.info("Starting {} upgrade.....".format(controller0.name))
    # Below line will wipe disk
    # upgrade_helper.upgrade_host(controller0.name, lock=True)

    LOG.tc_step(
        "Host upgrade executed. This will wipe the disk and reboot controller-0.")
    time.sleep(3)
    # open vlm console for controller-0 for boot through mgmt interface
    LOG.info(
        "Upgrade simplex backup is complete. The restore script should be run "
        "on this backup to complete the upgrade.")
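
# Equivalent path assembly for the two scp destinations built above with string
# concatenation; os.path.join keeps the separators consistent. The values below
# are placeholders standing in for backup_dest_path, lab['short_name'] and
# backup_file_name from the test.
import os

backup_dest_path = '/sandbox/upgrade_backups'
short_name = 'my_lab'
backup_file_name = 'backup_20200101_120000_build123_my_lab'

system_tgz = os.path.join(backup_dest_path, short_name, backup_file_name + '_system.tgz')
images_tgz = os.path.join(backup_dest_path, short_name, backup_file_name + '_images.tgz')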
Example #5
    def go_to_device_usage_tab(self):
        if system_helper.is_aio_simplex():
            self.go_to_tab(1)
        else:
            self.go_to_tab(self.DEVICE_USAGE_TAB_INDEX)
Example #6
def test_vm_with_config_drive(hosts_per_stor_backing):
    """
    Skip Condition:
        - no host with local_image backend

    Test Steps:
        - Launch a vm using config drive
        - Add test data to config drive on vm
        - Do some operations (reboot vm for simplex, cold migrate and lock host for non-simplex) and
            check test data persisted in config drive after each operation
    Teardown:
        - Delete created vm, volume, flavor

    """
    guest_os = 'cgcs-guest'
    # guest_os = 'tis-centos-guest'  # CGTS-6782
    img_id = glance_helper.get_guest_image(guest_os)
    hosts_num = len(hosts_per_stor_backing.get('local_image', []))
    if hosts_num < 1:
        skip("No host with local_image storage backing")

    volume_id = cinder_helper.create_volume(name='vol_inst1',
                                            source_id=img_id,
                                            guest_image=guest_os)[1]
    ResourceCleanup.add('volume', volume_id, scope='function')

    block_device = {
        'source': 'volume',
        'dest': 'volume',
        'id': volume_id,
        'device': 'vda'
    }
    vm_id = vm_helper.boot_vm(name='config_drive',
                              config_drive=True,
                              block_device=block_device,
                              cleanup='function',
                              guest_os=guest_os,
                              meta={'foo': 'bar'})[1]

    LOG.tc_step("Confirming the config drive is set to True in vm ...")
    assert str(vm_helper.get_vm_values(vm_id, "config_drive")[0]) == 'True', \
        "vm config-drive not true"

    LOG.tc_step("Add date to config drive ...")
    check_vm_config_drive_data(vm_id)

    vm_host = vm_helper.get_vm_host(vm_id)
    instance_name = vm_helper.get_vm_instance_name(vm_id)
    LOG.tc_step("Check config_drive vm files on hypervisor after vm launch")
    check_vm_files_on_hypervisor(vm_id,
                                 vm_host=vm_host,
                                 instance_name=instance_name)

    if not system_helper.is_aio_simplex():
        LOG.tc_step("Cold migrate VM")
        vm_helper.cold_migrate_vm(vm_id)

        LOG.tc_step("Check config drive after cold migrate VM...")
        check_vm_config_drive_data(vm_id)

        LOG.tc_step("Lock the compute host")
        compute_host = vm_helper.get_vm_host(vm_id)
        HostsToRecover.add(compute_host)
        host_helper.lock_host(compute_host, swact=True)

        LOG.tc_step("Check config drive after locking VM host")
        check_vm_config_drive_data(vm_id, ping_timeout=VMTimeout.DHCP_RETRY)
        vm_host = vm_helper.get_vm_host(vm_id)

    else:
        LOG.tc_step("Reboot vm")
        vm_helper.reboot_vm(vm_id)

        LOG.tc_step("Check config drive after vm rebooted")
        check_vm_config_drive_data(vm_id)

    LOG.tc_step("Check vm files exist after nova operations")
    check_vm_files_on_hypervisor(vm_id,
                                 vm_host=vm_host,
                                 instance_name=instance_name)
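
# Rough, hypothetical sketch of the kind of check check_vm_config_drive_data()
# performs; not the framework's actual implementation. vm_ssh stands in for an
# ssh session into the guest, and exec_sudo_cmd() mirrors the ssh client API
# style used elsewhere in these examples. A config drive shows up in the guest
# as a small disk labeled 'config-2' whose metadata should contain the test data.
def check_config_drive_has_test_data(vm_ssh, expected='foo'):
    vm_ssh.exec_sudo_cmd('mkdir -p /mnt/config', fail_ok=True)
    vm_ssh.exec_sudo_cmd('mount -L config-2 /mnt/config', fail_ok=True)
    code, output = vm_ssh.exec_sudo_cmd(
        'cat /mnt/config/openstack/latest/meta_data.json')
    assert code == 0 and expected in output, 'test data missing from config drive'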
Example #7
def lock_unlock_host(backup_info, con_ssh, vms):
    """
    Do lock & unlock hosts test before system backup.

    Args:
        backup_info:
            - options for system backup

        con_ssh:
            - current ssh connection to the target

        vms:
            - VMs whose hosts are to be tested
    Return:
        None
    """

    active_controller_name = system_helper.get_active_controller_name()

    target_vm = random.choice(vms)
    LOG.info('lock and unlock the host of VM:{}'.format(target_vm))

    target_host = vm_helper.get_vm_host(target_vm, con_ssh=con_ssh)
    if target_host == active_controller_name:
        if not system_helper.is_aio_simplex():
            LOG.warning(
                'Attempt to lock the active controller on a non-simplex system'
            )
            host_helper.swact_host()

    active_controller_name = system_helper.get_active_controller_name()

    LOG.info('lock and unlock:{}'.format(target_host))

    host_helper.lock_host(target_host)
    if not system_helper.is_aio_simplex():
        LOG.info('check if the VM is pingable')
        vm_helper.ping_vms_from_natbox(target_vm)
    else:
        LOG.info(
            'skip pinging vm after locking the only node in a simplex system')

    LOG.info('unlock:{}'.format(target_host))
    host_helper.unlock_host(target_host)

    system_helper.wait_for_host_values(target_host,
                                       administrative='unlocked',
                                       availability='available',
                                       vim_progress_status='services-enabled')
    for tried in range(5):
        pingable, message = vm_helper.ping_vms_from_natbox(target_vm,
                                                           fail_ok=(tried < 4))
        if pingable:
            LOG.info('Succeeded to ping VM:{}'.format(target_vm))
            break
        else:
            LOG.info('failed to ping VM:{}, try again in 20 seconds'.format(
                target_vm))
            time.sleep(20)
    if backup_info.get('dest', 'local') == 'usb':
        if active_controller_name != 'controller-0':
            LOG.info(
                'current active_controller: ' + active_controller_name +
                ', swacting so that controller-0 is active for the USB backup')
            host_helper.swact_host()
            active_controller_name = system_helper.get_active_controller_name()
            LOG.info(
                'current active_controller should now be controller-0, actual: '
                + active_controller_name)
Example #8
def test_modify_mtu_oam_interface(mtu_range):
    """

    From the 2016-04-04 sysinv_test_plan.pdf:
    20) Change the MTU value of the OAM interface using CLI

    Verify that MTU on oam interfaces on both standby and active controller can be modified by cli

    Args:
        mtu_range (str): A string containing the MTU range to be tested

    Setup:
        - Nothing

    Test Steps:
        - lock standby controller
        - modify the oam mtu value of the controller
        - unlock the controller
        - revert the oam mtu of the controller and check system is still healthy
        - swact the controller
        - lock the controller
        - modify the oam mtu value of the controller
        - unlock the controller
        - check the controllers have expected mtu
        - revert the oam mtu of the controller and check system is still healthy

    Teardown:
        - Nothing

    """
    is_sx = system_helper.is_aio_simplex()
    origin_active, origin_standby = system_helper.get_active_standby_controllers()
    if not origin_standby and not is_sx:
        skip("Standby controller unavailable. Cannot lock controller.")

    mtu = __get_mtu_to_mod(providernet_name='-ext', mtu_range=mtu_range)
    first_host = origin_active if is_sx else origin_standby
    max_mtu, cur_mtu, nic_name = get_max_allowed_mtus(host=first_host, network_type='oam')
    LOG.info('OK, the max MTU for {} is {}'.format(nic_name, max_mtu))

    expecting_pass = not max_mtu or mtu <= max_mtu
    if not expecting_pass:
        LOG.warning('Expecting to fail in changing MTU: changing to:{}, max-mtu:{}'.format(mtu, max_mtu))

    oam_attributes = host_helper.get_host_interfaces(host=first_host, field='attributes', name='oam', strict=False)

    # sample attributes: [MTU=9216,AE_MODE=802.3ad]
    pre_oam_mtu = int(oam_attributes[0].split(',')[0].split('=')[1])
    is_stx_openstack_applied = container_helper.is_stx_openstack_deployed(applied_only=True)

    if not is_sx:
        HostsToRecover.add(origin_standby)
        prev_bad_pods = kube_helper.get_unhealthy_pods(all_namespaces=True)

        LOG.tc_step("Modify {} oam interface MTU from {} to {} on standby controller, and "
                    "ensure it's applied successfully after unlock".format(origin_standby, pre_oam_mtu, mtu))
        if mtu == cur_mtu:
            LOG.info('Setting to same MTU: from:{} to:{}'.format(mtu, cur_mtu))

        code, res = host_helper.modify_mtu_on_interfaces(origin_standby, mtu_val=mtu, network_type='oam',
                                                         lock_unlock=True, fail_ok=True)

        LOG.tc_step("Revert OAM MTU to original value: {}".format(pre_oam_mtu))
        code_revert, res_revert = host_helper.modify_mtu_on_interfaces(origin_standby, mtu_val=pre_oam_mtu,
                                                                       network_type='oam',
                                                                       lock_unlock=True, fail_ok=True)
        if 0 == code:
            assert expecting_pass, "OAM MTU is not modified successfully. Result: {}".format(res)
        else:
            assert not expecting_pass, "OAM MTU WAS modified unexpectedly. Result: {}".format(res)

        assert 0 == code_revert, "OAM MTU is not reverted successfully. Result: {}".format(res_revert)

        LOG.tc_step("Check openstack cli, application and pods status after modify and revert {} oam mtu".
                    format(origin_standby))
        check_containers(prev_bad_pods, check_app=is_stx_openstack_applied)

        LOG.tc_step("Ensure standby controller is in available state and attempt to swact active controller to {}".
                    format(origin_standby))
        system_helper.wait_for_hosts_states(origin_active, availability=['available'])
        host_helper.swact_host(fail_ok=False)
        host_helper.wait_for_webservice_up(origin_standby)

    prev_bad_pods = kube_helper.get_unhealthy_pods(all_namespaces=True)
    HostsToRecover.add(origin_active)
    LOG.tc_step("Modify {} oam interface MTU to: {}, and "
                "ensure it's applied successfully after unlock".format(origin_active, mtu))
    code, res = host_helper.modify_mtu_on_interfaces(origin_active,
                                                     mtu_val=mtu, network_type='oam', lock_unlock=True,
                                                     fail_ok=True)
    LOG.tc_step("Revert OAM MTU to original value: {}".format(pre_oam_mtu))
    code_revert, res_revert = host_helper.modify_mtu_on_interfaces(origin_active, mtu_val=pre_oam_mtu,
                                                                   network_type='oam',
                                                                   lock_unlock=True, fail_ok=True)
    if 0 == code:
        assert expecting_pass, "OAM MTU is not modified successfully. Result: {}".format(res)
    else:
        assert not expecting_pass, "OAM MTU WAS modified unexpectedly. Result: {}".format(res)

    assert 0 == code_revert, "OAM MTU is not reverted successfully. Result: {}".format(res_revert)

    LOG.tc_step("Check openstack cli, application and pods after modify and revert {} oam mtu".format(origin_active))
    check_containers(prev_bad_pods, check_app=is_stx_openstack_applied)
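
# The MTU parse above assumes MTU is the first key in the attributes string
# (sample: [MTU=9216,AE_MODE=802.3ad]). A slightly more defensive sketch that
# looks the key up by name instead of by position:
def parse_mtu_from_attributes(attributes_str):
    pairs = dict(item.split('=', 1) for item in attributes_str.strip('[]').split(','))
    return int(pairs['MTU'])


assert parse_mtu_from_attributes('MTU=9216,AE_MODE=802.3ad') == 9216
assert parse_mtu_from_attributes('[AE_MODE=802.3ad,MTU=1500]') == 1500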
Example #9
def no_simplex():
    LOG.fixture_step("(Session) Skip if Simplex")
    if system_helper.is_aio_simplex():
        skip(SkipSysType.SIMPLEX_SYSTEM)
Example #10
def simplex_only():
    LOG.fixture_step("(Session) Skip if not Simplex")
    if not system_helper.is_aio_simplex():
        skip(SkipSysType.SIMPLEX_ONLY)
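
# Illustrative consumption of the two session fixtures above; the decorator form
# is one common way to pull in a skip-only fixture without using its return
# value. The test names and bodies are placeholders.
import pytest


@pytest.mark.usefixtures('no_simplex')
def test_requires_standby_controller():
    ...    # body would assume a standby controller exists


@pytest.mark.usefixtures('simplex_only')
def test_single_node_behaviour():
    ...    # body would assume the system is AIO simplex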
Example #11
def test_swact_controllers(wait_for_con_drbd_sync_complete):
    """
    Verify swact active controller

    Test Steps:
        - Boot a vm on system and check ping works
        - Swact active controller
        - Verify standby controller and active controller are swapped
        - Verify vm is still pingable

    """
    if system_helper.is_aio_simplex():
        skip("Simplex system detected")

    if not wait_for_con_drbd_sync_complete:
        skip(SkipSysType.LESS_THAN_TWO_CONTROLLERS)

    LOG.tc_step('retrieve active and available controllers')
    pre_active_controller, pre_standby_controller = \
        system_helper.get_active_standby_controllers()
    assert pre_standby_controller, "No standby controller available"

    pre_res_sys, pre_msg_sys = system_helper.wait_for_services_enable(
        timeout=20, fail_ok=True)
    up_hypervisors = host_helper.get_up_hypervisors()
    pre_res_neutron, pre_msg_neutron = network_helper.wait_for_agents_healthy(
        up_hypervisors, timeout=20, fail_ok=True)

    LOG.tc_step("Boot a vm from image and ping it")
    vm_id_img = vm_helper.boot_vm(name='swact_img',
                                  source='image',
                                  cleanup='function')[1]
    vm_helper.wait_for_vm_pingable_from_natbox(vm_id_img)

    LOG.tc_step("Boot a vm from volume and ping it")
    vm_id_vol = vm_helper.boot_vm(name='swact', cleanup='function')[1]
    vm_helper.wait_for_vm_pingable_from_natbox(vm_id_vol)

    LOG.tc_step("Swact active controller and ensure active controller is "
                "changed")
    host_helper.swact_host(hostname=pre_active_controller)

    LOG.tc_step("Verify standby controller and active controller are swapped")
    post_active_controller = system_helper.get_active_controller_name()
    post_standby_controller = system_helper.get_standby_controller_name()

    assert pre_standby_controller == post_active_controller, \
        "Prev standby: {}; Post active: {}".format(
            pre_standby_controller, post_active_controller)
    assert pre_active_controller == post_standby_controller, \
        "Prev active: {}; Post standby: {}".format(
            pre_active_controller, post_standby_controller)

    LOG.tc_step("Check boot-from-image vm still pingable after swact")
    vm_helper.wait_for_vm_pingable_from_natbox(vm_id_img, timeout=30)
    LOG.tc_step("Check boot-from-volume vm still pingable after swact")
    vm_helper.wait_for_vm_pingable_from_natbox(vm_id_vol, timeout=30)

    LOG.tc_step("Check system services and neutron agents after swact "
                "from {}".format(pre_active_controller))
    post_res_sys, post_msg_sys = \
        system_helper.wait_for_services_enable(fail_ok=True)
    post_res_neutron, post_msg_neutron = \
        network_helper.wait_for_agents_healthy(hosts=up_hypervisors,
                                               fail_ok=True)

    assert post_res_sys, "\nPost-swact system services stats: {}" \
                         "\nPre-swact system services stats: {}". \
        format(post_msg_sys, pre_msg_sys)
    assert post_res_neutron, "\nPost-swact neutron agents stats: {}" \
                             "\nPre-swact neutron agents stats: {}". \
        format(post_msg_neutron, pre_msg_neutron)

    LOG.tc_step("Check hosts are Ready in kubectl get nodes after swact")
    kube_helper.wait_for_nodes_ready(hosts=(pre_active_controller,
                                            pre_standby_controller),
                                     timeout=30)
Example #12
def delete_resources(resource_names=None,
                     select_all=None,
                     resource_types='pod',
                     namespace=None,
                     recursive=None,
                     labels=None,
                     con_ssh=None,
                     fail_ok=False,
                     post_check=True,
                     check_both_controllers=True):
    """
    Delete resources via kubectl delete
    Args:
        resource_names (None|str|list|tuple):
        select_all (None|bool):
        resource_types (str|list|tuple):
        namespace (None|str):
        recursive (bool):
        labels (None|dict):
        con_ssh:
        fail_ok:
        post_check (bool): Whether to check if resources are gone after deletion
        check_both_controllers (bool):

    Returns (tuple):
        (0, None)   # pods successfully deleted
        (1, <std_err>)
        (2, <undeleted_resources>(list of dict))    # pod(s) still exist in
        kubectl after deletion
        (3, <undeleted_resources_on_other_controller>(list of dict))    #
        pod(s) still exist on the other controller

    """
    arg_dict = {
        '--all': select_all,
        '-l': labels,
        '--recursive': recursive,
    }

    arg_str = common.parse_args(args_dict=arg_dict, vals_sep=',')
    if resource_types:
        if isinstance(resource_types, str):
            resource_types = [resource_types]
        arg_str = '{} {}'.format(','.join(resource_types), arg_str).strip()

    if resource_names:
        if isinstance(resource_names, str):
            resource_names = [resource_names]
        arg_str = '{} {}'.format(arg_str, ' '.join(resource_names))

    if not con_ssh:
        con_ssh = ControllerClient.get_active_controller()
    code, output = exec_kube_cmd(sub_cmd='delete',
                                 args=arg_str,
                                 con_ssh=con_ssh,
                                 fail_ok=fail_ok)
    if code > 0:
        return 1, output

    if post_check:

        def __wait_for_resources_gone(ssh_client):
            final_remaining = []
            if resource_types:
                for resource_type in resource_types:
                    res, remaining_res = wait_for_resources_gone(
                        resource_names=resource_names,
                        resource_type=resource_type,
                        namespace=namespace,
                        con_ssh=ssh_client,
                        fail_ok=fail_ok)
                    if not res:
                        final_remaining += remaining_res
            else:
                res, final_remaining = wait_for_resources_gone(
                    resource_names=resource_names,
                    namespace=namespace,
                    con_ssh=ssh_client,
                    fail_ok=fail_ok)
            return final_remaining

        LOG.info("Check pod is not running on current host")

        remaining = __wait_for_resources_gone(con_ssh)
        if remaining:
            return 2, remaining

        if check_both_controllers and not system_helper.is_aio_simplex(
                con_ssh=con_ssh):
            LOG.info("Check pod is running on the other controller as well")
            con_name = 'controller-1' if \
                con_ssh.get_hostname() == 'controller-0' else 'controller-0'
            from keywords import host_helper
            with host_helper.ssh_to_host(hostname=con_name,
                                         con_ssh=con_ssh) as other_con:
                remaining = __wait_for_resources_gone(other_con)
                if remaining:
                    return 3, remaining

    LOG.info("{} are successfully removed.".format(resource_names))
    return 0, None
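
# Hypothetical caller handling the return codes documented above; the pod name
# and namespace are placeholders.
code, info = delete_resources(resource_names='testpod-1',
                              resource_types='pod',
                              namespace='default',
                              fail_ok=True)
if code == 0:
    LOG.info('pod deleted and gone from both controllers')
elif code == 1:
    LOG.warning('kubectl delete failed: {}'.format(info))
else:
    # code 2 or 3: deletion accepted but resources still listed somewhere
    LOG.warning('resources still present after deletion: {}'.format(info))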
Example #13
def apply_pod(file_path,
              pod_name,
              namespace=None,
              recursive=None,
              select_all=None,
              labels=None,
              con_ssh=None,
              fail_ok=False,
              check_both_controllers=True):
    """
    Apply a pod from given file via kubectl apply
    Args:
        file_path (str):
        pod_name (str):
        namespace (None|str):
        recursive (None|bool):
        select_all (None|bool):
        labels (dict|str|list|tuple|None): key value pairs
        con_ssh:
        fail_ok:
        check_both_controllers (bool):

    Returns (tuple):
        (0, <pod_info>(dict))
        (1, <std_err>)
        (2, <pod_info>)    # pod is not running after apply
        (3, <pod_info>)    # pod is not running on the other controller after
        apply

    """
    arg_dict = {
        '--all': select_all,
        '-l': labels,
        '--recursive': recursive,
    }

    arg_str = common.parse_args(args_dict=arg_dict, vals_sep=',')
    arg_str += ' -f {}'.format(file_path)

    if not con_ssh:
        con_ssh = ControllerClient.get_active_controller()
    code, output = exec_kube_cmd(sub_cmd='apply',
                                 args=arg_str,
                                 con_ssh=con_ssh,
                                 fail_ok=fail_ok)
    if code > 0:
        return 1, output

    LOG.info("Check pod is running on current host")
    res = wait_for_pods_status(pod_names=pod_name,
                               namespace=namespace,
                               status=PodStatus.RUNNING,
                               con_ssh=con_ssh,
                               fail_ok=fail_ok)
    if not res:
        return 2, "Pod {} is not running after apply on active " \
                  "controller".format(pod_name)

    if check_both_controllers and not system_helper.is_aio_simplex(
            con_ssh=con_ssh):
        LOG.info("Check pod is running on the other controller as well")
        con_name = 'controller-1' if con_ssh.get_hostname() == 'controller-0' \
            else 'controller-0'
        from keywords import host_helper
        with host_helper.ssh_to_host(hostname=con_name,
                                     con_ssh=con_ssh) as other_con:
            res, pods_info = wait_for_pods_status(pod_names=pod_name,
                                                  namespace=namespace,
                                                  con_ssh=other_con,
                                                  fail_ok=fail_ok)
            if not res:
                return 3, "Pod {} is not running after apply on standby " \
                          "controller".format(pod_name)

    LOG.info("{} pod is successfully applied and running".format(pod_name))
    return 0, pod_name
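
# Hypothetical usage sketch: write a minimal busybox pod manifest onto the
# active controller and feed it to apply_pod(). The manifest content, file path
# and pod name are illustrative only.
pod_manifest = """apiVersion: v1
kind: Pod
metadata:
  name: sleep-test
spec:
  containers:
  - name: sleep
    image: busybox
    command: ["sleep", "3600"]
"""

con_ssh = ControllerClient.get_active_controller()
con_ssh.exec_cmd("cat > sleep-test.yaml << 'EOF'\n{}EOF".format(pod_manifest))
code, out = apply_pod(file_path='sleep-test.yaml',
                      pod_name='sleep-test',
                      namespace='default',
                      fail_ok=True)
assert code == 0, out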
Example #14
    def test_multiports_on_same_network_vm_actions(self, vifs,
                                                   check_avs_pattern,
                                                   base_setup):
        """
        Test vm actions on vm with multiple ports with given vif models on the same tenant network

        Args:
            vifs (tuple): each item in the tuple is 1 nic to be added to vm with
                specified (vif_mode, pci_address)
            base_setup (list): test fixture to boot base vm

        Setups:
            - create a flavor with dedicated cpu policy (class)
            - choose one tenant network and one internal network to be used by test (class)
            - boot a base vm - vm1 with above flavor and networks, and ping it from NatBox (class)
            - Boot a vm under test - vm2 with above flavor and with multiple ports on the same
                tenant network as the base vm, and ping it from NatBox      (class)
            - Ping vm2's own data network ips        (class)
            - Ping vm2 from vm1 to verify management and data networks connection    (class)

        Test Steps:
            - Perform given actions on vm2 (migrate, start/stop, etc)
            - Verify pci_address preserves
            - Verify ping from vm1 to vm2 over management and data networks still works

        Teardown:
            - Delete created vms and flavor
        """
        base_vm, flavor, mgmt_net_id, tenant_net_id, internal_net_id = base_setup

        vm_under_test, nics = _boot_multiports_vm(flavor=flavor,
                                                  mgmt_net_id=mgmt_net_id,
                                                  vifs=vifs,
                                                  net_id=tenant_net_id,
                                                  net_type='data',
                                                  base_vm=base_vm)

        for vm_actions in [['auto_recover'], ['cold_migrate'],
                           ['pause', 'unpause'], ['suspend', 'resume'],
                           ['hard_reboot']]:
            if vm_actions[0] == 'auto_recover':
                LOG.tc_step(
                    "Set vm to error state and wait for auto recovery complete, "
                    "then verify ping from base vm over management and data networks"
                )
                vm_helper.set_vm_state(vm_id=vm_under_test,
                                       error_state=True,
                                       fail_ok=False)
                vm_helper.wait_for_vm_values(vm_id=vm_under_test,
                                             status=VMStatus.ACTIVE,
                                             fail_ok=True,
                                             timeout=600)
            else:
                LOG.tc_step("Perform following action(s) on vm {}: {}".format(
                    vm_under_test, vm_actions))
                for action in vm_actions:
                    if 'migrate' in action and system_helper.is_aio_simplex():
                        continue

                    kwargs = {}
                    if action == 'hard_reboot':
                        action = 'reboot'
                        kwargs['hard'] = True
                    kwargs['action'] = action

                    vm_helper.perform_action_on_vm(vm_under_test, **kwargs)

            vm_helper.wait_for_vm_pingable_from_natbox(vm_under_test)

            # LOG.tc_step("Verify vm pci address preserved after {}".format(vm_actions))
            # check_helper.check_vm_pci_addr(vm_under_test, nics)

            LOG.tc_step(
                "Verify ping from base_vm to vm_under_test over management and data "
                "networks still works after {}".format(vm_actions))
            vm_helper.ping_vms_from_vm(to_vms=vm_under_test,
                                       from_vm=base_vm,
                                       net_types=['mgmt', 'data'])
Example #15
def test_vm_actions_secure_boot_vm():
    """
    Test a vm that is booted with secure boot, and perform vm actions such as reboot and
    migrations on it

    :return:

    """
    guests_os = ['trusty_uefi', 'uefi_shell']
    disk_formats = ['qcow2', 'raw']
    image_ids = []
    volume_ids = []
    for guest_os, disk_format in zip(guests_os, disk_formats):
        image_ids.append(
            create_image_with_metadata(
                guest_os=guest_os,
                property_key=ImageMetadata.FIRMWARE_TYPE,
                values=['uefi'],
                disk_format=disk_format,
                container_format='bare'))
    # create a flavor
    flavor_id = nova_helper.create_flavor(vcpus=2, ram=1024, root_disk=5)[1]
    ResourceCleanup.add('flavor', flavor_id)
    # boot a vm using the above image
    for image_id in image_ids:
        volume_ids.append(
            cinder_helper.create_volume(source_id=image_id[0],
                                        size=5,
                                        cleanup='function')[1])

    block_device_dic = [{
        'id': volume_ids[1],
        'source': 'volume',
        'bootindex': 0
    }, {
        'id': volume_ids[0],
        'source': 'volume',
        'bootindex': 1
    }]

    vm_id = vm_helper.boot_vm(name='sec-boot-vm',
                              source='block_device',
                              flavor=flavor_id,
                              block_device=block_device_dic,
                              cleanup='function',
                              guest_os=guests_os[0])[1]

    _check_secure_boot_on_vm(vm_id=vm_id)
    if system_helper.is_aio_simplex():
        vm_actions_list = ('reboot', ['pause', 'unpause'], ['suspend', 'resume'])
    else:
        vm_actions_list = ('reboot', ['pause', 'unpause'], ['suspend', 'resume'],
                           'live_migrate', 'cold_migrate', 'cold_mig_revert')

    for vm_actions in vm_actions_list:
        if isinstance(vm_actions, str):
            vm_actions = (vm_actions, )
        LOG.tc_step("Perform following action(s) on vm {}: {}".format(
            vm_id, vm_actions))
        for action in vm_actions:
            vm_helper.perform_action_on_vm(vm_id, action=action)

        LOG.tc_step(
            "Verifying Secure boot is still enabled after vm action {}".format(
                vm_actions))
        _check_secure_boot_on_vm(vm_id=vm_id)