Code example #1
def test_restore_simplex_install(install_setup):
    """
     Complete restore steps for a simplex lab
     Test Setups:
         - Retrieve dictionary containing lab information
         - Retrieve required paths to directories, images, and licenses
         - Initialize build server and boot server objects
         - Retrieve which steps are to be skipped
     Test Steps:
         - Boot controller-0
         - Run restore controller-0
         - Unlock controller-0
     """
    lab = install_setup["lab"]
    controller0_node = lab["controller-0"]
    patch_dir = install_setup["directories"]["patches"]
    patch_server = install_setup["servers"]["patches"]

    do_boot_c0 = RestoreVars.get_restore_var('RESTORE_PRE_BOOT_CONTROLLER0')
    stop_before_ansible_restore =\
        RestoreVars.get_restore_var('STOP_BEFORE_ANSIBLE_RESTORE')

    if do_boot_c0:
        fresh_install_helper.install_controller(
            sys_type=SysType.AIO_SX,
            patch_dir=patch_dir,
            patch_server_conn=patch_server.ssh_conn,
            init_global_vars=True)
    else:
        LOG.tc_step("Skipping controller-0 install")

    if stop_before_ansible_restore:
        skip("Stopping test before restoring")

    if InstallVars.get_install_var('IPV6_OAM'):
        restore_helper.setup_ipv6_oam(controller0_node)

    restore_helper.restore_platform()

    fresh_install_helper.unlock_active_controller(controller0_node)
    controller0_node.telnet_conn.hostname = r"controller\-[01]"
    controller0_node.telnet_conn.set_prompt(Prompt.CONTROLLER_PROMPT)

    if controller0_node.ssh_conn is None:
        controller0_node.ssh_conn = install_helper.ssh_to_controller(
            controller0_node.host_ip)
    install_helper.update_auth_url(ssh_con=controller0_node.ssh_conn)

    if lab.get("floating ip"):
        setup_tis_ssh(lab)

    fresh_install_helper.wait_for_hosts_ready(controller0_node.name, lab=lab)

    fresh_install_helper.reset_global_vars()
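
All of the examples on this page consume the same install_setup pytest fixture and index it by a fixed set of keys. Below is a minimal sketch of that shape, inferred purely from how the tests index the dictionary; every value is a placeholder, and the real fixture builds node and server objects from lab configuration files and command-line options.

# Hypothetical reconstruction of the install_setup fixture's layout,
# inferred from how the tests index it. All values are placeholders.
import pytest


@pytest.fixture(scope="session")
def install_setup():
    lab = {
        "short_name": "example_lab",
        "hosts": ["controller-0", "controller-1", "compute-0"],
        "boot_device_dict": {},    # per-host boot interface mapping
        "controller-0": None,      # node object holding telnet/ssh conns
        "floating ip": "10.10.10.2",
    }
    return {
        "lab": lab,
        "control": {"stop": None},           # final step to stop at
        "directories": {"patches": "/path/to/patches",
                        "lab_files": "/path/to/lab_files"},
        "servers": {"patches": None, "build": None, "guest": None,
                    "lab_files": None, "helm_charts": None},
        "install_subcloud": None,            # subcloud name, if any
        "dc_system_controller": None,
    }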
Code example #2
def test_duplex_plus_install(install_setup):
    """
     Complete fresh_install steps for a duplex plus (AIO+) lab
     Test Setups:
         - Retrieve dictionary containing lab information
         - Retrieve required paths to directories, images, and licenses
         - Determine active controller
         - Initialize build server and boot server objects
         - Retrieve which steps are to be skipped
     Test Steps:
         - Install controller-0
         - Download configuration files, heat templates, images, and licenses
         - Configure controller-0, run lab_setup, and unlock controller-0
         - Add the standby controller
         - Boot the standby controller
         - Run the lab_setup.sh script
         - Unlock the standby controller
         - Run the lab_setup.sh script
     """
    lab = install_setup["lab"]
    hosts = lab["hosts"]
    boot_device = lab["boot_device_dict"]
    controller0_node = lab["controller-0"]
    final_step = install_setup["control"]["stop"]
    patch_dir = install_setup["directories"]["patches"]
    patch_server = install_setup["servers"]["patches"]
    guest_server = install_setup["servers"]["guest"]
    install_subcloud = install_setup.get("install_subcloud")
    lab_files_server = install_setup["servers"]["lab_files"]
    helm_chart_server = install_setup["servers"]["helm_charts"]

    if final_step == '0' or final_step == "setup":
        skip("stopping at install step: {}".format(LOG.test_step))

    fresh_install_helper.install_controller(
        sys_type=SysType.AIO_DX,
        patch_dir=patch_dir,
        patch_server_conn=patch_server.ssh_conn,
        init_global_vars=True)
    # controller0_node.telnet_conn.login()
    # controller0_node.telnet_conn.flush()
    # fresh_install_helper.set_software_version_var(use_telnet=True, con_telnet=controller0_node.telnet_conn)

    build_server = install_setup["servers"]["build"]
    fresh_install_helper.download_lab_files(
        lab_files_server=lab_files_server,
        build_server=build_server,
        guest_server=guest_server,
        load_path=InstallVars.get_install_var("TIS_BUILD_DIR"),
        license_path=InstallVars.get_install_var("LICENSE"),
        guest_path=InstallVars.get_install_var('GUEST_IMAGE'),
        helm_chart_server=helm_chart_server)

    if install_subcloud:
        fresh_install_helper.configure_subcloud(controller0_node,
                                                lab_files_server,
                                                subcloud=install_subcloud,
                                                final_step=final_step)
    else:
        fresh_install_helper.configure_controller_(controller0_node)

    deploy_mgr = fresh_install_helper.use_deploy_manager(controller0_node, lab)
    if not deploy_mgr:

        fresh_install_helper.check_ansible_configured_mgmt_interface(
            controller0_node, lab)

        #fresh_install_helper.collect_lab_config_yaml(lab, build_server, stage=fresh_install_helper.DEPLOY_INTITIAL)
        fresh_install_helper.run_lab_setup(con_ssh=controller0_node.ssh_conn)
        fresh_install_helper.unlock_active_controller(controller0_node)

    else:
        fresh_install_helper.wait_for_deploy_mgr_controller_config(
            controller0_node, lab=lab)

    controller0_node.telnet_conn.hostname = r"controller\-[01]"
    controller0_node.telnet_conn.set_prompt(Prompt.CONTROLLER_PROMPT)
    if controller0_node.ssh_conn is None:
        controller0_node.ssh_conn = install_helper.establish_ssh_connection(
            controller0_node.host_ip)
    install_helper.update_auth_url(ssh_con=controller0_node.ssh_conn)

    if not deploy_mgr:
        fresh_install_helper.bulk_add_hosts(lab=lab,
                                            con_ssh=controller0_node.ssh_conn)
    else:
        fresh_install_helper.wait_for_deployment_mgr_to_bulk_add_hosts(
            controller0_node, lab=lab)

    fresh_install_helper.boot_hosts(boot_device)
    #fresh_install_helper.collect_lab_config_yaml(lab, build_server, stage=fresh_install_helper.DEPLOY_INTERIM)
    if not deploy_mgr:
        fresh_install_helper.run_lab_setup(con_ssh=controller0_node.ssh_conn)
        fresh_install_helper.unlock_hosts(["controller-1"],
                                          con_ssh=controller0_node.ssh_conn)

    else:
        fresh_install_helper.wait_for_deploy_mgr_lab_config(controller0_node,
                                                            lab=lab)

    fresh_install_helper.wait_for_hosts_ready(hosts, lab=lab)
    container_helper.wait_for_apps_status(apps='platform-integ-apps',
                                          timeout=1800,
                                          con_ssh=controller0_node.ssh_conn,
                                          status='applied')
    fresh_install_helper.run_lab_setup(con_ssh=controller0_node.ssh_conn)

    if lab.get("floating ip"):
        collect_sys_net_info(lab)
        setup_tis_ssh(lab)

    #fresh_install_helper.check_heat_resources(con_ssh=controller0_node.ssh_conn)
    if not deploy_mgr:
        fresh_install_helper.collect_lab_config_yaml(
            lab, build_server, stage=fresh_install_helper.DEPLOY_LAST)

    fresh_install_helper.attempt_to_run_post_install_scripts()

    fresh_install_helper.reset_global_vars()

    fresh_install_helper.verify_install_uuid(lab)
    if deploy_mgr:
        fresh_install_helper.validate_deployment_mgr_install(
            controller0_node, lab)
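
The install flows block on the platform-integ-apps application reaching the applied state before the final lab_setup pass. The sketch below is a generic reduction of that polling pattern; the function name and signature are illustrative, not the repository's container_helper API, and the status source is abstracted behind a callable.

# Minimal sketch of the polling idea behind helpers such as
# container_helper.wait_for_apps_status, reduced to a generic loop.
# get_status is any zero-argument callable returning the current
# status string (the real helper parses platform CLI output over the
# controller ssh connection).
import time


def wait_for_status(get_status, expected="applied", timeout=1800,
                    interval=30):
    end_time = time.time() + timeout
    while time.time() < end_time:
        if get_status() == expected:
            return True
        time.sleep(interval)
    raise TimeoutError("status {!r} not reached within {}s"
                       .format(expected, timeout))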
Code example #3
def test_standard_restore_install(install_setup):
    """
     Complete restore steps for a standard lab

     Prerequisites:
         - pxeboot has been setup.
     Test Setups:
         - Retrieve dictionary containing lab information
         - Retrieve required paths to directories, images, and licenses
         - Determine active controller
         - Initialize build server and boot server objects
         - Retrieve which steps are to be skipped
     Test Steps:
         - Boot controller-0
         - Restore the platform on controller-0
         - Unlock controller-0
         - Boot the other hosts
         - Unlock the other hosts
     """
    lab = install_setup["lab"]
    hosts = lab["hosts"]
    boot_device = lab['boot_device_dict']
    controller0_node = lab["controller-0"]
    patch_dir = install_setup["directories"]["patches"]
    patch_server = install_setup["servers"]["patches"]

    # Power off all hosts except controller-0
    hostnames = [
        hostname for hostname in lab['hosts'] if 'controller-0' not in hostname
    ]
    vlm_helper.power_off_hosts(hostnames, lab=lab, count=2)

    do_boot_c0 = RestoreVars.get_restore_var('RESTORE_PRE_BOOT_CONTROLLER0')
    stop_before_ansible_restore = \
        RestoreVars.get_restore_var('STOP_BEFORE_ANSIBLE_RESTORE')

    if do_boot_c0:
        fresh_install_helper.install_controller(
            sys_type=SysType.REGULAR,
            patch_dir=patch_dir,
            patch_server_conn=patch_server.ssh_conn,
            init_global_vars=True)
    else:
        LOG.tc_step("Skipping controller-0 install")

    if stop_before_ansible_restore:
        skip("Stopping test before restoring")

    if InstallVars.get_install_var('IPV6_OAM'):
        restore_helper.setup_ipv6_oam(controller0_node)

    restore_helper.restore_platform()

    fresh_install_helper.unlock_active_controller(controller0_node)

    controller0_node.telnet_conn.hostname = r"controller\-[01]"
    controller0_node.telnet_conn.set_prompt(Prompt.CONTROLLER_PROMPT)
    if controller0_node.ssh_conn is None:
        controller0_node.ssh_conn = install_helper.ssh_to_controller(
            controller0_node.host_ip)
    install_helper.update_auth_url(ssh_con=controller0_node.ssh_conn)

    # Boot all hosts except controller-0
    fresh_install_helper.restore_boot_hosts(boot_device)

    # Unlock controller-1
    fresh_install_helper.unlock_hosts(['controller-1'],
                                      con_ssh=controller0_node.ssh_conn)

    # Unlock computes
    fresh_install_helper.unlock_hosts(
        [host_ for host_ in hosts if 'compute' in host_],
        con_ssh=controller0_node.ssh_conn)

    fresh_install_helper.send_arp_cmd()

    if lab.get("floating ip"):
        collect_sys_net_info(lab)
        setup_tis_ssh(lab)

    fresh_install_helper.reset_global_vars()
    fresh_install_helper.verify_install_uuid(lab)
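
The restore tests are parameterized through module-level variable stores such as RestoreVars and InstallVars. A hypothetical reduction of that pattern follows; the key names come from the calls above, while the defaults and the setter are assumptions added for illustration.

# Hypothetical reduction of the RestoreVars/InstallVars pattern: a
# module-level mapping populated once at session setup and read by
# name inside the tests. Defaults here are assumptions.
_restore_vars = {
    "RESTORE_PRE_BOOT_CONTROLLER0": True,
    "STOP_BEFORE_ANSIBLE_RESTORE": False,
}


def get_restore_var(name):
    return _restore_vars[name.upper()]


def set_restore_var(**kwargs):
    _restore_vars.update({k.upper(): v for k, v in kwargs.items()})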
Code example #4
def test_simplex_install(install_setup):
    """
     Complete fresh_install steps for a simplex lab
     Test Setups:
         - Retrieve dictionary containing lab information
         - Retrieve required paths to directories, images, and licenses
         - Initialize build server and boot server objects
         - Retrieve which steps are to be skipped
     Test Steps:
         - Install controller-0
         - Download configuration files, heat templates, images, and licenses
         - Configure controller-0, run lab_setup, and unlock controller-0
         - Run lab setup script if specified
         - Setup heat resources
     """
    lab = install_setup["lab"]
    controller0_node = lab["controller-0"]
    final_step = install_setup["control"]["stop"]
    patch_dir = install_setup["directories"]["patches"]
    patch_server = install_setup["servers"]["patches"]
    guest_server = install_setup["servers"]["guest"]
    install_subcloud = install_setup.get("install_subcloud")
    helm_chart_server = install_setup["servers"]["helm_charts"]

    if final_step == '0' or final_step == "setup":
        skip("stopping at install step: {}".format(LOG.test_step))

    fresh_install_helper.install_controller(sys_type=SysType.AIO_SX, patch_dir=patch_dir,
                                            patch_server_conn=patch_server.ssh_conn, init_global_vars=True)
    # controller0_node.telnet_conn.login()
    # controller0_node.telnet_conn.flush()
    # fresh_install_helper.set_software_version_var(use_telnet=True, con_telnet=controller0_node.telnet_conn)

    lab_files_server = install_setup["servers"]["lab_files"]
    build_server = install_setup["servers"]["build"]
    fresh_install_helper.download_lab_files(lab_files_server=lab_files_server, build_server=build_server,
                                            guest_server=guest_server,
                                            load_path=InstallVars.get_install_var("TIS_BUILD_DIR"),
                                            license_path=InstallVars.get_install_var("LICENSE"),
                                            guest_path=InstallVars.get_install_var('GUEST_IMAGE'),
                                            helm_chart_server=helm_chart_server)

    if install_subcloud:
        fresh_install_helper.configure_subcloud(controller0_node, install_setup["dc_system_controller"],
                                                subcloud=install_subcloud, final_step=final_step)
    else:
        fresh_install_helper.configure_controller_(controller0_node)

    deploy_mgr = fresh_install_helper.use_deploy_manager(controller0_node, lab)
    if not deploy_mgr:
        # fresh_install_helper.collect_lab_config_yaml(lab, build_server, stage=fresh_install_helper.DEPLOY_INTITIAL)

        fresh_install_helper.run_lab_setup(con_ssh=controller0_node.ssh_conn)
        fresh_install_helper.unlock_active_controller(controller0_node)
        controller0_node.telnet_conn.hostname = r"controller\-[01]"
        controller0_node.telnet_conn.set_prompt(Prompt.CONTROLLER_PROMPT)
    else:
        fresh_install_helper.wait_for_deploy_mgr_controller_config(controller0_node, lab=lab)

    if controller0_node.ssh_conn is None:
        controller0_node.ssh_conn = install_helper.ssh_to_controller(controller0_node.host_ip)
    install_helper.update_auth_url(ssh_con=controller0_node.ssh_conn)

    container_helper.wait_for_apps_status(apps='platform-integ-apps', timeout=1800,
                                          con_ssh=controller0_node.ssh_conn, status='applied')
    fresh_install_helper.run_lab_setup(controller0_node.ssh_conn)

    if lab.get("floating ip"):
        setup_tis_ssh(lab)

    fresh_install_helper.wait_for_hosts_ready(controller0_node.name, lab=lab)

    # fresh_install_helper.check_heat_resources(con_ssh=controller0_node.ssh_conn)
    if not deploy_mgr:
        fresh_install_helper.collect_lab_config_yaml(lab, build_server, stage=fresh_install_helper.DEPLOY_LAST)

    if install_subcloud:
        fresh_install_helper.wait_for_subcloud_to_be_managed(install_subcloud, install_setup["dc_system_controller"],
                                                             lab=lab)
    fresh_install_helper.attempt_to_run_post_install_scripts()

    fresh_install_helper.reset_global_vars()
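
Each install test honours a configured stop point: when install_setup["control"]["stop"] names the current step, the test skips rather than proceeding. Below is a sketch of that check, assuming steps are compared as strings; the helper name is invented for illustration and is not part of the repository's API.

# Sketch of the stop-at-step control used above; final_step comes from
# install_setup["control"]["stop"] and may be a step number ('0') or a
# name ("setup"). The helper name is illustrative.
from pytest import skip


def maybe_stop(final_step, current_step):
    if final_step is not None and str(final_step) == str(current_step):
        skip("stopping at install step: {}".format(current_step))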
Code example #5
File: test_restore.py  Project: pvaduva/auto_test
def test_restore(restore_setup):
    controller1 = 'controller-1'
    controller0 = 'controller-0'

    lab = restore_setup["lab"]
    is_aio_lab = lab.get('system_type', 'Standard') == 'CPE'
    is_sx = is_aio_lab and (len(lab['controller_nodes']) < 2)

    tis_backup_files = restore_setup['tis_backup_files']
    backup_src = RestoreVars.get_restore_var('backup_src'.upper())
    backup_src_path = RestoreVars.get_restore_var('backup_src_path'.upper())

    controller_node = lab[controller0]
    con_ssh = ControllerClient.get_active_controller(name=lab['short_name'],
                                                     fail_ok=True)
    sys_prompt = Prompt.TIS_NODE_PROMPT_BASE.format('.*' +
                                                    lab['name'].split('_')[0])
    controller_prompt = '{}|{}'.format(sys_prompt, Prompt.CONTROLLER_0)
    controller_node.telnet_conn.set_prompt(controller_prompt)

    if not con_ssh:
        LOG.info("Establish ssh connection with {}".format(controller0))
        controller_node.ssh_conn = install_helper.ssh_to_controller(
            controller_node.host_ip, initial_prompt=controller_prompt)
        controller_node.ssh_conn.deploy_ssh_key()
        con_ssh = controller_node.ssh_conn
        ControllerClient.set_active_controller(con_ssh)

    LOG.info("Restore system from backup....")
    system_backup_file = [
        file for file in tis_backup_files if "system.tgz" in file
    ].pop()
    images_backup_file = [
        file for file in tis_backup_files if "images.tgz" in file
    ].pop()

    LOG.tc_step("Restoring {}".format(controller0))

    LOG.info("System config restore from backup file {} ...".format(
        system_backup_file))

    if backup_src.lower() == 'usb':
        system_backup_path = "{}/{}".format(BackupRestore.USB_BACKUP_PATH,
                                            system_backup_file)
    else:
        system_backup_path = "{}{}".format(HostLinuxUser.get_home(),
                                           system_backup_file)

    compute_configured = install_helper.restore_controller_system_config(
        system_backup=system_backup_path, is_aio=is_aio_lab)[2]

    # return

    LOG.info('re-connect to the active controller using ssh')
    con_ssh.close()
    controller_node.ssh_conn = install_helper.ssh_to_controller(
        controller_node.host_ip, initial_prompt=controller_prompt)
    LOG.info("Source Keystone user admin environment ...")
    LOG.info("set prompt to:{}, telnet_conn:{}".format(
        controller_prompt, controller_node.telnet_conn))

    controller_node.telnet_conn.exec_cmd("cd; source /etc/platform/openrc")
    con_ssh = install_helper.ssh_to_controller(controller_node.host_ip)
    controller_node.ssh_conn = con_ssh
    ControllerClient.set_active_controller(con_ssh)

    make_sure_all_hosts_locked(con_ssh)

    if backup_src.lower() == 'local':
        images_backup_path = "{}{}".format(HostLinuxUser.get_home(),
                                           images_backup_file)
        common.scp_from_test_server_to_active_controller(
            "{}/{}".format(backup_src_path, images_backup_file),
            HostLinuxUser.get_home())
    else:
        images_backup_path = "{}/{}".format(BackupRestore.USB_BACKUP_PATH,
                                            images_backup_file)

    LOG.info(
        "Images restore from backup file {} ...".format(images_backup_file))

    new_prompt = r'{}.*~.*\$ |controller\-0.*~.*\$ '.format(
        lab['name'].split('_')[0])
    LOG.info('set prompt to:{}'.format(new_prompt))
    con_ssh.set_prompt(new_prompt)

    install_helper.restore_controller_system_images(
        images_backup=images_backup_path,
        tel_net_session=controller_node.telnet_conn)
    # this is a workaround for CGTS-8190
    install_helper.update_auth_url(con_ssh)

    LOG.tc_step(
        "Verifying restore of controller-0 is complete and the host "
        "is in available state ...")
    LOG.debug('Wait for system ready in 60 seconds')
    time.sleep(60)

    timeout = HostTimeout.REBOOT + 60
    availability = HostAvailState.AVAILABLE
    is_available = system_helper.wait_for_hosts_states(
        controller0,
        availability=HostAvailState.AVAILABLE,
        fail_ok=True,
        timeout=timeout)
    if not is_available:
        LOG.warn(
            'After {} seconds, the first node:{} does NOT reach {}'.format(
                timeout, controller0, availability))
        LOG.info('Check if drbd is still synchronizing data')
        con_ssh.exec_sudo_cmd('drbd-overview')
        is_degraded = system_helper.wait_for_hosts_states(
            controller0,
            availability=HostAvailState.DEGRADED,
            fail_ok=True,
            timeout=300)
        if is_degraded:
            LOG.warn('Node: {} is degraded: {}'.format(
                controller0, HostAvailState.DEGRADED))
            con_ssh.exec_sudo_cmd('drbd-overview')
        else:
            LOG.fatal('Node:{} is NOT in Available nor Degraded '
                      'status'.format(controller0))
            # the customer doc does have wording regarding this situation, continue
            # assert False, 'Node:{} is NOT in Available nor Degraded status'

    # delete the system backup files from sysadmin home
    LOG.tc_step("Copying backup files to /opt/backups ... ")
    if backup_src.lower() == 'local':
        con_ssh.exec_cmd("rm -f {} {}".format(system_backup_path,
                                              images_backup_path))

        cmd_rm_known_host = r'sed -i "s/^[^#]\(.*\)/#\1/g" /etc/ssh/ssh_known_hosts; sync'
        con_ssh.exec_sudo_cmd(cmd_rm_known_host)

        # transfer all backup files to /opt/backups from test server
        with con_ssh.login_as_root():
            con_ssh.scp_on_dest(source_user=TestFileServer.get_user(),
                                source_ip=TestFileServer.get_server(),
                                source_pswd=TestFileServer.get_password(),
                                source_path=backup_src_path + "/*",
                                dest_path=StxPath.BACKUPS + '/',
                                timeout=1200)

    else:
        # copy all backup files from USB to /opt/backups
        cmd = " cp  {}/* {}".format(BackupRestore.USB_BACKUP_PATH,
                                    StxPath.BACKUPS)
        con_ssh.exec_sudo_cmd(cmd, expect_timeout=600)

    LOG.tc_step("Checking if backup files are copied to /opt/backups ... ")
    assert int(con_ssh.exec_cmd("ls {} | wc -l".format(StxPath.BACKUPS))[1]) >= 2, \
        "Missing backup files in {}".format(StxPath.BACKUPS)

    if is_aio_lab:
        LOG.tc_step("Restoring Cinder Volumes ...")
        restore_volumes()

        LOG.tc_step('Run restore-complete (CGTS-9756)')
        cmd = 'echo "{}" | sudo -S config_controller --restore-complete'.format(
            HostLinuxUser.get_password())
        controller_node.telnet_conn.login()
        controller_node.telnet_conn.exec_cmd(
            cmd, extra_expects=[' will reboot on completion'])

        LOG.info('- wait until reboot completes')
        time.sleep(120)
        LOG.info('- confirm the active controller is actually back online')
        controller_node.telnet_conn.login()

        LOG.tc_step(
            "reconnecting to the active controller after restore-complete")
        con_ssh = install_helper.ssh_to_controller(controller_node.host_ip)

        if not compute_configured:
            LOG.tc_step(
                'Latest 18.07 EAR1 or Old-load on AIO/CPE lab: config its '
                'compute functionalities')
            # install_helper.run_cpe_compute_config_complete(controller_node, controller0)

            # LOG.info('closing current ssh connection')
            # con_ssh.close()

            LOG.tc_step('Run restore-complete (CGTS-9756)')
            controller_node.telnet_conn.login()

            cmd = 'echo "{}" | sudo -S config_controller --restore-complete'.\
                format(HostLinuxUser.get_password())
            controller_node.telnet_conn.exec_cmd(cmd,
                                                 extra_expects=' will reboot ')
            controller_node.telnet_conn.close()

            LOG.info(
                'Wait until "config_controller" reboot the active controller')
            time.sleep(180)

            controller_node.telnet_conn = install_helper.open_telnet_session(
                controller_node)
            controller_node.telnet_conn.login()
            time.sleep(120)

            con_ssh = install_helper.ssh_to_controller(controller_node.host_ip)
            controller_node.ssh_conn = con_ssh

            ControllerClient.set_active_controller(con_ssh)

            host_helper.wait_for_hosts_ready(controller0)

        LOG.tc_step('Install the standby controller: {}'.format(controller1))
        if not is_sx:
            install_non_active_node(controller1, lab)

    elif len(lab['controller_nodes']) >= 2:
        LOG.tc_step('Install the standby controller: {}'.format(controller1))
        install_non_active_node(controller1, lab)

        boot_interfaces = lab['boot_device_dict']

        hostnames = system_helper.get_hosts()
        storage_hosts = [host for host in hostnames if 'storage' in host]
        compute_hosts = [
            host for host in hostnames
            if 'storage' not in host and 'controller' not in host
        ]

        if len(storage_hosts) > 0:
            # con_ssh.exec_sudo_cmd('touch /etc/ceph/ceph.client.None.keyring')
            for storage_host in storage_hosts:
                LOG.tc_step("Restoring {}".format(storage_host))
                install_helper.open_vlm_console_thread(
                    storage_host,
                    boot_interface=boot_interfaces,
                    vlm_power_on=True)

                LOG.info(
                    "Verifying {} is Locked, Diabled and Online ...".format(
                        storage_host))
                system_helper.wait_for_hosts_states(
                    storage_host,
                    administrative=HostAdminState.LOCKED,
                    operational=HostOperState.DISABLED,
                    availability=HostAvailState.ONLINE)

                LOG.info("Unlocking {} ...".format(storage_host))
                rc, output = host_helper.unlock_host(storage_host,
                                                     available_only=True)
                assert rc == 0, "Host {} failed to unlock: rc = {}, msg: {}".format(
                    storage_host, rc, output)

            LOG.info("Veryifying the Ceph cluster is healthy ...")
            storage_helper.wait_for_ceph_health_ok(timeout=600)

            LOG.info("Importing images ...")
            image_backup_files = install_helper.get_backup_files(
                IMAGE_BACKUP_FILE_PATTERN, StxPath.BACKUPS, con_ssh)
            LOG.info("Image backup found: {}".format(image_backup_files))
            imported = install_helper.import_image_from_backup(
                image_backup_files)
            LOG.info("Images successfully imported: {}".format(imported))

        LOG.tc_step("Restoring Cinder Volumes ...")
        restore_volumes()

        LOG.tc_step('Run restore-complete (CGTS-9756), regular lab')
        controller_node.telnet_conn.login()
        cmd = 'echo "{}" | sudo -S config_controller --restore-complete'.format(
            HostLinuxUser.get_password())
        controller_node.telnet_conn.exec_cmd(
            cmd, extra_expects='controller-0 login:')

        LOG.info('rebuild ssh connection')
        con_ssh = install_helper.ssh_to_controller(controller_node.host_ip)
        controller_node.ssh_conn = con_ssh

        LOG.tc_step("Restoring Compute Nodes ...")
        if len(compute_hosts) > 0:
            for compute_host in compute_hosts:
                LOG.tc_step("Restoring {}".format(compute_host))
                install_helper.open_vlm_console_thread(
                    compute_host,
                    boot_interface=boot_interfaces,
                    vlm_power_on=True)

                LOG.info(
                    "Verifying {} is Locked, Diabled and Online ...".format(
                        compute_host))
                system_helper.wait_for_hosts_states(
                    compute_host,
                    administrative=HostAdminState.LOCKED,
                    operational=HostOperState.DISABLED,
                    availability=HostAvailState.ONLINE)
                LOG.info("Unlocking {} ...".format(compute_host))
                rc, output = host_helper.unlock_host(compute_host,
                                                     available_only=True)
                assert rc == 0, "Host {} failed to unlock: rc = {}, msg: {}".format(
                    compute_host, rc, output)

        LOG.info("All nodes {} are restored ...".format(hostnames))
    else:
        LOG.warn('Lab has only one controller but is not an AIO lab')

    LOG.tc_step("Delete backup files from {} ....".format(StxPath.BACKUPS))
    con_ssh.exec_sudo_cmd("rm -rf {}/*".format(StxPath.BACKUPS))

    LOG.tc_step('Perform post-restore testing/checking')
    post_restore_test(con_ssh)

    LOG.tc_step("Waiting until all alarms are cleared ....")
    timeout = 300
    healthy, alarms = system_helper.wait_for_all_alarms_gone(timeout=timeout,
                                                             fail_ok=True)
    if not healthy:
        LOG.warn('Alarms exist: {}, after waiting {} seconds'.format(
            alarms, timeout))
        rc, message = con_ssh.exec_sudo_cmd('drbd-overview')

        if rc != 0 or ('[===>' not in message
                       and "] sync'ed: " not in message):
            LOG.warn('Failed to get drbd-overview information')

        LOG.info('Wait for the system to be ready in {} seconds'.format(
            HostTimeout.REBOOT))
        system_helper.wait_for_all_alarms_gone(timeout=HostTimeout.REBOOT,
                                               fail_ok=False)

    LOG.tc_step("Verifying system health after restore ...")
    rc, failed = system_helper.get_system_health_query(con_ssh=con_ssh)
    assert rc == 0, "System health not OK: {}".format(failed)

    collect_logs()
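
Example #5 resolves every backup file against one of two roots depending on BACKUP_SRC: a USB mount point or the sysadmin home directory. The same selection is repeated for the system and images archives, so it distils into a small helper; both path constants below are stand-ins for BackupRestore.USB_BACKUP_PATH and HostLinuxUser.get_home(), not their real values.

# Illustrative distillation of the backup-path selection above; the
# constants stand in for BackupRestore.USB_BACKUP_PATH and
# HostLinuxUser.get_home() and are assumptions, not the real values.
USB_BACKUP_PATH = "/media/sysadmin/backups"
SYSADMIN_HOME = "/home/sysadmin/"


def backup_file_path(backup_src, filename):
    if backup_src.lower() == "usb":
        return "{}/{}".format(USB_BACKUP_PATH, filename)
    return "{}{}".format(SYSADMIN_HOME, filename)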
Code example #6
def test_standard_install(install_setup):
    """
     Complete fresh_install steps for a standard lab

     Prerequisites:
         - pxeboot has been setup.
     Test Setups:
         - Retrieve dictionary containing lab information
         - Retrieve required paths to directories, images, and licenses
         - Determine active controller
         - Initialize build server and boot server objects
         - Retrieve which steps are to be skipped
     Test Steps:
         - Install controller-0
         - Download configuration files, heat templates, images, and licenses
         - Configure controller-0, run lab_setup, and unlock controller-0
         - Add the other hosts
         - Boot the other hosts
         - Run lab setup
         - Unlock the other hosts
         - Run lab setup
         - Setup heat resources and clear any install related alarms
     """
    lab = install_setup["lab"]
    hosts = lab["hosts"]
    boot_device = lab['boot_device_dict']
    controller0_node = lab["controller-0"]
    final_step = install_setup["control"]["stop"]
    patch_dir = install_setup["directories"]["patches"]
    patch_server = install_setup["servers"]["patches"]
    guest_server = install_setup["servers"]["guest"]
    install_subcloud = install_setup.get("install_subcloud")
    helm_chart_server = install_setup["servers"]["helm_charts"]

    if final_step == '0' or final_step == "setup":
        skip("stopping at install step: {}".format(LOG.test_step))

    fresh_install_helper.install_controller(sys_type=SysType.REGULAR, patch_dir=patch_dir,
                                            patch_server_conn=patch_server.ssh_conn,
                                            init_global_vars=True)

    lab_files_server = install_setup["servers"]["lab_files"]
    lab_files_dir = install_setup["directories"]["lab_files"]
    build_server = install_setup["servers"]["build"]
    fresh_install_helper.download_lab_files(lab_files_server=lab_files_server,
                                            build_server=build_server,
                                            guest_server=guest_server, lab_files_dir=lab_files_dir,
                                            load_path=InstallVars.get_install_var("TIS_BUILD_DIR"),
                                            license_path=InstallVars.get_install_var("LICENSE"),
                                            guest_path=InstallVars.get_install_var('GUEST_IMAGE'),
                                            helm_chart_server=helm_chart_server)

    if install_subcloud:
        fresh_install_helper.configure_subcloud(controller0_node, install_setup["dc_system_controller"],
                                                subcloud=install_subcloud, final_step=final_step)
        deploy_mgr = True
    else:
        fresh_install_helper.configure_controller_(controller0_node)

        deploy_mgr = fresh_install_helper.use_deploy_manager(controller0_node, lab)

    if not deploy_mgr:
        fresh_install_helper.check_ansible_configured_mgmt_interface(controller0_node, lab)

        # fresh_install_helper.collect_lab_config_yaml(lab, build_server,
        #                                              stage=fresh_install_helper.DEPLOY_INTITIAL)

        fresh_install_helper.run_lab_setup(con_ssh=controller0_node.ssh_conn)
        fresh_install_helper.unlock_active_controller(controller0_node)

        controller0_node.telnet_conn.hostname = r"controller\-[01]"
        controller0_node.telnet_conn.set_prompt(Prompt.CONTROLLER_PROMPT)

    else:
        fresh_install_helper.wait_for_deploy_mgr_controller_config(controller0_node, lab=lab)

    if controller0_node.ssh_conn is None:
        controller0_node.ssh_conn = install_helper.ssh_to_controller(controller0_node.host_ip)
    install_helper.update_auth_url(ssh_con=controller0_node.ssh_conn)

    if not deploy_mgr:
        fresh_install_helper.bulk_add_hosts(lab=lab, con_ssh=controller0_node.ssh_conn)
    else:
        fresh_install_helper.wait_for_deployment_mgr_to_bulk_add_hosts(controller0_node, lab=lab)

    fresh_install_helper.boot_hosts(boot_device)
    # fresh_install_helper.collect_lab_config_yaml(lab, build_server,
    #                                              stage=fresh_install_helper.DEPLOY_INTERIM)
    if not deploy_mgr:

        fresh_install_helper.run_lab_setup(con_ssh=controller0_node.ssh_conn)

        # Unlock controller-1
        fresh_install_helper.unlock_hosts(['controller-1'], con_ssh=controller0_node.ssh_conn)
        fresh_install_helper.run_lab_setup(con_ssh=controller0_node.ssh_conn)

        # Unlock computes
        fresh_install_helper.unlock_hosts([host_ for host_ in hosts if 'compute' in host_],
                                          con_ssh=controller0_node.ssh_conn)

        fresh_install_helper.send_arp_cmd()
        fresh_install_helper.run_lab_setup(con_ssh=controller0_node.ssh_conn)

    else:
        fresh_install_helper.wait_for_deploy_mgr_lab_config(controller0_node, lab=lab)

        fresh_install_helper.wait_for_hosts_ready(hosts, lab=lab)
        fresh_install_helper.run_lab_setup(con_ssh=controller0_node.ssh_conn)

    container_helper.wait_for_apps_status(apps='platform-integ-apps', timeout=1200,
                                          con_ssh=controller0_node.ssh_conn, status='applied')

    if lab.get("floating ip"):
        collect_sys_net_info(lab)
        setup_tis_ssh(lab)

    # fresh_install_helper.check_heat_resources(con_ssh=controller0_node.ssh_conn)
    if not deploy_mgr:
        fresh_install_helper.collect_lab_config_yaml(lab, build_server,
                                                     stage=fresh_install_helper.DEPLOY_LAST)
    if install_subcloud:
        fresh_install_helper.wait_for_subcloud_to_be_managed(install_subcloud, install_setup["dc_system_controller"],
                                                             lab=lab)
    fresh_install_helper.attempt_to_run_post_install_scripts()

    fresh_install_helper.reset_global_vars()
    fresh_install_helper.verify_install_uuid(lab)

    if deploy_mgr:
        fresh_install_helper.validate_deployment_mgr_install(controller0_node, lab)
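
Throughout these tests, hosts are classified by plain substring checks on their names: anything containing "controller" or "storage" is treated accordingly, and everything else counts as a compute. A compact sketch of that convention:

# Compact sketch of the role-partitioning convention used throughout
# these tests; hostnames are classified purely by substring.
def partition_hosts(hostnames):
    controllers = [h for h in hostnames if "controller" in h]
    storages = [h for h in hostnames if "storage" in h]
    computes = [h for h in hostnames
                if "controller" not in h and "storage" not in h]
    return controllers, storages, computes


# e.g. partition_hosts(["controller-0", "controller-1", "compute-0",
#                       "storage-0"])
# -> (['controller-0', 'controller-1'], ['storage-0'], ['compute-0'])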