def install_clone_setup():
    lab = InstallVars.get_install_var('LAB')
    LOG.info("Lab info; {}".format(lab))
    install_cloned_info = {
        'usb_verified': False,
        'build_server': None,
        'hostnames': [k for k, v in lab.items() if isinstance(v, node.Node)],
        'system_mode':
        'duplex' if len(lab['controller_nodes']) == 2 else "simplex"
    }

    controller_node = lab['controller-0']
    controller_conn = None
    extra_controller_prompt = Prompt.TIS_NODE_PROMPT_BASE.format(
        lab['name'].split('_')[0]) + '|' + Prompt.CONTROLLER_0
    local_client = LocalHostClient(connect=True)
    if local_client.ping_server(controller_node.host_ip,
                                fail_ok=True)[0] == 100:
        try:
            controller_conn = install_helper.ssh_to_controller(
                controller_node.host_ip,
                fail_ok=True,
                initial_prompt=extra_controller_prompt)
        except Exception:
            LOG.info("SSH connection to {} not available yet ..".format(
                controller_node.name))

    if controller_conn:
        LOG.info("Connection established with controller-0 ....")
        ControllerClient.set_active_controller(ssh_client=controller_conn)
        if verify_usb(controller_conn):
            install_cloned_info['usb_verified'] = True

    bld_server = get_build_server_info(
        InstallVars.get_install_var('BUILD_SERVER'))

    LOG.info("Connecting to Build Server {} ....".format(bld_server['name']))
    bld_server_attr = dict()
    bld_server_attr['name'] = bld_server['name']
    bld_server_attr['server_ip'] = bld_server['ip']
    bld_server_attr['prompt'] = r'{}@{}\:(.*)\$ '.format(
        TestFileServer.get_user(), bld_server['name'])

    bld_server_conn = install_helper.establish_ssh_connection(
        bld_server_attr['name'],
        user=TestFileServer.get_user(),
        password=TestFileServer.get_password(),
        initial_prompt=bld_server_attr['prompt'])

    bld_server_conn.exec_cmd("bash")
    bld_server_conn.set_prompt(bld_server_attr['prompt'])
    bld_server_conn.deploy_ssh_key(install_helper.get_ssh_public_key())
    bld_server_attr['ssh_conn'] = bld_server_conn
    bld_server_obj = Server(**bld_server_attr)

    install_cloned_info['build_server'] = bld_server_obj

    return install_cloned_info
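
# A hedged usage sketch of the setup above; the test name and body are
# hypothetical, but the dict keys match those built by install_clone_setup().
def example_clone_test():
    cloned = install_clone_setup()
    if not cloned['usb_verified']:
        skip("No USB flash drive with a clone image found on controller-0")
    LOG.info("Cloning a {} system via build server {}".format(
        cloned['system_mode'], cloned['build_server'].name))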
Example #2
def pytest_collectstart():
    """
    Set up the ssh session at collectstart. Because skipif condition is evaluated at the collecting test cases phase.
    """
    global con_ssh
    con_ssh = setups.setup_tis_ssh(InstallVars.get_install_var("LAB"))
    InstallVars.set_install_var(con_ssh=con_ssh)
    auth = setups.get_auth_via_openrc(con_ssh)
    if auth:
        CliAuth.set_vars(**auth)

    Tenant.set_platform_url(CliAuth.get_var('OS_AUTH_URL'))
    Tenant.set_region(CliAuth.get_var('OS_REGION_NAME'))
Example #3
def install_setup(request):
    lab = InstallVars.get_install_var("LAB")
    install_type = ProjVar.get_var('SYS_TYPE')
    if install_type != SysType.AIO_PLUS:
        skip("The specified lab is not a {} system; it is {}. Use the "
             "appropriate install test script.".format(SysType.AIO_PLUS,
                                                       install_type))

    hosts = vlm_helper.get_hostnames_from_consts(lab)
    lab["hosts"] = [h for h in hosts if 'storage' not in h]
    barcodes = vlm_helper.get_barcodes_from_hostnames(lab["hosts"])

    active_con = lab["controller-0"]

    LOG.tc_setup_start("{} install".format(install_type))

    LOG.fixture_step("Reserve hosts")
    hosts = lab["hosts"]
    LOG.info("Un-reserving {}".format(hosts))
    vlm_helper.force_unreserve_hosts(hosts)
    LOG.info("Reserving {}".format(hosts))
    for barcode in barcodes:
        vlm_helper._reserve_vlm_console(barcode, "AUTO: lab installation")

    unused_barcodes = vlm_helper.get_unused_barcodes(lab=lab)
    if len(unused_barcodes) > 0:
        vlm_helper.power_off_unused_nodes(unused_barcodes)

    LOG.fixture_step("Attempt to reset port on controller-0")
    fresh_install_helper.reset_controller_telnet_port(active_con)

    def install_cleanup():
        fresh_install_helper.install_teardown(lab, active_con)

    request.addfinalizer(install_cleanup)

    is_subcloud = InstallVars.get_install_var("INSTALL_SUBCLOUD") is not None

    _install_setup = fresh_install_helper.setup_fresh_install(
        lab, subcloud=is_subcloud)
    if InstallVars.get_install_var("RESUME"):
        try:
            if active_con.ssh_conn is None:
                active_con.ssh_conn = install_helper.establish_ssh_connection(
                    active_con.host_ip)
        except Exception:
            pass

    return _install_setup
Example #4
def get_ipv4_controller_0():
    lab = InstallVars.get_install_var('LAB')

    if InstallVars.get_install_var('IPV6_OAM'):
        ipv6 = lab['controller-0 ip']
        con0_v4_ip = ipv6.rsplit(':', maxsplit=1)[-1]

        if len(con0_v4_ip) == 4 and con0_v4_ip.startswith('1'):
            con0_v4_ip = '128.224.151.{}'.format(con0_v4_ip[-3:].lstrip("0"))
        else:
            con0_v4_ip = '128.224.150.{}'.format(con0_v4_ip[-3:])

        return con0_v4_ip
    else:
        return lab['controller-0 ip']
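
# Worked example of the mapping above as a self-contained sketch (the IPv6
# address below is hypothetical): the last colon-separated group of the IPv6
# OAM address encodes the lab's IPv4 host number, so
# _example_ipv6_to_ipv4('fd00:128:224::1123') returns '128.224.151.123'.
def _example_ipv6_to_ipv4(ipv6):
    suffix = ipv6.rsplit(':', maxsplit=1)[-1]   # e.g. '1123'
    if len(suffix) == 4 and suffix.startswith('1'):
        # four digits starting with '1': 128.224.151.x, leading zeros stripped
        return '128.224.151.{}'.format(suffix[-3:].lstrip('0'))
    return '128.224.150.{}'.format(suffix[-3:])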
Example #5
def setup_test_session(global_setup, request):
    """
    Setup primary tenant  before the first test gets executed.
    TIS ssh was already set up at collecting phase.
    """
    ProjVar.set_var(PRIMARY_TENANT='admin')
    ProjVar.set_var(SOURCE_OPENRC=True)
    setups.setup_primary_tenant('admin')

    con_ssh = setups.setup_tis_ssh(InstallVars.get_install_var("LAB"))
    ControllerClient.set_active_controller(ssh_client=con_ssh)

    # set build id to be used to upload/write test results
    setups.set_session(con_ssh=con_ssh)

    def set_build_vars():
        try:
            setups.copy_test_files()

            # set build id to be used to upload/write test results
            setups.set_build_info(con_ssh)
        except Exception:
            LOG.warning('Unable to set BUILD info')

    set_build_vars()
    request.addfinalizer(set_build_vars)
Example #6
    def select(self,
               telnet_conn=None,
               index=None,
               pattern=None,
               tag=None,
               curser_move=1):
        if isinstance(tag, str):
            tag_dict = {
                "os": "centos",
                "security": "standard",
                "type": None,
                "console": "serial"
            }

            if "security" in tag or "extended" in tag:
                tag_dict["security"] = "extended"
                if InstallVars.get_install_var("LOW_LATENCY"):
                    tag_dict["type"] = "lowlatency"
                else:
                    install_type = ProjVar.get_var("SYS_TYPE")
                    if install_type == SysType.AIO_SX or install_type == SysType.AIO_DX:
                        tag_dict["type"] = "cpe"
                    elif install_type == SysType.REGULAR or install_type == SysType.STORAGE:
                        tag_dict["type"] = "standard"
            else:
                tag_dict["type"] = tag
            tag = tag_dict

        super().select(telnet_conn=telnet_conn,
                       index=index,
                       pattern=pattern,
                       tag=tag,
                       curser_move=curser_move)
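
# Hedged usage sketch of the tag expansion above (the menu and connection
# objects are assumed to exist): a plain string tag is normalized into the
# full tag dict, i.e. {"os": "centos", "security": "extended",
# "type": "lowlatency" or "cpe"/"standard" depending on LOW_LATENCY and
# SYS_TYPE, "console": "serial"}, before the parent menu's select() runs.
def _example_select_by_tag(menu, conn):
    menu.select(telnet_conn=conn, tag="extended security")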
Example #7
def check_for_upgrade_abort():
    upgrade_info = dict()
    lab = InstallVars.get_install_var('LAB')
    upgrade_info['LAB'] = lab
    table_ = upgrade_helper.system_upgrade_show()[1]
    print("Upgrade show {}".format(table_))
    if "No upgrade in progress" in table_:
        LOG.warning("No upgrade in progress, cannot be aborted")
        return 1, None

    upgrade_release = table_parser.get_value_two_col_table(
        table_, "to_release")
    current_release = table_parser.get_value_two_col_table(
        table_, "from_release")
    upgraded_hostnames = upgrade_helper.get_upgraded_host_names(
        upgrade_release)
    upgraded = len(upgraded_hostnames)
    upgrade_info['current_release'] = current_release
    upgrade_info['upgrade_release'] = upgrade_release
    upgrade_info['upgraded_hostnames'] = upgraded_hostnames

    if upgraded >= 2:
        LOG.warning(
            "Both controllers are upgraded; full system installation is "
            "required to abort: {}".format(upgraded_hostnames))
        return 2, upgrade_info
    elif upgraded == 1:
        LOG.warning(
            "Only one controller is upgraded; in-service abort is possible: "
            "{}".format(upgraded_hostnames))
        return 0, upgrade_info
    else:
        LOG.warning("No host is upgraded. ")
        return 3, upgrade_info
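
# A hedged sketch of caller-side handling for the return codes above
# (hypothetical helper; 0 = in-service abort possible, 1 = no upgrade in
# progress, 2 = full reinstall required, 3 = no host upgraded yet):
def _example_handle_abort_check(rc, upgrade_info):
    if rc == 0:
        return "in-service abort possible: {}".format(
            upgrade_info['upgraded_hostnames'])
    if rc == 2:
        return "both controllers upgraded; full system install needed"
    return "nothing to abort"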
Example #8
def pytest_runtest_teardown():
    lab = InstallVars.get_install_var('LAB')
    hostnames = [k for k, v in lab.items() if isinstance(v, node.Node)]
    vlm_helper.unreserve_hosts(hostnames)
    con_ssh = ControllerClient.get_active_controller()
    # Delete any backup files from /opt/backups
    con_ssh.exec_sudo_cmd("rm -rf /opt/backups/*")
    con_ssh.flush()
Example #9
def install_setup(request):
    lab = InstallVars.get_install_var("LAB")
    install_type = ProjVar.get_var('SYS_TYPE')
    if install_type != SysType.AIO_DX:
        skip("The specified lab is not a {} system; it is {}. Use the "
             "appropriate install test script.".format(SysType.AIO_DX,
                                                       install_type))

    lab["hosts"] = vlm_helper.get_hostnames_from_consts(lab)
    barcodes = vlm_helper.get_barcodes_from_hostnames(lab["hosts"])

    active_con = lab["controller-0"]

    LOG.tc_setup_start("{} install".format(install_type))

    LOG.fixture_step("Reserve hosts")
    hosts = lab["hosts"]
    LOG.info("Un-reserving {}".format(hosts))
    vlm_helper.force_unreserve_hosts(hosts)
    LOG.info("Reserving {}".format(hosts))
    for barcode in barcodes:
        vlm_helper._reserve_vlm_console(barcode, "AUTO: lab installation")

    LOG.fixture_step("Attempt to reset port on controller-0")
    fresh_install_helper.reset_controller_telnet_port(active_con)

    def install_cleanup():
        fresh_install_helper.install_teardown(lab, active_con)

    request.addfinalizer(install_cleanup)

    is_subcloud = InstallVars.get_install_var("INSTALL_SUBCLOUD") is not None

    _install_setup = fresh_install_helper.setup_fresh_install(
        lab, subcloud=is_subcloud)
    resume_step = InstallVars.get_install_var("RESUME")
    if resume_step and resume_step not in \
            ["setup", "install_controller", "configure_controller", "download_lab_files"]:
        try:
            if active_con.ssh_conn is None:
                active_con.ssh_conn = install_helper.ssh_to_controller(
                    active_con.host_ip)
        except Exception:
            pass

    return _install_setup
Example #10
def pre_download_setup():

    lab = InstallVars.get_install_var('LAB')

    # establish ssh connection with controller-0
    controller0_conn = ControllerClient.get_active_controller()
    cpe = system_helper.is_aio_system(controller0_conn)

    bld_server = get_build_server_info(
        InstallVars.get_install_var('BUILD_SERVER'))

    output_dir = ProjVar.get_var('LOG_DIR')

    current_version = system_helper.get_sw_version(use_existing=False)
    load_path = BuildServerPath.LATEST_HOST_BUILD_PATHS[current_version]

    bld_server_attr = dict()
    bld_server_attr['name'] = bld_server['name']
    bld_server_attr['server_ip'] = bld_server['ip']

    bld_server_attr['prompt'] = Prompt.BUILD_SERVER_PROMPT_BASE.format(
        'svc-cgcsauto', bld_server['name'])

    bld_server_conn = SSHClient(bld_server_attr['name'],
                                user=TestFileServer.get_user(),
                                password=TestFileServer.get_password(),
                                initial_prompt=bld_server_attr['prompt'])
    bld_server_conn.connect()
    bld_server_conn.exec_cmd("bash")
    bld_server_conn.set_prompt(bld_server_attr['prompt'])
    bld_server_conn.deploy_ssh_key(install_helper.get_ssh_public_key())
    bld_server_attr['ssh_conn'] = bld_server_conn
    bld_server_obj = Server(**bld_server_attr)

    _download_setup = {
        'lab': lab,
        'cpe': cpe,
        'output_dir': output_dir,
        'current_version': current_version,
        'build_server': bld_server_obj,
        'load_path': load_path,
    }

    return _download_setup
Example #11
def test_restore_simplex_install(install_setup):
    """
     Complete fresh_install steps for a simplex lab
     Test Setups:
         - Retrieve dictionary containing lab information
         - Retrieve required paths to directories, images, and licenses
         - Initialize build server and boot server objects
         - Retrieve what steps to be skipped
     Test Steps:
         - Boot controller-0
         - Run restore controller-0
         - Unlock controller-0
     """
    lab = install_setup["lab"]
    controller0_node = lab["controller-0"]
    patch_dir = install_setup["directories"]["patches"]
    patch_server = install_setup["servers"]["patches"]

    do_boot_c0 = RestoreVars.get_restore_var('RESTORE_PRE_BOOT_CONTROLLER0')
    stop_before_ansible_restore =\
        RestoreVars.get_restore_var('STOP_BEFORE_ANSIBLE_RESTORE')

    if do_boot_c0:
        fresh_install_helper.install_controller(
            sys_type=SysType.AIO_SX,
            patch_dir=patch_dir,
            patch_server_conn=patch_server.ssh_conn,
            init_global_vars=True)
    else:
        LOG.tc_step("Skipping controller-0 install")

    if stop_before_ansible_restore:
        skip("Stopping test before restoring")

    if InstallVars.get_install_var('IPV6_OAM'):
        restore_helper.setup_ipv6_oam(controller0_node)

    restore_helper.restore_platform()

    fresh_install_helper.unlock_active_controller(controller0_node)
    controller0_node.telnet_conn.hostname = r"controller\-[01]"
    controller0_node.telnet_conn.set_prompt(Prompt.CONTROLLER_PROMPT)

    if controller0_node.ssh_conn is None:
        controller0_node.ssh_conn = install_helper.ssh_to_controller(
            controller0_node.host_ip)
    install_helper.update_auth_url(ssh_con=controller0_node.ssh_conn)

    if lab.get("floating ip"):
        setup_tis_ssh(lab)

    fresh_install_helper.wait_for_hosts_ready(controller0_node.name, lab=lab)

    fresh_install_helper.reset_global_vars()
Example #12
def test_system_fip():
    lab = InstallVars.get_install_var("LAB")
    res_dict = setups.collect_sys_net_info(lab)
    retry = not all(res_dict.values())
    if retry:
        res_dict = setups.collect_sys_net_info(lab)
        for key, value in res_dict.items():
            assert value, "could not {}".format(key)
Example #13
def test_system_telnet():
    lab = InstallVars.get_install_var("LAB")
    node_obj = lab["controller-0"]
    install_helper.power_on_host(node_obj.name, wait_for_hosts_state_=False)
    telnet_conn = install_helper.open_telnet_session(
        node_obj) if node_obj.telnet_conn is None else node_obj.telnet_conn
    telnet_conn.send("\r\n")
    index = telnet_conn.expect(["ogin:", telnet_conn.prompt], fail_ok=True)
    if index < 0:
        telnet_conn.send_control("\\")
        telnet_conn.expect(["anonymous:.+:PortCommand> "])
        telnet_conn.send("resetport")
        telnet_conn.send("\r\n")
        telnet_conn.login()
Example #14
def pytest_configure(config):

    # Lab fresh_install params
    lab_arg = config.getoption('lab')
    controller0_ceph_mon_device = config.getoption('ceph_mon_dev_controller_0')
    controller1_ceph_mon_device = config.getoption('ceph_mon_dev_controller_1')
    ceph_mon_gib = config.getoption('ceph_mon_gib')
    build_server = config.getoption('build_server')
    boot_server = config.getoption('boot_server')
    patch_dir = config.getoption('patch_dir')

    setups.set_install_params(installconf_path=None, lab=lab_arg,
                              controller0_ceph_mon_device=controller0_ceph_mon_device,
                              controller1_ceph_mon_device=controller1_ceph_mon_device,
                              ceph_mon_gib=ceph_mon_gib,
                              patch_dir=patch_dir,
                              build_server=build_server, boot_server=boot_server)
    print(" Pre Configure Install vars: {}".format(InstallVars.get_install_vars()))
    print("")
Example #15
def pre_system_clone():

    lab = InstallVars.get_install_var('LAB')

    LOG.info("Preparing lab for system clone....")

    if 'compute_nodes' in lab or 'storage_nodes' in lab:
        skip(
            "The system {} is not All-in-one; clone is supported only for AIO systems"
            .format(lab))

    assert system_helper.get_active_controller_name(
    ) == 'controller-0', "controller-0 is not the active controller"
    LOG.fixture_step(
        "Checking if a USB flash drive is plugged in controller-0 node... ")
    usb_device = install_helper.get_usb_device_name()
    assert usb_device, "No USB found in controller-0"

    usb_size = install_helper.get_usb_disk_size(usb_device)
    LOG.info("Size of {} = {}".format(usb_device, usb_size))
    if usb_size < 8:
        skip("Insufficient size on {}: {}G; at least 8G is required.".format(
            usb_device, usb_size))
Example #16
def pytest_runtest_teardown(item):
    install_testcases = [
        "test_simplex_install.py", "test_duplex_install.py",
        "test_standard_install.py", "test_storage_install.py",
        "test_distributed_cloud_install.py", "test_duplex_plus_install.py"
    ]
    for install_testcase in install_testcases:
        if install_testcase in item.nodeid:
            final_step = LOG.test_step
            lab = InstallVars.get_install_var("LAB")
            progress_dir = ProjVar.get_var("LOG_DIR") + "/.."
            progress_file_path = progress_dir + "/{}_install_progress.txt".format(
                lab["short_name"])

            LOG.info("Writing install step to {}".format(progress_file_path))
            with open(progress_file_path, "w+") as progress_file:
                progress_file.write(item.nodeid + "\n")
                progress_file.write("End step: {}".format(str(final_step)))

            os.chmod(progress_file_path, 0o755)
            LOG.info(
                "Done Writing install step to {}".format(progress_file_path))
            break
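
# A hedged sketch of the read side of the progress file written above
# (hypothetical helper; the real resume logic lives in fresh_install_helper):
def _example_read_install_progress(progress_file_path):
    with open(progress_file_path) as progress_file:
        lines = progress_file.read().splitlines()
    nodeid = lines[0]
    end_step = lines[1].replace("End step: ", "", 1)
    return nodeid, end_step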
Example #17
    def find_options(self,
                     telnet_conn,
                     end_of_menu=r"(utomatic(ally)?( boot)? in)".encode(),
                     option_identifier=r"[A-Z][A-Za-z]".encode(),
                     newline=r'(\x1b\[\d+;\d+H)+'.encode()):

        output = telnet_conn.read_until(end_of_menu, 5)
        self.options = [
            KickstartOption(name="Boot from hard drive", index=0),
            KickstartOption(name="WRL Serial Controller Install", index=1),
            KickstartOption(name="CentOS Serial Controller Install", index=2),
            KickstartOption(name="WRL Serial CPE Install", index=3),
            KickstartOption(name="CentOS Serial CPE Install", index=4),
            KickstartOption(name="Security Profile Enabled Boot Options",
                            index=4)
        ]
        current_option = self.options[0]
        security_type = InstallVars.get_install_var("SECURITY")
        if security_type == 'extended':
            security_menu = KickstartMenu(
                name="PXE Security Menu",
                kwargs=bios.BootMenus.Kickstart.Security)
            self.sub_menus.append(security_menu)
            security_menu.options = [
                KickstartOption(name="CentOS Serial Controller Install",
                                index=0),
                KickstartOption(name="CentOS Serial CPE Install", index=1)
            ]
            LOG.info("HP380 sub menu added: {}".format(self.sub_menus[0].name))

        LOG.info("HP380 current option: {}; index {}".format(
            current_option.name, current_option.index))
        self.index = current_option.index

        LOG.debug("{} options are: {}".format(
            self.name, [option.name for option in self.options]))
Example #18
    def __init__(self, lab_name=None):
        if lab_name is None:
            lab = InstallVars.get_install_var("LAB")
            lab_name = lab["name"]
        lab_name = lab_name.lower()
        LOG.debug("Lab name: {}".format(lab_name))
        menus = bios.BiosMenus
        lab_menu_dict = {
            'wolfpass|wildcat|grizzly': menus.American_Megatrends,
            'hp': menus.HP,
            'ironpass': menus.Ironpass,
            'ml350': menus.ml350,
            'r730|r430': menus.PowerEdge,
            'r720': menus.Phoenix,
            'supermicro': menus.Supermicro,
        }
        for k, v in lab_menu_dict.items():
            if re.search(k, lab_name):
                bios_menu_dict = v
                break
        else:
            raise NotImplementedError('{} not handled'.format(lab_name))

        super().__init__(name=bios_menu_dict["name"], kwargs=bios_menu_dict)
Example #19
def pytest_configure(config):

    # Lab fresh_install params
    lab_arg = config.getoption('lab')
    resume_install = config.getoption('resumeinstall')
    skiplist = config.getoption('skiplist')
    wipedisk = config.getoption('wipedisk')
    controller0_ceph_mon_device = config.getoption('ceph_mon_dev_controller_0')
    controller1_ceph_mon_device = config.getoption('ceph_mon_dev_controller_1')
    ceph_mon_gib = config.getoption('ceph_mon_gib')
    install_conf = config.getoption('installconf')
    lab_file_dir = config.getoption('file_dir')
    build_server = config.getoption('build_server')
    boot_server = config.getoption('boot_server')
    tis_build_dir = config.getoption('tis_build_dir')
    tis_builds_dir = config.getoption('tis_builds_dir')
    install_license = config.getoption('upgrade_license')
    heat_templates = config.getoption('heat_templates')
    guest_image = config.getoption('guest_image_path')
    boot_type = config.getoption('boot_list')
    iso_path = config.getoption('iso_path')
    low_lat = config.getoption('low_latency')
    security = config.getoption('security')
    controller = config.getoption('controller')
    compute = config.getoption('compute')
    storage = config.getoption('storage')
    stop_step = config.getoption('stop_step')
    drop_num = config.getoption('drop_num')
    patch_dir = config.getoption('patch_dir')
    kubernetes = config.getoption('kubernetes_config')
    no_openstack = config.getoption('no_openstack')
    deploy_openstack_from_controller_1 = config.getoption(
        'deploy_openstack_from_controller_1')
    dc_ipv6 = config.getoption('dc_ipv6')
    helm_chart_path = config.getoption('helm_chart_path')
    no_manage = config.getoption('no_manage')
    extract_deploy_config = config.getoption('extract_deploy_config')
    vswitch_type = config.getoption('vswitch_type')
    ipv6_oam = config.getoption('ipv6_oam')
    subcloud_host = config.getoption('subcloud_host')

    # Restore parameters
    backup_src_path = config.getoption('backup_path')
    has_wipe_ceph_osds = config.getoption('has_wipe_ceph_osds')
    wipe_ceph_osds = config.getoption('wipe_ceph_osds')
    restore_pre_boot_controller0 = config.getoption(
        'restore_pre_boot_controller0')
    stop_before_ansible_restore = config.getoption(
        'stop_before_ansible_restore')

    RestoreVars.set_restore_var(backup_src_path=backup_src_path)
    RestoreVars.set_restore_var(has_wipe_ceph_osds=has_wipe_ceph_osds)
    RestoreVars.set_restore_var(wipe_ceph_osds=wipe_ceph_osds)
    RestoreVars.set_restore_var(
        restore_pre_boot_controller0=restore_pre_boot_controller0)
    RestoreVars.set_restore_var(
        stop_before_ansible_restore=stop_before_ansible_restore)

    if not lab_arg:
        raise ValueError("Lab name must be provided")

    vswitch_types = [
        VSwitchType.OVS, VSwitchType.OVS_DPDK, VSwitchType.AVS,
        VSwitchType.NONE
    ]
    if vswitch_type not in vswitch_types:
        raise ValueError(
            "Invalid vswitch type {}; Valid types are: {} ".format(
                vswitch_type, vswitch_types))

    lab_dict = setups.get_lab_dict(lab_arg)
    lab_name = lab_dict['name']
    if 'yow' in lab_name:
        lab_name = lab_name[4:]

    if subcloud_host:
        is_subcloud = False
        subcloud_name = None
        dc_float_ip = None
        dc_lab_name = None
    else:
        is_subcloud, subcloud_name, dc_float_ip, dc_lab_name = setups.is_lab_subcloud(
            lab_dict, ipv6=ipv6_oam)
        if is_subcloud and 'yow' in dc_lab_name:
            dc_lab_name = dc_lab_name[4:]

    if resume_install is True:
        resume_install = fresh_install_helper.get_resume_step(lab_dict)
        LOG.info("Resume Install step at {}".format(resume_install))

    if not install_conf:
        build_server = build_server if build_server else BuildServerPath.DEFAULT_BUILD_SERVER
        if not tis_builds_dir and not tis_build_dir:
            # Take latest master load from cengn
            host_build_dir_path = BuildServerPath.DEFAULT_HOST_BUILD_PATH
        elif tis_build_dir and os.path.isabs(tis_build_dir):
            host_build_dir_path = tis_build_dir
        else:
            # Take in-house StarlingX_Upstream_build
            tis_builds_dir = tis_builds_dir if tis_builds_dir else ''
            tis_build_dir = tis_build_dir if tis_build_dir else BuildServerPath.LATEST_BUILD
            host_build_dir_path = os.path.join(
                BuildServerPath.DEFAULT_WORK_SPACE, tis_builds_dir,
                tis_build_dir)

        host_build_dir_path = os.path.normpath(host_build_dir_path)
        if host_build_dir_path.endswith('/latest_build'):
            build_id = build_info.get_latest_host_build_dir(
                build_server=build_server,
                latest_build_simlink=host_build_dir_path)
            host_build_dir_path = (host_build_dir_path[:-len('latest_build')]
                                   + build_id)

        files_server = build_server
        if lab_file_dir:
            if lab_file_dir.find(":/") != -1:
                files_server = lab_file_dir[:lab_file_dir.find(":/")]
                lab_file_dir = lab_file_dir[lab_file_dir.find(":") + 1:]
            if not os.path.isabs(lab_file_dir):
                lab_file_dir = "{}/lab/yow/{}".format(host_build_dir_path,
                                                      lab_file_dir)
        else:
            lab_file_dir = "{}/lab/yow/{}".format(host_build_dir_path, lab_name if lab_name else '') \
                if not is_subcloud else "{}/lab/yow/{}".format(host_build_dir_path, dc_lab_name if dc_lab_name else '')

        if not heat_templates:
            if BuildServerPath.BldsDirNames.TC_19_05_BUILD in host_build_dir_path:
                heat_templates = os.path.join(
                    BuildServerPath.EAR_HOST_BUILD_PATH,
                    BuildServerPath.HEAT_TEMPLATES)
            else:
                heat_templates = os.path.join(
                    BuildServerPath.STX_HOST_BUILDS_DIR, 'latest_full_build',
                    BuildServerPath.HEAT_TEMPLATES)
        elif not os.path.isabs(heat_templates):
            heat_templates = os.path.join(host_build_dir_path, heat_templates)

        if not helm_chart_path:
            helm_path_in_build = BuildServerPath.STX_HELM_CHARTS_CENGN \
                if '/import/' in host_build_dir_path or '19.05' \
                in host_build_dir_path else BuildServerPath.TITANIUM_HELM_CHARTS

            helm_chart_path = os.path.join(host_build_dir_path,
                                           helm_path_in_build)

        if boot_type.lower() in ('usb_burn', 'pxe_iso',
                                 'iso_feed') and not iso_path:
            iso_path_in_build = BuildServerPath.ISO_PATH_CENGN \
                if '/import/' in host_build_dir_path \
                else BuildServerPath.ISO_PATH
            iso_path = os.path.join(host_build_dir_path, iso_path_in_build)

        install_conf = setups.write_installconf(
            lab=lab_arg,
            controller=controller,
            compute=compute,
            storage=storage,
            lab_files_dir=lab_file_dir,
            patch_dir=patch_dir,
            tis_build_dir=host_build_dir_path,
            build_server=build_server,
            files_server=files_server,
            license_path=install_license,
            guest_image=guest_image,
            heat_templates=heat_templates,
            boot=boot_type,
            iso_path=iso_path,
            security=security,
            low_latency=low_lat,
            stop=stop_step,
            vswitch_type=vswitch_type,
            boot_server=boot_server,
            resume=resume_install,
            skip=skiplist,
            kubernetes=kubernetes,
            helm_chart_path=helm_chart_path)

        setups.set_install_params(
            lab=lab_arg,
            skip=skiplist,
            resume=resume_install,
            wipedisk=wipedisk,
            drop=drop_num,
            installconf_path=install_conf,
            controller0_ceph_mon_device=controller0_ceph_mon_device,
            controller1_ceph_mon_device=controller1_ceph_mon_device,
            ceph_mon_gib=ceph_mon_gib,
            boot=boot_type,
            iso_path=iso_path,
            security=security,
            low_latency=low_lat,
            stop=stop_step,
            patch_dir=patch_dir,
            vswitch_type=vswitch_type,
            boot_server=boot_server,
            dc_float_ip=dc_float_ip,
            ipv6_oam=ipv6_oam,
            install_subcloud=subcloud_name,
            kubernetes=kubernetes,
            no_openstack=no_openstack,
            dc_ipv6=dc_ipv6,
            helm_chart_path=helm_chart_path,
            no_manage=no_manage,
            deploy_openstack_from_controller_1=
            deploy_openstack_from_controller_1,
            extract_deploy_config=extract_deploy_config,
            subcloud_host=subcloud_host)

    frame_str = '*' * len('Install Arguments:')
    args = "\n\n{}\nInstall Arguments:\n{}\n".format(frame_str, frame_str)
    install_vars = InstallVars.get_install_vars()
    bs = install_vars['BUILD_SERVER']
    for var, value in install_vars.items():
        if (not value and value != 0) or (value == bs
                                          and var != 'BUILD_SERVER'):
            continue
        elif var == 'LAB':
            for k, v in dict(value).items():
                if re.search('_nodes| ip', k):
                    args += "\n{:<20}: {}".format(k, v)
        else:
            args += "\n{:<20}: {}".format(var, value)
    args += "\n{:<20}: {}\n".format('LOG_DIR', ProjVar.get_var('LOG_DIR'))
    LOG.info(args)

    if resume_install:
        try:
            con0_ip = install_vars.get('LAB', {}).get('controller-0 ip')
            if con0_ip:
                with host_helper.ssh_to_host(con0_ip, timeout=60) as con0_ssh:
                    setups.set_build_info(con_ssh=con0_ssh)
                    setups.set_session(con_ssh=con0_ssh)
        except Exception:
            pass
Example #20
def get_lab_dict():
    return InstallVars.get_install_var('LAB')
Example #21
    def find_options(
        self,
        telnet_conn,
        end_of_menu=r"([A|a]utomatic(ally)?( boot)? in)|Press (\[Tab\]|\'e\') to edit"
        .encode(),
        option_identifier=r"(\dm?\)\s+[\w]+)|Boot from hard drive\s+|([\w]+\s)+\s+> "
        .encode(),
        newline=r'(\x1b\[\d+;\d+H)+'.encode()):

        super().find_options(telnet_conn,
                             end_of_menu=end_of_menu,
                             option_identifier=option_identifier,
                             newline=newline)
        # TODO: this is a wasteful way to initialize the Options.
        self.options = [
            KickstartOption(name=option.name,
                            index=option.index,
                            key=option.key) for option in self.options
        ]

        for option in self.options:
            # TODO: would like to make this more general, but it's impossible to determine the prompt
            #LOG.info("Kickstart option before match: {}".format(option.name))
            # matches = re.search(r"\s([A-Za-z\-\(\)]{2,}\s)+\s", option.name)
            # if matches:
            #     option_name = matches.group(0).strip()

            LOG.info("Kickstart option no match: {} tag: {} ".format(
                option.name.lower(), option.tag))
            if "security" in option.name.lower() and (
                    "  >" in option.name.lower()
                    or "options" in option.name.lower()):
                security_type = InstallVars.get_install_var("SECURITY")
                if security_type == 'extended':
                    security_menu = KickstartMenu(
                        name="PXE Security Menu",
                        kwargs=bios.BootMenus.Kickstart.Security)
                    self.sub_menus.append(security_menu)

            if "controller configuration" in option.name.lower(
            ) and "  >" in option.name.lower():
                if not self.get_sub_menu("Controller Configuration"):

                    kwargs = {
                        'name':
                        'Controller Configuration',
                        'prompt':
                        bios.BootMenus.Kickstart.
                        Controller_Configuration['prompt'].encode(),
                        'option_identifiers':
                        "Serial|Graphical"
                    }
                    controller_sub_menu = KickstartMenu(name=kwargs['name'],
                                                        kwargs=kwargs)

                    controller_sub_menu.options = [
                        KickstartOption(name="Serial Console",
                                        index=0,
                                        key="Enter"),
                        KickstartOption(name="Graphical Console",
                                        index=1,
                                        key="Enter")
                    ]

                    self.sub_menus.append(controller_sub_menu)
                    LOG.info("Kickstart sub menu added to menu {}: {}".format(
                        self.name, controller_sub_menu.name))

            if "console" in option.name.lower() and "  >" in option.name.lower(
            ):
                if not self.get_sub_menu("Console"):

                    kwargs = {
                        'name':
                        'Console',
                        'prompt':
                        bios.BootMenus.Kickstart.Console['prompt'].encode(),
                        'option_identifiers':
                        "STANDARD|EXTENDED"
                    }

                    console_sub_menu = KickstartMenu(name="Console",
                                                     kwargs=kwargs)

                    console_sub_menu.options = [
                        KickstartOption(name="STANDARD Security",
                                        index=0,
                                        key="Enter"),
                        KickstartOption(name="EXTENDED Security",
                                        index=1,
                                        key="Enter")
                    ]

                    self.sub_menus.append(console_sub_menu)
                    LOG.info("Kickstart sub menu added: {}".format(
                        console_sub_menu.name))

        current_option = self.get_current_option(telnet_conn)
        LOG.info("Current option = {} index = {}".format(
            current_option.name, current_option.index))
        self.index = current_option.index
Example #22
def _test_storage_profile(personality, from_backing, to_backing):
    """
    This test creates a storage profile and then applies it to a node with
    identical hardware, assuming one exists.

    Storage profiles do not apply to standard controller nodes.  They can be
    applied to controller+compute (AIO) nodes, compute nodes and storage nodes.

    Arguments:
    - personality (string) - controller, compute or storage
    - from_backing (string) - image, remote or None
    - to_backing (string) - image, remote or None

    Test Steps:
    1.  Query system and determine which nodes have compatible hardware.
    2.  Create a storage profile on one of those nodes
    3.  Apply the created storage profile on a compatible node*
    4.  Ensure the storage profiles have been successfully applied.

    * If the node is a compute node or a controller+compute, we will also change
      the backend if required for additional coverage.

    Returns:
    - Nothing
    """

    global PROFILES_TO_DELETE
    PROFILES_TO_DELETE = []

    # Skip if test is not applicable to hardware under test
    if personality == 'controller' and not system_helper.is_aio_system():
        skip("Test does not apply to controller hosts without subtype compute")

    hosts = system_helper.get_hosts(personality=personality)
    if not hosts:
        skip("No hosts of type {} available".format(personality))

    if (from_backing == "remote" or to_backing
            == "remote") and not system_helper.is_storage_system():
        skip("This test doesn't apply to systems without storage hosts")

    LOG.tc_step("Identify hardware compatible hosts")
    hash_to_hosts = get_hw_compatible_hosts(hosts)

    # Pick the hardware group that has the most compatible hosts
    current_size = 0
    candidate_hosts = []
    for hosts_group in hash_to_hosts.values():
        if len(hosts_group) > current_size:
            current_size = len(hosts_group)
            candidate_hosts = hosts_group
    LOG.info(
        "This is the total set of candidate hosts: {}".format(candidate_hosts))

    if len(candidate_hosts) < 2:
        skip("Insufficient hardware compatible hosts to run test")

    # Rsync lab setup dot files between controllers
    con_ssh = ControllerClient.get_active_controller()
    _rsync_files_to_con1(con_ssh=con_ssh, file_to_check="force.txt")

    # Take the hardware compatible hosts and check if any of them already have
    # the backend that we want.  This will save us test time.
    new_to_backing = None
    if personality == "compute":
        from_hosts = []
        to_hosts = []
        for host in candidate_hosts:
            host_backing = host_helper.get_host_instance_backing(host)
            if host_backing == from_backing:
                from_hosts.append(host)
            elif host_backing == to_backing:
                to_hosts.append(host)
        LOG.info(
            "Candidate hosts that already have the right from backing {}: {}".
            format(from_backing, from_hosts))
        LOG.info(
            "Candidate hosts that already have the right to backing {}: {}".
            format(to_backing, to_hosts))

        # Determine what hosts to use
        if not from_hosts and to_hosts:
            to_host = random.choice(to_hosts)
            candidate_hosts.remove(to_host)
            from_host = random.choice(candidate_hosts)
        elif not to_hosts and from_hosts:
            from_host = random.choice(from_hosts)
            candidate_hosts.remove(from_host)
            to_host = random.choice(candidate_hosts)
        elif not to_hosts and not from_hosts:
            to_host = random.choice(candidate_hosts)
            candidate_hosts.remove(to_host)
            from_host = random.choice(candidate_hosts)
        else:
            to_host = random.choice(to_hosts)
            from_host = random.choice(from_hosts)

        LOG.info("From host is: {}".format(from_host))
        LOG.info("To host is: {}".format(to_host))

        LOG.tc_step(
            "Check from host backing and convert to {} if necessary".format(
                from_backing))
        host_helper.set_host_storage_backing(from_host, from_backing)
        system_helper.wait_for_host_values(
            from_host,
            availability=HostAvailState.AVAILABLE,
            timeout=120,
            fail_ok=False)

        LOG.tc_step(
            "Check to host backing and convert to {} if necessary".format(
                to_backing))
        new_to_backing = host_helper.set_host_storage_backing(
            to_host, to_backing)
    elif personality == "controller":
        # For now, we don't want to host reinstall controller-0 since it will default to
        # pxeboot, but this could be examined as a possible enhancement.
        from_host = "controller-0"
        to_host = "controller-1"

        LOG.info("From host is: {}".format(from_host))
        LOG.info("To host is: {}".format(to_host))

        LOG.tc_step(
            "Check from host backing and convert to {} if necessary".format(
                from_backing))
        host_helper.set_host_storage_backing(from_host, from_backing)

        LOG.tc_step(
            "Check to host backing and convert to {} if necessary".format(
                to_backing))
        new_to_backing = host_helper.set_host_storage_backing(
            to_host, to_backing)
    else:
        # Backing doesn't apply to storage nodes so just pick from compatible hardware
        from_host = random.choice(candidate_hosts)
        candidate_hosts.remove(from_host)
        to_host = random.choice(candidate_hosts)

    LOG.tc_step(
        "Create storage and interface profiles on the from host {}".format(
            from_host))
    prof_name = 'storprof_{}_{}'.format(
        from_host, time.strftime('%Y%m%d_%H%M%S', time.localtime()))
    storage_helper.create_storage_profile(from_host, profile_name=prof_name)
    PROFILES_TO_DELETE.append(prof_name)

    # Deleting VMs in case the remaining host(s) cannot handle all VMs
    # migrating on lock, particularly important in the case of AIO-DX systems.
    LOG.tc_step(
        "Delete all VMs and lock the host before applying the storage profile")
    vm_helper.delete_vms()
    HostsToRecover.add(to_host, scope='function')
    system_helper.wait_for_host_values(from_host,
                                       availability=HostAvailState.AVAILABLE,
                                       timeout=120,
                                       fail_ok=False)
    system_helper.wait_for_host_values(to_host,
                                       availability=HostAvailState.AVAILABLE,
                                       timeout=120,
                                       fail_ok=False)

    # Negative test #1 - attempt to apply profile on unlocked host (should be rejected)
    LOG.tc_step('Apply the storage-profile {} onto unlocked host:{}'.format(
        prof_name, to_host))
    cmd = 'host-apply-storprofile {} {}'.format(to_host, prof_name)
    rc, msg = cli.system(cmd, fail_ok=True)
    assert rc != 0, msg
    host_helper.lock_host(to_host, swact=True)

    # 3 conditions to watch for: no partitions, ready partitions and in-use
    # partitions on the compute.  If in-use, delete and freshly install host.
    # If ready, delete all ready partitions to make room for potentially new
    # partitions.  If no partitions, just delete nova-local lvg.
    if personality == "compute":

        # Negative test #2 - attempt to apply profile onto host with existing
        # nova-local (should be rejected)
        LOG.tc_step(
            'Apply the storage-profile {} onto host with existing nova-local:{}'
            .format(prof_name, to_host))
        cmd = 'host-apply-storprofile {} {}'.format(to_host, prof_name)
        rc, msg = cli.system(cmd, fail_ok=True)
        assert rc != 0, msg

        # If we were simply switching backing (without applying a storage
        # profile), the nova-local lvg deletion can be omitted according to design
        LOG.tc_step("Delete nova-local lvg on to host {}".format(to_host))
        cli.system("host-lvg-delete {} nova-local".format(to_host))

        in_use = storage_helper.get_host_partitions(to_host, "In-Use")

        if in_use:

            # Negative test #3 - attempt to apply profile onto host with existing
            # in-use partitions (should be rejected)
            LOG.tc_step('Apply the storage-profile {} onto host with existing \
                         in-use partitions:{}'.format(prof_name, to_host))
            cmd = 'host-apply-storprofile {} {}'.format(to_host, prof_name)
            rc, msg = cli.system(cmd, fail_ok=True)
            assert rc != 0, msg

            LOG.tc_step(
                "In-use partitions found.  Must delete the host and freshly install before proceeding."
            )
            LOG.info("Host {} has in-use partitions {}".format(
                to_host, in_use))
            lab = InstallVars.get_install_var("LAB")
            lab.update(create_node_dict(lab['compute_nodes'], 'compute'))
            lab['boot_device_dict'] = create_node_boot_dict(lab['name'])
            install_helper.open_vlm_console_thread(to_host)

            LOG.tc_step("Delete the host {}".format(to_host))
            cli.system("host-bulk-export")
            cli.system("host-delete {}".format(to_host))
            assert len(
                system_helper.get_controllers()) > 1, "Host deletion failed"

            cli.system("host-bulk-add hosts.xml")
            system_helper.wait_for_host_values(
                to_host, timeout=6000, availability=HostAvailState.ONLINE)

            wait_for_disks(to_host)

        ready = storage_helper.get_host_partitions(to_host, "Ready")
        if ready:
            LOG.tc_step(
                "Ready partitions have been found.  Must delete them before profile application"
            )
            LOG.info("Host {} has Ready partitions {}".format(to_host, ready))
            for uuid in reversed(ready):
                storage_helper.delete_host_partition(to_host, uuid)
            # Don't bother restoring in this case since the system should be
            # functional after profile is applied.

        LOG.tc_step('Apply the storage-profile {} onto host:{}'.format(
            prof_name, to_host))
        cli.system('host-apply-storprofile {} {}'.format(to_host, prof_name))

        LOG.tc_step("Unlock to host")
        host_helper.unlock_host(to_host)

        to_host_backing = host_helper.get_host_instance_backing(to_host)
        LOG.info("To host backing was {} and is now {}".format(
            new_to_backing, to_host_backing))
        assert to_host_backing == from_backing, "Host backing was not changed on storage profile application"

    if personality == "storage":
        if not storage_helper.is_ceph_healthy():
            skip("Cannot run test when ceph is not healthy")

        LOG.tc_step("Delete the host {}".format(to_host))
        cli.system("host-bulk-export")
        cli.system("host-delete {}".format(to_host))
        cli.system("host-bulk-add hosts.xml")
        system_helper.wait_for_host_values(to_host,
                                           timeout=6000,
                                           availability=HostAvailState.ONLINE)

        wait_for_disks(to_host)

        LOG.tc_step('Apply the storage-profile {} onto host:{}'.format(
            prof_name, to_host))
        cli.system('host-apply-storprofile {} {}'.format(to_host, prof_name))

        # Re-provision interfaces through lab_setup.sh
        LOG.tc_step("Reprovision the host as necessary")
        files = ['interfaces']
        con_ssh = ControllerClient.get_active_controller()
        delete_lab_setup_files(con_ssh, to_host, files)

        rc, msg = install_helper.run_lab_setup()
        assert rc == 0, msg

        LOG.tc_step("Unlock to host")
        host_helper.unlock_host(to_host)

    if personality == "controller":

        # Note, install helper doesn't work on all labs.  Some labs don't
        # display BIOS type which causes install helper to fail
        lab = InstallVars.get_install_var("LAB")
        lab.update(create_node_dict(lab['controller_nodes'], 'controller'))
        lab['boot_device_dict'] = create_node_boot_dict(lab['name'])
        install_helper.open_vlm_console_thread(to_host)

        LOG.tc_step("Delete the host {}".format(to_host))
        cli.system("host-bulk-export")
        cli.system("host-delete {}".format(to_host))
        assert len(system_helper.get_controllers()) > 1, "Host deletion failed"

        cli.system("host-bulk-add hosts.xml")
        system_helper.wait_for_host_values(to_host,
                                           timeout=6000,
                                           availability=HostAvailState.ONLINE)

        wait_for_disks(to_host)

        LOG.tc_step("Apply the storage-profile {} onto host:{}".format(
            prof_name, to_host))
        cli.system("host-apply-storprofile {} {}".format(to_host, prof_name))

        # Need to re-provision everything on node through lab_setup (except storage)
        LOG.tc_step("Reprovision the host as necessary")
        files = [
            'interfaces', 'cinder_device', 'vswitch_cpus', 'shared_cpus',
            'extend_cgts_vg', 'addresses'
        ]
        con_ssh = ControllerClient.get_active_controller()
        delete_lab_setup_files(con_ssh, to_host, files)

        rc, msg = install_helper.run_lab_setup()
        assert rc == 0, msg

        LOG.tc_step("Unlock to host")
        host_helper.unlock_host(to_host)

        to_host_backing = host_helper.get_host_instance_backing(to_host)
        LOG.info("To host backing was {} and is now {}".format(
            new_to_backing, to_host_backing))
        assert to_host_backing == from_backing, "Host backing was not changed on storage profile application"
Example #23
def upgrade_setup(pre_check_upgrade):
    lab = InstallVars.get_install_var('LAB')
    col_kpi = ProjVar.get_var('COLLECT_KPI')
    collect_kpi_path = None
    if col_kpi:
        collect_kpi_path = ProjVar.get_var('KPI_PATH')

    # establish ssh connection with controller-0
    controller0_conn = ControllerClient.get_active_controller()
    cpe = system_helper.is_aio_system(controller0_conn)
    upgrade_version = UpgradeVars.get_upgrade_var('UPGRADE_VERSION')
    license_path = UpgradeVars.get_upgrade_var('UPGRADE_LICENSE')
    is_simplex = system_helper.is_aio_simplex()
    if license_path is None:
        if cpe:
            license_path = BuildServerPath.TIS_LICENSE_PATHS[upgrade_version][
                1]
        elif is_simplex:
            license_path = BuildServerPath.TIS_LICENSE_PATHS[upgrade_version][
                2]
        else:
            license_path = BuildServerPath.TIS_LICENSE_PATHS[upgrade_version][
                0]
    bld_server = get_build_server_info(
        UpgradeVars.get_upgrade_var('BUILD_SERVER'))
    load_path = UpgradeVars.get_upgrade_var('TIS_BUILD_DIR')
    if isinstance(load_path, list):
        load_path = load_path[0]
    output_dir = ProjVar.get_var('LOG_DIR')
    patch_dir = UpgradeVars.get_upgrade_var('PATCH_DIR')

    current_version = system_helper.get_sw_version(use_existing=False)

    bld_server_attr = dict()
    bld_server_attr['name'] = bld_server['name']
    bld_server_attr['server_ip'] = bld_server['ip']
    # bld_server_attr['prompt'] = r'.*yow-cgts[1234]-lx.*$ '
    bld_server_attr['prompt'] = Prompt.BUILD_SERVER_PROMPT_BASE.format(
        'svc-cgcsauto', bld_server['name'])
    # '.*yow\-cgts[34]\-lx ?~\]?\$ '
    bld_server_conn = SSHClient(bld_server_attr['name'],
                                user=TestFileServer.get_user(),
                                password=TestFileServer.get_password(),
                                initial_prompt=bld_server_attr['prompt'])
    bld_server_conn.connect()
    bld_server_conn.exec_cmd("bash")
    bld_server_conn.set_prompt(bld_server_attr['prompt'])
    bld_server_conn.deploy_ssh_key(install_helper.get_ssh_public_key())
    bld_server_attr['ssh_conn'] = bld_server_conn
    bld_server_obj = Server(**bld_server_attr)

    # get upgrade license file for release
    LOG.info("Downloading the license {}:{} for target release {}".format(
        bld_server_obj.name, license_path, upgrade_version))
    install_helper.download_upgrade_license(lab, bld_server_obj, license_path)

    LOG.fixture_step("Checking if target release license is downloaded......")
    cmd = "test -e " + os.path.join(HostLinuxUser.get_home(),
                                    "upgrade_license.lic")
    assert controller0_conn.exec_cmd(
        cmd)[0] == 0, "Upgrade license file not present in Controller-0"
    LOG.info("Upgrade  license {} download complete".format(license_path))

    # Install the license file for release
    LOG.fixture_step("Installing the target release {} license file".format(
        upgrade_version))
    rc = upgrade_helper.install_upgrade_license(os.path.join(
        HostLinuxUser.get_home(), "upgrade_license.lic"),
                                                con_ssh=controller0_conn)
    assert rc == 0, "Unable to install upgrade license file in Controller-0"
    LOG.info("Target release license installed......")

    # Check load already imported if not  get upgrade load iso file
    # Run the load_import command to import the new release iso image build
    if not upgrade_helper.get_imported_load_version():
        LOG.fixture_step(
            "Downloading the {} target release load iso image file {}:{}".
            format(upgrade_version, bld_server_obj.name, load_path))
        install_helper.download_upgrade_load(lab,
                                             bld_server_obj,
                                             load_path,
                                             upgrade_ver=upgrade_version)
        upgrade_load_path = os.path.join(HostLinuxUser.get_home(),
                                         install_helper.UPGRADE_LOAD_ISO_FILE)

        cmd = "test -e {}".format(upgrade_load_path)
        assert controller0_conn.exec_cmd(cmd)[0] == 0, "Upgrade build iso image file {} not present in Controller-0" \
            .format(upgrade_load_path)
        LOG.info("Target release load {} download complete.".format(
            upgrade_load_path))
        LOG.fixture_step("Importing Target release  load iso file from".format(
            upgrade_load_path))
        upgrade_helper.import_load(upgrade_load_path,
                                   upgrade_ver=upgrade_version)

        # download and apply patches if patches are available in patch directory
        if patch_dir and upgrade_version < "18.07":
            LOG.fixture_step(
                "Applying  {} patches, if present".format(upgrade_version))
            apply_patches(lab, bld_server_obj, patch_dir)

    # check disk space
    check_controller_filesystem()

    # Check for simplex and return
    if is_simplex:
        backup_dest_path = BackupVars.get_backup_var('backup_dest_path')

        delete_backups = BackupVars.get_backup_var('delete_buckups')

        _upgrade_setup_simplex = {
            'lab': lab,
            'cpe': cpe,
            'output_dir': output_dir,
            'current_version': current_version,
            'upgrade_version': upgrade_version,
            'build_server': bld_server_obj,
            'load_path': load_path,
            'backup_dest_path': backup_dest_path,
            'delete_backups': delete_backups
        }
        return _upgrade_setup_simplex
        # check which nodes are upgraded using orchestration

    orchestration_after = UpgradeVars.get_upgrade_var('ORCHESTRATION_AFTER')
    storage_apply_strategy = UpgradeVars.get_upgrade_var('STORAGE_APPLY_TYPE')
    compute_apply_strategy = UpgradeVars.get_upgrade_var('COMPUTE_APPLY_TYPE')
    max_parallel_computes = UpgradeVars.get_upgrade_var(
        'MAX_PARALLEL_COMPUTES')
    alarm_restrictions = UpgradeVars.get_upgrade_var('ALARM_RESTRICTIONS')

    if orchestration_after:
        LOG.info("Upgrade orchestration start option: {}".format(
            orchestration_after))
    if storage_apply_strategy:
        LOG.info("Storage apply type: {}".format(storage_apply_strategy))
    if compute_apply_strategy:
        LOG.info("Compute apply type: {}".format(compute_apply_strategy))
    if max_parallel_computes:
        LOG.info("Maximum parallel computes: {}".format(max_parallel_computes))
    if alarm_restrictions:
        LOG.info("Alarm restriction option: {}".format(alarm_restrictions))

    controller_nodes, compute_nodes, storage_nodes = system_helper.get_hosts_per_personality(
        rtn_tuple=True)
    system_nodes = controller_nodes + compute_nodes + storage_nodes
    orchestration_nodes = []
    cpe = not (compute_nodes or storage_nodes)

    if not cpe and orchestration_after and (orchestration_after == 'default'
                                            or 'controller'
                                            in orchestration_after):
        orchestration_nodes.extend(system_nodes)
        orchestration_nodes.remove('controller-1')
        if 'controller' in orchestration_after:
            orchestration_nodes.remove('controller-0')

    elif not cpe and orchestration_after and 'storage' in orchestration_after:
        number_of_storages = len(storage_nodes)
        num_selected = int(orchestration_after.split(':')[1]) if len(orchestration_after.split(':')) == 2 \
            else number_of_storages
        if num_selected > number_of_storages:
            num_selected = number_of_storages
        if num_selected > 0:
            # The first num_selected storage nodes are upgraded manually; the
            # remaining storage nodes go through orchestration.
            manual_storages = {'storage-{}'.format(i) for i in range(num_selected)}
            orchestration_nodes.extend(
                [h for h in storage_nodes if h not in manual_storages])
        orchestration_nodes.extend(compute_nodes)
    elif not cpe and orchestration_after and 'compute' in orchestration_after:
        number_of_computes = len(compute_nodes)
        num_selected = int(orchestration_after.split(':')[1]) if len(orchestration_after.split(':')) == 2 \
            else number_of_computes
        if num_selected > number_of_computes:
            num_selected = number_of_computes

        orchestration_nodes.extend(compute_nodes[num_selected:])
    else:
        LOG.info(
            "System {} will be upgraded through the manual procedure without orchestration."
            .format(lab['name']))

    man_upgrade_nodes = [
        h for h in system_nodes if h not in orchestration_nodes
    ]

    LOG.info(" Nodes upgraded manually are: {}".format(man_upgrade_nodes))
    LOG.info(" Nodes upgraded through Orchestration are: {}".format(
        orchestration_nodes))

    _upgrade_setup = {
        'lab': lab,
        'cpe': cpe,
        'output_dir': output_dir,
        'current_version': current_version,
        'upgrade_version': upgrade_version,
        'build_server': bld_server_obj,
        'load_path': load_path,
        'man_upgrade_nodes': man_upgrade_nodes,
        'orchestration_nodes': orchestration_nodes,
        'storage_apply_strategy': storage_apply_strategy,
        'compute_apply_strategy': compute_apply_strategy,
        'max_parallel_computes': max_parallel_computes,
        'alarm_restrictions': alarm_restrictions,
        'col_kpi': collect_kpi_path,
    }
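    # Sanity check that the requested upgrade version is among the imported
    # loads before handing control to the upgrade tests.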
    ver = upgrade_helper.get_imported_load_version().pop()
    assert upgrade_version in ver, "Import error. Expected version {} not " \
                                   "found in imported load list {}".format(
                                       upgrade_version, ver)
    LOG.info("Imported target release load iso version {}: {}".format(
        upgrade_version, ver))
    return _upgrade_setup
Example #24
def test_backup(pre_system_backup):
    """
    Test create backup on the system and it's avaliable and in-use volumes.
    copy backup files to USB flash drive

    Args:


    Setup:
        - create system backup use config_controller (create system,image tgz)
        - backup image separately if its storage lab that use CEPH
        - back up all available and in-use volumes from the lab

    Test Steps:
        - check system and img tgz are created for system backup
        - check all images are back up in storage
        - check all volumes tgz are created for backup

    Teardown:
        - Delete vm if booted
        - Delete created flavor (module)

    """

    backup_info = pre_system_backup
    LOG.info('Before backup, perform configuration changes and launch VMs')

    con_ssh = ControllerClient.get_active_controller()
    backup_info['con_ssh'] = con_ssh

    is_ceph = backup_info.get('is_storage_lab', False)
    LOG.debug('This is a {} lab'.format(
        'storage (Ceph)' if is_ceph else 'non-storage'))

    if is_ceph:
        con_ssh.exec_sudo_cmd('touch /etc/ceph/ceph.client.None.keyring')
        pre_backup_test(backup_info, con_ssh)

    lab = InstallVars.get_install_var('LAB')
    LOG.tc_step(
        "System backup: lab={}; backup dest = {}; backup dest full path = {} ..."
        .format(lab['name'], backup_info['backup_dest'],
                backup_info['backup_dest_full_path']))
    copy_to_usb = None
    usb_part2 = None

    backup_dest = backup_info['backup_dest']
    if backup_dest == 'usb':
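        # Backup files go to the second USB partition; the first (bootable)
        # partition is left untouched (see pre_system_backup).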
        usb_partition_info = backup_info['usb_parts_info']
        for k in usb_partition_info:
            if k.endswith('2'):
                usb_part2 = k
        copy_to_usb = usb_part2

    backup_info['copy_to_usb'] = copy_to_usb
    backup_info['backup_file_prefix'] = get_backup_file_name_prefix(
        backup_info)
    backup_info['cinder_backup'] = BackupVars.get_backup_var('cinder_backup')
    reinstall_storage = BackupVars.get_backup_var('reinstall_storage')

    if reinstall_storage and is_ceph:
        backup_cinder_volumes(backup_info)

    backup_sysconfig_images(backup_info)

    collect_logs('after_backup')

    if system_helper.is_avs(con_ssh=con_ssh):
        # Copy the system backup ISO file for future restores
        assert backup_load_iso_image(backup_info), \
            "Failed to copy the system backup iso image"
Example #25
def pre_system_backup():
    """
    Actions before system backup, including:
        - check the USB device is ready if it is the destination
        - create folder for the backup files on destination server
        - collect logs on the current system

    Args:

    Returns:
        dict: backup settings, including destination, USB partition info and
            the destination server object (for local/test-server backups)
    """
    lab = InstallVars.get_install_var('LAB')

    LOG.info("Preparing lab for system backup....")
    backup_dest = BackupVars.get_backup_var("BACKUP_DEST")

    NATBoxClient.set_natbox_client()

    _backup_info = {
        'backup_dest': backup_dest,
        'usb_parts_info': None,
        'backup_dest_full_path': None,
        'dest_server': None
    }

    if backup_dest == 'usb':
        _backup_info['dest'] = 'usb'
        active_controller_name = system_helper.get_active_controller_name()
        if active_controller_name != 'controller-0':
            msg = "controller-0 is not the active controller"
            LOG.info(msg + ", try to swact the host")
            host_helper.swact_host(active_controller_name)
            active_controller_name = system_helper.get_active_controller_name()
            assert active_controller_name == 'controller-0', msg

        LOG.fixture_step(
            "Checking if a USB flash drive is plugged into the controller-0 node... "
        )
        usb_device = install_helper.get_usb_device_name()
        assert usb_device, "No USB found in controller-0"
        parts_info = install_helper.get_usb_device_partition_info(
            usb_device=usb_device)

        part1 = "{}1".format(usb_device)
        part2 = "{}2".format(usb_device)

        if len(parts_info) < 3:
            skip(
                "USB {} is not partitioned; create two partitions using fdisk: partition 1 = {}1, "
                "size = 2G, bootable; partition 2 = {}2, size equal to the available space."
                .format(usb_device, usb_device, usb_device))

        devices = parts_info.keys()
        LOG.info("Size of {} = {}".format(
            part1, install_helper.get_usb_partition_size(part1)))
        if not (part1 in devices
                and install_helper.get_usb_partition_size(part1) >= 2):
            skip("Insufficient size in {}; at least 2G is required. {}".format(
                part1, parts_info))

        if not (part2 in devices
                and install_helper.get_usb_partition_size(part2) >= 10):
            skip("Insufficient size in {}; at least 10G is required. {}".format(
                part2, parts_info))

        if not install_helper.mount_usb(part2):
            skip("Failed to mount USB partition for backups")

        LOG.tc_step("Erasing existing files from USB ... ")

        assert install_helper.delete_backup_files_from_usb(
            part2), "Failed to erase existing files from USB"
        _backup_info['usb_parts_info'] = parts_info
        _backup_info['backup_dest_full_path'] = BackupRestore.USB_BACKUP_PATH

    elif backup_dest == 'local':
        _backup_info['dest'] = 'local'

        # save backup files on the test server when the destination is local
        backup_dest_path = BackupVars.get_backup_var('BACKUP_DEST_PATH')
        backup_dest_full_path = '{}/{}'.format(backup_dest_path,
                                               lab['short_name'])
        # ssh to test server
        test_server_attr = dict()
        test_server_attr['name'] = TestFileServer.get_hostname().split('.')[0]
        test_server_attr['server_ip'] = TestFileServer.get_server()
        test_server_attr['prompt'] = r'\[{}@{} {}\]\$ '\
            .format(TestFileServer.get_user(), test_server_attr['name'], TestFileServer.get_user())

        test_server_conn = install_helper.establish_ssh_connection(
            test_server_attr['name'],
            user=TestFileServer.get_user(),
            password=TestFileServer.get_password(),
            initial_prompt=test_server_attr['prompt'])

        test_server_conn.set_prompt(test_server_attr['prompt'])
        test_server_conn.deploy_ssh_key(install_helper.get_ssh_public_key())
        test_server_attr['ssh_conn'] = test_server_conn
        test_server_obj = Server(**test_server_attr)
        _backup_info['dest_server'] = test_server_obj
        # Create the backup path for this lab on the test server if it does
        # not exist yet (exec_cmd returns a nonzero rc when 'test -e' fails)
        if test_server_conn.exec_cmd(
                "test -e {}".format(backup_dest_full_path))[0]:
            test_server_conn.exec_cmd(
                "mkdir -p {}".format(backup_dest_full_path))
        # delete any existing files
        test_server_conn.exec_cmd("rm -rf {}/*".format(backup_dest_full_path))

        _backup_info['usb_parts_info'] = None
        _backup_info['backup_dest_full_path'] = backup_dest_full_path

    collect_logs('before_br')

    _backup_info['is_storage_lab'] = (len(system_helper.get_storage_nodes()) >
                                      0)
    return _backup_info
Example #26
def patch_orchestration_setup():
    ProjVar.set_var(SOURCE_OPENRC=True)
    patching_helper.check_system_health()

    lab = InstallVars.get_install_var('LAB')
    bld_server = get_build_server_info(
        PatchingVars.get_patching_var('PATCH_BUILD_SERVER'))
    output_dir = ProjVar.get_var('LOG_DIR')
    patch_dir = PatchingVars.get_patching_var('PATCH_DIR')

    LOG.info("Using  patch directory path: {}".format(patch_dir))
    bld_server_attr = dict()
    bld_server_attr['name'] = bld_server['name']
    bld_server_attr['server_ip'] = bld_server['ip']
    bld_server_attr['prompt'] = Prompt.BUILD_SERVER_PROMPT_BASE.format(
        'svc-cgcsauto', bld_server['name'])
    bld_server_conn = SSHClient(bld_server_attr['name'],
                                user=TestFileServer.get_user(),
                                password=TestFileServer.get_password(),
                                initial_prompt=bld_server_attr['prompt'])
    bld_server_conn.connect()
    bld_server_conn.exec_cmd("bash")
    bld_server_conn.set_prompt(bld_server_attr['prompt'])
    bld_server_conn.deploy_ssh_key(install_helper.get_ssh_public_key())
    bld_server_attr['ssh_conn'] = bld_server_conn
    bld_server_obj = Server(**bld_server_attr)

    # Download patch files from specified patch dir
    LOG.info("Downloading patch files from patch dir {}".format(patch_dir))
    rc = bld_server_obj.ssh_conn.exec_cmd("test -d " + patch_dir)[0]
    assert rc == 0, "Patch directory path {} not found".format(patch_dir)
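    # Stage the patch files: clear any previously downloaded patches, then
    # pull the new ones from the build server's patch directory.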
    clear_patch_dest_dir()
    patches = download_patches(lab, bld_server_obj, patch_dir)
    if len(patches) == 0:
        pytest.skip("No patch files found in {}:{}.".format(
            bld_server_obj.name, patch_dir))

    controller_apply_strategy = PatchingVars.get_patching_var(
        'CONTROLLER_APPLY_TYPE')
    storage_apply_strategy = PatchingVars.get_patching_var(
        'STORAGE_APPLY_TYPE')
    compute_apply_strategy = PatchingVars.get_patching_var(
        'COMPUTE_APPLY_TYPE')
    max_parallel_computes = PatchingVars.get_patching_var(
        'MAX_PARALLEL_COMPUTES')
    instance_action = PatchingVars.get_patching_var('INSTANCE_ACTION')
    alarm_restrictions = PatchingVars.get_patching_var('ALARM_RESTRICTIONS')

    if controller_apply_strategy:
        LOG.info("Controller apply type: {}".format(controller_apply_strategy))
    if storage_apply_strategy:
        LOG.info("Storage apply type: {}".format(storage_apply_strategy))
    if compute_apply_strategy:
        LOG.info("Compute apply type: {}".format(compute_apply_strategy))
    if max_parallel_computes:
        LOG.info("Maximum parallel computes: {}".format(max_parallel_computes))
    if instance_action:
        LOG.info("Instance action: {}".format(instance_action))
    if alarm_restrictions:
        LOG.info("Alarm restriction option: {}".format(alarm_restrictions))

    _patching_setup = {
        'lab': lab,
        'output_dir': output_dir,
        'build_server': bld_server_obj,
        'patch_dir': patch_dir,
        'patches': patches,
        'controller_apply_strategy': controller_apply_strategy,
        'storage_apply_strategy': storage_apply_strategy,
        'compute_apply_strategy': compute_apply_strategy,
        'max_parallel_computes': max_parallel_computes,
        'instance_action': instance_action,
        'alarm_restrictions': alarm_restrictions,
    }

    LOG.info("Patch Orchestration ready to start: {} ".format(_patching_setup))
    return _patching_setup
Example #27
def test_standard_restore_install(install_setup):
    """
     Complete restore install steps for a standard lab

     Prerequisites:
         - pxeboot has been setup.
     Test Setups:
         - Retrieve dictionary containing lab information
         - Retrieve required paths to directories, images, and licenses
         - Determine active controller
         - Initialize build server and boot server objects
         - Retrieve what steps to be skipped
     Test Steps:
         - Install controller-0
         - Unlock controller-0
         - Boot the other hosts
         - Unlock the other hosts
     """
    lab = install_setup["lab"]
    hosts = lab["hosts"]
    boot_device = lab['boot_device_dict']
    controller0_node = lab["controller-0"]
    patch_dir = install_setup["directories"]["patches"]
    patch_server = install_setup["servers"]["patches"]

    # Power off all hosts except controller-0
    hostnames = [
        hostname for hostname in lab['hosts'] if 'controller-0' not in hostname
    ]
    vlm_helper.power_off_hosts(hostnames, lab=lab, count=2)

    do_boot_c0 = RestoreVars.get_restore_var('RESTORE_PRE_BOOT_CONTROLLER0')
    stop_before_ansible_restore = \
        RestoreVars.get_restore_var('STOP_BEFORE_ANSIBLE_RESTORE')

    if do_boot_c0:
        fresh_install_helper.install_controller(
            sys_type=SysType.REGULAR,
            patch_dir=patch_dir,
            patch_server_conn=patch_server.ssh_conn,
            init_global_vars=True)
    else:
        LOG.tc_step("Skipping controller-0 install")

    if stop_before_ansible_restore:
        skip("Stopping test before restoring")

    if InstallVars.get_install_var('IPV6_OAM'):
        restore_helper.setup_ipv6_oam(controller0_node)

    restore_helper.restore_platform()

    fresh_install_helper.unlock_active_controller(controller0_node)

    controller0_node.telnet_conn.hostname = r"controller\-[01]"
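    # After a restore the active unit may come up as either controller, so
    # the telnet prompt must match both hostnames.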
    controller0_node.telnet_conn.set_prompt(Prompt.CONTROLLER_PROMPT)
    if controller0_node.ssh_conn is None:
        controller0_node.ssh_conn = install_helper.ssh_to_controller(
            controller0_node.host_ip)
    install_helper.update_auth_url(ssh_con=controller0_node.ssh_conn)

    # Boot all hosts except controller-0
    fresh_install_helper.restore_boot_hosts(boot_device)

    # Unlock controller-1
    fresh_install_helper.unlock_hosts(['controller-1'],
                                      con_ssh=controller0_node.ssh_conn)

    # Unlock computes
    fresh_install_helper.unlock_hosts(
        [host_ for host_ in hosts if 'compute' in host_],
        con_ssh=controller0_node.ssh_conn)

    fresh_install_helper.send_arp_cmd()

    if lab.get("floating ip"):
        collect_sys_net_info(lab)
        setup_tis_ssh(lab)

    fresh_install_helper.reset_global_vars()
    fresh_install_helper.verify_install_uuid(lab)
Example #28
def test_install_cloned_image(install_clone_setup):

    controller1 = 'controller-1'

    lab = InstallVars.get_install_var('LAB')
    install_output_dir = ProjVar.get_var('LOG_DIR')

    controller0_node = lab['controller-0']
    hostnames = install_clone_setup['hostnames']
    system_mode = install_clone_setup['system_mode']
    lab_name = lab['name']
    LOG.info("Starting install-clone on AIO lab {} .... ".format(lab_name))
    LOG.tc_step("Booting controller-0 ... ")

    if controller0_node.telnet_conn is None:
        controller0_node.telnet_conn = install_helper.open_telnet_session(
            controller0_node, install_output_dir)
        try:
            controller0_node.telnet_conn.login()
        except:
            LOG.info("Telnet Login failed. Attempting to reset password")
            try:
                controller0_node.telnet_conn.login(reset=True)
            except:
                if controller0_node.telnet_conn:
                    controller0_node.telnet_conn.close()
                    controller0_node.telnet_conn = None

    if controller0_node.telnet_conn:
        install_helper.wipe_disk_hosts(hostnames)

    # power off hosts
    LOG.tc_step("Powring off system hosts ... ")
    install_helper.power_off_host(hostnames)

    install_helper.boot_controller(boot_usb=True,
                                   small_footprint=True,
                                   clone_install=True)

    # establish telnet connection with controller
    LOG.tc_step(
        "Establishing telnet connection with controller-0 after install-clone ..."
    )

    node_name_in_ini = r'{}.*\~\$ '.format(controller0_node.host_name)
    normalized_name = re.sub(r'([^\d])0*(\d+)', r'\1\2', node_name_in_ini)
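    # normalized_name strips zero-padding from numeric suffixes in the
    # hostname (e.g. 'host-01' -> 'host-1') so either prompt form matches.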

    # controller_prompt = Prompt.TIS_NODE_PROMPT_BASE.format(lab['name'].split('_')[0]) \
    #                     + '|' + Prompt.CONTROLLER_0 \
    #                     + '|{}'.format(node_name_in_ini) \
    #                     + '|{}'.format(normalized_name)

    if controller0_node.telnet_conn:
        controller0_node.telnet_conn.close()

    output_dir = ProjVar.get_var('LOG_DIR')
    controller0_node.telnet_conn = install_helper.open_telnet_session(
        controller0_node, output_dir)
    controller0_node.telnet_conn.login()
    controller0_node.telnet_conn.exec_cmd("xterm")

    LOG.tc_step("Verify install-clone status ....")
    install_helper.check_clone_status(
        tel_net_session=controller0_node.telnet_conn)

    LOG.info("Source Keystone user admin environment ...")

    #controller0_node.telnet_conn.exec_cmd("cd; source /etc/platform/openrc")

    LOG.tc_step("Checking controller-0 hardware ....")
    install_helper.check_cloned_hardware_status('controller-0')

    if system_mode == 'duplex':
        LOG.tc_step("Booting controller-1 ... ")
        boot_interfaces = lab['boot_device_dict']
        install_helper.open_vlm_console_thread('controller-1',
                                               boot_interface=boot_interfaces,
                                               vlm_power_on=True,
                                               wait_for_thread=True)

        LOG.info("waiting for {} to boot ...".format(controller1))

        LOG.info("Verifying {} is Locked, Disabled and Online ...".format(
            controller1))
        system_helper.wait_for_hosts_states(
            controller1,
            check_interval=20,
            use_telnet=True,
            con_telnet=controller0_node.telnet_conn,
            administrative=HostAdminState.LOCKED,
            operational=HostOperState.DISABLED,
            availability=HostAvailState.ONLINE)

        LOG.info("Unlocking {} ...".format(controller1))

        rc, output = host_helper.unlock_host(
            controller1,
            use_telnet=True,
            con_telnet=controller0_node.telnet_conn)
        assert rc == 0, "Host {} unlock failed: {}".format(controller1, output)

        LOG.info("Host {} unlocked successfully ...".format(controller1))

        LOG.info("Host controller-1  booted successfully... ")

        LOG.tc_step("Checking controller-1 hardware ....")
        install_helper.check_cloned_hardware_status(controller1)
    LOG.tc_step("Customizing the cloned system ....")
    LOG.info("Changing the OAM IP configuration ... ")
    install_helper.update_oam_for_cloned_system(system_mode=system_mode)

    LOG.tc_step("Downloading lab specific license, config and scripts ....")
    software_version = system_helper.get_sw_version()
    load_path = BuildServerPath.LATEST_HOST_BUILD_PATHS[software_version]
    install_helper.download_lab_config_files(
        lab, install_clone_setup['build_server'], load_path)

    LOG.tc_step("Running lab cleanup to removed source attributes ....")
    install_helper.run_setup_script(script='lab_cleanup')

    LOG.tc_step(
        "Running lab setup script to update cloned system attributes ....")
    rc, output = install_helper.run_lab_setup()
    assert rc == 0, "Lab setup run failed: {}".format(output)

    time.sleep(30)
    LOG.tc_step(
        "Checking config status of controller-0 and perform lock/unlock if necessary..."
    )
    if system_helper.get_host_values(
            'controller-0', 'config_status')[0] == 'Config out-of-date':
        host_helper.lock_unlock_controllers()

    LOG.tc_step("Verifying system health after restore ...")
    system_helper.wait_for_all_alarms_gone(timeout=300)
    rc, failed = system_helper.get_system_health_query()
    assert rc == 0, "System health not OK: {}".format(failed)
Example #29
def test_duplex_plus_install(install_setup):
    """
     Complete fresh_install steps for a duplex plus (AIO+) lab
     Test Setups:
         - Retrieve dictionary containing lab information
         - Retrieve required paths to directories, images, and licenses
         - Determine active controller
         - Initialize build server and boot server objects
         - Retrieve what steps to be skipped
     Test Steps:
         - Install controller-0
         - Download configuration files, heat templates, images, and licenses
         - Configure controller-0, run lab_setup, and unlock controller-0
         - Add the standby controller
         - Run the lab_setup.sh script
         - Re-add the standby controller
         - Run the lab_setup.sh script
         - Install the Standby Controller
         - Run the lab_setup.sh script twice
         - Unlock the standby controller
         - Run the lab_setup.sh script
     """
    lab = install_setup["lab"]
    hosts = lab["hosts"]
    boot_device = lab["boot_device_dict"]
    controller0_node = lab["controller-0"]
    final_step = install_setup["control"]["stop"]
    patch_dir = install_setup["directories"]["patches"]
    patch_server = install_setup["servers"]["patches"]
    guest_server = install_setup["servers"]["guest"]
    install_subcloud = install_setup.get("install_subcloud")
    lab_files_server = install_setup["servers"]["lab_files"]
    helm_chart_server = install_setup["servers"]["helm_charts"]

    if final_step == '0' or final_step == "setup":
        skip("stopping at install step: {}".format(LOG.test_step))

    fresh_install_helper.install_controller(
        sys_type=SysType.AIO_DX,
        patch_dir=patch_dir,
        patch_server_conn=patch_server.ssh_conn,
        init_global_vars=True)
    # controller0_node.telnet_conn.login()
    # controller0_node.telnet_conn.flush()
    # fresh_install_helper.set_software_version_var(use_telnet=True, con_telnet=controller0_node.telnet_conn)

    build_server = install_setup["servers"]["build"]
    fresh_install_helper.download_lab_files(
        lab_files_server=lab_files_server,
        build_server=build_server,
        guest_server=guest_server,
        load_path=InstallVars.get_install_var("TIS_BUILD_DIR"),
        license_path=InstallVars.get_install_var("LICENSE"),
        guest_path=InstallVars.get_install_var('GUEST_IMAGE'),
        helm_chart_server=helm_chart_server)

    if install_subcloud:
        fresh_install_helper.configure_subcloud(controller0_node,
                                                lab_files_server,
                                                subcloud=install_subcloud,
                                                final_step=final_step)
    else:
        fresh_install_helper.configure_controller_(controller0_node)

    deploy_mgr = fresh_install_helper.use_deploy_manager(controller0_node, lab)
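    # When the deployment manager drives the install, the manual lab_setup and
    # unlock steps are replaced by waiting for the deploy manager to apply the
    # corresponding configuration.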
    if not deploy_mgr:

        fresh_install_helper.check_ansible_configured_mgmt_interface(
            controller0_node, lab)

        #fresh_install_helper.collect_lab_config_yaml(lab, build_server, stage=fresh_install_helper.DEPLOY_INTITIAL)
        fresh_install_helper.run_lab_setup(con_ssh=controller0_node.ssh_conn)
        fresh_install_helper.unlock_active_controller(controller0_node)

    else:
        fresh_install_helper.wait_for_deploy_mgr_controller_config(
            controller0_node, lab=lab)

    controller0_node.telnet_conn.hostname = r"controller\-[01]"
    controller0_node.telnet_conn.set_prompt(Prompt.CONTROLLER_PROMPT)
    if controller0_node.ssh_conn is None:
        controller0_node.ssh_conn = install_helper.establish_ssh_connection(
            controller0_node.host_ip)
    install_helper.update_auth_url(ssh_con=controller0_node.ssh_conn)

    if not deploy_mgr:
        fresh_install_helper.bulk_add_hosts(lab=lab,
                                            con_ssh=controller0_node.ssh_conn)
    else:
        fresh_install_helper.wait_for_deployment_mgr_to_bulk_add_hosts(
            controller0_node, lab=lab)

    fresh_install_helper.boot_hosts(boot_device)
    #fresh_install_helper.collect_lab_config_yaml(lab, build_server, stage=fresh_install_helper.DEPLOY_INTERIM)
    if not deploy_mgr:
        fresh_install_helper.run_lab_setup(con_ssh=controller0_node.ssh_conn)
        fresh_install_helper.unlock_hosts(["controller-1"],
                                          con_ssh=controller0_node.ssh_conn)

    else:
        fresh_install_helper.wait_for_deploy_mgr_lab_config(controller0_node,
                                                            lab=lab)

    fresh_install_helper.wait_for_hosts_ready(hosts, lab=lab)
    container_helper.wait_for_apps_status(apps='platform-integ-apps',
                                          timeout=1800,
                                          con_ssh=controller0_node.ssh_conn,
                                          status='applied')
    fresh_install_helper.run_lab_setup(con_ssh=controller0_node.ssh_conn)

    if lab.get("floating ip"):
        collect_sys_net_info(lab)
        setup_tis_ssh(lab)

    #fresh_install_helper.check_heat_resources(con_ssh=controller0_node.ssh_conn)
    if not deploy_mgr:
        fresh_install_helper.collect_lab_config_yaml(
            lab, build_server, stage=fresh_install_helper.DEPLOY_LAST)

    fresh_install_helper.attempt_to_run_post_install_scripts()

    fresh_install_helper.reset_global_vars()

    fresh_install_helper.verify_install_uuid(lab)
    if deploy_mgr:
        fresh_install_helper.validate_deployment_mgr_install(
            controller0_node, lab)
Example #30
def test_simplex_install(install_setup):
    """
     Complete fresh_install steps for a simplex lab
     Test Setups:
         - Retrieve dictionary containing lab information
         - Retrieve required paths to directories, images, and licenses
         - Initialize build server and boot server objects
         - Retrieve what steps to be skipped
     Test Steps:
         - Install controller-0
         - Download configuration files, heat templates, images, and licenses
         - Configure controller-0, run lab_setup, and unlock controller-0
         - Run lab setup script if specified
         - Setup heat resources
     """
    lab = install_setup["lab"]
    controller0_node = lab["controller-0"]
    final_step = install_setup["control"]["stop"]
    patch_dir = install_setup["directories"]["patches"]
    patch_server = install_setup["servers"]["patches"]
    guest_server = install_setup["servers"]["guest"]
    install_subcloud = install_setup.get("install_subcloud")
    helm_chart_server = install_setup["servers"]["helm_charts"]

    if final_step == '0' or final_step == "setup":
        skip("stopping at install step: {}".format(LOG.test_step))

    fresh_install_helper.install_controller(sys_type=SysType.AIO_SX, patch_dir=patch_dir,
                                            patch_server_conn=patch_server.ssh_conn, init_global_vars=True)
    # controller0_node.telnet_conn.login()
    # controller0_node.telnet_conn.flush()
    # fresh_install_helper.set_software_version_var(use_telnet=True, con_telnet=controller0_node.telnet_conn)

    lab_files_server = install_setup["servers"]["lab_files"]
    build_server = install_setup["servers"]["build"]
    fresh_install_helper.download_lab_files(lab_files_server=lab_files_server, build_server=build_server,
                                            guest_server=guest_server,
                                            load_path=InstallVars.get_install_var("TIS_BUILD_DIR"),
                                            license_path=InstallVars.get_install_var("LICENSE"),
                                            guest_path=InstallVars.get_install_var('GUEST_IMAGE'),
                                            helm_chart_server=helm_chart_server)

    if install_subcloud:
        fresh_install_helper.configure_subcloud(controller0_node, install_setup["dc_system_controller"],
                                                subcloud=install_subcloud, final_step=final_step)
    else:
        fresh_install_helper.configure_controller_(controller0_node)

    deploy_mgr = fresh_install_helper.use_deploy_manager(controller0_node, lab)
    if not deploy_mgr:
        # fresh_install_helper.collect_lab_config_yaml(lab, build_server, stage=fresh_install_helper.DEPLOY_INTITIAL)

        fresh_install_helper.run_lab_setup(con_ssh=controller0_node.ssh_conn)
        fresh_install_helper.unlock_active_controller(controller0_node)
        controller0_node.telnet_conn.hostname = r"controller\-[01]"
        controller0_node.telnet_conn.set_prompt(Prompt.CONTROLLER_PROMPT)
    else:
        fresh_install_helper.wait_for_deploy_mgr_controller_config(controller0_node, lab=lab)

    if controller0_node.ssh_conn is None:
        controller0_node.ssh_conn = install_helper.ssh_to_controller(controller0_node.host_ip)
    install_helper.update_auth_url(ssh_con=controller0_node.ssh_conn)

    container_helper.wait_for_apps_status(apps='platform-integ-apps', timeout=1800,
                                          con_ssh=controller0_node.ssh_conn, status='applied')
    fresh_install_helper.run_lab_setup(controller0_node.ssh_conn)

    if lab.get("floating ip"):
        setup_tis_ssh(lab)

    fresh_install_helper.wait_for_hosts_ready(controller0_node.name, lab=lab)

    # fresh_install_helper.check_heat_resources(con_ssh=controller0_node.ssh_conn)
    if not deploy_mgr:
        fresh_install_helper.collect_lab_config_yaml(lab, build_server, stage=fresh_install_helper.DEPLOY_LAST)

    if install_subcloud:
        fresh_install_helper.wait_for_subcloud_to_be_managed(install_subcloud, install_setup["dc_system_controller"],
                                                             lab=lab)
    fresh_install_helper.attempt_to_run_post_install_scripts()

    fresh_install_helper.reset_global_vars()