def test_system_upgrade_simplex(upgrade_setup,
                                check_system_health_query_upgrade):
    """
     This script starts the upgrade with creating a backup file which is wipes the disk at the end of the execution .
      to complete the upgrade test_upgrade_simplex_restore.py need to be executed with the backup file path.
    Args:
        upgrade_setup:   This will check parameters ftp upload load and patches
        check_system_health_query_upgrade: Check the health of system for upgrade
    Example
        To Execute

         check_system_health_query_upgrade: Checks the upgrade health .
        steps:

         1. FTP load and patches and loads to system.
         2. Checks the health of the upgrade
         3. Start upgrade
         4. Checks the backup files.
         5. Backup the volume and images
         6. Execute host-upgrade
         7. Ftp backup files

    teardown:
         flush ssh.

    """
    lab = upgrade_setup['lab']

    current_version = upgrade_setup['current_version']
    upgrade_version = upgrade_setup['upgrade_version']

    if not system_helper.is_aio_simplex():
        assert False, "This lab is not simplex to start upgrade"
    force = False
    controller0 = lab['controller-0']

    backup_dest_path = BackupVars.get_backup_var('BACKUP_DEST_PATH')
    backup_dest_full_path = '{}/{}/'.format(backup_dest_path,
                                            lab['short_name'])
    date = time.strftime(BACKUP_FILE_DATE_STR)
    build_id = system_helper.get_build_info()['BUILD_ID']
    lab_system_name = lab['name']
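    # Backup file name pattern: <PREFIX_BACKUP_FILE><date>_<build id>_<lab name>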
    backup_file_name = "{}{}_{}_{}".format(PREFIX_BACKUP_FILE, date, build_id,
                                           lab_system_name)
    LOG.info("Backup file name: {}".format(backup_file_name))
    # ssh to test server
    test_server_attr = dict()
    test_server_attr['name'] = TestFileServer.get_hostname().split('.')[0]
    test_server_attr['server_ip'] = TestFileServer.get_server()
    test_server_attr['prompt'] = r'\[{}@{} {}\]\$ ' \
        .format(TestFileServer.get_user(), test_server_attr['name'], TestFileServer.get_user())

    test_server_conn = install_helper.establish_ssh_connection(
        test_server_attr['name'],
        user=TestFileServer.get_user(),
        password=TestFileServer.get_password(),
        initial_prompt=test_server_attr['prompt'])

    test_server_conn.set_prompt(test_server_attr['prompt'])
    test_server_conn.deploy_ssh_key(install_helper.get_ssh_public_key())
    test_server_attr['ssh_conn'] = test_server_conn
    test_server_obj = Server(**test_server_attr)
    dest_server = test_server_obj
    # create the backup path for this lab on the test server if it does not already exist
    if test_server_conn.exec_cmd(
            "test -e {}".format(backup_dest_full_path))[0]:
        test_server_conn.exec_cmd("mkdir -p {}".format(backup_dest_full_path))
    LOG.tc_step("Checking system health for upgrade .....")
    if check_system_health_query_upgrade[0] == 0:
        LOG.info("System health OK for upgrade......")
    elif check_system_health_query_upgrade[0] == 1:
        assert False, "System health query upgrade failed: {}".format(
            check_system_health_query_upgrade[1])
    elif check_system_health_query_upgrade[0] in (2, 3):
        LOG.info(
            "System health indicates minor alarms; using --force option to start upgrade......"
        )
        force = True

    vol_ids = cinder_helper.get_volumes(auth_info=Tenant.get('admin'))
    if len(vol_ids) > 0:
        LOG.info("Exporting cinder volumes: {}".format(vol_ids))
        exported = install_helper.export_cinder_volumes(
            backup_dest='local',
            backup_dest_path=backup_dest_full_path,
            dest_server=dest_server)

        assert len(exported) > 0, "Failed to export any cinder volumes"
        assert len(exported) == len(
            vol_ids), "Some volumes failed to export: {}".format(
                set(vol_ids) - set(exported))
    else:
        LOG.info(
            "No cinder volumes are available in the system; skipping cinder volume export..."
        )

    LOG.tc_step("Starting upgrade from release {} to target release {}".format(
        current_version, upgrade_version))
    upgrade_helper.system_upgrade_start(force=force)
    upgrade_helper.wait_for_upgrade_states('started',
                                           timeout=1360,
                                           check_interval=30,
                                           fail_ok=True)

    LOG.info("upgrade started successfully......")

    # scp backup files to test server
    LOG.tc_step("SCP system and image tgz file into test server {} ",
                backup_dest_full_path)

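    # Assumption: upgrade-start creates upgrade_data_*system.tgz and upgrade_data_*images.tgz
    # under /opt/backups; the wildcards below pick up whichever files were generated.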
    source_file = '/opt/backups/upgrade_data_*system.tgz '
    backup_dest_full_path_image = backup_dest_full_path
    backup_dest_full_path = backup_dest_full_path + "/" + backup_file_name + "_system.tgz"
    common.scp_from_active_controller_to_test_server(source_file,
                                                     backup_dest_full_path,
                                                     is_dir=False)
    backup_dest_full_path_image = backup_dest_full_path_image + "/" + backup_file_name + "_images.tgz"
    source_file = '/opt/backups/upgrade_data_*images.tgz '
    common.scp_from_active_controller_to_test_server(
        source_file, backup_dest_full_path_image, is_dir=False)
    LOG.info("Starting {} upgrade.....".format(controller0.name))
    # Below line will wipe disk
    # upgrade_helper.upgrade_host(controller0.name, lock=True)

    LOG.tc_step(
        "Host upgrade executed. This will wipe the disk and reboot controller-0.")
    time.sleep(3)
    # open vlm console for controller-0 for boot through mgmt interface
    LOG.info(
        "Upgrade simplex backup is complete. The restore script should be run on this backup to complete the upgrade."
    )


def test_system_upgrade(vms_with_upgrade, upgrade_setup,
                        check_system_health_query_upgrade):
    LOG.info("Boot VM before upgrade ")
    vms = vms_with_upgrade
    vm_helper.ping_vms_from_natbox(vms)
    lab = upgrade_setup['lab']
    current_version = upgrade_setup['current_version']
    upgrade_version = upgrade_setup['upgrade_version']

    controller0 = lab['controller-0']
    upgrade_helper.ensure_host_provisioned(controller0.name)
    force = False
    LOG.tc_step("Checking system health for upgrade .....")
    if check_system_health_query_upgrade[0] == 0:
        LOG.info("System health OK for upgrade......")
    elif check_system_health_query_upgrade[0] == 2:
        LOG.info(
            "System health indicate minor alarms; using --force option to start upgrade......"
        )
        force = True
    else:
        assert False, "System health query upgrade failed: {}".format(
            check_system_health_query_upgrade[1])

    LOG.tc_step("Starting upgrade from release {} to target release {}".format(
        current_version, upgrade_version))
    upgrade_helper.system_upgrade_start(force=force)
    LOG.info("upgrade started successfully......")

    # upgrade standby controller
    LOG.tc_step("Upgrading controller-1")
    upgrade_helper.upgrade_host("controller-1", lock=True)
    LOG.info("Host controller-1 is upgraded successfully......")

    vm_helper.ping_vms_from_natbox(vms)
    # unlock upgraded controller-1
    LOG.tc_step("Unlocking controller-1 after upgrade......")
    host_helper.unlock_host("controller-1",
                            available_only=True,
                            check_hypervisor_up=False)
    LOG.info("Host controller-1 unlocked after upgrade......")

    # Swact to standby controller-1
    LOG.tc_step("Swacting to controller-1 .....")
    rc, output = host_helper.swact_host(hostname="controller-0")
    assert rc == 0, "Failed to swact: {}".format(output)
    LOG.info("Swacted and  controller-1 has become active......")

    # upgrade  controller-0
    LOG.tc_step("Upgrading  controller-0......")

    LOG.info("Ensure controller-0 is provisioned before upgrade.....")
    upgrade_helper.ensure_host_provisioned(controller0.name)
    LOG.info("Host {} is provisioned for upgrade.....".format(
        controller0.name))

    # open vlm console for controller-0 for boot through mgmt interface
    LOG.info("Opening a vlm console for controller-0 .....")
    install_helper.open_vlm_console_thread("controller-0")

    LOG.info("Starting {} upgrade.....".format(controller0.name))
    upgrade_helper.upgrade_host(controller0.name, lock=True)
    LOG.info("controller-0 is upgraded successfully.....")

    # unlock upgraded controller-0
    LOG.tc_step("Unlocking controller-0 after upgrade......")
    host_helper.unlock_host(controller0.name, available_only=True)
    LOG.info("Host {} unlocked after upgrade......".format(controller0.name))
    vm_helper.ping_vms_from_natbox(vms)
    upgrade_hosts = install_helper.get_non_controller_system_hosts()
    LOG.info(
        "Starting upgrade of the other system hosts: {}".format(upgrade_hosts))

    for host in upgrade_hosts:
        LOG.tc_step("Starting {} upgrade.....".format(host))
        if "storage" in host:
            # wait for replication  to be healthy
            storage_helper.wait_for_ceph_health_ok()

        upgrade_helper.upgrade_host(host, lock=True)
        LOG.info("{} is upgraded successfully.....".format(host))
        LOG.tc_step("Unlocking {} after upgrade......".format(host))
        host_helper.unlock_host(host, available_only=True)
        LOG.info("Host {} unlocked after upgrade......".format(host))
        LOG.info("Host {} upgrade complete.....".format(host))
        vm_helper.ping_vms_from_natbox(vms)

    # Activate the upgrade
    LOG.tc_step("Activating upgrade....")
    upgrade_helper.activate_upgrade()
    LOG.info("Upgrade activate complete.....")

    # Make controller-0 the active controller
    # Swact to standby controller-0
    LOG.tc_step("Making controller-0 active.....")
    rc, output = host_helper.swact_host(hostname="controller-1")
    assert rc == 0, "Failed to swact: {}".format(output)
    LOG.info("Swacted to controller-0 ......")

    # Complete upgrade
    LOG.tc_step("Completing upgrade from  {} to {}".format(
        current_version, upgrade_version))
    upgrade_helper.complete_upgrade()
    LOG.info("Upgrade is complete......")

    LOG.info("Lab: {} upgraded successfully".format(lab['name']))

    # Delete the previous load
    LOG.tc_step("Deleting  {} load... ".format(current_version))
    upgrade_helper.delete_imported_load()
    LOG.tc_step("Delete  previous load version {}".format(current_version))


def test_system_upgrade(upgrade_setup, check_system_health_query_upgrade):
    lab = upgrade_setup['lab']
    current_version = upgrade_setup['current_version']
    upgrade_version = upgrade_setup['upgrade_version']
    bld_server = upgrade_setup['build_server']
    collect_kpi = upgrade_setup['col_kpi']
    missing_manifests = False
    cinder_configuration = False
    force = False

    controller0 = lab['controller-0']
    if not upgrade_helper.is_host_provisioned(controller0.name):
        rc, output = upgrade_helper.upgrade_host_lock_unlock(controller0.name)
        assert rc == 0, "Failed to lock/unlock host {}: {}".format(
            controller0.name, output)

    # update health query
    # system_upgrade_health = list(upgrade_helper.get_system_health_query_upgrade())
    system_upgrade_health = list(
        upgrade_helper.get_system_health_query_upgrade_2())

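    # Assumed layout of the health query result, based on how it is used below:
    # [status, error_info, actions], where actions may contain 'lock_unlock', 'swact' and
    # 'force_upgrade' entries of the form (required, reason).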
    LOG.tc_step("Checking system health for upgrade .....")
    if system_upgrade_health[0] == 0:
        LOG.info("System health OK for upgrade......")
    elif system_upgrade_health[0] == 2:
        if system_upgrade_health[2] and "lock_unlock" in system_upgrade_health[
                2].keys():
            controller_nodes = system_upgrade_health[2]["lock_unlock"][0]
            LOG.info("Locking/Unlocking required for {} ......".format(
                controller_nodes))
            if 'controller-1' in controller_nodes:
                rc, output = upgrade_helper.upgrade_host_lock_unlock(
                    'controller-1')
                assert rc == 0, "Failed to lock/unlock host {}: {}".format(
                    'controller-1', output)
            if 'controller-0' in controller_nodes:
                rc, output = upgrade_helper.upgrade_host_lock_unlock(
                    'controller-0')
                assert rc == 0, "Failed to lock/unlock host {}: {}".format(
                    'controller-0', output)
                time.sleep(60)
                # system_upgrade_health[2]["swact"][0] = False
        if system_upgrade_health[2]["swact"][0]:
            LOG.info("Swact Required: {}".format(
                system_upgrade_health[2]["swact"][1]))
            host_helper.swact_host('controller-0')
            time.sleep(60)
            host_helper.swact_host('controller-1')
            time.sleep(60)
        if system_upgrade_health[2]["force_upgrade"][0]:
            LOG.info("{}; using --force option to start upgrade......".format(
                system_upgrade_health[2]["force_upgrade"][1]))
            force = True

    else:
        assert False, "System health query upgrade failed: {}".format(
            system_upgrade_health[1])

    # if system_upgrade_health[0] == 0:
    #     LOG.info("System health OK for upgrade......")
    # if system_upgrade_health[0] == 1:
    #     assert False, "System health query upgrade failed: {}".format(system_upgrade_health[1])
    #
    # if system_upgrade_health[0] == 4 or system_upgrade_health[0] == 2:
    #     LOG.info("System health indicate missing manifests; lock/unlock controller-0 to resolve......")
    #     missing_manifests = True
    #     if any("Cinder configuration" in k for k in system_upgrade_health[1].keys()):
    #         cinder_configuration = True
    #
    # if system_upgrade_health[0] == 3 or system_upgrade_health[0] == 2:
    #
    #     LOG.info("System health indicate minor alarms; using --force option to start upgrade......")
    #     force = True
    #
    # if missing_manifests:
    #     LOG.info("Locking/Unlocking to resolve missing manifests in controller......")
    #
    #     lock_unlock_hosts = []
    #     if any("controller-1" in k for k in system_upgrade_health[1].keys()):
    #         lock_unlock_hosts.append('controller-1')
    #     if any("controller-0" in k for k in system_upgrade_health[1].keys()):
    #         lock_unlock_hosts.append('controller-0')
    #         cinder_configuration = False
    #
    #     for host in lock_unlock_hosts:
    #         rc, output = upgrade_helper.upgrade_host_lock_unlock(host)
    #         assert rc == 0, "Failed to lock/unlock host {}: {}".format(host, output)
    #
    # if cinder_configuration:
    #     LOG.info("Invalid Cinder configuration: Swact to controller-1 and back to synchronize.......")
    #     host_helper.swact_host('controller-0')
    #     time.sleep(60)
    #     host_helper.swact_host('controller-1')

    LOG.tc_step("Starting upgrade from release {} to target release {}".format(
        current_version, upgrade_version))
    upgrade_helper.system_upgrade_start(force=force)
    upgrade_helper.wait_for_upgrade_states("started")
    LOG.info("upgrade started successfully......")
    if collect_kpi:
        upgrade_helper.collect_upgrade_start_kpi(lab, collect_kpi)

    # upgrade standby controller
    LOG.tc_step("Upgrading controller-1")
    upgrade_helper.upgrade_host("controller-1", lock=True)
    LOG.info("Host controller-1 is upgraded successfully......")

    # unlock upgraded controller-1
    LOG.tc_step("Unlocking controller-1 after upgrade......")
    host_helper.unlock_host("controller-1",
                            timeout=(HostTimeout.CONTROLLER_UNLOCK + 10),
                            available_only=True,
                            check_hypervisor_up=False)
    LOG.info("Host controller-1 unlocked after upgrade......")

    time.sleep(60)

    # Before Swacting ensure the controller-1 is in available state
    if not system_helper.wait_for_host_values(
            "controller-1",
            timeout=600,
            fail_ok=True,
            operational=HostOperState.ENABLED,
            availability=HostAvailState.AVAILABLE):
        err_msg = " Swacting to controller-1 is not possible because controller-1 is not in available state " \
                  "within  the specified timeout"
        assert False, err_msg

    # Swact to standby controller-1
    LOG.tc_step("Swacting to controller-1 .....")
    rc, output = host_helper.swact_host(hostname="controller-0")
    assert rc == 0, "Failed to swact: {}".format(output)
    LOG.info("Swacted and  controller-1 has become active......")

    time.sleep(60)

    # upgrade  controller-0
    LOG.tc_step("Upgrading  controller-0......")
    controller0 = lab['controller-0']

    # open vlm console for controller-0 for boot through mgmt interface
    if 'vbox' not in lab['name']:
        LOG.info("Opening a vlm console for controller-0 .....")
        install_helper.open_vlm_console_thread("controller-0", upgrade=True)

    LOG.info("Starting {} upgrade.....".format(controller0.name))
    upgrade_helper.upgrade_host(controller0.name, lock=True)
    LOG.info("controller-0 is upgraded successfully.....")

    # unlock upgraded controller-0
    LOG.tc_step("Unlocking controller-0 after upgrade......")
    host_helper.unlock_host(controller0.name, available_only=True)
    LOG.info("Host {} unlocked after upgrade......".format(controller0.name))

    upgrade_hosts = install_helper.get_non_controller_system_hosts()
    LOG.info(
        "Starting upgrade of the other system hosts: {}".format(upgrade_hosts))

    for host in upgrade_hosts:
        LOG.tc_step("Starting {} upgrade.....".format(host))
        if "storage" in host:
            # wait for replication  to be healthy
            ceph_health_timeout = 300
            if 'vbox' in lab['name']:
                ceph_health_timeout = 3600
            storage_helper.wait_for_ceph_health_ok(timeout=ceph_health_timeout)

        upgrade_helper.upgrade_host(host, lock=True)
        LOG.info("{} is upgraded successfully.....".format(host))
        LOG.tc_step("Unlocking {} after upgrade......".format(host))
        host_helper.unlock_host(host, available_only=True)
        LOG.info("Host {} unlocked after upgrade......".format(host))
        LOG.info("Host {} upgrade complete.....".format(host))

    # Activate the upgrade
    LOG.tc_step("Activating upgrade....")
    upgrade_helper.activate_upgrade()
    LOG.info("Upgrade activate complete.....")

    # Make controller-0 the active controller
    # Swact to standby controller-0
    LOG.tc_step("Making controller-0 active.....")
    rc, output = host_helper.swact_host(hostname="controller-1")
    assert rc == 0, "Failed to swact: {}".format(output)
    LOG.info("Swacted to controller-0 ......")

    # Complete upgrade
    LOG.tc_step("Completing upgrade from  {} to {}".format(
        current_version, upgrade_version))
    upgrade_helper.complete_upgrade()
    LOG.info("Upgrade is complete......")

    LOG.info("Lab: {} upgraded successfully".format(lab['name']))

    # Delete the previous load
    LOG.tc_step("Deleting  {} load... ".format(current_version))
    upgrade_helper.delete_imported_load()
    LOG.tc_step("Delete  previous load version {}".format(current_version))

    LOG.tc_step(
        "Downloading images to upgraded {} lab ".format(upgrade_version))
    install_helper.download_image(
        lab, bld_server, BuildServerPath.GUEST_IMAGE_PATHS[upgrade_version])

    load_path = upgrade_setup['load_path']

    LOG.tc_step(
        "Downloading heat templates to upgraded {} lab ".format(upgrade_version))
    install_helper.download_heat_templates(lab, bld_server, load_path)

    LOG.tc_step("Downloading lab config scripts to upgraded {} lab ".format(
        upgrade_version))
    install_helper.download_lab_config_files(lab, bld_server, load_path)


def test_system_upgrade_controllers(upgrade_setup,
                                    check_system_health_query_upgrade):
    lab = upgrade_setup['lab']
    current_version = upgrade_setup['current_version']
    upgrade_version = upgrade_setup['upgrade_version']

    # run system upgrade-start
    # must be run in controller-0
    active_controller = system_helper.get_active_controller_name()
    LOG.tc_step("Checking if active controller is controller-0......")
    assert "controller-0" in active_controller, "The active controller is not " \
                                                "controller-0. Make controller-0 " \
                                                "active before starting upgrade"

    force = False
    LOG.tc_step("Checking system health for upgrade .....")
    if check_system_health_query_upgrade[0] == 0:
        LOG.info("System health OK for upgrade......")
    elif check_system_health_query_upgrade[0] == 2:
        LOG.info(
            "System health indicate minor alarms; using --force option to start upgrade......"
        )
        force = True
    else:
        assert False, "System health query upgrade failed: {}".format(
            check_system_health_query_upgrade[1])

    LOG.info("Starting upgrade from release {} to target release {}".format(
        current_version, upgrade_version))
    upgrade_helper.system_upgrade_start(force=force)
    LOG.tc_step("upgrade started successfully......")

    # upgrade standby controller
    LOG.tc_step("Upgrading controller-1")
    upgrade_helper.upgrade_host("controller-1", lock=True)
    LOG.tc_step("Host controller-1 is upgraded successfully......")

    # unlock upgraded controller-1
    LOG.tc_step("Unlocking controller-1 after upgrade......")
    host_helper.unlock_host("controller-1",
                            available_only=True,
                            check_hypervisor_up=False)
    LOG.tc_step("Host controller-1 unlocked after upgrade......")

    time.sleep(60)
    # Before Swacting ensure the controller-1 is in available state
    if not system_helper.wait_for_host_values(
            "controller-1",
            timeout=360,
            fail_ok=True,
            operational=HostOperState.ENABLED,
            availability=HostAvailState.AVAILABLE):
        err_msg = " Swacting to controller-1 is not possible because controller-1 is not in available state " \
                  "within  the specified timeout"
        assert False, err_msg

    # Swact to standby controller-1
    LOG.tc_step("Swacting to controller-1 .....")
    rc, output = host_helper.swact_host(hostname="controller-0")
    assert rc == 0, "Failed to swact: {}".format(output)
    LOG.info("Swacted and  controller-1 has become active......")
    time.sleep(60)
    # upgrade  controller-0
    LOG.tc_step("Upgrading  controller-0......")
    controller0 = lab['controller-0']

    LOG.info("Ensure controller-0 is provisioned before upgrade.....")
    upgrade_helper.ensure_host_provisioned(controller0.name)
    LOG.info("Host {} is provisioned for upgrade.....".format(
        controller0.name))

    # open vlm console for controller-0 for boot through mgmt interface
    LOG.info("Opening a vlm console for controller-0 .....")
    install_helper.open_vlm_console_thread("controller-0")

    LOG.info("Starting {} upgrade.....".format(controller0.name))
    upgrade_helper.upgrade_host(controller0.name, lock=True)
    LOG.info("controller-0 is upgraded successfully.....")

    # unlock upgraded controller-0
    LOG.tc_step("Unlocking controller-0 after upgrade......")
    host_helper.unlock_host(controller0.name, available_only=True)
    LOG.info("Host {} unlocked after upgrade......".format(controller0.name))


def test_system_upgrade(upgrade_setup, check_system_health_query_upgrade):
    """
    This test verifies the system upgrade using either orchestration or a manual (one node at a
    time) procedure. The system hosts are upgraded in the order: controller-1, controller-0,
    storages, computes. Upgrading through orchestration or manually is selected with the
    --orchestration option. The standby controller-1 is always upgraded first using the manual
    procedure, regardless of the orchestration option. The remaining nodes are upgraded either one
    at a time or through orchestration, depending on the option selected; the default is to use
    upgrade orchestration. The --orchestration option is specified in the form
    [<host personality>[:<number of hosts>]], where:
        <personality> is either compute or storage
        <number of hosts> is the number of hosts that are upgraded manually before using
                          orchestration.
        e.g.:
          --orchestration compute:1   - do a manual upgrade for controller-0, controller-1, all
                                        storages (if present) and one compute; the remaining
                                        computes are upgraded through orchestration.
          --orchestration default     - do a manual upgrade for controller-1 and use orchestration
                                        for the rest of the nodes.
          --orchestration controller  - do a manual upgrade for controller-1 and controller-0; the
                                        rest of the nodes are upgraded through orchestration.
          --orchestration storage:2   - use orchestration after 2 storages are upgraded manually.

    Args:
        upgrade_setup:
        check_system_health_query_upgrade:

    Returns:

    """

    lab = upgrade_setup['lab']
    current_version = upgrade_setup['current_version']
    upgrade_version = upgrade_setup['upgrade_version']
    bld_server = upgrade_setup['build_server']
    collect_kpi = upgrade_setup['col_kpi']

    # orchestration = 'upgrade'
    man_upgrade_nodes = upgrade_setup['man_upgrade_nodes']
    orchestration_nodes = upgrade_setup['orchestration_nodes']
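    # Assumption: the upgrade_setup fixture splits the hosts into man_upgrade_nodes and
    # orchestration_nodes according to the --orchestration option described in the docstring.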
    system_upgrade_health = list(check_system_health_query_upgrade)
    missing_manifests = False
    force = False
    controller0 = lab['controller-0']
    if not upgrade_helper.is_host_provisioned(controller0.name):
        upgrade_helper.ensure_host_provisioned(controller0.name)
    # update health query
    # system_upgrade_health = list(upgrade_helper.get_system_health_query_upgrade())

    system_upgrade_health = list(upgrade_helper.get_system_health_query_upgrade_2())

    LOG.tc_step("Checking system health for upgrade .....")
    if system_upgrade_health[0] == 0:
        LOG.info("System health OK for upgrade......")
    elif system_upgrade_health[0] == 2:
        if system_upgrade_health[2] and "lock_unlock" in system_upgrade_health[2].keys():
            controller_nodes = system_upgrade_health[2]["lock_unlock"][0]
            LOG.info("Locking/Unlocking required for {} ......".format(controller_nodes))
            if 'controller-1' in controller_nodes:
                rc, output = upgrade_helper.upgrade_host_lock_unlock('controller-1')
                assert rc == 0, "Failed to lock/unlock host {}: {}".format('controller-1', output)
            if 'controller-0' in controller_nodes:
                rc, output = upgrade_helper.upgrade_host_lock_unlock('controller-0')
                assert rc == 0, "Failed to lock/unlock host {}: {}".format('controller-0', output)
                time.sleep(60)
                # system_upgrade_health[2]["swact"] = False
        if system_upgrade_health[2]["swact"][0]:
            LOG.info("Swact Required: {}".format(system_upgrade_health[2]["swact"][1]))
            host_helper.swact_host('controller-0')
            time.sleep(60)
            host_helper.swact_host('controller-1')
            time.sleep(60)
        if system_upgrade_health[2]["force_upgrade"][0]:
            LOG.info("{}; using --force option to start upgrade......"
                     .format(system_upgrade_health[2]["force_upgrade"][1]))
            force = True

    else:
        assert False, "System health query upgrade failed: {}".format(system_upgrade_health[1])

    #
    #
    # LOG.tc_step("Checking system health for upgrade .....")
    # if system_upgrade_health[0] == 0:
    #     LOG.info("System health OK for upgrade......")
    # if system_upgrade_health[0] == 1:
    #     assert False, "System health query upgrade failed: {}".format(system_upgrade_health[1])
    #
    # if system_upgrade_health[0] == 4 or system_upgrade_health[0] == 2:
    #     LOG.info("System health indicate missing manifests; lock/unlock controller-0 to resolve......")
    #     missing_manifests = True
    #
    # if system_upgrade_health[0] == 3 or system_upgrade_health[0] == 2:
    #
    #     LOG.info("System health indicate minor alarms; using --force option to start upgrade......")
    #     force = True
    #
    # if missing_manifests:
    #     LOG.info("Locking/Unlocking to resolve missing manifests in controller......")
    #
    #     lock_unlock_hosts = []
    #     if any("controller-1" in k for k in check_system_health_query_upgrade[1].keys()):
    #         lock_unlock_hosts.append('controller-1')
    #     if any("controller-0" in k for k in check_system_health_query_upgrade[1].keys()):
    #         lock_unlock_hosts.append('controller-0')
    #
    #     for host in lock_unlock_hosts:
    #         rc, output = upgrade_helper.upgrade_host_lock_unlock(host)
    #         assert rc == 0, "Failed to lock/unlock host {}: {}".format(host, output)

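    # Record the upgrade start time; it is passed as init_time for controller-1 KPI collection.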
    upgrade_init_time = str(datetime.now())

    LOG.tc_step("Starting upgrade from release {} to target release {}".format(current_version, upgrade_version))
    current_state = upgrade_helper.get_upgrade_state()
    if "No upgrade in progress" in current_state:
        upgrade_helper.system_upgrade_start(force=force)
        LOG.info("upgrade started successfully......")
    elif "started" in current_state:
        LOG.info("upgrade already started ......")
    else:
        LOG.info("upgrade is already in state {} please continue manual upgrade ......".format(current_state))
        assert False, "upgrade is already in state {} please continue manual upgrade ......".format(current_state)
    time.sleep(60)
    # upgrade standby controller
    LOG.tc_step("Upgrading controller-1")
    upgrade_helper.upgrade_controller('controller-1')

    time.sleep(60)

    # Swact to standby controller-1
    LOG.tc_step("Swacting to controller-1 .....")
    # Before Swacting ensure the controller-1 is in available state
    if not system_helper.wait_for_host_values("controller-1", timeout=900, fail_ok=True,
                                              operational=HostOperState.ENABLED,
                                              availability=HostAvailState.AVAILABLE):
        err_msg = " Swacting to controller-1 is not possible because controller-1 is not in available state " \
                  "within  the specified timeout"
        assert False, err_msg

    if collect_kpi:
        upgrade_helper.collected_upgrade_controller1_kpi(lab, collect_kpi, init_time=upgrade_init_time)

    rc, output = host_helper.swact_host(hostname="controller-0")
    assert rc == 0, "Failed to swact: {}".format(output)
    LOG.info("Swacted and  controller-1 has become active......")
    time.sleep(120)
    # active_controller = system_helper.get_active_controller_name()

    if 'controller-1' in man_upgrade_nodes:
        man_upgrade_nodes.remove('controller-1')
    if len(man_upgrade_nodes) > 0:
        upgrade_helper.manual_upgrade_hosts(manual_nodes=man_upgrade_nodes)
    if len(orchestration_nodes) > 0:
        upgrade_helper.orchestration_upgrade_hosts(upgraded_hosts=man_upgrade_nodes,
                                                   orchestration_nodes=orchestration_nodes)
    if collect_kpi:
        if len(orchestration_nodes) > 0:
            upgrade_helper.collect_upgrade_orchestration_kpi(lab, collect_kpi)
        else:
            if upgrade_setup['cpe']:
                upgrade_helper.collected_upgrade_controller0_kpi(lab, collect_kpi)

    # Activate the upgrade
    LOG.tc_step("Activating upgrade....")
    upgrade_helper.activate_upgrade()
    LOG.info("Upgrade activate complete.....")

    # Make controller-0 the active controller
    # Swact to standby controller-0
    LOG.tc_step("Making controller-0 active.....")
    rc, output = host_helper.swact_host(hostname="controller-1")
    assert rc == 0, "Failed to swact: {}".format(output)
    LOG.info("Swacted to controller-0 ......")

    # Complete upgrade
    LOG.tc_step("Completing upgrade from  {} to {}".format(current_version, upgrade_version))
    upgrade_helper.complete_upgrade()
    LOG.info("Upgrade is complete......")

    LOG.info("Lab: {} upgraded successfully".format(lab['name']))

    # Delete the previous load
    LOG.tc_step("Deleting  {} load... ".format(current_version))
    upgrade_helper.delete_imported_load()
    LOG.tc_step("Delete  previous load version {}".format(current_version))

    LOG.tc_step("Downloading images to upgraded {} lab ".format(upgrade_version))
    install_helper.download_image(lab, bld_server, BuildServerPath.GUEST_IMAGE_PATHS[upgrade_version])

    load_path = UpgradeVars.get_upgrade_var('TIS_BUILD_DIR')
    LOG.tc_step("Downloading heat templates to upgraded {} lab ".format(upgrade_version))
    install_helper.download_heat_templates(lab, bld_server, load_path)

    LOG.tc_step("Downloading lab config scripts to upgraded {} lab ".format(upgrade_version))
    install_helper.download_lab_config_files(lab, bld_server, load_path)