예제 #1
0
def CopyDhcpServerCfg(remote_node):
    """Install the local DHCP server config on *remote_node*.

    Removes any existing /etc/dhcp/dhcpd.conf on the remote node, then
    scp's the local config file over the management network.

    Args:
        remote_node: node name whose management IP will be targeted.
    """
    api.Logger.info("Copy DHCP server config.")
    dhcp_cfg = GetDhcpCfgLocation()
    # Resolve the management IP once instead of on every format() call.
    mgmt_ip = api.GetMgmtIPAddress(remote_node)
    api.Logger.info("Copying {} TO IP {}".format(dhcp_cfg, mgmt_ip))
    # NOTE(review): hard-coded password and StrictHostKeyChecking=no are
    # acceptable only inside a closed testbed network.
    cmd = "sshpass -p docker ssh -o StrictHostKeyChecking=no root@{} rm -f /etc/dhcp/dhcpd.conf".format(
        mgmt_ip)
    RunLocalCommand(cmd)
    cmd = "sshpass -p docker scp -o StrictHostKeyChecking=no {} root@{}:/etc/dhcp/dhcpd.conf".format(
        dhcp_cfg, mgmt_ip)
    RunLocalCommand(cmd)
예제 #2
0
def __readd_classic_workloads(target_node=None):
    """Re-register every known classic workload with the topology service.

    Args:
        target_node: if given, only workloads living on that node are
            re-added; workloads on other nodes are skipped.

    Returns:
        api.types.status.SUCCESS, or FAILURE when AddWorkloads returns None.
    """
    req = topo_svc.WorkloadMsg()
    req.workload_op = topo_svc.ADD

    for workload in api.GetWorkloads():
        # Honor the optional per-node filter.
        if target_node and target_node != workload.node_name:
            api.Logger.debug("Skipping add classic workload for node %s" %
                             workload.node_name)
            continue

        wl_msg = req.workloads.add()
        intf = wl_msg.interfaces.add()

        # Workload-level fields.
        wl_msg.workload_name = workload.workload_name
        wl_msg.node_name = workload.node_name
        wl_msg.workload_type = workload.workload_type
        wl_msg.workload_image = workload.workload_image
        wl_msg.mgmt_ip = api.GetMgmtIPAddress(wl_msg.node_name)

        # Interface-level fields, mirrored from the stored workload.
        intf.ip_prefix = workload.ip_prefix
        intf.ipv6_prefix = workload.ipv6_prefix
        intf.mac_address = workload.mac_address
        intf.encap_vlan = workload.encap_vlan
        intf.uplink_vlan = workload.uplink_vlan
        intf.pinned_port = workload.pinned_port
        intf.interface_type = workload.interface_type
        intf.interface = workload.parent_interface
        intf.parent_interface = workload.parent_interface
        intf.device_name = workload.device_name

    if len(req.workloads):
        if api.AddWorkloads(req) is None:
            return api.types.status.FAILURE
    return api.types.status.SUCCESS
예제 #3
0
def __add_workloads(redirect_port):
    """Create one workload per configured endpoint and register them.

    Non-DHCP endpoints get their IP prefixes assigned up front; DHCP-enabled
    endpoints are collected after AddWorkloads and have addresses acquired
    from the DHCP server.

    Args:
        redirect_port: when truthy, exposed ports are added to each workload.

    Returns:
        api.types.status.CRITICAL on workload-lookup or DHCP failure; exits
        the process if AddWorkloads fails.  Falls through (None) on success.
    """
    req = topo_svc.WorkloadMsg()
    req.workload_op = topo_svc.ADD

    for ep in config_api.GetEndpoints():
        wl_msg = req.workloads.add()
        # Make the workload_name unique across nodes by appending node-name
        wl_msg.workload_name = ep.name + ep.node_name
        wl_msg.node_name = ep.node_name
        intf = wl_msg.interfaces.add()
        if not ep.vnic.DhcpEnabled:
            # Static addressing: first address is primary, the rest secondary.
            intf.ip_prefix = ep.ip_addresses[0]
            intf.sec_ip_prefix.extend(ep.ip_addresses[1:])
        intf.mac_address = ep.macaddr
        # Tagged endpoints ride a VSS sub-interface; untagged carry no type.
        if ep.vlan != 0:
            intf.interface_type = topo_svc.INTERFACE_TYPE_VSS
        else:
            intf.interface_type = topo_svc.INTERFACE_TYPE_NONE
        intf.encap_vlan = ep.vlan
        interface = ep.interface
        # Idiom fix: identity test for None instead of '!= None'.
        if interface is not None:
            intf.interface = interface
        intf.parent_interface = intf.interface
        wl_msg.workload_type = api.GetWorkloadTypeForNode(wl_msg.node_name)
        wl_msg.workload_image = api.GetWorkloadImageForNode(wl_msg.node_name)
        wl_msg.mgmt_ip = api.GetMgmtIPAddress(wl_msg.node_name)
        if redirect_port:
            _add_exposed_ports(wl_msg)
        api.Logger.info(f"Workload {wl_msg.workload_name} "
                        f"Node {wl_msg.node_name} Intf {intf.interface} Parent-Intf {intf.parent_interface} "
                        f"IP {intf.ip_prefix} MAC {intf.mac_address} "
                        f"VLAN {intf.encap_vlan}")
    if len(req.workloads):
        api.Logger.info("Adding %d Workloads" % len(req.workloads))
        resp = api.AddWorkloads(req, skip_bringup=api.IsConfigOnly())
        if resp is None:
            sys.exit(1)

        # Second pass: copy configured addresses onto the DHCP-enabled
        # workload objects, then acquire their leases in one batch.
        dhcp_wl_list = []
        for ep in config_api.GetEndpoints():
            workload_name = ep.name + ep.node_name
            wl = api.GetWorkloadByName(workload_name)
            if wl is None:
                return api.types.status.CRITICAL

            wl.vnic = ep.vnic
            if wl.vnic.DhcpEnabled:
                dhcp_wl_list.append(wl)
                wl.ip_prefix = ep.ip_addresses[0]
                wl.ip_address = wl.ip_prefix.split('/')[0]
                wl.sec_ip_prefixes = []
                wl.sec_ip_addresses = []
                for secip in ep.ip_addresses[1:]:
                    wl.sec_ip_prefixes.append(secip)
                    wl.sec_ip_addresses.append(secip.split('/')[0])

        if len(dhcp_wl_list):
            if not dhcp_utils.AcquireIPFromDhcp(dhcp_wl_list):
                return api.types.status.CRITICAL
예제 #4
0
def __readd_classic_workloads(target_node=None, workloads=None):
    """Re-register existing classic workloads with the topology service.

    Args:
        target_node: if set, only workloads on this node are re-added.
        workloads: optional explicit workload list; defaults to all known
            workloads.

    Note:
        Bug fix: the default used to be a shared mutable list literal
        (``workloads=[]``); ``None`` behaves identically here because an
        empty list and ``None`` are both falsy in the fallback below.
    """
    req = topo_svc.WorkloadMsg()
    req.workload_op = topo_svc.ADD

    workloads = workloads if workloads else api.GetWorkloads()
    for wl in workloads:
        if target_node and target_node != wl.node_name:
            api.Logger.debug("Skipping add classic workload for node %s" % wl.node_name)
            continue

        wl_msg = req.workloads.add()
        intf = wl_msg.interfaces.add()
        intf.ip_prefix = wl.ip_prefix
        intf.ipv6_prefix = wl.ipv6_prefix
        intf.sec_ip_prefix.extend(wl.sec_ip_prefixes)
        intf.mac_address = wl.mac_address
        intf.encap_vlan = wl.encap_vlan
        intf.uplink_vlan = wl.uplink_vlan
        wl_msg.workload_name = wl.workload_name
        wl_msg.node_name = wl.node_name
        intf.pinned_port = wl.pinned_port
        intf.interface_type = wl.interface_type
        # Interface to be set to parent intf in vlan case, same as workloads
        # created first time.
        interface = wl.parent_interface
        # Idiom fix: identity test for None instead of '!= None'.
        if interface is not None:
            intf.interface = interface
        intf.parent_interface = wl.parent_interface
        wl_msg.workload_type = wl.workload_type
        wl_msg.workload_image = wl.workload_image
        wl_msg.mgmt_ip = api.GetMgmtIPAddress(wl_msg.node_name)

    if len(req.workloads):
        resp = api.AddWorkloads(req, skip_store=True)
        if resp is None:
            sys.exit(1)
예제 #5
0
    def CreateTaggedWorkloads(self, req):
        """Append one VLAN-tagged sub-interface workload per device/sub-if to *req*.

        For every device and every sub-interface index, allocates primary
        IPv4/IPv6 addresses, a MAC and a VLAN from the matching network
        spec, then fills in the workload message.  Secondary IPv4/IPv6
        prefixes are appended from the spec's secondary allocators.
        """
        for device_name, interfaces in self.__interfaces.items():
            for subif_indx in range(self.__num_subifs):
                wl_msg = req.workloads.add()
                intf = wl_msg.interfaces.add()
                # Round-robin sub-interfaces over the available host
                # interfaces.
                intfObj = interfaces[subif_indx % len(
                    interfaces
                )]  # TODO enhance for unequal distribution of workloads
                # Network spec keyed by the device-name suffix and sub-if
                # index.
                nw_spec = self.GetSubInterfaceNetworkSpec(
                    device_name[-1:], subif_indx, self.__wlSpec.spec)
                ipv4_allocator = nw_spec.GetPrimaryIPv4Allocator(
                )  # ipv4_allocators[i]
                ipv6_allocator = nw_spec.GetPrimaryIPv6Allocator(
                )  # ipv6_allocators[i]
                vlan = nw_spec.GetVLAN()  # vlans[i]
                # Each Alloc() consumes the next free address from the spec.
                ip4_addr_str = str(ipv4_allocator.Alloc())
                ip6_addr_str = str(ipv6_allocator.Alloc())
                intf.ip_prefix = ip4_addr_str + "/" + str(
                    nw_spec.GetIPv4PrefixLength())
                intf.ipv6_prefix = ip6_addr_str + "/" + str(
                    nw_spec.GetIPv6PrefixLength())
                intf.mac_address = nw_spec.GetClassicMacAllocator().Alloc(
                ).get()
                intf.encap_vlan = vlan
                intf.uplink_vlan = intf.encap_vlan
                wl_msg.node_name = self.GetNodeName()
                intf.pinned_port = 1
                intf.interface_type = intfObj.InterfaceType
                # node_intf = node_ifs[wl.node][int(intfObj.replace('host_if', '')) - 1]
                # Bare-metal nodes name the sub-interface after the VLAN;
                # other workload types use macvlan-style "_mv<N>" naming.
                if api.GetWorkloadTypeForNode(
                        self.GetNodeName()) == topo_svc.WorkloadType.Value(
                            'WORKLOAD_TYPE_BARE_METAL'):
                    intf.interface = intfObj.HostInterface + "_" + str(vlan)
                    wl_msg.workload_name = self.GetNodeName(
                    ) + "_" + intfObj.HostInterface + "_subif_" + str(vlan)
                else:
                    intf.interface = intfObj.HostInterface + "_mv%d" % (
                        subif_indx + 1)
                    wl_msg.workload_name = self.GetNodeName(
                    ) + "_" + intfObj.HostInterface + "_mv%d" % (subif_indx +
                                                                 1)
                intf.parent_interface = intfObj.ParentHostInterface
                intf.interface_index = intfObj.InterfaceIndex
                intf.device_name = device_name
                wl_msg.workload_type = api.GetWorkloadTypeForNode(
                    self.GetNodeName())
                wl_msg.workload_image = api.GetWorkloadImageForNode(
                    self.GetNodeName())
                wl_msg.mgmt_ip = api.GetMgmtIPAddress(wl_msg.node_name)
                self.__add_exposed_ports(wl_msg)
                # Secondary address allocators yield one prefix per entry.
                for a in nw_spec.GetSecondaryIpv4Allocator(
                ):  # sec_ipv4_allocators[subif_indx]:
                    wl_msg.sec_ip_prefix.append(
                        str(next(a)) + "/" + str(nw_spec.ipv4.prefix_length))
                for a in nw_spec.GetSecondaryIpv6Allocator(
                ):  # sec_ipv6_allocators[subif_indx]:
                    wl_msg.sec_ipv6_prefix.append(
                        str(next(a)) + "/" + str(nw_spec.ipv6.prefix_length))
예제 #6
0
def Setup(tc):
    """Identify the tester (intel NIC) and DUT nodes and record mgmt IPs on *tc*."""
    # Skip the iterator cases this test does not cover.
    if skip_curr_test(tc):
        return api.types.status.SUCCESS

    # Node bookkeeping defaults and response accumulator.
    tc.tester_node = None
    tc.tester_node_name = None
    tc.dut_node = None
    tc.resp = []

    if not api.GetWorkloads():
        api.Logger.error('No workloads available')
        return api.types.status.FAILURE

    # Classify each node: intel NIC => tester, anything else => DUT.
    tc.nodes = api.GetNodes()
    for node in tc.nodes:
        name = node.Name()
        mgmt_ip = api.GetMgmtIPAddress(name)
        if api.GetNicType(name) == 'intel':
            tc.tester_node = node
            tc.tester_node_name = name
            tc.tester_node_mgmt_ip = mgmt_ip
            api.Logger.info('tester node: %s mgmt IP: %s' % (name, mgmt_ip))
        else:
            tc.dut_node = node
            tc.dut_node_mgmt_ip = mgmt_ip
            api.Logger.info('dut node: %s mgmt IP: %s' % (name, mgmt_ip))

    return api.types.status.SUCCESS
예제 #7
0
def RebootHost(n):
    """Reboot node *n*, wait until it answers ping, then re-enable penctl ssh.

    Args:
        n: node name to reboot.

    Returns:
        api.types.status.FAILURE if the node never becomes reachable within
        6 ping attempts (10s apart), otherwise api.types.status.SUCCESS.
    """
    nodes = [n]
    api.Logger.info("Rebooting Host {}".format(n))
    api.RestartNodes(nodes)

    # Poll until the host answers ping (up to 6 attempts, 10s apart).
    reqPing = api.Trigger_CreateExecuteCommandsRequest(serial=True)
    cmd = "ping -c 2 " + api.GetMgmtIPAddress(n)
    api.Trigger_AddHostCommand(reqPing, n, cmd)

    reachable = False
    retryCount = 0
    while retryCount < 6:
        retryCount += 1
        respPing = api.Trigger(reqPing)
        if len(respPing.commands) != 1:
            continue
        if respPing.commands[0].exit_code == 0:
            reachable = True
            break
        api.Logger.info(
            "Ping to the host failed. Ping Response {}".format(respPing))
        time.sleep(10)

    if not reachable:
        api.Logger.info(
            "Nodes were unreachable after reboot. {}".format(nodes))
        # Bug fix: the original evaluated api.types.status.FAILURE without
        # returning it, so an unreachable node was reported as success.
        # (Using a 'reachable' flag also fixes the case where the 6th ping
        # attempt succeeded but retryCount == 6 looked like a timeout.)
        return api.types.status.FAILURE

    api.Logger.info("Successfully rebooted host {}".format(n))
    req = api.Trigger_CreateExecuteCommandsRequest(serial=True)
    enable_sshd = "system enable-sshd"
    copy_key = "update ssh-pub-key -f ~/.ssh/id_rsa.pub"
    # Loop variable renamed from 'n' so it no longer shadows the parameter.
    for node in nodes:
        # hack for now, need to set date
        AddPenctlCommand(req, node, enable_sshd)
        AddPenctlCommand(req, node, copy_key)

    api.Trigger(req)

    return api.types.status.SUCCESS
예제 #8
0
    def CreateNativeWorkloads(self, req):
        """Append one native (untagged, vlan 0) workload per host interface to *req*."""
        for device_name, interfaces in self.__interfaces.items():
            nic_hint = api.GetDeviceHint(self.__node, device_name)
            for intfObj in interfaces:
                wl_msg = req.workloads.add()
                intf = wl_msg.interfaces.add()
                nw_spec = self.GetNativeNetworkSpec(
                    nic_hint, intfObj.LogicalInterface, self.__wlSpec.spec)

                # Allocate the primary IPv4/IPv6 addresses from the spec.
                ip4_addr_str = str(nw_spec.GetPrimaryIPv4Allocator().Alloc())
                ip6_addr_str = str(nw_spec.GetPrimaryIPv6Allocator().Alloc())
                vlan = 0  # native workloads are untagged

                intf.ip_prefix = "{}/{}".format(
                    ip4_addr_str, nw_spec.GetIPv4PrefixLength())
                intf.ipv6_prefix = "{}/{}".format(
                    ip6_addr_str, nw_spec.GetIPv6PrefixLength())
                intf.encap_vlan = vlan
                intf.uplink_vlan = intf.encap_vlan
                intf.pinned_port = 1
                intf.interface_type = intfObj.InterfaceType
                intf.interface = intfObj.HostInterface
                intf.parent_interface = intfObj.ParentHostInterface
                intf.interface_index = intfObj.InterfaceIndex
                intf.device_name = device_name

                wl_msg.workload_name = "{}_{}_subif_{}".format(
                    self.GetNodeName(), intfObj.HostInterface, vlan)
                wl_msg.node_name = self.GetNodeName()
                wl_msg.workload_type = api.GetWorkloadTypeForNode(
                    self.GetNodeName())
                wl_msg.workload_image = api.GetWorkloadImageForNode(
                    self.GetNodeName())
                wl_msg.mgmt_ip = api.GetMgmtIPAddress(wl_msg.node_name)
                self.__add_exposed_ports(wl_msg)
예제 #9
0
def NodeInit(n):
    """Run /naples/nodeinit.sh on node *n* over ssh (testbed password auth)."""
    mgmt_ip = api.GetMgmtIPAddress(n)
    cmd = ("sshpass -p docker ssh -o StrictHostKeyChecking=no "
           "root@{} /naples/nodeinit.sh").format(mgmt_ip)
    RunLocalCommand(cmd)
    return api.types.status.SUCCESS
예제 #10
0
def AddConfigWorkloads(req, nodes=None):
    """Populate *req* with one workload per configured Endpoint object.

    The interface/VLAN wiring of each workload depends on the testbed NIC
    mode of its node (hostpin, hostpin_dvs, classic, unified).

    Args:
        req: topo_svc.WorkloadMsg to append workloads to (op set to ADD).
        nodes: optional collection of node names; endpoints on other nodes
            are skipped.

    Returns:
        api.types.status.SUCCESS, or FAILURE on an unknown NIC mode.
    """
    mva = api.GetMultiVlanAllocators()
    third_party_workload_count = 0
    ep_objs = netagent_api.QueryConfigs(kind='Endpoint')
    for ep in ep_objs:
        node_name = getattr(ep.spec, "_node_name", None)
        if not node_name:
            node_name = ep.spec.node_uuid
        if nodes and node_name not in nodes:
            api.Logger.debug("Skipping add workload for node %s" % node_name)
            continue
        if not api.IsNaplesNode(node_name):
            #if api.GetConfigNicMode() == 'hostpin' and third_party_workload_count > 0:
            #    continue
            third_party_workload_count += 1
        req.workload_op = topo_svc.ADD
        wl_msg = req.workloads.add()
        wl_msg.workload_name = ep.meta.name
        wl_msg.node_name = node_name
        intf = wl_msg.interfaces.add()
        intf.ip_prefix = __prepare_ip_address_str_for_endpoint(ep)
        #wl_msg.ipv6_prefix = __prepare_ipv6_address_str_for_endpoint(ep)
        intf.mac_address = ep.spec.mac_address
        intf.pinned_port = 1
        intf.interface_type = topo_svc.INTERFACE_TYPE_VSS

        encap_vlan = getattr(ep.spec, 'useg_vlan', None)
        host_if = None
        # Consistency fix: look the NIC mode up once per endpoint; the
        # original hostpin_dvs branch called GetTestbedNicMode() without
        # node_name while every sibling branch passed it.
        nic_mode = api.GetTestbedNicMode(node_name)
        if nic_mode == 'hostpin':
            host_if = api.AllocateHostInterfaceForNode(wl_msg.node_name)
            intf.uplink_vlan = __get_l2segment_vlan_for_endpoint(ep)
            if api.GetNicType(wl_msg.node_name) in ['pensando', 'naples']:
                intf.encap_vlan = encap_vlan if encap_vlan else intf.uplink_vlan
            else:
                intf.encap_vlan = intf.uplink_vlan
        elif nic_mode == 'hostpin_dvs':
            host_if = api.AllocateHostInterfaceForNode(wl_msg.node_name)
            intf.interface_type = topo_svc.INTERFACE_TYPE_DVS_PVLAN
            intf.switch_name = api.GetVCenterDVSName()
            intf.uplink_vlan = __get_l2segment_vlan_for_endpoint(ep)
            if api.GetNicType(wl_msg.node_name) in ['pensando', 'naples']:
                intf.encap_vlan = ep.spec.primary_vlan
                intf.secondary_encap_vlan = ep.spec.secondary_vlan
            else:
                intf.encap_vlan = intf.uplink_vlan

        elif nic_mode == 'classic':
            global classic_vlan_map
            if mva:
                node_vlan = api.Testbed_AllocateVlan()
            else:
                node_vlan = classic_vlan_map.get(node_name)
            if not node_vlan:
                node_vlan = NodeVlan(node_name)
                classic_vlan_map[node_name] = node_vlan
            #Allocate only if useg_vlan present.
            host_if, wire_vlan = node_vlan.AllocateHostIntfWireVlan(encap_vlan)
            if not node_vlan.IsNativeVlan(wire_vlan):
                #Set encap vlan if its non native.
                intf.encap_vlan = wire_vlan
                intf.uplink_vlan = wire_vlan

        elif nic_mode == 'unified':
            host_if = api.AllocateHostInterfaceForNode(wl_msg.node_name)
            intf.ipv6_prefix = __prepare_ipv6_address_str_for_endpoint(ep)
            intf.uplink_vlan = __get_l2segment_vlan_for_endpoint(ep)
            intf.encap_vlan = intf.uplink_vlan

        else:
            # Unknown NIC mode: bail out rather than emit a broken workload.
            return api.types.status.FAILURE

        intf.interface = host_if
        intf.parent_interface = host_if

        wl_msg.workload_type = api.GetWorkloadTypeForNode(wl_msg.node_name)
        wl_msg.workload_image = api.GetWorkloadImageForNode(wl_msg.node_name)
        wl_msg.mgmt_ip = api.GetMgmtIPAddress(wl_msg.node_name)
        wl_msg.cpus = api.GetWorkloadCpusForNode(wl_msg.node_name)
        wl_msg.memory = api.GetWorkloadMemoryForNode(wl_msg.node_name)
    return api.types.status.SUCCESS
예제 #11
0
def Setup(tc):
    """Prepare tester/DUT nodes and stage the DPDK test suite (DTS) on the tester.

    Identifies the intel-NIC node as the tester and the other node as the
    DUT, builds a tar.gz of the dpdk + dpdk-test sources, copies it to the
    tester, extracts and configures DTS there, and finally disables the
    DUT's internal mnic interfaces.

    Returns:
        api.types.status.SUCCESS, or FAILURE when no workloads exist.
    """
    # skip some iterator cases
    if skip_curr_test(tc):
        return api.types.status.SUCCESS

    # node init
    tc.tester_node = None
    tc.tester_node_name = None
    tc.dut_node = None

    # init response list
    tc.resp = []

    workloads = api.GetWorkloads()
    if len(workloads) == 0:
        api.Logger.error('No workloads available')
        return api.types.status.FAILURE

    # initialize tester-node and dut-node.
    tc.nodes = api.GetNodes()
    for node in tc.nodes:
        if api.GetNicType(node.Name()) == 'intel':
            tc.tester_node = node
            tc.tester_node_name = node.Name()
            tc.tester_node_mgmt_ip = api.GetMgmtIPAddress(node.Name())
            api.Logger.info('tester node: %s mgmt IP: %s' %
                            (node.Name(), tc.tester_node_mgmt_ip))
        else:
            tc.dut_node = node
            tc.dut_node_mgmt_ip = api.GetMgmtIPAddress(node.Name())
            api.Logger.info('dut node: %s mgmt IP: %s' %
                            (node.Name(), tc.dut_node_mgmt_ip))

    # create tar.gz file of dpdk and dpdk-test
    sdk_fullpath = api.GetTopDir() + SDK_SRC_PATH
    dpdk_tar_path = api.GetTopDir() + DPDK_TAR_FILE

    # Bug fix: open the archive in a context manager so it is closed (and
    # flushed) even if tar.add raises.  NOTE(review): the chdir calls change
    # the process-wide cwd and are intentionally left as in the original.
    with tarfile.open(dpdk_tar_path, "w:gz") as tar:
        os.chdir(sdk_fullpath)
        tar.add("dpdk")
        os.chdir("dpdk-test")
        for name in os.listdir("."):
            tar.add(name)

    api.Logger.info("dpdk-test tarfile location is: " + dpdk_tar_path)

    api.Logger.info("Configuring DTS on " + tc.tester_node_mgmt_ip)

    # copy dpdk-test.tar.gz to tester node.
    api.CopyToHost(tc.tester_node.Name(), [dpdk_tar_path], "")

    # untar dpdk-test.tar.gz and configure tester to run DTS
    req = api.Trigger_CreateExecuteCommandsRequest()
    trig_cmd1 = "tar -xzvf dpdk-test.tar.gz"
    trig_cmd2 = "scripts/config_tester.sh %s %s" % (tc.dut_node_mgmt_ip,
                                                    tc.tester_node_mgmt_ip)
    api.Trigger_AddHostCommand(req,
                               tc.tester_node.Name(),
                               trig_cmd1,
                               timeout=60)
    api.Trigger_AddHostCommand(req,
                               tc.tester_node.Name(),
                               trig_cmd2,
                               timeout=60)
    trig_resp = api.Trigger(req)
    tc.resp.append(trig_resp)

    # disable internal mnic
    cmd = "ifconfig inb_mnic0 down && ifconfig inb_mnic1 down"
    resp = api.RunNaplesConsoleCmd(tc.dut_node.Name(), cmd)

    return api.types.status.SUCCESS
예제 #12
0
def Trigger(tc):
    """Repeatedly PXE-install an OS on tc.test_node and verify each install.

    Per iteration: save IOTA agent state, drop a marker file on the current
    filesystem, PXE-boot via IPMI to reinstall the OS, boot back to disk,
    reinstall driver images, restore agent state, re-add workloads, and
    verify the marker file is gone (proving a fresh filesystem).

    Returns:
        api.types.status.SUCCESS or FAILURE.

    Raises:
        OfflineTestbedException: when the testbed is left unusable (save
            failed, host never came back, or image/agent restore failed).
    """
    cimc_info = tc.test_node.GetCimcInfo()

    cimc_ip_address = cimc_info.GetIp()
    cimc_username = cimc_info.GetUsername()
    cimc_password = cimc_info.GetPassword()

    host_ipaddr = api.GetMgmtIPAddress(tc.test_node_name)

    # Idiom fix: compare status values with ==, not identity ('is').
    if reboot.checkLinks(tc, tc.test_node_name) == api.types.status.FAILURE:
        api.Logger.error("Error verifying uplink interfaces")
        return api.types.status.FAILURE

    for install in range(tc.args.install_iterations):

        # save agent state so it can be restored onto the fresh OS
        api.Logger.info(f"Saving node: {tc.test_node_name}")
        if api.SaveIotaAgentState([tc.test_node_name]) == api.types.status.FAILURE:
            raise OfflineTestbedException

        # touch the file on server to ensure this instance of OS is gone later
        req = api.Trigger_CreateExecuteCommandsRequest()
        touch_file_cmd = "touch /naples/oldfs"
        api.Trigger_AddHostCommand(req, tc.test_node_name, touch_file_cmd)
        resp = api.Trigger(req)

        if api.Trigger_IsSuccess(resp) is not True:
            api.Logger.error(f"Failed to run command on host {tc.test_node_name}, {touch_file_cmd}")
            return api.types.status.FAILURE

        # Boot from PXE to install an OS.
        # NOTE(review): ipmitool is run via shell=True with interpolated
        # credentials; values come from testbed config, but list-form
        # subprocess.run(..., shell=False) would be safer.
        api.Logger.info(f"Starting PXE Install Loop # {install} on {tc.test_node_name}")
        cmd = "ipmitool -I lanplus -H %s -U %s -P %s chassis bootdev pxe options=efiboot" %\
              (cimc_ip_address, cimc_username, cimc_password)
        subprocess.check_call(cmd, shell=True)
        time.sleep(5)

        # reboot server
        cmd = "ipmitool -I lanplus -H %s -U %s -P %s chassis power cycle" %\
              (cimc_ip_address, cimc_username, cimc_password)
        subprocess.check_call(cmd, shell=True)
        time.sleep(180)

        # wait for installation to finish and server to come back
        api.Logger.info(f"Waiting for host to come up: {host_ipaddr}")
        if not waitforssh(host_ipaddr):
            raise OfflineTestbedException

        # Boot from HDD to run the test
        api.Logger.info(f"Setting Boot Order to HDD and rebooting {tc.test_node_name}")
        cmd = "ipmitool -I lanplus -H %s -U %s -P %s chassis bootdev disk options=efiboot" %\
              (cimc_ip_address, cimc_username, cimc_password)
        subprocess.check_call(cmd, shell=True)
        time.sleep(5)

        # reboot server
        cmd = "ipmitool -I lanplus -H %s -U %s -P %s chassis power cycle" %\
              (cimc_ip_address, cimc_username, cimc_password)
        subprocess.check_call(cmd, shell=True)

        api.Logger.info(f"Waiting for host to come up: {host_ipaddr}")
        time.sleep(180)

        if not waitforssh(host_ipaddr):
            raise OfflineTestbedException

        # restore images and agent state onto the fresh OS
        api.Logger.info(f"Restoring node: {tc.test_node_name}")
        resp = api.ReInstallImage(fw_version=None, dr_version="latest")
        if resp != api.types.status.SUCCESS:
            api.Logger.error(f"Failed to install images on the testbed")
            raise OfflineTestbedException

        resp = api.RestoreIotaAgentState([tc.test_node_name])
        if resp != api.types.status.SUCCESS:
            api.Logger.error(f"Failed to restore agent state after PXE install")
            raise OfflineTestbedException
        api.Logger.info(f"PXE install iteration #{install} - SUCCESS")

        try:
            wl_api.ReAddWorkloads(tc.test_node_name)
        except Exception:
            # Bug fix: the original bare 'except:' also swallowed
            # SystemExit/KeyboardInterrupt; restrict to Exception.
            api.Logger.error(f"ReaddWorkloads failed with exception - See logs for details")
            return api.types.status.FAILURE

        # check touched file is not present, to ensure this is a new OS instance
        oldfs_command = "ls /naples/oldfs"
        req = api.Trigger_CreateExecuteCommandsRequest()
        api.Trigger_AddHostCommand(req, tc.test_node_name, oldfs_command)
        resp = api.Trigger(req)

        if api.IsApiResponseOk(resp) is not True:
            api.Logger.error(f"Failed to run command on host {tc.test_node_name} {oldfs_command}")
            return api.types.status.FAILURE

        cmd = resp.commands.pop()
        if cmd.exit_code == 0:
            api.Logger.error(f"Old file is present in FS after PXE install")
            return api.types.status.FAILURE

        api.Logger.info("PXE boot completed! Host is up.")

    return api.types.status.SUCCESS