Example #1
def Teardown(tc):
    # sleep briefly so the last vmotion occurrence gets updated on the old (src) node
    time.sleep(5)
    if tc.GetStatus() != api.types.status.SUCCESS:
        api.Logger.info("verify failed, returning without teardown")
        return tc.GetStatus()
    vm_threads = []
    for wl_info in tc.move_info:
        # if both new_node and old_node handles exist, a vmotion was triggered;
        # clean up and restore state to match the DB in iota
        if (wl_info.new_node and wl_info.old_node):
            vm_thread = threading.Thread(target=triggerVmotion,
                                         args=(
                                             tc,
                                             wl_info.wl,
                                             wl_info.old_node,
                                         ))
            vm_threads.append(vm_thread)
            vm_thread.start()
            if (api.IsNaplesNode(wl_info.old_node)):
                create_ep_info(tc, wl_info.wl, wl_info.old_node, "START",
                               wl_info.new_node)
    for vm_thread in vm_threads:
        vm_thread.join()
    for wl_info in tc.move_info:
        vm_utils.update_ep_migr_status(tc, wl_info.wl, wl_info.old_node,
                                       "DONE")
        if (api.IsNaplesNode(wl_info.new_node)):
            delete_ep_info(tc, wl_info.wl, wl_info.new_node)
    return api.types.status.SUCCESS
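For context, a minimal sketch of the per-workload record this Teardown expects in tc.move_info. The field names (wl, old_node, new_node) come from the code above; the container type and node names are assumptions for illustration:

# Hypothetical shape of a tc.move_info entry; SimpleNamespace is only an
# illustrative container, not the type iota actually uses.
from types import SimpleNamespace

move_info = [SimpleNamespace(wl=None, old_node="node1", new_node="node2")]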
Example #2
def Setup(tc):
    vm_threads = []
    node_list = []
    node = getattr(tc.args, "node", None)
    if node:
        node_list.append(node)
    else:
        # add all nodes in the topology
        nodes = api.GetNodes()
        for node in nodes:
            node_list.append(node.Name())
    tc.uuidMap = api.GetNaplesNodeUuidMap()
    for node in node_list:
        (wls, new_node) = getWorkloadsToRemove(tc, node)
        for wl in wls:
            api.Logger.info("Moving wl {} from node {} to node {}".format(
                wl.workload_name, wl.node_name, new_node))
            vm_thread = threading.Thread(target=triggerVmotion,
                                         args=(
                                             tc,
                                             wl,
                                             new_node,
                                         ))
            vm_threads.append(vm_thread)
            vm_thread.start()
            if (api.IsNaplesNode(new_node)):
                create_ep_info(tc, wl, new_node, "START", node)
        for vm_thread in vm_threads:
            vm_thread.join()
        for wl in wls:
            if (api.IsNaplesNode(node)):
                delete_ep_info(tc, wl, node)
    return api.types.status.SUCCESS
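Note: this Setup unpacks getWorkloadsToRemove(tc, node) as a (workloads, destination-node) pair. A hypothetical stub illustrating that contract; the move-everything-to-the-first-other-node policy is an assumption, not the real helper:

def getWorkloadsToRemove(tc, node):
    # Hypothetical stub: return the workloads to evacuate from `node` and
    # the node they should land on. The selection policy here is assumed.
    wls = api.GetWorkloads(node)
    others = [n.Name() for n in api.GetNodes() if n.Name() != node]
    return (wls, others[0])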
Example #3
def Verify(tc):
    nodes = api.GetWorkloadNodeHostnames()
    if api.IsNaplesNode(nodes[0]) and api.IsNaplesNode(nodes[1]):
        return api.types.status.DISABLED
    if tc.resp is None:
        return api.types.status.FAILURE

    file1 = tc.GetLogsDir() + '/tcp_proxy_client.dat'
    file2 = tc.GetLogsDir() + '/tcp_proxy_server.dat'
    if not filecmp.cmp(file1, file2, shallow=False):
        api.Logger.error("Client and server files do not match")
        return api.types.status.FAILURE

    result = api.types.status.SUCCESS
    cookie_idx = 0
    api.Logger.info("NC Results for %s" % (tc.cmd_descr))
    for cmd in tc.resp.commands:
        api.Logger.info("%s" % (tc.cmd_cookies[cookie_idx]))
        api.PrintCommandResults(cmd)
        if cmd.exit_code != 0 and not api.Trigger_IsBackgroundCommand(cmd):
            # TODO: nc sometimes appears to get stuck; needs debugging
            #result = api.types.status.FAILURE
            pass
        cookie_idx += 1
    return result
Example #4
def getNativeWorkloads():
    # Get all available host interfaces
    lst_nodes = api.GetWorkloadNodeHostnames()
    lst_native_if = {}
    mgmt_intf = {}

    # Get host interfaces on all nodes
    for node in lst_nodes:
        lst_native_if[node] = list(api.GetWorkloadNodeHostInterfaces(node))
        # Exclude the host-side management interface on Naples nodes from this test
        if api.IsNaplesNode(node):
            mgmt_intf[node] = list(
                naples_utils.GetHostInternalMgmtInterfaces(node))

    for node, infs in mgmt_intf.items():
        for inf in infs:
            if inf in lst_native_if[node]:
                lst_native_if[node].remove(inf)

    # Get workloads corresponding to host interfaces
    total_workloads = api.GetWorkloads()
    native_workloads = []
    for wl in total_workloads:
        for node, infs in lst_native_if.items():
            if wl.interface in infs and wl.node_name == node:
                native_workloads.append(wl)

    return native_workloads
Example #5
def Setup(tc):
    host = None
    for _node in api.GetNaplesHostnames():
        if api.IsNaplesNode(_node) and api.GetNodeOs(_node) == "linux":
            host = _node
            break

    if not host:
        api.Logger.error("Unable to find a Naples node with linux os")
        return api.types.status.ERROR

    tc.host = host
    tc.pf = api.GetNaplesHostInterfaces(host)[0]
    tc.num_vfs = GetSupportedVFs(tc.host, tc.pf)
    api.Logger.info("Host %s PF %s supports %d VFs" %
                    (tc.host, tc.pf, tc.num_vfs))

    if tc.num_vfs == 0:
        api.Logger.warn(
            "Max supported VFs on host %s is 0, expected non-zero" % host)
        return api.types.status.ERROR

    if CreateVFs(tc.host, tc.pf, tc.num_vfs) != api.types.status.SUCCESS:
        return api.types.status.ERROR

    return api.types.status.SUCCESS
Example #6
def UpdateNodeUuidEndpoints(objects):
    # allocate a pvlan pair per endpoint for the DVS
    if api.GetConfigNicMode() == 'hostpin_dvs':
        pvlan_start = api.GetPVlansStart()
        pvlan_end = api.GetPVlansEnd()
        index = 0
        for obj in objects:
            obj.spec.primary_vlan = pvlan_start + index
            obj.spec.secondary_vlan = pvlan_start + index + 1
            obj.spec.useg_vlan = obj.spec.primary_vlan
            index = index + 2
            # compare the next vlan against the absolute end of the range
            if pvlan_start + index > pvlan_end:
                assert 0, "pvlan range exhausted"

    agent_uuid_map = api.GetNaplesNodeUuidMap()
    for ep in objects:
        node_name = getattr(ep.spec, "_node_name", None)
        if not node_name:
            node_name = ep.spec.node_uuid
        assert (node_name)
        if api.IsNaplesNode(node_name):
            ep.spec.node_uuid = formatMac(GetNaplesUUID(node_name))
        else:
            ep.spec.node_uuid = agent_uuid_map[node_name]
        ep.spec._node_name = node_name
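The pvlan loop above gives object i the pair (pvlan_start + 2i, pvlan_start + 2i + 1). A self-contained sketch of the same arithmetic, assuming pvlan_end is the absolute last vlan of the allocated range:

def allocate_pvlan_pairs(pvlan_start, pvlan_end, count):
    # Sketch of the pairing used above: object i gets primary/secondary
    # vlans (start + 2i, start + 2i + 1) until the range is exhausted.
    pairs = []
    for i in range(count):
        primary = pvlan_start + 2 * i
        secondary = primary + 1
        if secondary > pvlan_end:
            raise ValueError("pvlan range exhausted")
        pairs.append((primary, secondary))
    return pairs

# allocate_pvlan_pairs(100, 107, 4) == [(100, 101), (102, 103), (104, 105), (106, 107)]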
Example #7
def create_ep_info(tc, wl, dest_node, migr_state, src_node):
    # get a naples handle to move to
    ep_filter = "meta.name=" + wl.workload_name + ";"
    if not hasattr(tc, 'dsc_conn_type'):
        api.Logger.info("setting dsc_conn_type to oob")
        tc.dsc_conn_type = 'oob'
    objects = agent_api.QueryConfigs("Endpoint", filter=ep_filter)
    assert(len(objects) == 1)
    obj = copy.deepcopy(objects[0])
    # delete the endpoint being moved on the new host, TEMP
    agent_api.DeleteConfigObjects([obj], [dest_node], ignore_error=True)

    # sleep to let the delete clean up all sessions/handles
    time.sleep(1)

    obj.spec.node_uuid = tc.uuidMap[dest_node]
    obj.spec.migration = migr_state
    if (api.IsNaplesNode(src_node)):
        obj.status.node_uuid = tc.uuidMap[src_node]
        if (tc.dsc_conn_type == "oob"):
            obj.spec.homing_host_address = api.GetNicMgmtIP(src_node)
        else:
            obj.spec.homing_host_address = api.GetBondIp(src_node)
    else:
        obj.status.node_uuid = "0011.2233.4455"  # TEMP
        obj.spec.homing_host_address = "169.169.169.169"  # TEMP
    # this triggers the endpoint on the new host (naples) to set up flows
    agent_api.PushConfigObjects([obj], [dest_node], ignore_error=True)
Example #8
def vm_move_back(tc):
    vm_threads = []
    # if both new_node and old_node handles exist, a vmotion was triggered;
    # clean up and restore state to match the DB in iota
    if (tc.new_node and tc.old_node):
        vm_thread = threading.Thread(target=triggerVmotion,
                                     args=(tc, tc.wl, tc.old_node,))
        vm_threads.append(vm_thread)
        vm_thread.start()
        if (api.IsNaplesNode(tc.old_node)):
            create_ep_info(tc, tc.wl, tc.old_node, "START", tc.new_node)
    for vm_thread in vm_threads:
        vm_thread.join()
    update_ep_migr_status(tc, tc.wl, tc.old_node, "DONE")
    if (api.IsNaplesNode(tc.new_node)):
        delete_ep_info(tc, tc.wl, tc.new_node)
    return api.types.status.SUCCESS
Example #9
def __update_endpoint_info(tc):
    for dest_host, workloads in tc.vmotion_cntxt.MoveRequest.items():
        api.Logger.debug(
            "Creating endpoint info at %s for workloads being moved" %
            dest_host)
        if not api.IsNaplesNode(dest_host):
            continue
        for wl in workloads:
            api.Logger.debug("Updating ep-info for %s" % wl.workload_name)
            ep_filter = "meta.name=" + wl.workload_name + ";"
            objects = agent_api.QueryConfigs("Endpoint", filter=ep_filter)
            assert (len(objects) == 1)
            obj = copy.deepcopy(objects[0])
            # update to indicate completion of vmotion
            obj.spec.migration = "DONE"
            obj.spec.node_uuid = tc.vmotion_cntxt.UUIDMap[dest_host]
            resp = agent_api.UpdateConfigObjects([obj], [dest_host],
                                                 ignore_error=True)
            if resp != api.types.status.SUCCESS:
                api.Logger.error(
                    "Migration-status DONE update failed for %s on %s" %
                    (wl.workload_name, dest_host))

            # update to keep new node happy, only in iota
            obj.spec.migration = None
            obj.spec.node_uuid = tc.vmotion_cntxt.UUIDMap[dest_host]
            resp = agent_api.UpdateConfigObjects([obj], [dest_host],
                                                 ignore_error=True)
            if resp != api.types.status.SUCCESS:
                api.Logger.error(
                    "Migration-state reset to None failed for %s on %s" %
                    (wl.workload_name, dest_host))
    api.Logger.debug("Completed endpoint update at NewHome")
    return
Example #10
def do_vmotion(tc, dsc_to_dsc):
    vm_threads = []
    wls = []

    for wl_info in tc.move_info:
        api.Logger.info("moving wl {} from node {} to node {}".format(wl_info.wl.workload_name, wl_info.old_node, wl_info.new_node))
        wls.append(wl_info.wl)
        if (api.IsNaplesNode(wl_info.wl.node_name)):
            wl_info.sess_info_before = get_session_info(tc, wl_info.wl)

    # all workloads move to the same destination node (from the last wl_info)
    vm_thread = threading.Thread(target=triggerVmotions,
                                 args=(tc, wls, wl_info.new_node,))
    vm_threads.append(vm_thread)
    vm_thread.start()

    for wl_info in tc.move_info:
        create_ep_info(tc, wl_info.wl, wl_info.new_node, "START",
                       wl_info.old_node)

    # wait for the vmotion thread to complete, i.e. vmotion is done on vcenter
    for vm_thread in vm_threads:
        vm_thread.join()
    for wl_info in tc.move_info:
        update_ep_migr_status(tc, wl_info.wl, wl_info.new_node, "DONE")
        # delete_ep_info(tc, wl_info.wl, wl_info.old_node)
    if tc.resp != api.types.status.SUCCESS:
        api.Logger.info("vmotion failed")
        return api.types.status.FAILURE
    else:
        api.Logger.info("vmotion successful")
        return api.types.status.SUCCESS
Example #11
def Setup(tc):

    tc.skip = False
    node_names = api.GetWorkloadNodeHostnames()

    if api.IsNaplesNode(node_names[0]):
        tc.naples_node = node_names[0]
        tc.peer_node = node_names[1]
    elif api.IsNaplesNode(node_names[1]):
        tc.naples_node = node_names[1]
        tc.peer_node = node_names[0]
    else:
        api.Logger.verbose("Skipping as there are no Naples nodes")
        tc.skip = True
        return api.types.status.IGNORED

    tc.on_host = {}

    tc.host_intfs = list(api.GetNaplesHostInterfaces(tc.naples_node))
    for intf in tc.host_intfs:
        tc.on_host[intf] = True

    # Host-side mgmt interfaces used for the network connection to Naples over PCIe
    tc.host_int_intfs = naples_host_utils.GetHostInternalMgmtInterfaces(
        tc.naples_node)
    for intf in tc.host_int_intfs:
        tc.on_host[intf] = True

    tc.inband_intfs = naples_host_utils.GetNaplesInbandInterfaces(
        tc.naples_node)
    for intf in tc.inband_intfs:
        tc.on_host[intf] = False

    tc.naples_int_mgmt_intfs = naples_host_utils.GetNaplesInternalMgmtInterfaces(
        tc.naples_node)
    for intf in tc.naples_int_mgmt_intfs:
        tc.on_host[intf] = False

    tc.naples_oob_mgmt_intfs = naples_host_utils.GetNaplesOobInterfaces(
        tc.naples_node)
    for intf in tc.naples_oob_mgmt_intfs:
        tc.on_host[intf] = False

    tc.all_intfs = (tc.host_intfs + tc.host_int_intfs + tc.inband_intfs +
                    tc.naples_int_mgmt_intfs + tc.naples_oob_mgmt_intfs)
    api.Logger.debug("Promiscuous test interfaces: ", tc.all_intfs)

    return api.types.status.SUCCESS
Example #12
def Trigger(tc):
    naples_nodes = []
    # for every node in the setup
    for node in tc.nodes:
        if api.IsNaplesNode(node):
            naples_nodes.append(node)
            api.Logger.info(f"Found Naples Node: [{node}]")

    if len(naples_nodes) == 0:
        api.Logger.error("Failed to find a Naples Node!")
        return api.types.status.FAILURE

    for reboot in range(tc.args.reboots):
        # Reboot Node.
        # Reboot method (APC, IPMI, OS Reboot) is passed as a testcase parameter
        for node in naples_nodes:
            api.Logger.info(
                f"==== Reboot Loop # {reboot} on {node}. Reboot method: {tc.iterators.reboot_method} ===="
            )
            if api.RestartNodes(
                    [node],
                    tc.iterators.reboot_method) != api.types.status.SUCCESS:
                return api.types.status.FAILURE
                #raise OfflineTestbedException

            # Enable SSH, some checks require Naples access
            if enable_ssh.Main(None) != api.types.status.SUCCESS:
                api.Logger.error("Enabling SSH failed")
                return api.types.status.FAILURE

            # there is no real "PCI" check in IOTA for Windows
            if tc.os != host.OS_TYPE_WINDOWS:
                api.Logger.info(f"Verifying PCI on [{node}]: ")
                if verify_pci.verify_errors_lspci(
                        node, tc.os) != api.types.status.SUCCESS:
                    api.Logger.error(f"PCIe Failure detected on {node}")
                    return api.types.status.FAILURE
                    #raise OfflineTestbedException

            if checkLinks(tc, node) == api.types.status.FAILURE:
                api.Logger.error("Error verifying uplink interfaces")
                return api.types.status.FAILURE
                #raise OfflineTestbedException

            # don't run this while we figure out how to do this in ESX
            if tc.os != host.OS_TYPE_ESX:
                # Load the ionic driver except windows
                if tc.os != host.OS_TYPE_WINDOWS:
                    if host.LoadDriver(tc.os,
                                       node) == api.types.status.FAILURE:
                        api.Logger.info("ionic already loaded")
                # Make sure ionic driver attached to Uplink ports.

                if checkDrv(node) == api.types.status.FAILURE:
                    api.Logger.error("No ionic uplink interfaces detected")
                    return api.types.status.FAILURE
                    #raise OfflineTestbedException

    return api.types.status.SUCCESS
Example #13
def Verify(tc):
    nodes = api.GetWorkloadNodeHostnames()
    if api.IsNaplesNode(nodes[0]) and api.IsNaplesNode(nodes[1]):
        return api.types.status.DISABLED
    if tc.resp is None:
        return api.types.status.FAILURE

    result = api.types.status.SUCCESS
    cookie_idx = 0
    api.Logger.info("Iperf Results for %s" % (tc.cmd_descr))
    for cmd in tc.resp.commands:
        api.Logger.info("%s" % (tc.cmd_cookies[cookie_idx]))
        api.PrintCommandResults(cmd)
        if cmd.exit_code != 0 and not api.Trigger_IsBackgroundCommand(cmd):
            result = api.types.status.FAILURE
        cookie_idx += 1
    return result
Example #14
def bsd_ethtool_rx_sg_size_cmd(node, intf, size):
    args = {}
    args['hw.ionic.rx_sg_size'] = size
    if api.IsNaplesNode(node):
        host.UnloadDriver(host.OS_TYPE_BSD, node)
        cmds = naples.InsertIonicDriverCommands(os_type = host.OS_TYPE_BSD, **args)
        return cmds
    return " "
Example #15
def GetHostInternalMgmtInterfaces(node, device = None):
    # Rely on IOTA infra to provide this information (dual-nic friendly API)
    if api.IsNaplesNode(node):
        interface_names = api.GetNaplesHostMgmtInterfaces(node, device)
        if interface_names:
            return interface_names

    interface_names = []

    req = api.Trigger_CreateExecuteCommandsRequest(serial = True)

    if api.GetNodeOs(node) == OS_TYPE_LINUX:
        # first find the PCI BDF(s) of the mgmt device (device id 1004)
        cmd = "lspci -d :1004 | cut -d' ' -f1"
        api.Trigger_AddHostCommand(req, node, cmd)
        resp = api.Trigger(req)

        # find the interface name for each mgmt-interface PCI BDF
        pci_bdf_list = resp.commands[0].stdout.split("\n")

        for pci_bdf in pci_bdf_list:
            if (pci_bdf != ''):
                cmd = "ls /sys/bus/pci/devices/0000:" + pci_bdf + "/net/"

                req = api.Trigger_CreateExecuteCommandsRequest(serial = True)
                api.Trigger_AddHostCommand(req, node, cmd)
                resp = api.Trigger(req)

                for command in resp.commands:
                    iface_name = command.stdout
                    interface_names.append(iface_name.strip("\n"))
    elif api.GetNodeOs(node) == OS_TYPE_ESX:
        # For now this is hardcoded.
        return ["eth1"]
    elif api.GetNodeOs(node) == OS_TYPE_WINDOWS:
        entries = GetWindowsPortMapping(node)
        if len(entries) == 0:
            return []
        maxbus = 0
        name = ""
        for k, v in entries.items():
            if int(v["Bus"]) > maxbus:
                maxbus = int(v["Bus"])
                name = k

        return [name]
    else:
        cmd = "pciconf -l | grep chip=0x10041dd8 | cut -d'@' -f1 | sed \"s/ion/ionic/g\""
        api.Trigger_AddHostCommand(req, node, cmd)
        resp = api.Trigger(req)

        for command in resp.commands:
            iface_name = command.stdout
            interface_names.append(iface_name.strip("\n"))

    return interface_names
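A hedged usage sketch: Examples #4 and #28 use this helper to drop the host-side mgmt interfaces from a node's workload interface list. The node name below is hypothetical:

# Hypothetical usage, mirroring Examples #4 and #28.
node = "node1"  # assumed node name
mgmt_intfs = GetHostInternalMgmtInterfaces(node)
native_intfs = [i for i in api.GetWorkloadNodeHostInterfaces(node)
                if i not in mgmt_intfs]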
Example #16
def do_vmotion(tc, dsc_to_dsc):
    factor_l2seg = True
    new_node = ''
    old_node = ''
    api.Logger.info("In do_vmotion for dsc_to_dsc {}".format(dsc_to_dsc))
    # get first naples node to move VM to
    if (dsc_to_dsc):
        assert (len(tc.Nodes) >= 2)
        # set the old node to Nodes[0] and the new node to Nodes[1]
        old_node = tc.Nodes[0]
        new_node = tc.Nodes[1]
    else:
        assert (len(tc.Nodes) >= 1)
        assert (len(tc.NonNaplesNodes) >= 1)
        old_node = tc.NonNaplesNodes[0]
        new_node = tc.Nodes[0]

    workloads = api.GetWorkloads(old_node)
    assert (len(workloads) != 0)
    api.Logger.info("moving workloads from {} to {}".format(
        old_node, new_node))
    update_move_info(tc, workloads, factor_l2seg, new_node)
    vm_threads = []
    # TEMP: debug short-circuit; everything below this block is skipped
    if 1:
        temp_node = tc.Nodes[0]
        wl_temp = api.GetWorkloads(temp_node)
        wl_temp[0].sess_info_before = vm_utils.get_session_info(tc, wl_temp[0])
        wl_temp[0].sess_info_after = vm_utils.get_session_info(tc, wl_temp[0])
        ret = vm_utils.verify_session_info(tc, wl_temp[0])
        return ret

    for wl_info in tc.move_info:
        if (api.IsNaplesNode(wl_info.wl.node_name)):
            wl_info.sess_info_before = vm_utils.get_session_info(
                tc, wl_info.wl)
        api.Logger.info("moving wl {} from node {} to node {}".format(
            wl_info.wl.workload_name, wl_info.old_node, wl_info.new_node))
        vm_thread = threading.Thread(target=triggerVmotion,
                                     args=(
                                         tc,
                                         wl_info.wl,
                                         wl_info.new_node,
                                     ))
        vm_threads.append(vm_thread)
        vm_thread.start()
        create_ep_info(tc, wl_info.wl, wl_info.new_node, "START",
                       wl_info.old_node)
    # wait for vmotion thread to complete, meaning vmotion is done on vcenter
    for vm_thread in vm_threads:
        vm_thread.join()

    for wl_info in tc.move_info:
        vm_utils.update_ep_migr_status(tc, wl_info.wl, wl_info.new_node,
                                       "DONE")
    return api.types.status.SUCCESS
Example #17
def checkForIonicError(node):
    if not api.IsNaplesNode(node):
        return api.types.status.SUCCESS
    
    if api.GetNodeOs(node) == OS_TYPE_BSD:
        status = checkForBsdIonicError(node)
        if status != api.types.status.SUCCESS:
            return status

    return checkNaplesForError(node)
Example #18
def bsd_legacy_intr_mode_cmd(node, intf, op):
    args = {}
    if op == "on":
        args['hw.ionic.enable_msix'] = 0
    else:
        args['hw.ionic.enable_msix'] = 1
    if api.IsNaplesNode(node):
        host.UnloadDriver(host.OS_TYPE_BSD, node)
        cmds = naples.InsertIonicDriverCommands(os_type = host.OS_TYPE_BSD, **args)
        return cmds
    return " "
Example #19
def bsd_ethtool_queue_size_cmd(node, intf, queue_type, size):
    args = {}
    # both "tx" and "rx" queue types map to the same hw.ionic.max_queues tunable
    args['hw.ionic.max_queues'] = size
    if api.IsNaplesNode(node):
        host.UnloadDriver(host.OS_TYPE_BSD, node)
        cmds = naples.InsertIonicDriverCommands(os_type = host.OS_TYPE_BSD, **args)
        return cmds
    return " " #.join(["ethtool", "-L", intf, queue_type,  str(size)])
Example #20
def SetActiveInterfaceOnBond(intf_name, node_name=None):
    node_list = [node_name] if node_name else api.GetNaplesHostnames()
    for node in node_list:
        if not api.IsNaplesNode(node):
            continue
        intfs = naples_workload_utils.GetNaplesInbandBondInterfaces(node)
        for intf in intfs:
            ret = intf.SetActiveInterface(intf_name)
            if ret != api.types.status.SUCCESS:
                return ret
    return api.types.status.SUCCESS
Example #21
def GetNaplesInbandInterfaces(node, device=None):
    if not api.IsNaplesNode(node):
        return []
    intfs = []
    devices = [device] if device else api.GetDeviceNames(node)
    for device_name in devices:
        for intf in naples_host.GetNaplesInbandInterfaces(node, device_name):
            intfObj = NaplesInterface(node, intf, InterfaceType.NAPLES_IB_100G,
                                      'linux', device_name)
            intfs.append(intfObj)
    api.Logger.debug("NaplesInbandInterfaces for node: ", node, intfs)
    return intfs
Example #22
def Trigger(tc):

    req = api.Trigger_CreateExecuteCommandsRequest()
    for n in tc.nodes:
        if api.IsNaplesNode(n):
            common.AddPenctlCommand(
                req, n, "show metrics lif > metrics_lif.out.before")

    tc.lif_metrics_old = api.Trigger(req)

    common.SendTraffic(tc)

    req = api.Trigger_CreateExecuteCommandsRequest()
    for n in tc.nodes:
        if api.IsNaplesNode(n):
            common.AddPenctlCommand(
                req, n, "show metrics lif > metrics.lif.out.after")

    tc.lif_metrics_new = api.Trigger(req)

    return api.types.status.SUCCESS
Example #23
def Trigger(tc):
    if tc.os != host.OS_TYPE_BSD:
        api.Logger.info("Not implemented")
        return api.types.status.IGNORED

    # default to SUCCESS so the final return is defined even when the
    # topology has no Naples nodes
    status = api.types.status.SUCCESS
    for node in tc.nodes:
        if api.IsNaplesNode(node):
            status = do_lif_reset_test(node, tc.os)
            if status != api.types.status.SUCCESS:
                api.Logger.error("lif reset test failed")
                return api.types.status.FAILURE

    return status
Example #24
def __delete_endpoint_info(tc):
    api.Logger.debug(
        "Deleting endpoint info from CurrentHome of moved workloads")
    for wload, host in tc.vmotion_cntxt.CurrentHome.items():
        if not api.IsNaplesNode(host):
            continue

        api.Logger.debug("Deleting ep-info at %s for wload: %s" %
                         (host, wload.workload_name))
        ep_filter = "meta.name=" + wload.workload_name + ";"
        objects = agent_api.QueryConfigs("Endpoint", filter=ep_filter)
        assert (len(objects) == 1)
        agent_api.DeleteConfigObjects(objects[:1], [host], ignore_error=True)
    return
Example #25
def __get_agent_cfg_nodes(node_names=None, device_names=None):
    agent_node_names = node_names or api.GetNaplesHostnames()
    agent_cfg_nodes = []
    for node_name in agent_node_names:
        assert (api.IsNaplesNode(node_name))
        ip = api.GetNaplesMgmtIpAddress(node_name)
        if not ip:
            assert (0)
        # resolve devices per node so one node's list doesn't leak into the next
        node_devices = device_names or api.GetDeviceNames(node_name)
        for device_name in node_devices:
            nic_ip = api.GetNicIntMgmtIP(node_name, device_name)
            agent_cfg_nodes.append(cfg_api.NewCfgNode(node_name, ip, nic_ip))
    return agent_cfg_nodes
Example #26
def checkForIonicError(node):
    status = runHostCmd(node, 'dmesg | tail -n 10')
    if status != api.types.status.SUCCESS:
        api.Logger.error("CHECK_ERR: Is host: %s UP??" % (node))

    if not api.IsNaplesNode(node):
        return api.types.status.SUCCESS

    if api.GetNodeOs(node) == host.OS_TYPE_BSD:
        status = checkForBsdIonicError(node)
        if status != api.types.status.SUCCESS:
            return status

    return checkNaplesForError(node)
Example #27
def __create_endpoint_info(tc):
    # hack: sleep so the vmotion and the config update end up running concurrently
    time.sleep(5)

    for dest_host, workloads in tc.vmotion_cntxt.MoveRequest.items():
        api.Logger.debug(
            "Creating endpoint info at %s for workloads being moved" %
            dest_host)
        if not api.IsNaplesNode(dest_host):
            continue
        for wl in workloads:
            api.Logger.debug("Updating ep-info for %s" % wl.workload_name)
            ep_filter = "meta.name=" + wl.workload_name + ";"
            objects = agent_api.QueryConfigs("Endpoint", filter=ep_filter)
            assert (len(objects) == 1)
            obj = copy.deepcopy(objects[0])
            # delete endpoint being moved on new host, TEMP
            resp = agent_api.DeleteConfigObjects([obj], [dest_host],
                                                 ignore_error=True)
            if resp != api.types.status.SUCCESS:
                api.Logger.error("DeleteConfigObjects failed for %s for %s" %
                                 (wl.workload_name, dest_host))

            obj.spec.node_uuid = tc.vmotion_cntxt.UUIDMap[dest_host]
            obj.spec.migration = "START"
            current_host = tc.vmotion_cntxt.CurrentHome[wl]
            if (api.IsNaplesNode(current_host)):
                obj.status.node_uuid = tc.vmotion_cntxt.UUIDMap[current_host]
                obj.spec.homing_host_address = api.GetNicMgmtIP(current_host)
            else:
                obj.status.node_uuid = "0011.2233.4455"  # TEMP
                obj.spec.homing_host_address = "169.169.169.169"  # TEMP

            # this triggers the endpoint on the new host (naples) to set up flows
            agent_api.PushConfigObjects([obj], [dest_host], ignore_error=True)
    api.Logger.debug("Completed endpoint info creation at NewHome")
    return
Example #28
def getNativeWorkloadIntfs(tc):
    tc.host_nodes = api.GetWorkloadNodeHostnames()
    tmp_native_intf_list = {}
    tc.native_intf_list = {}
    tc.mgmt_intf_list = {}

    # Get host interfaces on all nodes
    for node in tc.host_nodes:
        tmp_native_intf_list[node] = list(api.GetWorkloadNodeHostInterfaces(node))
        if api.IsNaplesNode(node):
            tc.mgmt_intf_list[node] = list(naples_host_utils.GetHostInternalMgmtInterfaces(node))

    for node in tc.mgmt_intf_list:
        tc.native_intf_list[node] = list(
            set(tmp_native_intf_list.get(node)) -
            set(tc.mgmt_intf_list.get(node)))
    return api.types.status.SUCCESS
Example #29
def verify_errors_lspci(node, os_type):
    if os_type == host.OS_TYPE_ESX or os_type == host.OS_TYPE_WINDOWS:
        return api.types.status.SUCCESS

    if not api.IsNaplesNode(node):
        return api.types.status.SUCCESS

    uptime(node)
    if ((checkrootporterrors(node) != api.types.status.SUCCESS)
            and (checkpcirootbridge(node) != api.types.status.SUCCESS)):
        api.Logger.error("Errors on PCIe root port/bridge")
        return api.types.status.CRITICAL

    api.Logger.info("    No errors on PCIe root port/bridge")
    return api.types.status.SUCCESS
Example #30
def create_ep_info(tc, wl, new_node, migr_state, old_node):
    # get a naples handle to move to
    ep_filter = "meta.name=" + wl.workload_name + ";"
    objects = agent_api.QueryConfigs("Endpoint", filter=ep_filter)
    assert (len(objects) == 1)
    obj = copy.deepcopy(objects[0])
    # delete the endpoint being moved on the new host, TEMP
    agent_api.DeleteConfigObjects([obj], [new_node], ignore_error=True)

    obj.spec.node_uuid = tc.uuidMap[new_node]
    obj.spec.migration = migr_state
    if (api.IsNaplesNode(old_node)):
        obj.status.node_uuid = tc.uuidMap[old_node]
        obj.spec.homing_host_address = api.GetNicMgmtIP(old_node)
    else:
        obj.status.node_uuid = "0011.2233.4455"  # TEMP
        obj.spec.homing_host_address = "169.169.169.169"  # TEMP
    # this triggers the endpoint on the new host (naples) to set up flows
    agent_api.PushConfigObjects([obj], [new_node], ignore_error=True)
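Taken together, Examples #1, #8, #10, and #30 follow one calling pattern around this helper; a condensed, hedged sketch using the names from those examples:

# Condensed vmotion calling pattern from the examples above: start the
# vmotion in a background thread, publish "START" endpoint state on the
# destination Naples, wait for vcenter to finish, then mark it "DONE".
vm_thread = threading.Thread(target=triggerVmotion, args=(tc, wl, new_node,))
vm_thread.start()
if api.IsNaplesNode(new_node):
    create_ep_info(tc, wl, new_node, "START", old_node)
vm_thread.join()
update_ep_migr_status(tc, wl, new_node, "DONE")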