Code example #1
def Teardown(tc):
    req = api.Trigger_CreateExecuteCommandsRequest()
    for node in tc.Nodes:
        api.Trigger_AddNaplesCommand(
            req, node, "rm -rf /update/upgrade_halt_state_machine")
        api.Trigger_AddNaplesCommand(req, node,
                                     "rm -rf /update/pcieport_upgdata")
        api.Trigger_AddNaplesCommand(req, node,
                                     "rm -rf /update/pciemgr_upgdata")
        api.Trigger_AddNaplesCommand(req, node,
                                     "rm -rf /update/pciemgr_upgrollback")
        api.Trigger_AddNaplesCommand(req, node,
                                     "rm -rf /update/nicmgr_upgstate")
        api.Trigger_AddNaplesCommand(
            req, node, "rm -rf /data/upgrade_to_same_firmware_allowed")
    resp = api.Trigger(req)
    for cmd_resp in resp.commands:
        api.PrintCommandResults(cmd_resp)
        if cmd_resp.exit_code != 0:
            api.Logger.error("Setup failed %s", cmd_resp.command)

    req = api.Trigger_CreateExecuteCommandsRequest()
    for n in tc.Nodes:
        cmd = 'curl -k -X DELETE https://' + api.GetNicIntMgmtIP(
            n) + ':8888/api/v1/naples/rollout/'
        api.Trigger_AddHostCommand(req, n, cmd)
    tc.resp = api.Trigger(req)
    for cmd in tc.resp.commands:
        api.PrintCommandResults(cmd)
        if cmd.exit_code != 0:
            return api.types.status.FAILURE
    return api.types.status.SUCCESS
Code example #2
def Trigger(tc):
    naples_nodes = api.GetNaplesHostnames()
    for node in naples_nodes:
        api.Logger.info("Start second athena_app to pick up policy.json")
        req = api.Trigger_CreateExecuteCommandsRequest()
        api.Trigger_AddNaplesCommand(req, node, "/nic/tools/start-sec-agent.sh")
        api.Trigger_AddNaplesCommand(req, node, "\r\n")
        resp = api.Trigger(req)
        cmd = resp.commands[0]
        api.PrintCommandResults(cmd)
        if cmd.exit_code != 0:
            api.Logger.error("Start second athena_app failed on node {}".format(node))
            return api.types.status.FAILURE
    
    time.sleep(10)

    for node in naples_nodes:
        req = api.Trigger_CreateExecuteCommandsRequest()
        api.Trigger_AddNaplesCommand(req, node, "ps -aef | grep athena_app")

        resp = api.Trigger(req)
        cmd = resp.commands[0]
        api.PrintCommandResults(cmd)
        if cmd.exit_code != 0:
            api.Logger.error("ps failed on Node {}".format(node))
            return api.types.status.FAILURE
        if "athena_app" not in cmd.stdout:
            # TODO: If athena_app is not running, run start_agent.sh manually
            api.Logger.error("no athena_app running on Node {}, need to start athena_app first".format(node))
            return api.types.status.FAILURE
    
        athena_sec_app_pid = cmd.stdout.strip().split()[1]
        api.Logger.info("athena_app up and running on Node {} with PID {}".format(node, athena_sec_app_pid))

    return api.types.status.SUCCESS
Code example #3
def Trigger(tc):
    req_uname = api.Trigger_CreateExecuteCommandsRequest(serial = True)
    req = api.Trigger_CreateExecuteCommandsRequest(serial = True)

    # Move driver package into position and build on naples nodes
    for n in tc.nodes:
        api.Logger.info("Building RDMA drivers and tools on: {node}".format(node=n))
        if tc.os == host.OS_TYPE_LINUX:
            api.Trigger_AddHostCommand(req, n,
                    "mkdir -p {path} && mv {pkg} {path} && cd {path} && tar -xma --strip-components 1 -f {pkg} && ./setup_libs.sh && ./build.sh"
                    .format(path=tc.iota_path, pkg=tc.pkgname),
                    timeout = 180)
        else:
            api.Trigger_AddHostCommand(req, n,
                    "mkdir -p {path} && mv {pkg} {path} && cd {path} && tar -xm --strip-components 1 -f {pkg} && ./build.sh"
                    .format(path=tc.iota_path, pkg=tc.pkgname),
                    timeout = 180)
        api.Trigger_AddHostCommand(req_uname, n, "uname -r")

    # Move show_gid into position on other nodes
    for n in tc.other_nodes:
        if n in tc.nodes:
            continue
        api.Logger.info("Moving show_gid to tools on {node}"
                .format(node=n))
        api.Trigger_AddHostCommand(req, n, "mkdir -p {path} && mv show_gid {path}"
                .format(path=tc.iota_path))
        api.Trigger_AddHostCommand(req_uname, n, "uname -r")

    tc.resp_uname = api.Trigger(req_uname)
    tc.resp = api.Trigger(req)

    return api.types.status.SUCCESS
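
A minimal companion check for the build responses collected above could look like the sketch below; the Verify name and the simple non-zero-exit-code criterion are assumptions, and only calls already used in these examples are relied on.

def Verify(tc):
    # Assumed companion to the Trigger above: fail if any build or uname
    # command returned a non-zero exit code.
    for resp in (tc.resp_uname, tc.resp):
        for cmd in resp.commands:
            api.PrintCommandResults(cmd)
            if cmd.exit_code != 0:
                api.Logger.error("Command failed: %s" % cmd.command)
                return api.types.status.FAILURE
    return api.types.status.SUCCESS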
Code example #4
def Verify(tc):
    rc = api.types.status.SUCCESS

    req = api.Trigger_CreateExecuteCommandsRequest(serial=True)
    api.Trigger_AddNaplesCommand(req, tc.Nodes[0], 'sleep 3')
    resp = api.Trigger(req)

    for node, intr in intr_list:
        req = api.Trigger_CreateExecuteCommandsRequest(serial=True)
        api.Trigger_AddNaplesCommand(req, node, intr.get_count_cmd())
        resp = api.Trigger(req)

        cmd = resp.commands[0]
        if cmd.exit_code != 0:
            api.Logger.error(
                "Command failed: Node {}, Interrupt {}, Field {}".format(
                    node, intr.name(), intr.field()))
            return api.types.status.FAILURE

        expected = intr.count() + 1
        value = intr.parse_count_cmd_output(cmd.stdout)

        if value < expected:
            api.Logger.error(
                "Node {}, Interrupt {}, Field {}, Expected {}, Got {}".format(
                    node, intr.name(), intr.field(), expected, value))
            rc = api.types.status.FAILURE

    return rc
Code example #5
def Teardown(tc):
    if tc.skip:
        return api.types.status.SUCCESS

    req = api.Trigger_CreateExecuteCommandsRequest()
    for node in tc.Nodes:
        api.Trigger_AddNaplesCommand(req, node, "rm -rf /data/upgrade_to_same_firmware_allowed")
    resp = api.Trigger(req)
    try:
        for cmd_resp in resp.commands:
            if cmd_resp.exit_code != 0:
                api.PrintCommandResults(cmd_resp)
                api.Logger.error("Teardown failed %s", cmd_resp.command)
    except:
        api.Logger.error("EXCEPTION occured in Naples command")
        return api.types.status.FAILURE

    req = api.Trigger_CreateExecuteCommandsRequest()
    for n in tc.Nodes:
        cmd = 'curl -k -X DELETE https://' + api.GetNicIntMgmtIP(n) + ':'+utils.GetNaplesMgmtPort()+'/api/v1/naples/rollout/'
        api.Trigger_AddHostCommand(req, n, cmd, timeout=100)
    tc.resp = api.Trigger(req)
    for cmd in tc.resp.commands:
        api.PrintCommandResults(cmd)
        if cmd.exit_code != 0:
            return api.types.status.FAILURE

    return api.types.status.SUCCESS
Code example #6
def Trigger(tc):
    # check that there have been packets showing up after previous tests
    #       ip -s link show <pf>

    rate = tc.iterators.rate
    duration = 30
    if __SetAndCheckRate(tc.host1, tc.pf_1, tc.vfid,
                         rate) != api.types.status.SUCCESS:
        return api.types.status.ERROR

    servercmd = iperf.ServerCmd(server_ip=tc.remote_ip, port=7777, run_core=2)
    clientcmd = iperf.ClientCmd(tc.remote_ip,
                                client_ip=tc.vf_ip,
                                jsonOut=True,
                                port=7777,
                                proto='tcp',
                                time=duration,
                                run_core=2)

    sreq = api.Trigger_CreateExecuteCommandsRequest(serial=True)
    creq = api.Trigger_CreateExecuteCommandsRequest(serial=True)

    api.Trigger_AddHostCommand(sreq, tc.host2, servercmd, background=True)
    api.Trigger_AddHostCommand(creq, tc.host1, clientcmd, timeout=3600)

    server_resp = api.Trigger(sreq)
    if not server_resp:
        api.Logger.error("Unable to execute server command")
        return api.types.status.ERROR
    time.sleep(5)

    client_resp = api.Trigger(creq)
    if not client_resp:
        api.Logger.error("Unable to execute client command")
        return api.types.status.ERROR
    resp = client_resp.commands.pop()
    if resp.exit_code != 0:
        api.Logger.error("Iperf client failed with exit code %d" %
                         resp.exit_code)
        api.PrintCommandResults(resp)
        return api.types.status.ERROR
    if not iperf.Success(resp.stdout):
        api.Logger.error("Iperf failed with error: %s" %
                         iperf.Error(resp.stdout))
        return api.types.status.ERROR

    api.Logger.info("Obs rate %sMbps" % iperf.GetSentMbps(resp.stdout))
    obs_rate = float(iperf.GetSentMbps(resp.stdout))

    delta = (abs(obs_rate - rate) * 100) / rate

    if delta > 10:
        api.Logger.error("Configured Tx rate %f but observed %f delta %f%%" %
                         (rate, obs_rate, delta))
        return api.types.status.FAILURE

    api.Trigger_TerminateAllCommands(server_resp)

    return api.types.status.SUCCESS
Code example #7
def GetHostInternalMgmtInterfaces(node, device = None):
    # Rely on IOTA infra to provide this information (dual-nic friendly API)
    if api.IsNaplesNode(node):
        interface_names = api.GetNaplesHostMgmtInterfaces(node, device)
        if interface_names:
            return interface_names

    interface_names = []

    req = api.Trigger_CreateExecuteCommandsRequest(serial = True)

    if api.GetNodeOs(node) == OS_TYPE_LINUX:
        pci_bdf_list = []
        # find the PCI BDF first for the mgmt device, which has device ID 1004
        cmd = "lspci -d :1004 | cut -d' ' -f1"
        api.Trigger_AddHostCommand(req, node, cmd)
        resp = api.Trigger(req)

        # find the interface name for each PCI BDF of the mgmt interfaces
        pci_bdf_list = resp.commands[0].stdout.split("\n")

        for pci_bdf in pci_bdf_list:
            if (pci_bdf != ''):
                cmd = "ls /sys/bus/pci/devices/0000:" + pci_bdf + "/net/"

                req = api.Trigger_CreateExecuteCommandsRequest(serial = True)
                api.Trigger_AddHostCommand(req, node, cmd)
                resp = api.Trigger(req)

                for command in resp.commands:
                    #iface_name = None
                    iface_name = command.stdout
                    interface_names.append(iface_name.strip("\n"))
    elif api.GetNodeOs(node) == OS_TYPE_ESX:
        #For now hardcoding.
        return ["eth1"]
    elif api.GetNodeOs(node) == OS_TYPE_WINDOWS:
        entries = GetWindowsPortMapping(node)
        if len(entries) == 0:
            return []
        maxbus = 0
        name = ""
        for k, v in entries.items():
            if int(v["Bus"]) > maxbus:
                maxbus = int(v["Bus"])
                name = k

        return [name]
    else:
        cmd = "pciconf -l | grep chip=0x10041dd8 | cut -d'@' -f1 | sed \"s/ion/ionic/g\""
        api.Trigger_AddHostCommand(req, node, cmd)
        resp = api.Trigger(req)

        for command in resp.commands:
            iface_name = command.stdout
            interface_names.append(iface_name.strip("\n"))

    return interface_names
Code example #8
def Trigger(tc):
    if api.IsDryrun(): return api.types.status.SUCCESS

    srv = tc.workloads[0]
    cli = tc.workloads[1]
    
    # Determine where the commands will be run - host or Naples.
    test_type = getattr(tc.args, "test-type", INTF_TEST_TYPE_HOST)
    is_naples_cmd = True
    if test_type == INTF_TEST_TYPE_HOST:
        is_naples_cmd = False

    srv_req = api.Trigger_CreateExecuteCommandsRequest(serial = False)
    cli_req = api.Trigger_CreateExecuteCommandsRequest(serial = False)

    proto = getattr(tc.iterators, "proto", 'tcp')
    number_of_iperf_threads = getattr(tc.args, "iperfthreads", 1)
    pktsize = getattr(tc.iterators, "pktsize", None)
    ipproto = getattr(tc.iterators, "ipproto", 'v4')

    if ipproto == 'v4':
        server_ip = srv.ip_address
        client_ip = cli.ip_address
    else:
        server_ip = srv.ipv6_address
        client_ip = cli.ipv6_address
        
    tc.cmd_descr = "Server: %s(%s) <--> Client: %s(%s)" %\
                   (srv.interface, server_ip, cli.interface, client_ip)

    api.Logger.info("Starting Iperf(%s/%s) test from %s"
                    % (proto, ipproto, tc.cmd_descr))

    duration =  10
    for i in range(number_of_iperf_threads):
        if proto == 'tcp':
            port = api.AllocateTcpPort()
        else:
            port = api.AllocateUdpPort()
 
        iperf_server_cmd = iperf.ServerCmd(port, naples = is_naples_cmd)
        api.Trigger_AddCommand(srv_req, srv.node_name, srv.workload_name, iperf_server_cmd, background = True)

        iperf_client_cmd = iperf.ClientCmd(server_ip, port, time=duration,
                                 proto=proto, jsonOut=True, ipproto=ipproto,
                                 pktsize=pktsize, client_ip=client_ip, naples = is_naples_cmd)
        api.Trigger_AddCommand(cli_req, cli.node_name, cli.workload_name, iperf_client_cmd, timeout = 60)

    srv_resp = api.Trigger(srv_req)
    # Wait for iperf server to start.
    time.sleep(10)
    tc.cli_resp = api.Trigger(cli_req)
    # Wait for iperf clients to finish.
    time.sleep(2*duration)

    srv_resp1 = api.Trigger_TerminateAllCommands(srv_resp)

    return api.types.status.SUCCESS
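
The client responses saved in tc.cli_resp above would typically be inspected afterwards; a hedged sketch follows, assuming the iperf JSON helpers (iperf.Success, iperf.Error) behave as in the rate-limit example earlier, and that the Verify name and pass criteria match the test's intent.

def Verify(tc):
    # Assumed companion to the Trigger above.
    if tc.cli_resp is None:
        return api.types.status.FAILURE
    for cmd in tc.cli_resp.commands:
        api.PrintCommandResults(cmd)
        if cmd.exit_code != 0:
            api.Logger.error("Iperf client failed with exit code %d" % cmd.exit_code)
            return api.types.status.FAILURE
        if not iperf.Success(cmd.stdout):
            api.Logger.error("Iperf failed with error: %s" % iperf.Error(cmd.stdout))
            return api.types.status.FAILURE
    return api.types.status.SUCCESS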
Code example #9
def Trigger(tc):

    req = None
    req = api.Trigger_CreateExecuteCommandsRequest()
    tc.cmd_cookies = []
    ping_count = getattr(tc.args, "ping_count", 20)
    interval = getattr(tc.args, "ping_interval", 0.01)
    connectivity = tc.iterators.connectivity
    naplesHosts = api.GetNaplesHostnames()
    tc_intf = (tc.iterators.interface).capitalize()

    if tc_intf in ['Uplink0', 'Uplink1', 'Uplinks']:
       setDataPortStatePerUplink(naplesHosts, tc.iterators.port_status, tc_intf)   
    if tc_intf in ['Switchport0', 'Switchport1', 'Switchports']:
        switchPortOp(naplesHosts, tc.iterators.port_status, tc_intf)
    
    if connectivity == 'bgp_peer': 
        for node in naplesHosts:
            for bgppeer in bgp_peer.client.Objects(node):
                # Don't try to ping on the downed interface
                if tc.iterators.port_status == 'down':
                   if tc_intf == 'Switchport0' and bgppeer.Id == 1:
                       continue
                   elif tc_intf == 'Switchport1' and bgppeer.Id == 2:
                       continue
                cmd_cookie = "%s --> %s" %\
                             (str(bgppeer.LocalAddr), str(bgppeer.PeerAddr))
                api.Trigger_AddNaplesCommand(req, node, \
                                     "ping -i %f -c %d -s %d %s" % \
                                     (interval, ping_count, tc.iterators.pktsize, \
                                             str(bgppeer.PeerAddr)))
                api.Logger.info("Ping test from %s" % (cmd_cookie))
                tc.cmd_cookies.append(cmd_cookie)

        # Trigger once, after commands for all nodes have been queued.
        tc.resp = api.Trigger(req)

    else:
        for node1 in naplesHosts:
            for node2 in naplesHosts:
                if node1 == node2:
                    continue

                objs = device.client.Objects(node1)
                device1 = next(iter(objs))
                objs = device.client.Objects(node2)
                device2 = next(iter(objs))
                cmd_cookie = "%s --> %s" %\
                             (device1.IP, device2.IP)
                api.Trigger_AddNaplesCommand(req, node1, \
                                       "ping -i %f -c %d -s %d %s" % \
                                        (interval, ping_count, tc.iterators.pktsize, \
                                              device2.IP))
                api.Logger.info("Loopback ping test from %s" % (cmd_cookie))
                tc.cmd_cookies.append(cmd_cookie)

        # Trigger once, after commands for all nodes have been queued.
        tc.resp = api.Trigger(req)

    return api.types.status.SUCCESS
Code example #10
def Trigger(tc):

    # move device.json
    cmd = "mv /device.json /nic/conf/"
    resp = api.RunNaplesConsoleCmd(tc.nodes[0], cmd)

    # load drivers
    cmd = "insmod /nic/bin/ionic_mnic.ko && insmod /nic/bin/mnet_uio_pdrv_genirq.ko && insmod /nic/bin/mnet.ko"
    resp = api.RunNaplesConsoleCmd(tc.nodes[0], cmd)

    # start athena app
    cmd = "/nic/tools/start-agent-skip-dpdk.sh"
    resp = api.RunNaplesConsoleCmd(tc.nodes[0], cmd)

    # wait for athena app to be up
    utils.Sleep(80)

    # configure int_mnic0
    cmd = "ifconfig int_mnic0 " + tc.int_mnic_ip + " netmask 255.255.255.0"
    resp = api.RunNaplesConsoleCmd(tc.nodes[0], cmd)

    # run plugctl to gracefully bring up the PCI device on host
    req = api.Trigger_CreateExecuteCommandsRequest(serial=True)
    cmd = "./plugctl.sh in"
    api.Trigger_AddHostCommand(req, tc.bitw_node_name, cmd)
    resp = api.Trigger(req)
    cmd = resp.commands[0]
    api.PrintCommandResults(cmd)

    if cmd.exit_code != 0:
        api.Logger.error("Failed to gracefully bring up the PCI device on host %s" % \
                          tc.bitw_node_name)
        return api.types.status.FAILURE

    # get host internal mgmt intf
    host_intfs = naples_host.GetHostInternalMgmtInterfaces(tc.bitw_node_name)
    # Assuming single nic per host
    if len(host_intfs) == 0:
        api.Logger.error('Failed to get host interfaces')
        return api.types.status.FAILURE

    intf = host_intfs[0]
    ip_addr = str(ip_address(tc.int_mnic_ip.rstrip()) + 1)

    req = api.Trigger_CreateExecuteCommandsRequest(serial=True)
    cmd = "ifconfig " + str(intf) + " " + ip_addr + "/24 up"
    api.Trigger_AddHostCommand(req, tc.bitw_node_name, cmd)
    resp = api.Trigger(req)
    cmd = resp.commands[0]
    api.PrintCommandResults(cmd)

    if cmd.exit_code != 0:
        api.Logger.error("Failed to gracefully bring up the internal mgmt intf on host %s" % \
                          tc.bitw_node_name)
        return api.types.status.FAILURE

    return api.types.status.SUCCESS
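
Once both ends of the internal management link are configured as above, a quick reachability check is natural; the helper below is only a sketch, with the function name and the ping count chosen for illustration.

def __ping_int_mnic(tc):
    # Hypothetical helper: ping the Naples int_mnic0 address from the host
    # once both sides of the internal mgmt link are up.
    req = api.Trigger_CreateExecuteCommandsRequest(serial=True)
    api.Trigger_AddHostCommand(req, tc.bitw_node_name, "ping -c 5 " + tc.int_mnic_ip.rstrip())
    resp = api.Trigger(req)
    cmd = resp.commands[0]
    api.PrintCommandResults(cmd)
    if cmd.exit_code != 0:
        api.Logger.error("Internal mgmt ping failed on host %s" % tc.bitw_node_name)
        return api.types.status.FAILURE
    return api.types.status.SUCCESS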
Code example #11
def __installPenCtl(node):

    fullpath = api.GetTopDir() + '/' + common.PENCTL_PKG

    resp = api.CopyToHost(node, [fullpath], "")
    if resp is None:
        return api.types.status.FAILURE
    if resp.api_response.api_status != types_pb2.API_STATUS_OK:
        api.Logger.error("Failed to copy penctl to Node: %s" % node)
        return api.types.status.FAILURE

    fullpath = api.GetTopDir() + '/' + common.PENCTL_TOKEN_FILE

    resp = api.CopyToHost(node, [fullpath], "")
    if resp is None:
        return api.types.status.FAILURE
    if resp.api_response.api_status != types_pb2.API_STATUS_OK:
        api.Logger.error("Failed to copy penctl token to Node: %s" % node)
        return api.types.status.FAILURE


    req = api.Trigger_CreateExecuteCommandsRequest()
    api.Trigger_AddHostCommand(req, node, "tar -xvf %s" % os.path.basename(common.PENCTL_PKG) + " && sync",
                           background = False)

    # Resolve the full path of the penctl executable
    execName = __penctl_exec(node)
    realPath = "realpath %s/%s " % (common.PENCTL_DEST_DIR, execName)
    api.Trigger_AddHostCommand(req, node, realPath, background = False)

    resp = api.Trigger(req)

    for cmd in resp.commands:
        api.PrintCommandResults(cmd)
        if cmd.exit_code != 0:
            return api.types.status.FAILURE

    common.PENCTL_EXEC[node] = resp.commands[1].stdout.split("\n")[0]


    req = api.Trigger_CreateExecuteCommandsRequest()
    # Resolve the full path of the penctl token file
    realPath = "realpath %s " % (common.PENCTL_TOKEN_FILE_NAME)
    api.Trigger_AddHostCommand(req, node, realPath, background = False)

    resp = api.Trigger(req)

    for cmd in resp.commands:
        api.PrintCommandResults(cmd)
        if cmd.exit_code != 0:
            return api.types.status.FAILURE

    common.PENCTL_TOKEN[node] = resp.commands[0].stdout.split("\n")[0]



    return api.types.status.SUCCESS
Code example #12
def do_lif_reset_test(node, os):
    for i in range(3):
        api.Logger.info("LIF reset and driver reload test loop %d" % i)
        if host.UnloadDriver(os, node, "all") is api.types.status.FAILURE:
            api.Logger.error("ionic unload failed loop %d" % i)
            return api.types.status.FAILURE

        if host.LoadDriver(os, node) is api.types.status.FAILURE:
            api.Logger.error("ionic load failed loop %d" % i)
            return api.types.status.FAILURE

        wl_api.ReAddWorkloads(node)

        if api.GetNaplesHostInterfaces(node) is None:
            api.Logger.error("No ionic interface after loop %d" % i)
            return api.types.status.FAILURE

        for intf in api.GetNaplesHostInterfaces(node):
            req = api.Trigger_CreateExecuteCommandsRequest(serial=True)
            vlan_list = getVlanList(node, intf)
            filter_list = getFilterList(node, intf)
            # Single LIF reset
            api.Trigger_AddHostCommand(
                req, node,
                "sysctl dev.%s.reset=1" % (host.GetNaplesSysctl(intf)))
            resp = api.Trigger(req)
            time.sleep(5)
            vlan_list1 = getVlanList(node, intf)
            filter_list1 = getFilterList(node, intf)
            if vlan_list != vlan_list1:
                api.Logger.error(
                    "VLAN list doesn't match for %s, before: %s after: %s" %
                    (intf, str(vlan_list), str(vlan_list1)))
                return api.types.status.FAILURE

            if filter_list != filter_list1:
                api.Logger.error(
                    "Filter list doesn't match for %s, before: %s after: %s" %
                    (intf, str(filter_list), str(filter_list1)))
                return api.types.status.FAILURE

            api.Logger.info(
                "Success running LIF reset test on %s VLAN: %s, Filters; %s" %
                (intf, str(vlan_list), str(filter_list)))
        # Now stress test LIF reset
        for intf in api.GetNaplesHostInterfaces(node):
            req = api.Trigger_CreateExecuteCommandsRequest(serial=True)
            api.Trigger_AddHostCommand(
                req, node,
                "for ((i=0;i<10;i++)); do sysctl dev.%s.reset=1; done &" %
                (host.GetNaplesSysctl(intf)))
            # Some of the LIF resets will fail, since they run in the background
            # while the driver is being reloaded.
            resp = api.Trigger(req)

    return api.types.status.SUCCESS
Code example #13
def Trigger(tc):
    if tc.skip: return api.types.status.SUCCESS

    tc.serverCmds = []
    tc.clientCmds = []
    tc.cmd_descr = []

    if not api.IsSimulation():
        serverReq = api.Trigger_CreateAllParallelCommandsRequest()
        clientReq = api.Trigger_CreateAllParallelCommandsRequest()
    else:
        serverReq = api.Trigger_CreateExecuteCommandsRequest(serial = False)
        clientReq = api.Trigger_CreateExecuteCommandsRequest(serial = False)

    for idx, pairs in enumerate(tc.workload_pairs):
        client = pairs[0]
        server = pairs[1]

        cmd_descr = "Server: %s(%s) <--> Client: %s(%s)" %\
                       (server.workload_name, server.ip_address, client.workload_name, client.ip_address)
        tc.cmd_descr.append(cmd_descr)
        num_sessions = int(getattr(tc.args, "num_sessions", 1))
        api.Logger.info("Starting Iperf test from %s num-sessions %d" % (cmd_descr, num_sessions))

        if tc.iterators.proto == 'udp':
            port = api.AllocateUdpPort()
            serverCmd = iperf.ServerCmd(port)
            clientCmd = iperf.ClientCmd(server.ip_address, port, proto='udp', jsonOut=True, num_of_streams = num_sessions)
        else:
            port = api.AllocateTcpPort()
            serverCmd = iperf.ServerCmd(port)
            clientCmd = iperf.ClientCmd(server.ip_address, port, jsonOut=True,  num_of_streams = num_sessions)

        tc.serverCmds.append(serverCmd)
        tc.clientCmds.append(clientCmd)

        api.Trigger_AddCommand(serverReq, server.node_name, server.workload_name,
                               serverCmd, background = True)

        api.Trigger_AddCommand(clientReq, client.node_name, client.workload_name,
                               clientCmd)

    server_resp = api.Trigger(serverReq)
    # Sleep for some time as the background servers may not have started yet.
    time.sleep(30)

    tc.iperf_client_resp = api.Trigger(clientReq)
    # It's faster to kill the iperf servers; still call terminate on all of them.
    api.Trigger_TerminateAllCommands(server_resp)

    return api.types.status.SUCCESS
Code example #14
def athena_sec_app_start(node_name=None,
                         nic_name=None,
                         init_wait_time=INIT_WAIT_TIME_DEFAULT):
    node_nic_names = []

    if (not node_name and nic_name) or (node_name and not nic_name):
        raise Exception("specify both node_name and nic_name or neither")

    if node_name and nic_name:
        node_nic_names.append((node_name, nic_name))
    else:
        node_nic_names = get_athena_node_nic_names()

    for nname, nicname in node_nic_names:
        req = api.Trigger_CreateExecuteCommandsRequest()

        cmd = "/nic/tools/start-sec-agent-iota.sh"
        api.Trigger_AddNaplesCommand(req, nname, cmd, nicname, background=True)

        resp = api.Trigger(req)
        cmd = resp.commands[0]
        api.PrintCommandResults(cmd)

        if cmd.exit_code != 0:
            api.Logger.error("command to start athena sec app failed on "
                             "node %s nic %s" % (nname, nicname))
            return api.types.status.FAILURE

    # sleep for init to complete
    misc_utils.Sleep(init_wait_time)

    for nname, nicname in node_nic_names:
        req = api.Trigger_CreateExecuteCommandsRequest()
        cmd = "ps -aef | grep athena_app | grep soft-init | grep -v grep"
        api.Trigger_AddNaplesCommand(req, nname, cmd, nicname)

        resp = api.Trigger(req)
        cmd = resp.commands[0]
        api.PrintCommandResults(cmd)

        if cmd.exit_code != 0:
            api.Logger.error("ps failed or athena_app failed to start "
                             "on node %s nic %s" % (nname, nicname))
            return api.types.status.FAILURE

        if "athena_app" in cmd.stdout:
            athena_sec_app_pid = cmd.stdout.strip().split()[1]
            api.Logger.info("Athena sec app came up on node %s nic %s and "
                            "has pid %s" %
                            (nname, nicname, athena_sec_app_pid))

    return api.types.status.SUCCESS
Code example #15
def __load_linux_driver(node, node_os, manifest_file):

    image_manifest = parser.JsonParse(manifest_file)
    matching = list(filter(lambda x: x.OS == node_os, image_manifest.Drivers))
    if not matching or not matching[0].Images:
        api.Logger.error("Unable to load image manifest")
        return api.types.status.FAILURE
    driver_images = matching[0].Images[0]

    drImgFile = os.path.join(Gl, driver_images.drivers_pkg)
    api.Logger.info("Fullpath for driver image: " + drImgFile)
    resp = api.CopyToHost(node, [drImgFile], "")
    if not api.IsApiResponseOk(resp):
        api.Logger.error("Failed to copy %s" % drImgFile)
        return api.types.status.FAILURE

    rundir = os.path.basename(driver_images.drivers_pkg).split('.')[0]
    req = api.Trigger_CreateExecuteCommandsRequest(serial = True)
    api.Trigger_AddHostCommand(req, node, "tar -xf " + os.path.basename(driver_images.drivers_pkg))
    api.Trigger_AddHostCommand(req, node, "./build.sh", rundir=rundir)

    resp = api.Trigger(req)

    if not api.IsApiResponseOk(resp):
        api.Logger.error("TriggerCommand for driver build failed")
        return api.types.status.FAILURE

    for cmd in resp.commands:
        if cmd.exit_code != 0 and cmd.command != './build.sh':  # Build.sh could fail -ignored (FIXME)
            api.Logger.error("Failed to exec cmds to build/load new driver")
            return api.types.status.FAILURE

    api.Logger.info("New driver image is built on target host. Prepare to load")

    if host.UnloadDriver(node_os, node) != api.types.status.SUCCESS:
        api.Logger.error("Failed to unload current driver - proceeding")

    req = api.Trigger_CreateExecuteCommandsRequest(serial = True)
    if node_os == OS_TYPE_LINUX: 
        api.Trigger_AddHostCommand(req, node, "insmod " + 
                os.path.join(rundir, "drivers/eth/ionic/ionic.ko"))
    elif node_os == OS_TYPE_BSD: 
        api.Trigger_AddHostCommand(req, node, "kldload " + 
                os.path.join(rundir, "drivers/eth/ionic/ionic.ko"))
    resp = api.Trigger(req)

    if not api.Trigger_IsSuccess(resp):
        api.Logger.error("TriggerCommand for driver installation failed")
        return api.types.status.FAILURE

    return api.types.status.SUCCESS
Code example #16
def Trigger(tc):
    if api.IsDryrun(): return api.types.status.SUCCESS

    req1 = api.Trigger_CreateExecuteCommandsRequest(serial=True)
    req2 = api.Trigger_CreateExecuteCommandsRequest(serial=False)

    w1 = tc.workloads[0]
    w2 = tc.workloads[1]
    tc.cmd_descr = "Server: %s(%s) <--> Client: %s(%s)" %\
                   (w1.interface, w1.ip_address, w2.interface, w2.ip_address)
    api.Logger.info("Starting Iperf test from %s" % (tc.cmd_descr))

    proto = getattr(tc.iterators, "proto", 'tcp')

    number_of_iperf_threads = getattr(tc.args, "iperfthreads", 1)

    pktsize = getattr(tc.iterators, "pktsize", 512)
    ipproto = getattr(tc.iterators, "ipproto", 'v4')

    for i in range(number_of_iperf_threads):
        if proto == 'tcp':
            port = api.AllocateTcpPort()
        else:
            port = api.AllocateUdpPort()

        iperf_server_cmd = cmd_builder.iperf_server_cmd(port=port)
        api.Trigger_AddCommand(req1,
                               w1.node_name,
                               w1.workload_name,
                               iperf_server_cmd,
                               background=True)

        iperf_client_cmd = cmd_builder.iperf_client_cmd(
            server_ip=w1.ip_address,
            port=port,
            proto=proto,
            pktsize=pktsize,
            ipproto=ipproto)
        api.Trigger_AddCommand(req2, w2.node_name, w2.workload_name,
                               iperf_client_cmd)

    trig_resp1 = api.Trigger(req1)
    trig_resp2 = api.Trigger(req2)
    term_resp1 = api.Trigger_TerminateAllCommands(trig_resp1)

    response = api.Trigger_AggregateCommandsResponse(trig_resp1, term_resp1)
    tc.resp = api.Trigger_AggregateCommandsResponse(response, trig_resp2)

    return api.types.status.SUCCESS
Code example #17
def Trigger(tc):
    if tc.skip: return api.types.status.SUCCESS

    store = tc.GetBundleStore()
    cps = int(getattr(tc.args, "cps", 500))
    attempts = int(getattr(tc.args, "attempts", 3))
    sessions = int(getattr(tc.args, "num_sessions", 1))
    session_time = str(getattr(tc.args, "session_time", "10")) + "s"
    static_arp = int(getattr(tc.args, "static_arp", False))

    if static_arp:
        #install arp entries before running
        arpReqs = store["arp_ctx"]
        api.Trigger(arpReqs)

    #Spread CPS evenly
    clientReq = api.Trigger_CreateAllParallelCommandsRequest()

    cps_per_node = int(cps / len(store["client_ctxts"]))
    for index, (_, client) in enumerate(store["client_ctxts"].items()):
        jsonInput = {"connections": []}
        for serverIPPort in client.GetServers():
            jsonInput["connections"].append({
                "ServerIPPort": serverIPPort,
                "proto": "tcp"
            })
        outfile = api.GetTestDataDirectory(
        ) + "/" + client.workload_name + "_fuz.json"
        with open(outfile, 'w') as fp:
            json.dump(jsonInput, fp)
        api.CopyToWorkload(client.node_name, client.workload_name, [outfile],
                           "")
        clientCmd = fuz_init.FUZ_EXEC[
            client.workload_name] + " -attempts " + str(
                attempts) + " -duration " + session_time + " -conns " + str(
                    sessions) + " -cps " + str(
                        cps_per_node
                    ) + " -talk  --jsonOut --jsonInput " + os.path.basename(
                        outfile)
        api.Trigger_AddCommand(clientReq,
                               client.node_name,
                               client.workload_name,
                               clientCmd,
                               background=True)

    #Initiate connections
    store["client_req"] = api.Trigger(clientReq)

    return api.types.status.SUCCESS
Code example #18
def athena_sec_app_kill(node_name=None, nic_name=None):
    node_nic_names = []

    if (not node_name and nic_name) or (node_name and not nic_name):
        raise Exception("specify both node_name and nic_name or neither")

    if node_name and nic_name:
        node_nic_names.append((node_name, nic_name))
    else:
        node_nic_names = get_athena_node_nic_names()

    for nname, nicname in node_nic_names:
        req = api.Trigger_CreateExecuteCommandsRequest()

        cmd = "ps -aef | grep athena_app | grep soft-init | grep -v grep"
        api.Trigger_AddNaplesCommand(req, nname, cmd, nicname)

        resp = api.Trigger(req)
        ps_cmd_resp = resp.commands[0]
        api.PrintCommandResults(ps_cmd_resp)

        if "athena_app" in ps_cmd_resp.stdout:
            athena_sec_app_pid = ps_cmd_resp.stdout.strip().split()[1]

            api.Logger.info("athena sec app already running on node %s "
                            "nic %s with pid %s. Killing it." %
                            (nname, nicname, athena_sec_app_pid))

            req = api.Trigger_CreateExecuteCommandsRequest()
            api.Trigger_AddNaplesCommand(req, nname, "pkill -n athena_app",
                                         nicname)

            resp = api.Trigger(req)
            pkill_cmd_resp = resp.commands[0]
            api.PrintCommandResults(pkill_cmd_resp)

            if pkill_cmd_resp.exit_code != 0:
                api.Logger.info("pkill failed for athena sec app")
                return api.types.status.FAILURE

            # sleep for kill to complete
            misc_utils.Sleep(ATHENA_SEC_APP_KILL_WAIT_TIME)

        else:
            api.Logger.info("athena sec app not running on node %s nic %s" %
                            (nname, nicname))

    return api.types.status.SUCCESS
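
Together with athena_sec_app_start shown earlier, the kill helper above suggests a simple restart wrapper; a minimal sketch, assuming both helpers keep the signatures and return codes shown in these examples.

def athena_sec_app_restart(node_name=None, nic_name=None):
    # Kill any running instance first, then start a fresh one.
    ret = athena_sec_app_kill(node_name, nic_name)
    if ret != api.types.status.SUCCESS:
        return ret
    return athena_sec_app_start(node_name, nic_name)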
Code example #19
def Trigger(tc):
    if tc.skip:
        return api.types.status.SUCCESS

    status = api.types.status.SUCCESS
    # clean up resources before run
    subif_utils.clearAll()

    # initialize config:
    subif_utils.initialize_tagged_config_workloads()

    # Delete existing subinterfaces
    __delete_subifs()

    time.sleep(3)
    # Create subinterfaces for every workload/host interface
    # as per <subif_count>
    __create_subifs()

    time.sleep(2)
    tc.workload_pairs = api.GetRemoteWorkloadPairs()
    tc.cmd_cookies = []
    req1 = api.Trigger_CreateExecuteCommandsRequest(serial = True)

    __run_ping_test(req1, tc)
    tc.resp2 = api.Trigger(req1)

    result = changeMacAddrTrigger(tc)
    time.sleep(5)
    api.Logger.debug("UC MAC filter : Trigger -> Change MAC addresses result ", result)

    api.Logger.debug("UC MAC filter : LIF reset ")
    # reset the LIFs if requested
    lif_reset  = getattr(tc.args, 'lif_reset', False)
    if lif_reset:
        if do_bsd_lif_resets():
            api.Logger.error("UC MAC filter : LIF reset failed")
            status = api.types.status.FAILURE

    tc.wload_ep_set, tc.host_ep_set, tc.naples_ep_set, tc.hal_ep_set = ValidateMacRegistration(tc)

    req = api.Trigger_CreateExecuteCommandsRequest(serial = True)

    tc.workload_pairs = api.GetRemoteWorkloadPairs()
    __run_ping_test(req, tc)
    tc.resp = api.Trigger(req)

    return status
Code example #20
    def __node_api_handler(self, url, json_data=None, oper=CfgOper.ADD):
        if oper == CfgOper.DELETE:
            oper = "DELETE"
        elif oper == CfgOper.ADD:
            oper = "POST"
        elif oper == CfgOper.UPDATE:
            oper = "PUT"
        elif oper == CfgOper.GET:
            oper = "GET"
        else:
            print(oper)
            assert (0)
        if GlobalOptions.debug:
            api.Logger.info("Url : %s" % url)

        cmd = None
        if json_data and len(json.dumps(json_data)) > 100000:
            filename = "/tmp/temp_config.json"
            with open(filename, 'w') as outfile:
                json.dump(json_data, outfile)

            req = api.Trigger_CreateAllParallelCommandsRequest()
            cmd = ["rm", "-rf", "temp_config.json"]
            cmd = " ".join(cmd)
            api.Trigger_AddHostCommand(req, self.host_name, cmd, timeout=3600)
            api.Trigger(req)

            resp = api.CopyToHost(self.host_name, [filename], "")
            if not api.IsApiResponseOk(resp):
                assert (0)
            cmd = [
                "curl", "-X", oper, "-d", "@temp_config.json", "-k", "-H",
                "\"Content-Type:application/json\"", url
            ]
        else:
            cmd = [
                "curl", "-X", oper, "-k", "-d",
                "\'" + json.dumps(json_data) + "\'" if json_data else " ",
                "-H", "\"Content-Type:application/json\"", url
            ]
        cmd = " ".join(cmd)
        req = api.Trigger_CreateAllParallelCommandsRequest()
        api.Trigger_AddHostCommand(req, self.host_name, cmd, timeout=3600)

        resp = api.Trigger(req)
        if GlobalOptions.debug:
            print(" ".join(cmd))
        return resp.commands[0].stdout
Code example #21
def bsd_flow_ctrl(node, inf, fc_type, fc_val, pattern):
    req = api.Trigger_CreateExecuteCommandsRequest(serial=True)
    api.Trigger_AddHostCommand(req, node, 'sysctl dev.%s.flow_ctrl=%d' %
                               (host.GetNaplesSysctl(inf), fc_type))
    api.Trigger_AddHostCommand(req, node, 'sysctl dev.%s.link_pause=%d' %
                               (host.GetNaplesSysctl(inf), fc_val))
    api.Trigger_AddHostCommand(req, node, BSD_IFCONFIG_MEDIA_CMD  % inf)
    api.Logger.info("Setting %s link type: %d value: %d pattern: %s" %
                    (inf, fc_type, fc_val, pattern))
    resp = api.Trigger(req)
    if resp is None:
        return -1
    # We are interested only in the last command's response.
    cmd = resp.commands[2]

    if cmd.exit_code != 0:
        api.Logger.error("Failed exit code: %d link type: %d value: %d, stderr: %s" %
                         (cmd.exit_code, fc_type, fc_val, cmd.stderr))
        api.PrintCommandResults(cmd)
        return -1

    # The expected media pattern is a regular expression; str.find() cannot
    # evaluate it, so use re.search() here (assumes 're' is imported and that
    # the configured pause setting is expected to appear in the media output).
    if re.search(pattern, cmd.stdout) is None:
        api.Logger.error("Failed link type: %d value: %d, stdout: %s" %
                         (fc_type, fc_val, cmd.stdout))
        api.PrintCommandResults(cmd)
        return -1
    return 0
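
A possible call site for bsd_flow_ctrl is sketched below; the fc_type/fc_val values and the expected "txpause" media pattern are placeholders, and iterating api.GetNaplesHostInterfaces mirrors the LIF reset example above.

def __set_flow_ctrl_on_node(node, fc_type=1, fc_val=1, pattern="txpause"):
    # Placeholder values: fc_type/fc_val pick one pause setting and 'pattern'
    # is the media flag expected in the ifconfig output.
    for inf in api.GetNaplesHostInterfaces(node):
        if bsd_flow_ctrl(node, inf, fc_type, fc_val, pattern) != 0:
            return api.types.status.FAILURE
    return api.types.status.SUCCESS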
Code example #22
def getLinuxStats(node, intf, pat1):
    stats_map = []
    
    cmd = 'ethtool -S ' + intf + ' | grep -e ' + pat1 + ' | cut -d ":" -f 2'
    
    req = api.Trigger_CreateExecuteCommandsRequest(serial=True)
    #api.Logger.info("Getting stats for: %s on host: %s intf: %s" 
    #                %(cmd, node.node_name, intf))
    api.Trigger_AddHostCommand(req, node.node_name, cmd)
    resp = api.Trigger(req)
    if resp is None:
        api.Logger.error("Failed to run: %s on host: %s intf: %s" 
                         %(cmd, node.node_name, intf))
        return None

    cmd = resp.commands[0]
    if cmd.exit_code != 0:
        api.Logger.error(
            "Failed to run: %s for host: %s, stderr: %s"
            %(cmd.command, node.node_name, cmd.stderr))
        api.PrintCommandResults(cmd)
        return None

    if cmd.stdout == "":
        api.Logger.error("Output is empty for: %s on host: %s intf: %s" 
                         %(cmd.command, node.node_name, intf))
        api.PrintCommandResults(cmd)
        return None
    
    stats_map = cmd.stdout.splitlines()
    stats_map = list(map(int,stats_map))
    
    return stats_map
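
The counter lists returned by getLinuxStats lend themselves to a before/after comparison; the sketch below assumes a hypothetical default pattern ("rx_packets") and a simple "must not decrease" criterion, neither of which comes from the original code.

def __check_stats_not_decreasing(node, intf, pattern="rx_packets"):
    # Snapshot the counters, wait while traffic runs elsewhere, then compare.
    before = getLinuxStats(node, intf, pattern)
    if before is None:
        return api.types.status.FAILURE
    time.sleep(10)
    after = getLinuxStats(node, intf, pattern)
    if after is None:
        return api.types.status.FAILURE
    for old, new in zip(before, after):
        if new < old:
            api.Logger.error("Counter decreased on %s: %d -> %d" % (intf, old, new))
            return api.types.status.FAILURE
    return api.types.status.SUCCESS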
Code example #23
def Trigger(tc):

    #==============================================================
    # trigger the commands
    #==============================================================

    req = api.Trigger_CreateExecuteCommandsRequest(serial=True)

    w1 = tc.w[0]
    w2 = tc.w[1]

    cmd = 'sysctl dev.ionic.0.qos.classification_type=' + str(tc.class_type)

    # Trigger Classification type config
    if w1.IsNaples():
        api.Trigger_AddCommand(req, w1.node_name, w1.workload_name, cmd)
    if w2.IsNaples():
        api.Trigger_AddCommand(req, w2.node_name, w2.workload_name, cmd)

    #==============================================================
    # trigger the request
    #==============================================================
    trig_resp = api.Trigger(req)
    term_resp = api.Trigger_TerminateAllCommands(trig_resp)

    tc.resp = api.Trigger_AggregateCommandsResponse(trig_resp, term_resp)

    return api.types.status.SUCCESS
Code example #24
def SetupDNSServer(server):
    node = server.node_name
    workload = server.workload_name
    dir_path = os.path.dirname(os.path.realpath(__file__))
    zonefile = dir_path + '/' + "example.com.zone"
    api.Logger.info("fullpath %s" % (zonefile))
    resp = api.CopyToWorkload(node, workload, [zonefile], 'dnsdir')
    if resp is None:
        return None

    named_conf = dir_path + '/' + "named.conf"
    resp = api.CopyToWorkload(node, workload, [named_conf], 'dnsdir')
    if resp is None:
        return None

    req = api.Trigger_CreateExecuteCommandsRequest(serial=True)

    api.Trigger_AddCommand(req, node, workload,
                           "yes | cp dnsdir/named.conf /etc/")
    api.Trigger_AddCommand(
        req, node, workload,
        "ex -s -c \'%s/192.168.100.102/%s/g|x\' /etc/named.conf" %
        ("%s", server.ip_address))
    api.Trigger_AddCommand(req, node, workload,
                           "yes | cp dnsdir/example.com.zone /var/named/")
    api.Trigger_AddCommand(req, node, workload, "systemctl start named")
    api.Trigger_AddCommand(req, node, workload, "systemctl enable named")
    trig_resp = api.Trigger(req)
    term_resp = api.Trigger_TerminateAllCommands(trig_resp)
    for cmd in trig_resp.commands:
        api.PrintCommandResults(cmd)
    return api.types.status.SUCCESS
Code example #25
def getWindowsStats(node, intf, pat1):
    name = host.GetWindowsIntName(node.node_name, intf)
    cmd = "/mnt/c/Windows/Temp/drivers-windows/IonicConfig.exe DevStats -n '%s' | grep -e %s |" \
           " cut -d ':' -f 2" % (name, pat1)
    
    req = api.Trigger_CreateExecuteCommandsRequest(serial=True)
    api.Trigger_AddHostCommand(req, node.node_name, cmd)
    resp = api.Trigger(req)
    if resp is None:
        api.Logger.error("Failed to run: %s on host: %s intf: %s" 
                         %(cmd, node.node_name, intf))
        return None

    cmd = resp.commands[0]
    if cmd.exit_code != 0:
        api.Logger.error(
            "Failed to run: %s for host: %s, stderr: %s"
            %(cmd.command, node.node_name, cmd.stderr))
        api.PrintCommandResults(cmd)
        return None

    if cmd.stdout == "":
        api.Logger.error("Output is empty for: %s on host: %s intf: %s" 
                         %(cmd.command, node.node_name, intf))
        api.PrintCommandResults(cmd)
        return None
    
    stats_map = cmd.stdout.splitlines()
    stats_map = list(map(int,stats_map))
    
    return stats_map
Code example #26
def pingAllRemoteWloadPairs(workload_pairs, iterators):
    cmd_cookies = []
    req = api.Trigger_CreateExecuteCommandsRequest(serial=False)
    for pair in workload_pairs:
        w1 = pair[0]
        w2 = pair[1]
        if iterators.ipaf == 'ipv4':
            cmd_cookie = "ping -c 5 -i 0.2 -W 2000 -s %d -S %s %s" % (
                iterators.pktsize, w1.ip_address, w2.ip_address)
            api.Logger.verbose(
                "Ping test cmd %s from %s(%s %s) --> %s(%s %s)" %
                (cmd_cookie, w1.workload_name, w1.ip_address, w1.interface,
                 w2.workload_name, w2.ip_address, w2.interface))
        else:
            cmd_cookie = "ping6 -c 5 -i 0.2 -s %d -S %s -I %s %s" % (
                iterators.pktsize, w1.ipv6_address, w1.interface,
                w2.ipv6_address)
            api.Logger.verbose(
                "Ping test cmd %s from %s(%s %s) --> %s(%s %s)" %
                (cmd_cookie, w1.workload_name, w1.ipv6_address, w1.interface,
                 w2.workload_name, w2.ipv6_address, w2.interface))
        api.Trigger_AddCommand(req, w1.node_name, w1.workload_name, cmd_cookie)
        cmd_cookies.append(cmd_cookie)

    trig_resp = api.Trigger(req)
    term_resp = api.Trigger_TerminateAllCommands(trig_resp)
    resp = api.Trigger_AggregateCommandsResponse(trig_resp, term_resp)
    return cmd_cookies, resp
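
The (cmd_cookies, resp) pair returned above lines one cookie up with one command result; a small verification sketch follows, with the function name chosen for illustration.

def verifyPingResponses(cmd_cookies, resp):
    result = api.types.status.SUCCESS
    for cookie, cmd in zip(cmd_cookies, resp.commands):
        if cmd.exit_code != 0:
            api.Logger.error("Ping failed: %s" % cookie)
            api.PrintCommandResults(cmd)
            result = api.types.status.FAILURE
    return result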
Code example #27
def Trigger(tc):
    if tc.skip: return api.types.status.SUCCESS

    req = None
    interval = "0.2"
    if not api.IsSimulation():
        req = api.Trigger_CreateAllParallelCommandsRequest()
    else:
        req = api.Trigger_CreateExecuteCommandsRequest(serial = False)
        interval = "3"
    tc.cmd_cookies = []

    for pair in tc.workload_pairs:
        w1 = pair[0]
        w2 = pair[1]
        if tc.iterators.ipaf == 'ipv6':
            cmd_cookie = "%s(%s) --> %s(%s)" %\
                         (w1.workload_name, w1.ipv6_address, w2.workload_name, w2.ipv6_address)
            api.Trigger_AddCommand(req, w1.node_name, w1.workload_name,
                                   "ping6 -i %s -c 20 -s %d %s" % (interval, tc.iterators.pktsize, w2.ipv6_address))
        else:
            cmd_cookie = "%s(%s) --> %s(%s)" %\
                         (w1.workload_name, w1.ip_address, w2.workload_name, w2.ip_address)
            api.Trigger_AddCommand(req, w1.node_name, w1.workload_name,
                                   "ping -i %s -c 20 -s %d %s" % (interval, tc.iterators.pktsize, w2.ip_address))
        api.Logger.info("Ping test from %s" % (cmd_cookie))
        tc.cmd_cookies.append(cmd_cookie)

    tc.resp = api.Trigger(req)

    return api.types.status.SUCCESS
Code example #28
def start_single_pcap_capture(tc):
    try:
        req = api.Trigger_CreateExecuteCommandsRequest(serial=True)
        tc.pcap_cmds = []
        # Capture on the first remote workload pair's interface, on that workload's node.
        wl = api.GetRemoteWorkloadPairs()[0][0]
        intf = wl.interface
        tc.pcap_filename = pcap_file_name(intf)
        cmd = cmd_builder.tcpdump_cmd(intf, tc.pcap_filename)
        api.Trigger_AddHostCommand(req, wl.node_name, cmd, background=True)
        resp = api.Trigger(req)
        for cmd in resp.commands:
            if cmd.handle == None or len(cmd.handle) == 0:
                api.Logger.error("Error starting pcap : %s " % cmd.command)
                api.Logger.error("Std Output : %s " % cmd.stdout)
                api.Logger.error("Std Err :  %s " % cmd.stdout)
                return api.types.status.FAILURE
            api.Logger.info("Success running cmd : %s" % cmd.command)
        tc.pcap_trigger = resp
        return api.types.status.SUCCESS
    except:
        api.Logger.info("failed to start single pcap capture")
        api.Logger.debug(
            "failed to start single pcap capture. error was: {0}".format(
                traceback.format_exc()))
        return api.types.status.SUCCESS
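
Since the tcpdump above runs in the background and its trigger response is kept in tc.pcap_trigger, a matching stop helper is a natural counterpart; this is only a sketch, using Trigger_TerminateAllCommands as seen elsewhere in these examples.

def stop_single_pcap_capture(tc):
    # Terminate the background tcpdump started in start_single_pcap_capture().
    if getattr(tc, "pcap_trigger", None) is None:
        return api.types.status.SUCCESS
    api.Trigger_TerminateAllCommands(tc.pcap_trigger)
    return api.types.status.SUCCESS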
Code example #29
def HitlessTriggerUpdateRequest(tc):
    result = api.types.status.SUCCESS
    if api.IsDryrun():
        return result

    background_req = api.Trigger_CreateExecuteCommandsRequest(serial=False)
    # start upgrade manager process
    for node in tc.nodes:
        cmd = "/nic/tools/start-upgmgr.sh -n "
        api.Logger.info("Starting Upgrade Manager %s" % (cmd))
        api.Trigger_AddNaplesCommand(background_req, node, cmd, background=True)
    api.Trigger(background_req)

    # wait for upgrade manager to come up
    misc_utils.Sleep(10)
    for node in tc.nodes:
        # initiate upgrade client objects
        # Generate Upgrade objects
        UpgradeClient.GenerateUpgradeObjects(node, api.GetNicMgmtIP(node))

        upg_obj = UpgradeClient.GetUpgradeObject(node)
        upg_obj.SetPkgName(tc.pkg_name)
        upg_obj.SetUpgMode(upgrade_pb2.UPGRADE_MODE_HITLESS)
        upg_status = upg_obj.UpgradeReq()
        api.Logger.info(
            f"Hitless Upgrade request for {node} returned status {upg_status}")
        if upg_status != upgrade_pb2.UPGRADE_STATUS_OK:
            api.Logger.error(f"Failed to start upgrade manager on {node}")
            result = api.types.status.FAILURE
            continue
    return result
Code example #30
def Trigger(tc):
    tc.contexts = []
    ctxt = IperfTestContext()
    ctxt.req = api.Trigger_CreateAllParallelCommandsRequest()
    ctxt.cmd_cookies = []
    for tunnel in tc.tunnels:
        w1 = tunnel.ltep
        w2 = tunnel.rtep

        cmd_cookie = "Server: %s(%s) <--> Client: %s(%s)" %\
                       (w1.workload_name, w1.ip_address, w2.workload_name, w2.ip_address)
        api.Logger.info("Starting Iperf test from %s" % (cmd_cookie))

        basecmd = 'iperf -p %d ' % api.AllocateTcpPort()
        if tc.iterators.proto == 'udp':
            basecmd = 'iperf -u -p %d ' % api.AllocateUdpPort()
        api.Trigger_AddCommand(ctxt.req, w1.node_name, w1.workload_name,
                               "%s -s -t 300" % basecmd, background = True)
        api.Trigger_AddCommand(ctxt.req, w2.node_name, w2.workload_name,
                               "%s -c %s" % (basecmd, w1.ip_address))

        ctxt.cmd_cookies.append(cmd_cookie)
        ctxt.cmd_cookies.append(cmd_cookie)
    trig_resp = api.Trigger(ctxt.req)
    term_resp = api.Trigger_TerminateAllCommands(trig_resp)
    ctxt.resp = api.Trigger_AggregateCommandsResponse(trig_resp, term_resp)
    tc.context = ctxt

    return api.types.status.SUCCESS