def Trigger(tc):
    """Start a second athena_app on every Naples node and verify it came up.

    Returns api.types.status.SUCCESS when the app is running on all nodes,
    FAILURE on any start/ps error.
    """
    naples_nodes = api.GetNaplesHostnames()
    for node in naples_nodes:
        api.Logger.info("Start second athena_app to pick up policy.json")
        req = api.Trigger_CreateExecuteCommandsRequest()
        api.Trigger_AddNaplesCommand(req, node, "/nic/tools/start-sec-agent.sh")
        # Extra CR/LF sent as a follow-up "command" -- presumably to flush
        # the Naples console after the start script; TODO confirm it is needed.
        api.Trigger_AddNaplesCommand(req, node, "\r\n")
        resp = api.Trigger(req)
        # Only the start script's result (first command) is checked.
        cmd = resp.commands[0]
        api.PrintCommandResults(cmd)
        if cmd.exit_code != 0:
            api.Logger.error("Start second athena_app failed on node {}".format(node))
            return api.types.status.FAILURE
    # Give the agent time to initialize before probing for the process.
    time.sleep(10)
    for node in naples_nodes:
        req = api.Trigger_CreateExecuteCommandsRequest()
        api.Trigger_AddNaplesCommand(req, node, "ps -aef | grep athena_app")
        resp = api.Trigger(req)
        cmd = resp.commands[0]
        api.PrintCommandResults(cmd)
        if cmd.exit_code != 0:
            api.Logger.error("ps failed on Node {}".format(node))
            return api.types.status.FAILURE
        if "athena_app" not in cmd.stdout:
            # TODO: If athena_app is not running, run start_agent.sh manually
            api.Logger.error("no athena_app running on Node {}, need to start athena_app first".format(node))
            return api.types.status.FAILURE
        # Second whitespace-separated field of the ps output is the PID.
        athena_sec_app_pid = cmd.stdout.strip().split()[1]
        api.Logger.info("athena_app up and running on Node {} with PID {}".format(node, athena_sec_app_pid))
    return api.types.status.SUCCESS
def SetVFMac(hostname, pf, vfid, mac):
    """Set the MAC address of VF `vfid` under PF `pf` on `hostname`.

    Uses `ip link set ... vf ... mac ...` first, then `ifconfig ... hw ether`
    on the VF netdev (because `ip link set` does not update the VF's netdev).
    Returns api.types.status.SUCCESS / FAILURE.
    """
    try:
        cmd = "ip link set %s vf %d mac %s" % (pf, vfid, mac)
        resp = __execute_cmd(hostname, pf, cmd)
        if resp.exit_code != 0:
            api.Logger.error("get vf %d info failed on host %s PF %s" % (vfid, hostname, pf))
            api.PrintCommandResults(resp)
            if resp.stderr.find("RTNETLINK answers: Input/output error") != -1:
                # I/O error from the kernel typically means the FW does not
                # implement the VF configuration API.
                api.Logger.error("Is the VF API not supported in this FW?")
            return api.types.status.FAILURE
        # ip link set does not update VF's netdev, so using ifconfig to update it.
        vf_intf = GetVFName(hostname, pf, vfid)
        cmd = "ifconfig %s hw ether %s" % (vf_intf, mac)
        resp = __execute_cmd(hostname, pf, cmd)
        if resp.exit_code != 0:
            api.Logger.error(
                "Failed to set mac addr for vf_intf %s on host %s" % (vf_intf, hostname))
            api.PrintCommandResults(resp)
            return api.types.status.FAILURE
    except:
        # NOTE(review): bare except -- any error (including programming
        # errors) is logged with a traceback and reported as FAILURE.
        api.Logger.error(traceback.format_exc())
        return api.types.status.FAILURE
    return api.types.status.SUCCESS
def Teardown(tc):
    """Clean up after the upgrade test.

    Removes the same-firmware-upgrade marker from each Naples and deletes any
    pending rollout object via the Naples management REST API.
    Returns api.types.status.SUCCESS / FAILURE.
    """
    if tc.skip:
        return api.types.status.SUCCESS
    req = api.Trigger_CreateExecuteCommandsRequest()
    for node in tc.Nodes:
        api.Trigger_AddNaplesCommand(req, node, "rm -rf /data/upgrade_to_same_firmware_allowed")
    resp = api.Trigger(req)
    try:
        for cmd_resp in resp.commands:
            if cmd_resp.exit_code != 0:
                # Log but do not abort -- removal failures are non-fatal here.
                api.PrintCommandResults(cmd_resp)
                api.Logger.error("Teardown failed %s", cmd_resp.command)
    except:
        # NOTE(review): bare except hides the real error; consider narrowing.
        api.Logger.error("EXCEPTION occured in Naples command")
        return api.types.status.FAILURE
    # Delete any pending rollout object through the mgmt REST endpoint.
    req = api.Trigger_CreateExecuteCommandsRequest()
    for n in tc.Nodes:
        cmd = 'curl -k -X DELETE https://' + api.GetNicIntMgmtIP(n) + ':'+utils.GetNaplesMgmtPort()+'/api/v1/naples/rollout/'
        api.Trigger_AddHostCommand(req, n, cmd, timeout=100)
    tc.resp = api.Trigger(req)
    for cmd in tc.resp.commands:
        api.PrintCommandResults(cmd)
        if cmd.exit_code != 0:
            return api.types.status.FAILURE
    return api.types.status.SUCCESS
def bsd_flow_ctrl(node, inf, fc_type, fc_val, pattern):
    """Set flow-control sysctls on a FreeBSD ionic interface and check media.

    Runs `sysctl dev.<ionic>.flow_ctrl=<fc_type>` and
    `sysctl dev.<ionic>.link_pause=<fc_val>`, then inspects the interface's
    ifconfig media output against regex `pattern`.

    Returns 0 on success, -1 on any failure.
    """
    import re  # local import so this fix is self-contained

    req = api.Trigger_CreateExecuteCommandsRequest(serial=True)
    api.Trigger_AddHostCommand(req, node, 'sysctl dev.%s.flow_ctrl=%d' %
                               (host.GetNaplesSysctl(inf), fc_type))
    api.Trigger_AddHostCommand(req, node, 'sysctl dev.%s.link_pause=%d' %
                               (host.GetNaplesSysctl(inf), fc_val))
    api.Trigger_AddHostCommand(req, node, BSD_IFCONFIG_MEDIA_CMD % inf)
    api.Logger.info("Setting %s link type: %d value: %d pattern: %s" %
                    (inf, fc_type, fc_val, pattern))
    resp = api.Trigger(req)
    if resp is None:
        return -1

    # We are interested in only the last command's response (ifconfig media).
    cmd = resp.commands[2]
    if cmd.exit_code != 0:
        api.Logger.error("Failed exit code: %d link type: %d value: %d, stderr: %s" %
                         (cmd.exit_code, fc_type, fc_val, cmd.stderr))
        api.PrintCommandResults(cmd)
        return -1

    # BUG FIX: the original used str.find() on a regex string
    # ("[\n\t]*" + pattern + "[\n\t]*"), which searches for those characters
    # LITERALLY and so could never match; use a real regex search.
    # NOTE(review): polarity preserved from the original (a match is treated
    # as failure) -- confirm that `pattern` describes the unwanted media line.
    if re.search("[\n\t]*" + pattern + "[\n\t]*", cmd.stdout) is not None:
        # BUG FIX: the original format string had 3 placeholders for 4
        # arguments (TypeError at runtime); the exit-code field was missing.
        api.Logger.error("Failed exit code: %d link type: %d value: %d, stdout: %s" %
                         (cmd.exit_code, fc_type, fc_val, cmd.stdout))
        api.PrintCommandResults(cmd)
        return -1
    return 0
def Teardown(tc):
    """Remove upgrade state files from each Naples and delete rollout objects.

    Cleanup failures on the Naples side are logged but non-fatal; a failed
    rollout DELETE is treated as FAILURE.
    """
    req = api.Trigger_CreateExecuteCommandsRequest()
    for node in tc.Nodes:
        api.Trigger_AddNaplesCommand(
            req, node, "rm -rf /update/upgrade_halt_state_machine")
        api.Trigger_AddNaplesCommand(req, node, "rm -rf /update/pcieport_upgdata")
        api.Trigger_AddNaplesCommand(req, node, "rm -rf /update/pciemgr_upgdata")
        api.Trigger_AddNaplesCommand(req, node, "rm -rf /update/pciemgr_upgrollback")
        api.Trigger_AddNaplesCommand(req, node, "rm -rf /update/nicmgr_upgstate")
        api.Trigger_AddNaplesCommand(
            req, node, "rm -rf /data/upgrade_to_same_firmware_allowed")
    resp = api.Trigger(req)
    for cmd_resp in resp.commands:
        api.PrintCommandResults(cmd_resp)
        if cmd_resp.exit_code != 0:
            # Logged only; the rm commands are best-effort.
            api.Logger.error("Setup failed %s", cmd_resp.command)
    # Delete any pending rollout object via the Naples mgmt REST endpoint.
    req = api.Trigger_CreateExecuteCommandsRequest()
    for n in tc.Nodes:
        cmd = 'curl -k -X DELETE https://' + api.GetNicIntMgmtIP(
            n) + ':8888/api/v1/naples/rollout/'
        api.Trigger_AddHostCommand(req, n, cmd)
    tc.resp = api.Trigger(req)
    for cmd in tc.resp.commands:
        api.PrintCommandResults(cmd)
        if cmd.exit_code != 0:
            return api.types.status.FAILURE
    return api.types.status.SUCCESS
def _run_vmedia_traffic(node_name):
    """Find the virtual-media CD device on the host and read it end to end.

    Probes /dev/sr0 then /dev/sr1, retrying up to `retries` times with a 30s
    pause, then runs a full `dd` read of the device.

    Raises:
        RuntimeError: if no device appears after all retries, or if the
            dd read fails.
    """
    retries = 10
    for _i in range(retries):
        cddev = "/dev/sr0"
        cmd = "ls %s" % cddev
        resp = __execute_cmd(node_name, cmd)
        cmd = resp.commands.pop()
        api.PrintCommandResults(cmd)
        if cmd.exit_code != 0:
            # sr0 absent -- try the alternate device node.
            cddev = "/dev/sr1"
            cmd = "ls %s" % cddev
            resp = __execute_cmd(node_name, cmd)
            cmd = resp.commands.pop()
            api.PrintCommandResults(cmd)
            if cmd.exit_code != 0:
                api.Logger.info("Device not available %s" % cddev)
                if _i < (retries - 1):
                    api.Logger.info("Retrying after 30s...")
                    time.sleep(30)
                # BUG FIX: previously the last failed attempt fell through to
                # the dd below (on a nonexistent device) and raised the wrong
                # error; now exhaustion falls out of the loop to the final
                # "device not detected" RuntimeError.
                continue
        api.Logger.info("Vmedia is mapped to device %s" % (cddev))
        # Full sequential read of the mapped media; long timeout as the image
        # can be large.
        cmd = "dd if=%s of=/dev/null bs=1M" % cddev
        resp = __execute_cmd(node_name, cmd, timeout=3600)
        cmd = resp.commands.pop()
        api.PrintCommandResults(cmd)
        if cmd.exit_code != 0:
            raise RuntimeError("Vmedia traffic test is not successfull")
        return
    raise RuntimeError("Vmedia device was not detected on the host after %d retries" % (retries))
def getWindowsStats(node, intf, pat1):
    """Collect IonicConfig DevStats counters matching `pat1` on Windows.

    Returns the matching counter values as a list of ints, or None on error.
    """
    win_name = host.GetWindowsIntName(node.node_name, intf)
    stats_cmd = ("/mnt/c/Windows/Temp/drivers-windows/IonicConfig.exe DevStats -n '%s' | grep -e %s |"
                 " cut -d ':' -f 2" % (win_name, pat1))

    req = api.Trigger_CreateExecuteCommandsRequest(serial=True)
    api.Trigger_AddHostCommand(req, node.node_name, stats_cmd)
    resp = api.Trigger(req)
    if resp is None:
        api.Logger.error("Failed to run: %s on host: %s intf: %s"
                         % (stats_cmd, node.node_name, intf))
        return None

    result = resp.commands[0]
    if result.exit_code != 0:
        api.Logger.error("Failed to run: %s for host: %s, stderr: %s"
                         % (result, node.node_name, result.stderr))
        api.PrintCommandResults(result)
        return None
    if result.stdout == "":
        api.Logger.error("Output is empty for: %s on host: %s intf: %s"
                         % (result, node.node_name, intf))
        api.PrintCommandResults(result)
        return None

    # One counter value per matched line.
    return [int(line) for line in result.stdout.splitlines()]
def Verify(tc):
    """Evaluate the two triggered command batches against their cookies.

    Non-zero exit codes are tolerated for "Before"/"After" bookkeeping and
    Hping / reverse-direction session commands; stdout content is additionally
    validated for the flow-gate / session show commands.
    """
    if tc.resp is None:
        return api.types.status.FAILURE
    result = api.types.status.SUCCESS
    api.Logger.info("Results for %s" % (tc.cmd_descr))
    # tc.cmd_cookies is indexed across BOTH responses, in trigger order.
    cookie_idx = 0
    for cmd in tc.resp.commands:
        api.PrintCommandResults(cmd)
        if cmd.exit_code != 0 and not api.Trigger_IsBackgroundCommand(cmd):
            # NOTE(review): assigning SUCCESS here can overwrite a FAILURE
            # recorded for an earlier command -- confirm this is intended.
            if (tc.cmd_cookies[cookie_idx].find("Before") != -1 or \
                tc.cmd_cookies[cookie_idx].find("After") != -1):
                result = api.types.status.SUCCESS
            else:
                result = api.types.status.FAILURE
        # Flow-gate show output must not be empty.
        if tc.cmd_cookies[cookie_idx].find("show security flow-gate") != -1 and \
           cmd.stdout == '':
            result = api.types.status.FAILURE
        cookie_idx += 1
    for cmd in tc.resp2.commands:
        api.PrintCommandResults(cmd)
        if cmd.exit_code != 0 and not api.Trigger_IsBackgroundCommand(cmd):
            if tc.cmd_cookies[cookie_idx].find("Hping") != -1 or \
               tc.cmd_cookies[cookie_idx].find("show session reverse direction") != -1:
                result = api.types.status.SUCCESS
            else:
                result = api.types.status.FAILURE
        # A session in the other direction must NOT exist.
        if tc.cmd_cookies[cookie_idx].find("show session different dir") != -1 and \
           cmd.stdout != '':
            result = api.types.status.FAILURE
        cookie_idx += 1
    return result
def LoadDriver (os_type, node):
    """Load the ionic driver on `node` (insmod on Linux, kldload on BSD).

    Returns api.types.status.SUCCESS / FAILURE.
    """
    req = api.Trigger_CreateExecuteCommandsRequest(serial = True)
    if os_type == OS_TYPE_LINUX:
        api.Trigger_AddHostCommand(req, node, "insmod " + LinuxDriverPath)
    elif os_type == OS_TYPE_BSD:
        api.Trigger_AddHostCommand(req, node, "kldload " + FreeBSDDriverPath)
    else:
        api.Logger.info("Unknown os_type - %s" % os_type)
        return api.types.status.FAILURE
    resp = api.Trigger(req)
    if resp is None:
        return api.types.status.FAILURE
    for cmd in resp.commands:
        if cmd.exit_code != 0:
            # NOTE(review): the branches below look inverted -- "File exists"
            # / "already loaded" usually mean the driver was ALREADY loaded
            # (a tolerable failure), yet those cases return FAILURE while the
            # BSD else-branch treats other errors as "expected to fail".
            # Confirm the intended polarity before relying on this.
            if os_type == OS_TYPE_LINUX:
                if cmd.stdout.find("File exists") != -1:
                    api.Logger.info("Load Driver Failed")
                    api.PrintCommandResults(cmd)
                    return api.types.status.FAILURE
            elif os_type == OS_TYPE_BSD:
                if cmd.stdout.find("already loaded") != -1:
                    api.Logger.info("Load Driver Failed")
                    api.PrintCommandResults(cmd)
                    return api.types.status.FAILURE
                else:
                    api.Logger.info("Driver was already loaded. Load is expected to fail")
            else:
                api.Logger.info("Unknown os_type - %s" % os_type)
                return api.types.status.FAILURE
    return api.types.status.SUCCESS
def getLinuxStats(node, intf, pat1):
    """Fetch `ethtool -S` counters matching `pat1` for a Linux interface.

    Returns the matching counter values as a list of ints, or None on error.
    """
    stats_cmd = 'ethtool -S ' + intf + ' | grep -e ' + pat1 + ' | cut -d ":" -f 2'

    req = api.Trigger_CreateExecuteCommandsRequest(serial=True)
    api.Trigger_AddHostCommand(req, node.node_name, stats_cmd)
    resp = api.Trigger(req)
    if resp is None:
        api.Logger.error("Failed to run: %s on host: %s intf: %s"
                         % (stats_cmd, node.node_name, intf))
        return None

    result = resp.commands[0]
    if result.exit_code != 0:
        api.Logger.error("Failed to run: %s for host: %s, stderr: %s"
                         % (result, node.node_name, result.stderr))
        api.PrintCommandResults(result)
        return None
    if result.stdout == "":
        api.Logger.error("Output is empty for: %s on host: %s intf: %s"
                         % (result, node.node_name, intf))
        api.PrintCommandResults(result)
        return None

    # One counter value per matched line.
    return [int(line) for line in result.stdout.splitlines()]
def Verify(tc):
    """Verify a Naples upgrade: rollout status, traffic loss, logs, connectivity.

    Polls each node's rollout REST endpoint, checks the reported opcode and
    opstatus (expecting 'failure' + matching message for negative options),
    then validates background-ping loss, SSH, upgrade logs and connectivity.
    """
    api.Logger.info(f"Sleeping for {tc.args.sleep} ")
    time.sleep(tc.args.sleep)
    if tc.resp is None:
        api.Logger.error("No response for Naples Upgrade POST request")
        return api.types.status.FAILURE
    for cmd in tc.resp.commands:
        api.PrintCommandResults(cmd)
        if cmd.exit_code != 0:
            return api.types.status.FAILURE
    # Query rollout status from each node via the Naples mgmt REST endpoint.
    req = api.Trigger_CreateExecuteCommandsRequest()
    for n in tc.Nodes:
        cmd = 'curl -k https://' + api.GetNicIntMgmtIP(
            n) + ':8888/api/v1/naples/rollout/'
        api.Trigger_AddHostCommand(req, n, cmd)
    tc.resp = api.Trigger(req)
    for cmd in tc.resp.commands:
        api.PrintCommandResults(cmd)
        if cmd.exit_code != 0:
            api.Logger.info("cmd returned failure")
            return api.types.status.FAILURE
        # NOTE(review): reconstructed as per-node parsing; the original
        # collapsed formatting is ambiguous about whether json.loads ran
        # inside or after this loop -- confirm against history.
        resp = json.loads(cmd.stdout)
        try:
            for item in resp['Status']['status']:
                # Op 4 is the expected rollout opcode here.
                if not item['Op'] == 4:
                    api.Logger.info("opcode is bad")
                    return api.types.status.FAILURE
                if "fail" in tc.iterators.option:
                    if not item['opstatus'] == 'failure':
                        api.Logger.info("opstatus is bad")
                        return api.types.status.FAILURE
                    if tc.iterators.option not in item['Message']:
                        api.Logger.info("message is bad")
                        return api.types.status.FAILURE
                else:
                    if not item['opstatus'] == 'success':
                        api.Logger.info("opstatus is bad")
                        return api.types.status.FAILURE
        except:
            # NOTE(review): bare except; also Logger.info is given the JSON
            # as a second positional argument -- likely meant a single
            # formatted string.
            api.Logger.info("resp: ", json.dumps(resp, indent=1))
    ping.TestTerminateBackgroundPing(tc, tc.pkt_size)
    ping_loss_duration = ping.GetMaxPktLossDuration(tc, tc.interval)
    api.Logger.info(f"Traffic dropped for {ping_loss_duration} sec")
    if enable_ssh.Main(None) != api.types.status.SUCCESS:
        api.Logger.error("Enabling SSH failed after upgrade")
        return api.types.status.FAILURE
    # VerifyUpgLog returns a truthy value on failure.
    if upgrade_utils.VerifyUpgLog(tc.Nodes, tc.GetLogsDir()):
        api.Logger.error("Failed to verify the upgrade logs")
        return api.types.status.FAILURE
    if verify_connectivity(tc) != api.types.status.SUCCESS:
        api.Logger.error("Post trigger connectivity test failed.")
        return api.types.status.FAILURE
    return api.types.status.SUCCESS
def Verify(tc): result = api.types.status.SUCCESS #print krping output for cmd in tc.krping_resp.commands: api.PrintCommandResults(cmd) #print dmesg output for cmd in tc.dmesg_resp.commands: api.PrintCommandResults(cmd) #verify rdma perf test result = rdma_perf.Verify(tc) if result != api.types.status.SUCCESS: return result #print ping output and check if pipline is stuck for cmd in tc.ping_resp.commands: api.PrintCommandResults(cmd) for ping_output in cmd.stdout.splitlines(): if "packets transmitted" in ping_output: ping_result = ping_output.split() pkts_sent = int(ping_result[0]) pkts_rcvd = int(ping_result[3]) if pkts_sent != 10 or pkts_rcvd != 10: api.Logger.error("PING not successful") return api.types.status.FAILURE return result
def Verify(tc):
    """Validate RTSP/RTP test command results against their cookies.

    RTSP/RTP client runs and "Before"/flow-gate commands may fail without
    marking the test failed; the session show commands must produce output.
    """
    if tc.resp is None:
        return api.types.status.FAILURE
    result = api.types.status.SUCCESS
    # tc.cmd_cookies is indexed across BOTH responses, in trigger order.
    cookie_idx = 0
    api.Logger.info("Results for %s" % (tc.cmd_descr))
    for cmd in tc.resp.commands:
        api.PrintCommandResults(cmd)
        if cmd.exit_code != 0 and not api.Trigger_IsBackgroundCommand(cmd):
            # NOTE(review): assigning SUCCESS here can overwrite a FAILURE
            # recorded for an earlier command -- confirm this is intended.
            if (tc.cmd_cookies[cookie_idx].find("Run RTSP client") != -1 or \
                tc.cmd_cookies[cookie_idx].find("Before RTSP") != -1 or \
                tc.cmd_cookies[cookie_idx].find("show flow-gate") != -1):
                result = api.types.status.SUCCESS
            else:
                result = api.types.status.FAILURE
        # The RTSP session must exist (non-empty show output).
        if tc.cmd_cookies[cookie_idx].find("show session RTSP") != -1 and \
           cmd.stdout == '':
            result = api.types.status.FAILURE
        cookie_idx += 1
    for cmd in tc.resp2.commands:
        api.PrintCommandResults(cmd)
        if cmd.exit_code != 0 and not api.Trigger_IsBackgroundCommand(cmd):
            if tc.cmd_cookies[cookie_idx].find("Run RTP client") != -1:
                result = api.types.status.SUCCESS
            else:
                result = api.types.status.FAILURE
        # The RTP session must exist (non-empty show output).
        if tc.cmd_cookies[cookie_idx].find("show session RTP") != -1 and \
           cmd.stdout == '':
            result = api.types.status.FAILURE
        cookie_idx += 1
    return result
def Verify(tc):
    """Verify the UC MAC filter test: endpoint MACs and ping results.

    Checks halctl endpoints via verifyEndPoints(), then walks the ping
    results taken before (tc.resp2) and after (tc.resp) the MAC change.
    """
    # Drop any sub-interface bookkeeping from the trigger phase.
    subif_utils.clearAll()
    if tc.skip:
        return api.types.status.SUCCESS
    if tc.resp2 is None:
        return api.types.status.FAILURE
    if tc.resp is None:
        return api.types.status.FAILURE
    result = api.types.status.SUCCESS
    # Check if MACs in "halctl show endpoint" match with host & workload interface MAC
    if not verifyEndPoints(tc):
        api.Logger.error("UC MAC filter : Verify failed for verifyEndPoints")
        result = api.types.status.FAILURE
    else:
        api.Logger.info("UC MAC filter : Verify - verifyEndPoints SUCCESS ")
    # tc.cmd_cookies indexes continuously across resp2 then resp.
    cookie_idx = 0
    for cmd in tc.resp2.commands:
        api.Logger.info("Ping Results for %s" % (tc.cmd_cookies[cookie_idx]))
        api.PrintCommandResults(cmd)
        if cmd.exit_code != 0:
            result = api.types.status.FAILURE
        cookie_idx += 1
    api.Logger.info("Ping results after MAC Change\n")
    for cmd in tc.resp.commands:
        api.Logger.info("Ping Results for %s" % (tc.cmd_cookies[cookie_idx]))
        api.PrintCommandResults(cmd)
        if cmd.exit_code != 0:
            result = api.types.status.FAILURE
        cookie_idx += 1
    return result
def Verify(tc):
    """Validate flow-gate ageout results and fail fast on detected memleaks.

    tc.memleak is set by the trigger phase; when set, all command output is
    dumped and the test fails immediately.
    """
    if tc.resp is None:
        return api.types.status.FAILURE
    if tc.memleak == 1:
        for cmd in tc.resp.commands:
            api.PrintCommandResults(cmd)
        api.Logger.info("MEMORY LEAK DETECTED")
        return api.types.status.FAILURE
    result = api.types.status.SUCCESS
    api.Logger.info("Results for %s" % (tc.cmd_descr))
    cookie_idx = 0
    for cmd in tc.resp.commands:
        api.PrintCommandResults(cmd)
        if cmd.exit_code != 0 and not api.Trigger_IsBackgroundCommand(cmd):
            # NOTE(review): assigning SUCCESS here can overwrite a FAILURE
            # recorded for an earlier command -- confirm this is intended.
            if (tc.cmd_cookies[cookie_idx].find("Before") != -1 or \
                tc.cmd_cookies[cookie_idx].find("After") != -1 or \
                tc.cmd_cookies[cookie_idx].find("After flow-gate ageout") != -1):
                result = api.types.status.SUCCESS
            else:
                result = api.types.status.FAILURE
        # Sessions / flow-gates must exist before ageout (non-empty output)...
        if tc.cmd_cookies[cookie_idx].find("show session") != -1 and \
           cmd.stdout == '':
            result = api.types.status.FAILURE
        if tc.cmd_cookies[cookie_idx].find("show security flow-gate") != -1 and \
           cmd.stdout == '':
            result = api.types.status.FAILURE
        # ...and must be gone after ageout (empty output).
        if tc.cmd_cookies[cookie_idx].find("After flow-gate ageout") != -1 and \
           cmd.stdout != '':
            result = api.types.status.FAILURE
        cookie_idx += 1
    return result
def Verify(tc):
    """Verify an upgrade rollout: status query, config re-push, traffic tests.

    Queries the rollout REST endpoint on every node, re-pushes the base
    profile and config, runs arping/ping, and finally validates the last
    node's rollout JSON (Op must be 4, opstatus 'success').
    """
    time.sleep(tc.args.sleep)
    if tc.resp is None:
        return api.types.status.FAILURE
    for cmd in tc.resp.commands:
        api.PrintCommandResults(cmd)
        if cmd.exit_code != 0:
            return api.types.status.FAILURE
    # Query rollout status from each node via the Naples mgmt REST endpoint.
    req = api.Trigger_CreateExecuteCommandsRequest()
    for n in tc.Nodes:
        cmd = 'curl -k https://' + api.GetNicIntMgmtIP(
            n) + ':8888/api/v1/naples/rollout/'
        api.Trigger_AddHostCommand(req, n, cmd)
    tc.resp = api.Trigger(req)
    for cmd in tc.resp.commands:
        api.PrintCommandResults(cmd)
    if netagent_cfg_api.switch_profile(push_base_profile=True) != \
       api.types.status.SUCCESS:
        api.Logger.warn("Failed to push base profile")
        return api.types.status.FAILURE
    if netagent_cfg_api.PushBaseConfig(ignore_error = False) != \
       api.types.status.SUCCESS:
        api.Logger.info("policy push failed")
        return api.types.status.FAILURE
    for cmd in tc.resp.commands:
        if cmd.exit_code != 0:
            api.Logger.info("cmd returned failure")
            return api.types.status.FAILURE
    # arping failure is logged but non-fatal.
    if arping.ArPing(tc) != api.types.status.SUCCESS:
        api.Logger.info("arping failed on verify")
    if ping.TestPing(tc, 'local_only', 'ipv4', 64) != api.types.status.SUCCESS or ping.TestPing(
            tc, 'remote_only', 'ipv4', 64) != api.types.status.SUCCESS:
        api.Logger.info("ping test failed")
        return api.types.status.FAILURE
    # `cmd` is the rollout-status curl of the LAST node from the loop above.
    resp = json.loads(cmd.stdout)
    try:
        for item in resp['Status']['status']:
            # Op 4 is the expected rollout opcode here.
            if not item['Op'] == 4:
                api.Logger.info("opcode is bad")
                return api.types.status.FAILURE
            else:
                if not item['opstatus'] == 'success':
                    api.Logger.info("opstatus is bad")
                    return api.types.status.FAILURE
    except (KeyError, TypeError):
        # BUG FIX: was `api.logger.info` (lowercase -> AttributeError inside
        # the handler, masking the real problem) and passed the JSON dump as
        # a stray second positional argument; also narrowed the bare except
        # to the errors dict/JSON access can raise.
        api.Logger.info("resp : %s" % json.dumps(resp, indent=4))
    return api.types.status.SUCCESS
def Trigger(tc):
    """Check ionic event-queue interrupts against eth_eq_count (Linux only).

    For each Naples host interface, reads eth_eq_count from the ionic debugfs
    identity and counts 'ionic-<pci>-eq' lines in /proc/interrupts; the two
    must be consistently both zero or both non-zero.
    """
    names = api.GetNaplesHostnames()
    hostname = names[0]
    if api.GetNodeOs(hostname) != host.OS_TYPE_LINUX:
        return api.types.status.SUCCESS
    for intf in api.GetNaplesHostInterfaces(hostname):
        api.Logger.info("Checking event queue use on host %s interface %s" %
                        (hostname, intf))
        pci = host.GetNaplesPci(hostname, intf)
        if pci is None:
            return api.types.status.FAILURE
        # get eth_eq_count and number of eq interrupts
        req = api.Trigger_CreateExecuteCommandsRequest(serial=True)
        cmd = "awk '/eth_eq_count/ {print $2}' < /sys/kernel/debug/ionic/" + pci + "/identity"
        api.Trigger_AddHostCommand(req, hostname, cmd)
        cmd = "grep -c -e 'ionic-" + pci + "-eq' /proc/interrupts"
        api.Trigger_AddHostCommand(req, hostname, cmd)
        resp = api.Trigger(req)
        if resp is None:
            api.Logger.error("Failed to get values from host %s interface %s" %
                             (hostname, intf))
            return api.types.status.FAILURE
        # Commands complete in request order: [0] awk eth_eq_count, [1] grep
        # interrupt count. pop() yields the LAST command first, so the first
        # pop is the grep result. (BUG FIX: the original parsed the grep
        # output as eth_eq_count and the awk output as the interrupt count,
        # swapping the two values and their error messages.)
        cmd = resp.commands.pop()
        if cmd.exit_code > 1:
            # exit code 1 from grep is "string not found", which is a valid answer here
            api.Logger.error(
                "Failed to get interrupt count from host %s interface %s" %
                (hostname, intf))
            api.PrintCommandResults(cmd)
            return api.types.status.FAILURE
        intr_count = int(cmd.stdout.strip())
        cmd = resp.commands.pop()
        if cmd.exit_code != 0:
            api.Logger.error(
                "Failed to get eth_eq_count from host %s interface %s" %
                (hostname, intf))
            api.PrintCommandResults(cmd)
            return api.types.status.FAILURE
        eth_eq_count = int(cmd.stdout.strip())
        api.Logger.info(
            "Found eth_eq_count %d and interrupt count %d from host %s interface %s" %
            (eth_eq_count, intr_count, hostname, intf))
        if eth_eq_count == 0 and intr_count != 0:
            api.Logger.error("eq interrupts found when eth_eq_count == 0")
            return api.types.status.FAILURE
        elif eth_eq_count != 0 and intr_count == 0:
            api.Logger.error("No eq interrupts found when eth_eq_count != 0")
            return api.types.status.FAILURE
    return api.types.status.SUCCESS
def __installPenCtl(node):
    """Copy and unpack the penctl package + token onto `node`.

    Records the resolved executable path in common.PENCTL_EXEC[node] and the
    token path in common.PENCTL_TOKEN[node].
    Returns api.types.status.SUCCESS / FAILURE.
    """
    fullpath = api.GetTopDir() + '/' + common.PENCTL_PKG
    resp = api.CopyToHost(node, [fullpath], "")
    if resp is None:
        return api.types.status.FAILURE
    if resp.api_response.api_status != types_pb2.API_STATUS_OK:
        api.Logger.error("Failed to copy penctl to Node: %s" % node)
        return api.types.status.FAILURE
    fullpath = api.GetTopDir() + '/' + common.PENCTL_TOKEN_FILE
    resp = api.CopyToHost(node, [fullpath], "")
    if resp is None:
        return api.types.status.FAILURE
    if resp.api_response.api_status != types_pb2.API_STATUS_OK:
        api.Logger.error("Failed to copy penctl token to Node: %s" % node)
        return api.types.status.FAILURE
    req = api.Trigger_CreateExecuteCommandsRequest()
    # Unpack the package; sync so the binaries are on disk before use.
    api.Trigger_AddHostCommand(req, node, "tar -xvf %s" % os.path.basename(common.PENCTL_PKG) + " && sync", background = False)
    # Resolve the absolute path of the penctl executable.
    execName = __penctl_exec(node)
    realPath = "realpath %s/%s " % (common.PENCTL_DEST_DIR, execName)
    api.Trigger_AddHostCommand(req, node, realPath, background = False)
    resp = api.Trigger(req)
    for cmd in resp.commands:
        api.PrintCommandResults(cmd)
        if cmd.exit_code != 0:
            return api.types.status.FAILURE
    # commands[1] is the realpath output; take its first line.
    common.PENCTL_EXEC[node] = resp.commands[1].stdout.split("\n")[0]
    req = api.Trigger_CreateExecuteCommandsRequest()
    # Resolve the absolute path of the token file.
    realPath = "realpath %s " % (common.PENCTL_TOKEN_FILE_NAME)
    api.Trigger_AddHostCommand(req, node, realPath, background = False)
    resp = api.Trigger(req)
    for cmd in resp.commands:
        api.PrintCommandResults(cmd)
        if cmd.exit_code != 0:
            return api.types.status.FAILURE
    common.PENCTL_TOKEN[node] = resp.commands[0].stdout.split("\n")[0]
    return api.types.status.SUCCESS
def Trigger(tc):
    """Bring up the Athena bare-metal workflow on the first Naples node.

    Stages device.json, loads the mnic driver stack, starts the athena agent,
    configures int_mnic0, hot-plugs the PCI device on the host, and assigns
    int_mnic_ip+1 to the host-side internal mgmt interface.
    """
    # move device.json into place for the agent
    cmd = "mv /device.json /nic/conf/"
    resp = api.RunNaplesConsoleCmd(tc.nodes[0], cmd)
    # load drivers
    cmd = "insmod /nic/bin/ionic_mnic.ko && insmod /nic/bin/mnet_uio_pdrv_genirq.ko && insmod /nic/bin/mnet.ko"
    resp = api.RunNaplesConsoleCmd(tc.nodes[0], cmd)
    # start athena app
    cmd = "/nic/tools/start-agent-skip-dpdk.sh"
    resp = api.RunNaplesConsoleCmd(tc.nodes[0], cmd)
    # wait for athena app to be up
    utils.Sleep(80)
    # configure int_mnic0
    cmd = "ifconfig int_mnic0 " + tc.int_mnic_ip + " netmask 255.255.255.0"
    resp = api.RunNaplesConsoleCmd(tc.nodes[0], cmd)
    # run plugctl to gracefully bring up the PCI device on host
    req = api.Trigger_CreateExecuteCommandsRequest(serial=True)
    cmd = "./plugctl.sh in"
    api.Trigger_AddHostCommand(req, tc.bitw_node_name, cmd)
    resp = api.Trigger(req)
    cmd = resp.commands[0]
    api.PrintCommandResults(cmd)
    if cmd.exit_code != 0:
        api.Logger.error("Failed to gracefully bring up the PCI device on host %s" % \
                         tc.bitw_node_name)
        return api.types.status.FAILURE
    # get host internal mgmt intf
    host_intfs = naples_host.GetHostInternalMgmtInterfaces(tc.bitw_node_name)
    # Assuming single nic per host
    if len(host_intfs) == 0:
        api.Logger.error('Failed to get host interfaces')
        return api.types.status.FAILURE
    intf = host_intfs[0]
    # Host side gets the next address after the Naples int_mnic0 IP.
    ip_addr = str(ip_address(tc.int_mnic_ip.rstrip()) + 1)
    req = api.Trigger_CreateExecuteCommandsRequest(serial=True)
    cmd = "ifconfig " + str(intf) + " " + ip_addr + "/24 up"
    api.Trigger_AddHostCommand(req, tc.bitw_node_name, cmd)
    resp = api.Trigger(req)
    cmd = resp.commands[0]
    api.PrintCommandResults(cmd)
    if cmd.exit_code != 0:
        api.Logger.error("Failed to gracefully bring up the internal mgmt intf on host %s" % \
                         tc.bitw_node_name)
        return api.types.status.FAILURE
    return api.types.status.SUCCESS
def initiateMCtraffic(w1, w2, statsCount):
    """Generate multicast traffic from w2 toward a fresh multicast group and
    update the expected rx counter for w1's parent interface.

    Installs an rx MAC filter on w1 and a multicast route on w2, hpings the
    group address, bumps statsCount, then rolls both installs back.
    Returns api.types.status.SUCCESS / FAILURE (failures are accumulated,
    not early-returned).
    """
    result = api.types.status.SUCCESS
    # Get a new multicast IPv4 address
    mcast_ip = str(ipv4_allocator.Alloc())
    # Get corresponding multicast mac address
    mcast_mac = address_utils.convertMcastIP2McastMAC(mcast_ip)
    api.Logger.verbose("Mcast address ", mcast_ip, mcast_mac)
    # install rx filter for this mcast mac (derived from mcast IP) on w1.interface
    cmd = host_utils.AddMcastMAC(w1.node_name, w1.interface, mcast_mac)
    if cmd.exit_code != 0:
        api.Logger.critical("AddMcastMAC failed for w1 ", w1.node_name, w1.interface, mcast_mac)
        api.PrintCommandResults(cmd)
        result = api.types.status.FAILURE
    # install route for this mcast IP with w2.interface for hping3 to work
    cmd = host_utils.AddIPRoute(w2.node_name, w2.interface, mcast_ip)
    if cmd.exit_code != 0:
        api.Logger.critical("AddIPRoute failed for w2 ", w2.node_name, w2.interface, mcast_ip)
        api.PrintCommandResults(cmd)
        result = api.types.status.FAILURE
    # Without sleep here, mc packets are not getting received.
    # need to check if route / maddr install taking time for propagation.
    time.sleep(1)
    req = api.Trigger_CreateExecuteCommandsRequest(serial = True)
    # hping from remote node
    # exit_code will be 1 but that's ok as the intention here is to generate packets from remote
    cmd_cookie = "hping3 -c %d --faster %s -I %s -a %s" % (__HPING_COUNT, mcast_ip, w2.interface, w2.ip_address)
    api.Trigger_AddHostCommand(req, w2.node_name, cmd_cookie)
    resp = api.Trigger(req)
    # Increment expected stats: element [1] is the rx multicast-frame count.
    txrxmcframes = statsCount[w1.parent_interface]
    # increase rx by hping_count
    txrxmcframes[1] += __HPING_COUNT
    statsCount[w1.parent_interface] = txrxmcframes
    # rollback
    # remove the rx MAC filter installed on w1.interface
    cmd = host_utils.DeleteMcastMAC(w1.node_name, w1.interface, mcast_mac)
    if cmd.exit_code != 0:
        api.Logger.critical("DeleteMcastMAC failed for w1 ", w1.node_name, w1.interface, mcast_mac)
        api.PrintCommandResults(cmd)
        result = api.types.status.FAILURE
    # remove route for this mcast IP with w2.interface
    cmd = host_utils.DelIPRoute(w2.node_name, w2.interface, mcast_ip)
    if cmd.exit_code != 0:
        api.Logger.critical("DelIPRoute failed for w2 ", w2.node_name, w2.interface, mcast_ip)
        api.PrintCommandResults(cmd)
        result = api.types.status.FAILURE
    return result
def Verify(tc):
    """Scan per-node memslab and mtrack output for vMotion memory leaks.

    tc.trig_resp.commands holds, per node: clear, sleep, memslab, mtrack --
    hence the cmd_cnt arithmetic below. Sets tc.memleak on any leak and
    returns FAILURE in that case.
    """
    cmd_cnt = 0
    for node in tc.Nodes:
        # Increment 2 for first two commands (clear && sleep)
        cmd_cnt += 2
        # Find any leak from memslab command output
        memslab_cmd = tc.trig_resp.commands[cmd_cnt]
        api.PrintCommandResults(memslab_cmd)
        alg_meminfo = get_meminfo(memslab_cmd, 'alg')
        for info in alg_meminfo:
            # A slab leaks if anything is still in use or allocs != frees.
            if (info['inuse'] != 0 or info['allocs'] != info['frees']):
                api.Logger.info("Memleak detected in Slab %d" %info['inuse'])
                tc.memleak = 1
        cmd_cnt += 1
        # Find any leak from mtrack command output
        memtrack_cmd = tc.trig_resp.commands[cmd_cnt]
        api.PrintCommandResults(memtrack_cmd)
        # Alloc ID 90 & 91 is for VMOTION
        vm_memtrack = []
        vm_memtrack.append(vm_utils.get_memtrack(memtrack_cmd, 90))
        vm_memtrack.append(vm_utils.get_memtrack(memtrack_cmd, 91))
        vm_dbg_stats = vm_utils.get_dbg_vmotion_stats(tc, node)
        tot_vmotions = int(vm_dbg_stats['vMotion'])
        # we limit dbg records to a max of 40
        tot_vmotions = min(tot_vmotions,40)
        # 3 chunks of vMotion alloc assigned for global usage
        chunks_in_use = tot_vmotions + 3
        for info in vm_memtrack:
            if 'allocs' in info and 'frees' in info:
                # Subtract -3 in vMotion Init, 3 chunks of vMotion alloc will be done
                if info['allocid'] == 90 and ((info['allocs'] - chunks_in_use) != info['frees']):
                    api.Logger.info("Leak detected in Mtrack for id %d A %d F %d" %(info['allocid'], info['allocs'], info['frees']))
                    tc.memleak = 1
                elif info['allocid'] == 91:
                    # NOTE(review): id 91 is reported as a leak UNCONDITIONALLY
                    # whenever allocs/frees are present -- looks like a missing
                    # allocs != frees check; confirm intent.
                    api.Logger.info("Leak detected in Mtrack for id %d A %d F %d" %(info['allocid'], info['allocs'], info['frees']))
                    tc.memleak = 1
        cmd_cnt += 1
    if tc.memleak == 1:
        api.Logger.info("Memleak failure detected")
        return api.types.status.FAILURE
    api.Logger.info("Results for vMotion Memleak test, PASS")
    return api.types.status.SUCCESS
def athena_sec_app_start(node_name=None, nic_name=None, init_wait_time=INIT_WAIT_TIME_DEFAULT):
    """Start the athena security app on one (node, nic) pair or on all.

    Args:
        node_name/nic_name: must be given together, or both omitted (all
            athena node/nic pairs are used).
        init_wait_time: seconds to wait for app init before verifying.

    Returns api.types.status.SUCCESS / FAILURE.
    Raises Exception when only one of node_name/nic_name is given.
    """
    node_nic_names = []
    if (not node_name and nic_name) or (node_name and not nic_name):
        raise Exception("specify both node_name and nic_name or neither")
    if node_name and nic_name:
        node_nic_names.append((node_name, nic_name))
    else:
        node_nic_names = get_athena_node_nic_names()
    for nname, nicname in node_nic_names:
        req = api.Trigger_CreateExecuteCommandsRequest()
        cmd = "/nic/tools/start-sec-agent-iota.sh"
        # background=True: the start script blocks while the app runs.
        api.Trigger_AddNaplesCommand(req, nname, cmd, nicname, background=True)
        resp = api.Trigger(req)
        cmd = resp.commands[0]
        api.PrintCommandResults(cmd)
        if cmd.exit_code != 0:
            api.Logger.error("command to start athena sec app failed on "
                             "node %s nic %s" % (nname, nicname))
            return api.types.status.FAILURE
    # sleep for init to complete
    misc_utils.Sleep(init_wait_time)
    for nname, nicname in node_nic_names:
        req = api.Trigger_CreateExecuteCommandsRequest()
        cmd = "ps -aef | grep athena_app | grep soft-init | grep -v grep"
        api.Trigger_AddNaplesCommand(req, nname, cmd, nicname)
        resp = api.Trigger(req)
        cmd = resp.commands[0]
        api.PrintCommandResults(cmd)
        # grep exits non-zero when nothing matched, i.e. app not running.
        if cmd.exit_code != 0:
            api.Logger.error("ps failed or athena_app failed to start "
                             "on node %s nic %s" % (nname, nicname))
            return api.types.status.FAILURE
        if "athena_app" in cmd.stdout:
            # Second whitespace-separated field of the ps line is the PID.
            athena_sec_app_pid = cmd.stdout.strip().split()[1]
            api.Logger.info("Athena sec app came up on node %s nic %s and "
                            "has pid %s" % (nname, nicname, athena_sec_app_pid))
    return api.types.status.SUCCESS
def Verify(tc):
    """Verify install/deploy/validate command batches and techsupport dumps.

    Copies techsupport.json back from every node and logs the dump titles
    (concatenation of the string-valued fields of each dump entry).
    """
    for cmd in tc.install_cmd_resp.commands:
        api.PrintCommandResults(cmd)
        if cmd.exit_code != 0 and not api.Trigger_IsBackgroundCommand(cmd):
            api.Logger.error("Installation failed on Node %s\n" % (cmd.node_name))
            return api.types.status.FAILURE
    for cmd in tc.deploy_cmd_resp.commands:
        api.PrintCommandResults(cmd)
        if cmd.exit_code != 0 and not api.Trigger_IsBackgroundCommand(cmd):
            api.Logger.error("Deploy failed on Node %s\n" % (cmd.node_name))
            return api.types.status.FAILURE
    for cmd in tc.validate_cmd_resp.commands:
        api.PrintCommandResults(cmd)
        if cmd.exit_code != 0 and not api.Trigger_IsBackgroundCommand(cmd):
            api.Logger.error("Validation procedure failed on Node %s\n" % (cmd.node_name))
            return api.types.status.FAILURE
    dir_path = os.path.dirname(os.path.realpath(__file__))
    api.Logger.info("Dir Path is: %s\n" % (dir_path))
    for n in tc.nodes:
        api.Logger.info("Getting data for: %s\n" % (n))
        # NOTE(review): each node's copy overwrites the same local
        # techsupport.json before it is parsed below.
        resp = api.CopyFromHost(n, ["pencap/linux/techsupport/techsupport.json"], dir_path)
        if resp == None or resp.api_response.api_status != types_pb2.API_STATUS_OK:
            api.Logger.error("Failed to copy client file: %s\n" % (n))
            return api.types.status.FAILURE
        jdata = {}
        try:
            with open(dir_path + "/techsupport.json") as f:
                jdata = json.load(f)
        except:
            api.Logger.error("Unable to open local file from node: %s\n" % (n))
            return api.types.status.FAILURE
        for data in jdata:
            array_of_dumps = jdata[data]
            for dump in array_of_dumps:
                # Build a readable title from the dump's string fields.
                dump_menu_title = ""
                for item in dump:
                    if (isinstance(dump[item], str) == True):
                        dump_menu_title += dump[item] + "_"
                api.Logger.info("Dumped data: %s\n" % (dump_menu_title))
    return api.types.status.SUCCESS
def UnloadDriver (os_type, node, whichdriver = "all" ):
    """Unload ionic driver(s) from `node`.

    whichdriver selects "eth", "rdma", "ionic_fw", or (default) rdma+eth.
    Linux uses rmmod, BSD kldunload; ESX removes the ionic-en vib over ssh.
    Returns api.types.status.SUCCESS / FAILURE.
    """
    req = api.Trigger_CreateExecuteCommandsRequest(serial = True)
    if os_type == None or node == None:
        api.Logger.info("Undefined parameters in Unload Driver")
        return api.types.status.FAILURE
    if os_type != OS_TYPE_ESX:
        if os_type == OS_TYPE_LINUX:
            command = "rmmod"
        elif os_type == OS_TYPE_BSD:
            command = "kldunload"
        if whichdriver == "eth":
            api.Trigger_AddHostCommand(req, node, "%s ionic" % command)
        elif whichdriver == "rdma":
            api.Trigger_AddHostCommand(req, node, "%s ionic_rdma" % command)
        elif whichdriver == "ionic_fw":
            api.Trigger_AddHostCommand(req, node, "%s ionic_fw" % command)
        else:
            # Default: unload rdma first (it depends on the eth driver).
            api.Trigger_AddHostCommand(req, node, "%s ionic_rdma" % command)
            api.Trigger_AddHostCommand(req, node, "%s ionic" % command)
    elif os_type == OS_TYPE_ESX:
        # Or could use
        # api.Trigger_AddHostCommand(req, node, "vmkload_mod -u ionic_en")
        api.Trigger_AddHostCommand(req, node, "sshpass -p %s ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no %s@%s esxcli software vib remove -n=ionic-en -f" % (api.GetTestbedEsxPassword(), api.GetTestbedEsxUsername(), api.GetEsxHostIpAddress(node)))
    resp = api.Trigger(req)
    if resp is None:
        return api.types.status.FAILURE
    for cmd in resp.commands:
        if cmd.exit_code != 0:
            # NOTE(review): polarity below looks suspicious -- "is not
            # currently loaded" / "can't find file" normally mean the driver
            # was already absent (a tolerable failure) yet return FAILURE
            # here; confirm intended behavior.
            if os_type == OS_TYPE_LINUX:
                if cmd.stdout.find("is not currently loaded") != -1:
                    api.Logger.info("Unload Driver Failed")
                    api.PrintCommandResults(cmd)
                    return api.types.status.FAILURE
            elif os_type == OS_TYPE_BSD:
                if cmd.stdout.find("can't find file") != -1:
                    api.Logger.info("Unload Driver Failed")
                    api.PrintCommandResults(cmd)
                    return api.types.status.FAILURE
                else:
                    api.Logger.info("Driver was NOT loaded. %s is expected to fail" % command)
            elif os_type == OS_TYPE_ESX:
                api.PrintCommandResults(cmd)
    return api.types.status.SUCCESS
def acquire_dhcp_ips(workload_pairs):
    """Release static IPs and reacquire addresses over DHCP for all workloads
    in `workload_pairs`, then reinstall routes.

    Populates the module-level `workloads` dict (used as an ordered set of
    workload objects). Returns None; on any command failure it logs and
    returns early without adding routes.
    """
    global workloads
    workloads.clear()
    for pair in workload_pairs:
        workloads[pair[0]] = True
        workloads[pair[1]] = True
    # Serial execution on real hardware; parallel is fine in simulation.
    if not api.IsSimulation():
        req = api.Trigger_CreateExecuteCommandsRequest(serial=True)
    else:
        req = api.Trigger_CreateExecuteCommandsRequest(serial=False)
    for workload in workloads.keys():
        api.Logger.info(
            "DHCP: %s %s %s" % (workload.node_name, workload.workload_name, workload.interface))
        # Release any existing lease, clear the static address, snapshot the
        # interface state, then request a fresh lease.
        cmd = "dhclient -r " + workload.interface
        api.Trigger_AddCommand(req, workload.node_name, workload.workload_name, cmd)
        cmd = "ifconfig " + workload.interface + " 0.0.0.0"
        api.Trigger_AddCommand(req, workload.node_name, workload.workload_name, cmd)
        cmd = "ifconfig " + workload.interface
        api.Trigger_AddCommand(req, workload.node_name, workload.workload_name, cmd)
        cmd = "dhclient " + workload.interface
        api.Trigger_AddCommand(req, workload.node_name, workload.workload_name, cmd, timeout=60)
    resp = api.Trigger(req)
    for cmd in resp.commands:
        if cmd.exit_code != 0:
            api.Logger.info("Couldn't reacquire IP addresses over DHCP")
            api.PrintCommandResults(cmd)
            return
        # The bare "ifconfig <intf>" snapshot must show no inet address left.
        if ("ifconfig" in cmd.command and "0.0.0.0" not in cmd.command
                and "inet" in cmd.stdout):
            api.Logger.info("Couldn't clear static IP address")
            api.PrintCommandResults(cmd)
            return
    add_routes.AddRoutes()
    return
def Verify(tc):
    """Verify TCP half-close session aging results.

    Walks two response sets: tc.resp (session-state commands, described by
    cookies in tc.cmd_cookies2) and tc.tcpdump_resp (tcpdump captures,
    described by cookies in tc.cmd_cookies3).

    Returns api.types.status.SUCCESS or FAILURE.
    """
    if tc.resp is None:
        return api.types.status.FAILURE
    #if tc.config_update_fail == 1:
    #    return api.types.status.FAILURE
    result = api.types.status.SUCCESS
    #Verify Half close timeout & session state
    cookie_idx = 0
    for cmd in tc.resp.commands:
        api.PrintCommandResults(cmd)
        if cmd.exit_code != 0 and not api.Trigger_IsBackgroundCommand(cmd):
            #This is expected so dont set failure for this case
            if (tc.cmd_cookies2[cookie_idx].find("After timeout") != -1) or \
               (tc.cmd_cookies2[cookie_idx].find("Send FIN ACK") != -1):
                result = api.types.status.SUCCESS
            else:
                result = api.types.status.FAILURE
        elif tc.cmd_cookies2[cookie_idx].find("Before timeout") != -1:
            #Session were not established ?
            if cmd.stdout.find("FIN_RCVD") == -1:
                result = api.types.status.FAILURE
        elif tc.cmd_cookies2[cookie_idx].find("After timeout") != -1:
            #Check if sessions are aged and new session is not created
            if cmd.stdout != '':
                result = api.types.status.FAILURE
        cookie_idx += 1

    #Verify TCP DUMP responses
    cookie_idx = 0
    for cmd in tc.tcpdump_resp.commands:
        api.PrintCommandResults(cmd)
        if cmd.exit_code != 0 and not api.Trigger_IsBackgroundCommand(cmd):
            if tc.cmd_cookies3[cookie_idx].find("Check FIN Received") != -1:
                result = api.types.status.SUCCESS
            else:
                result = api.types.status.FAILURE
        elif tc.cmd_cookies3[cookie_idx].find("Check sent FIN") != -1:
            #make sure FIN was sent: an empty capture means no FIN left the host.
            #BUGFIX: original wrote `cmd.stdout == -1`, comparing a string to an
            #int, which is always False, so a missing FIN was never reported.
            if cmd.stdout == '':
                result = api.types.status.FAILURE
        elif tc.cmd_cookies3[cookie_idx].find("Check FIN Received") != -1:
            #Check if FIN wasnt received on that other side
            if cmd.stdout != '':
                result = api.types.status.FAILURE
        cookie_idx += 1
    return result
def athena_sec_app_kill(node_name=None, nic_name=None):
    """Kill the soft-init athena_app instance on one NIC or on all nodes.

    Supply both node_name and nic_name to target a single NIC, or neither to
    act on every (node, nic) from get_athena_node_nic_names().  Supplying
    exactly one of the two raises Exception.

    Returns api.types.status.SUCCESS or FAILURE (FAILURE if pkill fails).
    """
    # Exactly-one-argument is an error: both or neither must be given.
    if bool(node_name) != bool(nic_name):
        raise Exception("specify both node_name and nic_name or neither")

    if node_name and nic_name:
        node_nic_names = [(node_name, nic_name)]
    else:
        node_nic_names = get_athena_node_nic_names()

    for nname, nicname in node_nic_names:
        req = api.Trigger_CreateExecuteCommandsRequest()
        cmd = "ps -aef | grep athena_app | grep soft-init | grep -v grep"
        api.Trigger_AddNaplesCommand(req, nname, cmd, nicname)
        ps_cmd_resp = api.Trigger(req).commands[0]
        api.PrintCommandResults(ps_cmd_resp)

        if "athena_app" not in ps_cmd_resp.stdout:
            api.Logger.info("athena sec app not running on node %s nic %s" %
                            (nname, nicname))
            continue

        # ps -aef: PID is the second whitespace-separated field.
        athena_sec_app_pid = ps_cmd_resp.stdout.strip().split()[1]
        api.Logger.info("athena sec app already running on node %s "
                        "nic %s with pid %s. Killing it." %
                        (nname, nicname, athena_sec_app_pid))
        req = api.Trigger_CreateExecuteCommandsRequest()
        api.Trigger_AddNaplesCommand(req, nname, "pkill -n athena_app", nicname)
        pkill_cmd_resp = api.Trigger(req).commands[0]
        api.PrintCommandResults(pkill_cmd_resp)
        if pkill_cmd_resp.exit_code != 0:
            api.Logger.info("pkill failed for athena sec app")
            return api.types.status.FAILURE
        # sleep for kill to complete
        misc_utils.Sleep(ATHENA_SEC_APP_KILL_WAIT_TIME)
    return api.types.status.SUCCESS
def SetupDNSServer(server):
    """Provision and start a BIND (named) DNS server on the given workload.

    Copies example.com.zone and named.conf from this script's directory into
    the workload, rewrites the placeholder address in named.conf with the
    server's IP, installs the files, then starts and enables named.

    Returns None when a file copy fails, otherwise api.types.status.SUCCESS.
    """
    node = server.node_name
    workload = server.workload_name
    dir_path = os.path.dirname(os.path.realpath(__file__))

    zonefile = dir_path + '/' + "example.com.zone"
    api.Logger.info("fullpath %s" % (zonefile))
    if api.CopyToWorkload(node, workload, [zonefile], 'dnsdir') is None:
        return None

    named_conf = dir_path + '/' + "named.conf"
    if api.CopyToWorkload(node, workload, [named_conf], 'dnsdir') is None:
        return None

    req = api.Trigger_CreateExecuteCommandsRequest(serial=True)
    api.Trigger_AddCommand(req, node, workload,
                           "yes | cp dnsdir/named.conf /etc/")
    # First slot is filled with a literal "%s" so the ex command keeps its
    # whole-file substitute range; only the IP is actually substituted in.
    api.Trigger_AddCommand(
        req, node, workload,
        "ex -s -c \'%s/192.168.100.102/%s/g|x\' /etc/named.conf" % ("%s", server.ip_address))
    api.Trigger_AddCommand(req, node, workload,
                           "yes | cp dnsdir/example.com.zone /var/named/")
    api.Trigger_AddCommand(req, node, workload, "systemctl start named")
    api.Trigger_AddCommand(req, node, workload, "systemctl enable named")

    trig_resp = api.Trigger(req)
    api.Trigger_TerminateAllCommands(trig_resp)
    for cmd in trig_resp.commands:
        api.PrintCommandResults(cmd)
    return api.types.status.SUCCESS
def initPeerNode(tc, naples_node, new_mtu=None):
    """Initialize MTU of workload interfaces on all nodes other than
    `naples_node` to __MAX_MTU (or to `new_mtu` when supplied).

    Native interfaces are configured first via configureNativeIntfMTU; only
    non-native (tagged) workload interfaces are touched here.  Returns
    api.types.status.SUCCESS, or FAILURE if any interface update failed.
    """
    if new_mtu is None:
        new_mtu = __MAX_MTU
    result = api.types.status.SUCCESS
    # configure native workload interfaces before tagged workload intf
    configureNativeIntfMTU(tc, new_mtu, True, local_naples_node=naples_node)

    for w in api.GetWorkloads():
        if w.node_name == naples_node:
            continue
        already_done = (w.interface == w.parent_interface or
                        w.interface in tc.mgmt_intf_list[w.node_name])
        if already_done:
            # native interfaces are already configured
            continue
        cmd = host_utils.setInterfaceMTU(w.node_name, w.interface, new_mtu)
        if cmd.exit_code != 0:
            api.Logger.error("MTU filter : initPeerNode failed for ",
                             w.node_name, w.interface, new_mtu)
            api.PrintCommandResults(cmd)
            host_utils.debug_dump_interface_info(w.node_name, w.interface)
            result = api.types.status.FAILURE

    #TODO: Determine how much time to sleep
    time.sleep(40)
    api.Logger.info("MTU filter : hack - Slept for 40 secs")
    return result
def changeIntfMacAddr(node, intf_mac_dict, on_naples=False, isRollback=False, device_name=None):
    """Set (or restore) the MAC address of each interface in intf_mac_dict.

    intf_mac_dict maps interface name -> MAC string.  On rollback the stored
    MAC is applied verbatim; otherwise a new MAC is derived by adding a fixed
    offset to the old MAC (offset bumped per interface on Linux so each
    interface gets a distinct address).  Continues past individual failures.

    Returns api.types.status.SUCCESS or FAILURE.
    """
    result = api.types.status.SUCCESS
    mac_offset = 200
    for intf, old_mac in intf_mac_dict.items():
        if isRollback:
            # Restore the recorded MAC as-is.
            new_mac = old_mac
        else:
            # New MAC = (int(Old_MAC)+ 30 + running_no)
            #TODO: Check what happens when we scale to 2k sub-if.
            mac_as_int = address_utils.convertMacStr2Dec(old_mac) + mac_offset
            if api.GetNodeOs(node) == "linux":
                # In case of FreeBSD, hitting "PS-728". Based on its resolution, will remove OS check here.
                mac_offset += 1
            new_mac = address_utils.formatMacAddr(mac_as_int)

        if on_naples:
            cmd = naples_utils.SetMACAddress(node, intf, new_mac, device_name=device_name)
        else:
            cmd = host_utils.SetMACAddress(node, intf, new_mac)

        if cmd.exit_code != 0:
            api.Logger.critical("changeIntfMacAddr failed ", node, intf, new_mac)
            api.PrintCommandResults(cmd)
            result = api.types.status.FAILURE
    return result