def Setup(tc):
    """Prepare the manufacturing (mfg) test.

    Publishes mfg testsuite attributes, picks the bitw/classic node pair,
    collects tagged workload vlans on the uplink under test, generates the
    start-agent script with those vlans, and stages the preinit and
    start-agent scripts on the bitw Naples.

    Returns api.types.status.SUCCESS or FAILURE.
    """
    parse_args(tc)
    # Publish settings consumed by later testcases in this suite.
    api.SetTestsuiteAttr("mfg_test_intf", tc.test_intf)
    api.SetTestsuiteAttr("mfg_mode", "yes")
    api.SetTestsuiteAttr("preinit_script_path", NAPLES_PREINIT_SCRIPT_PATH)
    api.SetTestsuiteAttr("start_agent_script_path", NAPLES_START_AGENT_SCRIPT_PATH)

    # get node info
    tc.bitw_node_name = None
    tc.wl_node_name = None

    bitw_node_nic_pairs = athena_app_utils.get_athena_node_nic_names()
    classic_node_nic_pairs = utils.get_classic_node_nic_pairs()
    # Only one node for single-nic topology
    tc.bitw_node_name = bitw_node_nic_pairs[0][0]
    tc.wl_node_name = classic_node_nic_pairs[0][0]

    host_intfs = api.GetNaplesHostInterfaces(tc.wl_node_name)
    # Assuming single nic per host
    if len(host_intfs) != 2:
        api.Logger.error('Failed to get host interfaces')
        return api.types.status.FAILURE

    up0_intf = host_intfs[0]
    up1_intf = host_intfs[1]

    workloads = api.GetWorkloads()
    if len(workloads) == 0:
        api.Logger.error('No workloads available')
        return api.types.status.FAILURE

    # Collect vlans only from workloads attached to the uplink under test.
    wl_vlans = []
    for wl in workloads:
        if (wl.parent_interface == up0_intf and tc.test_intf == 'up0') or (wl.parent_interface == up1_intf and tc.test_intf == 'up1'):
            if wl.uplink_vlan != 0:  # Tagged workload
                wl_vlans.append(wl.uplink_vlan)

    if len(wl_vlans) < NUM_MFG_TEST_VLANS:
        api.Logger.error('Failed to fetch %d tagged workloads for mfg test'
                         ' on uplink %s' % (NUM_MFG_TEST_VLANS, tc.test_intf))
        return api.types.status.FAILURE

    # generate start agent script with testbed vlans
    if gen_start_agent_script(wl_vlans) != api.types.status.SUCCESS:
        return api.types.status.FAILURE

    # copy preinit script and start agent script to naples
    preinit_filename = api.GetTopDir() + '/' + WS_PREINIT_SCRIPT_PATH
    start_agent_filename = api.GetTopDir(
    ) + '/' + WS_IOTA_START_AGENT_SCRIPT_PATH
    # NOTE(review): the CopyToNaples response is not checked here; a copy
    # failure would only surface later when the scripts are missing.
    api.CopyToNaples(tc.bitw_node_name, [preinit_filename, start_agent_filename], "")
    # The generated start-agent script is a temporary workspace artifact.
    os.remove(start_agent_filename)

    return api.types.status.SUCCESS
def Setup(tc):
    """Stage policy.json on every athena node/nic pair before the test.

    For the "default" policy, /data/policy.json is removed so athena_app
    falls back to its built-in policy; for a custom policy, a generated
    policy.json is copied to /data/ on each Naples.

    Returns api.types.status.SUCCESS or FAILURE.
    """
    parse_args(tc)
    # Custom policies need longer settle/read times than the default one.
    if tc.policy_type == "default":
        tc.sec_app_restart_sleep = 120
        tc.flow_cache_read_sleep = 15
    else:
        tc.sec_app_restart_sleep = 180
        tc.flow_cache_read_sleep = 45

    tc.node_nic_pairs = athena_app_utils.get_athena_node_nic_names()
    tc.custom_policy_path = api.GetTopDir() + '/' + CUSTOM_PLCY_JSON_DIR + '/' \
                            + CUSTOM_PLCY_JSON_FNAME
    tc.default_policy_path = api.GetTopDir() + '/' + DEFAULT_PLCY_JSON_FILEPATH
    tc.gen_custom_plcy_fname = ''

    # if file is already there, it will overwrite the old file
    cmd = ""
    for node, nic in tc.node_nic_pairs:
        # because new logic in athena_app is to read policy.json in /data
        # if we want to test default policy.json, we have to clean /data first
        if tc.policy_type == "default":
            api.Logger.info("Test default policy.json")
            api.Logger.info("Clean old policy.json file in /data")
            cmd = "rm -f /data/policy.json"
        else:
            api.Logger.info("Test Custom policy.json")
            if (gen_custom_policy_cfg(tc) != api.types.status.SUCCESS):
                return api.types.status.FAILURE
            api.Logger.info(
                "Copy policy.json file from IOTA dir to /data/ on Naples")
            api.CopyToNaples(node, [tc.gen_custom_plcy_fname], "")
            cmd = "mv /" + GEN_CUSTOM_PLCY_JSON_FNAME + " /data/policy.json"

        # Run the staging command plus a sync on this node/nic.
        req = api.Trigger_CreateExecuteCommandsRequest()
        api.Trigger_AddNaplesCommand(req, node, cmd, nic)
        api.Trigger_AddNaplesCommand(req, node, "sync", nic)
        resp = api.Trigger(req)

        # NOTE(review): the loop variable below shadows the `cmd` string above.
        for cmd in resp.commands:
            api.PrintCommandResults(cmd)
            if cmd.exit_code != 0:
                # Map the failed command back to a specific error message.
                if 'rm' in cmd.command:
                    api.Logger.error("removing /data/policy.json failed "
                                     "on {}".format((node, nic)))
                    return api.types.status.FAILURE
                if 'mv' in cmd.command:
                    api.Logger.error("moving policy.json to /data/ failed "
                                     "on {}".format((node, nic)))
                    return api.types.status.FAILURE
                if 'sync' in cmd.command:
                    api.Logger.error("sync failed on {}".format((node, nic)))
                    return api.types.status.FAILURE
    return api.types.status.SUCCESS
def __installPenCtl(node):
    """Install penctl on `node`.

    Copies the penctl package and token file to the host, unpacks the
    package, and caches the resolved executable/token absolute paths in
    common.PENCTL_EXEC[node] / common.PENCTL_TOKEN[node].

    Returns api.types.status.SUCCESS or FAILURE.
    """
    fullpath = api.GetTopDir() + '/' + common.PENCTL_PKG
    resp = api.CopyToHost(node, [fullpath], "")
    if resp is None:
        return api.types.status.FAILURE
    if resp.api_response.api_status != types_pb2.API_STATUS_OK:
        api.Logger.error("Failed to copy penctl to Node: %s" % node)
        return api.types.status.FAILURE

    fullpath = api.GetTopDir() + '/' + common.PENCTL_TOKEN_FILE
    resp = api.CopyToHost(node, [fullpath], "")
    if resp is None:
        return api.types.status.FAILURE
    if resp.api_response.api_status != types_pb2.API_STATUS_OK:
        api.Logger.error("Failed to copy penctl token to Node: %s" % node)
        return api.types.status.FAILURE

    req = api.Trigger_CreateExecuteCommandsRequest()
    # Unpack the package; sync so files are visible to subsequent commands.
    api.Trigger_AddHostCommand(req, node, "tar -xvf %s" % os.path.basename(common.PENCTL_PKG) + " && sync", background = False)
    #Create a symlink at top level
    execName = __penctl_exec(node)
    realPath = "realpath %s/%s " % (common.PENCTL_DEST_DIR, execName)
    api.Trigger_AddHostCommand(req, node, realPath, background = False)
    resp = api.Trigger(req)
    for cmd in resp.commands:
        api.PrintCommandResults(cmd)
        if cmd.exit_code != 0:
            return api.types.status.FAILURE
    # The second command's stdout line is the resolved binary path.
    common.PENCTL_EXEC[node] = resp.commands[1].stdout.split("\n")[0]

    req = api.Trigger_CreateExecuteCommandsRequest()
    #Create a symlink at top level
    realPath = "realpath %s " % (common.PENCTL_TOKEN_FILE_NAME)
    api.Trigger_AddHostCommand(req, node, realPath, background = False)
    resp = api.Trigger(req)
    for cmd in resp.commands:
        api.PrintCommandResults(cmd)
        if cmd.exit_code != 0:
            return api.types.status.FAILURE
    common.PENCTL_TOKEN[node] = resp.commands[0].stdout.split("\n")[0]
    return api.types.status.SUCCESS
def Setup(tc):
    """Copy the configured storage cfg/test files plus a per-OS pnsotest
    driver script to every Naples host.

    NOTE(review): tc.files keeps growing inside the node loop, so later
    nodes also receive earlier nodes' pnsotest_<os>.py files — confirm this
    accumulation is intended.

    Returns api.types.status.SUCCESS or FAILURE.
    """
    tc.nodes = api.GetNaplesHostnames()
    tc.files = []
    for cfgfile in tc.args.cfg:
        tc.files.append("%s/%s/%s" % (api.GetTopDir(), tc.args.dir, cfgfile))
    tc.files.append("%s/%s/%s" % (api.GetTopDir(), tc.args.dir, tc.args.test))
    for n in tc.nodes:
        # Add the OS-specific test harness script for this node.
        tc.files.append("%s/iota/test/iris/testcases/storage/pnsotest_%s.py" % (api.GetTopDir(), api.GetNodeOs(n)))
        resp = api.CopyToHost(n, tc.files)
        if not api.IsApiResponseOk(resp):
            return api.types.status.FAILURE
    return api.types.status.SUCCESS
def LoadDriver(node_names, node_os, target_version='latest', tc=None):
    """Load the release driver matching `target_version` on each node.

    Args:
        node_names: iterable of node names to operate on.
        node_os: OS type of the nodes (OS_TYPE_LINUX / OS_TYPE_ESX).
        target_version: release to load; 'latest' is a no-op.
        tc: optional testcase object, forwarded to the per-OS load helpers
            and used to record driver_changed state. Backward-compatible
            addition: the original body referenced an undefined `tc`
            (NameError at runtime).

    Returns api.types.status.SUCCESS or FAILURE.
    """
    if target_version == 'latest':
        # Fix: return here — the original fell through to the loop with
        # manifest_file undefined.
        api.Logger.info('Target version is latest - nothing to change')
        return api.types.status.SUCCESS

    resp = api.DownloadAssets(release_version=target_version)
    if not api.IsApiResponseOk(resp):
        api.Logger.error("Failed to download assets for %s" % target_version)
        return api.types.status.FAILURE
    manifest_file = os.path.join(api.GetTopDir(), 'images',
                                 target_version + '.json')

    for node in node_names:
        if node_os == OS_TYPE_LINUX:
            # Fix: use the function parameters instead of the undefined
            # `tc.target_version` / `tc.os` the original referenced.
            if __load_linux_driver(tc, node, manifest_file) == api.types.status.SUCCESS:
                api.Logger.info("Release Driver %s reload on %s" %
                                (target_version, node))
            else:
                api.Logger.error("Failed to load release driver %s reload on %s" %
                                 (target_version, node))
                return api.types.status.FAILURE
            # this is required to bring the testbed into operation state
            # after driver unload interfaces need to be initialized
            wl_api.ReAddWorkloads(node)
        elif node_os == OS_TYPE_ESX:
            host.UnloadDriver(node_os, node)
            if tc is not None:
                tc.driver_changed = True
            if __load_esxi_driver(node, node_os, manifest_file) == api.types.status.SUCCESS:
                api.Logger.info("Release Driver %s reload on %s" %
                                (target_version, node))
            else:
                api.Logger.error("Failed to load release driver %s reload on %s" %
                                 (target_version, node))
                return api.types.status.FAILURE
            api.RestartNodes([node])
    # Fix: the original had no success return at the end (returned None).
    return api.types.status.SUCCESS
def Main(step):
    """Install the upgrade-test app binary on every Naples and request a
    penctl time sync. Skipped entirely under --skip_setup.

    Returns api.types.status.SUCCESS, or the install failure status.
    """
    if GlobalOptions.skip_setup:
        return api.types.status.SUCCESS
    naplesHosts = api.GetNaplesHostnames()
    assert(len(naplesHosts) != 0)
    binary_path = os.path.join(BIN_PATH, testupgapp_utils.UPGRADE_TEST_APP + "_bin", testupgapp_utils.UPGRADE_TEST_APP + ".bin")
    fullpath = os.path.join(api.GetTopDir(), binary_path)
    if not os.path.isfile(fullpath):
        # Fall back to the capri build output location.
        ALT_BIN_PATH="nic/build/aarch64/iris/capri/out/"
        binary_path = os.path.join(ALT_BIN_PATH, testupgapp_utils.UPGRADE_TEST_APP + "_bin", testupgapp_utils.UPGRADE_TEST_APP + ".bin")
    for naplesHost in naplesHosts:
        # Stop any running instance before replacing the binary.
        testupgapp_utils.stopTestUpgApp(naplesHost, True)
        ret = utils.installBinary(naplesHost, binary_path)
        if ret != api.types.status.SUCCESS:
            api.Logger.error("Failed in test upgrade app %s copy to Naples" % binary_path)
            return ret
    req = api.Trigger_CreateExecuteCommandsRequest()
    for n in naplesHosts:
        pencommon.AddPenctlCommand(req, n, "update time")
    # NOTE(review): the trigger response is not checked — time sync is
    # treated as best effort.
    api.Trigger(req)
    return api.types.status.SUCCESS
def installBinary(node, img):
    """Copy binary `img` (path relative to the workspace top) to the host
    and to /update on Naples, then make it executable on Naples.

    Returns api.types.status.SUCCESS or FAILURE.
    """
    fullpath = os.path.join(api.GetTopDir(), img)
    api.Logger.info("fullpath for binary: " + fullpath)
    resp = api.CopyToHost(node, [fullpath], "")
    if resp is None:
        return api.types.status.FAILURE
    if resp.api_response.api_status != types_pb2.API_STATUS_OK:
        api.Logger.error("Failed to copy Drivers to Node: %s" % node)
        return api.types.status.FAILURE
    resp = api.CopyToNaples(node, [fullpath], "", naples_dir="/update")
    # Fix: guard against a None response before dereferencing it — the
    # CopyToHost path above had this check, this path did not.
    if resp is None:
        return api.types.status.FAILURE
    if resp.api_response.api_status != types_pb2.API_STATUS_OK:
        api.Logger.error("Failed to copy Drivers to Node: %s" % node)
        return api.types.status.FAILURE
    req = api.Trigger_CreateExecuteCommandsRequest()
    # The binary must be executable to run from /update.
    api.Trigger_AddNaplesCommand(
        req, node, "chmod 777 /update/{}".format(os.path.basename(fullpath)))
    resp = api.Trigger(req)
    for cmd_resp in resp.commands:
        api.PrintCommandResults(cmd_resp)
        if cmd_resp.exit_code != 0:
            api.Logger.error("failed to change permission %s", cmd_resp.command)
            return api.types.status.FAILURE
    return api.types.status.SUCCESS
def Teardown(tc):
    """Restore the latest firmware if this test changed it, then re-init
    workloads (Linux only).

    Returns api.types.status.SUCCESS or FAILURE.
    """
    if tc.skip:
        return api.types.status.SUCCESS
    if not tc.fw_changed:
        return api.types.status.SUCCESS
    # Restore the workspace and testbed to continue
    if compat.LoadFirmware(tc.nodes, tc.os, 'latest') == api.types.status.SUCCESS:
        # Fix: the original logged `node`, which is not defined at this
        # point (the loop below introduces it) — log the node list instead.
        api.Logger.info("Loaded latest Fw on %s" % str(tc.nodes))
    else:
        api.Logger.error("Failed to load latest Fw on %s" % str(tc.nodes))
        return api.types.status.FAILURE
    if tc.os == compat.OS_TYPE_LINUX:
        for node in tc.nodes:
            # this is required to bring the testbed into operation state
            # after driver unload interfaces need to be initialized
            wl_api.ReAddWorkloads(node)
    return api.types.status.SUCCESS
def __load_bundle(self):
    """Locate this testbundle's file under the suite's first package and
    parse it into self.__spec."""
    package_dir = self.__parent.GetPackages()[0].replace(".", "/")
    bundle_path = f"{api.GetTopDir()}/{package_dir}/testbundles/{self.__bunfile}"
    Logger.debug(f"Importing Testbundle {bundle_path}")
    self.__spec = self.__read_spec(bundle_path)
    return
def Setup(tc):
    """Distribute the `fuz` traffic tool to every workload in the selected
    pairs and record each entity's resolved fuz path in FUZ_EXEC.

    Returns api.types.status.SUCCESS or FAILURE.
    """
    tc.skip = False
    tc.workload_pairs = tc.selected
    if len(tc.workload_pairs) == 0:
        api.Logger.info("Skipping Testcase due to no workload pairs.")
        tc.skip = True
        # Fix: return immediately on skip instead of falling through with
        # an empty workload set.
        return api.types.status.SUCCESS

    # Collect the unique workloads across all selected pairs.
    wloads = set()
    for w1, w2, port in tc.workload_pairs:
        wloads.add(w1)
        wloads.add(w2)

    fullpath = api.GetTopDir() + '/iota/bin/fuz'
    for w in wloads:
        resp = api.CopyToWorkload(w.node_name, w.workload_name, [fullpath], '')
        #Create a symlink at top level
        realPath = "realpath fuz"
        req = api.Trigger_CreateExecuteCommandsRequest()
        api.Trigger_AddCommand(req, w.node_name, w.workload_name, realPath, background=False)
        resp = api.Trigger(req)
        for cmd in resp.commands:
            api.PrintCommandResults(cmd)
            if cmd.exit_code != 0:
                return api.types.status.FAILURE
            # realpath output (first line) is the absolute fuz path.
            FUZ_EXEC[cmd.entity_name] = cmd.stdout.split("\n")[0]
    return api.types.status.SUCCESS
def SetupPacketScapy(tc):
    """Copy the scapy packet helper script to every workload in tc.

    Returns api.types.status.SUCCESS, or FAILURE on the first copy error.
    """
    script_path = api.GetTopDir() + '/' + dir_path + "packet_scapy.py"
    for workload in tc.workloads:
        copy_resp = api.CopyToWorkload(workload.node_name,
                                       workload.workload_name,
                                       [script_path], "")
        if copy_resp is None:
            api.Logger.info("Failed to copy packet scapy script to WL:%s" % workload.workload_name)
            return api.types.status.FAILURE
    return api.types.status.SUCCESS
def Setup(tc):
    """Prepare PNSO test parameters and YML files.

    Computes scaled input lengths and the expected hash/checksum compare
    values for the selected test, then generates the block-size, globals
    and test YML files and queues them in tc.files.
    """
    __setup_default_params(tc)
    tc.files = []
    tc.tcdir = "%s/%s" % (api.GetTopDir(), pnsodefs.PNSO_TCDIR)
    # Per-run scratch dir; prefer $USER, fall back to $HOSTNAME.
    try:
        tc.tmpdir = "/tmp/%s_%d" % (os.environ["USER"], os.getpid())
    except KeyError:
        # Fix: catch only the missing-env-var case instead of a bare except.
        tc.tmpdir = "/tmp/%s_%d" % (os.environ["HOSTNAME"], os.getpid())
    tc.ymldir = "%s/%s" % (api.GetTopDir(), pnsodefs.YMLDIR)
    tc.os = __get_param(tc, 'os', ['linux', 'freebsd', 'esx'])
    inputlen = __get_param(tc, 'inputlen', pnsodefs.PNSO_TEST_DEFAULT_INPUT_LEN)
    tc.args.x2inputlen = inputlen * 2
    tc.args.x4inputlen = inputlen * 4
    tc.args.x8inputlen = inputlen * 8
    tc.args.x16inputlen = inputlen * 16
    flags = __get_param(tc, 'flags', '0')
    # Fix: use integer (floor) division — true division made nblks a float,
    # which turned the compare values below into floats (e.g. 64.0).
    nblks = inputlen // pnsodefs.PNSO_TEST_DEFAULT_INPUT_LEN
    if tc.args.test == 'hash.yml':
        if flags == '0':
            # Flat digest sizes when no per-block flags are set.
            tc.args.hash_compare_val1x = 64
            tc.args.hash_compare_val2x = 64
            tc.args.hash_compare_val16x = 64
        else:
            # Per-block digests scale with the number of blocks.
            tc.args.hash_compare_val1x = 64 * nblks
            tc.args.hash_compare_val2x = 128 * nblks
            tc.args.hash_compare_val16x = 1024 * nblks
    if tc.args.test == 'chksum.yml':
        if flags == '0':
            tc.args.chksum_compare_val1x = 8
            tc.args.chksum_compare_val2x = 8
        else:
            tc.args.chksum_compare_val1x = 8 * nblks
            tc.args.chksum_compare_val2x = 16 * nblks
    __prepare_ymls(tc)
    tc.files.append(tc.blocksize_yml)
    tc.files.append(tc.globals_yml)
    tc.files.append(tc.test_yml)
    return
def Setup(tc):
    """Read node/interface testsuite attrs and push the athena device.json
    to the bitw Naples.

    Returns api.types.status.SUCCESS.
    """
    # parse tc args
    parse_args(tc)

    tc.bitw_node_name = api.GetTestsuiteAttr("bitw_node_name")
    tc.wl_node_name = api.GetTestsuiteAttr("wl_node_name")
    tc.intfs = api.GetTestsuiteAttr("inb_mnic_intfs")

    device_json = api.GetTopDir() + '/nic/conf/athena/device.json'
    api.CopyToNaples(tc.bitw_node_name, [device_json], "")

    return api.types.status.SUCCESS
def __installNaplesFwImage(node):
    """Copy the penctl Naples firmware package to the given node's host.

    Returns api.types.status.SUCCESS or FAILURE.
    """
    package_path = api.GetTopDir() + '/' + common.PENCTL_NAPLES_PKG
    copy_resp = api.CopyToHost(node, [package_path], "")
    if copy_resp is None:
        return api.types.status.FAILURE
    if copy_resp.api_response.api_status != types_pb2.API_STATUS_OK:
        api.Logger.error("Failed to copy Drivers to Node: %s" % node)
        return api.types.status.FAILURE
    return api.types.status.SUCCESS
def gen_start_agent_script(wl_vlans):
    """Generate a copy of the start-agent script with the mfg-test vlans
    added to the athena_app --mfg-mode command line.

    Args:
        wl_vlans: candidate vlan ids; the first NUM_MFG_TEST_VLANS are
            passed to athena_app via --vlans.

    Returns api.types.status.SUCCESS or FAILURE.
    """
    # parse template start agent script
    in_script = []
    with open(api.GetTopDir() + '/' + WS_NIC_START_AGENT_SCRIPT_PATH, 'r') as fd:
        in_script = fd.readlines()
    if not in_script:
        api.Logger.error('Unable to read start agent script')
        return api.types.status.FAILURE

    line_num, tokens = None, None
    for idx, line in enumerate(in_script):
        # Fix: the original condition was `'athena_app' and '--mfg-mode' in
        # line`, which ignores the 'athena_app' operand (always truthy).
        if 'athena_app' in line and '--mfg-mode' in line:
            line_num = idx
            tokens = line.split()
            break

    if not tokens:
        api.Logger.error('Failed to parse start agent script')
        return api.types.status.FAILURE

    # get mfg vlans and add them to cmdline args
    vlan_nums = wl_vlans[:NUM_MFG_TEST_VLANS]
    vlan_nums = [str(vlan) for vlan in vlan_nums]
    vlan_list = ','.join(vlan_nums)
    insert_after = tokens.index('--mfg-mode') + 1
    tokens.insert(insert_after, '--vlans {}'.format(vlan_list))
    # Fix: re-append the newline that line.split() dropped — writelines()
    # does not add newlines, so the modified line would otherwise merge
    # with the following one.
    mod_line = ' '.join(tokens) + '\n'
    in_script[line_num] = mod_line

    # create a new copy of start agent script with vlan args
    new_filename = api.GetTopDir() + '/' + WS_IOTA_START_AGENT_SCRIPT_PATH
    with open(new_filename, 'w') as fd:
        fd.writelines(in_script)
    os.chmod(new_filename, 0o777)

    return api.types.status.SUCCESS
def UpdateNaplesPipelines(self, pipelines=None):
    """Update nic mode/pipeline/version for the given pipelines and
    reinstall images on the matching nodes, rewriting iota/warmd.json with
    the changes.

    Args:
        pipelines: optional list of pipeline descriptors; defaults to
            self.GetNaplesPipelines(). (Fix: default changed from a shared
            mutable `[]` to None — behavior for callers is unchanged.)

    Raises:
        ValueError: on an invalid nic mode, pipeline name, or nic number.
    """
    if GlobalOptions.skip_firmware_upgrade:
        Logger.debug("user requested to skip firmware upgrade so skipping naples pipeline install")
        return
    if not pipelines:
        pipelines = self.GetNaplesPipelines()
    if not pipelines:
        Logger.debug("no pipelines found")
        return
    nwarmd = "{0}/iota/warmd.json".format(api.GetTopDir())
    with open(GlobalOptions.testbed_json, "r") as warmdFile:
        updated = False
        alreadyDownloaded = []
        warmd = json.load(warmdFile)
        for pl in pipelines:
            # Validate the descriptor before touching any state.
            if not types.nicModes.valid(pl.mode.upper()):
                raise ValueError("nic mode {0} is not valid. must be one of: {1}".format(pl.mode, types.nicModes.str_enums.values()))
            if not types.pipelines.valid(pl.pipeline.upper()):
                raise ValueError("nic pipeline {0} is not valid. must be one of: {1}".format(pl.pipeline, types.pipelines.str_enums.values()))
            if pl.nicNumber < 1:
                raise ValueError("nic number must be >= 1. value from testsuite files was {0}".format(pl.nicNumber))
            Logger.debug("checking pipeline info for {0}".format(pl))
            topoNode = self.__getNodeByName(pl.node)
            if not topoNode:
                # Fix: the original referenced undefined `node.name` here.
                Logger.warn("failed to find node {0} in topology node list".format(pl.node))
                continue
            instId = topoNode.GetNodeInfo()["InstanceID"]
            for node in warmd['Instances']:
                if instId == node.get('ID', None):
                    device = topoNode.GetDeviceByNicNumber(pl.nicNumber)
                    device.SetMode(pl.mode)
                    device.SetNaplesPipeline(pl.pipeline)
                    # Mirror the new settings into the warmd nic entry.
                    nic = node['Nics'][pl.nicNumber-1]
                    nic['version'] = pl.version
                    nic['pipeline'] = pl.pipeline
                    nic['mode'] = pl.mode
                    updated = True
                    # Download each release's assets only once.
                    if pl.version not in alreadyDownloaded:
                        api.DownloadAssets(pl.version)
                        alreadyDownloaded.append(pl.version)
                    Logger.info("upgrading node:nic {0}:{1}".format(topoNode.MgmtIpAddress(), pl.nicNumber))
                    devices = {instId: {"nics": [pl.nicNumber],
                                        "nodeName": pl.node,
                                        "pipeline": pl.pipeline}}
                    Logger.debug("writing updated warmd.json to {0}".format(nwarmd))
                    with open(nwarmd, 'w') as outfile:
                        json.dump(warmd, outfile, indent=4)
                    resp = api.ReInstallImage(fw_version=pl.version,
                                              dr_version=pl.version, devices=devices)
                    if resp != api.types.status.SUCCESS:
                        # Fix: the original combined an f-string with
                        # .format(), so the message logged the literal
                        # "0:1" instead of the node address and nic number.
                        Logger.error("Failed to install images on the node:nic {0}:{1}".format(topoNode.MgmtIpAddress(), pl.nicNumber))
                    break
            else:
                Logger.warn("failed to find node {0} / id {1} in warmd".format(topoNode.MgmtIpAddress(), instId))
def Setup(tc):
    """Record bitw/workload node info and packet-script paths as testsuite
    attributes for later testcases.

    Returns api.types.status.SUCCESS or FAILURE.
    """
    # get node info
    tc.bitw_node_name = None
    tc.wl_node_name = None
    tc.wl_node = None

    # Assuming only one bitw node and one workload node
    athena_nics = store.GetTopology().GetNicsByPipeline("athena")
    tc.bitw_node_name = next((n.GetNodeName() for n in athena_nics), None)

    workloads = api.GetWorkloads()
    if not workloads:
        api.Logger.error('No workloads available')
        return api.types.status.FAILURE
    tc.wl_node_name = workloads[0].node_name

    tc.nodes = api.GetNodes()
    for candidate in tc.nodes:
        if candidate.Name() == tc.wl_node_name:
            tc.wl_node = candidate

    api.SetTestsuiteAttr("bitw_node_name", tc.bitw_node_name)
    api.SetTestsuiteAttr("wl_node_name", tc.wl_node_name)
    api.SetTestsuiteAttr("wl_node", tc.wl_node)

    scripts_dir = api.GetTopDir() + '/iota/test/athena/testcases/networking/scripts/'
    api.SetTestsuiteAttr("send_pkt_path", scripts_dir + 'send_pkt.py')
    api.SetTestsuiteAttr("recv_pkt_path", scripts_dir + 'recv_pkt.py')

    return api.types.status.SUCCESS
def copy_to_entity(entity):
    """Copy the `fuz` tool to the given workload entity and record its
    resolved path.

    NOTE(review): `tc` is not a parameter and not visibly defined in this
    scope — unless a module-global `tc` exists, the `tc.fuz_exec[...]`
    assignment below raises NameError (compare the sibling Setup(), which
    uses a module-level FUZ_EXEC map). Confirm which store is intended.

    Returns api.types.status.SUCCESS or FAILURE.
    """
    fullpath = api.GetTopDir() + '/iota/bin/fuz'
    resp = api.CopyToWorkload(entity.node_name, entity.workload_name, [fullpath], '')
    #Create a symlink at top level
    realPath = "realpath fuz"
    req = api.Trigger_CreateExecuteCommandsRequest()
    api.Trigger_AddCommand(req, entity.node_name, entity.workload_name, realPath, background = False)
    resp = api.Trigger(req)
    for cmd in resp.commands:
        api.PrintCommandResults(cmd)
        if cmd.exit_code != 0:
            return api.types.status.FAILURE
        # realpath output (first line) is the absolute fuz path.
        tc.fuz_exec[cmd.entity_name] = cmd.stdout.split("\n")[0]
    return api.types.status.SUCCESS
def Setup(tc):
    """Copy the pencap package to every Naples host ahead of the
    techsupport/sysinfo test.

    Returns api.types.status.SUCCESS or FAILURE.
    """
    api.Logger.info("Pencap - Techsupport Sysinfo")
    tc.nodes = api.GetNaplesHostnames()
    tc.os = api.GetNodeOs(tc.nodes[0])

    #Copy pencap package to the host
    pencap_dir = api.GetTopDir() + '/platform/drivers/pencap/'
    contents = os.listdir(pencap_dir)
    api.Logger.info("Content of {dir} : {cnt}".format(dir=pencap_dir,
                                                      cnt=contents))
    package = pencap_dir + "pencap.tar.gz"
    for node in tc.nodes:
        api.Logger.info(
            "Copying Pencap package to the host{node}".format(node=node))
        copy_resp = api.CopyToHost(node, [package])
        if not api.IsApiResponseOk(copy_resp):
            api.Logger.error("Failed to copy {pkg} to {node}: {resp}".format(
                pkg="pencap.tar.gz", node=node, resp=copy_resp))
            return api.types.status.FAILURE
    return api.types.status.SUCCESS
def Setup(tc):
    """Stage the sonic package on all Naples nodes and publish the
    max-cpus testsuite attribute.

    Returns api.types.status.SUCCESS, or the copy failure status.
    """
    tc.dmesg_commands = []
    tc.output_commands = []
    node_list = api.GetNaplesHostnames()
    # Run it only on first Naples
    #tc.nodes = [ node_list[0] ]
    tc.nodes = node_list
    tc.os = api.GetNodeOs(tc.nodes[0])
    tc.sonicpkg = api.GetTopDir() + '/' + tc.args.package

    status = __copy_sonic_to_all_naples(tc)
    if status != api.types.status.SUCCESS:
        return status

    api.SetTestsuiteAttr(pnsodefs.PNSO_TEST_MAXCPUS_ATTR, tc.args.maxcpus)
    return api.types.status.SUCCESS
def GetICMPv6PacketPcap(wl, test_type):
    """Build an ICMPv6 packet of the requested type for workload `wl` and
    write it to a pcap file.

    Returns (full_path, file_name); both are None when test_type is not a
    recognized ICMPv6 packet type.
    """
    builders = {
        "ICMPv6-NS": FormICMPv6NSPkt,
        "ICMPv6-NA": FormICMPv6NAPkt,
        "ICMPv6-RS": FormICMPv6RSPkt,
        "ICMPv6-RA": FormICMPv6RAPkt,
    }
    pcap_file = None
    pcap_file_full_path = None
    # Get the Packet form function
    builder = builders.get(test_type)
    # Execute the function
    if builder is not None:
        pcap_file = "%s.pcap" % test_type
        pcap_file_full_path = (api.GetTopDir() + '/' + dir_path + pcap_file)
        wrpcap(pcap_file_full_path, builder(wl))
    return (pcap_file_full_path, pcap_file)
def Verify(tc):
    """Verify DTS shutdown command results.

    Returns FAILURE if any command exited non-zero; SUCCESS otherwise.
    Always restores the working directory to the workspace top.
    """
    # skip some iterator cases
    if skip_curr_test(tc):
        return api.types.status.SUCCESS

    if len(tc.resp) == 0:
        return api.types.status.FAILURE

    failed = False
    for resp in tc.resp:
        for cmd in resp.commands:
            api.PrintCommandResults(cmd)
            if cmd.exit_code != 0:
                api.Logger.error("DTS shutdown failed")
                failed = True

    os.chdir(api.GetTopDir())

    # Fix: the original computed `failed` but never used it — it returned
    # SUCCESS and logged "successful" even when a command failed.
    if failed:
        return api.types.status.FAILURE
    api.Logger.info('DTS shutdown successful!!')
    return api.types.status.SUCCESS
def installNaplesFwLatestImage(node, img):
    """Copy the firmware image <topdir>/nic/<img> to the host and to
    /update on Naples.

    Returns api.types.status.SUCCESS or FAILURE.
    """
    image_path = api.GetTopDir() + '/nic/' + img
    api.Logger.info("fullpath for upg image: " + image_path)

    host_resp = api.CopyToHost(node, [image_path], "")
    if host_resp is None:
        return api.types.status.FAILURE
    if host_resp.api_response.api_status != types_pb2.API_STATUS_OK:
        api.Logger.error("Failed to copy Drivers to Node: %s" % node)
        return api.types.status.FAILURE

    naples_resp = api.CopyToNaples(node, [image_path], "", naples_dir="/update")
    if naples_resp is None:
        return api.types.status.FAILURE
    if naples_resp.api_response.api_status != types_pb2.API_STATUS_OK:
        api.Logger.error("Failed to copy Drivers to Node: %s" % node)
        return api.types.status.FAILURE

    return api.types.status.SUCCESS
def Setup(tc):
    """Distribute the RDMA driver package to Naples nodes and the show_gid
    helper to the remaining workload nodes.

    Publishes "driver_path" (host tools dir) as a testsuite attribute.
    Returns api.types.status.SUCCESS or FAILURE.
    """
    api.SetTestsuiteAttr("driver_path", api.GetHostToolsDir() + '/')
    tc.iota_path = api.GetTestsuiteAttr("driver_path")
    tc.nodes = api.GetNaplesHostnames()
    tc.other_nodes = api.GetWorkloadNodeHostnames()
    # Assumes a homogeneous testbed: the first node's OS picks the package.
    tc.os = api.GetNodeOs(tc.nodes[0])
    platform_gendir = api.GetTopDir()+'/platform/gen/'
    if tc.os == host.OS_TYPE_LINUX:
        tc.pkgname = 'drivers-linux.tar.xz'
        tc.showgid = 'drivers-linux/show_gid'
    else:
        tc.pkgname = 'drivers-freebsd.tar.xz'
        tc.showgid = 'drivers-freebsd/show_gid'

    # Copy RDMA driver to naples nodes
    for n in tc.nodes:
        api.Logger.info("Copying {pkg} to {node}"
                        .format(pkg=tc.pkgname, node=n))
        resp = api.CopyToHost(n, [platform_gendir + tc.pkgname])
        if not api.IsApiResponseOk(resp):
            api.Logger.error("Failed to copy {pkg} to {node}: {resp}"
                             .format(pkg=tc.pkgname, node=n, resp=resp))
            return api.types.status.FAILURE

    # Copy show_gid to other nodes
    for n in tc.other_nodes:
        # Naples nodes already received it inside the driver package.
        if n in tc.nodes:
            continue
        api.Logger.info("Copying show_gid to {node}"
                        .format(node=n))
        resp = api.CopyToHost(n, [platform_gendir + tc.showgid])
        if not api.IsApiResponseOk(resp):
            api.Logger.error("Failed to copy show_gid to {node}: {resp}"
                             .format(node=n, resp=resp))
            return api.types.status.FAILURE
    return api.types.status.SUCCESS
def Setup(tc):
    """Generate the policy config and install scapy on workload nodes for
    either the single-nic (local) or dual-nic (end-to-end) athena topology.

    Returns api.types.status.SUCCESS.
    """
    tc.dualnic = False
    if hasattr(tc.args, 'dualnic'):
        tc.dualnic = tc.args.dualnic

    # Set absolute path for json files.
    api.SetTestsuiteAttr("template_policy_json_path", api.GetTopDir() + \
        TEMPLATE_PLCY_JSON_PATH)
    tc.template_policy_json_path = api.GetTestsuiteAttr("template_policy_json_path")

    tc.athena_node_nic_pairs = athena_app_utils.get_athena_node_nic_names()
    tc.wl_node_nic_pairs = utils.get_classic_node_nic_pairs()
    tc.host_ifs = {}

    if tc.dualnic:
        # E2E topology: collect host + workload interfaces per classic nic.
        for node, nic in tc.wl_node_nic_pairs:
            tc.host_ifs[(node, nic)] = api.GetNaplesHostInterfaces(node, nic)
            workloads = api.GetWorkloads(node)
            for wl in workloads:
                tc.host_ifs[(node, nic)].append(wl.interface)
        gen_plcy_cfg_e2e_wl_topo(tc)
        wl_nodes = [nname for nname, nic in tc.wl_node_nic_pairs]
        # Install python scapy packages
        install.InstallScapyPackge(tc, wl_nodes)
    else:
        # Assuming only one bitw node and one workload node
        tc.bitw_node_name, tc.bitw_nic = tc.athena_node_nic_pairs[0]
        tc.wl_node_name, tc.classic_nic = tc.wl_node_nic_pairs[0]
        gen_plcy_cfg_local_wl_topo(tc)
        # Install python scapy packages
        install.InstallScapyPackge(tc, [tc.wl_node_name])
    return api.types.status.SUCCESS
def LoadFwDriver(os_type, node):
    """Build and load the ionic_fw driver (BSD only) after copying
    naples_fw.tar to the host.

    Returns api.types.status.SUCCESS or FAILURE.
    """
    req = api.Trigger_CreateExecuteCommandsRequest(serial=True)
    if os_type != host.OS_TYPE_BSD:
        api.Logger.info("Unknown os_type - %s" % os_type)
        return api.types.status.FAILURE
    fullpath = api.GetTopDir() + '/nic/naples_fw.tar'
    api.Logger.info("Fullpath for firmware image: " + fullpath)
    resp = api.CopyToHost(node, [fullpath], "")
    if resp is None:
        api.Logger.info("Failed to copy naples_fw.tar")
        return api.types.status.FAILURE
    api.Trigger_AddHostCommand(req, node, "cp naples_fw.tar " + IONIC_FW_DRV_PATH)
    api.Trigger_AddHostCommand(req, node, "make -C " + IONIC_FW_DRV_PATH)
    api.Trigger_AddHostCommand(req, node, "kldload " + IONIC_FW_DRV_PATH + "/ionic_fw.ko")
    resp = api.Trigger(req)
    if resp is None:
        return api.types.status.FAILURE
    for cmd in resp.commands:
        if cmd.exit_code != 0:
            if os_type == host.OS_TYPE_BSD:
                # Fix: the two branches below were swapped — finding
                # "already loaded" in stdout is the benign case, not the
                # failure case.
                if cmd.stdout.find("already loaded") != -1:
                    api.Logger.info(
                        "ionic_fw already loaded, reload is expected to fail without unload"
                    )
                else:
                    api.Logger.info("ionic_fw load failed")
                    api.PrintCommandResults(cmd)
                    return api.types.status.FAILURE
            else:
                # Unreachable (non-BSD already returned above); kept as a
                # safety net.
                api.Logger.info("Unknown os_type - %s" % os_type)
                return api.types.status.FAILURE
    return api.types.status.SUCCESS
def __generate_config(cfgMetaFile, cfgOutputDir):
    """Generate netagent config with the heimdall tool.

    Renders the heimdall template (allocating a testbed vlan offset) and
    runs the heimdall binary to produce config files in cfgOutputDir.

    Returns api.types.status.SUCCESS or FAILURE.
    """
    def __generate_heimdall_template():
        # Fill the template's vlan-offset from the testbed allocator and
        # write the rendered template into the output dir.
        tmpl_file = _netagent_cfg_dir + "/heimdall.tmpl.json"
        tmpl_cfg = json.load(open(tmpl_file))
        tmpl_cfg["vlan-offset"] = api.Testbed_AllocateVlan()
        tmpl_output = cfgOutputDir + "/heimdall.tmpl.json"
        with open(tmpl_output, 'w') as outfile:
            json.dump(tmpl_cfg, outfile)
        return tmpl_output

    # Start from a clean output directory.
    os.system("rm -rf " + cfgOutputDir)
    os.mkdir(cfgOutputDir)
    tmpl_file = __generate_heimdall_template()
    heimdallExec = api.GetTopDir() + '/iota/bin/heimdall'
    # NOTE(review): command built by string concatenation and run through a
    # shell; paths here are harness-controlled, but subprocess.run with an
    # argument list would be safer.
    cmd = heimdallExec + " gen -f " + cfgMetaFile + " -t " + tmpl_file + " -o " + cfgOutputDir + " -u node1 --remote-uuid node2"
    api.Logger.info("Generating config %s" % cmd)
    ret = os.system(cmd)
    if ret != 0:
        api.Logger.error("Failed to generate heimdall config")
        return api.types.status.FAILURE
    # Remove the rendered template; only the generated config is kept.
    os.system("rm -f " + tmpl_file)
    return api.types.status.SUCCESS
#! /usr/bin/python3 import re import iota.harness.api as api from collections import defaultdict MemStatsToolHostPath = api.GetTopDir() + "/iota/scripts/naples/ps_mem.py" MemStatsToolNaplesPath = "/data/ps_mem.py" ''' Class to collect and maintain the output of the memory usage stats tool ps_mem.py Allows to run and snapshot memory usage statistics during the test and run memleak check at end of test. Usage: * Import memStatsObjClient * In setup stage, initialize the memstats object by calling InitNodesForMemUsageStats() * To collect mem stats throughout the tests, periodically call CollectMemUsageSnapshot() * At end of test, after cleaning up the resources used by the test call once CollectMemUsageSnapshot(). * To print usage statistics use PrintMemUsageHistory() * To run mem leak check for a process call CheckMemLeakFromHistory(pname) * Makesure before start & end of test, process free memory is released from heap back to the system in order to get accurate memory usage stats. ''' class MemLeakObject(): def __init__(self, node): self.node = node self.build_history = False self.mem_use_history_dict = defaultdict(lambda: dict())
#! /usr/bin/python3 import subprocess from iota.harness.infra.glopts import GlobalOptions as GlobalOptions import iota.harness.api as api SRC_FILE_NAME = "arm-iperf" SRC_FILE_PATH = "/iota/images/" SRC_FILE = api.GetTopDir() + SRC_FILE_PATH + SRC_FILE_NAME DST_FILE_NAME = SRC_FILE_NAME DST_FILE_PATH = "/tmp/" DST_FILE = DST_FILE_PATH + DST_FILE_NAME class __TCData: pass def delete_file(tc): result = True req = api.Trigger_CreateAllParallelCommandsRequest() for node in tc.nodes: api.Trigger_AddNaplesCommand(req, node, tc.delete_cmd) resp = api.Trigger(req) for cmd in resp.commands: if cmd.exit_code != 0: api.PrintCommandResults(cmd) result = False return result
def LoadFirmware(nodes, node_os, target_version):
    """Copy and install the `target_version` firmware on each node, then
    restart it and re-init its management IP.

    Args:
        nodes: node names to update.
        node_os: OS type (OS_TYPE_LINUX / OS_TYPE_ESX / other).
        target_version: release to install; assets are downloaded unless
            'latest'.

    Returns api.types.status.SUCCESS/FAILURE (or the failed download resp).
    """
    if target_version != 'latest':
        resp = api.DownloadAssets(release_version=target_version)
        if not api.IsApiResponseOk(resp):
            api.Logger.error("Failed to download assets for %s" % target_version)
            return resp
    manifest_file = os.path.join(api.GetTopDir(), 'images', target_version + '.json')
    image_manifest = parser.JsonParse(manifest_file)
    # Fix: check for a missing capri entry before indexing — the original
    # indexed [0] first, so an empty filter raised IndexError and the
    # `is None` check below it was unreachable.
    capri_images = list(filter(lambda x: x.naples_type == "capri",
                               image_manifest.Firmwares))
    if not capri_images:
        api.Logger.error("Unable to load image manifest")
        return api.types.status.FAILURE
    fw_images = capri_images[0]
    api.Logger.info("Fullpath for firmware image to load: %s " % fw_images.image)
    fwImgFile = os.path.join(GlobalOptions.topdir, fw_images.image)

    for node in nodes:
        # Fix: the firmware-version probe was originally outside the loop
        # and referenced `node` before it was defined (NameError).
        if node_os == OS_TYPE_LINUX:
            fw_version, _ = GetLinuxFwDriverVersion(node)
        else:
            fw_version = None

        if fw_version == '1.1.1-E-15':
            # Naples with Fw 1.1.1-E-15 has no OOB and IntMgmt Ip is fixed
            resp = api.CopyToNaples(node, [fwImgFile], image_manifest.Version,
                                    via_oob=False, naples_dir="data",
                                    nic_mgmt_ip=GetNicIntMgmtIP(node))
            if not api.IsApiResponseOk(resp):
                api.Logger.info("Failed to copy naples_fw.tar with via_oob=True")
                return api.types.status.FAILURE
            for cmd in resp.commands:
                if cmd.exit_code != 0:
                    # NOTE(review): kept best-effort as in the original —
                    # the copy error is logged but sysupdate still runs.
                    api.Logger.error("Failed to copy %s naples_fw.tar via_oob=True" % image_manifest.Version)
        else:
            resp = api.CopyToNaples(node, [fwImgFile], image_manifest.Version,
                                    via_oob=True, naples_dir="data")
            if not api.IsApiResponseOk(resp):
                api.Logger.info("Failed to copy naples_fw.tar with via_oob=True")
                return api.types.status.FAILURE
            for cmd in resp.commands:
                if cmd.exit_code != 0:
                    api.Logger.error("Failed to copy %s naples_fw.tar via_oob=True" % image_manifest.Version)
                    # Try with via_oob=False
                    resp = api.CopyToNaples(node, [fwImgFile],
                                            image_manifest.Version,
                                            via_oob=False, naples_dir="data")
                    if not api.Trigger_IsSuccess(resp):
                        api.Logger.error("Failed to copy naples_fw.tar to target naples")
                        return api.types.status.FAILURE

        # Flush and install the copied image, then reboot the node.
        req = api.Trigger_CreateExecuteCommandsRequest(serial=True)
        api.Trigger_AddNaplesCommand(req, node, "sync")
        api.Trigger_AddNaplesCommand(req, node,
                                     "/nic/tools/sysupdate.sh -p /data/naples_fw.tar",
                                     timeout=120)
        resp = api.Trigger(req)
        if not api.Trigger_IsSuccess(resp):
            api.Logger.error("sysupdate.sh cmd failed")
            return api.types.status.FAILURE
        api.RestartNodes([node])
        # NOTE(review): preserved from the original — this returns after
        # the first node, so only nodes[0] is ever processed.
        if node_os == OS_TYPE_LINUX:
            return LinuxReInitMgmtIP(node)
        elif node_os == OS_TYPE_ESX:
            return ESXiReInitMgmtIP(node)
    return api.types.status.SUCCESS