def Verify(tc):
    """Verify command exit codes against the value expected for the NIC mode.

    FreeBSD only; other OSes are reported as not implemented (IGNORED).
    Returns SUCCESS when every command exited with the expected code,
    FAILURE otherwise (or when no response was captured).
    """
    if tc.os != host.OS_TYPE_BSD:
        api.Logger.info("Not implemented")
        return api.types.status.IGNORED

    status = api.types.status.SUCCESS

    # Re-add workloads first: the trigger unloaded the driver, which leaves
    # testbed interfaces uninitialized until this is done.
    for host_node in tc.nodes:
        wl_api.ReAddWorkloads(host_node)

    if tc.resp is None:
        return api.types.status.FAILURE

    # classic mode permits the operation (exit 0); otherwise the command is
    # expected to be rejected with the OS's EPERM (FreeBSD/Linux is 1)
    # (IONIC_RC_EPERM is 4)
    expect_exit_code = 0 if api.GetConfigNicMode() == 'classic' else 1

    for cmd in tc.resp.commands:
        if cmd.exit_code != expect_exit_code:
            api.PrintCommandResults(cmd)
            api.Logger.error("Expected exit code %d" % expect_exit_code)
            api.Logger.error("Actual exit code %d" % cmd.exit_code)
            status = api.types.status.FAILURE

    return status
def Verify(tc):
    """Restore workloads on every node; always succeeds.

    After the driver unload in the trigger phase, interfaces must be
    re-initialized to bring the testbed back into an operational state.
    """
    for host_node in tc.nodes:
        wl_api.ReAddWorkloads(host_node)
    return api.types.status.SUCCESS
def Teardown(tc):
    """Restore the latest firmware after the compat test changed it.

    Returns SUCCESS when nothing needs restoring (testcase skipped or
    firmware never changed) or the restore succeeded; FAILURE when the
    latest firmware could not be reloaded.
    """
    if tc.skip:
        return api.types.status.SUCCESS
    if not tc.fw_changed:
        return api.types.status.SUCCESS
    # Restore the workspace and testbed to continue with the latest firmware.
    # (Removed an unused `manifest_file` local the original computed here.)
    if compat.LoadFirmware(tc.nodes, tc.os, 'latest') == api.types.status.SUCCESS:
        # Fix: the original logged an undefined name `node` here (NameError
        # on both branches); report the node list firmware was loaded on.
        api.Logger.info("Loaded latest Fw on %s" % str(tc.nodes))
    else:
        api.Logger.error("Failed to load latest Fw on %s" % str(tc.nodes))
        return api.types.status.FAILURE
    if tc.os == compat.OS_TYPE_LINUX:
        for node in tc.nodes:
            # this is required to bring the testbed into operation state;
            # after driver unload interfaces need to be initialized
            wl_api.ReAddWorkloads(node)
    return api.types.status.SUCCESS
def Trigger(tc):
    """Save IOTA agent state, reinstall the latest driver image, then
    restore agent state and re-add workloads on all nodes.

    Returns the status of the save/reinstall steps; the restore status is
    recorded in tc.resp for Verify() to inspect.
    """
    resp = api.SaveIotaAgentState(tc.nodes)
    if resp != api.types.status.SUCCESS:
        api.Logger.error("Failed to save node")
        return resp
    api.Logger.info("Saved IotaAgent state...")
    # Fix: removed a leftover `pdb.set_trace()` debugger breakpoint that
    # would hang unattended automation runs here.
    resp = api.ReInstallImage(fw_version=None, dr_version="latest")
    if resp != api.types.status.SUCCESS:
        api.Logger.error("Failed to reimage naples-node")
        return resp
    api.Logger.info("Restore IotaAgent...")
    tc.resp = api.RestoreIotaAgentState(tc.nodes)
    if tc.resp != api.types.status.SUCCESS:
        api.Logger.error(
            "Failed to restore IotaAgent after host reboot/reimage")
    else:
        api.Logger.info("Restore IotaAgent successful post reboot/reimage")
    api.Logger.info("Attempting ReAddWorkloads on all naples nodes...")
    for node in tc.nodes:
        # interfaces need re-initialization after the reimage
        wl_api.ReAddWorkloads(node)
    return resp
def Trigger(tc):
    """Random-wait power-cycle loop over every Naples node.

    For each node: save agent state, power-cycle tc.args.reboots+1 times
    via the chosen method (apc/ipmi), sleep a random interval so the next
    cycle may land at an arbitrary point of the boot, collect and verify
    BMC logs per cycle, then restore agent state and workloads.

    Raises OfflineTestbedException when agent state cannot be saved or
    restored; BMC-log verification failures are recorded in tc.resp.
    """
    for node in tc.naples_nodes:
        # save agent state so it can be restored after the power cycles
        api.Logger.info(f"Saving node: {node.Name()}")
        if api.SaveIotaAgentState([node.Name()]) == api.types.status.FAILURE:
            raise OfflineTestbedException
        node_data = tc.node_bmc_data[node.Name()]
        cimc_info = node.GetCimcInfo()
        # Reboot Node.
        # Reboot method (APC, IPMI, OS Reboot) is passed as a testcase parameter
        for reboot in range(tc.args.reboots + 1):
            if tc.iterators.powercycle_method == "apc":
                api.ApcNodes([node.Name()], skip_agent_init=True,
                             skip_restore=True)
            elif tc.iterators.powercycle_method == "ipmi":
                api.IpmiNodes([node.Name()], skip_agent_init=True,
                              skip_restore=True)
            # random wait between 5s and the node's mean boot time: the next
            # cycle may interrupt the box anywhere in its boot sequence
            rand_sleep = random.randint(5, node_data.MeanBootTime)
            api.Logger.info(
                f"Sleeping after reset via {tc.iterators.powercycle_method} of node: {node.Name()} for: {rand_sleep}sec"
            )
            time.sleep(rand_sleep)
            tag = '%s#%d' % (tc.iterators.powercycle_method, reboot)
            # collect BMC logs for this cycle and keep them keyed by method+cycle
            bmc_logs = iota_log_api.CollectBmcLogs(cimc_info.GetIp(),
                                                   cimc_info.GetUsername(),
                                                   cimc_info.GetPassword())
            node_data.BmcLogs['%s#%d' % (tc.iterators.powercycle_method,
                                         reboot)] = bmc_logs
            if bmc_utils.verify_bmc_logs(
                    node.Name(), bmc_logs, tag=tag,
                    save_logs=True) != api.types.status.SUCCESS:
                api.Logger.error(
                    f"Bmc Log verification failure detected: {node.Name()} - ABORT"
                )
                tc.resp = api.types.status.FAILURE
                break
        # let the final reboot fully complete before restoring the agent
        api.Logger.info(
            f"Sleeping for 180-sec node: {node.Name()} - end of test-cycle")
        time.sleep(180)
        resp = api.RestoreIotaAgentState([node.Name()])
        if resp != api.types.status.SUCCESS:
            api.Logger.error(f"Failed to restore agent state after reboot")
            raise OfflineTestbedException
        api.Logger.info(f"Reboot SUCCESS")
        wl_api.ReAddWorkloads(node.Name())
        if tc.resp == api.types.status.FAILURE:
            break
    # NOTE(review): returns SUCCESS even when tc.resp was set to FAILURE
    # above — presumably Verify() inspects tc.resp; confirm.
    return api.types.status.SUCCESS
def do_lif_reset_test(node, os):
    """Exercise LIF reset on every ionic interface of `node` (FreeBSD sysctl).

    Phase 1: three driver unload/load cycles to confirm the driver survives
    reload. Phase 2: per-interface single LIF reset, checking VLAN and
    filter lists are preserved across the reset. Phase 3: background
    stress of repeated LIF resets (results intentionally not checked).
    Returns FAILURE on the first mismatch or driver error, else SUCCESS.
    """
    for i in range(3):
        api.Logger.info("LIF reset and driver reload test loop %d" % i)
        if host.UnloadDriver(os, node, "all") is api.types.status.FAILURE:
            api.Logger.error("ionic unload failed loop %d" % i)
            return api.types.status.FAILURE
        if host.LoadDriver(os, node) is api.types.status.FAILURE:
            api.Logger.error("ionic load failed loop %d" % i)
            return api.types.status.FAILURE
        # re-initialize workloads after the driver reload
        wl_api.ReAddWorkloads(node)
        if api.GetNaplesHostInterfaces(node) is None:
            api.Logger.error("No ionic interface after loop %d" % i)
            return api.types.status.FAILURE
    for intf in api.GetNaplesHostInterfaces(node):
        req = api.Trigger_CreateExecuteCommandsRequest(serial=True)
        # snapshot VLAN and filter state before the reset for comparison
        vlan_list = getVlanList(node, intf)
        filter_list = getFilterList(node, intf)
        # Single LIF reset
        api.Trigger_AddHostCommand(
            req, node,
            "sysctl dev.%s.reset=1" % (host.GetNaplesSysctl(intf)))
        resp = api.Trigger(req)
        # give the LIF time to come back before re-reading state
        time.sleep(5)
        vlan_list1 = getVlanList(node, intf)
        filter_list1 = getFilterList(node, intf)
        if vlan_list != vlan_list1:
            api.Logger.error(
                "VLAN list doesn't match for %s, before: %s after: %s" %
                (intf, str(vlan_list), str(vlan_list1)))
            return api.types.status.FAILURE
        if filter_list != filter_list1:
            api.Logger.error(
                "Filter list doesn't match for %s, before: %s after: %s" %
                (intf, str(filter_list), str(filter_list1)))
            return api.types.status.FAILURE
        api.Logger.info(
            "Success running LIF reset test on %s VLAN: %s, Filters; %s" %
            (intf, str(vlan_list), str(filter_list)))
    # Now stress test LIF reset
    for intf in api.GetNaplesHostInterfaces(node):
        req = api.Trigger_CreateExecuteCommandsRequest(serial=True)
        api.Trigger_AddHostCommand(
            req, node,
            "for ((i=0;i<10;i++)); do sysctl dev.%s.reset=1; done &" %
            (host.GetNaplesSysctl(intf)))
        # Some of the LIF resets will fail since they run in the background
        # concurrently with reload of the driver.
        resp = api.Trigger(req)
    return api.types.status.SUCCESS
def Verify(tc):
    """Bring the testbed back to an operational state after the trigger.

    Always returns SUCCESS; only restores workloads on each node.
    """
    # TODO: verify driver load/unload timing. Should never cross magic # of sec
    for host_node in tc.nodes:
        # driver unload leaves interfaces uninitialized until workloads
        # are re-added
        wl_api.ReAddWorkloads(host_node)
    return api.types.status.SUCCESS
def Verify(tc):
    """Cycle the eth driver on the tested host and restore its workloads.

    Returns FAILURE if either the unload or the load step fails,
    otherwise SUCCESS.
    """
    target = tc.nodes[0]
    failed = api.types.status.FAILURE
    # restore the workloads on the one host we tested: unload, reload,
    # then re-add workloads
    steps = (
        lambda: host.UnloadDriver(tc.os, target, "eth"),
        lambda: host.LoadDriver(tc.os, target),
    )
    for step in steps:
        if step() is failed:
            return failed
    wl_api.ReAddWorkloads(target)
    return api.types.status.SUCCESS
def Teardown(tc):
    """Cycle the eth driver on every node and restore workloads (FreeBSD only).

    Non-BSD OSes are reported as not implemented (IGNORED). Returns
    FAILURE on the first unload or load error, otherwise SUCCESS.
    """
    if tc.os != host.OS_TYPE_BSD:
        api.Logger.info("Not implemented")
        return api.types.status.IGNORED

    # cycle every node through an unload/load sequence
    for target in tc.nodes:
        if host.UnloadDriver(tc.os, target, "eth") is api.types.status.FAILURE:
            return api.types.status.FAILURE
        if host.LoadDriver(tc.os, target) is api.types.status.FAILURE:
            return api.types.status.FAILURE
        # interfaces need re-initialization after the driver unload to
        # bring the testbed back into an operational state
        wl_api.ReAddWorkloads(target)

    return api.types.status.SUCCESS
def Verify(tc):
    """Check the stats-clear commands: on BSD, a non-zero exit with output
    means stats were not cleared.

    Also restores workloads on every node first, since the trigger
    unloaded the driver.
    """
    for target in tc.nodes:
        wl_api.ReAddWorkloads(target)

    if tc.resp is None:
        return api.types.status.FAILURE

    is_bsd = tc.os == host.OS_TYPE_BSD
    for cmd in tc.resp.commands:
        # flattened form of the original nested checks: failing exit code
        # on BSD with captured stdout means the clear did not happen
        if cmd.exit_code != 0 and is_bsd and cmd.stdout is not None:
            api.Logger.error("Stats is not cleared")
            api.PrintCommandResults(cmd)
            return api.types.status.FAILURE

    return api.types.status.SUCCESS
def Setup(tc):
    """Set up the firmware compatibility test.

    Loads the target-version firmware on all Naples nodes, re-adds
    workloads on Linux, and selects workload pairs for the ping phase.
    Sets tc.skip, tc.fw_changed, tc.target_version and tc.workload_pairs
    for later stages. Skips on dry-run and on FreeBSD/ESXi.
    """
    api.Logger.info("Fw compat test")
    if api.IsDryrun():
        return api.types.status.SUCCESS
    tc.nodes = api.GetNaplesHostnames()
    tc.os = api.GetNodeOs(tc.nodes[0])
    tc.skip = False
    if tc.os == compat.OS_TYPE_BSD:
        # Not supporting BSD & ESXi right now
        tc.skip = True
        return api.types.status.SUCCESS
    # Intention to test locally built FW with target-version driver
    tc.target_version = getattr(tc.iterators, 'release', 'latest')
    tc.fw_changed = False
    if compat.LoadFirmware(tc.nodes, tc.os, tc.target_version) == api.types.status.SUCCESS:
        # Fix: the original logged an undefined name `node` here, raising
        # NameError on the success path; log the node list instead.
        api.Logger.info("Loaded %s Fw on %s" % (tc.target_version, str(tc.nodes)))
    else:
        return api.types.status.FAILURE
    tc.fw_changed = True
    if tc.os == compat.OS_TYPE_LINUX:
        for node in tc.nodes:
            # this is required to bring the testbed into operation state;
            # after driver unload interfaces need to be initialized
            wl_api.ReAddWorkloads(node)
    if getattr(tc.args, 'type', 'local_only') == 'local_only':
        tc.workload_pairs = api.GetLocalWorkloadPairs()
    else:
        tc.workload_pairs = api.GetRemoteWorkloadPairs()
    if len(tc.workload_pairs) == 0:
        api.Logger.info(
            "Skipping ping part of testcase due to no workload pairs.")
        tc.skip = True
    return api.types.status.SUCCESS
def Verify(tc):
    """Compare per-interface transceiver data captured before and after a
    LIF reset (FreeBSD only), using the 'before'/'after' cookies recorded
    by the trigger phase in tc.cmd_cookies.
    """
    if tc.os != host.OS_TYPE_BSD:
        return api.types.status.SUCCESS
    for node in tc.nodes:
        # this is required to bring the testbed into operation state
        wl_api.ReAddWorkloads(node)
    if tc.resp is None:
        return api.types.status.FAILURE
    before = ""      # stdout of the most recent 'before' command
    cookie_idx = 0   # index into tc.cmd_cookies, advanced per data command
    testStatus = api.types.status.SUCCESS
    for cmd in tc.resp.commands:
        # cmds are per node in sequence; resp's are per node & intf in seq.
        # so, we can compare before and after data, per node & intf right in the loop
        api.PrintCommandResults(cmd)
        if (cmd.command.find("reset") != -1):
            # skip the reset cmd (no cookie was recorded for it)
            continue
        if (cmd.command.find("sleep") != -1):
            # skip the sleep cmd
            continue
        if (tc.cmd_cookies[cookie_idx] == 'before'):
            before = cmd.stdout
        if (tc.cmd_cookies[cookie_idx] == 'after'):
            # stdout looks like "<node>.media_status:..." — split to
            # recover the node name for logging
            cmdArr = cmd.stdout.split('.media_status:')
            if (before == cmd.stdout):
                api.Logger.info("Node %s transceiver data matches after LIF reset" % cmdArr[0])
            else:
                api.Logger.error("Node %s transceiver data does not match after LIF reset" % cmdArr[0])
                testStatus = api.types.status.FAILURE
        cookie_idx += 1
        # is EXIT code !0?
        if cmd.exit_code != 0:
            return api.types.status.FAILURE
    return testStatus
def Trigger(tc):
    """Run multicast and LIF tests first with the driver loaded, then as
    background tests spanning a driver unload/reload, on every Naples node.

    Returns FAILURE if any sub-test reports errors or the driver fails to
    unload/reload; otherwise SUCCESS.
    """
    errcnt = 0
    tc.intf_id = {}
    # assume that the driver is loaded
    for node in api.GetNaplesHostnames():
        errcnt += testLoadedMulticast(node, 2)
        errcnt += testLoadedLifs(tc, node)
    for node in api.GetNaplesHostnames():
        # set up background tests
        # each of these seems to take about 4 seconds, so be sure to
        # increase the command delays for each when another is added
        prepUnloadedMulticastTest(node)
        prepUnloadedLifs(tc, node)
        # unload driver and give time for the background tests to finish
        api.Logger.info("Start driver unload on " + node)
        if host.UnloadDriver(tc.os, node, "eth") is api.types.status.FAILURE:
            return api.types.status.FAILURE
        time.sleep(10)
        # restart the drivers and gather the results
        if host.LoadDriver(tc.os, node) is api.types.status.FAILURE:
            return api.types.status.FAILURE
        wl_api.ReAddWorkloads(node)
        api.Logger.info("Driver reload on " + node)
        errcnt += gatherUnloadedMulticastTest(node)
        errcnt += gatherUnloadedLifs(tc, node)
    if errcnt != 0:
        return api.types.status.FAILURE
    return api.types.status.SUCCESS
def Setup(tc):
    """Discover Naples nodes, save agent state, power-cycle them once to
    collect baseline ('Init') BMC logs, and record per-node boot data in
    tc.node_bmc_data for the trigger phase.
    """
    api.Logger.info("Server Compatiblity Random-Wait Reboot")
    nodes = api.GetNodes()
    tc.naples_nodes = []
    tc.node_bmc_data = dict()
    tc.resp = api.types.status.SUCCESS
    # for every node in the setup, keep only the Naples-capable ones
    for node in nodes:
        if api.IsNaplesNode(node.Name()):
            api.Logger.info(f"Found Naples Node: [{node.Name()}]")
            tc.naples_nodes.append(node)
            tc.node_bmc_data[node.Name()] = iota_util_parser.Dict2Object({})
        else:
            api.Logger.info(f"Skipping non-Naples Node: [{node.Name()}]")
    if len(tc.naples_nodes) == 0:
        api.Logger.error(f"Failed to find a Naples Node!")
        tc.resp = api.types.status.IGNORE
        return api.types.status.IGNORE
    # Check for
    for node in tc.naples_nodes:
        node_data = tc.node_bmc_data[node.Name()]
        # save agent state before the disruptive power-cycle
        api.Logger.info(f"Saving node: {node.Name()}")
        if api.SaveIotaAgentState([node.Name()]) == api.types.status.FAILURE:
            raise OfflineTestbedException
    # power-cycle nodes
    if tc.iterators.powercycle_method == "apc":
        api.ApcNodes([n.Name() for n in tc.naples_nodes])
    elif tc.iterators.powercycle_method == "ipmi":
        api.IpmiNodes([n.Name() for n in tc.naples_nodes])
    else:
        api.Logger.error(
            f"Powercycle-method: {tc.iterators.powercycle_method} unknown")
        return api.types.status.IGNORE
    # fixed wait for the servers to come back up
    time.sleep(180)
    for node in tc.naples_nodes:
        resp = api.RestoreIotaAgentState([node.Name()])
        if resp != api.types.status.SUCCESS:
            api.Logger.error(f"Failed to restore agent state after reboot")
            raise OfflineTestbedException
        api.Logger.info(f"Reboot SUCCESS")
        wl_api.ReAddWorkloads(node.Name())
        # NOTE(review): node_data still holds the value left over from the
        # first loop (the last saved node) — with multiple Naples nodes the
        # 'Init' logs all land on one node's record; confirm intent.
        setattr(node_data, 'BmcLogs', dict())
        cimc_info = node.GetCimcInfo()
        node_data.BmcLogs['Init'] = iota_log_api.CollectBmcLogs(
            cimc_info.GetIp(), cimc_info.GetUsername(),
            cimc_info.GetPassword())
        # Check for any errors
        if bmc_utils.verify_bmc_logs(
                node.Name(), node_data.BmcLogs['Init'], tag='Init',
                save_logs=True) != api.types.status.SUCCESS:
            tc.resp = api.types.status.IGNORE
            break
        # TODO: Process BMC logs to get boot time-profile
        setattr(node_data, 'MeanBootTime', 120)  # FIXME
    return tc.resp
def Setup(tc):
    """Prepare the RSS rx-flow-hash iperf test.

    Maps the rxflowhash iterator onto the toeplitz RSS enum plus the iperf
    IP-version/protocol options, picks a Naples node as the receiver,
    selects a client/server workload pair, reloads the driver on the
    server node to clear stats, and records the endpoint IPs.
    """
    # map iterators from IOTA infra to the enum for the toeplitz utilities
    iterators = {'tcp4': RSS.IPV4_TCP, 'udp4': RSS.IPV4_UDP,
                 'tcp6': RSS.IPV6_TCP, 'udp6': RSS.IPV6_UDP,
                 'ip6': RSS.IPV6, 'ip4tcp': RSS.IPV4, 'ip4udp': RSS.IPV4,
                 'ip6tcp': RSS.IPV6, 'ip6udp': RSS.IPV6}
    tc.rss_enum = iterators.get(tc.iterators.rxflowhash, "none")

    # iperf options for iterators
    # IPv4 vs IPv6 iterator
    ip_proto_iterators = {'tcp4': 'v4', 'udp4': 'v4', 'tcp6': 'v6',
                          'udp6': 'v6', 'ip4tcp': 'v4', 'ip4udp': 'v4',
                          'ip6tcp': 'v6', 'ip6udp': 'v6'}
    tc.tc_ip_proto = ip_proto_iterators.get(tc.iterators.rxflowhash, "none")

    # UDP vs TCP iterator
    proto_iterators = {'tcp4': 'tcp', 'udp4': 'udp', 'tcp6': 'tcp',
                       'udp6': 'udp', 'ip4tcp': 'tcp', 'ip4udp': 'udp',
                       'ip6tcp': 'tcp', 'ip6udp': 'udp'}
    tc.proto = proto_iterators.get(tc.iterators.rxflowhash, "none")

    # Fix: the original compared tc.tc_ip_proto against "none " (trailing
    # space), so a failed ip-proto lookup could never be detected here.
    if tc.proto == "none" or tc.tc_ip_proto == "none" or tc.rss_enum == "none":
        api.Logger.error(f"Not able to map the iterators. {tc.iterators.rxflowhash} {tc.tc_ip_proto} {tc.proto} {tc.rss_enum}")
        return api.types.status.FAILURE

    # number of sessions iterator
    tc.num_sessions = int(getattr(tc.args, "num_sessions", 1))

    # log which iteration is in progress
    api.Logger.info(f"==================== %s ====================" % tc.rss_enum)
    api.Logger.info(f"ip_proto:{tc.tc_ip_proto}, proto: {tc.proto}, rss: {tc.iterators.rss}, iperf_sessions: {tc.num_sessions}")

    tc.nodes = api.GetWorkloadNodeHostnames()
    tc.os = api.GetNodeOs(tc.nodes[0])
    if tc.os == 'freebsd':
        return api.types.status.SUCCESS

    # Identify the receiving node (testing RSS).
    # This will be the client node for iPerf.
    # All configuration, testing and verification will be done on this node.
    for n in tc.nodes:
        if api.IsNaplesNode(n):
            api.Logger.info(f"Found Naples Node: [{n}]")
            receive_node = n
            break
    else:
        api.Logger.error(f"Failed to find a Naples Node!")
        return api.types.status.FAILURE

    # Get workload pairs for iperf sessions
    workload_pairs = api.GetRemoteWorkloadPairs()
    if not workload_pairs:
        api.Logger.info("Skipping Testcase due to no workload pairs.")
        tc.skip = True
        # Fix: bail out early; continuing with no pairs left tc.client and
        # tc.server unset and crashed in the driver-reload step below.
        return api.types.status.SUCCESS

    # assign client/server node based on selected Receiving Node
    for pair in workload_pairs:
        if receive_node == pair[0].node_name:
            tc.client = pair[1]
            tc.server = pair[0]
        else:
            tc.client = pair[0]
            tc.server = pair[1]
        break

    # unload driver, to clear stats (server node only), then re-add workloads
    if host.UnloadDriver(tc.os, tc.server.node_name, "all") is api.types.status.FAILURE:
        return api.types.status.FAILURE
    if host.LoadDriver(tc.os, tc.server.node_name) is api.types.status.FAILURE:
        return api.types.status.FAILURE
    wl_api.ReAddWorkloads(tc.server.node_name)

    if tc.tc_ip_proto == 'v6':
        # use fully-exploded IPv6 addresses for consistent string handling
        tc.server_ip = str(ipaddress.ip_address(tc.server.ipv6_address).exploded)
        tc.client_ip = str(ipaddress.ip_address(tc.client.ipv6_address).exploded)
    else:
        tc.server_ip = tc.server.ip_address
        tc.client_ip = tc.client.ip_address
    return api.types.status.SUCCESS
def setup_features(tc):
    """Apply the feature settings named by tc.iterators to every workload
    interface via feature_cmd_map, run the generated commands, and re-add
    workloads on Naples nodes if any feature required a driver reload.
    """
    req = api.Trigger_CreateExecuteCommandsRequest(serial=True)
    driverReloaded = False
    for wl in tc.workloads:
        intf = wl.interface
        n = wl.node_name
        reloadDone = False
        for feature, cmdBuilderDict in feature_cmd_map.items():
            feature_value = getattr(tc.iterators, feature, None)
            if not feature_value:
                api.Logger.debug("Feature %s not provided, skipping..." % feature)
                continue
            os_type = api.GetNodeOs(n)
            # per-OS command builder and optional timeout for this feature
            callback = cmdBuilderDict[os_type]["cmd"]
            timeout = cmdBuilderDict[os_type].get("timeout")
            if not timeout:
                timeout = api.DEFAULT_COMMAND_TIMEOUT
            cmds = callback(n, intf, feature_value)
            if not isinstance(cmds, list):
                cmds = [cmds]
            for cmd in cmds:
                api.Trigger_AddCommand(req, n, wl.workload_name, cmd,
                                       timeout=timeout)
            if cmdBuilderDict[os_type].get("reloadCmd"):
                # Driver reload, just break as no need to setup for each interface.
                if reloadDone:
                    api.Logger.error(
                        "Driver reload already added for feature, can't do it again"
                    )
                    assert (0)
                reloadDone = True
                driverReloaded = True
                break
    resp = api.Trigger(req)
    for cmd in resp.commands:
        api.PrintCommandResults(cmd)
        if cmd.exit_code != 0:
            api.Logger.error("Error running cmd : %s " % cmd.command)
            api.Logger.error("Std Output : %s " % cmd.stdout)
            api.Logger.error("Std Err : %s " % cmd.stderr)
            if api.IsNaplesNode(cmd.node_name):
                # "parameters changed" means the setting was already in
                # effect; treat as a benign no-op
                if "parameters changed" in cmd.stderr:
                    api.Logger.info("Ignoring 'no change' error")
                    return api.types.status.SUCCESS
                return api.types.status.FAILURE
            else:
                api.Logger.info("Ignoring cmd error its non-naples node : %s"
                                % cmd.command)
        api.Logger.info("Success running cmd : %s" % cmd.command)
    if driverReloaded:
        # a driver reload tore down interfaces; re-add workloads on every
        # Naples node involved
        nodes = set()
        for wl in tc.workloads:
            if api.IsNaplesNode(wl.node_name):
                nodes.add(wl.node_name)
        time.sleep(5)
        for node in nodes:
            wl_api.ReAddWorkloads(node)
    return api.types.status.SUCCESS
def Trigger(tc):
    """PXE-install loop on the test node.

    Each iteration: save agent state, drop a marker file on the current OS,
    PXE-boot via ipmitool to install a fresh OS, boot back from disk,
    reinstall images and restore agent state/workloads, then verify the
    marker file is gone (proving a new filesystem was installed).

    Raises OfflineTestbedException when the testbed is left unusable
    (host unreachable, restore failed).
    """
    cimc_info = tc.test_node.GetCimcInfo()
    cimc_ip_address = cimc_info.GetIp()
    cimc_username = cimc_info.GetUsername()
    cimc_password = cimc_info.GetPassword()
    host_ipaddr = api.GetMgmtIPAddress(tc.test_node_name)
    if reboot.checkLinks(tc, tc.test_node_name) is api.types.status.FAILURE:
        api.Logger.error("Error verifying uplink interfaces")
        return api.types.status.FAILURE
    for install in range(tc.args.install_iterations):
        # save agent state before wiping the OS
        api.Logger.info(f"Saving node: {tc.test_node_name}")
        if api.SaveIotaAgentState([tc.test_node_name]) == api.types.status.FAILURE:
            raise OfflineTestbedException
        # touch the file on server to ensure this instance of OS is gone later
        req = api.Trigger_CreateExecuteCommandsRequest()
        touch_file_cmd = "touch /naples/oldfs"
        api.Trigger_AddHostCommand(req, tc.test_node_name, touch_file_cmd)
        resp = api.Trigger(req)
        if api.Trigger_IsSuccess(resp) is not True:
            api.Logger.error(f"Failed to run command on host {tc.test_node_name}, {touch_file_cmd}")
            return api.types.status.FAILURE
        # Boot from PXE to install an OS
        api.Logger.info(f"Starting PXE Install Loop # {install} on {tc.test_node_name}")
        cmd = "ipmitool -I lanplus -H %s -U %s -P %s chassis bootdev pxe options=efiboot" %\
            (cimc_ip_address, cimc_username, cimc_password)
        subprocess.check_call(cmd, shell=True)
        time.sleep(5)
        # reboot server
        cmd = "ipmitool -I lanplus -H %s -U %s -P %s chassis power cycle" %\
            (cimc_ip_address, cimc_username, cimc_password)
        subprocess.check_call(cmd, shell=True)
        # wait for installation to finish and server to come back
        time.sleep(180)
        api.Logger.info(f"Waiting for host to come up: {host_ipaddr}")
        if not waitforssh(host_ipaddr):
            raise OfflineTestbedException
        # Boot from HDD to run the test
        api.Logger.info(f"Setting Boot Order to HDD and rebooting {tc.test_node_name}")
        cmd = "ipmitool -I lanplus -H %s -U %s -P %s chassis bootdev disk options=efiboot" %\
            (cimc_ip_address, cimc_username, cimc_password)
        subprocess.check_call(cmd, shell=True)
        time.sleep(5)
        # reboot server
        cmd = "ipmitool -I lanplus -H %s -U %s -P %s chassis power cycle" %\
            (cimc_ip_address, cimc_username, cimc_password)
        subprocess.check_call(cmd, shell=True)
        api.Logger.info(f"Waiting for host to come up: {host_ipaddr}")
        time.sleep(180)
        if not waitforssh(host_ipaddr):
            raise OfflineTestbedException
        # restore images and agent state on the freshly installed OS
        api.Logger.info(f"Restoring node: {tc.test_node_name}")
        resp = api.ReInstallImage(fw_version=None, dr_version="latest")
        if resp != api.types.status.SUCCESS:
            api.Logger.error(f"Failed to install images on the testbed")
            raise OfflineTestbedException
        resp = api.RestoreIotaAgentState([tc.test_node_name])
        if resp != api.types.status.SUCCESS:
            api.Logger.error(f"Failed to restore agent state after PXE install")
            raise OfflineTestbedException
        api.Logger.info(f"PXE install iteration #{install} - SUCCESS")
        try:
            wl_api.ReAddWorkloads(tc.test_node_name)
        except:
            # NOTE(review): bare except also swallows KeyboardInterrupt /
            # SystemExit — consider narrowing to `except Exception`
            api.Logger.error(f"ReaddWorkloads failed with exception - See logs for details")
            return api.types.status.FAILURE
        # check touched file is not present, to ensure this is a new OS instance
        oldfs_command = "ls /naples/oldfs"
        req = api.Trigger_CreateExecuteCommandsRequest()
        api.Trigger_AddHostCommand(req, tc.test_node_name, oldfs_command)
        resp = api.Trigger(req)
        if api.IsApiResponseOk(resp) is not True:
            api.Logger.error(f"Failed to run command on host {tc.test_node_name} {oldfs_command}")
            return api.types.status.FAILURE
        cmd = resp.commands.pop()
        if cmd.exit_code == 0:
            # marker file survived, so the old filesystem is still in place
            api.Logger.error(f"Old file is present in FS after PXE install")
            return api.types.status.FAILURE
    api.Logger.info("PXE boot completed! Host is up.")
    return api.types.status.SUCCESS