def Main(args):
    """Testsuite setup entry point: init netagent, push base config, add workloads.

    Returns api.types.status.SUCCESS, or the status of the first failing step.
    """
    #time.sleep(120)
    api.Logger.info("Testsuite NIC Mode is %s" % (api.GetConfigNicMode()))
    agent_nodes = api.GetNaplesHostnames()
    netagent_api.Init(agent_nodes, hw=True)
    netagent_api.ReadConfigs(api.GetTopologyDirectory(), reset=False)
    ret = api.types.status.SUCCESS
    # 'unified' NIC mode needs its network/endpoint objects rewritten first.
    if api.GetConfigNicMode() in ['unified']:
        ret = UpdateNetworkAndEnpointObject()
        if ret != api.types.status.SUCCESS:
            return ret
    #Delete path is not stable yet
    #netagent_api.DeleteBaseConfig()
    if GlobalOptions.skip_setup:
        # Setup was skipped: reuse workloads from a previous run.
        ret = RestoreWorkloads()
    else:
        nic_mode = api.GetConfigNicMode()
        # classic/sriov modes get no base-config push at all.
        if nic_mode not in ['classic', 'sriov']:
            # unified pushes only SecurityProfile kinds; other modes push everything.
            kinds = ["SecurityProfile"] if nic_mode == 'unified' else None
            netagent_api.PushBaseConfig(kinds=kinds)
        ret = __add_workloads(api.GetNodes())
    return ret
def PrepareWorkloadVMotion(tc, candidateWorkloads):
    """Build tc.vmotion_cntxt: a per-workload move plan to an alternative host.

    Each candidate workload is assigned the first node in the topology that is
    not its current home; workloads with no alternative host are skipped with
    a warning.
    """
    ctx = parser.Dict2Object({})
    ctx.OrigHome = {}
    ctx.CurrentHome = {}
    ctx.MoveRequest = {}
    ctx.UUIDMap = api.GetNaplesNodeUuidMap()
    ctx.TimeProfile = []
    tc.vmotion_cntxt = ctx

    for wl in candidateWorkloads:
        alternatives = [n.Name() for n in api.GetNodes()
                        if n.Name() != wl.node_name]
        if not alternatives:
            api.Logger.warn("No alternative host found to move %s"
                            % wl.workload_name)
            continue
        dest_host = alternatives[0]
        ctx.OrigHome[wl] = wl.node_name
        ctx.CurrentHome[wl] = wl.node_name
        # Group the workloads by destination host.
        ctx.MoveRequest.setdefault(dest_host, []).append(wl)
        api.Logger.info("Preparing to move %s to %s"
                        % (wl.workload_name, dest_host))
    return
def Setup(tc):
    """Evacuate workloads from the target node(s) via per-workload vMotion threads.

    If tc.args.node is set only that node is drained; otherwise every node in
    the topology is processed. Returns api.types.status.SUCCESS.
    """
    vm_threads = []
    node_list = []
    node = getattr(tc.args, "node", None)
    if node:
        node_list.append(node)
    else:
        ''' add all nodes in the topo '''
        nodes = api.GetNodes()
        for node in nodes:
            node_list.append(node.Name())
    # Naples node-name -> UUID map, used by the EP create/delete helpers.
    tc.uuidMap = api.GetNaplesNodeUuidMap()
    for node in node_list:
        # Select the workloads to move off `node` and their destination host.
        (wls, new_node) = getWorkloadsToRemove(tc, node)
        for wl in wls:
            api.Logger.info("Moving wl {} from node {} to node {}".format(
                wl.workload_name, wl.node_name, new_node))
            # One thread per workload so the moves run concurrently.
            vm_thread = threading.Thread(target=triggerVmotion, args=(
                tc,
                wl,
                new_node,
            ))
            vm_threads.append(vm_thread)
            vm_thread.start()
            if (api.IsNaplesNode(new_node)):
                # Pre-create endpoint state on the destination Naples.
                create_ep_info(tc, wl, new_node, "START", node)
        # Wait for the moves to complete before cleaning up source state.
        for vm_thread in vm_threads:
            vm_thread.join()
        for wl in wls:
            if (api.IsNaplesNode(node)):
                # Remove endpoint state from the source Naples post-move.
                delete_ep_info(tc, wl, node)
    return api.types.status.SUCCESS
def tuneTcpLinux():
    """Apply TCP tuning sysctls on every Linux node in the topology.

    Returns api.types.status.SUCCESS, or FAILURE if the trigger fails or any
    sysctl command exits non-zero.
    """
    tune_cmds = [
        "sysctl -w net.core.rmem_max=134217728",
        "sysctl -w net.core.wmem_max=134217728",
        "sysctl -w net.ipv4.tcp_rmem='4096 87380 67108864'",
        "sysctl -w net.ipv4.tcp_wmem='4096 65536 67108864'",
        "sysctl -w net.ipv4.tcp_congestion_control=htcp",
        "sysctl -w net.ipv4.tcp_mtu_probing=1",
        "sysctl -w net.core.default_qdisc=fq",
    ]
    nodes = api.GetNodes()
    for node in nodes:
        node_name = node.Name()
        # Tuning commands are Linux sysctls; skip other OSes.
        if api.GetNodeOs(node_name) != "linux":
            continue
        req = api.Trigger_CreateExecuteCommandsRequest()
        for cmd in tune_cmds:
            api.Trigger_AddHostCommand(req, node_name, cmd)
        resp = api.Trigger(req)
        if resp is None:
            api.Logger.error("Failed to trigger on host %s" % node_name)
            return api.types.status.FAILURE
        # BUG FIX: the original checked only resp.commands.pop() — i.e. the
        # exit code of the LAST command — silently ignoring failures of the
        # first six sysctls. Check every command result.
        for cmd, rcmd in zip(tune_cmds, resp.commands):
            if rcmd.exit_code != 0:
                api.Logger.error("CMD %s failed with exit code %d on host %s" %
                                 (cmd, rcmd.exit_code, node_name))
                api.PrintCommandResults(rcmd)
                return api.types.status.FAILURE
    return api.types.status.SUCCESS
def get_classic_node_nic_pairs():
    """Return (node-name, device-name) pairs for every NIC in 'classic' mode."""
    return [
        (node.Name(), dev_name)
        for node in api.GetNodes()
        for dev_name in api.GetDeviceNames(node.Name())
        if api.GetTestbedNicMode(node.Name(), dev_name) == 'classic'
    ]
def Setup(tc):
    """Pick the first topology node as the PXE-install target node.

    Returns SUCCESS, or FAILURE when the topology has no nodes.
    """
    api.Logger.info("PXE Install")
    nodes = api.GetNodes()
    # BUG FIX: the original indexed api.GetNodes()[0] unconditionally and
    # raised IndexError on an empty topology; the commented-out check below
    # shows a guard was intended — fail explicitly instead.
    if not nodes:
        api.Logger.error("Did not find any nodes in the topology")
        return api.types.status.FAILURE
    tc.test_node = nodes[0]
    tc.test_node_name = tc.test_node.Name()
    return api.types.status.SUCCESS
def setMaxMTU():
    """Raise the MTU to 9000 on every host interface and workload interface.

    Returns SUCCESS, or FAILURE on the first interface whose MTU could not be
    set and verified.
    """
    MAX_MTU = 9000
    nodes = api.GetNodes()
    for node in nodes:
        node_name = node.Name()
        host_intfs = api.GetWorkloadNodeHostInterfaces(node_name)
        for intf in host_intfs:
            if setCheckMTU(node_name, intf, MAX_MTU) != api.types.status.SUCCESS:
                # BUG FIX: Logger.error was called with extra positional args
                # after a message containing no format placeholders, so the
                # node/interface never appeared in the log; build one explicit
                # message (also fixes the "unsucessfull" typo).
                api.Logger.error("MTU change unsuccessful on %s %s" %
                                 (node_name, intf))
                return api.types.status.FAILURE
    workloads = api.GetWorkloads()
    api.Logger.info(api.GetWorkloadNodeHostInterfaces(
        api.GetNodes()[0].Name()))
    for w in workloads:
        if setCheckMTU(w.node_name, w.interface, MAX_MTU) != api.types.status.SUCCESS:
            api.Logger.error("MTU change unsuccessful on %s %s" %
                             (w.node_name, w.interface))
            return api.types.status.FAILURE
    return api.types.status.SUCCESS
def Trigger(tc):
    """Ping each Naples node's oob_mnic0 management IP from its own host."""
    #import pdb; pdb.set_trace()
    req = api.Trigger_CreateExecuteCommandsRequest(serial = True)
    for node in api.GetNodes():
        # Only Naples nodes have an oob_mnic0 management IP to ping.
        if not node.IsNaples():
            continue
        ping_cmd = 'ping -c{count} {oob_mnic0_ip}'.format(
            oob_mnic0_ip = node.GetNicMgmtIP(), count = 3)
        api.Trigger_AddHostCommand(req, node.Name(), ping_cmd)
    tc.resp = api.Trigger(req)
    return api.types.status.SUCCESS
def Setup(tc):
    """vMotion test setup: verify baseline connectivity, build the move plan,
    and start fuz traffic.

    Returns SUCCESS, or FAILURE when the baseline ping test or fuz start fails.
    """
    vm_threads = []
    node_list = []
    node = getattr(tc.args, "node", None)
    if node:
        node_list.append(node)
    else:
        ''' add all nodes in the topo '''
        nodes = api.GetNodes()
        for node in nodes:
            node_list.append(node.Name())
    tc.Nodes = api.GetNaplesHostnames()
    tc.AllNodes = api.GetWorkloadNodeHostnames()
    tc.uuidMap = api.GetNaplesNodeUuidMap()
    tc.move_info = []
    tc.vm_dsc_to_dsc = True
    tc.num_moves = 0
    # The presence of a "conntrack" arg enables detailed validation.
    if hasattr(tc.args, "conntrack"):
        tc.detailed = True
    else:
        tc.detailed = False
    getNonNaplesNodes(tc)
    # Baseline reachability: arping failure is logged but NOT fatal,
    # ping failure aborts setup.
    if arping.ArPing(tc) != api.types.status.SUCCESS:
        api.Logger.info("arping failed on setup")
    if ping.TestPing(tc, 'local_only', 'ipv4', 64) != api.types.status.SUCCESS or ping.TestPing(tc, 'remote_only', 'ipv4', 64) != api.types.status.SUCCESS:
        api.Logger.info("ping test failed on setup")
        return api.types.status.FAILURE
    for node in node_list:
        (wls,new_node) = getWorkloadsToRemove(tc, node)
        # NOTE(review): num_moves is overwritten (not accumulated) on each
        # iteration, so it reflects only the last node processed — confirm
        # this is intended when node_list has more than one entry.
        tc.num_moves = len(wls)
        vm_utils.update_move_info(tc, wls, False, new_node)
    #Start Fuz
    ret = vm_utils.start_fuz(tc)
    if ret != api.types.status.SUCCESS:
        api.Logger.error("Fuz start failed")
        return api.types.status.FAILURE
    return api.types.status.SUCCESS
def Setup(tc):
    """Collect Naples-HW node names and reboot parameters; skip if none exist."""
    tc.skip = False
    tc.result = api.types.status.SUCCESS
    tc.node_names = []
    for n in api.GetNodes():
        if n.IsNaplesHw():
            tc.node_names.append(n.Name())
    # Nothing to reboot on a testbed without Naples hardware.
    if not tc.node_names:
        api.Logger.info('Skipping Testcase due to no naples hosts.')
        tc.skip = True
        return api.types.status.SUCCESS
    tc.reboot_count = int(getattr(tc.args, 'reboot_count', 1))
    tc.restart_mode = str(getattr(tc.args, 'restart_mode', 'reboot'))
    api.Logger.info('reboot_count: %d' % tc.reboot_count)
    api.Logger.info('restart_mode: %s' % tc.restart_mode)
    api.Logger.info('nodes: %s' % ' '.join(tc.node_names))
    return api.types.status.SUCCESS
def Setup(tc):
    """Open a Redfish session to the first node's CIMC/BMC.

    Returns SUCCESS, or ERROR when the BMC is unreachable, does not speak
    Redfish, or login fails for any other reason.
    """
    test_node = api.GetNodes()[0]
    tc.test_node = test_node
    try:
        #check_set_ncsi(tc.cfg)
        # Create a Redfish client object
        (cimc_username, cimc_password) = test_node.GetCimcCredentials()
        tc.RF = redfish_client(base_url="https://%s" % (test_node.GetCimcIP()),
                               username=cimc_username,
                               password=cimc_password)
        # Login with the Redfish client
        tc.RF.login()
    except ServerDownOrUnreachableError:
        api.Logger.error(
            "%s ILO ip not reachable or does not support RedFish" %
            test_node.GetCimcIP())
        return api.types.status.ERROR
    except Exception:
        # BUG FIX: the original bare `except:` also swallowed SystemExit and
        # KeyboardInterrupt; narrow to Exception, keeping the same logging.
        api.Logger.error(traceback.format_exc())
        return api.types.status.ERROR
    return api.types.status.SUCCESS
def Setup(tc):
    """Classify topology nodes into tester (intel NIC) and DUT, record their
    management IPs, and init the response list.
    """
    # parse iterator args
    # parse_args(tc)

    # skip some iterator cases
    if skip_curr_test(tc):
        return api.types.status.SUCCESS

    # node init
    tc.tester_node = None
    tc.tester_node_name = None
    tc.dut_node = None

    # init response list
    tc.resp = []

    if not api.GetWorkloads():
        api.Logger.error('No workloads available')
        return api.types.status.FAILURE

    # initialize tester-node and dut-node.
    tc.nodes = api.GetNodes()
    for node in tc.nodes:
        name = node.Name()
        mgmt_ip = api.GetMgmtIPAddress(name)
        if api.GetNicType(name) == 'intel':
            tc.tester_node = node
            tc.tester_node_name = name
            tc.tester_node_mgmt_ip = mgmt_ip
            api.Logger.info('tester node: %s mgmt IP: %s' % (name, mgmt_ip))
        else:
            tc.dut_node = node
            tc.dut_node_mgmt_ip = mgmt_ip
            api.Logger.info('dut node: %s mgmt IP: %s' % (name, mgmt_ip))
    return api.types.status.SUCCESS
def Setup(tc):
    """Record the athena bitw node, the workload node, and the packet-script
    paths as testsuite attributes.
    """
    # get node info
    tc.bitw_node_name = None
    tc.wl_node_name = None
    tc.wl_node = None

    # Assuming only one bitw node and one workload node
    nics = store.GetTopology().GetNicsByPipeline("athena")
    if nics:
        tc.bitw_node_name = nics[0].GetNodeName()

    workloads = api.GetWorkloads()
    if not workloads:
        api.Logger.error('No workloads available')
        return api.types.status.FAILURE
    tc.wl_node_name = workloads[0].node_name

    tc.nodes = api.GetNodes()
    for node in tc.nodes:
        if node.Name() == tc.wl_node_name:
            tc.wl_node = node

    api.SetTestsuiteAttr("bitw_node_name", tc.bitw_node_name)
    api.SetTestsuiteAttr("wl_node_name", tc.wl_node_name)
    api.SetTestsuiteAttr("wl_node", tc.wl_node)
    api.SetTestsuiteAttr(
        "send_pkt_path",
        api.GetTopDir() +
        '/iota/test/athena/testcases/networking/scripts/send_pkt.py')
    api.SetTestsuiteAttr(
        "recv_pkt_path",
        api.GetTopDir() +
        '/iota/test/athena/testcases/networking/scripts/recv_pkt.py')
    return api.types.status.SUCCESS
def Setup(tc):
    """Prepare the DPDK test suite (DTS): pick tester/DUT nodes, build and
    ship a dpdk tarball to the tester, configure it, and bring down the DUT's
    inband mnics.

    Returns SUCCESS, or FAILURE when workloads are missing or the tester/DUT
    nodes cannot be identified.
    """
    # parse iterator args
    # parse_args(tc)

    # skip some iterator cases
    if skip_curr_test(tc):
        return api.types.status.SUCCESS

    # node init
    tc.tester_node = None
    tc.tester_node_name = None
    tc.dut_node = None

    # init response list
    tc.resp = []

    workloads = api.GetWorkloads()
    if len(workloads) == 0:
        api.Logger.error('No workloads available')
        return api.types.status.FAILURE

    # initialize tester-node and dut-node.
    tc.nodes = api.GetNodes()
    for node in tc.nodes:
        if api.GetNicType(node.Name()) == 'intel':
            tc.tester_node = node
            tc.tester_node_name = node.Name()
            tc.tester_node_mgmt_ip = api.GetMgmtIPAddress(node.Name())
            api.Logger.info('tester node: %s mgmt IP: %s' %
                            (node.Name(), tc.tester_node_mgmt_ip))
        else:
            tc.dut_node = node
            tc.dut_node_mgmt_ip = api.GetMgmtIPAddress(node.Name())
            api.Logger.info('dut node: %s mgmt IP: %s' %
                            (node.Name(), tc.dut_node_mgmt_ip))

    # BUG FIX: the original dereferenced tc.tester_node / tc.dut_node below
    # without verifying both were found, raising AttributeError on a
    # topology with no intel NIC (or no non-intel DUT).
    if tc.tester_node is None or tc.dut_node is None:
        api.Logger.error('Failed to identify both tester (intel) and dut nodes')
        return api.types.status.FAILURE

    # create tar.gz file of dpdk and dpdk-test
    sdk_fullpath = api.GetTopDir() + SDK_SRC_PATH
    dpdk_tar_path = api.GetTopDir() + DPDK_TAR_FILE
    # BUG FIX: use a context manager so the tar file is closed even if an
    # add() fails. "dpdk-test" is still resolved relative to sdk_fullpath,
    # exactly as before.
    with tarfile.open(dpdk_tar_path, "w:gz") as tar:
        os.chdir(sdk_fullpath)
        tar.add("dpdk")
        os.chdir("dpdk-test")
        for name in os.listdir("."):
            tar.add(name)

    api.Logger.info("dpdk-test tarfile location is: " + dpdk_tar_path)
    api.Logger.info("Configuring DTS on " + tc.tester_node_mgmt_ip)

    # copy dpdk-test.tar.gz to tester node.
    api.CopyToHost(tc.tester_node.Name(), [dpdk_tar_path], "")

    # untar dpdk-test.tar.gz and configure tester to run DTS
    req = api.Trigger_CreateExecuteCommandsRequest()
    trig_cmd1 = "tar -xzvf dpdk-test.tar.gz"
    trig_cmd2 = "scripts/config_tester.sh %s %s" % (tc.dut_node_mgmt_ip,
                                                    tc.tester_node_mgmt_ip)
    api.Trigger_AddHostCommand(req, tc.tester_node.Name(), trig_cmd1,
                               timeout=60)
    api.Trigger_AddHostCommand(req, tc.tester_node.Name(), trig_cmd2,
                               timeout=60)
    trig_resp = api.Trigger(req)
    tc.resp.append(trig_resp)

    # disable internal mnic
    cmd = "ifconfig inb_mnic0 down && ifconfig inb_mnic1 down"
    resp = api.RunNaplesConsoleCmd(tc.dut_node.Name(), cmd)
    return api.types.status.SUCCESS
def AddWorkloads(nodes=None):
    """Add workloads on the given nodes; defaults to all topology nodes."""
    target_nodes = nodes if nodes else api.GetNodes()
    return __add_workloads(target_nodes)
def Setup(tc):
    """Load the vnic config from policy.json, derive uplink vlan/mac info and
    test flows, and stage the send/recv packet scripts on the workload node.

    Returns SUCCESS, or FAILURE when interfaces, macs, or vlans can't be
    resolved from the policy.
    """
    # parse iterator args
    parse_args(tc)
    # setup policy.json obj
    tc.plcy_obj = None
    # read from policy.json
    with open(api.GetTestsuiteAttr("dp_policy_json_path")) as fd:
        tc.plcy_obj = json.load(fd)
    # get vnic
    tc.vnic = utils.get_vnic(tc.plcy_obj, tc.vnic_type, tc.nat,
                             _stateful=True)
    # get vnic id
    tc.vnic_id = utils.get_vnic_id(tc.plcy_obj, tc.vnic_type, tc.nat,
                                   _stateful=True)
    api.Logger.info('vnic id: {}'.format(tc.vnic_id))
    # get node info (published earlier in the testsuite by the suite Setup)
    tc.bitw_node_name = api.GetTestsuiteAttr("bitw_node_name")
    tc.wl_node_name = api.GetTestsuiteAttr("wl_node_name")
    tc.wl_node = api.GetTestsuiteAttr("wl_node")
    tc.nodes = api.GetNodes()
    host_intfs = api.GetNaplesHostInterfaces(tc.wl_node_name)
    # Assuming single nic per host
    if len(host_intfs) != 2:
        api.Logger.error('Failed to get host interfaces')
        return api.types.status.FAILURE
    tc.up0_intf = host_intfs[0]
    tc.up1_intf = host_intfs[1]
    # get uplink vlans
    tc.up0_vlan = tc.vnic['rewrite_underlay']['vlan_id']
    tc.up1_vlan = tc.vnic['vlan_id']
    # get uplink mac
    tc.up0_mac = tc.vnic['rewrite_underlay']['dmac']
    tc.up1_mac = tc.vnic['session']['to_switch']['host_mac']
    if not tc.up0_mac or not tc.up1_mac:
        api.Logger.error('Failed to get workload sub-intf mac addresses')
        return api.types.status.FAILURE
    if not tc.up0_vlan or not tc.up1_vlan:
        api.Logger.error('Failed to get workload sub-intf vlan value')
        return api.types.status.FAILURE
    api.Logger.info('Workload0: up0_intf %s up0_vlan %s up0_mac %s' %
                    (tc.up0_intf, tc.up0_vlan, tc.up0_mac))
    api.Logger.info('Workload1: up1_intf %s up1_vlan %s up1_mac %s' %
                    (tc.up1_intf, tc.up1_vlan, tc.up1_mac))
    # fetch flows needed for the test
    tc.flows = get_flows(tc)
    # copy send/recv scripts to node (only the workload node gets them)
    send_pkt_script_fname = api.GetTestsuiteAttr("send_pkt_path")
    recv_pkt_script_fname = api.GetTestsuiteAttr("recv_pkt_path")
    for node in tc.nodes:
        if node is tc.wl_node:
            api.CopyToHost(node.Name(), [send_pkt_script_fname], "")
            api.CopyToHost(node.Name(), [recv_pkt_script_fname], "")
    # init response list
    tc.resp = []
    return api.types.status.SUCCESS
def Setup(tc):
    """Power-cycle every Naples node, restore agent state, and collect and
    verify the BMC logs per node.

    Returns SUCCESS; IGNORE when no Naples node exists, the powercycle method
    is unknown, or BMC log verification flags a problem. Raises
    OfflineTestbedException when agent state save/restore fails.
    """
    api.Logger.info("Server Compatiblity Random-Wait Reboot")
    nodes = api.GetNodes()
    tc.naples_nodes = []
    tc.node_bmc_data = dict()
    tc.resp = api.types.status.SUCCESS
    #for every node in the setup
    for node in nodes:
        if api.IsNaplesNode(node.Name()):
            api.Logger.info(f"Found Naples Node: [{node.Name()}]")
            tc.naples_nodes.append(node)
            # Per-node scratch object that will hold the BMC data below.
            tc.node_bmc_data[node.Name()] = iota_util_parser.Dict2Object({})
        else:
            api.Logger.info(f"Skipping non-Naples Node: [{node.Name()}]")
    if len(tc.naples_nodes) == 0:
        api.Logger.error(f"Failed to find a Naples Node!")
        tc.resp = api.types.status.IGNORE
        return api.types.status.IGNORE
    # Check for
    for node in tc.naples_nodes:
        node_data = tc.node_bmc_data[node.Name()]
        # save
        api.Logger.info(f"Saving node: {node.Name()}")
        if api.SaveIotaAgentState([node.Name()]) == api.types.status.FAILURE:
            raise OfflineTestbedException
    # power-cycle nodes
    if tc.iterators.powercycle_method == "apc":
        api.ApcNodes([n.Name() for n in tc.naples_nodes])
    elif tc.iterators.powercycle_method == "ipmi":
        api.IpmiNodes([n.Name() for n in tc.naples_nodes])
    else:
        api.Logger.error(
            f"Powercycle-method: {tc.iterators.powercycle_method} unknown")
        return api.types.status.IGNORE
    # Allow the nodes time to come back up after the power cycle.
    time.sleep(180)
    for node in tc.naples_nodes:
        resp = api.RestoreIotaAgentState([node.Name()])
        if resp != api.types.status.SUCCESS:
            api.Logger.error(f"Failed to restore agent state after reboot")
            raise OfflineTestbedException
        api.Logger.info(f"Reboot SUCCESS")
        wl_api.ReAddWorkloads(node.Name())
        # NOTE(review): node_data here still refers to the LAST node of the
        # save loop above (it is not re-fetched per node in this loop), so
        # with multiple Naples nodes all BMC logs land on that one record —
        # confirm whether this should be tc.node_bmc_data[node.Name()].
        setattr(node_data, 'BmcLogs', dict())
        cimc_info = node.GetCimcInfo()
        node_data.BmcLogs['Init'] = iota_log_api.CollectBmcLogs(
            cimc_info.GetIp(), cimc_info.GetUsername(),
            cimc_info.GetPassword())
        # Check for any errors
        if bmc_utils.verify_bmc_logs(
                node.Name(), node_data.BmcLogs['Init'], tag='Init',
                save_logs=True) != api.types.status.SUCCESS:
            tc.resp = api.types.status.IGNORE
            break
    # TODO: Process BMC logs to get boot time-profile
    setattr(node_data, 'MeanBootTime', 120)  # FIXME
    return tc.resp