def Verify(tc):
    """Verify VLAN-offload toggle results and ping traffic for each workload.

    Returns api.types.status.SUCCESS only when every saved tx/rx status
    command succeeded and the traffic verification passed.
    """
    if tc.skip or api.IsDryrun():
        return api.types.status.SUCCESS
    if tc.resp is None:
        return api.types.status.FAILURE
    result = api.types.status.SUCCESS

    # Verify pdsctl and host commands results.
    # tuples is (tx_status_resp, rx_status_resp) stored by Trigger().
    for wl_name, tuples in tc.cmd_status.items():
        if api.IsApiResponseOk(tuples[0]):
            tx_enable = 'on' if tc.args.tx else 'off'
            api.Logger.info("SUCCESS: Name: %s, tx_status: %s" % (wl_name, tx_enable))
        else:
            result = api.types.status.FAILURE
            api.Logger.info("FAILURE: Name: %s, tx_status: %s" % (wl_name, tuples[0].stdout))
        if api.IsApiResponseOk(tuples[1]):
            rx_enable = 'on' if tc.args.rx else 'off'
            api.Logger.info("SUCCESS: Name: %s, rx_status: %s" % (wl_name, rx_enable))
        else:
            result = api.types.status.FAILURE
            api.Logger.info("FAILURE: Name: %s, rx_status: %s" % (wl_name, tuples[1].stdout))
        # TODO: validate the flags against 'pdsctl show lif' once it grows
        # --yaml output with the mode flags (see the HAL-based Verify in this
        # file for the intended per-flag checks).
        api.Logger.warn(
            "XXX 'pdsctl show lif' does not support --yaml output, and does not show the mode flag for validation"
        )

    # Verify traffic result.
    # BUGFIX: compare status values with '!=', not 'is not' -- identity
    # comparison on non-singleton values is unreliable.
    vp_result = traffic_utils.verifyPing(tc.cmd_cookies, tc.resp)
    if vp_result != api.types.status.SUCCESS:
        result = vp_result

    api.Logger.info("TC.Verify result: %s" % result)
    return result
def __load_linux_driver(node, node_os, manifest_file):
    """Build and load the Linux/FreeBSD ionic driver from a release manifest.

    Copies the driver source package to the host, untars and builds it there,
    unloads the running driver and insmod/kldload's the fresh module.
    Returns api.types.status.SUCCESS or FAILURE.
    """
    image_manifest = parser.JsonParse(manifest_file)
    driver_images = list(filter(lambda x: x.OS == node_os, image_manifest.Drivers))[0].Images[0]
    if driver_images is None:
        api.Logger.error("Unable to load image manifest")
        return api.types.status.FAILURE

    # BUGFIX: anchor the package path at the workspace top directory.  The
    # original used the undefined name 'Gl'; __load_esxi_driver uses
    # GlobalOptions.topdir for the same purpose.
    drImgFile = os.path.join(GlobalOptions.topdir, driver_images.drivers_pkg)
    api.Logger.info("Fullpath for driver image: " + drImgFile)
    resp = api.CopyToHost(node, [drImgFile], "")
    if not api.IsApiResponseOk(resp):
        api.Logger.error("Failed to copy %s" % drImgFile)
        return api.types.status.FAILURE

    # Untar the package and build the driver on the target host.
    rundir = os.path.basename(driver_images.drivers_pkg).split('.')[0]
    req = api.Trigger_CreateExecuteCommandsRequest(serial = True)
    api.Trigger_AddHostCommand(req, node, "tar -xf " + os.path.basename(driver_images.drivers_pkg))
    api.Trigger_AddHostCommand(req, node, "./build.sh", rundir=rundir)
    resp = api.Trigger(req)
    if not api.IsApiResponseOk(resp):
        api.Logger.error("TriggerCommand for driver build failed")
        return api.types.status.FAILURE
    for cmd in resp.commands:
        # Build.sh could fail -ignored (FIXME)
        if cmd.exit_code != 0 and cmd.command != './build.sh':
            api.Logger.error("Failed to exec cmds to build/load new driver")
            return api.types.status.FAILURE

    api.Logger.info("New driver image is built on target host. Prepare to load")
    # Best-effort unload: a failure is logged but does not abort the load.
    if host.UnloadDriver(node_os, node) != api.types.status.SUCCESS:
        api.Logger.error("Failed to unload current driver - proceeding")

    req = api.Trigger_CreateExecuteCommandsRequest(serial = True)
    if node_os == OS_TYPE_LINUX:
        api.Trigger_AddHostCommand(req, node, "insmod " + os.path.join(rundir, "drivers/eth/ionic/ionic.ko"))
    elif node_os == OS_TYPE_BSD:
        api.Trigger_AddHostCommand(req, node, "kldload " + os.path.join(rundir, "drivers/eth/ionic/ionic.ko"))
    resp = api.Trigger(req)
    if not api.Trigger_IsSuccess(resp):
        api.Logger.error("TriggerCommand for driver installation failed")
        return api.types.status.FAILURE
    return api.types.status.SUCCESS
def LoadDriver(node_names, node_os, target_version='latest'):
    """Load the release driver `target_version` on every node in node_names.

    Downloads the release assets, then builds/installs the driver per node
    OS (Linux build+insmod, ESX VIB install + node restart).
    Returns api.types.status.SUCCESS or FAILURE.
    """
    if target_version == 'latest':
        # Nothing to download for 'latest', and no versioned manifest exists;
        # bail out early.  (The original fell through and dereferenced an
        # undefined `manifest_file`.)
        api.Logger.info('Target version is latest - nothing to change')
        return api.types.status.SUCCESS

    resp = api.DownloadAssets(release_version = target_version)
    if not api.IsApiResponseOk(resp):
        api.Logger.error("Failed to download assets for %s" % target_version)
        return api.types.status.FAILURE
    manifest_file = os.path.join(api.GetTopDir(), 'images', target_version + '.json')

    for node in node_names:
        if node_os == OS_TYPE_LINUX:
            # BUGFIX: __load_linux_driver takes (node, node_os, manifest_file);
            # the original passed an undefined `tc` as the first argument and
            # read `tc.target_version` / `tc.os`, none of which exist here.
            if __load_linux_driver(node, node_os, manifest_file) == api.types.status.SUCCESS:
                api.Logger.info("Release Driver %s reload on %s" % (target_version, node))
            else:
                api.Logger.error("Failed to load release driver %s reload on %s" % (target_version, node))
                return api.types.status.FAILURE
            # this is required to bring the testbed into operation state
            # after driver unload interfaces need to be initialized
            wl_api.ReAddWorkloads(node)
        elif node_os == OS_TYPE_ESX:
            host.UnloadDriver(node_os, node)
            if __load_esxi_driver(node, node_os, manifest_file) == api.types.status.SUCCESS:
                api.Logger.info("Release Driver %s reload on %s" % (target_version, node))
            else:
                api.Logger.error("Failed to load release driver %s reload on %s" % (target_version, node))
                return api.types.status.FAILURE
            api.RestartNodes([node])
    return api.types.status.SUCCESS
def Trigger(tc):
    """Exercise ESX exec/copy plumbing: run esxcli, upload testbed.json,
    download each workload's vmware.log.

    Accumulates the overall status in tc.resp and returns it.
    """
    tc.resp = api.types.status.SUCCESS

    # Execute a command on ESXi hypervisor and collect output
    req = api.Trigger_CreateExecuteCommandsRequest()
    for node in tc.nodes:
        api.Trigger_AddESXCommand(req, node, "esxcli device driver list")
    resp = api.Trigger(req)
    if not api.Trigger_IsSuccess(resp):
        tc.resp = api.types.status.FAILURE
    else:
        for cmd in resp.commands:
            api.PrintCommandResults(cmd)

    # Upload a file (testbed.json file) to each ESX hypervisor
    if tc.resp == api.types.status.SUCCESS:
        for node in tc.nodes:
            resp = api.CopyToEsx(node, [GlobalOptions.testbed_json], host_dir=".", esx_dir="/tmp")
            if not api.IsApiResponseOk(resp):
                tc.resp = api.types.status.FAILURE
                break

    # Download a file from ESX hypervisor
    if tc.resp == api.types.status.SUCCESS:
        for node in tc.nodes:
            wloads = api.GetWorkloads(node)
            for wl in wloads:
                folder = os.path.join(api.topdir, node, wl.workload_name)
                # BUGFIX: tolerate reruns; plain makedirs raises if the
                # directory already exists.
                os.makedirs(folder, exist_ok=True)
                resp = api.CopyFromESX(node, ["vmware.log"], dest_dir=folder, esx_dir="/vmfs/volumes/datastore1/" + wl.workload_name)
                if not api.IsApiResponseOk(resp):
                    tc.resp = api.types.status.FAILURE
                    break

    # BUGFIX: report the accumulated status; the original returned
    # SUCCESS unconditionally, masking every failure above.
    return tc.resp
def SetupQoS(self):
    """Push the QoS switch configuration for every testbed instance.

    Builds a single CREATE_QOS_CONFIG SwitchMsg covering all instances and
    sends it in one switch operation.  Returns types.status.SUCCESS/FAILURE.
    """
    setMsg = topo_pb2.SwitchMsg()
    setMsg.op = topo_pb2.CREATE_QOS_CONFIG
    switch_ips = {}
    for nodeIndex, instance in enumerate(self.__tbspec.Instances):
        self.__prepare_SwitchMsg(setMsg, instance, switch_ips, nodeIndex+1, setup_qos=True)
    resp = api.DoSwitchOperation(setMsg)
    if not api.IsApiResponseOk(resp):
        return types.status.FAILURE
    # NOTE: removed a dead `setMsg = topo_pb2.SwitchMsg()` reassignment that
    # immediately preceded the return and was never used.
    return types.status.SUCCESS
def Setup(tc):
    """Stage the per-OS RDMA driver package on Naples hosts and the
    show_gid helper on the remaining workload nodes."""
    api.SetTestsuiteAttr("driver_path", api.GetHostToolsDir() + '/')
    tc.iota_path = api.GetTestsuiteAttr("driver_path")
    tc.nodes = api.GetNaplesHostnames()
    tc.other_nodes = api.GetWorkloadNodeHostnames()
    tc.os = api.GetNodeOs(tc.nodes[0])
    platform_gendir = api.GetTopDir()+'/platform/gen/'

    # Pick the package/helper paths matching the host OS.
    if tc.os == host.OS_TYPE_LINUX:
        tc.pkgname, tc.showgid = 'drivers-linux.tar.xz', 'drivers-linux/show_gid'
    else:
        tc.pkgname, tc.showgid = 'drivers-freebsd.tar.xz', 'drivers-freebsd/show_gid'

    # Copy RDMA driver to naples nodes
    for node in tc.nodes:
        api.Logger.info("Copying {pkg} to {node}" .format(pkg=tc.pkgname, node=node))
        copy_resp = api.CopyToHost(node, [platform_gendir + tc.pkgname])
        if not api.IsApiResponseOk(copy_resp):
            api.Logger.error("Failed to copy {pkg} to {node}: {resp}" .format(pkg=tc.pkgname, node=node, resp=copy_resp))
            return api.types.status.FAILURE

    # Copy show_gid to other nodes (skip the Naples hosts handled above)
    for node in tc.other_nodes:
        if node in tc.nodes:
            continue
        api.Logger.info("Copying show_gid to {node}" .format(node=node))
        copy_resp = api.CopyToHost(node, [platform_gendir + tc.showgid])
        if not api.IsApiResponseOk(copy_resp):
            api.Logger.error("Failed to copy show_gid to {node}: {resp}" .format(node=node, resp=copy_resp))
            return api.types.status.FAILURE
    return api.types.status.SUCCESS
def Setup(tc):
    """Stage the pnso test yml files on the first Naples node only."""
    api.Logger.info("Iterators: %s" % tc.iterators.Summary())
    pnsoutils.Setup(tc)
    # Run it only on first Naples
    tc.nodes = [api.GetNaplesHostnames()[0]]
    for node in tc.nodes:
        tc.files.append("%s/pnsotest_%s.py" % (tc.tcdir, api.GetNodeOs(node)))
        api.Logger.debug("Copying testyml files to Node:%s" % node)
        if not api.IsApiResponseOk(api.CopyToHost(node, tc.files)):
            return api.types.status.FAILURE
    return api.types.status.SUCCESS
def Setup(tc):
    """Stage storage config files, the test script, and the per-OS pnso test
    script on every Naples node.

    Returns api.types.status.SUCCESS or FAILURE on the first failed copy.
    """
    tc.nodes = api.GetNaplesHostnames()
    tc.files = []
    for cfgfile in tc.args.cfg:
        tc.files.append("%s/%s/%s" % (api.GetTopDir(), tc.args.dir, cfgfile))
    tc.files.append("%s/%s/%s" % (api.GetTopDir(), tc.args.dir, tc.args.test))
    for n in tc.nodes:
        pnso_file = "%s/iota/test/iris/testcases/storage/pnsotest_%s.py" % (api.GetTopDir(), api.GetNodeOs(n))
        # BUGFIX: append each per-OS pnso script only once.  The original
        # appended unconditionally per node, so every later node copied all
        # earlier nodes' scripts again (duplicates for same-OS testbeds).
        if pnso_file not in tc.files:
            tc.files.append(pnso_file)
        resp = api.CopyToHost(n, tc.files)
        if not api.IsApiResponseOk(resp):
            return api.types.status.FAILURE
    return api.types.status.SUCCESS
def Teardown(tc):
    """Roll every workload's tx/rx VLAN-offload flags back to the values
    saved in tc.orig_hwtag_flags; FAILURE if any rollback command fails."""
    result = api.types.status.SUCCESS
    # Rollback to original flag values
    for wl in api.GetWorkloads():
        saved = tc.orig_hwtag_flags.get(wl.workload_name)
        if saved is None:
            continue
        tx_flag, rx_flag = saved
        api.Logger.info(
            "Rolling back: wl_name: %s, tx_enable: %s" % (wl.interface, tx_flag))
        tx_resp = naples_host.Toggle_TxVlanOffload(wl.node_name, wl.interface, tx_flag)
        if not api.IsApiResponseOk(tx_resp):
            result = api.types.status.FAILURE
        api.Logger.info(
            "Rolling back: wl_name: %s, rx_enable: %s" % (wl.interface, rx_flag))
        rx_resp = naples_host.Toggle_RxVlanOffload(wl.node_name, wl.interface, rx_flag)
        if not api.IsApiResponseOk(rx_resp):
            result = api.types.status.FAILURE
    return result
def __init_testbed(self):
    """Initialize (or re-attach to) the physical testbed.

    Without --skip-setup: clean the testbed, recover it from the image
    manifest, then call InitTestbed.  With --skip-setup: just fetch the
    already-initialized testbed state via GetTestbed.  On success, records
    ESX control-VM IPs and seeds the vlan allocator from the vlans the
    server actually allocated.  Returns a types.status code; raises
    OfflineTestbedException when the testbed cannot be (re)acquired.
    """
    self.__tbid = getattr(self.__tbspec, 'TestbedID', 1)
    self.__vlan_base = getattr(self.__tbspec, 'TestbedVlanBase', 1)
    self.__vlan_allocator = resmgr.TestbedVlanAllocator(self.__vlan_base, self.curr_ts.GetDefaultNicMode())
    #TODO: merge single allocator into list below
    self.__multi_vlan_allocators = []
    self.__nextVlanAllocator = 0
    self.__image_manifest_file = self.curr_ts.GetImageManifestFile()
    self.curr_ts.DownloadReleaseImages()
    resp = None
    msg = self.__prepare_TestBedMsg(self.curr_ts)
    if not GlobalOptions.skip_setup:
        status = self.CleanupTestbed()
        if status != types.status.SUCCESS:
            return status
        try:
            self.__recover_testbed(self.__image_manifest_file)
        except:
            # NOTE(review): intentionally broad -- any recovery failure is
            # logged and mapped to TESTBED_INIT_FAILURE.
            utils.LogException(Logger)
            Logger.error("Failed to recover testbed")
            Logger.debug(traceback.format_exc())
            return types.status.TESTBED_INIT_FAILURE
        if GlobalOptions.dryrun:
            # NOTE(review): this 'status' is never read afterwards --
            # presumably legacy; confirm before removing.
            status = types.status.SUCCESS
        resp = api.InitTestbed(msg)
    else:
        # skip-setup: attach to the testbed state without re-initializing.
        resp = api.GetTestbed(msg)
    if resp is None:
        Logger.error("Failed to initialize testbed: ")
        raise OfflineTestbedException
        #return types.status.FAILURE
    if not api.IsApiResponseOk(resp):
        Logger.error("Failed to initialize testbed: ")
        raise OfflineTestbedException
        #return types.status.FAILURE
    # Record the ESX control VM address IOTA handed back for each instance.
    for instance,node in zip(self.__tbspec.Instances, resp.nodes):
        if getattr(instance, 'NodeOs', None) == "esx":
            instance.esx_ctrl_vm_ip = node.esx_ctrl_node_ip_address
    Logger.info("Testbed allocated vlans {}".format(resp.allocated_vlans))
    if resp.allocated_vlans:
        # Re-seed the allocator with the vlans actually granted.
        tbvlans = []
        for vlan in resp.allocated_vlans:
            tbvlans.append(vlan)
        self.__vlan_allocator = resmgr.TestbedVlanManager(tbvlans)
    self.__instpool = copy.deepcopy(self.__tbspec.Instances)
    return types.status.SUCCESS
def __node_api_handler(self, url, json_data=None, oper=CfgOper.ADD):
    """Issue a REST call to `url` from the node host via curl.

    Payloads over 100KB are written to a temp file, copied to the host and
    posted with curl's @file syntax; smaller payloads are inlined on the
    command line.  Returns the curl command's stdout.
    """
    # Map the CfgOper enum onto the HTTP method string.
    if oper == CfgOper.DELETE:
        oper = "DELETE"
    elif oper == CfgOper.ADD:
        oper = "POST"
    elif oper == CfgOper.UPDATE:
        oper = "PUT"
    elif oper == CfgOper.GET:
        oper = "GET"
    else:
        print(oper)
        assert (0)
    if GlobalOptions.debug:
        api.Logger.info("Url : %s" % url)
    cmd = None
    if json_data and len(json.dumps(json_data)) > 100000:
        # Payload too large for the command line: ship it as a file.
        filename = "/tmp/temp_config.json"
        with open(filename, 'w') as outfile:
            json.dump(json_data, outfile)
        # Remove any stale copy on the host before uploading the new one.
        req = api.Trigger_CreateAllParallelCommandsRequest()
        cmd = ["rm", "-rf", "temp_config.json"]
        cmd = " ".join(cmd)
        api.Trigger_AddHostCommand(req, self.host_name, cmd, timeout=3600)
        api.Trigger(req)
        resp = api.CopyToHost(self.host_name, [filename], "")
        if not api.IsApiResponseOk(resp):
            assert (0)
        cmd = [
            "curl", "-X", oper, "-d", "@temp_config.json", "-k", "-H",
            "\"Content-Type:application/json\"", url
        ]
    else:
        cmd = [
            "curl", "-X", oper, "-k", "-d",
            "\'" + json.dumps(json_data) + "\'" if json_data else " ", "-H",
            "\"Content-Type:application/json\"", url
        ]
    cmd = " ".join(cmd)
    req = api.Trigger_CreateAllParallelCommandsRequest()
    api.Trigger_AddHostCommand(req, self.host_name, cmd, timeout=3600)
    resp = api.Trigger(req)
    if GlobalOptions.debug:
        # BUGFIX: cmd is already a joined string here; the original
        # " ".join(cmd) inserted a space between every character.
        print(cmd)
    return resp.commands[0].stdout
def UnsetVlansOnTestBed(self):
    """Remove the vlan range and native vlan from the testbed switches."""
    #First Unset the Switch
    unsetMsg = topo_pb2.SwitchMsg()
    unsetMsg.op = topo_pb2.VLAN_CONFIG
    switch_ips = {}
    for idx, inst in enumerate(self.__tbspec.Instances, start=1):
        self.__prepare_SwitchMsg(unsetMsg, inst, switch_ips, idx, setup_qos=False)
    unsetMsg.vlan_config.unset = True
    unsetMsg.vlan_config.vlan_range = self.GetVlanRange()
    unsetMsg.vlan_config.native_vlan = self.GetNativeVlan()
    if not api.IsApiResponseOk(api.DoSwitchOperation(unsetMsg)):
        return types.status.FAILURE
    return types.status.SUCCESS
def Setup(tc):
    """Copy the pencap techsupport package to every Naples host."""
    api.Logger.info("Pencap - Techsupport Sysinfo")
    tc.nodes = api.GetNaplesHostnames()
    tc.os = api.GetNodeOs(tc.nodes[0])
    #Copy pencap package to the host
    platform_gendir = api.GetTopDir() + '/platform/drivers/pencap/'
    listDir = os.listdir(platform_gendir)
    api.Logger.info("Content of {dir} : {cnt}".format(dir=platform_gendir, cnt=listDir))
    for node in tc.nodes:
        api.Logger.info(
            "Copying Pencap package to the host{node}".format(node=node))
        copy_resp = api.CopyToHost(node, [platform_gendir + "pencap.tar.gz"])
        if not api.IsApiResponseOk(copy_resp):
            api.Logger.error("Failed to copy {pkg} to {node}: {resp}".format(
                pkg="pencap.tar.gz", node=node, resp=copy_resp))
            return api.types.status.FAILURE
    return api.types.status.SUCCESS
def __load_esxi_driver(node, node_os, manifest_file):
    """Install the ESXi ionic driver VIB described by the release manifest.

    Handles both SrcBundle packages (extract locally, locate the *.vib) and
    bare VIB packages, copies the VIB to the ESX host and installs it via
    esxcli over ssh.  Returns api.types.status.SUCCESS or FAILURE.
    """
    image_manifest = parser.JsonParse(manifest_file)
    driver_images = list(filter(lambda x: x.OS == node_os, image_manifest.Drivers))[0].Images[0]
    if driver_images is None:
        api.Logger.error("Unable to load image manifest")
        return api.types.status.FAILURE
    drImgFile = os.path.join(GlobalOptions.topdir, driver_images.drivers_pkg)
    if driver_images.pkg_file_type == "SrcBundle":
        # Copy and extract the file and find ionic*.vib file to load
        os.system("tar -xf " + drImgFile)
        pkgdir = os.path.basename(driver_images.drivers_pkg).split('.')[0]
        plist = list(Path(pkgdir).rglob('*.vib'))
        if not plist:
            api.Logger.error("Unable to find VIB file in driver-pkg: %s" % driver_images.drivers_pkg)
            return api.types.status.FAILURE
        vib_file = str(plist[0].absolute())
    elif driver_images.pkg_file_type == "VIB":
        vib_file = drImgFile
    else:
        api.Logger.error("Unknown format for driver-pkg: %s - aborting" % driver_images.drivers_pkg)
        return api.types.status.FAILURE
    api.Logger.info("Loading %s on node: %s" % (vib_file, node))
    resp = api.CopyToEsx(node, [vib_file], host_dir="")
    if not api.IsApiResponseOk(resp):
        # BUGFIX: use '%' formatting like the rest of this file; the
        # original passed the argument with a comma, so it was never
        # interpolated into the message.
        api.Logger.error("Failed to copy %s" % driver_images.drivers_pkg)
        return api.types.status.FAILURE
    # Install the VIB on the hypervisor via ssh (force install).
    req = api.Trigger_CreateExecuteCommandsRequest(serial = True)
    api.Trigger_AddHostCommand(req, node,
        "sshpass -p %s ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no %s@%s esxcli software vib install -v=/tmp/%s -f"
        % (api.GetTestbedEsxPassword(), api.GetTestbedEsxUsername(),
           api.GetEsxHostIpAddress(node), os.path.basename(vib_file)))
    resp = api.Trigger(req)
    if not api.Trigger_IsSuccess(resp):
        api.Logger.error("Failed to install driver")
        return api.types.status.FAILURE
    return api.types.status.SUCCESS
def Trigger(tc):
    """Repeatedly PXE-install an OS on the test node and verify a fresh FS.

    Per iteration: save IOTA agent state, drop a marker file on the current
    filesystem, PXE-boot to reinstall the OS, boot back from disk, restore
    images and agent state, re-add workloads, and confirm the marker file is
    gone (i.e. the install really replaced the filesystem).
    """
    cimc_info = tc.test_node.GetCimcInfo()
    cimc_ip_address = cimc_info.GetIp()
    cimc_username = cimc_info.GetUsername()
    cimc_password = cimc_info.GetPassword()
    host_ipaddr = api.GetMgmtIPAddress(tc.test_node_name)

    # BUGFIX: compare status values with '==', not 'is' (identity compare
    # on non-singleton values is unreliable).
    if reboot.checkLinks(tc, tc.test_node_name) == api.types.status.FAILURE:
        api.Logger.error("Error verifying uplink interfaces")
        return api.types.status.FAILURE

    for install in range(tc.args.install_iterations):
        # save agent state so it can be restored onto the fresh OS
        api.Logger.info(f"Saving node: {tc.test_node_name}")
        if api.SaveIotaAgentState([tc.test_node_name]) == api.types.status.FAILURE:
            raise OfflineTestbedException

        # touch the file on server to ensure this instance of OS is gone later
        req = api.Trigger_CreateExecuteCommandsRequest()
        touch_file_cmd = "touch /naples/oldfs"
        api.Trigger_AddHostCommand(req, tc.test_node_name, touch_file_cmd)
        resp = api.Trigger(req)
        # BUGFIX: 'not ...' instead of '... is not True'
        if not api.Trigger_IsSuccess(resp):
            api.Logger.error(f"Failed to run command on host {tc.test_node_name}, {touch_file_cmd}")
            return api.types.status.FAILURE

        # Boot from PXE to install an OS
        api.Logger.info(f"Starting PXE Install Loop # {install} on {tc.test_node_name}")
        cmd = "ipmitool -I lanplus -H %s -U %s -P %s chassis bootdev pxe options=efiboot" %\
            (cimc_ip_address, cimc_username, cimc_password)
        subprocess.check_call(cmd, shell=True)
        time.sleep(5)

        # reboot server
        cmd = "ipmitool -I lanplus -H %s -U %s -P %s chassis power cycle" %\
            (cimc_ip_address, cimc_username, cimc_password)
        subprocess.check_call(cmd, shell=True)
        time.sleep(180)

        # wait for installation to finish and server to come back
        api.Logger.info(f"Waiting for host to come up: {host_ipaddr}")
        if not waitforssh(host_ipaddr):
            raise OfflineTestbedException

        # Boot from HDD to run the test
        api.Logger.info(f"Setting Boot Order to HDD and rebooting {tc.test_node_name}")
        cmd = "ipmitool -I lanplus -H %s -U %s -P %s chassis bootdev disk options=efiboot" %\
            (cimc_ip_address, cimc_username, cimc_password)
        subprocess.check_call(cmd, shell=True)
        time.sleep(5)

        # reboot server
        cmd = "ipmitool -I lanplus -H %s -U %s -P %s chassis power cycle" %\
            (cimc_ip_address, cimc_username, cimc_password)
        subprocess.check_call(cmd, shell=True)
        api.Logger.info(f"Waiting for host to come up: {host_ipaddr}")
        time.sleep(180)
        if not waitforssh(host_ipaddr):
            raise OfflineTestbedException

        # restore images and agent state on the freshly installed OS
        api.Logger.info(f"Restoring node: {tc.test_node_name}")
        resp = api.ReInstallImage(fw_version=None, dr_version="latest")
        if resp != api.types.status.SUCCESS:
            api.Logger.error(f"Failed to install images on the testbed")
            raise OfflineTestbedException
        resp = api.RestoreIotaAgentState([tc.test_node_name])
        if resp != api.types.status.SUCCESS:
            api.Logger.error(f"Failed to restore agent state after PXE install")
            raise OfflineTestbedException
        api.Logger.info(f"PXE install iteration #{install} - SUCCESS")
        try:
            wl_api.ReAddWorkloads(tc.test_node_name)
        except:
            api.Logger.error(f"ReaddWorkloads failed with exception - See logs for details")
            return api.types.status.FAILURE

        # check touched file is not present, to ensure this is a new OS instance
        oldfs_command = "ls /naples/oldfs"
        req = api.Trigger_CreateExecuteCommandsRequest()
        api.Trigger_AddHostCommand(req, tc.test_node_name, oldfs_command)
        resp = api.Trigger(req)
        # BUGFIX: 'not ...' instead of '... is not True'
        if not api.IsApiResponseOk(resp):
            api.Logger.error(f"Failed to run command on host {tc.test_node_name} {oldfs_command}")
            return api.types.status.FAILURE
        cmd = resp.commands.pop()
        if cmd.exit_code == 0:
            # 'ls' succeeded => the marker survived => old FS still present.
            api.Logger.error(f"Old file is present in FS after PXE install")
            return api.types.status.FAILURE

    api.Logger.info("PXE boot completed! Host is up.")
    return api.types.status.SUCCESS
def LoadFirmware(nodes, node_os, target_version):
    """Flash naples firmware `target_version` on the given nodes.

    Downloads the release assets (unless 'latest'), copies naples_fw.tar to
    each naples (working around the 1.1.1-E-15 no-OOB case), runs
    sysupdate.sh, restarts the node and re-inits its management IP.
    Returns a status code (or the DownloadAssets response on failure).
    """
    if target_version != 'latest':
        resp = api.DownloadAssets(release_version = target_version)
        if not api.IsApiResponseOk(resp):
            api.Logger.error("Failed to download assets for %s" % target_version)
            return resp
    manifest_file = os.path.join(api.GetTopDir(), 'images', target_version + '.json')
    image_manifest = parser.JsonParse(manifest_file)
    fw_images = list(filter(lambda x: x.naples_type == "capri", image_manifest.Firmwares))[0]
    if fw_images is None:
        api.Logger.error("Unable to load image manifest")
        return api.types.status.FAILURE
    api.Logger.info("Fullpath for firmware image to load: %s " % fw_images.image)
    fwImgFile = os.path.join(GlobalOptions.topdir, fw_images.image)
    for node in nodes:
        # BUGFIX: probe the per-node firmware version inside the loop; the
        # original called GetLinuxFwDriverVersion(node) before the loop,
        # where `node` was not yet defined.
        if node_os == OS_TYPE_LINUX:
            fw_version, _ = GetLinuxFwDriverVersion(node)
        else:
            fw_version = None
        if fw_version == '1.1.1-E-15':
            # Naples with Fw 1.1.1-E-15 has no OOB and IntMgmt Ip is fixed
            resp = api.CopyToNaples(node, [fwImgFile], image_manifest.Version,
                                    via_oob=False, naples_dir="data",
                                    nic_mgmt_ip=GetNicIntMgmtIP(node))
            # BUGFIX: these messages said via_oob=True in the via_oob=False path.
            if not api.IsApiResponseOk(resp):
                api.Logger.info("Failed to copy naples_fw.tar with via_oob=False")
                return api.types.status.FAILURE
            for cmd in resp.commands:
                if cmd.exit_code != 0:
                    api.Logger.error("Failed to copy %s naples_fw.tar via_oob=False" % image_manifest.Version)
                    return api.types.status.FAILURE
        else:
            resp = api.CopyToNaples(node, [fwImgFile], image_manifest.Version,
                                    via_oob=True, naples_dir="data")
            if not api.IsApiResponseOk(resp):
                api.Logger.info("Failed to copy naples_fw.tar with via_oob=True")
                return api.types.status.FAILURE
            for cmd in resp.commands:
                if cmd.exit_code != 0:
                    api.Logger.error("Failed to copy %s naples_fw.tar via_oob=True" % image_manifest.Version)
                    # Try with via_oob=False
                    resp = api.CopyToNaples(node, [fwImgFile], image_manifest.Version,
                                            via_oob=False, naples_dir="data")
                    if not api.Trigger_IsSuccess(resp):
                        api.Logger.error("Failed to copy naples_fw.tar to target naples")
                        return api.types.status.FAILURE
        req = api.Trigger_CreateExecuteCommandsRequest(serial = True)
        api.Trigger_AddNaplesCommand(req, node, "sync")
        api.Trigger_AddNaplesCommand(req, node, "/nic/tools/sysupdate.sh -p /data/naples_fw.tar", timeout=120)
        resp = api.Trigger(req)
        if not api.Trigger_IsSuccess(resp):
            api.Logger.error("sysupdate.sh cmd failed")
            return api.types.status.FAILURE
        api.RestartNodes([node])
        # NOTE(review): returns after the first Linux/ESX node -- presumably
        # single-node usage; confirm before multi-node use.
        if node_os == OS_TYPE_LINUX:
            return LinuxReInitMgmtIP(node)
        elif node_os == OS_TYPE_ESX:
            return ESXiReInitMgmtIP(node)
    return api.types.status.SUCCESS
def Trigger(tc):
    """Toggle tx/rx VLAN-offload flags on every Naples workload, validate
    the change via 'halctl show lif', and run a ping traffic test.

    Saves original flags into tc.orig_hwtag_flags (for Teardown), per-flag
    HAL expectations into tc.hal_verify, and the final ethtool/sysctl
    status responses into tc.cmd_status (for Verify).
    """
    if tc.skip:
        return api.types.status.SUCCESS
    result = api.types.status.SUCCESS
    tc.orig_hwtag_flags = {}
    tc.hal_verify = {}
    tc.cmd_status = {}
    tc.tx_random = []
    tc.rx_random = []
    # An int tx/rx arg selects "random" mode: a random subset of workloads
    # gets its flag flipped instead of force-set.
    if type(tc.args.tx) == int:
        tc.tx_random = SetRandom_Offload()
    if type(tc.args.rx) == int:
        tc.rx_random = SetRandom_Offload()
    for wl in api.GetWorkloads():
        # Only operate on parent interfaces (skip sub-interfaces).
        if wl.parent_interface != wl.interface:
            continue
        if wl.IsNaples():
            # Save original flag values for rollback
            tx_status = naples_host.Get_TxVlanOffload_Status(
                wl.node_name, wl.interface)
            rx_status = naples_host.Get_RxVlanOffload_Status(
                wl.node_name, wl.interface)
            if api.IsApiResponseOk(rx_status):
                # NOTE(review): 'Linux' (capitalized) differs from the
                # lowercase 'linux' used by the pds variant of this test --
                # confirm api.GetNodeOs() casing for this suite.
                if api.GetNodeOs(wl.node_name) == 'Linux':
                    # ethtool output: "rx-vlan-offload: on"
                    rx_enable = (rx_status.commands[0].stdout).split(':')[1]
                elif api.GetNodeOs(wl.node_name) == 'freebsd':
                    # ifconfig options list; VLAN_HWTAGGING present => on
                    options = (rx_status.commands[0].stdout).split(',')
                    if 'VLAN_HWTAGGING' in options:
                        rx_enable = 'on'
                    else:
                        rx_enable = 'off'
                elif api.GetNodeOs(wl.node_name) == 'windows':
                    rx_status = json.loads(rx_status.commands[0].stdout)
                    api.Logger.info("rx current status : %s" % rx_status)
                    if rx_status["VlanID"]:
                        rx_enable = 'on'
                    else:
                        rx_enable = 'off'
            else:
                result = api.types.status.FAILURE
            if api.IsApiResponseOk(tx_status):
                if api.GetNodeOs(wl.node_name) == 'Linux':
                    tx_enable = (tx_status.commands[0].stdout).split(':')[1]
                elif api.GetNodeOs(wl.node_name) == 'freebsd':
                    options = (tx_status.commands[0].stdout).split(',')
                    if 'VLAN_HWTAGGING' in options:
                        tx_enable = 'on'
                    else:
                        tx_enable = 'off'
                elif api.GetNodeOs(wl.node_name) == 'windows':
                    tx_status = json.loads(tx_status.commands[0].stdout)
                    api.Logger.info("tx current status : %s" % tx_status)
                    if tx_status["VlanID"]:
                        tx_enable = 'on'
                    else:
                        tx_enable = 'off'
            else:
                result = api.types.status.FAILURE
            # Remember originals so Teardown can roll back.
            tc.orig_hwtag_flags[wl.workload_name] = (tx_enable, rx_enable)
            # Change tx_vlan and rx_vlan as per args
            if type(tc.args.tx) == int:
                if wl.workload_name in tc.tx_random:
                    tx_enable = 'off' if tx_enable == 'on' else 'on'
                else:
                    tx_enable = 'on' if tc.args.tx else 'off'
            toggle_tx_resp = naples_host.Toggle_TxVlanOffload(
                wl.node_name, wl.interface, tx_enable, tc.vlan_id)
            if type(tc.args.rx) == int:
                if wl.workload_name in tc.rx_random:
                    rx_enable = 'off' if rx_enable == 'on' else 'on'
                else:
                    rx_enable = 'on' if tc.args.rx else 'off'
            toggle_rx_resp = naples_host.Toggle_RxVlanOffload(
                wl.node_name, wl.interface, rx_enable, tc.vlan_id)
            # NOTE(review): reconstructed placement -- if the toggle calls
            # are actually conditional on the int-mode checks above, these
            # responses could be unbound here; confirm.
            if not api.IsApiResponseOk(toggle_tx_resp):
                result = api.types.status.FAILURE
            if not api.IsApiResponseOk(toggle_rx_resp):
                result = api.types.status.FAILURE
            # Validate change using halctl command
            tc.toggle_resp, is_ok = hal_show.GetHALShowOutput(
                wl.node_name, 'lif')
            if api.IsApiResponseOk(tc.toggle_resp):
                cmd = tc.toggle_resp.commands[0]
                if cmd.stdout is not None:
                    # halctl emits one YAML doc per lif; find ours by name.
                    yml_loaded = yaml.load_all(cmd.stdout, Loader=yaml.FullLoader)
                    for spec in yml_loaded:
                        if spec is not None:
                            name = spec["spec"]["name"]
                            if name == wl.interface:
                                api.Logger.info(
                                    "Interface: %s, Vlan Insert Enable: %s, Vlan Strip Enable: %s"
                                    % (wl.interface, spec["spec"]["vlaninserten"],
                                       spec["spec"]["vlanstripen"]))
                                # Record (expected, actual) pairs as strings
                                # for Verify(): tx -> vlaninserten,
                                # rx -> vlanstripen.
                                tx = str(True) if tx_enable == 'on' else str(
                                    False)
                                rx = str(True) if rx_enable == 'on' else str(
                                    False)
                                tc.hal_verify[wl.workload_name] = [
                                    (tx, spec["spec"]["vlaninserten"]),
                                    (rx, spec["spec"]["vlanstripen"])
                                ]
            else:
                result = api.types.status.FAILURE
            # Store status for verification
            rx_status = naples_host.Get_RxVlanOffload_Status(
                wl.node_name, wl.interface)
            tx_status = naples_host.Get_TxVlanOffload_Status(
                wl.node_name, wl.interface)
            tc.cmd_status[wl.workload_name] = (tx_status, rx_status)
    # Run traffic test
    tc.cmd_cookies, tc.resp = filters_utils.pingAllRemoteWloadPairs(
        tc.workload_pairs, tc.iterators)
    api.Logger.info("TC.Trigger result: %s" % result)
    return result
def Verify(tc):
    """Verify VLAN-offload flag status, HAL lif flags, and traffic results.

    Checks the saved tx/rx status command responses, compares the expected
    flag values against what HAL reported (tc.hal_verify), then walks the
    traffic command results.  Returns SUCCESS only if all checks pass.
    """
    if tc.skip:
        return api.types.status.SUCCESS
    if tc.resp is None:
        return api.types.status.FAILURE
    result = api.types.status.SUCCESS
    # Verify halctl and host commands results
    # tuples is (tx_status_resp, rx_status_resp) stored by Trigger().
    for wl_name, tuples in tc.cmd_status.items():
        if api.IsApiResponseOk(tuples[0]):
            tx_enable = 'on' if tc.args.tx else 'off'
            api.Logger.info("SUCCESS: Name: %s, tx_status: %s" % (wl_name, tx_enable))
        else:
            result = api.types.status.FAILURE
            api.Logger.info("FAILURE: Name: %s, tx_status: %s" % (wl_name, tuples[0].stdout))
        if api.IsApiResponseOk(tuples[1]):
            rx_enable = 'on' if tc.args.rx else 'off'
            api.Logger.info("SUCCESS: Name: %s, rx_status: %s" % (wl_name, rx_enable))
        else:
            result = api.types.status.FAILURE
            api.Logger.info("FAILURE: Name: %s, rx_status: %s" % (wl_name, tuples[1].stdout))
        # Compare expected vs HAL-reported flags recorded by Trigger();
        # each entry is [(tx_expected, tx_hal), (rx_expected, rx_hal)].
        if tc.hal_verify.get(wl_name, None) != None:
            tx_chk = tc.hal_verify[wl_name][0]
            rx_chk = tc.hal_verify[wl_name][1]
            # NOTE(review): wl_name.split('_')[0] presumably yields the node
            # name for GetNodeOs -- confirm workload naming convention.
            if api.GetNodeOs(wl_name.split('_')[0]) == 'freebsd':
                # NOTE: freebsd has only one flag for both rx and tx vlan offload
                # we overwrite tx flag using rx flag hence check tx flag with rx flag in verification
                if str(tx_chk[1]) != str(rx_chk[0]):
                    result = api.types.status.FAILURE
                    api.Logger.info("FAILURE: Name: %s, tx_chk: %s" % (wl_name, tx_chk))
                if str(rx_chk[1]) != str(rx_chk[0]):
                    result = api.types.status.FAILURE
                    api.Logger.info("FAILURE: Name: %s, rx_chk: %s" % (wl_name, rx_chk))
            else:
                if tx_chk[0] != tx_chk[1]:
                    result = api.types.status.FAILURE
                    api.Logger.info(
                        "FAILURE: Name: %s, expected tx-flag: %s, in HAL: %s" %
                        (wl_name, tx_chk[0], tx_chk[1]))
                if rx_chk[0] != rx_chk[1]:
                    result = api.types.status.FAILURE
                    api.Logger.info(
                        "FAILURE: Name: %s, expected rx-flag: %s, in HAL: %s" %
                        (wl_name, rx_chk[0], rx_chk[1]))
    # Verify traffic result
    cookie_idx = 0
    for cmd in tc.resp.commands:
        if cmd.exit_code != 0 and not api.Trigger_IsBackgroundCommand(cmd):
            api.Logger.error("Vlan offload failed for %s" % (tc.cmd_cookies[cookie_idx]))
            api.PrintCommandResults(cmd)
            result = api.types.status.FAILURE
        # NOTE(review): increment per command so cookies stay aligned with
        # commands -- reconstructed placement; confirm against original.
        cookie_idx += 1
    api.Logger.info("TC.Verify result: %s" % result)
    return result
def __copy_sonic_to_all_naples(tc):
    """Copy the sonic package to every Naples node; FAILURE on first error."""
    for node in tc.nodes:
        if not api.IsApiResponseOk(api.CopyToHost(node, [tc.sonicpkg])):
            return api.types.status.FAILURE
    return api.types.status.SUCCESS
def Trigger(tc):
    """Toggle tx/rx VLAN-offload flags on every Naples workload (pds flavor)
    and run a ping traffic test.

    Saves original flags into tc.orig_hwtag_flags (for Teardown) and the
    final status responses into tc.cmd_status (for Verify).  The pdsctl
    validation is disabled pending --yaml support in 'pdsctl show lif'.
    """
    if tc.skip or api.IsDryrun():
        return api.types.status.SUCCESS
    result = api.types.status.SUCCESS
    tc.orig_hwtag_flags = {}
    tc.pds_verify = {}
    tc.cmd_status = {}
    tc.tx_random = []
    tc.rx_random = []
    # An int tx/rx arg selects "random" mode: a random subset of workloads
    # gets its flag flipped instead of force-set.
    if type(tc.args.tx) == int:
        tc.tx_random = SetRandom_Offload()
    if type(tc.args.rx) == int:
        tc.rx_random = SetRandom_Offload()
    for wl in api.GetWorkloads():
        # Only operate on parent interfaces (skip sub-interfaces).
        if wl.parent_interface != wl.interface:
            continue
        if wl.IsNaples():
            # Save original flag values for rollback
            tx_status = naples_workload.Get_TxVlanOffload_Status(wl)
            rx_status = naples_workload.Get_RxVlanOffload_Status(wl)
            if api.IsApiResponseOk(rx_status):
                if api.GetNodeOs(wl.node_name) == 'linux':
                    # ethtool output: "rx-vlan-offload: on"
                    rx_enable = (rx_status.commands[0].stdout).split(':')[1]
                elif api.GetNodeOs(wl.node_name) == 'freebsd':
                    # ifconfig options list; VLAN_HWTAGGING present => on
                    options = (rx_status.commands[0].stdout).split(',')
                    if 'VLAN_HWTAGGING' in options:
                        rx_enable = 'on'
                    else:
                        rx_enable = 'off'
                else:
                    api.Logger.error("Unmatched node os %s" % (api.GetNodeOs(wl.node_name), ))
                    result = api.types.status.FAILURE
                    break
            else:
                result = api.types.status.FAILURE
                break
            if api.IsApiResponseOk(tx_status):
                if api.GetNodeOs(wl.node_name) == 'linux':
                    tx_enable = (tx_status.commands[0].stdout).split(':')[1]
                elif api.GetNodeOs(wl.node_name) == 'freebsd':
                    options = (tx_status.commands[0].stdout).split(',')
                    if 'VLAN_HWTAGGING' in options:
                        tx_enable = 'on'
                    else:
                        tx_enable = 'off'
                else:
                    api.Logger.error("Unmatched node os %s" % (api.GetNodeOs(wl.node_name), ))
                    result = api.types.status.FAILURE
                    break
            else:
                result = api.types.status.FAILURE
                break
            # Remember originals so Teardown can roll back.
            tc.orig_hwtag_flags[wl.workload_name] = (tx_enable, rx_enable)
            # Change tx_vlan and rx_vlan as per args
            if type(tc.args.tx) == int:
                if wl.workload_name in tc.tx_random:
                    tx_enable = 'off' if tx_enable == 'on' else 'on'
                else:
                    tx_enable = 'on' if tc.args.tx else 'off'
            toggle_tx_resp = naples_workload.Toggle_TxVlanOffload(
                wl, tx_enable)
            if type(tc.args.rx) == int:
                if wl.workload_name in tc.rx_random:
                    rx_enable = 'off' if rx_enable == 'on' else 'on'
                else:
                    rx_enable = 'on' if tc.args.rx else 'off'
            toggle_rx_resp = naples_workload.Toggle_RxVlanOffload(
                wl, rx_enable)
            # NOTE(review): reconstructed placement -- if the toggle calls
            # are actually conditional on the int-mode checks above, these
            # responses could be unbound here; confirm.
            if not api.IsApiResponseOk(toggle_tx_resp):
                result = api.types.status.FAILURE
                break
            if not api.IsApiResponseOk(toggle_rx_resp):
                result = api.types.status.FAILURE
                break
            # Validate change using pdsctl command
            api.Logger.warn(
                "XXX 'pdsctl show lif' does not support --yaml output, and does not show the mode flag for validation"
            )
            #tc.toggle_resp, is_ok = pdsctl.ExecutePdsctlShowCommand(wl.node_name, 'lif')
            #if api.IsApiResponseOk(tc.toggle_resp):
            #    cmd = tc.toggle_resp.commands[0]
            #    if cmd.stdout is not None:
            #        yml_loaded = yaml.load_all(cmd.stdout, Loader=yaml.FullLoader)
            #        for spec in yml_loaded:
            #            if spec is not None:
            #                name = spec["spec"]["name"]
            #                if name == wl.interface:
            #                    api.Logger.info("Interface: %s, Vlan Insert Enable: %s, Vlan Strip Enable: %s" % (wl.interface, spec["spec"]["vlaninserten"], spec["spec"]["vlanstripen"]))
            #                    tx = str(True) if tx_enable == 'on' else str(False)
            #                    rx = str(True) if rx_enable == 'on' else str(False)
            #                    tc.pds_verify[wl.workload_name] = [(tx, spec["spec"]["vlaninserten"]), (rx, spec["spec"]["vlanstripen"])]
            #else:
            #    result = api.types.status.FAILURE
            #    break
            # Store status for verification
            rx_status = naples_workload.Get_RxVlanOffload_Status(wl)
            tx_status = naples_workload.Get_TxVlanOffload_Status(wl)
            tc.cmd_status[wl.workload_name] = (tx_status, rx_status)
    # Run traffic test
    tc.cmd_cookies, tc.resp = traffic_utils.pingWorkloads(tc.workload_pairs)
    api.Logger.info("TC.Trigger result: %s" % result)
    return result