def Verify(tc):
    """Verify no mirror or flow-monitor sessions remain on any Naples.

    Dumps both tables via halctl on every Naples node and expects every
    command to produce empty output.

    Returns:
        api.types.status.SUCCESS when all dumps are empty, FAILURE otherwise.
    """
    showMirrorCmd = "/nic/bin/halctl show mirror"
    showFlowMonitorCmd = "/nic/bin/halctl show flow-monitor"

    req = api.Trigger_CreateExecuteCommandsRequest(serial=False)
    for node_name in api.GetNaplesHostnames():
        api.Trigger_AddNaplesCommand(req, node_name, showMirrorCmd)
        api.Trigger_AddNaplesCommand(req, node_name, showFlowMonitorCmd)
    resp = api.Trigger(req)

    for cmd in resp.commands:
        api.PrintCommandResults(cmd)

    if not api.Trigger_IsSuccess(resp):
        api.Logger.error(
            "Failed to execute HAL command to dump mirror and flow monitor.")
        return api.types.status.FAILURE

    for cmd in resp.commands:
        # Any stdout means a stale mirror/flow-monitor session is still
        # configured on that Naples.
        if len(cmd.stdout):
            # BUGFIX: corrected typo in the original message ("Commad").
            api.Logger.error("Command: %s validation failed. Expecting empty"
                             % cmd.command)
            return api.types.status.FAILURE
    return api.types.status.SUCCESS
def Main(args):
    """Enable the maximum supported number of SR-IOV VFs on every Naples host.

    For each host interface, reads sriov_totalvfs and writes that value into
    sriov_numvfs, then rebuilds the topology so the new VFs are picked up.

    Returns:
        api.types.status.SUCCESS/FAILURE (or the RebuildTopology status).
    """
    if GlobalOptions.skip_setup:
        # No profile change is required for skip setup
        return api.types.status.SUCCESS

    # Extract the interface name from commands like:
    #   cat /sys/class/net/<ifname>/device/sriov_totalvfs
    # BUGFIX: use a raw string for the regex (the original relied on '\w'
    # surviving in a plain string literal) and compile it once.
    if_pattern = re.compile(r"cat.*/(\w+)/.*/sriov_totalvfs")

    for n in api.GetNaplesHostnames():
        # Read the supported VF count from every host interface:
        #   total_vfs=`cat /sys/class/net/<hostif_device>/device/sriov_totalvfs`
        req = api.Trigger_CreateExecuteCommandsRequest(serial = True)
        for hostIf in api.GetNaplesHostInterfaces(n):
            api.Trigger_AddHostCommand(
                req, n, f"cat /sys/class/net/{hostIf}/device/sriov_totalvfs")
        resp = api.Trigger(req)
        if not api.Trigger_IsSuccess(resp):
            api.Logger.error("Failed to execute/collect sriov_totalvfs from hostIfs : %s" % n)
            return api.types.status.FAILURE

        if_totalvfs_map = {}
        for cmd in resp.commands:
            m = if_pattern.search(cmd.command)
            # BUGFIX: guard against a non-matching command instead of
            # crashing on m.group() when m is None.
            if m is None:
                continue
            if_totalvfs_map[m.group(1)] = cmd.stdout.rstrip()

        # For each host interface apply the value on the host:
        #   echo $total_vfs > /sys/class/net/<hostif_device>/device/sriov_numvfs
        req = api.Trigger_CreateExecuteCommandsRequest(serial = True)
        for hostIf, total_vfs in if_totalvfs_map.items():
            api.Trigger_AddHostCommand(
                req, n,
                f"echo {total_vfs} > /sys/class/net/{hostIf}/device/sriov_numvfs")
        resp = api.Trigger(req)
        if not api.Trigger_IsSuccess(resp):
            api.Logger.error("Failed to execute/apply sriov_totalvfs for each hostIf : %s" % n)
            return api.types.status.FAILURE

    return api.RebuildTopology()
def LinuxReInitMgmtIP(node):
    """Re-run nodeinit.sh on *node* to restore host->mgmt connectivity."""
    cmd = "/naples/nodeinit.sh --mgmt_only --own_ip 0.0.0.0 --trg_ip 0.0.0.0"
    api.Logger.info("Resetting mgmt-if with : %s" % cmd)

    request = api.Trigger_CreateExecuteCommandsRequest(serial = True)
    api.Trigger_AddHostCommand(request, node, cmd)
    response = api.Trigger(request)

    if api.Trigger_IsSuccess(response):
        return api.types.status.SUCCESS
    api.Logger.error("nodeinit.sh to change mgmt ip cmd failed")
    return api.types.status.FAILURE
def __load_linux_driver(node, node_os, manifest_file):
    """Build the ionic driver from source on *node* and load it.

    Copies the driver package to the host, untars and builds it there,
    unloads any currently-loaded driver, then insmod/kldload's the freshly
    built ionic.ko.

    Returns:
        api.types.status.SUCCESS/FAILURE.
    """
    image_manifest = parser.JsonParse(manifest_file)
    driver_images = list(filter(lambda x: x.OS == node_os,
                                image_manifest.Drivers))[0].Images[0]
    if driver_images is None:
        api.Logger.error("Unable to load image manifest")
        return api.types.status.FAILURE

    # BUGFIX: the original referenced the undefined name 'Gl' here (NameError
    # at runtime); use GlobalOptions.topdir, matching __load_esxi_driver.
    drImgFile = os.path.join(GlobalOptions.topdir, driver_images.drivers_pkg)
    api.Logger.info("Fullpath for driver image: " + drImgFile)
    resp = api.CopyToHost(node, [drImgFile], "")
    if not api.IsApiResponseOk(resp):
        api.Logger.error("Failed to copy %s" % drImgFile)
        return api.types.status.FAILURE

    # Package basename (without extension) doubles as the build directory.
    rundir = os.path.basename(driver_images.drivers_pkg).split('.')[0]
    req = api.Trigger_CreateExecuteCommandsRequest(serial = True)
    api.Trigger_AddHostCommand(req, node,
                               "tar -xf " + os.path.basename(driver_images.drivers_pkg))
    api.Trigger_AddHostCommand(req, node, "./build.sh", rundir=rundir)
    resp = api.Trigger(req)
    if not api.IsApiResponseOk(resp):
        api.Logger.error("TriggerCommand for driver build failed")
        return api.types.status.FAILURE
    for cmd in resp.commands:
        if cmd.exit_code != 0 and cmd.command != './build.sh':
            # Build.sh could fail -ignored (FIXME)
            api.Logger.error("Failed to exec cmds to build/load new driver")
            return api.types.status.FAILURE
    api.Logger.info("New driver image is built on target host. Prepare to load")

    if host.UnloadDriver(node_os, node) != api.types.status.SUCCESS:
        # Best-effort: a failed unload is logged but not fatal.
        api.Logger.error("Failed to unload current driver - proceeding")

    req = api.Trigger_CreateExecuteCommandsRequest(serial = True)
    ko_path = os.path.join(rundir, "drivers/eth/ionic/ionic.ko")
    if node_os == OS_TYPE_LINUX:
        api.Trigger_AddHostCommand(req, node, "insmod " + ko_path)
    elif node_os == OS_TYPE_BSD:
        api.Trigger_AddHostCommand(req, node, "kldload " + ko_path)
    resp = api.Trigger(req)
    if not api.Trigger_IsSuccess(resp):
        api.Logger.error("TriggerCommand for driver installation failed")
        return api.types.status.FAILURE
    return api.types.status.SUCCESS
def Main(tc):
    """Synchronize each Naples' clock to its Linux host's current UTC time.

    Reads `date -u` on the host, then sets the same date on the Naples.

    Returns:
        api.types.status.SUCCESS on success, ERROR on any command failure.
    """
    naples_nodes = api.GetNaplesNodes()
    if len(naples_nodes) == 0:
        api.Logger.error("No naples node found")
        return api.types.status.SUCCESS

    for node in naples_nodes:
        api.Logger.info("Setting Date for Naples: {}".format(node.Name()))
        if api.GetNodeOs(node.Name()) != host.OS_TYPE_LINUX:
            # Only Linux hosts are handled here.
            continue

        # BUGFIX: the original reused one request object across both phases
        # and across loop iterations, so commands accumulated and
        # resp.commands[0] indexed the wrong output after the first Trigger.
        # Use a fresh request per phase.
        req = api.Trigger_CreateExecuteCommandsRequest(serial=True)
        api.Trigger_AddHostCommand(req, node.Name(), "date -u +%Y-%m-%d+%T")
        resp = api.Trigger(req)
        if not api.Trigger_IsSuccess(resp):
            api.Logger.error("Failed to retrieve date for {}".format(
                node.Name()))
            return api.types.status.ERROR

        # Extract the date output from the host command.
        date_str = resp.commands[0].stdout.strip("\n")

        req = api.Trigger_CreateExecuteCommandsRequest(serial=True)
        api.Trigger_AddNaplesCommand(req, node.Name(),
                                     "date -u -s {}".format(date_str))
        resp = api.Trigger(req)
        if not api.Trigger_IsSuccess(resp):
            api.Logger.error(
                "Failed to configure date for naples in {}".format(
                    node.Name()))
            return api.types.status.ERROR
        api.Logger.info("Naples date set to {}".format(
            resp.commands[0].stdout.strip("\n")))
    return api.types.status.SUCCESS
def __mk_testcase_directory(self, newdir):
    """Create *newdir* (world-writable) on every host and running workload."""
    Logger.debug("Creating Testcase directory: %s" % newdir)
    command = "mkdir -p %s && chmod 777 %s" % (newdir, newdir)

    req = api.Trigger_CreateAllParallelCommandsRequest()
    for nodename in api.GetWorkloadNodeHostnames():
        api.Trigger_AddHostCommand(req, nodename, command)
    for workload in api.GetWorkloads():
        # Only workloads that are actually running can execute the command.
        if not api.IsWorkloadRunning(workload.workload_name):
            continue
        api.Trigger_AddCommand(req, workload.node_name,
                               workload.workload_name, command, timeout=60)

    if api.Trigger_IsSuccess(api.Trigger(req)):
        return types.status.SUCCESS
    Logger.error("Failed to create destination directory %s" % newdir)
    return types.status.FAILURE
def GetLinuxFwDriverVersion(node):
    """Return (firmware_version, driver_version) reported by nodeinit.sh.

    Runs `/naples/nodeinit.sh --version` on the host and parses stdout of
    the form:
        version: 1.3.0-E-123-13-g827ffc8
        firmware-version: 1.1.1-E-15

    Returns:
        (fw_version, dr_version); each may be None on failure or if the
        corresponding line is absent.
    """
    cmd = "/naples/nodeinit.sh --version"
    api.Logger.info("Collect software version with : %s" % cmd)
    req = api.Trigger_CreateExecuteCommandsRequest(serial = True)
    api.Trigger_AddHostCommand(req, node, cmd)
    resp = api.Trigger(req)
    if not api.Trigger_IsSuccess(resp):
        api.Logger.error("Failed to collect version")
        return None, None

    fw_version = None
    dr_version = None
    for line in resp.commands[0].stdout.split('\n'):
        parts = line.rstrip().split()
        if len(parts) < 2:
            # BUGFIX: skip blank/short lines instead of raising IndexError.
            continue
        # BUGFIX: match 'firmware-version' before the generic 'version'
        # check - the original's `'version' in x` filter matched both lines
        # and depended on their ordering.
        if line.startswith('firmware-version'):
            fw_version = parts[1]
        elif line.startswith('version'):
            dr_version = parts[1]
    return fw_version, dr_version
def Trigger(tc):
    """Exercise ESX hypervisor command-execution and file-copy helpers.

    Runs `esxcli device driver list` on each node, uploads testbed.json to
    /tmp on each ESX host, and downloads vmware.log for every workload VM.
    The per-step result is recorded in tc.resp; the function itself always
    returns SUCCESS so a later Verify stage can inspect tc.resp.
    """
    tc.resp = api.types.status.SUCCESS

    # Execute a command on ESXi hypervisor and collect output
    req = api.Trigger_CreateExecuteCommandsRequest()
    for node in tc.nodes:
        api.Trigger_AddESXCommand(req, node, "esxcli device driver list")
    resp = api.Trigger(req)
    if not api.Trigger_IsSuccess(resp):
        tc.resp = api.types.status.FAILURE
    else:
        for cmd in resp.commands:
            api.PrintCommandResults(cmd)

    # Upload a file (testbed.json file) to each ESX hypervisor
    if tc.resp == api.types.status.SUCCESS:
        for node in tc.nodes:
            resp = api.CopyToEsx(node, [GlobalOptions.testbed_json],
                                 host_dir=".", esx_dir="/tmp")
            if not api.IsApiResponseOk(resp):
                tc.resp = api.types.status.FAILURE
                break

    # Download a file from ESX hypervisor
    if tc.resp == api.types.status.SUCCESS:
        for node in tc.nodes:
            wloads = api.GetWorkloads(node)
            for wl in wloads:
                folder = os.path.join(api.topdir, node, wl.workload_name)
                # BUGFIX: tolerate re-runs - the original os.makedirs(folder)
                # raised FileExistsError if the folder already existed.
                os.makedirs(folder, exist_ok=True)
                resp = api.CopyFromESX(node, ["vmware.log"], dest_dir=folder,
                                       esx_dir="/vmfs/volumes/datastore1/" + wl.workload_name)
                if not api.IsApiResponseOk(resp):
                    tc.resp = api.types.status.FAILURE
                    break
    return api.types.status.SUCCESS
def check_validate_link_up(test_case):
    """Check and validate link-up on every port of every Naples in test_case.

    Dumps each port's state via `halctl show port --yaml`, verifies the link
    is up, validates the oper state and caches it on the cached Port object.

    Returns:
        api.types.status.SUCCESS/FAILURE.
    """
    # for each node
    for node in test_case.nodes:
        naples_devices = api.GetDeviceNames(node)
        # for each Naples on node
        for naples_device in naples_devices:
            req = api.Trigger_CreateExecuteCommandsRequest(serial=True)
            # for each port on Naples
            # cmds sent follows the ports ordering in ports_ifindex_arr
            for port_id in range(len(ports_ifindex_arr)):
                port_name = get_port_name(port_id)
                api.Trigger_AddNaplesCommand(req, node,
                                             "/nic/bin/halctl show port --port " + port_name + " --yaml",
                                             naples_device)
            resp = api.Trigger(req)
            if not api.Trigger_IsSuccess(resp):
                api.Logger.error("Failed to trigger show port cmd on node %s naples %s" % (node, naples_device))
                return api.types.status.FAILURE
            # responses follow the port ordering in ports_ifindex_arr
            # (serial=True preserves command order, so port_id tracks the
            # response index back to the port it was issued for)
            port_id = 0
            for cmd in resp.commands:
                port_name = get_port_name(port_id)
                port = get_port(node, naples_device, port_name)
                oper_state_list = Port.parse_yaml_data(cmd.stdout)
                if not port.check_link_up(oper_state_list):
                    api.Logger.error("Failed to linkup on node %s naples %s port %s" % (node, naples_device, port_name))
                    return api.types.status.FAILURE
                if not port.validate_link_up(oper_state_list):
                    api.Logger.error("Failed to validate linkup on node %s naples %s port %s" % (node, naples_device, port_name))
                    return api.types.status.FAILURE
                # update port oper states
                port.set_oper_states(oper_state_list)
                port.print()
                port_id += 1
    return api.types.status.SUCCESS
def Setup(test_case):
    """Collect baseline port oper-states for the link-down-count test.

    Creates a Port object per (node, naples, port), dumps its current state
    via `halctl show port --yaml` and caches the parsed oper state on it.

    Returns:
        api.types.status.SUCCESS/FAILURE.
    """
    api.Logger.info("Link Down count verify after link resets")
    test_case.nodes = api.GetNaplesHostnames()
    # for each node
    for node in test_case.nodes:
        naples_devices = api.GetDeviceNames(node)
        # for each Naples on node
        for naples_device in naples_devices:
            req = api.Trigger_CreateExecuteCommandsRequest(serial=True)
            # for each port on Naples
            # cmds sent follows the ports ordering in ports_ifindex_arr
            for port_id in range(len(ports_ifindex_arr)):
                port_name = get_port_name(port_id)
                ifindex = get_port_ifindex(port_id)
                cmd_str = "/nic/bin/halctl show port --port " + port_name + " --yaml"
                api.Trigger_AddNaplesCommand(req, node, cmd_str, naples_device)
                # Register the Port object so later stages can look it up.
                port = Port(node, naples_device, port_name, ifindex)
                add_to_ports_dict(node, naples_device, port_name, port)
            resp = api.Trigger(req)
            if not api.Trigger_IsSuccess(resp):
                api.Logger.error("Failed to trigger show port cmd on node %s naples %s" % (node, naples_device))
                return api.types.status.FAILURE
            # responses follow the port ordering in ports_ifindex_arr
            # (serial=True preserves command order, so port_id tracks the
            # response index back to the port it was issued for)
            port_id = 0
            for cmd in resp.commands:
                port_name = get_port_name(port_id)
                port = get_port(node, naples_device, port_name)
                oper_state_list = Port.parse_yaml_data(cmd.stdout)
                # set port oper states
                port.set_oper_states(oper_state_list)
                port.print()
                port_id += 1
    return api.types.status.SUCCESS
def check_marvell_access(test_case):
    """Verify Marvell switch registers are readable on every Naples.

    Reads the InBadOctets counters via halctl; an all-ones value
    (0xFFFFFFFF) indicates a failed register access.

    Returns:
        api.types.status.SUCCESS/FAILURE.
    """
    cmd = "/nic/bin/halctl show port internal statistics | grep InBadOctets"
    # for each node
    for node in test_case.nodes:
        naples_devices = api.GetDeviceNames(node)
        # for each Naples on node
        for naples_device in naples_devices:
            req = api.Trigger_CreateExecuteCommandsRequest(serial=True)
            api.Trigger_AddNaplesCommand(req, node, cmd, naples_device)
            resp = api.Trigger(req)
            if not api.Trigger_IsSuccess(resp):
                api.Logger.error(
                    "Failed to trigger show port internal cmd on node %s naples %s"
                    % (node, naples_device))
                return api.types.status.FAILURE
            for cmd_resp in resp.commands:
                # Sample output (one line per port):
                #   InBadOctets                                 : 0
                for output_line in cmd_resp.stdout.splitlines():
                    api.Logger.info("cmd_resp output: " + output_line)
                    output_line_list = output_line.split(':')
                    # BUGFIX: skip blank/unexpected lines instead of raising
                    # IndexError on lines without a ':' separator.
                    if len(output_line_list) < 2:
                        continue
                    try:
                        val = int(output_line_list[1].strip())
                    except ValueError:
                        # Non-numeric counter field - skip it.
                        continue
                    api.Logger.info("val: " + str(val))
                    if val == 0xFFFFFFFF:
                        api.Logger.error(
                            "Failed Marvell access on node %s naples %s"
                            % (node, naples_device))
                        return api.types.status.FAILURE
    return api.types.status.SUCCESS
def __load_esxi_driver(node, node_os, manifest_file):
    """Install the ionic ESXi driver VIB on *node*.

    Accepts either a source bundle (extracted locally to locate the .vib)
    or a prebuilt VIB, copies it to the ESX host and installs it with
    `esxcli software vib install`.

    Returns:
        api.types.status.SUCCESS/FAILURE.
    """
    image_manifest = parser.JsonParse(manifest_file)
    driver_images = list(filter(lambda x: x.OS == node_os,
                                image_manifest.Drivers))[0].Images[0]
    if driver_images is None:
        api.Logger.error("Unable to load image manifest")
        return api.types.status.FAILURE

    drImgFile = os.path.join(GlobalOptions.topdir, driver_images.drivers_pkg)
    if driver_images.pkg_file_type == "SrcBundle":
        # Copy and extract the file and find ionic*.vib file to load
        os.system("tar -xf " + drImgFile)
        pkgdir = os.path.basename(driver_images.drivers_pkg).split('.')[0]
        plist = list(Path(pkgdir).rglob('*.vib'))
        if not plist:
            api.Logger.error("Unable to find VIB file in driver-pkg: %s"
                             % driver_images.drivers_pkg)
            return api.types.status.FAILURE
        vib_file = str(plist[0].absolute())
    elif driver_images.pkg_file_type == "VIB":
        vib_file = drImgFile
    else:
        api.Logger.error("Unknown format for driver-pkg: %s - aborting"
                         % driver_images.drivers_pkg)
        return api.types.status.FAILURE

    api.Logger.info("Loading %s on node: %s" % (vib_file, node))
    resp = api.CopyToEsx(node, [vib_file], host_dir="")
    if not api.IsApiResponseOk(resp):
        # BUGFIX: the original passed the filename as a second positional
        # argument instead of %-formatting it; use '%' like the rest of
        # this file so the name is actually interpolated.
        api.Logger.error("Failed to copy %s" % driver_images.drivers_pkg)
        return api.types.status.FAILURE

    req = api.Trigger_CreateExecuteCommandsRequest(serial = True)
    # Install via ssh to the ESX host itself (CopyToEsx places files in /tmp).
    api.Trigger_AddHostCommand(
        req, node,
        "sshpass -p %s ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no %s@%s esxcli software vib install -v=/tmp/%s -f"
        % (api.GetTestbedEsxPassword(), api.GetTestbedEsxUsername(),
           api.GetEsxHostIpAddress(node), os.path.basename(vib_file)))
    resp = api.Trigger(req)
    if not api.Trigger_IsSuccess(resp):
        api.Logger.error("Failed to install driver")
        return api.types.status.FAILURE
    return api.types.status.SUCCESS
def Main(args):
    """Turn on IOMMU in every Naples host's grub config and regenerate grub."""
    if GlobalOptions.skip_setup:
        # No profile change is required for skip setup
        return api.types.status.SUCCESS

    # Add "iommu=on" into the (empty) GRUB_CMDLINE_LINUX value, e.g.
    #   sudo sed -i '/GRUB_CMDLINE_LINUX/ s/""/"iommu=on"/' /etc/default/grub
    sed_cmd = '''sudo sed -i '/GRUB_CMDLINE_LINUX/ s/""/"iommu=on"/' /etc/default/grub'''

    request = api.Trigger_CreateExecuteCommandsRequest(serial = True)
    for hostname in api.GetNaplesHostnames():
        api.Trigger_AddHostCommand(request, hostname, sed_cmd)
        api.Trigger_AddHostCommand(request, hostname, 'sudo update-grub')

    if api.Trigger_IsSuccess(api.Trigger(request)):
        return api.types.status.SUCCESS
    api.Logger.error("Failed to execute cmd/change boot parameters")
    return api.types.status.FAILURE
def Main(args):
    """Switch the Naples feature profile via penctl and reboot if it changed."""
    if GlobalOptions.skip_setup:
        # No profile change is required for skip setup
        return api.types.status.SUCCESS

    newProfile = getattr(args, 'profile', 'FEATURE_PROFILE_BASE')
    # Guard clause: nothing to do when the requested profile is the default.
    if newProfile == 'FEATURE_PROFILE_BASE':
        api.Logger.info("Profile unchanged, input: %s" % newProfile)
        return api.types.status.SUCCESS

    request = api.Trigger_CreateExecuteCommandsRequest(serial=True)
    for hostname in api.GetNaplesHostnames():
        common.AddPenctlCommand(
            request, hostname, "update dsc --device-profile %s" % newProfile)
    if not api.Trigger_IsSuccess(api.Trigger(request)):
        return api.types.status.FAILURE

    # The profile only takes effect after a reboot.
    api.Logger.info("Naples profile changed to %s - Restarting nodes" % newProfile)
    return api.RestartNodes(api.GetNaplesHostnames())
def Main(tc):
    """Bring up oob_mnic0 on every Naples and verify DHCP assigned an IP."""
    naples_nodes = api.GetNaplesNodes()
    if len(naples_nodes) == 0:
        api.Logger.error("No naples node found")
        return api.types.status.SUCCESS

    # Bring the OOB management interface up and run dhclient on each Naples.
    request = api.Trigger_CreateExecuteCommandsRequest(serial = True)
    for node in naples_nodes:
        name = node.Name()
        api.Trigger_AddNaplesCommand(request, name, "ifconfig oob_mnic0 up")
        api.Trigger_AddNaplesCommand(request, name, "dhclient oob_mnic0",
                                     timeout=300)
    if not api.Trigger_IsSuccess(api.Trigger(request)):
        api.Logger.error("Failed to run cmds")
        return api.types.status.ERROR

    # Confirm every Naples actually obtained an address.
    for node in naples_nodes:
        oob_ip = GetOOBMnicIP(node.Name())
        if not oob_ip:
            api.Logger.error("Node %s failed to obtain oob_mnic IP" % (node.Name()))
            return api.types.status.FAILURE
        api.Logger.info("Obtained %s oob ip-address %s" % (node.Name(), oob_ip))
    return api.types.status.SUCCESS
def flap_port(test_case):
    """Toggle admin-state down then up on every port of every Naples."""
    down_cmd = "/nic/bin/halctl debug port --port %s --admin-state down"
    up_cmd = "/nic/bin/halctl debug port --port %s --admin-state up"

    for node in test_case.nodes:
        for naples_device in api.GetDeviceNames(node):
            # Commands follow the port ordering in ports_ifindex_arr.
            req = api.Trigger_CreateExecuteCommandsRequest(serial=True)
            for port_id in range(len(ports_ifindex_arr)):
                port_name = get_port_name(port_id)
                api.Logger.info("admin-state toggle trigger on node %s naples %s port %s" % (node, naples_device, port_name))
                api.Trigger_AddNaplesCommand(req, node,
                                             down_cmd % port_name,
                                             naples_device)
                api.Trigger_AddNaplesCommand(req, node,
                                             up_cmd % port_name,
                                             naples_device)
            resp = api.Trigger(req)
            if not api.Trigger_IsSuccess(resp):
                api.Logger.error("Failed to trigger update port cmd on node %s naples %s" % (node, naples_device))
                return api.types.status.FAILURE
    return api.types.status.SUCCESS
def Trigger(tc):
    """Repeatedly PXE-install an OS on the test node and restore IOTA state.

    Each iteration: save agent state, drop a sentinel file on the current
    filesystem, PXE-boot (via ipmitool) to reinstall the OS, boot back from
    disk, reinstall driver images, restore agent state, re-add workloads and
    verify the sentinel file is gone - proving a fresh OS instance.

    Raises:
        OfflineTestbedException: when the testbed is left unusable.
    Returns:
        api.types.status.SUCCESS/FAILURE.
    """
    cimc_info = tc.test_node.GetCimcInfo()
    cimc_ip_address = cimc_info.GetIp()
    cimc_username = cimc_info.GetUsername()
    cimc_password = cimc_info.GetPassword()
    host_ipaddr = api.GetMgmtIPAddress(tc.test_node_name)

    if reboot.checkLinks(tc, tc.test_node_name) is api.types.status.FAILURE:
        api.Logger.error("Error verifying uplink interfaces")
        return api.types.status.FAILURE

    for install in range(tc.args.install_iterations):
        # save
        api.Logger.info(f"Saving node: {tc.test_node_name}")
        if api.SaveIotaAgentState([tc.test_node_name]) == api.types.status.FAILURE:
            raise OfflineTestbedException

        # touch the file on server to ensure this instance of OS is gone later
        req = api.Trigger_CreateExecuteCommandsRequest()
        touch_file_cmd = "touch /naples/oldfs"
        api.Trigger_AddHostCommand(req, tc.test_node_name, touch_file_cmd)
        resp = api.Trigger(req)
        if api.Trigger_IsSuccess(resp) is not True:
            api.Logger.error(f"Failed to run command on host {tc.test_node_name}, {touch_file_cmd}")
            return api.types.status.FAILURE

        # Boot from PXE to intall an OS
        api.Logger.info(f"Starting PXE Install Loop # {install} on {tc.test_node_name}")
        cmd = "ipmitool -I lanplus -H %s -U %s -P %s chassis bootdev pxe options=efiboot" %\
            (cimc_ip_address, cimc_username, cimc_password)
        subprocess.check_call(cmd, shell=True)
        time.sleep(5)

        # reboot server
        cmd = "ipmitool -I lanplus -H %s -U %s -P %s chassis power cycle" %\
            (cimc_ip_address, cimc_username, cimc_password)
        subprocess.check_call(cmd, shell=True)
        # NOTE(review): fixed 180s wait before polling ssh - presumably tuned
        # to this testbed's install time; confirm before changing.
        time.sleep(180)

        # wait for installation to finish and server to come back
        api.Logger.info(f"Waiting for host to come up: {host_ipaddr}")
        if not waitforssh(host_ipaddr):
            raise OfflineTestbedException

        # Boot from HDD to run the test
        api.Logger.info(f"Setting Boot Order to HDD and rebooting {tc.test_node_name}")
        cmd = "ipmitool -I lanplus -H %s -U %s -P %s chassis bootdev disk options=efiboot" %\
            (cimc_ip_address, cimc_username, cimc_password)
        subprocess.check_call(cmd, shell=True)
        time.sleep(5)

        # reboot server
        cmd = "ipmitool -I lanplus -H %s -U %s -P %s chassis power cycle" %\
            (cimc_ip_address, cimc_username, cimc_password)
        subprocess.check_call(cmd, shell=True)
        api.Logger.info(f"Waiting for host to come up: {host_ipaddr}")
        time.sleep(180)
        if not waitforssh(host_ipaddr):
            raise OfflineTestbedException

        # restore
        api.Logger.info(f"Restoring node: {tc.test_node_name}")
        resp = api.ReInstallImage(fw_version=None, dr_version="latest")
        if resp != api.types.status.SUCCESS:
            api.Logger.error(f"Failed to install images on the testbed")
            raise OfflineTestbedException

        resp = api.RestoreIotaAgentState([tc.test_node_name])
        if resp != api.types.status.SUCCESS:
            api.Logger.error(f"Failed to restore agent state after PXE install")
            raise OfflineTestbedException
        api.Logger.info(f"PXE install iteration #{install} - SUCCESS")

        try:
            wl_api.ReAddWorkloads(tc.test_node_name)
        except:
            api.Logger.error(f"ReaddWorkloads failed with exception - See logs for details")
            return api.types.status.FAILURE

        # check touched file is not present, to ensure this is a new OS instance
        oldfs_command = "ls /naples/oldfs"
        req = api.Trigger_CreateExecuteCommandsRequest()
        api.Trigger_AddHostCommand(req, tc.test_node_name, oldfs_command)
        resp = api.Trigger(req)
        if api.IsApiResponseOk(resp) is not True:
            api.Logger.error(f"Failed to run command on host {tc.test_node_name} {oldfs_command}")
            return api.types.status.FAILURE
        cmd = resp.commands.pop()
        # exit_code 0 means 'ls' found the sentinel => the old FS survived.
        if cmd.exit_code == 0:
            api.Logger.error(f"Old file is present in FS after PXE install")
            return api.types.status.FAILURE

    api.Logger.info("PXE boot completed! Host is up.")
    return api.types.status.SUCCESS
def LoadFirmware(nodes, node_os, target_version):
    """Install the given firmware release on every node in *nodes*.

    Downloads the release assets (unless 'latest'), copies naples_fw.tar to
    each Naples (working around 1.1.1-E-15's lack of OOB connectivity), runs
    sysupdate.sh, reboots the node and re-initializes host->mgmt connectivity.

    Returns:
        api.types.status.SUCCESS/FAILURE, or the DownloadAssets response on
        download failure (matching the original contract).
    """
    if target_version != 'latest':
        resp = api.DownloadAssets(release_version = target_version)
        if not api.IsApiResponseOk(resp):
            api.Logger.error("Failed to download assets for %s" % target_version)
            return resp

    manifest_file = os.path.join(api.GetTopDir(), 'images', target_version + '.json')
    image_manifest = parser.JsonParse(manifest_file)
    fw_images = list(filter(lambda x: x.naples_type == "capri",
                            image_manifest.Firmwares))[0]
    if fw_images is None:
        api.Logger.error("Unable to load image manifest")
        return api.types.status.FAILURE
    api.Logger.info("Fullpath for firmware image to load: %s " % fw_images.image)

    fwImgFile = os.path.join(GlobalOptions.topdir, fw_images.image)

    for node in nodes:
        # BUGFIX: query the running firmware version per node; the original
        # called GetLinuxFwDriverVersion(node) before 'node' was bound.
        if node_os == OS_TYPE_LINUX:
            fw_version, _ = GetLinuxFwDriverVersion(node)
        else:
            fw_version = None

        if fw_version == '1.1.1-E-15':
            # Naples with Fw 1.1.1-E-15 has no OOB and IntMgmt Ip is fixed
            resp = api.CopyToNaples(node, [fwImgFile], image_manifest.Version,
                                    via_oob=False, naples_dir="data",
                                    nic_mgmt_ip=GetNicIntMgmtIP(node))
            if not api.IsApiResponseOk(resp):
                api.Logger.info("Failed to copy naples_fw.tar with via_oob=True")
                return api.types.status.FAILURE
            for cmd in resp.commands:
                if cmd.exit_code != 0:
                    api.Logger.error("Failed to copy %s naples_fw.tar via_oob=True"
                                     % image_manifest.Version)
                    return api.types.status.FAILURE
        else:
            resp = api.CopyToNaples(node, [fwImgFile], image_manifest.Version,
                                    via_oob=True, naples_dir="data")
            if not api.IsApiResponseOk(resp):
                api.Logger.info("Failed to copy naples_fw.tar with via_oob=True")
                return api.types.status.FAILURE
            for cmd in resp.commands:
                if cmd.exit_code != 0:
                    api.Logger.error("Failed to copy %s naples_fw.tar via_oob=True"
                                     % image_manifest.Version)
                    # Try with via_oob=False
                    resp = api.CopyToNaples(node, [fwImgFile],
                                            image_manifest.Version,
                                            via_oob=False, naples_dir="data")
                    if not api.Trigger_IsSuccess(resp):
                        api.Logger.error("Failed to copy naples_fw.tar to target naples")
                        # BUGFIX: the original split 'return' and the status
                        # across lines, silently returning None here.
                        return api.types.status.FAILURE

        # Flush and apply the firmware image on the Naples.
        req = api.Trigger_CreateExecuteCommandsRequest(serial = True)
        api.Trigger_AddNaplesCommand(req, node, "sync")
        api.Trigger_AddNaplesCommand(req, node,
                                     "/nic/tools/sysupdate.sh -p /data/naples_fw.tar",
                                     timeout=120)
        resp = api.Trigger(req)
        if not api.Trigger_IsSuccess(resp):
            api.Logger.error("sysupdate.sh cmd failed")
            return api.types.status.FAILURE

        api.RestartNodes([node])

        # BUGFIX: re-init mgmt connectivity per node and continue with the
        # remaining nodes; the original returned after the first node.
        if node_os == OS_TYPE_LINUX:
            if LinuxReInitMgmtIP(node) != api.types.status.SUCCESS:
                return api.types.status.FAILURE
        elif node_os == OS_TYPE_ESX:
            if ESXiReInitMgmtIP(node) != api.types.status.SUCCESS:
                return api.types.status.FAILURE
    return api.types.status.SUCCESS