def Setup(tc):
    """Prepare the manufacturing (mfg) test: publish testsuite attrs, pick the
    bitw/workload nodes, collect tagged workload VLANs for the uplink under
    test, generate the agent start script, and push scripts to Naples.

    Returns api.types.status.SUCCESS/FAILURE.
    """
    parse_args(tc)

    # Publish settings so later testcases in the suite can read them.
    api.SetTestsuiteAttr("mfg_test_intf", tc.test_intf)
    api.SetTestsuiteAttr("mfg_mode", "yes")
    api.SetTestsuiteAttr("preinit_script_path", NAPLES_PREINIT_SCRIPT_PATH)
    api.SetTestsuiteAttr("start_agent_script_path", NAPLES_START_AGENT_SCRIPT_PATH)

    # get node info
    tc.bitw_node_name = None
    tc.wl_node_name = None

    bitw_node_nic_pairs = athena_app_utils.get_athena_node_nic_names()
    classic_node_nic_pairs = utils.get_classic_node_nic_pairs()
    # Only one node for single-nic topology
    tc.bitw_node_name = bitw_node_nic_pairs[0][0]
    tc.wl_node_name = classic_node_nic_pairs[0][0]

    host_intfs = api.GetNaplesHostInterfaces(tc.wl_node_name)
    # Assuming single nic per host
    if len(host_intfs) != 2:
        api.Logger.error('Failed to get host interfaces')
        return api.types.status.FAILURE
    up0_intf = host_intfs[0]
    up1_intf = host_intfs[1]

    workloads = api.GetWorkloads()
    if len(workloads) == 0:
        api.Logger.error('No workloads available')
        return api.types.status.FAILURE

    # Collect VLANs only from workloads riding on the uplink selected by
    # tc.test_intf ('up0' or 'up1'); untagged (vlan 0) workloads are skipped.
    wl_vlans = []
    for wl in workloads:
        if (wl.parent_interface == up0_intf and tc.test_intf == 'up0') or (wl.parent_interface == up1_intf and tc.test_intf == 'up1'):
            if wl.uplink_vlan != 0:  # Tagged workload
                wl_vlans.append(wl.uplink_vlan)

    if len(wl_vlans) < NUM_MFG_TEST_VLANS:
        api.Logger.error('Failed to fetch %d tagged workloads for mfg test'
                         ' on uplink %s' % (NUM_MFG_TEST_VLANS, tc.test_intf))
        return api.types.status.FAILURE

    # generate start agent script with testbed vlans
    if gen_start_agent_script(wl_vlans) != api.types.status.SUCCESS:
        return api.types.status.FAILURE

    # copy preinit script and start agent script to naples
    preinit_filename = api.GetTopDir() + '/' + WS_PREINIT_SCRIPT_PATH
    start_agent_filename = api.GetTopDir(
    ) + '/' + WS_IOTA_START_AGENT_SCRIPT_PATH
    api.CopyToNaples(tc.bitw_node_name,
                     [preinit_filename, start_agent_filename], "")
    # The start-agent script was generated above; remove the local copy.
    # (The preinit script is a permanent workspace file, so it is kept.)
    os.remove(start_agent_filename)

    return api.types.status.SUCCESS
def Main(step):
    """Copy the iperf3 binary (and, on BSD hosts, the ionic stats script) to
    every Naples node and symlink it into place.

    Bug fix: the original set ``result = FAILURE`` on a failed command but
    then unconditionally returned SUCCESS, so failures were silently dropped.
    The status is now propagated to the caller.
    """
    if GlobalOptions.skip_setup:
        return api.types.status.SUCCESS

    result = api.types.status.SUCCESS
    req = api.Trigger_CreateExecuteCommandsRequest(serial=True)
    api.ChangeDirectory("iperf")
    for naples_host in api.GetNaplesHostnames():
        if api.GetNodeOs(naples_host) == host.OS_TYPE_BSD:
            # BSD hosts additionally get the ionic stats helper script.
            api.CopyToHost(naples_host, [IONIC_STATS_SCRIPT], "")
            api.Trigger_AddHostCommand(
                req, naples_host,
                "cp ionic_stats.sh " + api.HOST_NAPLES_DIR)
        api.CopyToNaples(naples_host, [IPERF_BINARY], "",
                         naples_dir="/usr/bin/")
        api.Trigger_AddNaplesCommand(
            req, naples_host,
            "ln -s /usr/bin/iperf3_aarch64 /usr/bin/iperf3")

    resp = api.Trigger(req)
    for cmd in resp.commands:
        api.PrintCommandResults(cmd)
        if cmd.exit_code != 0:
            result = api.types.status.FAILURE
    return result
def installBinary(node, img):
    """Copy a binary to the host and to Naples /update, then make it executable.

    Bug fix: the original checked ``resp is None`` after CopyToHost but not
    after CopyToNaples, so a failed Naples copy could raise AttributeError on
    ``resp.api_response``; the sibling installNaplesFwLatestImage checks both.

    Returns api.types.status.SUCCESS/FAILURE.
    """
    fullpath = os.path.join(api.GetTopDir(), img)
    api.Logger.info("fullpath for binary: " + fullpath)

    resp = api.CopyToHost(node, [fullpath], "")
    if resp is None:
        return api.types.status.FAILURE
    if resp.api_response.api_status != types_pb2.API_STATUS_OK:
        api.Logger.error("Failed to copy Drivers to Node: %s" % node)
        return api.types.status.FAILURE

    resp = api.CopyToNaples(node, [fullpath], "", naples_dir="/update")
    if resp is None:
        return api.types.status.FAILURE
    if resp.api_response.api_status != types_pb2.API_STATUS_OK:
        api.Logger.error("Failed to copy Drivers to Node: %s" % node)
        return api.types.status.FAILURE

    # Make the copied binary executable on Naples.
    req = api.Trigger_CreateExecuteCommandsRequest()
    api.Trigger_AddNaplesCommand(
        req, node,
        "chmod 777 /update/{}".format(os.path.basename(fullpath)))
    resp = api.Trigger(req)
    for cmd_resp in resp.commands:
        api.PrintCommandResults(cmd_resp)
        if cmd_resp.exit_code != 0:
            api.Logger.error("failed to change permission %s",
                             cmd_resp.command)
            return api.types.status.FAILURE
    return api.types.status.SUCCESS
def Setup(tc):
    """Stage a policy.json (default or custom) into /data on every Athena
    node/nic pair and sync the filesystem.

    Returns api.types.status.SUCCESS/FAILURE.
    """
    parse_args(tc)

    # Longer sleeps are needed for the custom-policy path — presumably the
    # larger policy takes longer to load; TODO confirm with athena_app owners.
    if tc.policy_type == "default":
        tc.sec_app_restart_sleep = 120
        tc.flow_cache_read_sleep = 15
    else:
        tc.sec_app_restart_sleep = 180
        tc.flow_cache_read_sleep = 45

    tc.node_nic_pairs = athena_app_utils.get_athena_node_nic_names()
    tc.custom_policy_path = api.GetTopDir() + '/' + CUSTOM_PLCY_JSON_DIR + '/' \
                            + CUSTOM_PLCY_JSON_FNAME
    tc.default_policy_path = api.GetTopDir() + '/' + DEFAULT_PLCY_JSON_FILEPATH
    tc.gen_custom_plcy_fname = ''

    # if file is already there, it will overwrite the old file
    cmd = ""
    for node, nic in tc.node_nic_pairs:
        # because new logic in athena_app is to read policy.json in /data
        # if we want to test default policy.json, we have to clean /data first
        if tc.policy_type == "default":
            api.Logger.info("Test default policy.json")
            api.Logger.info("Clean old policy.json file in /data")
            cmd = "rm -f /data/policy.json"
        else:
            api.Logger.info("Test Custom policy.json")
            if (gen_custom_policy_cfg(tc) != api.types.status.SUCCESS):
                return api.types.status.FAILURE
            api.Logger.info(
                "Copy policy.json file from IOTA dir to /data/ on Naples")
            api.CopyToNaples(node, [tc.gen_custom_plcy_fname], "")
            cmd = "mv /" + GEN_CUSTOM_PLCY_JSON_FNAME + " /data/policy.json"

        req = api.Trigger_CreateExecuteCommandsRequest()
        api.Trigger_AddNaplesCommand(req, node, cmd, nic)
        # sync to make sure the file actually lands on flash before any reboot
        api.Trigger_AddNaplesCommand(req, node, "sync", nic)
        resp = api.Trigger(req)

        # Map a failed command back to which step failed by inspecting the
        # command text (note: the loop variable re-binds `cmd`).
        for cmd in resp.commands:
            api.PrintCommandResults(cmd)
            if cmd.exit_code != 0:
                if 'rm' in cmd.command:
                    api.Logger.error("removing /data/policy.json failed "
                                     "on {}".format((node, nic)))
                    return api.types.status.FAILURE
                if 'mv' in cmd.command:
                    api.Logger.error("moving policy.json to /data/ failed "
                                     "on {}".format((node, nic)))
                    return api.types.status.FAILURE
                if 'sync' in cmd.command:
                    api.Logger.error("sync failed on {}".format((node, nic)))
                    return api.types.status.FAILURE
    return api.types.status.SUCCESS
def Setup(tc):
    """Load node names and interface list from testsuite attributes and push
    the Athena device.json to the bitw node."""
    parse_args(tc)

    # Pull context published by the earlier setup testcase.
    tc.bitw_node_name = api.GetTestsuiteAttr("bitw_node_name")
    tc.wl_node_name = api.GetTestsuiteAttr("wl_node_name")
    tc.intfs = api.GetTestsuiteAttr("inb_mnic_intfs")

    device_json = '%s/nic/conf/athena/device.json' % api.GetTopDir()
    api.CopyToNaples(tc.bitw_node_name, [device_json], "")

    return api.types.status.SUCCESS
def __set_frequency_on_naples():
    """Copy the frequency file to every Naples node and move it into place.

    Returns FAILURE if any of the move commands exits non-zero, else SUCCESS.
    """
    request = api.Trigger_CreateExecuteCommandsRequest(serial=True)

    for host_name in api.GetNaplesHostnames():
        api.CopyToNaples(host_name, [__get_local_file_fullpath()], "")
        move_cmd = "mv /%s %s" % (__get_local_file_name(),
                                  __get_naples_file_path())
        api.Trigger_AddNaplesCommand(request, host_name, move_cmd)

    response = api.Trigger(request)
    status = api.types.status.SUCCESS
    for command in response.commands:
        api.PrintCommandResults(command)
        if command.exit_code != 0:
            status = api.types.status.FAILURE
    return status
def HitlessAddUpgTestAppToJson(node, naples_json_dir):
    """Fetch upgrade_hitless.json from Naples, insert 'upgtestapp' into its
    service discovery and svc_sequence lists, and push the modified file back.

    Returns api.types.status.SUCCESS/FAILURE.
    """
    file = 'upgrade_hitless.json'
    tmp_dir = f"/tmp/{node}/"
    # remove already existing one if there are any
    if os.path.exists(f"{tmp_dir}/{file}"):
        os.remove(f"{tmp_dir}/{file}")
    # copy hitless.json from naples
    api.CopyFromNaples(node, [f"{naples_json_dir}/{file}"], f"{tmp_dir}",
                       via_oob=True)
    if api.GlobalOptions.dryrun:
        return api.types.status.SUCCESS
    if not os.path.exists(f"{tmp_dir}/{file}"):
        api.Logger.error(f"Upgrade json for {node} not found @ {file}")
        return api.types.status.FAILURE

    # delete from the host
    req = api.Trigger_CreateAllParallelCommandsRequest()
    api.Trigger_AddHostCommand(req, node, f"rm -f {file}")
    resp = api.Trigger(req)

    file = f"{tmp_dir}/{file}"
    # Keep a pristine .org copy, then edit via awk/sed in one shell pipeline.
    cmd = "cp %s %s.org && " % (file, file)
    # add upgtestapp to the discovery and serial list
    # NOTE(review): awk inserts an "upgtestapp" entry on the line after the
    # "upg_svc" key; the embedded indentation inside the printed string may
    # have been mangled by reformatting — verify against the original file.
    cmd = cmd + '''awk ' BEGIN { found=0;line=0 } /"upg_svc"/ { found=1;line=0 } /.*/ { if (found == 1 && line == 1) print " \\\"upgtestapp\\\"," } /.*/ { print $0;line=line+1 } ' %s.org > %s && ''' % (file, file)
    # Append ":upgtestapp" to the svc_sequence string value.
    cmd = cmd + '''sed -i 's/"svc_sequence" : "\([a-z:].*\)"/"svc_sequence" : "\\1:upgtestapp"/' "%s"''' % (
        file)
    rv = subprocess.call(cmd, shell=True)
    if rv != 0:
        api.Logger.error(f"Upgrade hitless json modify {cmd} failed")
        return api.types.status.FAILURE

    # copy the modified json back
    resp = api.CopyToNaples(node, [file], "", naples_dir=f"{naples_json_dir}",
                            via_oob=True)
    if resp.api_response.api_status != types_pb2.API_STATUS_OK:
        return api.types.status.FAILURE
    return api.types.status.SUCCESS
def trigger_copy(tc):
    """Copy SRC_FILE to every node (via OOB) and run the move command.

    Returns True on success, False (or delete_file's failure value) otherwise.
    """
    status = delete_file(tc)
    if status != True:
        api.Logger.error("Failed to delete %s before copy" % (DST_FILE))
        return status

    request = api.Trigger_CreateExecuteCommandsRequest(serial=True)
    for target in tc.nodes:
        api.CopyToNaples(target, [SRC_FILE], "", via_oob=True)
        api.Trigger_AddNaplesCommand(request, target, tc.move_cmd)

    response = api.Trigger(request)
    for command in response.commands:
        api.PrintCommandResults(command)
        if command.exit_code != 0:
            api.Logger.error("Failed to copy %s" % (DST_FILE))
            status = False
    return status
def installNaplesFwLatestImage(node, img):
    """Copy the firmware image under nic/ to both the host and Naples /update.

    Returns api.types.status.SUCCESS/FAILURE.
    """
    fullpath = api.GetTopDir() + '/nic/' + img
    api.Logger.info("fullpath for upg image: " + fullpath)

    # Same validation for both copy destinations: response must exist and
    # report API_STATUS_OK.
    copy_ops = (
        lambda: api.CopyToHost(node, [fullpath], ""),
        lambda: api.CopyToNaples(node, [fullpath], "", naples_dir="/update"),
    )
    for do_copy in copy_ops:
        resp = do_copy()
        if resp is None:
            return api.types.status.FAILURE
        if resp.api_response.api_status != types_pb2.API_STATUS_OK:
            api.Logger.error("Failed to copy Drivers to Node: %s" % node)
            return api.types.status.FAILURE

    return api.types.status.SUCCESS
def Main(step):
    """Copy the ARM iperf binary to every Naples node and install it.

    Bug fix: the original set ``result = FAILURE`` on a failed command but
    then unconditionally returned SUCCESS; the status is now propagated.
    """
    if GlobalOptions.skip_setup:
        return api.types.status.SUCCESS

    result = api.types.status.SUCCESS
    req = api.Trigger_CreateExecuteCommandsRequest(serial=True)
    api.ChangeDirectory("iperf")
    for naples_host in api.GetNaplesHostnames():
        api.CopyToNaples(naples_host, [source_file], "")
        api.Trigger_AddNaplesCommand(req, naples_host,
                                     "mv /arm-iperf /usr/bin/iperf")

    resp = api.Trigger(req)
    for cmd in resp.commands:
        api.PrintCommandResults(cmd)
        if cmd.exit_code != 0:
            result = api.types.status.FAILURE
    return result
def showCurrentTimeForValidation(tc):
    """Generate a small python script, run it on Naples, and return the
    Trigger response containing the card's current wall-clock time.

    Bug fix: the generated script called
    ``time.clock_gettime(time.CLOCK_GETTIME)`` — ``time.CLOCK_GETTIME`` does
    not exist, so the script would die with AttributeError on the card. The
    correct constant for wall-clock time is ``time.CLOCK_REALTIME``.
    Also switched the file write to a context manager so the handle is closed
    even if a write fails.
    """
    req = api.Trigger_CreateExecuteCommandsRequest(serial=True)
    pytime = os.path.dirname(os.path.realpath(__file__)) + '/' + "pytime.py"
    with open(pytime, "w") as f:
        f.write("#! /usr/bin/python3\n")
        f.write("import time\n")
        f.write("print(time.clock_gettime(time.CLOCK_REALTIME))")

    resp = api.CopyToNaples(tc.naples, [pytime], "", naples_dir="/data/")
    if resp is None:
        return api.types.status.FAILURE

    cmd = "chmod +x /data/pytime.py; cd /data"
    add_naples_command(req, tc.naples, cmd)
    cmd = "./pytime.py"
    add_naples_command(req, tc.naples, cmd)

    timestamp = api.Trigger(req)
    # Remove the locally generated helper script.
    os.remove(pytime)
    return timestamp
def Setup(tc):
    """Discover the Athena (bitw) and workload nodes, build the inband-mnic
    interface table, stage device_bootstrap.json plus a pre-init hook on the
    bitw node, and reboot it so the bootstrap config takes effect.

    Returns api.types.status.SUCCESS/FAILURE or the RestartNodes status.
    """
    # get node info
    tc.bitw_node_name = None
    tc.wl_node_name = None

    # Assuming only one bitw node and one workload node
    nics = store.GetTopology().GetNicsByPipeline("athena")
    for nic in nics:
        tc.bitw_node_name = nic.GetNodeName()
        break

    api.SetTestsuiteAttr("bitw_node_name", tc.bitw_node_name)

    workloads = api.GetWorkloads()
    if len(workloads) == 0:
        api.Logger.error('No workloads available')
        return api.types.status.FAILURE
    tc.wl_node_name = workloads[0].node_name
    api.SetTestsuiteAttr("wl_node_name", tc.wl_node_name)

    host_intfs = api.GetNaplesHostInterfaces(tc.wl_node_name)
    # Assuming single nic per host
    if len(host_intfs) != 2:
        api.Logger.error('Failed to get host interfaces')
        return api.types.status.FAILURE

    tc.wl = []
    for wl in workloads:
        tc.wl.append(wl)
        api.Logger.info("wl: vlan: {}, mac: {}, ip: {}".format(
            wl.uplink_vlan, wl.mac_address, wl.ip_address))

    # Interface table consumed by later testcases (via testsuite attr).
    # NOTE(review): assumes workloads arrive in a fixed order — wl[0]/wl[1]
    # native on up0/up1 and wl[2]/wl[3] their tagged sub-interfaces; confirm
    # against the topology definition.
    tc.intfs = []
    tc.intfs.append({
        'name': 'inb_mnic0',
        'ip': str(tc.wl[0].ip_address),
        'sub_ip': str(tc.wl[2].ip_address),
        'vlan': str(tc.wl[2].uplink_vlan)
    })
    tc.intfs.append({
        'name': 'inb_mnic1',
        'ip': str(tc.wl[1].ip_address),
        'sub_ip': str(tc.wl[3].ip_address),
        'vlan': str(tc.wl[3].uplink_vlan)
    })
    api.SetTestsuiteAttr("inb_mnic_intfs", tc.intfs)

    # copy device_bootstrap.json to naples
    bootstrap_json_fname = api.GetTopDir(
    ) + '/nic/conf/athena/device_bootstrap.json'
    api.CopyToNaples(tc.bitw_node_name, [bootstrap_json_fname], "")

    # write and copy pensando_pre_init.sh to naples
    f = open('pensando_pre_init.sh', "w")
    f.write('echo "copying device.json"\n')
    f.write('cp /data/device_bootstrap.json /nic/conf/device.json\n')
    f.close()
    api.CopyToNaples(tc.bitw_node_name, ['pensando_pre_init.sh'], "")
    os.remove('pensando_pre_init.sh')

    # move pensando_pre_init.sh to /sysconfig/config0/ and restart Athena Node
    req = api.Trigger_CreateExecuteCommandsRequest()
    cmd = "mv /pensando_pre_init.sh /sysconfig/config0/"
    api.Trigger_AddNaplesCommand(req, tc.bitw_node_name, cmd)
    cmd = "mv /device_bootstrap.json /data/"
    api.Trigger_AddNaplesCommand(req, tc.bitw_node_name, cmd)
    resp = api.Trigger(req)

    # NOTE(review): only resp.commands[0] (the first mv) is checked; a failure
    # of the second mv would go unnoticed — confirm whether this is intended.
    cmd = resp.commands[0]
    api.PrintCommandResults(cmd)
    if cmd.exit_code != 0:
        api.Logger.error("Bootstrap setup failed on node %s" % \
                         tc.bitw_node_name)
        return api.types.status.FAILURE

    # reboot the node
    api.Logger.info("Rebooting {}".format(tc.bitw_node_name))
    return api.RestartNodes([tc.bitw_node_name], 'reboot')
def LoadFirmware(nodes, node_os, target_version):
    """Download (unless 'latest') and install the capri firmware on each node,
    then run sysupdate and restart it.

    Bug fix: the original called ``GetLinuxFwDriverVersion(node)`` *before*
    the ``for node in nodes`` loop, where ``node`` was unbound — a NameError
    on the Linux path. The per-node version query now lives inside the loop.

    Returns api.types.status.SUCCESS/FAILURE (or the DownloadAssets response
    on download failure, preserving the original contract).
    """
    if target_version != 'latest':
        resp = api.DownloadAssets(release_version=target_version)
        if not api.IsApiResponseOk(resp):
            api.Logger.error("Failed to download assets for %s" % target_version)
            return resp

    manifest_file = os.path.join(api.GetTopDir(), 'images',
                                 target_version + '.json')
    image_manifest = parser.JsonParse(manifest_file)
    fw_images = list(filter(lambda x: x.naples_type == "capri",
                            image_manifest.Firmwares))[0]
    if fw_images is None:
        api.Logger.error("Unable to load image manifest")
        return api.types.status.FAILURE
    api.Logger.info("Fullpath for firmware image to load: %s " % fw_images.image)

    fwImgFile = os.path.join(GlobalOptions.topdir, fw_images.image)
    for node in nodes:
        # Query the running firmware version per node (moved inside the loop;
        # see docstring).
        if node_os == OS_TYPE_LINUX:
            fw_version, _ = GetLinuxFwDriverVersion(node)
        else:
            fw_version = None

        if fw_version == '1.1.1-E-15':
            # Naples with Fw 1.1.1-E-15 has no OOB and IntMgmt Ip is fixed
            resp = api.CopyToNaples(node, [fwImgFile], image_manifest.Version,
                                    via_oob=False, naples_dir="data",
                                    nic_mgmt_ip=GetNicIntMgmtIP(node))
            if not api.IsApiResponseOk(resp):
                api.Logger.info("Failed to copy naples_fw.tar with via_oob=True")
                return api.types.status.FAILURE
            for cmd in resp.commands:
                if cmd.exit_code != 0:
                    api.Logger.error("Failed to copy %s naples_fw.tar via_oob=True"
                                     % image_manifest.Version)
                    return api.types.status.FAILURE
        else:
            resp = api.CopyToNaples(node, [fwImgFile], image_manifest.Version,
                                    via_oob=True, naples_dir="data")
            if not api.IsApiResponseOk(resp):
                api.Logger.info("Failed to copy naples_fw.tar with via_oob=True")
                return api.types.status.FAILURE
            for cmd in resp.commands:
                if cmd.exit_code != 0:
                    api.Logger.error("Failed to copy %s naples_fw.tar via_oob=True"
                                     % image_manifest.Version)
                    # Try with via_oob=False
                    resp = api.CopyToNaples(node, [fwImgFile],
                                            image_manifest.Version,
                                            via_oob=False, naples_dir="data")
                    if not api.Trigger_IsSuccess(resp):
                        api.Logger.error("Failed to copy naples_fw.tar to target naples")
                        return api.types.status.FAILURE

        # Flush the image to flash and kick off the firmware update.
        req = api.Trigger_CreateExecuteCommandsRequest(serial=True)
        api.Trigger_AddNaplesCommand(req, node, "sync")
        api.Trigger_AddNaplesCommand(
            req, node, "/nic/tools/sysupdate.sh -p /data/naples_fw.tar",
            timeout=120)
        resp = api.Trigger(req)
        if not api.Trigger_IsSuccess(resp):
            api.Logger.error("sysupdate.sh cmd failed")
            return api.types.status.FAILURE

        api.RestartNodes([node])
        # NOTE(review): these returns exit after the FIRST node, as in the
        # original — confirm whether multi-node installs are expected here.
        if node_os == OS_TYPE_LINUX:
            return LinuxReInitMgmtIP(node)
        elif node_os == OS_TYPE_ESX:
            return ESXiReInitMgmtIP(node)
    return api.types.status.SUCCESS
def startTestUpgApp(node, param):
    """Write a sysmgr.json service spec for the upgrade test app, push it to
    the host and Naples /data, then launch the app in the background.

    Cleanup: removed leftover debug code that re-read the file it had just
    written and printed the comparison to stdout, and corrected the misleading
    "Creating core failed" log (copy-pasted from a core-dump test).

    Returns api.types.status.SUCCESS/FAILURE.
    """
    stopTestUpgApp(node, False)

    # Service entry for /data/sysmgr.json: start the test app after delphi
    # and hal are up, keep stdout on crash.
    data = [{
        "name": "{}".format(UPGRADE_TEST_APP),
        "command": "/update/{} {}".format(UPGRADE_TEST_APP + ".bin", param),
        "dependencies": [{
            "kind": "service",
            "service-name": "delphi"
        }, {
            "kind": "service",
            "service-name": "hal"
        }],
        "flags": ["save_stdout_on_crash"],
        "timeout": 3000.0
    }]

    # Write JSON file
    with open("sysmgr.json", "w") as write_file:
        json.dump(data, write_file)

    fullpath = api.GetTopDir() + '/iota/sysmgr.json'
    api.Logger.info("fullpath for binary: " + fullpath)
    resp = api.CopyToHost(node, [fullpath], "")
    if resp is None:
        return api.types.status.FAILURE
    if resp.api_response.api_status != types_pb2.API_STATUS_OK:
        api.Logger.error("Failed to copy Drivers to Node: %s" % node)
        return api.types.status.FAILURE

    resp = api.CopyToNaples(node, [fullpath], "", naples_dir="/data/")
    if resp is None:
        return api.types.status.FAILURE
    if resp.api_response.api_status != types_pb2.API_STATUS_OK:
        api.Logger.error("Failed to copy Drivers to Node: %s" % node)
        return api.types.status.FAILURE

    # Launch the app directly (in the background) as well.
    req = api.Trigger_CreateExecuteCommandsRequest()
    api.Trigger_AddNaplesCommand(
        req, node,
        "LD_LIBRARY_PATH=/platform/lib:/nic/lib /update/{} {}".format(
            UPGRADE_TEST_APP + ".bin", param),
        background=True)
    resp = api.Trigger(req)
    for cmd_resp in resp.commands:
        api.PrintCommandResults(cmd_resp)
        if cmd_resp.exit_code != 0:
            api.Logger.error("Starting upgrade test app failed {}".format(
                cmd_resp.command))
            return api.types.status.FAILURE
    return api.types.status.SUCCESS
def gen_plcy_cfg_e2e_wl_topo(tc):
    """Build per-node datapath policy.json files for the two-node e2e
    workload topology from a shared template, then push each to its node.

    For every vnic in the template, fills in vlan, slot ids, host MACs,
    underlay rewrite and (depending on vnic type) L2/L3 rewrite fields,
    producing one policy object per node.
    """
    api.SetTestsuiteAttr("node1_dp_policy_json_path", api.GetTopDir() + \
                         E2E_NODE1_DP_PLCY_JSON_PATH)
    api.SetTestsuiteAttr("node2_dp_policy_json_path", api.GetTopDir() + \
                         E2E_NODE2_DP_PLCY_JSON_PATH)
    node1_dp_plcy_json_path = api.GetTestsuiteAttr("node1_dp_policy_json_path")
    node2_dp_plcy_json_path = api.GetTestsuiteAttr("node2_dp_policy_json_path")

    # Get list of workloads for nodes
    nodes = [pair[0] for pair in tc.wl_node_nic_pairs]
    workloads_node1 = api.GetWorkloads(nodes[0])
    workloads_node2 = api.GetWorkloads(nodes[1])

    # Read template policy.json file
    t_plcy_obj = None
    with open(tc.template_policy_json_path) as fd:
        t_plcy_obj = json.load(fd)

    t_vnics = t_plcy_obj['vnic']
    # One independent copy of the template per node.
    n1_plcy_obj = deepcopy(t_plcy_obj)
    n2_plcy_obj = deepcopy(t_plcy_obj)

    for idx, t_vnic in enumerate(t_vnics):
        # Use workloads on up0 for node1 and use workloads
        # on up1 for node2 since they match switch vlan config
        node1_wl = workloads_node1[utils.get_wl_idx(0, idx+1)]
        node2_wl = workloads_node2[utils.get_wl_idx(1, idx+1)]

        #TODO: tmp fix. Need infra query api
        # total vlans = 36, so add 12 for vlan in 2nd grp
        tc.encap_vlan_id = node1_wl.uplink_vlan + 12
        api.Logger.info("idx %s vnic: encap vlan %s" % (
                        idx, tc.encap_vlan_id))
        node1_up0_mac = node1_wl.mac_address
        node2_up1_mac = node2_wl.mac_address

        for node in nodes:
            # Pick the per-node copy of this vnic entry.
            if node == 'node1':
                vnic = n1_plcy_obj['vnic'][idx]
            else:
                vnic = n2_plcy_obj['vnic'][idx]
            vnic_id = vnic['vnic_id']
            api.Logger.info('Setup policy.json file for No.%s vnic '
                            'on node %s' % (vnic_id, node))

            # Directional fields: source/destination slot ids and host MAC
            # are swapped between the two nodes.
            vlan_id, host_mac = None, None
            src_slot_id, dst_slot_id = None, None
            if node == 'node1':
                vlan_id = node1_wl.uplink_vlan
                src_slot_id = _get_slot_id('node1', int(vnic_id))
                dst_slot_id = _get_slot_id('node2', int(vnic_id))
                host_mac = node1_up0_mac
            else:
                vlan_id = node2_wl.uplink_vlan
                src_slot_id = _get_slot_id('node2', int(vnic_id))
                dst_slot_id = _get_slot_id('node1', int(vnic_id))
                host_mac = node2_up1_mac
            api.Logger.info("%s workload for vnic %s: vlan %s, "
                            "host mac %s" % (node, vnic_id, vlan_id, host_mac))

            # these keys need to be changed for both L2 and L3 with or without NAT.
            vnic['vlan_id'] = str(vlan_id)
            vnic['slot_id'] = str(src_slot_id)
            vnic['session']['to_switch']['host_mac'] = str(host_mac)
            vnic['rewrite_underlay']['vlan_id'] = str(tc.encap_vlan_id)
            # The destination slot id lands in a tunnel-type-specific key.
            if vnic['rewrite_underlay']['type'] == 'mplsoudp':
                vnic['rewrite_underlay']['mpls_label2'] = str(dst_slot_id)
            elif vnic['rewrite_underlay']['type'] == 'geneve':
                vnic['rewrite_underlay']['dst_slot_id'] = str(dst_slot_id)

            # only applicable to L3 vnics
            if not utils.is_L2_vnic(vnic):
                if node == 'node1':
                    vnic['rewrite_host']['smac'] = str(node2_up1_mac)
                    vnic['rewrite_host']['dmac'] = str(node1_up0_mac)
                else:
                    vnic['rewrite_host']['smac'] = str(node1_up0_mac)
                    vnic['rewrite_host']['dmac'] = str(node2_up1_mac)

            # only applicable to L2 vnics
            if utils.is_L2_vnic(vnic):
                if node == 'node1':
                    vnic['l2_flows_range']['h2s_mac_lo'] = str(node2_up1_mac)
                    vnic['l2_flows_range']['h2s_mac_hi'] = str(node2_up1_mac)
                    vnic['l2_flows_range']['s2h_mac_lo'] = str(node1_up0_mac)
                    vnic['l2_flows_range']['s2h_mac_hi'] = str(node1_up0_mac)
                else:
                    vnic['l2_flows_range']['h2s_mac_lo'] = str(node1_up0_mac)
                    vnic['l2_flows_range']['h2s_mac_hi'] = str(node1_up0_mac)
                    vnic['l2_flows_range']['s2h_mac_lo'] = str(node2_up1_mac)
                    vnic['l2_flows_range']['s2h_mac_hi'] = str(node2_up1_mac)

    # write modified plcy objects to file
    with open(node1_dp_plcy_json_path, 'w+') as fd:
        json.dump(n1_plcy_obj, fd, indent=4)
    with open(node2_dp_plcy_json_path, 'w+') as fd:
        json.dump(n2_plcy_obj, fd, indent=4)

    # copy both policy.json files to respective nodes
    # (staged through a shared temp path that CopyToNaples expects)
    tmp_plcy_json_path = api.GetTopDir() + DP_PLCY_JSON_PATH
    node, nic = tc.athena_node_nic_pairs[0]
    copyfile(node1_dp_plcy_json_path, tmp_plcy_json_path)
    api.CopyToNaples(node, [tmp_plcy_json_path], "", nic)
    node, nic = tc.athena_node_nic_pairs[1]
    copyfile(node2_dp_plcy_json_path, tmp_plcy_json_path)
    api.CopyToNaples(node, [tmp_plcy_json_path], "", nic)
    os.remove(tmp_plcy_json_path)
def Setup(tc):
    """Tear down the inband datapath on the bitw node: unconfigure mnic
    interfaces, unload the mnic drivers, hot-unplug the PCI device from the
    host via plugctl.sh, and kill athena_app.

    Returns api.types.status.SUCCESS/FAILURE.
    """
    tc.bitw_node_name = api.GetTestsuiteAttr("bitw_node_name")
    tc.intfs = api.GetTestsuiteAttr("inb_mnic_intfs")
    tc.nodes = api.GetNaplesHostnames()

    # copy device.json to naples
    device_json_fname = api.GetTopDir() + '/nic/conf/athena/device.json'
    api.CopyToNaples(tc.bitw_node_name, [device_json_fname], "")

    # copy plugctl.sh to host
    plugctl_fname = api.GetTopDir(
    ) + '/iota/test/athena/testcases/networking/scripts/plugctl.sh'
    api.CopyToHost(tc.bitw_node_name, [plugctl_fname], "")

    # get the IP address of int_mnic and store it
    req = api.Trigger_CreateExecuteCommandsRequest(serial=True)
    tc.int_mnic_ip = None
    cmd = "ifconfig int_mnic0 | grep inet | cut -d ':' -f 2 | cut -d ' ' -f 1"
    api.Trigger_AddNaplesCommand(req, tc.bitw_node_name, cmd)
    resp = api.Trigger(req)
    cmd = resp.commands[0]
    api.PrintCommandResults(cmd)
    if cmd.exit_code != 0:
        api.Logger.error("Failed to get int_mnic0 IP on node %s" % \
                         tc.bitw_node_name)
        return api.types.status.FAILURE
    else:
        tc.int_mnic_ip = str(cmd.stdout)

    # delete pensando_pre_init.sh
    # (touch first so rm cannot fail if the file does not exist)
    req = api.Trigger_CreateExecuteCommandsRequest(serial=True)
    cmd = "cd /sysconfig/config0 && touch pensando_pre_init.sh && rm pensando_pre_init.sh"
    api.Trigger_AddNaplesCommand(req, tc.bitw_node_name, cmd)

    # bring down linux interfaces
    for intf in tc.intfs:
        # unconfigure inb_mnic0 and inb_mnic1
        ip_addr = str(ip_address(intf['ip']) + 1)
        utils.configureNaplesIntf(req, tc.bitw_node_name, intf['name'],
                                  ip_addr, '24', vlan=intf['vlan'],
                                  unconfig=True)
    resp = api.Trigger(req)
    for cmd in resp.commands:
        api.PrintCommandResults(cmd)
        if cmd.exit_code != 0:
            api.Logger.error("Failed to bring down linux interfaces on node %s" % \
                             tc.bitw_node_name)
            return api.types.status.FAILURE

    # unconfigure int_mnic0
    # (run on the console since mnic connectivity is being torn down)
    cmd = "ifconfig int_mnic0 down && ip addr del " + tc.int_mnic_ip
    resp = api.RunNaplesConsoleCmd(tc.nodes[0], cmd)

    # unload drivers
    cmd = "rmmod mnet && rmmod mnet_uio_pdrv_genirq && rmmod ionic_mnic"
    resp = api.RunNaplesConsoleCmd(tc.nodes[0], cmd)

    # run plugctl to gracefully bring down the PCI device on host
    req = api.Trigger_CreateExecuteCommandsRequest(serial=True)
    cmd = "./plugctl.sh out"
    api.Trigger_AddHostCommand(req, tc.bitw_node_name, cmd)
    resp = api.Trigger(req)
    cmd = resp.commands[0]
    api.PrintCommandResults(cmd)
    if cmd.exit_code != 0:
        api.Logger.error("Failed to gracefully bring down the PCI device on host %s" % \
                         tc.bitw_node_name)
        return api.types.status.FAILURE

    # kill athena primary app
    cmd = "pkill athena_app"
    resp = api.RunNaplesConsoleCmd(tc.nodes[0], cmd)

    return api.types.status.SUCCESS
def gen_plcy_cfg_local_wl_topo(tc):
    """Build the datapath policy.json for the single-node (local workload)
    topology from a template and push it to the bitw node.

    For each vnic entry, fills in vlan/MAC fields from the matching pair of
    workloads (one on each uplink), handling L2 vs L3 vnics and the optional
    skip_flow_log flag.
    """
    api.SetTestsuiteAttr("dp_policy_json_path", api.GetTopDir() + \
                         DP_PLCY_JSON_PATH)
    tc.dp_policy_json_path = api.GetTestsuiteAttr("dp_policy_json_path")
    tc.skip_flow_log_vnics = getattr(tc.args, "skip_flow_log_vnics", [])

    # Read template policy.json file
    plcy_obj = None
    with open(tc.template_policy_json_path) as fd:
        plcy_obj = json.load(fd)

    workloads = api.GetWorkloads()
    if len(workloads) == 0:
        api.Logger.error('No workloads available')
        return api.types.status.FAILURE

    host_intfs = api.GetNaplesHostInterfaces(tc.wl_node_name)
    # Assuming single nic per host
    if len(host_intfs) != 2:
        api.Logger.error('Failed to get host interfaces')
        return api.types.status.FAILURE
    tc.host_ifs[(tc.wl_node_name, tc.classic_nic)] = host_intfs
    up0_intf = host_intfs[0]
    up1_intf = host_intfs[1]

    vnics = plcy_obj['vnic']
    for idx, vnic in enumerate(vnics):
        vnic_id = vnic['vnic_id']
        # vnic_type has 2 options: L2 or L3
        tc.vnic_type = 'L2' if "vnic_type" in vnic and vnic['vnic_type'] == 'L2' else 'L3'
        tc.nat = 'yes' if "nat" in vnic else 'no'

        api.Logger.info('Setup policy.json file for No.%s vnic' % (vnic_id))

        up0_vlan, up1_vlan = None, None
        up0_mac, up1_mac = None, None
        # Sentinels so min()/max() string comparison picks the real MACs.
        mac_lo = 'ff:ff:ff:ff:ff:ff'
        mac_hi = '00:00:00:00:00:00'

        # One workload per uplink for this vnic; index layout comes from
        # utils.get_wl_idx (uplink, vnic number).
        wl_up0_idx = utils.get_wl_idx(0, idx+1)
        wl_up1_idx = utils.get_wl_idx(1, idx+1)
        wl_up0 = workloads[wl_up0_idx]
        wl_up1 = workloads[wl_up1_idx]
        if wl_up0.parent_interface == up0_intf:
            up0_vlan = wl_up0.uplink_vlan
            up0_mac = wl_up0.mac_address
        else:
            api.Logger.error('The interface order prediction is wrong')
        if wl_up1.parent_interface == up1_intf:
            up1_vlan = wl_up1.uplink_vlan
            up1_mac = wl_up1.mac_address
        else:
            api.Logger.error('The interface order prediction is wrong')

        if not up0_mac or not up1_mac:
            api.Logger.error('Failed to get workload sub-intf mac addresses')
            return api.types.status.FAILURE
        if not up0_vlan or not up1_vlan:
            api.Logger.error('Failed to get workload sub-intf vlan value')
            return api.types.status.FAILURE

        # Lexicographic min/max of the colon-separated MAC strings.
        mac_lo = min(mac_lo, up0_mac, up1_mac)
        mac_hi = max(mac_hi, up0_mac, up1_mac)

        api.Logger.info('Workload0: up0_intf %s up0_vlan %s up0_mac %s' % (
            up0_intf, up0_vlan, up0_mac))
        api.Logger.info('Workload1: up1_intf %s up1_vlan %s up1_mac %s' % (
            up1_intf, up1_vlan, up1_mac))
        api.Logger.info('mac_lo %s mac_hi %s' % (mac_lo, mac_hi))

        # these keys need to be changed for both L2 and L3 with or without NAT.
        vnic['vlan_id'] = str(up1_vlan)
        vnic['rewrite_underlay']['vlan_id'] = str(up0_vlan)
        vnic['session']['to_switch']['host_mac'] = str(up1_mac)
        vnic['rewrite_underlay']['dmac'] = str(up0_mac)

        # these fields need to be changed only for L3
        if tc.vnic_type == 'L3':
            vnic['rewrite_host']['dmac'] = str(up1_mac)

        # only applicable to L2 vnics
        if tc.vnic_type == 'L2':
            vnic['l2_flows_range']['h2s_mac_lo'] = str(mac_lo)
            vnic['l2_flows_range']['h2s_mac_hi'] = str(mac_hi)
            vnic['l2_flows_range']['s2h_mac_lo'] = str(mac_lo)
            vnic['l2_flows_range']['s2h_mac_hi'] = str(mac_hi)

        # Set skip_flow_log if vnic is part of the skip_flow_log_vnics
        if int(vnic_id) in tc.skip_flow_log_vnics:
            api.Logger.info('Setting skip_flow_log for vnic %d' % (
                int(vnic_id)))
            vnic['session']['skip_flow_log'] = "true"

    # write vlan/mac addr and flow info to actual file
    with open(tc.dp_policy_json_path, 'w+') as fd:
        json.dump(plcy_obj, fd, indent=4)

    # copy policy.json file to node
    api.CopyToNaples(tc.bitw_node_name, [tc.dp_policy_json_path], "")
def CopyMemStatsTool(self):
    """Push the ps_mem.py memory-stats tool to this object's Naples node."""
    target_node = self.node
    api.CopyToNaples(target_node, [MemStatsToolHostPath], "",
                     naples_dir=MemStatsToolNaplesPath)
    api.Logger.info(
        "Successfully copied ps_mem.py tool to naples: %s" % target_node)
    return api.types.status.SUCCESS
def Main(step):
    """Switch all Naples nodes from HOST to NETWORK (venice-managed) mode:
    mark emulation, push console config, trigger the mode change, poll the
    transition phase, reboot nodes that need it, then verify hostpin mode and
    the agent's reported mode before re-enabling ssh.

    Returns api.types.status.SUCCESS/FAILURE (or enable_ssh.Main's status).
    """
    if GlobalOptions.skip_setup:
        #No mode switch required for skeip setup
        return api.types.status.SUCCESS
    req = api.Trigger_CreateExecuteCommandsRequest(serial = True)
    uuidMap = api.GetNaplesNodeUuidMap()
    nodes = api.GetNaplesHostnames()
    for n in nodes:
        # Touch a file to indicate to NMD that the current mode is emulation
        cmd = "touch /data/iota-emulation"
        api.Trigger_AddNaplesCommand(req, n, cmd)
        # Make sure console is enabled
        CreateConfigConsoleNoAuth()
        api.CopyToNaples(n, [NAPLES_CONFIG_SPEC_LOCAL], "")
        cmd = "mv /system-config.json /sysconfig/config0/system-config.json"
        api.Trigger_AddNaplesCommand(req, n, cmd)
        if common.PenctlGetModeStatus(n) != "NETWORK" or common.PenctlGetTransitionPhaseStatus(n) != "VENICE_REGISTRATION_DONE":
            api.Logger.info("Host [{}] is in HOST mode. Initiating mode change.".format(n))
            ret = common.SetNaplesModeOOB_Static(n, "1.1.1.1", "1.1.1.2/24")
            if ret == None:
                return api.types.status.FAILURE
        #hack for now, need to set date
        cmd = "date -s '{}'".format(datetime.datetime.utcnow().strftime("%Y-%m-%d %H:%M:%S"))
        api.Trigger_AddNaplesCommand(req, n, cmd)
    resp = api.Trigger(req)
    for cmd in resp.commands:
        api.PrintCommandResults(cmd)
        if cmd.exit_code != 0:
            return api.types.status.FAILURE
    # Poll each node's transition phase until every node is either queued for
    # reboot or already transitioned (nodes are removed from the list as they
    # resolve).
    num_retries = 60
    reboot_nodes = []
    while nodes:
        req = api.Trigger_CreateExecuteCommandsRequest(serial = True)
        for n in nodes:
            #hack for now, need to set date
            api.Logger.info("Checking Transition phase for node : %s" % n)
            check_state_cmd = "show naples --json"
            time.sleep(30)
            common.AddPenctlCommand(req, n, check_state_cmd)
            api.Trigger_AddNaplesCommand(req, n, "touch /data/no_watchdog")
            resp = api.Trigger(req)
            # Only the first command (the penctl query) is inspected.
            cmd = resp.commands[0]
            api.PrintCommandResults(cmd)
            if cmd.exit_code != 0:
                return api.types.status.FAILURE
            try:
                out = json.loads(cmd.stdout)
            except:
                api.Logger.error("Penctl output not in Json format {}".format(cmd.stdout))
                return api.types.status.FAILURE
            # NOTE(review): the branches below mutate `nodes` while the
            # enclosing `for n in nodes` iterates it, which skips the element
            # following each removal on that pass (they are re-examined on the
            # next `while` iteration) — likely unintended; consider iterating
            # over a copy.
            if not hal_show_utils.IsNaplesForwardingModeClassic(n):
                api.Logger.info("Dataplane already in HOSTPIN mode. Skipping node [{}] for reboot.".format(n))
                reboot_nodes.append(n)
                nodes.remove(n)
            elif out["status"]["transition-phase"] == "VENICE_UNREACHABLE":
                api.Logger.info("Reboot pending on node : %s" % n)
                reboot_nodes.append(n)
                nodes.remove(n)
            elif out["status"]["transition-phase"] == "VENICE_REGISTRATION_DONE":
                api.Logger.info("Node already transitioned : %s" % n)
                nodes.remove(n)
            else:
                api.Logger.info("Reboot not pending on node : %s" % n)
        time.sleep(1)
        num_retries = num_retries - 1
        if num_retries == 0:
            api.Logger.error("Reboot pending state not transitioned complete on naples")
            return api.types.status.FAILURE
    ret = api.RestartNodes(reboot_nodes)
    if ret != api.types.status.SUCCESS:
        api.Logger.error("Node restart failed")
        return api.types.status.FAILURE
    req = api.Trigger_CreateExecuteCommandsRequest(serial = True)
    #enable_sshd = "system enable-sshd"
    #copy_key = "update ssh-pub-key -f ~/.ssh/id_rsa.pub"
    for n in api.GetNaplesHostnames():
        #hack for now, need to set date
        cmd = "date -s '{}'".format(datetime.datetime.utcnow().strftime("%Y-%m-%d %H:%M:%S"))
        api.Trigger_AddNaplesCommand(req, n, cmd)
        #common.AddPenctlCommand(req, n, enable_sshd)
        #common.AddPenctlCommand(req, n, copy_key)
    resp = api.Trigger(req)
    for cmd in resp.commands:
        api.PrintCommandResults(cmd)
        if cmd.exit_code != 0:
            # NOTE(review): `result` is assigned but never returned from this
            # function — failures here are effectively ignored; confirm intent.
            result = api.types.status.FAILURE
    #Give some time for naples to admit to venice after mode switch
    #This is required for now as Iota is setting time manually to make TLS happy
    time.sleep(30)
    #Check whether naples has switch mode succesfully
    # NOTE(review): `nodes` has been emptied by the polling loop above, so the
    # two verification loops below iterate zero nodes — verify whether they
    # should use api.GetNaplesHostnames() instead.
    req = api.Trigger_CreateExecuteCommandsRequest(serial = False)
    for n in nodes:
        cmd = "cat /sysconfig/config0/app-start.conf | grep hostpin"
        api.Trigger_AddNaplesCommand(req, n, cmd)
    resp = api.Trigger(req)
    for cmd in resp.commands:
        api.PrintCommandResults(cmd)
        if cmd.exit_code != 0:
            api.Logger.error("Mode switch failed on node : {}".format(cmd.node_name))
            result = api.types.status.FAILURE
    #Verify Agent in right mode.
    req = api.Trigger_CreateExecuteCommandsRequest(serial = False)
    for n in nodes:
        cmd = "curl localhost:8888/api/system/info/"
        api.Trigger_AddNaplesCommand(req, n, cmd)
        # Delete the iota-emulation file created earlied for NMD. Mode change would have passed by now if it had to.
        cmd = "rm -f /data/iota-emulation"
        api.Trigger_AddNaplesCommand(req, n, cmd)
    resp = api.Trigger(req)
    for cmd in resp.commands:
        api.PrintCommandResults(cmd)
        if cmd.exit_code != 0:
            api.Logger.error("Agent system get failed : {}".format(cmd.node_name))
            result = api.types.status.FAILURE
        out = None
        try:
            out = json.loads(cmd.stdout)
        except:
            api.Logger.error("Agent System get out failed {}".format(cmd.stdout))
            return api.types.status.FAILURE
        if out["naples-mode"] != "NETWORK_MANAGED_OOB":
            api.Logger.error("Agent not in correct mode: {} {} ".format(cmd.node_name, out["naples-mode"]))
            return api.types.status.FAILURE
    api.Logger.info("Trying to re-enable ssh on naples nodes")
    return enable_ssh.Main(None)