def Trigger(tc):
    """Scale test: repeatedly push, update, delete and remove paired
    Mirror-Session and Flow-Export (flowmon) telemetry configs.

    Args:
        tc: testcase context. ``tc.args.iters`` optionally overrides the
            number of push/update/delete iterations per policy pair
            (default 10).

    Returns:
        api.types.status.SUCCESS when every iteration completes, FAILURE
        on the first push/update/delete/remove error.
    """
    result = api.types.status.SUCCESS
    mirrorPolicies = utils.GetTargetJsons('mirror', "scale")
    flowmonPolicies = utils.GetTargetJsons('flowmon', "scale")
    #colPolicies = utils.GetTargetJsons('mirror', "collector")
    # Fix: a leftover debug override ("iters = 1") used to clobber this
    # knob right after it was read; honor tc.args.iters as intended.
    iters = getattr(tc.args, "iters", 10)
    for mp_json, fp_json in zip(mirrorPolicies, flowmonPolicies):
        for i in range(iters):
            #
            # Push Mirror Session and Flow Export objects
            #
            mpObjs = agent_api.AddOneConfig(mp_json)
            fpObjs = agent_api.AddOneConfig(fp_json)
            ret = agent_api.PushConfigObjects(mpObjs + fpObjs)
            if ret != api.types.status.SUCCESS:
                api.Logger.error("Failed to push the telemetry objects")
                return api.types.status.FAILURE

            #
            # Update Mirror Session and Flow Export objects
            #
            mpObjs = UpdateMirrorSessionObjects(mpObjs)
            fpObjs = UpdateFlowMonitorObjects(fpObjs)
            ret = agent_api.UpdateConfigObjects(mpObjs + fpObjs)
            if ret != api.types.status.SUCCESS:
                api.Logger.error("Failed to update the telemetry objects")
                return api.types.status.FAILURE

            #
            # Delete Mirror Session and Flow Export objects
            #
            ret = agent_api.DeleteConfigObjects(fpObjs + mpObjs)
            if ret != api.types.status.SUCCESS:
                api.Logger.error("Failed to delete the telemetry objects")
                return api.types.status.FAILURE
            ret = agent_api.RemoveConfigObjects(mpObjs + fpObjs)
            if ret != api.types.status.SUCCESS:
                api.Logger.error("Failed to remove the telemetry objects")
                return api.types.status.FAILURE
    return result
def Setup(tc):
    """Set up the mirror testcase: push the first mirror policy, locate the
    collector workload and prepare the inband interface for uplink-flap
    testing.

    Returns SUCCESS/FAILURE; on an uplink-not-UP or classic/unified NIC
    mode condition the flap portion is skipped (tc.skip_flap = True)
    rather than failed.
    """
    # Per-testcase state consumed by Trigger/Verify.
    tc.skip_flap = False
    tc.newObjects = None
    tc.collector_ip = []
    tc.collector_wl = []
    tc.collector_type = []
    tc.wl_sec_ip_info = defaultdict(lambda: dict())
    tc.IsBareMetal = utils.IsBareMetal()
    tc.port_down_time = getattr(tc.args, "port_down_time", 60)

    # Only the first policy file for the iterated protocol is exercised here.
    policies = utils.GetTargetJsons('mirror', tc.iterators.proto)
    policy_json = policies[0]
    tc.verif_json = utils.GetVerifJsonFromPolicyJson(policy_json)

    # Push Mirror objects
    tc.newObjects = agent_api.AddOneConfig(policy_json)
    ret = agent_api.PushConfigObjects(tc.newObjects)
    if ret != api.types.status.SUCCESS:
        api.Logger.error("Unable to push mirror objects")
        # Clear so Teardown does not try to delete objects that never landed.
        tc.newObjects = None
        return api.types.status.FAILURE

    # Populate secondary IP (must precede collector lookup, which may match
    # on secondary addresses).
    utils.PopulateSecondaryAddress(tc)

    # Get collector
    ret = GetCollectorWorkloadFromObjects(tc)
    if ret != api.types.status.SUCCESS:
        api.Logger.error("Failed to get collector workload")
        return ret

    # If any uplink is already down we cannot meaningfully flap; skip the
    # flap part but let the rest of the test proceed.
    ret = utils.DetectUpLinkState(api.GetNaplesHostnames(),
                                  utils.PORT_OPER_STATUS_UP, all)
    if ret != api.types.status.SUCCESS:
        api.Logger.error("All uplink on Nodes are not in UP state.")
        tc.skip_flap = True
        return api.types.status.SUCCESS

    # Classic/unified NIC modes do not support the uplink flap either.
    if api.GetConfigNicMode() in ["classic", "unified"]:
        api.Logger.info(
            f"NIC mode: {api.GetConfigNicMode()}, Skipping uplink flap")
        tc.skip_flap = True

    api.Logger.info("All uplink on Nodes are UP!")
    # Bring up inband and reset the active link on bond.
    ret = utils.SetupInbandInterface()
    if ret != api.types.status.SUCCESS:
        return ret
    return api.types.status.SUCCESS
def Trigger(tc):
    """Combined LIF-ERSPAN + Flow-ERSPAN + flowmon (IPFIX) trigger.

    For each single-collector mirror template, pairs it with the next
    single-export flowmon template, pushes LIF-collector / mirror /
    flowmon configs to the Naples under test, captures ERSPAN and IPFIX
    traffic with tcpdump on the collector workloads, injects traffic,
    then validates packet counts and config cleanup.

    Returns SUCCESS/FAILURE; sets tc.error on config-push problems.
    """
    if tc.ignore == True:
        return api.types.status.SUCCESS

    if tc.error == True:
        return api.types.status.FAILURE

    protoDir1 = api.GetTopologyDirectory() +\
                "/gen/telemetry/{}/{}".format('mirror', 'lif')
    api.Logger.info("Template Config files location: ", protoDir1)
    protoDir2 = api.GetTopologyDirectory() +\
                "/gen/telemetry/{}/{}".format('mirror', tc.iterators.proto)
    api.Logger.info("Template Config files location: ", protoDir2)
    protoDir3 = api.GetTopologyDirectory() +\
                "/gen/telemetry/{}/{}".format('flowmon', tc.iterators.proto)
    api.Logger.info("Template Config files location: ", protoDir3)

    result = api.types.status.SUCCESS
    count = 0

    MirrorPolicies = utils.GetTargetJsons('mirror', tc.iterators.proto)
    FlowMonPolicies = utils.GetTargetJsons('flowmon', tc.iterators.proto)
    LifPolicies = utils.GetTargetJsons('mirror', 'lif')
    # flowmon_policy_idx pairs each mirror template with the next unused
    # flowmon template across outer-loop iterations.
    flowmon_policy_idx = 0
    ret_count = 0
    for mirror_json in MirrorPolicies:
        #
        # Get template-Mirror Config
        #
        newMirrorObjects = agent_api.AddOneConfig(mirror_json)
        if len(newMirrorObjects) == 0:
            api.Logger.error("Adding new Mirror objects to store failed")
            tc.error = True
            return api.types.status.FAILURE
        agent_api.RemoveConfigObjects(newMirrorObjects)

        #
        # Ignore Multi-collector template config's, since Expanded-Telemetry
        # testbundle dynamically creates such config's
        #
        if len(newMirrorObjects[0].spec.collectors) > 1:
            continue

        idx = 0
        for flowmon_json in FlowMonPolicies:
            # Skip flowmon templates already consumed by earlier mirror jsons.
            if idx < flowmon_policy_idx:
                idx += 1
                continue

            #
            # Get template-FlowMon Config
            #
            newFlowMonObjects = agent_api.AddOneConfig(flowmon_json)
            if len(newFlowMonObjects) == 0:
                api.Logger.error("Adding new FlowMon objects to store failed")
                tc.error = True
                return api.types.status.FAILURE
            agent_api.RemoveConfigObjects(newFlowMonObjects)

            #
            # Ignore Multi-collector template config's, since Expanded-Telemetry
            # testbundle dynamically creates such config's
            #
            if len(newFlowMonObjects[0].spec.exports) > 1:
                flowmon_policy_idx += 1
                idx += 1
                continue

            #
            # Modify template-Mirror / template-FlowMon Config to make sure
            # that Naples-node act as either source or destination
            #
            # Set up Collector in the remote node
            #
            eutils.generateMirrorConfig(tc, mirror_json, newMirrorObjects)
            eutils.generateFlowMonConfig(tc, flowmon_json, newFlowMonObjects)

            ret_count = 0
            for i in range(0, len(tc.mirror_verif)):
                #
                # If Execution-Optimization is enabled, no need to run the
                # test for the same protocol more than once
                #
                if i > 0 and tc.mirror_verif[i]['protocol'] ==\
                             tc.mirror_verif[i-1]['protocol']:
                    continue

                #
                # Flow-ERSPAN for TCP-traffic is not tested (yet) in
                # Classic-mode until applicable pkt-trigger tools are
                # identified
                #
                if tc.classic_mode == True and\
                   tc.mirror_verif[i]['protocol'] == 'tcp':
                    continue

                for policy_json in LifPolicies:
                    #
                    # Get template-Mirror Config
                    #
                    newObjects = agent_api.AddOneConfig(policy_json)
                    if len(newObjects) == 0:
                        api.Logger.error("Adding new objects to store failed")
                        tc.error = True
                        return api.types.status.FAILURE

                    #
                    # Modify template-Mirror Config to make sure that
                    # Naples-node act as either source or destination
                    #
                    # Set up Collector in the remote node
                    #
                    if newObjects[0].kind == 'InterfaceMirrorSession':
                        tc.lif_collector_objects = newObjects
                        agent_api.RemoveConfigObjects(tc.lif_collector_objects)
                    elif newObjects[0].kind == 'Interface':
                        tc.interface_objects = newObjects
                        agent_api.RemoveConfigObjects(tc.interface_objects)

                #
                # Push Collector Config to Naples
                #
                colObjects = tc.lif_collector_objects
                ret = eutils.generateLifCollectorConfig(tc, colObjects)
                if ret != api.types.status.SUCCESS:
                    api.Logger.error("Unable to identify Collector Workload")
                    tc.error = True
                    return api.types.status.FAILURE

                ret = agent_api.PushConfigObjects(colObjects,
                                                  [tc.naples.node_name],
                                                  [tc.naples_device_name])
                if ret != api.types.status.SUCCESS:
                    api.Logger.error("Unable to push collector objects")
                    tc.error = True
                    return api.types.status.FAILURE

                #
                # Push Mirror / FlowMon Config to Naples.  On any failure,
                # everything already pushed is deleted before bailing out.
                #
                ret = agent_api.PushConfigObjects(newMirrorObjects,
                                                  [tc.naples.node_name],
                                                  [tc.naples_device_name])
                if ret != api.types.status.SUCCESS:
                    agent_api.DeleteConfigObjects(tc.lif_collector_objects,
                                                  [tc.naples.node_name],
                                                  [tc.naples_device_name])
                    api.Logger.error("Unable to push mirror objects")
                    tc.error = True
                    return api.types.status.FAILURE

                ret = agent_api.PushConfigObjects(newFlowMonObjects,
                                                  [tc.naples.node_name],
                                                  [tc.naples_device_name])
                if ret != api.types.status.SUCCESS:
                    agent_api.DeleteConfigObjects(tc.lif_collector_objects,
                                                  [tc.naples.node_name],
                                                  [tc.naples_device_name])
                    agent_api.DeleteConfigObjects(newMirrorObjects,
                                                  [tc.naples.node_name],
                                                  [tc.naples_device_name])
                    api.Logger.error("Unable to push flowmon objects")
                    tc.error = True
                    return api.types.status.FAILURE

                #
                # Update Interface objects
                #
                ifObjects = tc.interface_objects
                ret = eutils.generateLifInterfaceConfig(
                      tc, ifObjects, tc.lif_collector_objects)
                if ret != api.types.status.SUCCESS:
                    agent_api.DeleteConfigObjects(tc.lif_collector_objects,
                                                  [tc.naples.node_name],
                                                  [tc.naples_device_name])
                    agent_api.DeleteConfigObjects(newMirrorObjects,
                                                  [tc.naples.node_name],
                                                  [tc.naples_device_name])
                    agent_api.DeleteConfigObjects(newFlowMonObjects,
                                                  [tc.naples.node_name],
                                                  [tc.naples_device_name])
                    api.Logger.error(
                    "Unable to identify Uplink/LIF Interfaces")
                    tc.error = True
                    return api.types.status.FAILURE

                ret = agent_api.UpdateConfigObjects(ifObjects,
                                                    [tc.naples.node_name],
                                                    [tc.naples_device_name])
                if ret != api.types.status.SUCCESS:
                    agent_api.DeleteConfigObjects(tc.lif_collector_objects,
                                                  [tc.naples.node_name],
                                                  [tc.naples_device_name])
                    agent_api.DeleteConfigObjects(newMirrorObjects,
                                                  [tc.naples.node_name],
                                                  [tc.naples_device_name])
                    agent_api.DeleteConfigObjects(newFlowMonObjects,
                                                  [tc.naples.node_name],
                                                  [tc.naples_device_name])
                    api.Logger.error("Unable to update interface objects")
                    tc.error = True
                    return api.types.status.FAILURE

                #
                # Establish Forwarding set up between Naples-peer and
                # Collectors
                #
                eutils.establishForwardingSetup(tc)

                #
                # Give a little time for flows clean-up to happen so that
                # stale IPFIX records don't show up
                #
                if tc.classic_mode == True:
                    time.sleep(1)

                if tc.collection == 'distinct':
                    req_tcpdump_flow_erspan = \
                    api.Trigger_CreateExecuteCommandsRequest(serial = True)
                    for c in range(0, len(tc.flow_collector)):
                        #
                        # Set up TCPDUMP's on the collector
                        #
                        idx = tc.flow_collector_idx[c]
                        if tc.flow_collector[c].IsNaples():
                            cmd = "tcpdump -c 600 -XX -vv -nni {} ip proto\
                            gre and dst {} --immediate-mode -U\
                            -w flow-mirror-{}.pcap"\
                            .format(tc.flow_collector[c].interface,
                                    tc.collector_ip_address[idx], c)
                        else:
                            cmd = "tcpdump -p -c 600 -XX -vv -nni {} ip\
                            proto gre and dst {} --immediate-mode -U\
                            -w flow-mirror-{}.pcap"\
                            .format(tc.flow_collector[c].interface,
                                    tc.collector_ip_address[idx], c)
                        eutils.add_command(req_tcpdump_flow_erspan,
                                           tc.flow_collector[c], cmd, True)

                    resp_tcpdump_flow_erspan = api.Trigger(\
                                               req_tcpdump_flow_erspan)
                    for cmd in resp_tcpdump_flow_erspan.commands:
                        api.PrintCommandResults(cmd)

                    #
                    # Classic mode requires a delay to make sure that TCPDUMP
                    # background process is fully up
                    #
                    if tc.classic_mode == True:
                        time.sleep(1)

                req_tcpdump_lif_erspan = \
                api.Trigger_CreateExecuteCommandsRequest(serial = True)
                req_tcpdump_flowmon = api.Trigger_CreateExecuteCommandsRequest(\
                                      serial = True)
                for c in range(0, len(tc.lif_collector)):
                    #
                    # Set up TCPDUMP's on the collector
                    #
                    idx = tc.lif_collector_idx[c]
                    if tc.lif_collector[c].IsNaples():
                        cmd = "tcpdump -c 1000 -XX -vv -nni {} ip proto gre\
                        and dst {} --immediate-mode -U\
                        -w lif-mirror-{}.pcap"\
                        .format(tc.lif_collector[c].interface,
                                tc.collector_ip_address[idx], c)
                    else:
                        cmd = "tcpdump -p -c 1000 -XX -vv -nni {} ip proto\
                        gre and dst {} --immediate-mode -U\
                        -w lif-mirror-{}.pcap"\
                        .format(tc.lif_collector[c].interface,
                                tc.collector_ip_address[idx], c)
                    eutils.add_command(req_tcpdump_lif_erspan,
                                       tc.lif_collector[c], cmd, True)

                for c in range(0, len(tc.flowmon_collector)):
                    #
                    # Set up TCPDUMP's on the collector
                    #
                    idx = tc.flowmon_collector_idx[c]
                    if tc.flowmon_collector[c].IsNaples():
                        cmd = "tcpdump -c 600 -XX -vv -nni {} udp and\
                        dst port {} and dst {} --immediate-mode\
                        -U -w flowmon-{}.pcap"\
                        .format(tc.flowmon_collector[c].interface,
                                tc.export_port[c],
                                tc.collector_ip_address[idx], c)
                    else:
                        cmd = "tcpdump -p -c 600 -XX -vv -nni {} udp\
                        and dst port {} and dst {}\
                        --immediate-mode -U -w flowmon-{}.pcap"\
                        .format(tc.flowmon_collector[c].interface,
                                tc.export_port[c],
                                tc.collector_ip_address[idx], c)
                    eutils.add_command(req_tcpdump_flowmon,
                                       tc.flowmon_collector[c], cmd, True)

                resp_tcpdump_lif_erspan = api.Trigger(\
                                          req_tcpdump_lif_erspan)
                for cmd in resp_tcpdump_lif_erspan.commands:
                    api.PrintCommandResults(cmd)

                #
                # Classic mode requires a delay to make sure that TCPDUMP
                # background process is fully up
                #
                if tc.classic_mode == True:
                    time.sleep(2)

                resp_tcpdump_flowmon = api.Trigger(req_tcpdump_flowmon)
                for cmd in resp_tcpdump_flowmon.commands:
                    api.PrintCommandResults(cmd)

                #
                # Classic mode requires a delay to make sure that TCPDUMP
                # background process is fully up
                #
                if tc.classic_mode == True:
                    time.sleep(2)

                #
                # Trigger packets for ERSPAN / FLOWMON to take effect.
                # tc.protocol is temporarily forced to 'all' so the traffic
                # helper exercises every protocol, then restored.
                #
                tc.protocol = tc.mirror_verif[i]['protocol']
                tc.dest_port = utils.GetDestPort(tc.mirror_verif[i]['port'])

                protocol = tc.protocol
                tc.protocol = 'all'
                if api.GetNodeOs(tc.naples.node_name) == 'linux':
                    eutils.triggerTrafficInClassicModeLinux(tc)
                else:
                    eutils.triggerTrafficInHostPinModeOrFreeBSD(tc)
                tc.protocol = protocol

                #
                # Dump sessions/flows/P4-tables for debug purposes
                #
                eutils.showSessionAndP4TablesForDebug(tc, tc.lif_collector,
                                                      tc.lif_collector_idx)

                #
                # Terminate TCPDUMP background process
                #
                term_resp_tcpdump_lif_erspan = \
                api.Trigger_TerminateAllCommands(resp_tcpdump_lif_erspan)
                tc.resp_tcpdump_lif_erspan = \
                api.Trigger_AggregateCommandsResponse(\
                resp_tcpdump_lif_erspan, term_resp_tcpdump_lif_erspan)

                if tc.collection == 'distinct':
                    term_resp_tcpdump_flow_erspan = \
                    api.Trigger_TerminateAllCommands(resp_tcpdump_flow_erspan)
                    tc.resp_tcpdump_flow_erspan = \
                    api.Trigger_AggregateCommandsResponse(\
                    resp_tcpdump_flow_erspan, term_resp_tcpdump_flow_erspan)

                term_resp_tcpdump_flowmon = api.Trigger_TerminateAllCommands(\
                                            resp_tcpdump_flowmon)
                tc.resp_tcpdump_flowmon = \
                api.Trigger_AggregateCommandsResponse(resp_tcpdump_flowmon,
                                                      term_resp_tcpdump_flowmon)

                # Delete the objects
                eutils.deGenerateLifInterfaceConfig(tc, tc.interface_objects,
                                                    tc.lif_collector_objects)
                agent_api.UpdateConfigObjects(tc.interface_objects,
                                              [tc.naples.node_name],
                                              [tc.naples_device_name])
                agent_api.DeleteConfigObjects(tc.lif_collector_objects,
                                              [tc.naples.node_name],
                                              [tc.naples_device_name])
                agent_api.DeleteConfigObjects(newMirrorObjects,
                                              [tc.naples.node_name],
                                              [tc.naples_device_name])
                agent_api.DeleteConfigObjects(newFlowMonObjects,
                                              [tc.naples.node_name],
                                              [tc.naples_device_name])

                #
                # Make sure that Mirror-config has been removed
                #
                tc.resp_cleanup = eutils.showP4TablesForValidation(tc)

                #
                # Validate ERSPAN packets reception.  Expected counts start
                # from the bidirectional baseline (hence the << 1 doubling)
                # and are halved for unidirectional runs; dupcheck-disable
                # doubles them again.
                #
                tc.tcp_erspan_pkts_expected = \
                NUMBER_OF_TCP_ERSPAN_PACKETS_PER_SESSION
                tc.udp_erspan_pkts_expected = (tc.udp_count << 1)
                tc.icmp_erspan_pkts_expected = (tc.udp_count << 1)+\
                                               (tc.icmp_count << 1)
                if tc.iterators.direction != 'both':
                    tc.tcp_erspan_pkts_expected >>= 1
                    tc.udp_erspan_pkts_expected >>= 1
                    tc.icmp_erspan_pkts_expected >>= 1
                if tc.dupcheck == 'disable':
                    tc.tcp_erspan_pkts_expected = \
                    (tc.tcp_erspan_pkts_expected+1) << 1
                    tc.udp_erspan_pkts_expected <<= 1
                    tc.icmp_erspan_pkts_expected <<= 1

                #
                # Adjust Expected-pkt-counts taking into account Flow-ERSPAN
                # Config's
                #
                if tc.collection == 'unified':
                    if (tc.protocol == 'tcp' or tc.iterators.proto == 'mixed')\
                       and tc.iterators.direction != 'both':
                        tc.tcp_erspan_pkts_expected <<= 1
                    if (tc.protocol == 'udp' or tc.iterators.proto == 'mixed')\
                       and tc.iterators.direction != 'both':
                        tc.udp_erspan_pkts_expected <<= 1
                    if tc.protocol == 'icmp' or tc.iterators.proto == 'mixed':
                        if tc.iterators.direction != 'both':
                            tc.icmp_erspan_pkts_expected <<= 1
                        #if tc.iterators.direction != 'egress':
                        #    tc.icmp_erspan_pkts_expected += 1

                protocol = tc.protocol
                tc.protocol = 'all'
                tc.feature = 'lif-erspan'
                tc.resp_tcpdump_erspan = tc.resp_tcpdump_lif_erspan
                res_1 = eutils.validateErspanPackets(tc, tc.lif_collector,
                                                     tc.lif_collector_idx)

                if tc.collection == 'distinct':
                    # Re-derive expected counts for the flow-ERSPAN capture
                    # only (per-protocol, not unified).
                    tc.tcp_erspan_pkts_expected = 0
                    tc.udp_erspan_pkts_expected = 0
                    tc.icmp_erspan_pkts_expected = 0
                    tc.protocol = protocol
                    if tc.protocol == 'tcp' or tc.iterators.proto == 'mixed':
                        tc.tcp_erspan_pkts_expected = \
                        NUMBER_OF_TCP_ERSPAN_PACKETS_PER_SESSION
                    if tc.protocol == 'udp' or tc.iterators.proto == 'mixed':
                        tc.udp_erspan_pkts_expected = (tc.udp_count << 1)
                    if tc.protocol == 'icmp' or tc.iterators.proto == 'mixed':
                        tc.icmp_erspan_pkts_expected = (tc.udp_count << 1)+\
                                                       (tc.icmp_count << 1)
                    tc.protocol = 'all'
                    tc.feature = 'flow-erspan'
                    tc.resp_tcpdump_erspan = tc.resp_tcpdump_flow_erspan
                    res_f = eutils.validateErspanPackets(
                            tc, tc.flow_collector, tc.flow_collector_idx)
                    if res_f == api.types.status.FAILURE:
                        result = api.types.status.FAILURE

                #
                # Validate IPFIX packets reception
                #
                tc.feature = 'flowmon'
                res_2 = eutils.validateIpFixPackets(tc)
                tc.protocol = protocol

                #
                # Validate Config-cleanup
                #
                res_3 = eutils.validateConfigCleanup(tc)

                if res_1 == api.types.status.FAILURE or\
                   res_2 == api.types.status.FAILURE or\
                   res_3 == api.types.status.FAILURE:
                    result = api.types.status.FAILURE

                if result == api.types.status.FAILURE:
                    break

                ret_count += 1

            # Consume this flowmon template and move to the next mirror json.
            flowmon_policy_idx += 1
            break

        if result == api.types.status.FAILURE:
            break

        count += ret_count

    tc.SetTestCount(count)

    return result
def Trigger(tc):
    """Endpoint-mirror trigger: push an InterfaceMirrorSession collector and
    mirror-enabled Endpoint updates, capture ERSPAN (GRE) traffic on the
    collector workloads with tcpdump, inject traffic, then tear the config
    back down and snapshot P4 tables for cleanup validation.

    Returns SUCCESS/FAILURE; sets tc.error on config problems.
    """
    if tc.ignore == True:
        return api.types.status.SUCCESS

    if tc.error == True:
        return api.types.status.FAILURE

    tc.resp_tcpdump_erspan = None
    tc.resp_cleanup = None

    protoDir = api.GetTopologyDirectory() +\
               "/gen/telemetry/{}/{}".format('mirror', 'endpoint')
    api.Logger.info("Template Config files location: ", protoDir)

    policies = utils.GetTargetJsons('mirror', 'endpoint')
    for policy_json in policies:
        #
        # Get template-Mirror Config
        #
        # NOTE(review): "{}", format(policy_json) looks like a misplaced
        # str.format call (comma instead of dot) — logs the raw template
        # string plus the path; confirm before relying on this log line.
        api.Logger.info("Adding one config object for {}", format(policy_json))
        newObjects = agent_api.AddOneConfig(policy_json)
        if len(newObjects) == 0:
            api.Logger.error("Adding new objects to store failed")
            tc.error = True
            return api.types.status.FAILURE

        #
        # Modify template-Mirror Config to make sure that Naples-node
        # act as either source or destination
        #
        # Set up Collector in the remote node
        #
        if newObjects[0].kind == 'InterfaceMirrorSession':
            tc.ep_collector_objects = newObjects
            agent_api.RemoveConfigObjects(tc.ep_collector_objects)
        elif newObjects[0].kind == 'Endpoint':
            tc.endpoint_objects = newObjects
            agent_api.RemoveConfigObjects(tc.endpoint_objects)
            updateEndpointObjectTempl(tc, tc.endpoint_objects,
                                      tc.store_endpoint_objects[0])

    # First pass (i == 0) pushes the collector session; subsequent passes
    # generate and push the Endpoint updates that reference it.
    for i in range(0, len(policies)):
        #
        # Push Collector object
        #
        if i == 0:
            colObjects = tc.ep_collector_objects
            if tc.iterators.session == 'single':
                ret = eutils.generateEpCollectorConfig(tc, colObjects)
            else:
                ret = eutils.generateEpCollectorConfigForMultiMirrorSession(
                      tc, colObjects)
            if ret != api.types.status.SUCCESS:
                api.Logger.error("Unable to identify Collector Workload")
                tc.error = True
                return api.types.status.FAILURE

            api.Logger.info("Pushing collector objects")
            ret = agent_api.PushConfigObjects(colObjects,
                                              [tc.naples.node_name],
                                              [tc.naples_device_name])
            if ret != api.types.status.SUCCESS:
                api.Logger.error("Unable to push collector objects")
                tc.error = True
                return api.types.status.FAILURE
            continue

        api.Logger.info("Generating Endpoint objects")
        cfg_api.PrintConfigsObjects(colObjects)

        #
        # Update Endpoint objects
        #
        epObjects = tc.endpoint_objects
        ret = eutils.generateEndpointConfig(tc, epObjects, colObjects)
        if ret != api.types.status.SUCCESS:
            agent_api.DeleteConfigObjects(tc.ep_collector_objects,
                                          [tc.naples.node_name],
                                          [tc.naples_device_name])
            api.Logger.error("Unable to identify Endpoints")
            tc.error = True
            return api.types.status.FAILURE

        api.Logger.info("Pushing Endpoint objects")
        ret = agent_api.UpdateConfigObjects(epObjects,
                                            [tc.naples.node_name],
                                            [tc.naples_device_name])
        if ret != api.types.status.SUCCESS:
            agent_api.DeleteConfigObjects(tc.ep_collector_objects,
                                          [tc.naples.node_name],
                                          [tc.naples_device_name])
            api.Logger.error("Unable to update endpoint objects")
            tc.error = True
            return api.types.status.FAILURE

    #
    # Establish Forwarding set up between Naples-peer and Collectors
    #
    eutils.establishForwardingSetup(tc)

    req_tcpdump_erspan = api.Trigger_CreateExecuteCommandsRequest(\
                         serial = True)
    for c in range(0, len(tc.ep_collector)):
        #
        # Set up TCPDUMP's on the collector
        #
        idx = tc.ep_collector_idx[c]
        if tc.ep_collector[c].IsNaples():
            ### TODO - run & revisit for windows case and fix any issues.
            if api.GetNodeOs(tc.naples.node_name) == "windows":
                # Windows capture goes through the WSL-visible tcpdump.exe
                # and needs the interface index instead of the name.
                intfGuid = ionic_utils.winIntfGuid(
                    tc.ep_collector[c].node_name,
                    tc.ep_collector[c].interface)
                intfVal = str(
                    ionic_utils.winTcpDumpIdx(tc.ep_collector[c].node_name,
                                              intfGuid))
                cmd = "sudo /mnt/c/Windows/System32/tcpdump.exe -c 1000 -XX -vv -i {} ip proto 47 and dst {} -U -w ep-mirror-{}.pcap"\
                      .format(intfVal, tc.collector_ip_address[idx], c)
            else:
                intfVal = tc.ep_collector[c].interface
                cmd = "tcpdump -c 1000 -XX -vv -nni {} ip proto gre and dst {}\
                --immediate-mode -U -w ep-mirror-{}.pcap"\
                .format(intfVal, tc.collector_ip_address[idx], c)
        else:
            cmd = "tcpdump -p -c 1000 -XX -vv -nni {} ip proto gre\
            and dst {} --immediate-mode -U -w ep-mirror-{}.pcap"\
            .format(tc.ep_collector[c].interface,
                    tc.collector_ip_address[idx], c)
        eutils.add_command(req_tcpdump_erspan, tc.ep_collector[c], cmd, True)

    resp_tcpdump_erspan = api.Trigger(req_tcpdump_erspan)
    for cmd in resp_tcpdump_erspan.commands:
        api.PrintCommandResults(cmd)

    #
    # Classic mode requires a delay to make sure that TCPDUMP background
    # process is fully up
    #
    if tc.classic_mode == True:
        time.sleep(2)

    #
    # Trigger packets for ERSPAN to take effect
    #
    tc.dest_port = '120'
    if api.GetNodeOs(tc.naples.node_name) == 'linux' or api.GetNodeOs(
            tc.naples.node_name) == 'windows':
        eutils.triggerTrafficInClassicModeLinux(tc)
    else:
        eutils.triggerTrafficInHostPinModeOrFreeBSD(tc)

    #
    # Dump sessions/flows/P4-tables for debug purposes
    #
    eutils.showSessionAndP4TablesForDebug(tc, tc.ep_collector,
                                          tc.ep_collector_idx)

    #
    # Terminate TCPDUMP background process
    #
    term_resp_tcpdump_erspan = api.Trigger_TerminateAllCommands(\
                               resp_tcpdump_erspan)
    tc.resp_tcpdump_erspan = api.Trigger_AggregateCommandsResponse(\
                             resp_tcpdump_erspan, term_resp_tcpdump_erspan)

    # Windows tcpdump.exe does not die with the command terminate; kill it
    # explicitly via PowerShell.
    if api.GetNodeOs(tc.naples.node_name) == "windows":
        req = api.Trigger_CreateExecuteCommandsRequest(serial=True)
        cmd = api.WINDOWS_POWERSHELL_CMD + " Stop-Process -Name 'tcpdump' -Force"
        api.Trigger_AddCommand(req, tc.naples.node_name,
                               tc.naples.workload_name, cmd, background=False)
        resp = api.Trigger(req)

    # Delete the objects
    eutils.deGenerateEndpointConfig(tc, tc.endpoint_objects,
                                    tc.ep_collector_objects)
    agent_api.UpdateConfigObjects(tc.endpoint_objects,
                                  [tc.naples.node_name],
                                  [tc.naples_device_name])
    agent_api.DeleteConfigObjects(tc.ep_collector_objects,
                                  [tc.naples.node_name],
                                  [tc.naples_device_name])

    #
    # Make sure that Mirror-config has been removed
    #
    tc.resp_cleanup = eutils.showP4TablesForValidation(tc)

    return api.types.status.SUCCESS
def Trigger(tc):
    """Mirror-session trigger: for each mirror policy json, push the config,
    locate collector workloads, run traffic validation, then flip the
    ERSPAN type (type 2 <-> type 3) on the first collector and re-run.

    Returns SUCCESS/FAILURE; records the cumulative sub-test count via
    tc.SetTestCount().
    """
    #if tc.skip: return api.types.status.SUCCESS
    policies = utils.GetTargetJsons('mirror', tc.iterators.proto)
    result = api.types.status.SUCCESS
    count = 0
    ret_count = 0
    collector_dest = []
    collector_wl = []
    collector_type = []
    for policy_json in policies:
        # Reset per-policy collector bookkeeping.
        collector_dest.clear()
        collector_wl.clear()
        collector_type.clear()
        verif_json = utils.GetVerifJsonFromPolicyJson(policy_json)
        api.Logger.info("Using policy_json = {}".format(policy_json))
        newObjects = agent_api.AddOneConfig(policy_json)
        if len (newObjects) == 0:
            api.Logger.error("Adding new objects to store failed")
            return api.types.status.FAILURE
        ret = agent_api.PushConfigObjects(newObjects)
        if ret != api.types.status.SUCCESS:
            api.Logger.error("Unable to push mirror objects")
            return api.types.status.FAILURE
        utils.DumpMirrorSessions()

        # Get collector to find the workload
        for obj in newObjects:
            for obj_collector in obj.spec.collectors:
                coll_dst = obj_collector.export_config.destination
                coll_type = obj_collector.type
                collector_dest.append(coll_dst)
                collector_type.append(coll_type)
                api.Logger.info(f"export-dest: {coll_dst}, erspan-type: {coll_type}")

        # Match each collector destination to a workload by primary or
        # secondary IP.
        # NOTE(review): wl_sec_ip_info and is_wl_type_bm are referenced
        # without tc. — presumably module-level globals populated
        # elsewhere in this file; confirm.
        for coll_dst in collector_dest:
            for wl in tc.workloads:
                if (wl.ip_address == coll_dst) or (coll_dst in wl_sec_ip_info[wl.workload_name]):
                    collector_wl.append(wl)

        api.Logger.info("collect_dest len: {} collect_wl len: {}".format(len(collector_dest), len(collector_wl)))
        collector_info = utils.GetMirrorCollectorsInfo(collector_wl, collector_dest, collector_type)
        ret = utils.RunAll(tc, verif_json, 'mirror', collector_info, is_wl_type_bm)
        result = ret['res']
        ret_count = ret['count']
        count = count + ret_count
        if result != api.types.status.SUCCESS:
            api.Logger.info("policy_json = {}, Encountered FAILURE, stopping".format(policy_json))
            # Delete the objects
            agent_api.DeleteConfigObjects(newObjects)
            agent_api.RemoveConfigObjects(newObjects)
            break

        # Update collector: re-query the pushed MirrorSession objects and
        # flip the ERSPAN type on the first collector.
        newObjects = agent_api.QueryConfigs(kind='MirrorSession')
        # mirror config update to local collector is applicable only for ESX topology
        if is_wl_type_bm is False:
            for obj in newObjects:
                if obj.spec.collectors[0].type == utils.ERSPAN_TYPE_2:
                    obj.spec.collectors[0].type = utils.ERSPAN_TYPE_3
                    collector_info[0]['type'] = utils.ERSPAN_TYPE_3
                else:
                    obj.spec.collectors[0].type = utils.ERSPAN_TYPE_2
                    collector_info[0]['type'] = utils.ERSPAN_TYPE_2
                # Only the first queried session is modified.
                break

        # Now push the update as we modified
        agent_api.UpdateConfigObjects(newObjects)
        utils.DumpMirrorSessions()

        # Rerun the tests
        ret = utils.RunAll(tc, verif_json, 'mirror', collector_info, is_wl_type_bm)
        result = ret['res']
        ret_count = ret['count']
        count = count + ret_count

        # Delete the objects
        agent_api.DeleteConfigObjects(newObjects)
        agent_api.RemoveConfigObjects(newObjects)
        api.Logger.info("policy_json = {}, count = {}, total_count = {}".format(policy_json, ret_count, count))
        if result != api.types.status.SUCCESS:
            api.Logger.info("policy_json = {}, Encountered FAILURE, stopping".format(policy_json))
            break

    tc.SetTestCount(count)
    collector_dest.clear()
    collector_wl.clear()
    return result
def Trigger(tc):
    """Flow-ERSPAN trigger: for each single-collector mirror template, push
    the config to the Naples under test, capture GRE/ERSPAN traffic with
    tcpdump on the flow collectors, inject traffic per mirror_verif entry,
    then validate received packet counts and config cleanup.

    Returns SUCCESS/FAILURE; sets tc.error on config-push problems and
    records the sub-test count via tc.SetTestCount().
    """
    if tc.ignore == True:
        return api.types.status.SUCCESS

    if tc.error == True:
        return api.types.status.FAILURE

    protoDir = api.GetTopologyDirectory() +\
               "/gen/telemetry/{}/{}".format('mirror', tc.iterators.proto)
    api.Logger.info("Template Config files location: ", protoDir)

    result = api.types.status.SUCCESS
    count = 0

    policies = utils.GetTargetJsons('mirror', tc.iterators.proto)
    for policy_json in policies:
        #
        # Get template-Mirror Config
        #
        newObjects = agent_api.AddOneConfig(policy_json)
        if len(newObjects) == 0:
            api.Logger.error("Adding new objects to store failed")
            tc.error = True
            return api.types.status.FAILURE
        agent_api.RemoveConfigObjects(newObjects)

        #
        # Ignore Multi-collector template config's, since Expanded-Telemetry
        # testbundle dynamically creates such config's
        #
        if len(newObjects[0].spec.collectors) > 1:
            continue

        #
        # Modify template-Mirror Config to make sure that Naples-node
        # act as either source or destination
        #
        # Set up Collector in the remote node
        #
        eutils.generateMirrorConfig(tc, policy_json, newObjects)

        ret_count = 0
        for i in range(0, len(tc.mirror_verif)):
            #
            # If Execution-Optimization is enabled, no need to run the test
            # for the same protocol more than once
            #
            if i > 0 and tc.mirror_verif[i]['protocol'] ==\
                         tc.mirror_verif[i-1]['protocol']:
                continue

            #
            # Flow-ERSPAN for TCP-traffic is not tested (yet) in
            # Classic-mode until applicable pkt-trigger tools are identified
            #
            if tc.classic_mode == True and\
               tc.mirror_verif[i]['protocol'] == 'tcp':
                continue

            #
            # Push Mirror Config to Naples
            #
            ret = agent_api.PushConfigObjects(newObjects,
                                              [tc.naples.node_name],
                                              [tc.naples_device_name])
            if ret != api.types.status.SUCCESS:
                api.Logger.error("Unable to push mirror objects")
                tc.error = True
                return api.types.status.FAILURE

            #
            # Establish Forwarding set up between Naples-peer and Collectors
            #
            eutils.establishForwardingSetup(tc)

            req_tcpdump_erspan = api.Trigger_CreateExecuteCommandsRequest(\
                                 serial = True)
            for c in range(0, len(tc.flow_collector)):
                #
                # Set up TCPDUMP's on the collector
                #
                idx = tc.flow_collector_idx[c]
                if tc.flow_collector[c].IsNaples():
                    cmd = "tcpdump -c 600 -XX -vv -nni {} ip proto gre and\
                    dst {} --immediate-mode -U -w flow-mirror-{}.pcap"\
                    .format(tc.flow_collector[c].interface,
                            tc.collector_ip_address[idx], c)
                else:
                    cmd = "tcpdump -p -c 600 -XX -vv -nni {} ip proto gre and\
                    dst {} --immediate-mode -U -w flow-mirror-{}.pcap"\
                    .format(tc.flow_collector[c].interface,
                            tc.collector_ip_address[idx], c)
                eutils.add_command(req_tcpdump_erspan,
                                   tc.flow_collector[c], cmd, True)

            resp_tcpdump_erspan = api.Trigger(req_tcpdump_erspan)
            for cmd in resp_tcpdump_erspan.commands:
                api.PrintCommandResults(cmd)

            #
            # Classic mode requires a delay to make sure that TCPDUMP background
            # process is fully up
            #
            if tc.classic_mode == True:
                time.sleep(2)

            #
            # Trigger packets for ERSPAN to take effect
            #
            tc.protocol = tc.mirror_verif[i]['protocol']
            tc.dest_port = utils.GetDestPort(tc.mirror_verif[i]['port'])
            if api.GetNodeOs(tc.naples.node_name) == 'linux':
                eutils.triggerTrafficInClassicModeLinux(tc)
            else:
                eutils.triggerTrafficInHostPinModeOrFreeBSD(tc)

            #
            # Dump sessions/flows/P4-tables for debug purposes
            #
            eutils.showSessionAndP4TablesForDebug(tc, tc.flow_collector,
                                                  tc.flow_collector_idx)

            #
            # Terminate TCPDUMP background process
            #
            term_resp_tcpdump_erspan = api.Trigger_TerminateAllCommands(\
                                       resp_tcpdump_erspan)
            tc.resp_tcpdump_erspan = api.Trigger_AggregateCommandsResponse(\
                                     resp_tcpdump_erspan,
                                     term_resp_tcpdump_erspan)

            # Delete the objects
            agent_api.DeleteConfigObjects(newObjects,
                                          [tc.naples.node_name],
                                          [tc.naples_device_name])

            #
            # Make sure that Mirror-config has been removed
            #
            tc.resp_cleanup = eutils.showP4TablesForValidation(tc)

            #
            # Validate ERSPAN packets reception.  Counts are doubled
            # (<< 1) because traffic runs in both directions.
            #
            tc.tcp_erspan_pkts_expected = \
            NUMBER_OF_TCP_ERSPAN_PACKETS_PER_SESSION
            tc.udp_erspan_pkts_expected = (tc.udp_count << 1)
            tc.icmp_erspan_pkts_expected = (tc.icmp_count << 1)
            # For mixed-proto runs the UDP pass also carries ICMP traffic;
            # validate both under the synthetic 'udp-mixed' protocol tag.
            if tc.protocol == 'udp' and tc.iterators.proto == 'mixed':
                tc.protocol = 'udp-mixed'
                tc.icmp_erspan_pkts_expected = tc.udp_erspan_pkts_expected
            res_1 = eutils.validateErspanPackets(tc, tc.flow_collector,
                                                 tc.flow_collector_idx)

            #
            # Validate Config-cleanup
            #
            res_2 = eutils.validateConfigCleanup(tc)

            if res_1 == api.types.status.FAILURE or\
               res_2 == api.types.status.FAILURE:
                result = api.types.status.FAILURE

            if result == api.types.status.FAILURE:
                break

            ret_count += 1

        if result == api.types.status.FAILURE:
            break

        count += ret_count

    tc.SetTestCount(count)

    return result
def Trigger(tc):
    """Flowmon (IPFIX) trigger: for each flowmon policy json, push the
    config, match each export destination to a collector workload, run the
    traffic validation suite, and tear the config down.

    Returns SUCCESS/FAILURE; records the cumulative sub-test count via
    tc.SetTestCount().
    """
    #if tc.skip: return api.types.status.SUCCESS
    policies = utils.GetTargetJsons('flowmon', tc.iterators.proto)
    result = api.types.status.SUCCESS
    count = 0
    ret_count = 0
    export_cfg = []
    collector_wl = []
    for policy_json in policies:
        # Reset per-policy collector bookkeeping.
        export_cfg.clear()
        collector_wl.clear()
        #pdb.set_trace()
        verif_json = utils.GetVerifJsonFromPolicyJson(policy_json)
        api.Logger.info("Using policy_json = {}".format(policy_json))
        newObjects = agent_api.AddOneConfig(policy_json)
        if len(newObjects) == 0:
            api.Logger.error("Adding new objects to store failed")
            return api.types.status.FAILURE
        ret = agent_api.PushConfigObjects(newObjects)
        if ret != api.types.status.SUCCESS:
            api.Logger.error("Unable to push flowmon objects")
            return api.types.status.FAILURE

        # Get collector to find the workload
        for obj in newObjects:
            for obj_export_cfg in obj.spec.exports:
                export_cfg.append(obj_export_cfg)
                api.Logger.info("export-dest: {} proto: {} port: {}".format(
                    obj_export_cfg.destination,
                    obj_export_cfg.proto_port.protocol,
                    obj_export_cfg.proto_port.port))

        # Match each export destination to a workload by primary or
        # secondary IP.
        # NOTE(review): wl_sec_ip_info and is_wl_type_bm are referenced
        # without tc. — presumably module-level globals populated
        # elsewhere in this file; confirm.
        for coll_dst in export_cfg:
            for wl in tc.workloads:
                if (wl.ip_address == coll_dst.destination) or (
                        coll_dst.destination in wl_sec_ip_info[wl.workload_name]):
                    collector_wl.append(wl)

        api.Logger.info("collect_dest len: {} ".format(len(export_cfg)))
        api.Logger.info("collect_wl len: {} ".format(len(collector_wl)))
        collector_info = utils.GetFlowmonCollectorsInfo(
            collector_wl, export_cfg)
        utils.DumpFlowmonSessions()
        ret = utils.RunAll(tc, verif_json, 'flowmon', collector_info,
                           is_wl_type_bm)
        result = ret['res']
        ret_count = ret['count']
        count = count + ret_count
        if result != api.types.status.SUCCESS:
            api.Logger.error("Failed in Traffic validation")
            utils.DumpFlowmonSessions()

        # Always clean up this policy's objects before moving on.
        agent_api.DeleteConfigObjects(newObjects)
        agent_api.RemoveConfigObjects(newObjects)
        api.Logger.info(
            "policy_json = {}, count = {}, total_count = {}".format(
                policy_json, ret_count, count))
        if result != api.types.status.SUCCESS:
            api.Logger.info(
                "policy_json = {}, Encountered FAILURE, stopping".format(
                    policy_json))
            break

    tc.SetTestCount(count)
    export_cfg.clear()
    collector_wl.clear()
    return result
def Trigger(tc):
    """Flowmon CRUD trigger: create flowmon sessions with a single export
    config, validate capture, optionally grow the export list to
    tc.iterators.num_exports and re-validate, then shrink back to a single
    session (checking collectors survive) before final cleanup.

    Relies on module-level num_exports_at_create and on helpers in this
    file (ConfigFlowmonSession, updateFlowmonCollectors,
    InjectTestTrafficAndValidateCapture) that populate
    tc.test_iterator_data.

    Returns SUCCESS/FAILURE; records the cumulative sub-test count via
    tc.SetTestCount().
    """
    #if tc.skip: return api.types.status.SUCCESS
    result = api.types.status.SUCCESS
    count = 0
    policies = utils.GetTargetJsons('flowmon', 'crud')
    for policy_json in policies:
        api.Logger.info("Policy File: {}".format(policy_json))
        flowmon_spec_objects = agent_api.AddOneConfig(policy_json)
        if len(flowmon_spec_objects) == 0:
            api.Logger.info("Policy object len {}".format(
                len(flowmon_spec_objects)))
            continue
        verif_json = utils.GetVerifJsonFromPolicyJson(policy_json)
        #create flowexport rules with 1 export cfg
        utils.generateFlowmonCollectorConfig(flowmon_spec_objects,
                                             num_exports_at_create)
        # Random collector port so repeated runs don't collide.
        port = random.randint(100, 10000)
        tc.port = port
        result = ConfigFlowmonSession(tc, num_exports_at_create,
                                      flowmon_spec_objects)
        if result != api.types.status.SUCCESS:
            api.Logger.error("Failed in Flowmon session configuration")
            agent_api.RemoveConfigObjects(flowmon_spec_objects)
            break
        api.Logger.info(
            "Test for FlowMon with {} sessions {} collectors".format(
                tc.iterators.num_flowmon_sessions, num_exports_at_create))
        utils.DumpFlowmonSessions()
        ret = InjectTestTrafficAndValidateCapture(
            tc, tc.iterators.num_flowmon_sessions, num_exports_at_create)
        result = ret['res']
        ret_count = ret['count']
        count = count + ret_count
        if result != api.types.status.SUCCESS:
            api.Logger.error("Failed in Traffic validation")
            utils.DumpFlowmonSessions()
        elif tc.iterators.num_exports > num_exports_at_create:
            #update flowexport sessions with num_exports
            result = updateFlowmonCollectors(tc, tc.iterators.num_exports)
            if result != api.types.status.SUCCESS:
                api.Logger.info("Failed in Flowmon Collector configuration")
            else:
                api.Logger.info(
                    "Test for FlowMon with {} sessions {} collectors".format(
                        tc.iterators.num_flowmon_sessions,
                        tc.iterators.num_exports))
                utils.DumpFlowmonSessions()
                ret = InjectTestTrafficAndValidateCapture(
                    tc, tc.iterators.num_flowmon_sessions,
                    tc.iterators.num_exports)
                result = ret['res']
                ret_count = ret['count']
                count = count + ret_count
                if result != api.types.status.SUCCESS:
                    api.Logger.error("Failed in Traffic validation")
                    utils.DumpFlowmonSessions()

        #remove all but one flowmon session and check the collectors are not deleted
        if (result == api.types.status.SUCCESS):
            for iteration in range(1, tc.iterators.num_flowmon_sessions):
                obj = tc.test_iterator_data[iteration]['del_obj']
                agent_api.DeleteConfigObjects(obj)
                agent_api.RemoveConfigObjects(obj)
                tc.test_iterator_data[iteration] = {}
            api.Logger.info(
                "Test for FlowMon with {} sessions {} collectors".format(
                    1, tc.iterators.num_exports))
            utils.DumpFlowmonSessions()
            ret = InjectTestTrafficAndValidateCapture(tc, 1,
                                                      tc.iterators.num_exports)
            result = ret['res']
            ret_count = ret['count']
            count = count + ret_count
            if result != api.types.status.SUCCESS:
                api.Logger.error("Failed in Traffic validation")
                utils.DumpFlowmonSessions()

        # Final cleanup of any sessions still tracked in test_iterator_data.
        for iteration in range(tc.iterators.num_flowmon_sessions):
            if tc.test_iterator_data[iteration]:
                obj = tc.test_iterator_data[iteration]['del_obj']
                agent_api.DeleteConfigObjects(obj)
                agent_api.RemoveConfigObjects(obj)
                tc.test_iterator_data[iteration] = {}
        agent_api.RemoveConfigObjects(flowmon_spec_objects)
        api.Logger.info(
            "policy_json = {}, count = {}, total_count = {}".format(
                policy_json, ret_count, count))
        if result != api.types.status.SUCCESS:
            api.Logger.info(
                "policy_json = {}, Encountered FAILURE, stopping".format(
                    policy_json))
            break

    tc.SetTestCount(count)
    return result