def configurationChangeEvent(tc):
    """Exercise config churn while traffic runs: repeatedly replace the
    allow-all security policy with per-proto policies, then push/delete
    telemetry (mirror + flowmon) configs.

    Honors tc.cancel at several points: sys.exit(0) if set before start,
    otherwise returns SUCCESS early after each config cycle.
    Push/delete failures are logged but do not abort the loop.
    """
    if tc.cancel:
        api.Logger.info("Canceling configurationChangeEvent...")
        sys.exit(0)
    api.Logger.info("Running configurationChangeEvent...")
    # Phase 1: security-policy churn for tcp and udp policy sets.
    for proto in ["tcp", "udp"]:
        policies = utils.GetTargetJsons(proto)
        for policy_json in policies:
            # Delete allow-all policy
            agent_api.DeleteSgPolicies()
            api.Logger.info("Pushing Security policy: %s " % (policy_json))
            newObjects = agent_api.AddOneConfig(policy_json)
            ret = agent_api.PushConfigObjects(newObjects)
            if ret != api.types.status.SUCCESS:
                api.Logger.error("Failed to push policies for %s" % policy_json)
            # NOTE: Delete/Remove return truthy on failure here (non-SUCCESS
            # status) — errors are logged, not propagated.
            if agent_api.DeleteConfigObjects(newObjects):
                api.Logger.error("Failed to delete config object for %s" % policy_json)
            if agent_api.RemoveConfigObjects(newObjects):
                api.Logger.error("Failed to remove config object for %s" % policy_json)
            # Restore allow-all policy
            agent_api.PushConfigObjects(
                agent_api.QueryConfigs(kind='NetworkSecurityPolicy'))
            if tc.cancel:
                return api.types.status.SUCCESS
    # Phase 2: telemetry (mirror + flowmon) config churn.
    # NOTE(review): bare GetTargetJsons here vs utils.GetTargetJsons above —
    # presumably the same helper imported directly; confirm against imports.
    for proto in ['tcp', 'udp', 'icmp', 'mixed', 'scale']:
        mirrorPolicies = GetTargetJsons('mirror', proto)
        flowmonPolicies = GetTargetJsons('flowmon', proto)
        for mp_json, fp_json in zip(mirrorPolicies, flowmonPolicies):
            mpObjs = agent_api.AddOneConfig(mp_json)
            fpObjs = agent_api.AddOneConfig(fp_json)
            ret = agent_api.PushConfigObjects(mpObjs + fpObjs)
            if ret != api.types.status.SUCCESS:
                api.Logger.error("Failed to push the telemetry objects")
            ret = agent_api.DeleteConfigObjects(fpObjs + mpObjs)
            if ret != api.types.status.SUCCESS:
                api.Logger.error("Failed to delete the telemetry objects")
            ret = agent_api.RemoveConfigObjects(mpObjs + fpObjs)
            if ret != api.types.status.SUCCESS:
                api.Logger.error("Failed to remove the telemetry objects")
            if tc.cancel:
                return api.types.status.SUCCESS
    return api.types.status.SUCCESS
def Teardown(tc):
    """Restore the baseline security policy from the topology's sgpolicy.json.

    Always returns SUCCESS (teardown is best-effort); a failed push is
    logged so the condition is at least visible in the test logs.
    """
    api.Logger.info("Tearing down ...")
    policy_json = "{}/sgpolicy.json".format(api.GetTopologyDirectory())
    # Fix: dropped the unused `sg_json_obj = utils.ReadJson(policy_json)` —
    # the parsed JSON was never consumed; AddOneConfig reads the file itself.
    newObjects = agent_api.AddOneConfig(policy_json)
    ret = agent_api.PushConfigObjects(newObjects)
    if ret != api.types.status.SUCCESS:
        # Previously the push result was silently discarded.
        api.Logger.error("Failed to restore policy from %s" % policy_json)
    return api.types.status.SUCCESS
def Trigger(tc):
    """Push, update, delete and remove scale mirror + flowmon telemetry
    objects, `iters` times per policy-json pair.

    Returns SUCCESS if every push/update/delete/remove round trips cleanly,
    FAILURE on the first error.
    """
    result = api.types.status.SUCCESS
    mirrorPolicies = utils.GetTargetJsons('mirror', "scale")
    flowmonPolicies = utils.GetTargetJsons('flowmon', "scale")
    #colPolicies = utils.GetTargetJsons('mirror', "collector")
    # Fix: a debug leftover (`iters = 1`) unconditionally clobbered the
    # configured iteration count. Honor tc.args.iters again, defaulting to
    # the previously-effective single iteration.
    iters = getattr(tc.args, "iters", 1)
    for mp_json, fp_json in zip(mirrorPolicies, flowmonPolicies):
        for i in range(iters):
            #
            # Push Mirror Session and Flow Export objects
            #
            mpObjs = agent_api.AddOneConfig(mp_json)
            fpObjs = agent_api.AddOneConfig(fp_json)
            ret = agent_api.PushConfigObjects(mpObjs + fpObjs)
            if ret != api.types.status.SUCCESS:
                api.Logger.error("Failed to push the telemetry objects")
                return api.types.status.FAILURE
            #
            # Update Mirror Session and Flow Export objects
            #
            mpObjs = UpdateMirrorSessionObjects(mpObjs)
            fpObjs = UpdateFlowMonitorObjects(fpObjs)
            ret = agent_api.UpdateConfigObjects(mpObjs + fpObjs)
            if ret != api.types.status.SUCCESS:
                api.Logger.error("Failed to update the telemetry objects")
                return api.types.status.FAILURE
            #
            # Delete Mirror Session and Flow Export objects
            #
            ret = agent_api.DeleteConfigObjects(fpObjs + mpObjs)
            if ret != api.types.status.SUCCESS:
                api.Logger.error("Failed to delete the telemetry objects")
                return api.types.status.FAILURE
            ret = agent_api.RemoveConfigObjects(mpObjs + fpObjs)
            if ret != api.types.status.SUCCESS:
                api.Logger.error("Failed to remove the telemetry objects")
                return api.types.status.FAILURE
    return result
def Trigger(tc):
    """Wipe all NetworkSecurityPolicy objects and re-push the baseline
    sgpolicy.json from the topology directory.

    Returns the push status on failure, SUCCESS otherwise.
    """
    # Tear down whatever security policies are currently in the store.
    existing = agent_api.QueryConfigs(kind="NetworkSecurityPolicy")
    agent_api.DeleteConfigObjects(existing)
    agent_api.RemoveConfigObjects(existing)

    # Re-apply the baseline policy.
    baseline_json = "{}/{}".format(api.GetTopologyDirectory(), "sgpolicy.json")
    baseline_objs = agent_api.AddOneConfig(baseline_json)
    push_status = agent_api.PushConfigObjects(baseline_objs)
    if push_status != api.types.status.SUCCESS:
        api.Logger.error("Failed to push nwsec policy")
        return push_status
    return api.types.status.SUCCESS
def Setup(tc):
    """Prepare the mirror-with-port-flap testcase: init tc state, push the
    mirror policy, resolve collector workloads, and verify uplink state.

    Sets tc.skip_flap when the flap portion of the test should be skipped
    (uplinks not UP, or classic/unified NIC mode). Returns FAILURE only for
    unrecoverable setup errors; an uplink-down condition returns SUCCESS
    with skip_flap set.
    """
    tc.skip_flap = False
    tc.newObjects = None            # pushed mirror objects (None until pushed)
    tc.collector_ip = []
    tc.collector_wl = []
    tc.collector_type = []
    tc.wl_sec_ip_info = defaultdict(lambda: dict())
    tc.IsBareMetal = utils.IsBareMetal()
    tc.port_down_time = getattr(tc.args, "port_down_time", 60)  # seconds
    # Only the first policy json for this proto is exercised here.
    policies = utils.GetTargetJsons('mirror', tc.iterators.proto)
    policy_json = policies[0]
    tc.verif_json = utils.GetVerifJsonFromPolicyJson(policy_json)
    # Push Mirror objects
    tc.newObjects = agent_api.AddOneConfig(policy_json)
    ret = agent_api.PushConfigObjects(tc.newObjects)
    if ret != api.types.status.SUCCESS:
        api.Logger.error("Unable to push mirror objects")
        tc.newObjects = None        # nothing for Teardown to clean up
        return api.types.status.FAILURE
    # Populate secondary IP
    utils.PopulateSecondaryAddress(tc)
    # Get collector
    ret = GetCollectorWorkloadFromObjects(tc)
    if ret != api.types.status.SUCCESS:
        api.Logger.error("Failed to get collector workload")
        return ret
    # NOTE(review): the builtin `all` is passed as the aggregation predicate —
    # presumably meaning "every uplink must be UP"; confirm DetectUpLinkState's
    # signature.
    ret = utils.DetectUpLinkState(api.GetNaplesHostnames(),
                                  utils.PORT_OPER_STATUS_UP, all)
    if ret != api.types.status.SUCCESS:
        # Not fatal: the test proceeds but skips the link-flap phase.
        api.Logger.error("All uplink on Nodes are not in UP state.")
        tc.skip_flap = True
        return api.types.status.SUCCESS
    if api.GetConfigNicMode() in ["classic", "unified"]:
        api.Logger.info(
            f"NIC mode: {api.GetConfigNicMode()}, Skipping uplink flap")
        tc.skip_flap = True
        # NOTE(review): unlike the uplink-down branch, this does not return —
        # inband setup below still runs. Confirm that is intentional.
    api.Logger.info("All uplink on Nodes are UP!")
    # Bring up inband and reset the active link on bond.
    ret = utils.SetupInbandInterface()
    if ret != api.types.status.SUCCESS:
        return ret
    return api.types.status.SUCCESS
def Trigger(tc):
    """Push each netagent-expansion config and fail if any push exceeds the
    time budget; configs are always cleaned up before returning.

    Returns FAILURE if a push fails or takes longer than PUSH_BUDGET
    seconds, SUCCESS otherwise.
    """
    PUSH_BUDGET = 120  # seconds allowed for one PushConfigObjects call
    policies = utils.GetTargetJsons("netagent-expansion")
    for policy_json in policies:
        # Fix: dropped the unused `sg_json_obj = utils.ReadJson(policy_json)`.
        newObjects = agent_api.AddOneConfig(policy_json)
        start = time.time()
        tc.ret = agent_api.PushConfigObjects(newObjects)
        end = time.time()
        diff = end - start
        if diff > PUSH_BUDGET:
            # Fix: the message previously logged a literal "{}" — the elapsed
            # time was never substituted into the format string.
            api.Logger.info(
                "Time taken to push configs is {} seconds.".format(diff))
            tc.ret = api.types.status.FAILURE
        # Clean up before evaluating the result so configs never leak.
        agent_api.DeleteConfigObjects(newObjects)
        agent_api.RemoveConfigObjects(newObjects)
        if tc.ret == api.types.status.FAILURE:
            return api.types.status.FAILURE
    return api.types.status.SUCCESS
def Trigger(tc):
    """Run the combined LIF-ERSPAN / Flow-ERSPAN / FlowMon (IPFIX) test.

    For each single-collector mirror template (paired with a single-export
    flowmon template), pushes collector + mirror + flowmon + LIF interface
    config to the Naples node, starts tcpdump captures on the collectors,
    triggers traffic per tc.mirror_verif entry, then tears the config down
    and validates captured ERSPAN/IPFIX packets and config cleanup.

    Mutates many tc attributes (protocol, feature, *_pkts_expected,
    resp_tcpdump_*, resp_cleanup) as scratch state for the eutils
    validators. Returns SUCCESS/FAILURE; also sets tc.error on fatal
    config errors and records the executed-iteration count via
    tc.SetTestCount().
    """
    # Honor testcase-level short circuits set by earlier stages.
    if tc.ignore == True:
        return api.types.status.SUCCESS
    if tc.error == True:
        return api.types.status.FAILURE
    # Log where the generated template configs live (informational only).
    protoDir1 = api.GetTopologyDirectory() +\
                "/gen/telemetry/{}/{}".format('mirror', 'lif')
    api.Logger.info("Template Config files location: ", protoDir1)
    protoDir2 = api.GetTopologyDirectory() +\
                "/gen/telemetry/{}/{}".format('mirror', tc.iterators.proto)
    api.Logger.info("Template Config files location: ", protoDir2)
    protoDir3 = api.GetTopologyDirectory() +\
                "/gen/telemetry/{}/{}".format('flowmon', tc.iterators.proto)
    api.Logger.info("Template Config files location: ", protoDir3)
    result = api.types.status.SUCCESS
    count = 0
    MirrorPolicies = utils.GetTargetJsons('mirror', tc.iterators.proto)
    FlowMonPolicies = utils.GetTargetJsons('flowmon', tc.iterators.proto)
    LifPolicies = utils.GetTargetJsons('mirror', 'lif')
    # flowmon_policy_idx pairs each mirror template with the next unused
    # single-export flowmon template across outer-loop iterations.
    flowmon_policy_idx = 0
    ret_count = 0
    for mirror_json in MirrorPolicies:
        #
        # Get template-Mirror Config
        #
        newMirrorObjects = agent_api.AddOneConfig(mirror_json)
        if len(newMirrorObjects) == 0:
            api.Logger.error("Adding new Mirror objects to store failed")
            tc.error = True
            return api.types.status.FAILURE
        agent_api.RemoveConfigObjects(newMirrorObjects)
        #
        # Ignore Multi-collector template config's, since Expanded-Telemetry
        # testbundle dynamically creates such config's
        #
        if len(newMirrorObjects[0].spec.collectors) > 1:
            continue
        idx = 0
        for flowmon_json in FlowMonPolicies:
            # Skip flowmon templates already consumed by earlier mirror jsons.
            if idx < flowmon_policy_idx:
                idx += 1
                continue
            #
            # Get template-FlowMon Config
            #
            newFlowMonObjects = agent_api.AddOneConfig(flowmon_json)
            if len(newFlowMonObjects) == 0:
                api.Logger.error("Adding new FlowMon objects to store failed")
                tc.error = True
                return api.types.status.FAILURE
            agent_api.RemoveConfigObjects(newFlowMonObjects)
            #
            # Ignore Multi-collector template config's, since Expanded-Telemetry
            # testbundle dynamically creates such config's
            #
            if len(newFlowMonObjects[0].spec.exports) > 1:
                flowmon_policy_idx += 1
                idx += 1
                continue
            #
            # Modify template-Mirror / template-FlowMon Config to make sure
            # that Naples-node act as either source or destination
            #
            # Set up Collector in the remote node
            #
            eutils.generateMirrorConfig(tc, mirror_json, newMirrorObjects)
            eutils.generateFlowMonConfig(tc, flowmon_json, newFlowMonObjects)
            ret_count = 0
            for i in range(0, len(tc.mirror_verif)):
                #
                # If Execution-Optimization is enabled, no need to run the
                # test for the same protocol more than once
                #
                if i > 0 and tc.mirror_verif[i]['protocol'] ==\
                   tc.mirror_verif[i-1]['protocol']:
                    continue
                #
                # Flow-ERSPAN for TCP-traffic is not tested (yet) in
                # Classic-mode until applicable pkt-trigger tools are
                # identified
                #
                if tc.classic_mode == True and\
                   tc.mirror_verif[i]['protocol'] == 'tcp':
                    continue
                for policy_json in LifPolicies:
                    #
                    # Get template-Mirror Config
                    #
                    newObjects = agent_api.AddOneConfig(policy_json)
                    if len(newObjects) == 0:
                        api.Logger.error("Adding new objects to store failed")
                        tc.error = True
                        return api.types.status.FAILURE
                    #
                    # Modify template-Mirror Config to make sure that
                    # Naples-node act as either source or destination
                    #
                    # Set up Collector in the remote node
                    #
                    if newObjects[0].kind == 'InterfaceMirrorSession':
                        tc.lif_collector_objects = newObjects
                        agent_api.RemoveConfigObjects(tc.lif_collector_objects)
                    elif newObjects[0].kind == 'Interface':
                        tc.interface_objects = newObjects
                        agent_api.RemoveConfigObjects(tc.interface_objects)
                #
                # Push Collector Config to Naples
                #
                colObjects = tc.lif_collector_objects
                ret = eutils.generateLifCollectorConfig(tc, colObjects)
                if ret != api.types.status.SUCCESS:
                    api.Logger.error("Unable to identify Collector Workload")
                    tc.error = True
                    return api.types.status.FAILURE
                ret = agent_api.PushConfigObjects(colObjects,
                                                  [tc.naples.node_name],
                                                  [tc.naples_device_name])
                if ret != api.types.status.SUCCESS:
                    api.Logger.error("Unable to push collector objects")
                    tc.error = True
                    return api.types.status.FAILURE
                #
                # Push Mirror / FlowMon Config to Naples
                #
                ret = agent_api.PushConfigObjects(newMirrorObjects,
                                                  [tc.naples.node_name],
                                                  [tc.naples_device_name])
                if ret != api.types.status.SUCCESS:
                    # Roll back the collector config pushed above.
                    agent_api.DeleteConfigObjects(tc.lif_collector_objects,
                                                  [tc.naples.node_name],
                                                  [tc.naples_device_name])
                    api.Logger.error("Unable to push mirror objects")
                    tc.error = True
                    return api.types.status.FAILURE
                ret = agent_api.PushConfigObjects(newFlowMonObjects,
                                                  [tc.naples.node_name],
                                                  [tc.naples_device_name])
                if ret != api.types.status.SUCCESS:
                    # Roll back collector + mirror configs pushed above.
                    agent_api.DeleteConfigObjects(tc.lif_collector_objects,
                                                  [tc.naples.node_name],
                                                  [tc.naples_device_name])
                    agent_api.DeleteConfigObjects(newMirrorObjects,
                                                  [tc.naples.node_name],
                                                  [tc.naples_device_name])
                    api.Logger.error("Unable to push flowmon objects")
                    tc.error = True
                    return api.types.status.FAILURE
                #
                # Update Interface objects
                #
                ifObjects = tc.interface_objects
                ret = eutils.generateLifInterfaceConfig(
                    tc, ifObjects, tc.lif_collector_objects)
                if ret != api.types.status.SUCCESS:
                    agent_api.DeleteConfigObjects(tc.lif_collector_objects,
                                                  [tc.naples.node_name],
                                                  [tc.naples_device_name])
                    agent_api.DeleteConfigObjects(newMirrorObjects,
                                                  [tc.naples.node_name],
                                                  [tc.naples_device_name])
                    agent_api.DeleteConfigObjects(newFlowMonObjects,
                                                  [tc.naples.node_name],
                                                  [tc.naples_device_name])
                    api.Logger.error(
                        "Unable to identify Uplink/LIF Interfaces")
                    tc.error = True
                    return api.types.status.FAILURE
                ret = agent_api.UpdateConfigObjects(ifObjects,
                                                    [tc.naples.node_name],
                                                    [tc.naples_device_name])
                if ret != api.types.status.SUCCESS:
                    agent_api.DeleteConfigObjects(tc.lif_collector_objects,
                                                  [tc.naples.node_name],
                                                  [tc.naples_device_name])
                    agent_api.DeleteConfigObjects(newMirrorObjects,
                                                  [tc.naples.node_name],
                                                  [tc.naples_device_name])
                    agent_api.DeleteConfigObjects(newFlowMonObjects,
                                                  [tc.naples.node_name],
                                                  [tc.naples_device_name])
                    api.Logger.error("Unable to update interface objects")
                    tc.error = True
                    return api.types.status.FAILURE
                #
                # Establish Forwarding set up between Naples-peer and
                # Collectors
                #
                eutils.establishForwardingSetup(tc)
                #
                # Give a little time for flows clean-up to happen so that
                # stale IPFIX records don't show up
                #
                if tc.classic_mode == True:
                    time.sleep(1)
                if tc.collection == 'distinct':
                    req_tcpdump_flow_erspan = \
                        api.Trigger_CreateExecuteCommandsRequest(serial = True)
                    for c in range(0, len(tc.flow_collector)):
                        #
                        # Set up TCPDUMP's on the collector
                        #
                        idx = tc.flow_collector_idx[c]
                        # -p (no promiscuous) is only needed off-Naples.
                        if tc.flow_collector[c].IsNaples():
                            cmd = "tcpdump -c 600 -XX -vv -nni {} ip proto"\
                                  " gre and dst {} --immediate-mode -U"\
                                  " -w flow-mirror-{}.pcap"\
                                  .format(tc.flow_collector[c].interface,
                                          tc.collector_ip_address[idx], c)
                        else:
                            cmd = "tcpdump -p -c 600 -XX -vv -nni {} ip"\
                                  " proto gre and dst {} --immediate-mode -U"\
                                  " -w flow-mirror-{}.pcap"\
                                  .format(tc.flow_collector[c].interface,
                                          tc.collector_ip_address[idx], c)
                        eutils.add_command(req_tcpdump_flow_erspan,
                                           tc.flow_collector[c], cmd, True)
                    resp_tcpdump_flow_erspan = api.Trigger(\
                                               req_tcpdump_flow_erspan)
                    for cmd in resp_tcpdump_flow_erspan.commands:
                        api.PrintCommandResults(cmd)
                    #
                    # Classic mode requires a delay to make sure that TCPDUMP
                    # background process is fully up
                    #
                    if tc.classic_mode == True:
                        time.sleep(1)
                req_tcpdump_lif_erspan = \
                    api.Trigger_CreateExecuteCommandsRequest(serial = True)
                req_tcpdump_flowmon = api.Trigger_CreateExecuteCommandsRequest(\
                                      serial = True)
                for c in range(0, len(tc.lif_collector)):
                    #
                    # Set up TCPDUMP's on the collector
                    #
                    idx = tc.lif_collector_idx[c]
                    if tc.lif_collector[c].IsNaples():
                        cmd = "tcpdump -c 1000 -XX -vv -nni {} ip proto gre"\
                              " and dst {} --immediate-mode -U"\
                              " -w lif-mirror-{}.pcap"\
                              .format(tc.lif_collector[c].interface,
                                      tc.collector_ip_address[idx], c)
                    else:
                        cmd = "tcpdump -p -c 1000 -XX -vv -nni {} ip proto"\
                              " gre and dst {} --immediate-mode -U"\
                              " -w lif-mirror-{}.pcap"\
                              .format(tc.lif_collector[c].interface,
                                      tc.collector_ip_address[idx], c)
                    eutils.add_command(req_tcpdump_lif_erspan,
                                       tc.lif_collector[c], cmd, True)
                for c in range(0, len(tc.flowmon_collector)):
                    #
                    # Set up TCPDUMP's on the collector
                    #
                    idx = tc.flowmon_collector_idx[c]
                    if tc.flowmon_collector[c].IsNaples():
                        cmd = "tcpdump -c 600 -XX -vv -nni {} udp and"\
                              " dst port {} and dst {} --immediate-mode"\
                              " -U -w flowmon-{}.pcap"\
                              .format(tc.flowmon_collector[c].interface,
                                      tc.export_port[c],
                                      tc.collector_ip_address[idx], c)
                    else:
                        cmd = "tcpdump -p -c 600 -XX -vv -nni {} udp"\
                              " and dst port {} and dst {}"\
                              " --immediate-mode -U -w flowmon-{}.pcap"\
                              .format(tc.flowmon_collector[c].interface,
                                      tc.export_port[c],
                                      tc.collector_ip_address[idx], c)
                    eutils.add_command(req_tcpdump_flowmon,
                                       tc.flowmon_collector[c], cmd, True)
                resp_tcpdump_lif_erspan = api.Trigger(\
                                          req_tcpdump_lif_erspan)
                for cmd in resp_tcpdump_lif_erspan.commands:
                    api.PrintCommandResults(cmd)
                #
                # Classic mode requires a delay to make sure that TCPDUMP
                # background process is fully up
                #
                if tc.classic_mode == True:
                    time.sleep(2)
                resp_tcpdump_flowmon = api.Trigger(req_tcpdump_flowmon)
                for cmd in resp_tcpdump_flowmon.commands:
                    api.PrintCommandResults(cmd)
                #
                # Classic mode requires a delay to make sure that TCPDUMP
                # background process is fully up
                #
                if tc.classic_mode == True:
                    time.sleep(2)
                #
                # Trigger packets for ERSPAN / FLOWMON to take effect
                #
                tc.protocol = tc.mirror_verif[i]['protocol']
                tc.dest_port = utils.GetDestPort(tc.mirror_verif[i]['port'])
                # Traffic helpers read tc.protocol; run with 'all' and
                # restore the real protocol afterwards.
                protocol = tc.protocol
                tc.protocol = 'all'
                if api.GetNodeOs(tc.naples.node_name) == 'linux':
                    eutils.triggerTrafficInClassicModeLinux(tc)
                else:
                    eutils.triggerTrafficInHostPinModeOrFreeBSD(tc)
                tc.protocol = protocol
                #
                # Dump sessions/flows/P4-tables for debug purposes
                #
                eutils.showSessionAndP4TablesForDebug(tc, tc.lif_collector,
                                                      tc.lif_collector_idx)
                #
                # Terminate TCPDUMP background process
                #
                term_resp_tcpdump_lif_erspan = \
                    api.Trigger_TerminateAllCommands(resp_tcpdump_lif_erspan)
                tc.resp_tcpdump_lif_erspan = \
                    api.Trigger_AggregateCommandsResponse(\
                        resp_tcpdump_lif_erspan, term_resp_tcpdump_lif_erspan)
                if tc.collection == 'distinct':
                    term_resp_tcpdump_flow_erspan = \
                        api.Trigger_TerminateAllCommands(resp_tcpdump_flow_erspan)
                    tc.resp_tcpdump_flow_erspan = \
                        api.Trigger_AggregateCommandsResponse(\
                            resp_tcpdump_flow_erspan,
                            term_resp_tcpdump_flow_erspan)
                term_resp_tcpdump_flowmon = api.Trigger_TerminateAllCommands(\
                                            resp_tcpdump_flowmon)
                tc.resp_tcpdump_flowmon = \
                    api.Trigger_AggregateCommandsResponse(resp_tcpdump_flowmon,
                                                          term_resp_tcpdump_flowmon)
                # Delete the objects
                eutils.deGenerateLifInterfaceConfig(tc, tc.interface_objects,
                                                    tc.lif_collector_objects)
                agent_api.UpdateConfigObjects(tc.interface_objects,
                                              [tc.naples.node_name],
                                              [tc.naples_device_name])
                agent_api.DeleteConfigObjects(tc.lif_collector_objects,
                                              [tc.naples.node_name],
                                              [tc.naples_device_name])
                agent_api.DeleteConfigObjects(newMirrorObjects,
                                              [tc.naples.node_name],
                                              [tc.naples_device_name])
                agent_api.DeleteConfigObjects(newFlowMonObjects,
                                              [tc.naples.node_name],
                                              [tc.naples_device_name])
                #
                # Make sure that Mirror-config has been removed
                #
                tc.resp_cleanup = eutils.showP4TablesForValidation(tc)
                #
                # Validate ERSPAN packets reception
                #
                # Expected counts: << 1 doubles (both directions captured),
                # >> 1 halves (single direction).
                tc.tcp_erspan_pkts_expected = \
                    NUMBER_OF_TCP_ERSPAN_PACKETS_PER_SESSION
                tc.udp_erspan_pkts_expected = (tc.udp_count << 1)
                tc.icmp_erspan_pkts_expected = (tc.udp_count << 1)+\
                                               (tc.icmp_count << 1)
                if tc.iterators.direction != 'both':
                    tc.tcp_erspan_pkts_expected >>= 1
                    tc.udp_erspan_pkts_expected >>= 1
                    tc.icmp_erspan_pkts_expected >>= 1
                if tc.dupcheck == 'disable':
                    tc.tcp_erspan_pkts_expected = \
                        (tc.tcp_erspan_pkts_expected+1) << 1
                    tc.udp_erspan_pkts_expected <<= 1
                    tc.icmp_erspan_pkts_expected <<= 1
                #
                # Adjust Expected-pkt-counts taking into account Flow-ERSPAN
                # Config's
                #
                if tc.collection == 'unified':
                    if (tc.protocol == 'tcp' or tc.iterators.proto == 'mixed')\
                       and tc.iterators.direction != 'both':
                        tc.tcp_erspan_pkts_expected <<= 1
                    if (tc.protocol == 'udp' or tc.iterators.proto == 'mixed')\
                       and tc.iterators.direction != 'both':
                        tc.udp_erspan_pkts_expected <<= 1
                    if tc.protocol == 'icmp' or tc.iterators.proto == 'mixed':
                        if tc.iterators.direction != 'both':
                            tc.icmp_erspan_pkts_expected <<= 1
                        #if tc.iterators.direction != 'egress':
                        #    tc.icmp_erspan_pkts_expected += 1
                protocol = tc.protocol
                tc.protocol = 'all'
                tc.feature = 'lif-erspan'
                tc.resp_tcpdump_erspan = tc.resp_tcpdump_lif_erspan
                res_1 = eutils.validateErspanPackets(tc, tc.lif_collector,
                                                     tc.lif_collector_idx)
                if tc.collection == 'distinct':
                    # Re-compute expectations for the separate flow-ERSPAN
                    # capture, then validate it independently.
                    tc.tcp_erspan_pkts_expected = 0
                    tc.udp_erspan_pkts_expected = 0
                    tc.icmp_erspan_pkts_expected = 0
                    tc.protocol = protocol
                    if tc.protocol == 'tcp' or tc.iterators.proto == 'mixed':
                        tc.tcp_erspan_pkts_expected = \
                            NUMBER_OF_TCP_ERSPAN_PACKETS_PER_SESSION
                    if tc.protocol == 'udp' or tc.iterators.proto == 'mixed':
                        tc.udp_erspan_pkts_expected = (tc.udp_count << 1)
                    if tc.protocol == 'icmp' or tc.iterators.proto == 'mixed':
                        tc.icmp_erspan_pkts_expected = (tc.udp_count << 1)+\
                                                       (tc.icmp_count << 1)
                    tc.protocol = 'all'
                    tc.feature = 'flow-erspan'
                    tc.resp_tcpdump_erspan = tc.resp_tcpdump_flow_erspan
                    res_f = eutils.validateErspanPackets(
                            tc, tc.flow_collector, tc.flow_collector_idx)
                    if res_f == api.types.status.FAILURE:
                        result = api.types.status.FAILURE
                #
                # Validate IPFIX packets reception
                #
                tc.feature = 'flowmon'
                res_2 = eutils.validateIpFixPackets(tc)
                tc.protocol = protocol
                #
                # Validate Config-cleanup
                #
                res_3 = eutils.validateConfigCleanup(tc)
                if res_1 == api.types.status.FAILURE or\
                   res_2 == api.types.status.FAILURE or\
                   res_3 == api.types.status.FAILURE:
                    result = api.types.status.FAILURE
                if result == api.types.status.FAILURE:
                    break
                ret_count += 1
            # One flowmon template consumed per mirror template.
            flowmon_policy_idx += 1
            break
        if result == api.types.status.FAILURE:
            break
        count += ret_count
    tc.SetTestCount(count)
    return result
def Trigger(tc):
    """End-to-end IPsec datapath test between two workload nodes using nc.

    Per node: if the node is a Naples node, push IPsec SA-encrypt /
    SA-decrypt / policy configs via netagent (only when no such objects
    already exist in the store); otherwise program the Linux kernel IPsec
    stack directly with `ip xfrm` commands on the first workload. Then
    transfer a file over nc (tcp or udp per tc.iterators) from the Naples
    client to the server, capture IPsec state for debugging, and copy both
    data files back for later comparison.

    Returns FAILURE if config push or file retrieval fails, else SUCCESS.
    Leaves tc.resp / tc.cmd_cookies populated for the Verify stage.
    """
    tc.cmd_cookies = []
    tc.cmd_cookies1 = []
    tc.cmd_cookies2 = []
    nodes = api.GetWorkloadNodeHostnames()
    push_node_0 = [nodes[0]]
    push_node_1 = [nodes[1]]
    # Existing objects: configs are only pushed when the store is empty.
    encrypt_objects = netagent_cfg_api.QueryConfigs(kind='IPSecSAEncrypt')
    decrypt_objects = netagent_cfg_api.QueryConfigs(kind='IPSecSADecrypt')
    policy_objects = netagent_cfg_api.QueryConfigs(kind='IPSecPolicy')
    # Configure IPsec on Node 1
    if api.IsNaplesNode(nodes[0]):
        if len(encrypt_objects) == 0:
            newObjects = netagent_cfg_api.AddOneConfig(
                api.GetTopologyDirectory() +
                "/ipsec/ipsec_encryption_node1.json")
            if len(newObjects) == 0:
                api.Logger.error(
                    "Adding new objects to store failed for ipsec_encryption_node1.json"
                )
                return api.types.status.FAILURE
            ret = netagent_cfg_api.PushConfigObjects(newObjects,
                                                     node_names=push_node_0,
                                                     ignore_error=True)
            if ret != api.types.status.SUCCESS:
                api.Logger.error(
                    "Unable to push ipsec-encryption objects to node %s" %
                    nodes[0])
                return api.types.status.FAILURE
            # Read back to confirm the push actually landed.
            get_config_objects = netagent_cfg_api.GetConfigObjects(newObjects)
            if len(get_config_objects) == 0:
                api.Logger.error("Unable to fetch newly pushed objects")
                return api.types.status.FAILURE
        if len(decrypt_objects) == 0:
            newObjects = netagent_cfg_api.AddOneConfig(
                api.GetTopologyDirectory() +
                "/ipsec/ipsec_decryption_node1.json")
            if len(newObjects) == 0:
                api.Logger.error(
                    "Adding new objects to store failed for ipsec_decryption_node1.json"
                )
                return api.types.status.FAILURE
            ret = netagent_cfg_api.PushConfigObjects(newObjects,
                                                     node_names=push_node_0,
                                                     ignore_error=True)
            if ret != api.types.status.SUCCESS:
                api.Logger.error(
                    "Unable to push ipsec-encryption objects to node %s" %
                    nodes[0])
                return api.types.status.FAILURE
            get_config_objects = netagent_cfg_api.GetConfigObjects(newObjects)
            if len(get_config_objects) == 0:
                api.Logger.error("Unable to fetch newly pushed objects")
                return api.types.status.FAILURE
        if len(policy_objects) == 0:
            newObjects = netagent_cfg_api.AddOneConfig(
                api.GetTopologyDirectory() +
                "/ipsec/ipsec_policies_node1.json")
            if len(newObjects) == 0:
                api.Logger.error(
                    "Adding new objects to store failed for ipsec_policies_node1.json"
                )
                return api.types.status.FAILURE
            ret = netagent_cfg_api.PushConfigObjects(newObjects,
                                                     node_names=push_node_0,
                                                     ignore_error=True)
            if ret != api.types.status.SUCCESS:
                api.Logger.error(
                    "Unable to push ipsec-policy objects to node %s" %
                    nodes[0])
                return api.types.status.FAILURE
            get_config_objects = netagent_cfg_api.GetConfigObjects(newObjects)
            if len(get_config_objects) == 0:
                api.Logger.error("Unable to fetch newly pushed objects")
                return api.types.status.FAILURE
    else:
        # Non-Naples node 1: program kernel IPsec (policies + SAs) directly.
        # Tunnel endpoints 192.168.100.101 <-> 192.168.100.103 with a static
        # rfc4106(gcm(aes)) key; port taken from tc.iterators.port.
        workloads = api.GetWorkloads(nodes[0])
        w1 = workloads[0]
        req1 = api.Trigger_CreateExecuteCommandsRequest(serial=True)
        api.Trigger_AddCommand(req1, w1.node_name, w1.workload_name,
                               "sudo ip xfrm state flush")
        api.Trigger_AddCommand(req1, w1.node_name, w1.workload_name,
                               "sudo ip xfrm policy flush")
        api.Trigger_AddCommand(
            req1, w1.node_name, w1.workload_name,
            "sudo ip xfrm policy add src 192.168.100.103/32 dst 192.168.100.101/32 proto tcp dport %s dir in tmpl src 192.168.100.103 dst 192.168.100.101 proto esp mode tunnel"
            % (tc.iterators.port))
        api.Trigger_AddCommand(
            req1, w1.node_name, w1.workload_name,
            "sudo ip xfrm policy add src 192.168.100.103/32 dst 192.168.100.101/32 proto tcp dport %s dir fwd tmpl src 192.168.100.103 dst 192.168.100.101 proto esp mode tunnel"
            % (tc.iterators.port))
        api.Trigger_AddCommand(
            req1, w1.node_name, w1.workload_name,
            "sudo ip xfrm policy add src 192.168.100.101/32 dst 192.168.100.103/32 proto tcp sport %s dir out tmpl src 192.168.100.101 dst 192.168.100.103 proto esp mode tunnel"
            % (tc.iterators.port))
        api.Trigger_AddCommand(
            req1, w1.node_name, w1.workload_name,
            "sudo ip xfrm state add src 192.168.100.103 dst 192.168.100.101 proto esp spi 0x01 mode tunnel aead 'rfc4106(gcm(aes))' 0x414141414141414141414141414141414141414141414141414141414141414100000000 128 sel src 192.168.100.103/32 dst 192.168.100.101/32"
        )
        api.Trigger_AddCommand(
            req1, w1.node_name, w1.workload_name,
            "sudo ip xfrm state add src 192.168.100.101 dst 192.168.100.103 proto esp spi 0x01 mode tunnel aead 'rfc4106(gcm(aes))' 0x414141414141414141414141414141414141414141414141414141414141414100000000 128 sel src 192.168.100.101/32 dst 192.168.100.103/32"
        )
        api.Trigger_AddCommand(
            req1, w1.node_name, w1.workload_name,
            "sudo ip xfrm policy add src 192.168.100.103/32 dst 192.168.100.101/32 proto udp dport %s dir in tmpl src 192.168.100.103 dst 192.168.100.101 proto esp mode tunnel"
            % (tc.iterators.port))
        api.Trigger_AddCommand(
            req1, w1.node_name, w1.workload_name,
            "sudo ip xfrm policy add src 192.168.100.103/32 dst 192.168.100.101/32 proto udp dport %s dir fwd tmpl src 192.168.100.103 dst 192.168.100.101 proto esp mode tunnel"
            % (tc.iterators.port))
        api.Trigger_AddCommand(
            req1, w1.node_name, w1.workload_name,
            "sudo ip xfrm policy add src 192.168.100.101/32 dst 192.168.100.103/32 proto udp sport %s dir out tmpl src 192.168.100.101 dst 192.168.100.103 proto esp mode tunnel"
            % (tc.iterators.port))
        api.Trigger_AddCommand(req1, w1.node_name, w1.workload_name,
                               "sudo ip xfrm state list")
        api.Trigger_AddCommand(req1, w1.node_name, w1.workload_name,
                               "sudo ip xfrm policy list")
        trig_resp1 = api.Trigger(req1)
        term_resp1 = api.Trigger_TerminateAllCommands(trig_resp1)
    # Configure IPsec on Node 2
    if api.IsNaplesNode(nodes[1]):
        if len(encrypt_objects) == 0:
            newObjects = netagent_cfg_api.AddOneConfig(
                api.GetTopologyDirectory() +
                "/ipsec/ipsec_encryption_node2.json")
            if len(newObjects) == 0:
                api.Logger.error(
                    "Adding new objects to store failed for ipsec_encryption_node2.json"
                )
                return api.types.status.FAILURE
            ret = netagent_cfg_api.PushConfigObjects(newObjects,
                                                     node_names=push_node_1,
                                                     ignore_error=True)
            if ret != api.types.status.SUCCESS:
                api.Logger.error(
                    "Unable to push ipsec-encryption objects to node %s" %
                    nodes[1])
                return api.types.status.FAILURE
            # NOTE(review): unlike node 1, fetch failures on node 2 only log
            # (the returns below are deliberately commented out in source).
            get_config_objects = netagent_cfg_api.GetConfigObjects(newObjects)
            if len(get_config_objects) == 0:
                api.Logger.error("Unable to fetch newly pushed objects")
                #return api.types.status.FAILURE
        if len(decrypt_objects) == 0:
            newObjects = netagent_cfg_api.AddOneConfig(
                api.GetTopologyDirectory() +
                "/ipsec/ipsec_decryption_node2.json")
            if len(newObjects) == 0:
                api.Logger.error(
                    "Adding new objects to store failed for ipsec_decryption_node2.json"
                )
                return api.types.status.FAILURE
            ret = netagent_cfg_api.PushConfigObjects(newObjects,
                                                     node_names=push_node_1,
                                                     ignore_error=True)
            if ret != api.types.status.SUCCESS:
                api.Logger.error(
                    "Unable to push ipsec-encryption objects to node %s" %
                    nodes[1])
                return api.types.status.FAILURE
            get_config_objects = netagent_cfg_api.GetConfigObjects(newObjects)
            if len(get_config_objects) == 0:
                api.Logger.error("Unable to fetch newly pushed objects")
                #return api.types.status.FAILURE
        if len(policy_objects) == 0:
            newObjects = netagent_cfg_api.AddOneConfig(
                api.GetTopologyDirectory() +
                "/ipsec/ipsec_policies_node2.json")
            if len(newObjects) == 0:
                api.Logger.error(
                    "Adding new objects to store failed for ipsec_policies_node2.json"
                )
                return api.types.status.FAILURE
            ret = netagent_cfg_api.PushConfigObjects(newObjects,
                                                     node_names=push_node_1,
                                                     ignore_error=True)
            if ret != api.types.status.SUCCESS:
                api.Logger.error(
                    "Unable to push ipsec-encryption objects to node %s" %
                    nodes[1])
                return api.types.status.FAILURE
            get_config_objects = netagent_cfg_api.GetConfigObjects(newObjects)
            if len(get_config_objects) == 0:
                api.Logger.error("Unable to fetch newly pushed objects")
                #return api.types.status.FAILURE
    else:
        # Non-Naples node 2: mirror image of the node-1 xfrm programming
        # (src/dst swapped).
        workloads = api.GetWorkloads(nodes[1])
        w2 = workloads[0]
        req2 = api.Trigger_CreateExecuteCommandsRequest(serial=True)
        api.Trigger_AddCommand(req2, w2.node_name, w2.workload_name,
                               "sudo ip xfrm state flush")
        api.Trigger_AddCommand(req2, w2.node_name, w2.workload_name,
                               "sudo ip xfrm policy flush")
        api.Trigger_AddCommand(
            req2, w2.node_name, w2.workload_name,
            "sudo ip xfrm policy add src 192.168.100.101/32 dst 192.168.100.103/32 proto tcp dport %s dir in tmpl src 192.168.100.101 dst 192.168.100.103 proto esp mode tunnel"
            % (tc.iterators.port))
        api.Trigger_AddCommand(
            req2, w2.node_name, w2.workload_name,
            "sudo ip xfrm policy add src 192.168.100.101/32 dst 192.168.100.103/32 proto tcp dport %s dir fwd tmpl src 192.168.100.101 dst 192.168.100.103 proto esp mode tunnel"
            % (tc.iterators.port))
        api.Trigger_AddCommand(
            req2, w2.node_name, w2.workload_name,
            "sudo ip xfrm policy add src 192.168.100.103/32 dst 192.168.100.101/32 proto tcp sport %s dir out tmpl src 192.168.100.103 dst 192.168.100.101 proto esp mode tunnel"
            % (tc.iterators.port))
        api.Trigger_AddCommand(
            req2, w2.node_name, w2.workload_name,
            "sudo ip xfrm state add src 192.168.100.101 dst 192.168.100.103 proto esp spi 0x01 mode tunnel aead 'rfc4106(gcm(aes))' 0x414141414141414141414141414141414141414141414141414141414141414100000000 128 sel src 192.168.100.101/32 dst 192.168.100.103/32"
        )
        api.Trigger_AddCommand(
            req2, w2.node_name, w2.workload_name,
            "sudo ip xfrm state add src 192.168.100.103 dst 192.168.100.101 proto esp spi 0x01 mode tunnel aead 'rfc4106(gcm(aes))' 0x414141414141414141414141414141414141414141414141414141414141414100000000 128 sel src 192.168.100.103/32 dst 192.168.100.101/32"
        )
        api.Trigger_AddCommand(
            req2, w2.node_name, w2.workload_name,
            "sudo ip xfrm policy add src 192.168.100.101/32 dst 192.168.100.103/32 proto udp dport %s dir in tmpl src 192.168.100.101 dst 192.168.100.103 proto esp mode tunnel"
            % (tc.iterators.port))
        api.Trigger_AddCommand(
            req2, w2.node_name, w2.workload_name,
            "sudo ip xfrm policy add src 192.168.100.101/32 dst 192.168.100.103/32 proto udp dport %s dir fwd tmpl src 192.168.100.101 dst 192.168.100.103 proto esp mode tunnel"
            % (tc.iterators.port))
        api.Trigger_AddCommand(
            req2, w2.node_name, w2.workload_name,
            "sudo ip xfrm policy add src 192.168.100.103/32 dst 192.168.100.101/32 proto udp sport %s dir out tmpl src 192.168.100.103 dst 192.168.100.101 proto esp mode tunnel"
            % (tc.iterators.port))
        api.Trigger_AddCommand(req2, w2.node_name, w2.workload_name,
                               "sudo ip xfrm state list")
        api.Trigger_AddCommand(req2, w2.node_name, w2.workload_name,
                               "sudo ip xfrm policy list")
        trig_resp2 = api.Trigger(req2)
        term_resp2 = api.Trigger_TerminateAllCommands(trig_resp2)
    # Pick client/server roles: the Naples workload is always the nc client.
    workloads = api.GetWorkloads(nodes[0])
    w1 = workloads[0]
    workloads = api.GetWorkloads(nodes[1])
    w2 = workloads[0]
    bypass_test = 0
    if w1.IsNaples() and w2.IsNaples():
        # Naples-to-Naples is not exercised: run a dummy-file pass-through.
        api.Logger.info(
            "Both workloads are Naples, %s is nc client, %s is nc server, bypassing test"
            % (w1.node_name, w2.node_name))
        nc_client_wl = w1
        nc_server_wl = w2
        bypass_test = 1
    elif w1.IsNaples():
        api.Logger.info("%s is Naples and nc client, %s is nc server" %
                        (w1.node_name, w2.node_name))
        nc_client_wl = w1
        nc_server_wl = w2
    elif w2.IsNaples():
        api.Logger.info("%s is Naples and nc client, %s is nc server" %
                        (w2.node_name, w1.node_name))
        nc_client_wl = w2
        nc_server_wl = w1
    # NOTE(review): if neither workload is Naples, nc_client_wl/nc_server_wl
    # are unbound and the next statement raises NameError — presumably this
    # testcase is only scheduled with at least one Naples node; confirm.
    req = api.Trigger_CreateExecuteCommandsRequest(serial=True)
    tc.cmd_descr = "Server: %s(%s) <--> Client: %s(%s) on %s port %s" %\
        (nc_server_wl.workload_name, nc_server_wl.ip_address,
         nc_client_wl.workload_name, nc_client_wl.ip_address,
         tc.iterators.protocol, tc.iterators.port)
    api.Logger.info("Starting NC test over IPSec from %s" % (tc.cmd_descr))
    if bypass_test == 0:
        cmd_cookie = "Creating test file on %s" % (nc_client_wl.workload_name)
        api.Trigger_AddCommand(
            req, nc_client_wl.node_name, nc_client_wl.workload_name,
            "base64 /dev/urandom | head -1000 > ipsec_client.dat")
        tc.cmd_cookies.append(cmd_cookie)
        # Lower MTU so ESP-encapsulated packets don't need fragmentation.
        cmd_cookie = "Setting MTU to smaller value on %s" % (
            nc_client_wl.workload_name)
        api.Trigger_AddCommand(req, nc_client_wl.node_name,
                               nc_client_wl.workload_name,
                               "ifconfig %s mtu 1048" % nc_client_wl.interface)
        tc.cmd_cookies.append(cmd_cookie)
        cmd_cookie = "Running nc server on %s" % (nc_server_wl.workload_name)
        if tc.iterators.protocol == "tcp":
            api.Trigger_AddCommand(req, nc_server_wl.node_name,
                                   nc_server_wl.workload_name,
                                   "nc -l %s > ipsec_server.dat" %
                                   (tc.iterators.port),
                                   background=True)
        else:
            api.Trigger_AddCommand(req, nc_server_wl.node_name,
                                   nc_server_wl.workload_name,
                                   "nc --udp -l %s > ipsec_server.dat" %
                                   (tc.iterators.port),
                                   background=True)
        tc.cmd_cookies.append(cmd_cookie)
        cmd_cookie = "Running nc client on %s" % (nc_client_wl.workload_name)
        if tc.iterators.protocol == "tcp":
            api.Trigger_AddCommand(
                req, nc_client_wl.node_name, nc_client_wl.workload_name,
                "nc %s %s < ipsec_client.dat" %
                (nc_server_wl.ip_address, tc.iterators.port))
        else:
            api.Trigger_AddCommand(
                req, nc_client_wl.node_name, nc_client_wl.workload_name,
                "nc --udp %s %s < ipsec_client.dat" %
                (nc_server_wl.ip_address, tc.iterators.port))
        tc.cmd_cookies.append(cmd_cookie)
    else:
        # Bypass path: create empty files so the copy-back below succeeds.
        cmd_cookie = "Creating dummy file on %s" % (nc_client_wl.workload_name)
        api.Trigger_AddCommand(
            req, nc_client_wl.node_name, nc_client_wl.workload_name,
            "rm -f ipsec_client.dat ; touch ipsec_client.dat")
        tc.cmd_cookies.append(cmd_cookie)
        cmd_cookie = "Creating dummy file on %s" % (nc_server_wl.workload_name)
        api.Trigger_AddCommand(
            req, nc_server_wl.node_name, nc_server_wl.workload_name,
            "rm -f ipsec_server.dat ; touch ipsec_server.dat")
        tc.cmd_cookies.append(cmd_cookie)
    # Collect IPsec state from both ends for post-mortem debugging.
    if nc_client_wl.IsNaples():
        cmd_cookie = "IPSec state on %s AFTER running nc test" % (
            nc_client_wl.node_name)
        api.Trigger_AddNaplesCommand(
            req, nc_client_wl.node_name,
            "/nic/bin/halctl show ipsec-global-stats")
        tc.cmd_cookies.append(cmd_cookie)
    else:
        cmd_cookie = "IPSec state on %s AFTER running nc test" % (
            nc_client_wl.node_name)
        api.Trigger_AddCommand(req, nc_client_wl.node_name,
                               nc_client_wl.workload_name,
                               "sudo ip xfrm policy show")
        tc.cmd_cookies.append(cmd_cookie)
    if nc_server_wl.IsNaples():
        cmd_cookie = "IPSec state on %s AFTER running nc test" % (
            nc_server_wl.node_name)
        api.Trigger_AddNaplesCommand(
            req, nc_server_wl.node_name,
            "/nic/bin/halctl show ipsec-global-stats")
        tc.cmd_cookies.append(cmd_cookie)
    else:
        cmd_cookie = "IPSec state on %s AFTER running nc test" % (
            nc_server_wl.node_name)
        api.Trigger_AddCommand(req, nc_server_wl.node_name,
                               nc_server_wl.workload_name,
                               "sudo ip xfrm policy show")
        tc.cmd_cookies.append(cmd_cookie)
    trig_resp = api.Trigger(req)
    term_resp = api.Trigger_TerminateAllCommands(trig_resp)
    tc.resp = api.Trigger_AggregateCommandsResponse(trig_resp, term_resp)
    # Copy both transfer files back so Verify can diff client vs server.
    resp = api.CopyFromWorkload(nc_client_wl.node_name,
                                nc_client_wl.workload_name,
                                ['ipsec_client.dat'], tc.GetLogsDir())
    if resp is None:
        api.Logger.error("Could not find ipsec_client.dat")
        return api.types.status.FAILURE
    resp = api.CopyFromWorkload(nc_server_wl.node_name,
                                nc_server_wl.workload_name,
                                ['ipsec_server.dat'], tc.GetLogsDir())
    if resp is None:
        api.Logger.error("Could not find ipsec_server.dat")
        return api.types.status.FAILURE
    return api.types.status.SUCCESS
def Trigger(tc):
    """Verify endpoint-mirror (ERSPAN) telemetry.

    Loads the mirror/endpoint template configs, pushes the
    InterfaceMirrorSession (collector) object followed by the updated
    Endpoint objects to the Naples node, starts background tcpdump captures
    of the mirrored GRE traffic on every collector, triggers traffic, and
    finally tears the mirror config down again.

    Side effects: sets tc.resp_tcpdump_erspan and tc.resp_cleanup for the
    Verify stage, and tc.error on failure.
    Returns api.types.status.SUCCESS or FAILURE.
    """
    if tc.ignore == True:
        return api.types.status.SUCCESS

    if tc.error == True:
        return api.types.status.FAILURE

    tc.resp_tcpdump_erspan = None
    tc.resp_cleanup = None

    protoDir = api.GetTopologyDirectory() +\
               "/gen/telemetry/{}/{}".format('mirror', 'endpoint')
    # Fix: protoDir was previously passed as a stray second positional
    # argument to Logger.info; format it into the message instead.
    api.Logger.info("Template Config files location: {}".format(protoDir))

    policies = utils.GetTargetJsons('mirror', 'endpoint')
    for policy_json in policies:
        #
        # Get template-Mirror Config
        #
        # Fix: original read '"... {}", format(policy_json)' — a comma typo
        # that called the builtin format() and left the placeholder unfilled.
        api.Logger.info("Adding one config object for {}".format(policy_json))
        newObjects = agent_api.AddOneConfig(policy_json)
        if len(newObjects) == 0:
            api.Logger.error("Adding new objects to store failed")
            tc.error = True
            return api.types.status.FAILURE

        #
        # Modify template-Mirror Config to make sure that Naples-node
        # act as either source or destination
        #
        # Set up Collector in the remote node
        #
        if newObjects[0].kind == 'InterfaceMirrorSession':
            tc.ep_collector_objects = newObjects
            agent_api.RemoveConfigObjects(tc.ep_collector_objects)
        elif newObjects[0].kind == 'Endpoint':
            tc.endpoint_objects = newObjects
            agent_api.RemoveConfigObjects(tc.endpoint_objects)
            updateEndpointObjectTempl(tc, tc.endpoint_objects,
                                      tc.store_endpoint_objects[0])

    for i in range(0, len(policies)):
        #
        # Push Collector object (first iteration only)
        #
        if i == 0:
            colObjects = tc.ep_collector_objects
            if tc.iterators.session == 'single':
                ret = eutils.generateEpCollectorConfig(tc, colObjects)
            else:
                ret = eutils.generateEpCollectorConfigForMultiMirrorSession(
                    tc, colObjects)
            if ret != api.types.status.SUCCESS:
                api.Logger.error("Unable to identify Collector Workload")
                tc.error = True
                return api.types.status.FAILURE

            api.Logger.info("Pushing collector objects")
            ret = agent_api.PushConfigObjects(colObjects,
                                              [tc.naples.node_name],
                                              [tc.naples_device_name])
            if ret != api.types.status.SUCCESS:
                api.Logger.error("Unable to push collector objects")
                tc.error = True
                return api.types.status.FAILURE
            continue

        api.Logger.info("Generating Endpoint objects")
        cfg_api.PrintConfigsObjects(colObjects)

        #
        # Update Endpoint objects to reference the pushed collector
        #
        epObjects = tc.endpoint_objects
        ret = eutils.generateEndpointConfig(tc, epObjects, colObjects)
        if ret != api.types.status.SUCCESS:
            # Roll back the already-pushed collector config before bailing.
            agent_api.DeleteConfigObjects(tc.ep_collector_objects,
                                          [tc.naples.node_name],
                                          [tc.naples_device_name])
            api.Logger.error("Unable to identify Endpoints")
            tc.error = True
            return api.types.status.FAILURE

        api.Logger.info("Pushing Endpoint objects")
        ret = agent_api.UpdateConfigObjects(epObjects,
                                            [tc.naples.node_name],
                                            [tc.naples_device_name])
        if ret != api.types.status.SUCCESS:
            agent_api.DeleteConfigObjects(tc.ep_collector_objects,
                                          [tc.naples.node_name],
                                          [tc.naples_device_name])
            api.Logger.error("Unable to update endpoint objects")
            tc.error = True
            return api.types.status.FAILURE

    #
    # Establish Forwarding set up between Naples-peer and Collectors
    #
    eutils.establishForwardingSetup(tc)

    #
    # Set up background TCPDUMP's on every collector to capture the
    # mirrored (GRE / IP-proto-47) packets.
    #
    req_tcpdump_erspan = api.Trigger_CreateExecuteCommandsRequest(\
                         serial = True)
    for c in range(0, len(tc.ep_collector)):
        idx = tc.ep_collector_idx[c]
        if tc.ep_collector[c].IsNaples():
            ### TODO - run & revisit for windows case and fix any issues.
            if api.GetNodeOs(tc.naples.node_name) == "windows":
                intfGuid = ionic_utils.winIntfGuid(
                    tc.ep_collector[c].node_name,
                    tc.ep_collector[c].interface)
                intfVal = str(
                    ionic_utils.winTcpDumpIdx(tc.ep_collector[c].node_name,
                                              intfGuid))
                cmd = ("sudo /mnt/c/Windows/System32/tcpdump.exe -c 1000 "
                       "-XX -vv -i {} ip proto 47 and dst {} -U "
                       "-w ep-mirror-{}.pcap").format(
                           intfVal, tc.collector_ip_address[idx], c)
            else:
                intfVal = tc.ep_collector[c].interface
                cmd = ("tcpdump -c 1000 -XX -vv -nni {} ip proto gre "
                       "and dst {} --immediate-mode -U "
                       "-w ep-mirror-{}.pcap").format(
                           intfVal, tc.collector_ip_address[idx], c)
        else:
            cmd = ("tcpdump -p -c 1000 -XX -vv -nni {} ip proto gre "
                   "and dst {} --immediate-mode -U "
                   "-w ep-mirror-{}.pcap").format(
                       tc.ep_collector[c].interface,
                       tc.collector_ip_address[idx], c)
        eutils.add_command(req_tcpdump_erspan, tc.ep_collector[c], cmd, True)

    resp_tcpdump_erspan = api.Trigger(req_tcpdump_erspan)
    for cmd in resp_tcpdump_erspan.commands:
        api.PrintCommandResults(cmd)

    #
    # Classic mode requires a delay to make sure that TCPDUMP background
    # process is fully up
    #
    if tc.classic_mode == True:
        time.sleep(2)

    #
    # Trigger packets for ERSPAN to take effect
    #
    tc.dest_port = '120'
    if api.GetNodeOs(tc.naples.node_name) == 'linux' or api.GetNodeOs(
            tc.naples.node_name) == 'windows':
        eutils.triggerTrafficInClassicModeLinux(tc)
    else:
        eutils.triggerTrafficInHostPinModeOrFreeBSD(tc)

    #
    # Dump sessions/flows/P4-tables for debug purposes
    #
    eutils.showSessionAndP4TablesForDebug(tc, tc.ep_collector,
                                          tc.ep_collector_idx)

    #
    # Terminate TCPDUMP background process
    #
    term_resp_tcpdump_erspan = api.Trigger_TerminateAllCommands(\
                               resp_tcpdump_erspan)
    tc.resp_tcpdump_erspan = api.Trigger_AggregateCommandsResponse(\
                             resp_tcpdump_erspan, term_resp_tcpdump_erspan)

    if api.GetNodeOs(tc.naples.node_name) == "windows":
        # Explicitly stop any leftover tcpdump.exe processes on windows.
        req = api.Trigger_CreateExecuteCommandsRequest(serial=True)
        cmd = api.WINDOWS_POWERSHELL_CMD + " Stop-Process -Name 'tcpdump' -Force"
        api.Trigger_AddCommand(req, tc.naples.node_name,
                               tc.naples.workload_name, cmd,
                               background=False)
        resp = api.Trigger(req)

    # Delete the objects
    eutils.deGenerateEndpointConfig(tc, tc.endpoint_objects,
                                    tc.ep_collector_objects)
    agent_api.UpdateConfigObjects(tc.endpoint_objects, [tc.naples.node_name],
                                  [tc.naples_device_name])
    agent_api.DeleteConfigObjects(tc.ep_collector_objects,
                                  [tc.naples.node_name],
                                  [tc.naples_device_name])

    #
    # Make sure that Mirror-config has been removed
    #
    tc.resp_cleanup = eutils.showP4TablesForValidation(tc)

    return api.types.status.SUCCESS
def Trigger(tc):
    # End-to-end IPsec traffic test across the first two workload nodes:
    #   1) Configure IPsec on each node — via agent JSON templates when the
    #      node is Naples, otherwise directly with Linux "ip xfrm".
    #   2) Run an iperf server/client pair over the tunnel (skipped when
    #      both nodes are Naples — see bypass_test).
    #   3) Collect post-traffic IPsec state (halctl / ip xfrm).
    # Returns api.types.status.SUCCESS or FAILURE.
    tc.cmd_cookies = []
    tc.cmd_cookies1 = []
    tc.cmd_cookies2 = []
    nodes = api.GetWorkloadNodeHostnames()
    push_node_0 = [nodes[0]]
    push_node_1 = [nodes[1]]

    # Existing IPsec objects in the agent store; each JSON template below is
    # only added/pushed when no objects of that kind exist yet.
    encrypt_objects = netagent_cfg_api.QueryConfigs(kind='IPSecSAEncrypt')
    decrypt_objects = netagent_cfg_api.QueryConfigs(kind='IPSecSADecrypt')
    policy_objects = netagent_cfg_api.QueryConfigs(kind='IPSecPolicy')

    # Configure IPsec on Node 1
    if api.IsNaplesNode(nodes[0]):
        if len(encrypt_objects) == 0:
            newObjects = netagent_cfg_api.AddOneConfig(
                api.GetTopologyDirectory() +
                "/ipsec/ipsec_encryption_node1.json")
            if len(newObjects) == 0:
                api.Logger.error(
                    "Adding new objects to store failed for ipsec_encryption_node1.json"
                )
                return api.types.status.FAILURE
            ret = netagent_cfg_api.PushConfigObjects(newObjects,
                                                     node_names=push_node_0,
                                                     ignore_error=True)
            if ret != api.types.status.SUCCESS:
                api.Logger.error(
                    "Unable to push ipsec-encryption objects to node %s" %
                    nodes[0])
                return api.types.status.FAILURE
            get_config_objects = netagent_cfg_api.GetConfigObjects(newObjects)
            if len(get_config_objects) == 0:
                api.Logger.error("Unable to fetch newly pushed objects")
                return api.types.status.FAILURE
        if len(decrypt_objects) == 0:
            newObjects = netagent_cfg_api.AddOneConfig(
                api.GetTopologyDirectory() +
                "/ipsec/ipsec_decryption_node1.json")
            if len(newObjects) == 0:
                api.Logger.error(
                    "Adding new objects to store failed for ipsec_decryption_node1.json"
                )
                return api.types.status.FAILURE
            ret = netagent_cfg_api.PushConfigObjects(newObjects,
                                                     node_names=push_node_0,
                                                     ignore_error=True)
            if ret != api.types.status.SUCCESS:
                # NOTE(review): message says "encryption" but this is the
                # decryption config — looks like a copy/paste artifact.
                api.Logger.error(
                    "Unable to push ipsec-encryption objects to node %s" %
                    nodes[0])
                return api.types.status.FAILURE
            get_config_objects = netagent_cfg_api.GetConfigObjects(newObjects)
            if len(get_config_objects) == 0:
                api.Logger.error("Unable to fetch newly pushed objects")
                return api.types.status.FAILURE
        if len(policy_objects) == 0:
            newObjects = netagent_cfg_api.AddOneConfig(
                api.GetTopologyDirectory() +
                "/ipsec/ipsec_policies_node1.json")
            if len(newObjects) == 0:
                api.Logger.error(
                    "Adding new objects to store failed for ipsec_policies_node1.json"
                )
                return api.types.status.FAILURE
            ret = netagent_cfg_api.PushConfigObjects(newObjects,
                                                     node_names=push_node_0,
                                                     ignore_error=True)
            if ret != api.types.status.SUCCESS:
                api.Logger.error(
                    "Unable to push ipsec-policy objects to node %s" %
                    nodes[0])
                return api.types.status.FAILURE
            get_config_objects = netagent_cfg_api.GetConfigObjects(newObjects)
            if len(get_config_objects) == 0:
                api.Logger.error("Unable to fetch newly pushed objects")
                return api.types.status.FAILURE
    else:
        # Non-Naples node 1: program kernel IPsec (policies + SAs) directly
        # with "ip xfrm" for every (port, SPI, key) tuple.
        workloads = api.GetWorkloads(nodes[0])
        w1 = workloads[0]
        req1 = api.Trigger_CreateExecuteCommandsRequest(serial=True)
        api.Trigger_AddCommand(req1, w1.node_name, w1.workload_name,
                               "sudo ip xfrm state flush")
        api.Trigger_AddCommand(req1, w1.node_name, w1.workload_name,
                               "sudo ip xfrm policy flush")
        for port, spi, aead in zip(tc.args.ports_list, tc.args.spi_list,
                                   tc.args.aead_list):
            api.Trigger_AddCommand(
                req1, w1.node_name, w1.workload_name,
                "sudo ip xfrm policy add src 192.168.100.103/32 dst 192.168.100.101/32 proto tcp dport %s dir in tmpl src 192.168.100.103 dst 192.168.100.101 proto esp mode tunnel"
                % port)
            api.Trigger_AddCommand(
                req1, w1.node_name, w1.workload_name,
                "sudo ip xfrm policy add src 192.168.100.103/32 dst 192.168.100.101/32 proto tcp dport %s dir fwd tmpl src 192.168.100.103 dst 192.168.100.101 proto esp mode tunnel"
                % port)
            api.Trigger_AddCommand(
                req1, w1.node_name, w1.workload_name,
                "sudo ip xfrm policy add src 192.168.100.101/32 dst 192.168.100.103/32 proto tcp sport %s dir out tmpl src 192.168.100.101 dst 192.168.100.103 proto esp mode tunnel"
                % port)
            api.Trigger_AddCommand(
                req1, w1.node_name, w1.workload_name,
                "sudo ip xfrm state add src 192.168.100.103 dst 192.168.100.101 proto esp spi %s mode tunnel aead 'rfc4106(gcm(aes))' %s 128 sel src 192.168.100.103/32 dst 192.168.100.101/32"
                % (spi, aead))
            api.Trigger_AddCommand(
                req1, w1.node_name, w1.workload_name,
                "sudo ip xfrm state add src 192.168.100.101 dst 192.168.100.103 proto esp spi %s mode tunnel aead 'rfc4106(gcm(aes))' %s 128 sel src 192.168.100.101/32 dst 192.168.100.103/32"
                % (spi, aead))
            api.Trigger_AddCommand(
                req1, w1.node_name, w1.workload_name,
                "sudo ip xfrm policy add src 192.168.100.103/32 dst 192.168.100.101/32 proto udp dport %s dir in tmpl src 192.168.100.103 dst 192.168.100.101 proto esp mode tunnel"
                % port)
            api.Trigger_AddCommand(
                req1, w1.node_name, w1.workload_name,
                "sudo ip xfrm policy add src 192.168.100.103/32 dst 192.168.100.101/32 proto udp dport %s dir fwd tmpl src 192.168.100.103 dst 192.168.100.101 proto esp mode tunnel"
                % port)
            api.Trigger_AddCommand(
                req1, w1.node_name, w1.workload_name,
                "sudo ip xfrm policy add src 192.168.100.101/32 dst 192.168.100.103/32 proto udp sport %s dir out tmpl src 192.168.100.101 dst 192.168.100.103 proto esp mode tunnel"
                % port)
        api.Trigger_AddCommand(req1, w1.node_name, w1.workload_name,
                               "sudo ip xfrm state list")
        api.Trigger_AddCommand(req1, w1.node_name, w1.workload_name,
                               "sudo ip xfrm policy list")
        # Responses are not inspected; this is fire-and-forget setup.
        trig_resp1 = api.Trigger(req1)
        term_resp1 = api.Trigger_TerminateAllCommands(trig_resp1)

    # Configure IPsec on Node 2 (mirror image of the node-1 setup)
    if api.IsNaplesNode(nodes[1]):
        if len(encrypt_objects) == 0:
            newObjects = netagent_cfg_api.AddOneConfig(
                api.GetTopologyDirectory() +
                "/ipsec/ipsec_encryption_node2.json")
            if len(newObjects) == 0:
                api.Logger.error(
                    "Adding new objects to store failed for ipsec_encryption_node2.json"
                )
                return api.types.status.FAILURE
            ret = netagent_cfg_api.PushConfigObjects(newObjects,
                                                     node_names=push_node_1,
                                                     ignore_error=True)
            if ret != api.types.status.SUCCESS:
                api.Logger.error(
                    "Unable to push ipsec-encryption objects to node %s" %
                    nodes[1])
                return api.types.status.FAILURE
            get_config_objects = netagent_cfg_api.GetConfigObjects(newObjects)
            if len(get_config_objects) == 0:
                api.Logger.error("Unable to fetch newly pushed objects")
                # NOTE(review): the node-2 fetch failures deliberately do NOT
                # abort (returns commented out) — confirm this is intended.
                #return api.types.status.FAILURE
        if len(decrypt_objects) == 0:
            newObjects = netagent_cfg_api.AddOneConfig(
                api.GetTopologyDirectory() +
                "/ipsec/ipsec_decryption_node2.json")
            if len(newObjects) == 0:
                api.Logger.error(
                    "Adding new objects to store failed for ipsec_decryption_node2.json"
                )
                return api.types.status.FAILURE
            ret = netagent_cfg_api.PushConfigObjects(newObjects,
                                                     node_names=push_node_1,
                                                     ignore_error=True)
            if ret != api.types.status.SUCCESS:
                api.Logger.error(
                    "Unable to push ipsec-encryption objects to node %s" %
                    nodes[1])
                return api.types.status.FAILURE
            get_config_objects = netagent_cfg_api.GetConfigObjects(newObjects)
            if len(get_config_objects) == 0:
                api.Logger.error("Unable to fetch newly pushed objects")
                #return api.types.status.FAILURE
        if len(policy_objects) == 0:
            newObjects = netagent_cfg_api.AddOneConfig(
                api.GetTopologyDirectory() +
                "/ipsec/ipsec_policies_node2.json")
            if len(newObjects) == 0:
                api.Logger.error(
                    "Adding new objects to store failed for ipsec_policies_node2.json"
                )
                return api.types.status.FAILURE
            ret = netagent_cfg_api.PushConfigObjects(newObjects,
                                                     node_names=push_node_1,
                                                     ignore_error=True)
            if ret != api.types.status.SUCCESS:
                api.Logger.error(
                    "Unable to push ipsec-encryption objects to node %s" %
                    nodes[1])
                return api.types.status.FAILURE
            get_config_objects = netagent_cfg_api.GetConfigObjects(newObjects)
            if len(get_config_objects) == 0:
                api.Logger.error("Unable to fetch newly pushed objects")
                #return api.types.status.FAILURE
    else:
        # Non-Naples node 2: kernel IPsec via "ip xfrm", directions reversed
        # relative to node 1.
        workloads = api.GetWorkloads(nodes[1])
        w2 = workloads[0]
        req2 = api.Trigger_CreateExecuteCommandsRequest(serial=True)
        api.Trigger_AddCommand(req2, w2.node_name, w2.workload_name,
                               "sudo ip xfrm state flush")
        api.Trigger_AddCommand(req2, w2.node_name, w2.workload_name,
                               "sudo ip xfrm policy flush")
        for port, spi, aead in zip(tc.args.ports_list, tc.args.spi_list,
                                   tc.args.aead_list):
            api.Trigger_AddCommand(
                req2, w2.node_name, w2.workload_name,
                "sudo ip xfrm policy add src 192.168.100.101/32 dst 192.168.100.103/32 proto tcp dport %s dir in tmpl src 192.168.100.101 dst 192.168.100.103 proto esp mode tunnel"
                % port)
            api.Trigger_AddCommand(
                req2, w2.node_name, w2.workload_name,
                "sudo ip xfrm policy add src 192.168.100.101/32 dst 192.168.100.103/32 proto tcp dport %s dir fwd tmpl src 192.168.100.101 dst 192.168.100.103 proto esp mode tunnel"
                % port)
            api.Trigger_AddCommand(
                req2, w2.node_name, w2.workload_name,
                "sudo ip xfrm policy add src 192.168.100.103/32 dst 192.168.100.101/32 proto tcp sport %s dir out tmpl src 192.168.100.103 dst 192.168.100.101 proto esp mode tunnel"
                % port)
            api.Trigger_AddCommand(
                req2, w2.node_name, w2.workload_name,
                "sudo ip xfrm state add src 192.168.100.101 dst 192.168.100.103 proto esp spi %s mode tunnel aead 'rfc4106(gcm(aes))' %s 128 sel src 192.168.100.101/32 dst 192.168.100.103/32"
                % (spi, aead))
            api.Trigger_AddCommand(
                req2, w2.node_name, w2.workload_name,
                "sudo ip xfrm state add src 192.168.100.103 dst 192.168.100.101 proto esp spi %s mode tunnel aead 'rfc4106(gcm(aes))' %s 128 sel src 192.168.100.103/32 dst 192.168.100.101/32"
                % (spi, aead))
            api.Trigger_AddCommand(
                req2, w2.node_name, w2.workload_name,
                "sudo ip xfrm policy add src 192.168.100.101/32 dst 192.168.100.103/32 proto udp dport %s dir in tmpl src 192.168.100.101 dst 192.168.100.103 proto esp mode tunnel"
                % port)
            api.Trigger_AddCommand(
                req2, w2.node_name, w2.workload_name,
                "sudo ip xfrm policy add src 192.168.100.101/32 dst 192.168.100.103/32 proto udp dport %s dir fwd tmpl src 192.168.100.101 dst 192.168.100.103 proto esp mode tunnel"
                % port)
            api.Trigger_AddCommand(
                req2, w2.node_name, w2.workload_name,
                "sudo ip xfrm policy add src 192.168.100.103/32 dst 192.168.100.101/32 proto udp sport %s dir out tmpl src 192.168.100.103 dst 192.168.100.101 proto esp mode tunnel"
                % port)
        api.Trigger_AddCommand(req2, w2.node_name, w2.workload_name,
                               "sudo ip xfrm state list")
        api.Trigger_AddCommand(req2, w2.node_name, w2.workload_name,
                               "sudo ip xfrm policy list")
        # Responses are not inspected; this is fire-and-forget setup.
        trig_resp2 = api.Trigger(req2)
        term_resp2 = api.Trigger_TerminateAllCommands(trig_resp2)

    # Pick the first workload on each node for the iperf run.
    workloads = api.GetWorkloads(nodes[0])
    w1 = workloads[0]
    workloads = api.GetWorkloads(nodes[1])
    w2 = workloads[0]

    bypass_test = 0
    if w1.IsNaples() and w2.IsNaples():
        api.Logger.info(
            "Both workloads are Naples, %s is iperf client, %s is iperf server, bypassing test"
            % (w1.node_name, w2.node_name))
        iperf_client_wl = w1
        iperf_server_wl = w2
        bypass_test = 1
    elif w1.IsNaples():
        api.Logger.info("%s is Naples and iperf client, %s is iperf server" %
                        (w1.node_name, w2.node_name))
        iperf_client_wl = w1
        iperf_server_wl = w2
    elif w2.IsNaples():
        api.Logger.info("%s is Naples and iperf client, %s is iperf server" %
                        (w2.node_name, w1.node_name))
        iperf_client_wl = w2
        iperf_server_wl = w1
    # NOTE(review): if neither workload is Naples, iperf_client_wl /
    # iperf_server_wl are never assigned and the code below raises
    # NameError — confirm the topology guarantees at least one Naples node.

    req3 = api.Trigger_CreateExecuteCommandsRequest(serial=True)
    cmd_cookie = "Set rcv socket buffer size on %s" % (w1.workload_name)
    api.Trigger_AddCommand(
        req3, w1.node_name, w1.workload_name,
        "sysctl -w net.ipv4.tcp_rmem='4096 2147483647 2147483647'")
    tc.cmd_cookies.append(cmd_cookie)

    cmd_cookie = "Set rcv socket buffer size on %s" % (w2.workload_name)
    api.Trigger_AddCommand(
        req3, w2.node_name, w2.workload_name,
        "sysctl -w net.ipv4.tcp_rmem='4096 2147483647 2147483647'")
    tc.cmd_cookies.append(cmd_cookie)

    # Smaller MTU so ESP-encapsulated frames still fit on the wire.
    cmd_cookie = "Setting MTU to smaller value on %s" % (
        iperf_client_wl.workload_name)
    api.Trigger_AddCommand(req3, iperf_client_wl.node_name,
                           iperf_client_wl.workload_name,
                           "ifconfig %s mtu 1048" % iperf_client_wl.interface)
    tc.cmd_cookies.append(cmd_cookie)

    tc.cmd_descr = "Server: %s(%s) <--> Client: %s(%s) on %s" %\
                   (iperf_server_wl.workload_name, iperf_server_wl.ip_address,
                    iperf_client_wl.workload_name, iperf_client_wl.ip_address,
                    tc.iterators.protocol)
    api.Logger.info("Starting Iperf test over IPSec from %s" % (tc.cmd_descr))

    if bypass_test == 0:
        # One background iperf server per configured port.
        for port in tc.args.ports_list:
            cmd_cookie = "Running iperf server on %s port %s" % (
                iperf_server_wl.workload_name, port)
            api.Trigger_AddCommand(req3, iperf_server_wl.node_name,
                                   iperf_server_wl.workload_name,
                                   "iperf -s -p %s" % (port),
                                   background=True)
            tc.cmd_cookies.append(cmd_cookie)

    req4 = api.Trigger_CreateExecuteCommandsRequest(serial=False)
    if bypass_test == 0:
        cmd_cookie = "Brief Sleep"
        api.Trigger_AddCommand(req4, iperf_client_wl.node_name,
                               iperf_client_wl.workload_name, "sleep 1")
        tc.cmd_cookies.append(cmd_cookie)
        for port in tc.args.ports_list:
            cmd_cookie = "Running iperf client on %s port %s" % (
                iperf_client_wl.workload_name, port)
            if tc.iterators.protocol == "tcp":
                api.Trigger_AddCommand(
                    req4, iperf_client_wl.node_name,
                    iperf_client_wl.workload_name, "iperf -c %s -p %s -M %s" %
                    (iperf_server_wl.ip_address, port, tc.iterators.pktsize))
            else:
                api.Trigger_AddCommand(
                    req4, iperf_client_wl.node_name,
                    iperf_client_wl.workload_name,
                    "iperf --udp -c %s -p %s -M %s" %
                    (iperf_server_wl.ip_address, port, tc.iterators.pktsize))
            tc.cmd_cookies.append(cmd_cookie)

    # Post-traffic IPsec state collection.
    req5 = api.Trigger_CreateExecuteCommandsRequest(serial=True)
    if w1.IsNaples():
        cmd_cookie = "IPSec state on %s AFTER running iperf traffic" % (
            w1.node_name)
        api.Trigger_AddNaplesCommand(
            req5, w1.node_name, "/nic/bin/halctl show ipsec-global-stats")
        tc.cmd_cookies.append(cmd_cookie)
    else:
        cmd_cookie = "IPSec state on %s AFTER running iperf traffic" % (
            w1.node_name)
        api.Trigger_AddCommand(req5, w1.node_name, w1.workload_name,
                               "sudo ip xfrm policy show")
        tc.cmd_cookies.append(cmd_cookie)

    if w2.IsNaples():
        cmd_cookie = "IPSec state on %s AFTER running iperf traffic" % (
            w2.node_name)
        api.Trigger_AddNaplesCommand(
            req5, w2.node_name, "/nic/bin/halctl show ipsec-global-stats")
        tc.cmd_cookies.append(cmd_cookie)
    else:
        cmd_cookie = "IPSec state on %s AFTER running iperf traffic" % (
            w2.node_name)
        api.Trigger_AddCommand(req5, w2.node_name, w2.workload_name,
                               "sudo ip xfrm policy show")
        tc.cmd_cookies.append(cmd_cookie)

    trig_resp3 = api.Trigger(req3)
    trig_resp4 = api.Trigger(req4)
    trig_resp5 = api.Trigger(req5)
    term_resp3 = api.Trigger_TerminateAllCommands(trig_resp3)
    term_resp4 = api.Trigger_TerminateAllCommands(trig_resp4)
    term_resp5 = api.Trigger_TerminateAllCommands(trig_resp5)
    # Only the iperf-client batch (req4) is verified downstream; req3/req5
    # responses are triggered and terminated but otherwise unused.
    agg_resp4 = api.Trigger_AggregateCommandsResponse(trig_resp4, term_resp4)
    tc.resp = agg_resp4
    return api.types.status.SUCCESS
def Trigger(tc):
    # Mirror-session test driver: for each mirror policy JSON of the
    # iterated protocol, push the config, run traffic validation, flip the
    # first collector's ERSPAN type (type-2 <-> type-3) and rerun, then
    # delete/remove the objects. Uses module-level wl_sec_ip_info and
    # is_wl_type_bm (defined elsewhere in this file).
    # Returns the aggregate result and records the test count on tc.
    #if tc.skip: return api.types.status.SUCCESS
    policies = utils.GetTargetJsons('mirror', tc.iterators.proto)
    result = api.types.status.SUCCESS
    count = 0
    ret_count = 0
    collector_dest = []
    collector_wl = []
    collector_type = []
    for policy_json in policies:
        # Reset per-policy collector bookkeeping.
        collector_dest.clear()
        collector_wl.clear()
        collector_type.clear()
        verif_json = utils.GetVerifJsonFromPolicyJson(policy_json)
        api.Logger.info("Using policy_json = {}".format(policy_json))
        newObjects = agent_api.AddOneConfig(policy_json)
        if len(newObjects) == 0:
            api.Logger.error("Adding new objects to store failed")
            return api.types.status.FAILURE
        ret = agent_api.PushConfigObjects(newObjects)
        if ret != api.types.status.SUCCESS:
            api.Logger.error("Unable to push mirror objects")
            return api.types.status.FAILURE
        utils.DumpMirrorSessions()

        # Get collector to find the workload
        for obj in newObjects:
            for obj_collector in obj.spec.collectors:
                coll_dst = obj_collector.export_config.destination
                coll_type = obj_collector.type
                collector_dest.append(coll_dst)
                collector_type.append(coll_type)
                api.Logger.info(
                    f"export-dest: {coll_dst}, erspan-type: {coll_type}")
        # Map each collector destination IP (primary or secondary) to its
        # workload object.
        for coll_dst in collector_dest:
            for wl in tc.workloads:
                if (wl.ip_address == coll_dst) or (
                        coll_dst in wl_sec_ip_info[wl.workload_name]):
                    collector_wl.append(wl)
        api.Logger.info("collect_dest len: {} collect_wl len: {}".format(
            len(collector_dest), len(collector_wl)))
        collector_info = utils.GetMirrorCollectorsInfo(
            collector_wl, collector_dest, collector_type)

        # First validation pass with the pushed config.
        ret = utils.RunAll(tc, verif_json, 'mirror', collector_info,
                           is_wl_type_bm)
        result = ret['res']
        ret_count = ret['count']
        count = count + ret_count
        if result != api.types.status.SUCCESS:
            api.Logger.info(
                "policy_json = {}, Encountered FAILURE, stopping".format(
                    policy_json))
            # Delete the objects
            agent_api.DeleteConfigObjects(newObjects)
            agent_api.RemoveConfigObjects(newObjects)
            break

        # Update collector
        newObjects = agent_api.QueryConfigs(kind='MirrorSession')
        # mirror config update to local collector is applicable only for
        # ESX topology
        if is_wl_type_bm is False:
            # Only the first queried session's first collector is toggled
            # (note the break) — collector_info[0] is kept in sync.
            for obj in newObjects:
                if obj.spec.collectors[0].type == utils.ERSPAN_TYPE_2:
                    obj.spec.collectors[0].type = utils.ERSPAN_TYPE_3
                    collector_info[0]['type'] = utils.ERSPAN_TYPE_3
                else:
                    obj.spec.collectors[0].type = utils.ERSPAN_TYPE_2
                    collector_info[0]['type'] = utils.ERSPAN_TYPE_2
                break
            # Now push the update as we modified
            agent_api.UpdateConfigObjects(newObjects)
            utils.DumpMirrorSessions()
            # Rerun the tests
            ret = utils.RunAll(tc, verif_json, 'mirror', collector_info,
                               is_wl_type_bm)
            result = ret['res']
            ret_count = ret['count']
            count = count + ret_count

        # Delete the objects
        agent_api.DeleteConfigObjects(newObjects)
        agent_api.RemoveConfigObjects(newObjects)
        api.Logger.info(
            "policy_json = {}, count = {}, total_count = {}".format(
                policy_json, ret_count, count))
        if result != api.types.status.SUCCESS:
            api.Logger.info(
                "policy_json = {}, Encountered FAILURE, stopping".format(
                    policy_json))
            break

    tc.SetTestCount(count)
    collector_dest.clear()
    collector_wl.clear()
    return result
def Trigger(tc):
    # Flow-ERSPAN mirror test driver: for each single-collector mirror
    # template of the iterated protocol, and for each entry in
    # tc.mirror_verif, push the mirror config to the Naples node, capture
    # mirrored GRE traffic with tcpdump on every flow collector, trigger
    # traffic, tear down, and validate both the captured ERSPAN packets and
    # the config cleanup. Returns the aggregate result; records test count.
    if tc.ignore == True:
        return api.types.status.SUCCESS

    if tc.error == True:
        return api.types.status.FAILURE

    protoDir = api.GetTopologyDirectory() +\
               "/gen/telemetry/{}/{}".format('mirror', tc.iterators.proto)
    # NOTE(review): protoDir is passed as a second positional argument, not
    # formatted into the message — whether it is rendered depends on the
    # api.Logger implementation; verify.
    api.Logger.info("Template Config files location: ", protoDir)

    result = api.types.status.SUCCESS
    count = 0

    policies = utils.GetTargetJsons('mirror', tc.iterators.proto)
    for policy_json in policies:
        #
        # Get template-Mirror Config
        #
        newObjects = agent_api.AddOneConfig(policy_json)
        if len(newObjects) == 0:
            api.Logger.error("Adding new objects to store failed")
            tc.error = True
            return api.types.status.FAILURE
        # Objects are removed from the store immediately; they are pushed
        # per-iteration below.
        agent_api.RemoveConfigObjects(newObjects)

        #
        # Ignore Multi-collector template config's, since Expanded-Telemetry
        # testbundle dynamically creates such config's
        #
        if len(newObjects[0].spec.collectors) > 1:
            continue

        #
        # Modify template-Mirror Config to make sure that Naples-node
        # act as either source or destination
        #
        # Set up Collector in the remote node
        #
        eutils.generateMirrorConfig(tc, policy_json, newObjects)

        ret_count = 0
        for i in range(0, len(tc.mirror_verif)):
            #
            # If Execution-Optimization is enabled, no need to run the test
            # for the same protocol more than once
            #
            if i > 0 and tc.mirror_verif[i]['protocol'] ==\
               tc.mirror_verif[i-1]['protocol']:
                continue

            #
            # Flow-ERSPAN for TCP-traffic is not tested (yet) in
            # Classic-mode until applicable pkt-trigger tools are identified
            #
            if tc.classic_mode == True and\
               tc.mirror_verif[i]['protocol'] == 'tcp':
                continue

            #
            # Push Mirror Config to Naples
            #
            ret = agent_api.PushConfigObjects(newObjects,
                                              [tc.naples.node_name],
                                              [tc.naples_device_name])
            if ret != api.types.status.SUCCESS:
                api.Logger.error("Unable to push mirror objects")
                tc.error = True
                return api.types.status.FAILURE

            #
            # Establish Forwarding set up between Naples-peer and Collectors
            #
            eutils.establishForwardingSetup(tc)

            req_tcpdump_erspan = api.Trigger_CreateExecuteCommandsRequest(\
                                 serial = True)
            for c in range(0, len(tc.flow_collector)):
                #
                # Set up TCPDUMP's on the collector
                #
                idx = tc.flow_collector_idx[c]
                if tc.flow_collector[c].IsNaples():
                    cmd = "tcpdump -c 600 -XX -vv -nni {} ip proto gre and \
                           dst {} --immediate-mode -U -w flow-mirror-{}.pcap"\
                          .format(tc.flow_collector[c].interface,
                                  tc.collector_ip_address[idx], c)
                else:
                    cmd = "tcpdump -p -c 600 -XX -vv -nni {} ip proto gre and \
                           dst {} --immediate-mode -U -w flow-mirror-{}.pcap"\
                          .format(tc.flow_collector[c].interface,
                                  tc.collector_ip_address[idx], c)
                eutils.add_command(req_tcpdump_erspan, tc.flow_collector[c],
                                   cmd, True)

            resp_tcpdump_erspan = api.Trigger(req_tcpdump_erspan)
            for cmd in resp_tcpdump_erspan.commands:
                api.PrintCommandResults(cmd)

            #
            # Classic mode requires a delay to make sure that TCPDUMP
            # background process is fully up
            #
            if tc.classic_mode == True:
                time.sleep(2)

            #
            # Trigger packets for ERSPAN to take effect
            #
            tc.protocol = tc.mirror_verif[i]['protocol']
            tc.dest_port = utils.GetDestPort(tc.mirror_verif[i]['port'])
            if api.GetNodeOs(tc.naples.node_name) == 'linux':
                eutils.triggerTrafficInClassicModeLinux(tc)
            else:
                eutils.triggerTrafficInHostPinModeOrFreeBSD(tc)

            #
            # Dump sessions/flows/P4-tables for debug purposes
            #
            eutils.showSessionAndP4TablesForDebug(tc, tc.flow_collector,
                                                  tc.flow_collector_idx)

            #
            # Terminate TCPDUMP background process
            #
            term_resp_tcpdump_erspan = api.Trigger_TerminateAllCommands(\
                                       resp_tcpdump_erspan)
            tc.resp_tcpdump_erspan = api.Trigger_AggregateCommandsResponse(\
                                     resp_tcpdump_erspan,
                                     term_resp_tcpdump_erspan)

            # Delete the objects
            agent_api.DeleteConfigObjects(newObjects, [tc.naples.node_name],
                                          [tc.naples_device_name])

            #
            # Make sure that Mirror-config has been removed
            #
            tc.resp_cleanup = eutils.showP4TablesForValidation(tc)

            #
            # Validate ERSPAN packets reception
            #
            tc.tcp_erspan_pkts_expected = \
                NUMBER_OF_TCP_ERSPAN_PACKETS_PER_SESSION
            # Bidirectional flows: double the per-direction packet counts.
            tc.udp_erspan_pkts_expected = (tc.udp_count << 1)
            tc.icmp_erspan_pkts_expected = (tc.icmp_count << 1)
            if tc.protocol == 'udp' and tc.iterators.proto == 'mixed':
                tc.protocol = 'udp-mixed'
                tc.icmp_erspan_pkts_expected = tc.udp_erspan_pkts_expected
            res_1 = eutils.validateErspanPackets(tc, tc.flow_collector,
                                                 tc.flow_collector_idx)

            #
            # Validate Config-cleanup
            #
            res_2 = eutils.validateConfigCleanup(tc)

            if res_1 == api.types.status.FAILURE or\
               res_2 == api.types.status.FAILURE:
                result = api.types.status.FAILURE

            if result == api.types.status.FAILURE:
                break

            ret_count += 1

        if result == api.types.status.FAILURE:
            break

        count += ret_count

    tc.SetTestCount(count)
    return result
def Trigger(tc):
    """Exercise SecurityProfile config CRUD through the netagent config API.

    Sequence: query the store -> verify agent copies match -> update TCP
    timeouts and verify the update took effect -> restore the original
    timeouts and verify again -> delete and re-push the store objects ->
    push a fresh profile from test_security_profile.json to one Naples
    node, then delete it from the agent and remove it from the store.

    Returns api.types.status.SUCCESS or FAILURE.
    """
    # Query will get the reference of objects on store.
    store_profile_objects = netagent_cfg_api.QueryConfigs(
        kind='SecurityProfile')
    if len(store_profile_objects) == 0:
        api.Logger.error("No security profile objects in store")
        return api.types.status.FAILURE

    # Get will return copy of pushed objects from the agent; this snapshot
    # also serves as the "old values" baseline for the update checks below.
    get_config_objects = netagent_cfg_api.GetConfigObjects(
        store_profile_objects)
    if len(get_config_objects) == 0:
        api.Logger.error("Unable to fetch security profile objects")
        return api.types.status.FAILURE

    if len(get_config_objects) != len(store_profile_objects):
        api.Logger.error(
            "Config mismatch, Get Objects : %d, Config store Objects : %d" %
            (len(get_config_objects), len(store_profile_objects)))
        return api.types.status.FAILURE

    # Now do an update of the objects.
    # (Renamed loop variable: the original shadowed the builtin `object`.)
    for profile in store_profile_objects:
        profile.spec.timeouts.tcp_connection_setup = "1200s"
        profile.spec.timeouts.tcp_half_close = "1400s"

    # Now push the update as we modified.
    netagent_cfg_api.UpdateConfigObjects(store_profile_objects)

    new_get_config_objects = netagent_cfg_api.GetConfigObjects(
        store_profile_objects)
    if len(new_get_config_objects) == 0:
        api.Logger.error(
            "Unable to fetch security profile objects after update")
        return api.types.status.FAILURE

    # Check whether the value has changed: agent copy must equal the store
    # value, and both must differ from the pre-update snapshot.
    for (get_object, store_object, old_object) in zip(
            new_get_config_objects, store_profile_objects,
            get_config_objects):
        if get_object.spec.timeouts.tcp_connection_setup != store_object.spec.timeouts.tcp_connection_setup or \
           old_object.spec.timeouts.tcp_connection_setup == store_object.spec.timeouts.tcp_connection_setup or \
           old_object.spec.timeouts.tcp_connection_setup == get_object.spec.timeouts.tcp_connection_setup:
            api.Logger.error("Update failed")
            return api.types.status.FAILURE

    # Now restore the values to the old ones.
    for profile, old_object in zip(store_profile_objects,
                                   get_config_objects):
        profile.spec.timeouts.tcp_connection_setup = \
            old_object.spec.timeouts.tcp_connection_setup
        profile.spec.timeouts.tcp_half_close = \
            old_object.spec.timeouts.tcp_half_close

    # Now push the update as we modified.
    netagent_cfg_api.UpdateConfigObjects(store_profile_objects)

    new_get_config_objects = netagent_cfg_api.GetConfigObjects(
        store_profile_objects)
    if len(new_get_config_objects) == 0:
        api.Logger.error(
            "Unable to fetch security profile objects after update")
        return api.types.status.FAILURE

    # After restore, agent copy, store value and the old snapshot must all
    # agree.
    for (get_object, store_object, old_object) in zip(
            new_get_config_objects, store_profile_objects,
            get_config_objects):
        if get_object.spec.timeouts.tcp_connection_setup != store_object.spec.timeouts.tcp_connection_setup or \
           old_object.spec.timeouts.tcp_connection_setup != store_object.spec.timeouts.tcp_connection_setup or \
           old_object.spec.timeouts.tcp_connection_setup != get_object.spec.timeouts.tcp_connection_setup:
            api.Logger.error("Second Update failed")
            return api.types.status.FAILURE

    # Now let's do a delete and verify the agent no longer has the objects.
    netagent_cfg_api.DeleteConfigObjects(store_profile_objects)

    get_config_objects = netagent_cfg_api.GetConfigObjects(
        store_profile_objects)
    if len(get_config_objects) != 0:
        api.Logger.error("Delete of objects failed")
        return api.types.status.FAILURE

    # Re-push the store objects and verify they are back.
    netagent_cfg_api.PushConfigObjects(store_profile_objects)

    get_config_objects = netagent_cfg_api.GetConfigObjects(
        store_profile_objects)
    if len(get_config_objects) == 0:
        api.Logger.error("Unable to fetch security profile objects")
        return api.types.status.FAILURE

    # Push a brand-new profile from the test topology to a single node.
    newObjects = netagent_cfg_api.AddOneConfig(
        api.GetTopologyDirectory() + "/test_cfg/test_security_profile.json")
    if len(newObjects) == 0:
        api.Logger.error("Adding new objects to store failed")
        return api.types.status.FAILURE

    nodes = api.GetNaplesHostnames()
    push_nodes = [nodes[0]]
    ret = netagent_cfg_api.PushConfigObjects(newObjects,
                                             node_names=push_nodes)
    if ret != api.types.status.SUCCESS:
        # Fix: this is a push failure — the message previously said "fetch".
        api.Logger.error("Unable to push security profile objects to node %s"
                         % nodes[0])
        return api.types.status.FAILURE

    # Get will return copy of pushed objects to agent.
    get_config_objects = netagent_cfg_api.GetConfigObjects(
        newObjects, node_names=push_nodes)
    if len(get_config_objects) == 0:
        api.Logger.error("Unable to fetch newly pushed objects")
        return api.types.status.FAILURE

    # Delete the objects that were pushed.
    netagent_cfg_api.DeleteConfigObjects(get_config_objects)

    get_config_objects = netagent_cfg_api.GetConfigObjects(
        newObjects, node_names=push_nodes)
    if len(get_config_objects) != 0:
        api.Logger.error("Delete of new objects failed")
        return api.types.status.FAILURE

    # Remove those objects completely from the store too.
    ret = netagent_cfg_api.RemoveConfigObjects(newObjects)
    if ret != api.types.status.SUCCESS:
        api.Logger.error("Remove of new objects failed")
        return api.types.status.FAILURE

    return api.types.status.SUCCESS
def Trigger(tc):
    """Push each flowmon policy json and validate exported flow records.

    For every policy file matching tc.iterators.proto:
      1. Add the flowmon config objects to the store and push them to the
         agent.
      2. Collect the export (collector) destinations from the pushed spec
         and map each destination back to the workload owning that IP.
      3. Run traffic validation via utils.RunAll() against the collectors.
      4. Delete and remove the config objects before the next policy.

    Records the accumulated per-policy test count on tc via
    tc.SetTestCount() and returns api.types.status.SUCCESS/FAILURE.
    """
    policies = utils.GetTargetJsons('flowmon', tc.iterators.proto)
    result = api.types.status.SUCCESS
    count = 0
    ret_count = 0
    export_cfg = []
    collector_wl = []
    for policy_json in policies:
        # Per-policy state; the same lists are reused across iterations.
        export_cfg.clear()
        collector_wl.clear()
        verif_json = utils.GetVerifJsonFromPolicyJson(policy_json)
        api.Logger.info("Using policy_json = {}".format(policy_json))
        newObjects = agent_api.AddOneConfig(policy_json)
        if len(newObjects) == 0:
            api.Logger.error("Adding new objects to store failed")
            return api.types.status.FAILURE
        ret = agent_api.PushConfigObjects(newObjects)
        if ret != api.types.status.SUCCESS:
            api.Logger.error("Unable to push flowmon objects")
            # Fix: drop the objects that were just added so a failed push
            # does not leave stale entries behind in the config store.
            agent_api.RemoveConfigObjects(newObjects)
            return api.types.status.FAILURE
        # Get collector to find the workload
        for obj in newObjects:
            for obj_export_cfg in obj.spec.exports:
                export_cfg.append(obj_export_cfg)
                api.Logger.info("export-dest: {} proto: {} port: {}".format(
                    obj_export_cfg.destination,
                    obj_export_cfg.proto_port.protocol,
                    obj_export_cfg.proto_port.port))
        # Map each collector destination to the workload owning that IP
        # (either its primary address or one of its secondary addresses).
        for coll_dst in export_cfg:
            for wl in tc.workloads:
                if (wl.ip_address == coll_dst.destination) or (
                        coll_dst.destination in wl_sec_ip_info[wl.workload_name]):
                    collector_wl.append(wl)
        api.Logger.info("collect_dest len: {} ".format(len(export_cfg)))
        api.Logger.info("collect_wl len: {} ".format(len(collector_wl)))
        collector_info = utils.GetFlowmonCollectorsInfo(
            collector_wl, export_cfg)
        utils.DumpFlowmonSessions()
        ret = utils.RunAll(tc, verif_json, 'flowmon', collector_info,
                           is_wl_type_bm)
        result = ret['res']
        ret_count = ret['count']
        count = count + ret_count
        if result != api.types.status.SUCCESS:
            api.Logger.error("Failed in Traffic validation")
            utils.DumpFlowmonSessions()
        # Always clean up this policy's objects before the next iteration.
        agent_api.DeleteConfigObjects(newObjects)
        agent_api.RemoveConfigObjects(newObjects)
        api.Logger.info(
            "policy_json = {}, count = {}, total_count = {}".format(
                policy_json, ret_count, count))
        if result != api.types.status.SUCCESS:
            api.Logger.info(
                "policy_json = {}, Encountered FAILURE, stopping".format(
                    policy_json))
            break
    tc.SetTestCount(count)
    export_cfg.clear()
    collector_wl.clear()
    return result
def Trigger(tc):
    """Flowmon CRUD test: create, traffic-validate, update, partially
    delete, then fully tear down flowmon sessions for each 'crud' policy.

    Per policy file:
      1. Create flowmon sessions with num_exports_at_create collectors and
         validate mirrored traffic capture.
      2. If the iterator asks for more collectors, update the sessions to
         tc.iterators.num_exports and re-validate.
      3. Delete all but one session and re-validate (checks collectors
         survive partial deletion), then delete the remaining sessions.

    Per-session delete objects are read from
    tc.test_iterator_data[i]['del_obj'] (populated by ConfigFlowmonSession
    — TODO confirm against that helper), and each entry is reset to {}
    once cleaned up. Records the accumulated test count via
    tc.SetTestCount() and returns the last api.types.status result.
    """
    #if tc.skip: return api.types.status.SUCCESS
    result = api.types.status.SUCCESS
    count = 0
    policies = utils.GetTargetJsons('flowmon', 'crud')
    for policy_json in policies:
        api.Logger.info("Policy File: {}".format(policy_json))
        flowmon_spec_objects = agent_api.AddOneConfig(policy_json)
        if len(flowmon_spec_objects) == 0:
            # Nothing usable in this policy file; move to the next one.
            api.Logger.info("Policy object len {}".format(
                len(flowmon_spec_objects)))
            continue
        verif_json = utils.GetVerifJsonFromPolicyJson(policy_json)
        #create flowexport rules with 1 export cfg
        utils.generateFlowmonCollectorConfig(flowmon_spec_objects,
                                             num_exports_at_create)
        # Random port for this run; stashed on tc for the config helpers.
        port = random.randint(100, 10000)
        tc.port = port
        result = ConfigFlowmonSession(tc, num_exports_at_create,
                                      flowmon_spec_objects)
        if result != api.types.status.SUCCESS:
            api.Logger.error("Failed in Flowmon session configuration")
            agent_api.RemoveConfigObjects(flowmon_spec_objects)
            break
        api.Logger.info(
            "Test for FlowMon with {} sessions {} collectors".format(
                tc.iterators.num_flowmon_sessions, num_exports_at_create))
        utils.DumpFlowmonSessions()
        # Phase 1: validate capture with the initially created collectors.
        ret = InjectTestTrafficAndValidateCapture(
            tc, tc.iterators.num_flowmon_sessions, num_exports_at_create)
        result = ret['res']
        ret_count = ret['count']
        count = count + ret_count
        if result != api.types.status.SUCCESS:
            api.Logger.error("Failed in Traffic validation")
            utils.DumpFlowmonSessions()
        elif tc.iterators.num_exports > num_exports_at_create:
            #update flowexport sessions with num_exports
            # Phase 2: grow the collector set and validate again.
            result = updateFlowmonCollectors(tc, tc.iterators.num_exports)
            if result != api.types.status.SUCCESS:
                api.Logger.info("Failed in Flowmon Collector configuration")
            else:
                api.Logger.info(
                    "Test for FlowMon with {} sessions {} collectors".format(
                        tc.iterators.num_flowmon_sessions,
                        tc.iterators.num_exports))
                utils.DumpFlowmonSessions()
                ret = InjectTestTrafficAndValidateCapture(
                    tc, tc.iterators.num_flowmon_sessions,
                    tc.iterators.num_exports)
                result = ret['res']
                ret_count = ret['count']
                count = count + ret_count
                if result != api.types.status.SUCCESS:
                    api.Logger.error("Failed in Traffic validation")
                    utils.DumpFlowmonSessions()
        #remove all but one flowmon session and check the collectors are not deleted
        if (result == api.types.status.SUCCESS):
            # Phase 3: delete sessions 1..N-1 (keep session 0) and verify
            # capture still works with a single session.
            for iteration in range(1, tc.iterators.num_flowmon_sessions):
                obj = tc.test_iterator_data[iteration]['del_obj']
                agent_api.DeleteConfigObjects(obj)
                agent_api.RemoveConfigObjects(obj)
                tc.test_iterator_data[iteration] = {}
            api.Logger.info(
                "Test for FlowMon with {} sessions {} collectors".format(
                    1, tc.iterators.num_exports))
            utils.DumpFlowmonSessions()
            ret = InjectTestTrafficAndValidateCapture(tc, 1,
                                                      tc.iterators.num_exports)
            result = ret['res']
            ret_count = ret['count']
            count = count + ret_count
            if result != api.types.status.SUCCESS:
                api.Logger.error("Failed in Traffic validation")
                utils.DumpFlowmonSessions()
        # Final cleanup: delete whatever sessions are still configured
        # (entries reset above are skipped via the truthiness check).
        for iteration in range(tc.iterators.num_flowmon_sessions):
            if tc.test_iterator_data[iteration]:
                obj = tc.test_iterator_data[iteration]['del_obj']
                agent_api.DeleteConfigObjects(obj)
                agent_api.RemoveConfigObjects(obj)
                tc.test_iterator_data[iteration] = {}
        agent_api.RemoveConfigObjects(flowmon_spec_objects)
        api.Logger.info(
            "policy_json = {}, count = {}, total_count = {}".format(
                policy_json, ret_count, count))
        if result != api.types.status.SUCCESS:
            api.Logger.info(
                "policy_json = {}, Encountered FAILURE, stopping".format(
                    policy_json))
            break
    tc.SetTestCount(count)
    return result
def Trigger(tc):
    """Scale security-policy test.

    For each policy json matching tc.iterators.proto: push the policy,
    build a per-Naples local rule db, run tc.scale randomized traffic
    sessions between workload pairs (tracking expected rule hits in the
    local dbs), then compare the local dbs against the per-node rule
    stats. Cleans up pushed config on every exit path.

    Returns api.types.status.SUCCESS/FAILURE; the same status is also
    recorded in tc.ret.
    """
    policies = utils.GetTargetJsons(tc.iterators.proto)
    # Generate the random seed; logged so a failing run can be reproduced.
    seed = random.randrange(sys.maxsize)
    api.Logger.info("Seed val: %s" % seed)
    # Fix: pre-bind so the except handler below cannot hit a NameError
    # (and mask the real error) when AddOneConfig itself raises.
    newObjects = None
    try:
        for policy_json in policies:
            newObjects = agent_api.AddOneConfig(policy_json)
            api.Logger.info("Created new object for %s" % policy_json)
            tc.ret = agent_api.PushConfigObjects(newObjects)
            rule_db_map = utils.SetupLocalRuleDbPerNaple(policy_json)
            if tc.ret != api.types.status.SUCCESS:
                # Fix: remove the just-added objects from the store on push
                # failure so they are not leaked into the next test.
                agent_api.RemoveConfigObjects(newObjects)
                return api.types.status.FAILURE
            scale = 0
            while scale < tc.scale:
                for w1, dst_workload_list in tc.workload_dict.items():
                    for w2 in dst_workload_list:
                        seed += 1
                        if scale >= tc.scale:
                            break
                        # If src and dst workload are behind same Naples,
                        # then only one db sees the packet.
                        w1_db = rule_db_map.get(w1.node_name, None)
                        if w1.node_name == w2.node_name:
                            w2_db = None
                        else:
                            w2_db = rule_db_map.get(w2.node_name, None)
                        api.Logger.info(
                            "(%s/%s) Running between w1: %s(%s) and w2: %s(%s)" %
                            (scale + 1, tc.scale, w1.ip_address,
                             w1.workload_name, w2.ip_address,
                             w2.workload_name))
                        tc.ret = utils.RunAll(1, w1, w2, w1_db, w2_db,
                                              FilterAndAlter, seed=seed)
                        if tc.ret != api.types.status.SUCCESS:
                            agent_api.DeleteConfigObjects(newObjects)
                            agent_api.RemoveConfigObjects(newObjects)
                            return tc.ret
                        scale += 1
            # Traffic done: clear sessions, then compare expected (local db)
            # vs actual rule stats on every Naples node.
            utils.clearNaplesSessions()
            for node, db in rule_db_map.items():
                result = utils.compareStats(db, node, tc.iterators.proto)
                api.Logger.info(
                    "Comparison of rule stats for Node %s - %s" %
                    (node, "SUCCESS"
                     if result == api.types.status.SUCCESS
                     else "FAIL"))
                if result != api.types.status.SUCCESS:
                    agent_api.DeleteConfigObjects(newObjects)
                    agent_api.RemoveConfigObjects(newObjects)
                    tc.ret = result
                    return tc.ret
            # Normal per-policy cleanup; a truthy return signals failure.
            if agent_api.DeleteConfigObjects(newObjects):
                api.Logger.error(
                    "Failed to delete config object for %s" % policy_json)
            if agent_api.RemoveConfigObjects(newObjects):
                api.Logger.error(
                    "Failed to remove config object for %s" % policy_json)
    except Exception as e:
        if newObjects:
            agent_api.DeleteConfigObjects(newObjects)
            agent_api.RemoveConfigObjects(newObjects)
        api.Logger.error("%s" % e)
        Teardown(tc)
        return api.types.status.FAILURE
    return api.types.status.SUCCESS