def __parse_tunnel_config_yml(self):
    """Parse the topology's config.yml and return its tunnel spec.

    The returned spec is augmented with ``ip_address_pool``, an iterator of
    usable host addresses carved out of the substrate IPAM base network.
    """
    topo_dir = api.GetTopologyDirectory()
    parsed = parser.YmlParse("{}/config.yml".format(topo_dir))
    tunnels = parsed.tunnels
    # Derive the address pool from the substrate IPAM base prefix.
    base_net = ipaddress.IPv4Network(tunnels.substrate.ipam_base)
    tunnels.ip_address_pool = base_net.hosts()
    return tunnels
def switch_profile(node_names=None, device_names=None, fwd_mode="TRANSPARENT",
                   policy_mode="BASENET", push=True, push_base_profile=False):
    """Configure and optionally push the device Profile objects.

    When push_base_profile is True the stored profile is pushed as-is;
    otherwise fwd_mode/policy_mode are written into each profile object
    (loading the topology's default profile.json if none are stored yet)
    before an optional push.

    Returns api.types.status.SUCCESS or FAILURE.
    """
    objects = QueryConfigs("Profile")
    ret = api.types.status.SUCCESS
    sleep_time = 60  # sec — settle time after an INSERTION/ENFORCED push
    # NOTE(review): nesting below was reconstructed from a collapsed source;
    # the mode-overwrite loop is assumed to apply only when NOT pushing the
    # base profile — confirm against the original file.
    if not push_base_profile:
        # Lazily load the default profile.json if the store is empty.
        if len(objects) == 0:
            profile_json = api.GetTopologyDirectory() + "/profile/" + "profile.json"
            objects = AddOneConfig(profile_json)
        # Overwrite the modes on every stored profile object.
        for obj in objects:
            obj.spec.fwd_mode = fwd_mode
            obj.spec.policy_mode = policy_mode
    if push or push_base_profile:
        if len(objects):
            ret = PushConfigObjects(objects, node_names, device_names)
            if ret == api.types.status.SUCCESS:
                if fwd_mode == "INSERTION" and policy_mode == "ENFORCED":
                    # Give HAL time to flap the uplinks after this mode change.
                    api.Logger.info("Waiting for %d sec for HAL "
                                    "to flap uplinks " % (sleep_time))
                    time.sleep(sleep_time)
        else:
            api.Logger.error("Empty Profile object")
            ret = api.types.status.FAILURE
    return ret
def Main(args):
    """Testsuite setup: initialize the netagent from topology configs,
    push the base config where applicable, and bring up workloads."""
    api.Logger.info("Testsuite NIC Mode is %s" % (api.GetConfigNicMode()))
    netagent_api.Init(api.GetNaplesHostnames(), hw=True)
    netagent_api.ReadConfigs(api.GetTopologyDirectory(), reset=False)
    if api.GetConfigNicMode() in ['unified']:
        status = UpdateNetworkAndEnpointObject()
        if status != api.types.status.SUCCESS:
            return status
    # Delete path is not stable yet; base-config deletion stays disabled.
    if GlobalOptions.skip_setup:
        return RestoreWorkloads()
    nic_mode = api.GetConfigNicMode()
    if nic_mode not in ['classic', 'sriov']:
        netagent_api.PushBaseConfig(
            kinds=["SecurityProfile"] if nic_mode == 'unified' else None)
    return __add_workloads(api.GetNodes())
def Teardown(tc):
    """Testcase teardown: re-push the topology's sgpolicy.json so the
    security policy is restored after the test.

    Always returns api.types.status.SUCCESS; the push result is not checked.
    """
    api.Logger.info("Tearing down ...")
    policy_json = "{}/sgpolicy.json".format(api.GetTopologyDirectory())
    # NOTE(review): a redundant utils.ReadJson(policy_json) call whose result
    # was never used has been removed; AddOneConfig reads the file itself.
    newObjects = agent_api.AddOneConfig(policy_json)
    agent_api.PushConfigObjects(newObjects)
    return api.types.status.SUCCESS
def __read_spec(self):
    """Parse the topology's config.yml and index its network and workload
    specs by name into the instance lookup tables."""
    yml_file = "{}/config.yml".format(api.GetTopologyDirectory())
    api.Logger.info("Config yml: \n %s" % yml_file)
    self.__spec = parser.YmlParse(yml_file)
    top = self.__spec.spec
    # Build name -> spec maps for fast lookups later.
    self.__network_specs.update(
        (entry.network.name, entry.network) for entry in top.networks)
    self.__workload_specs.update(
        (entry.workload.name, entry.workload) for entry in top.workloads)
def LoadNetworkObjectFromJSON():
    """Return the IPv4 subnet of network "nw0" from the topology's
    networks.json, or None if the file, entry, or subnet is missing or
    malformed."""
    nw_config = api.GetTopologyDirectory() + "/networks.json"
    try:
        with open(nw_config) as f:
            nw_obj = json.load(f)
        for obj in nw_obj['objects']:
            if obj['meta']['name'] == "nw0":
                return IPv4Network(obj['spec']['ipv4-subnet'])
        return None
    except (OSError, ValueError, KeyError, TypeError):
        # Previously a bare "except:" which also swallowed KeyboardInterrupt
        # and SystemExit; catch only the expected file/parse/lookup errors
        # (json.JSONDecodeError and bad-subnet errors are ValueError).
        return None
def LoadNetworkObjectFromYAML():
    """Return the IPv4 network (ipam_base/prefix_length) of the first
    network entry in the topology's config.yml, or None on any expected
    failure."""
    nw_config = api.GetTopologyDirectory() + "/config.yml"
    try:
        with open(nw_config) as f:
            # safe_load: a config file never needs arbitrary Python object
            # construction; plain yaml.load without an explicit Loader is
            # deprecated and unsafe on untrusted input.
            nw_obj = yaml.safe_load(f)
        net = nw_obj['spec']['networks'][0]['network']['ipv4']
        ipam_base = net['ipam_base'].split('/')[0]
        prefix_length = net['prefix_length']
        return IPv4Network(ipam_base + "/" + str(prefix_length))
    except (OSError, yaml.YAMLError, ValueError, KeyError, IndexError,
            TypeError, AttributeError):
        # Previously a bare "except:"; catch only expected file/parse/
        # lookup/address errors.
        return None
def Trigger(tc):
    """Replace all stored NetworkSecurityPolicy objects with the topology's
    sgpolicy.json and push the result to the agents."""
    existing = agent_api.QueryConfigs(kind="NetworkSecurityPolicy")
    agent_api.DeleteConfigObjects(existing)
    agent_api.RemoveConfigObjects(existing)
    policy_path = api.GetTopologyDirectory() + "/" + "sgpolicy.json"
    fresh = agent_api.AddOneConfig(policy_path)
    status = agent_api.PushConfigObjects(fresh)
    if status == api.types.status.SUCCESS:
        return api.types.status.SUCCESS
    api.Logger.error("Failed to push nwsec policy")
    return status
def Main(step):
    """Testcase setup: initialize the netagent, reset the base config,
    re-push it (non-classic NIC mode only) and add workloads unless the
    run is config-only."""
    mgmt_ips = api.GetNaplesMgmtIpAddresses()
    netagent_api.Init(mgmt_ips)
    topo_dir = api.GetTopologyDirectory()
    netagent_api.ReadConfigs(topo_dir)
    __init_lifdb()
    # Start from a clean slate before (re)pushing the base config.
    netagent_api.DeleteBaseConfig()
    nic_mode = api.GetConfigNicMode()
    if nic_mode != 'classic':
        netagent_api.PushBaseConfig()
    if not api.IsConfigOnly():
        __add_workloads()
    return api.types.status.SUCCESS
def Trigger(tc):
    """Flow-ERSPAN mirror test: for every single-collector mirror template,
    push the mirror config to the Naples node, start tcpdump captures on the
    collector workloads, trigger traffic per verification entry, then delete
    the config and validate both ERSPAN packet reception and config cleanup.

    Sets tc.error on config-push failures, records the number of executed
    protocol runs via tc.SetTestCount(), and returns
    api.types.status.SUCCESS/FAILURE.
    """
    if tc.ignore == True:
        return api.types.status.SUCCESS
    if tc.error == True:
        return api.types.status.FAILURE

    protoDir = api.GetTopologyDirectory() +\
               "/gen/telemetry/{}/{}".format('mirror', tc.iterators.proto)
    # NOTE(review): protoDir is passed as a second positional argument, not
    # interpolated into the message — possibly an intended "%s"; confirm
    # against the Logger API.
    api.Logger.info("Template Config files location: ", protoDir)

    result = api.types.status.SUCCESS
    count = 0

    policies = utils.GetTargetJsons('mirror', tc.iterators.proto)
    for policy_json in policies:
        #
        # Get template-Mirror Config
        #
        newObjects = agent_api.AddOneConfig(policy_json)
        if len(newObjects) == 0:
            api.Logger.error("Adding new objects to store failed")
            tc.error = True
            return api.types.status.FAILURE
        agent_api.RemoveConfigObjects(newObjects)

        #
        # Ignore Multi-collector template config's, since Expanded-Telemetry
        # testbundle dynamically creates such config's
        #
        if len(newObjects[0].spec.collectors) > 1:
            continue

        #
        # Modify template-Mirror Config to make sure that Naples-node
        # act as either source or destination
        #
        # Set up Collector in the remote node
        #
        eutils.generateMirrorConfig(tc, policy_json, newObjects)

        ret_count = 0
        for i in range(0, len(tc.mirror_verif)):
            #
            # If Execution-Optimization is enabled, no need to run the test
            # for the same protocol more than once
            #
            if i > 0 and tc.mirror_verif[i]['protocol'] ==\
               tc.mirror_verif[i - 1]['protocol']:
                continue

            #
            # Flow-ERSPAN for TCP-traffic is not tested (yet) in
            # Classic-mode until applicable pkt-trigger tools are identified
            #
            if tc.classic_mode == True and\
               tc.mirror_verif[i]['protocol'] == 'tcp':
                continue

            #
            # Push Mirror Config to Naples
            #
            ret = agent_api.PushConfigObjects(newObjects,
                                              [tc.naples.node_name],
                                              [tc.naples_device_name])
            if ret != api.types.status.SUCCESS:
                api.Logger.error("Unable to push mirror objects")
                tc.error = True
                return api.types.status.FAILURE

            #
            # Establish Forwarding set up between Naples-peer and Collectors
            #
            eutils.establishForwardingSetup(tc)

            req_tcpdump_erspan = api.Trigger_CreateExecuteCommandsRequest(
                serial=True)
            for c in range(0, len(tc.flow_collector)):
                #
                # Set up TCPDUMP's on the collector
                #
                idx = tc.flow_collector_idx[c]
                if tc.flow_collector[c].IsNaples():
                    cmd = "tcpdump -c 600 -XX -vv -nni {} ip proto gre and dst {} --immediate-mode -U -w flow-mirror-{}.pcap"\
                        .format(tc.flow_collector[c].interface,
                                tc.collector_ip_address[idx], c)
                else:
                    # Non-Naples collector: -p disables promiscuous mode.
                    cmd = "tcpdump -p -c 600 -XX -vv -nni {} ip proto gre and dst {} --immediate-mode -U -w flow-mirror-{}.pcap"\
                        .format(tc.flow_collector[c].interface,
                                tc.collector_ip_address[idx], c)
                eutils.add_command(req_tcpdump_erspan, tc.flow_collector[c],
                                   cmd, True)

            resp_tcpdump_erspan = api.Trigger(req_tcpdump_erspan)
            for cmd in resp_tcpdump_erspan.commands:
                api.PrintCommandResults(cmd)

            #
            # Classic mode requires a delay to make sure that TCPDUMP
            # background process is fully up
            #
            if tc.classic_mode == True:
                time.sleep(2)

            #
            # Trigger packets for ERSPAN to take effect
            #
            tc.protocol = tc.mirror_verif[i]['protocol']
            tc.dest_port = utils.GetDestPort(tc.mirror_verif[i]['port'])
            if api.GetNodeOs(tc.naples.node_name) == 'linux':
                eutils.triggerTrafficInClassicModeLinux(tc)
            else:
                eutils.triggerTrafficInHostPinModeOrFreeBSD(tc)

            #
            # Dump sessions/flows/P4-tables for debug purposes
            #
            eutils.showSessionAndP4TablesForDebug(tc, tc.flow_collector,
                                                  tc.flow_collector_idx)

            #
            # Terminate TCPDUMP background process
            #
            term_resp_tcpdump_erspan = api.Trigger_TerminateAllCommands(
                resp_tcpdump_erspan)
            tc.resp_tcpdump_erspan = api.Trigger_AggregateCommandsResponse(
                resp_tcpdump_erspan, term_resp_tcpdump_erspan)

            # Delete the objects
            agent_api.DeleteConfigObjects(newObjects, [tc.naples.node_name],
                                          [tc.naples_device_name])

            #
            # Make sure that Mirror-config has been removed
            #
            tc.resp_cleanup = eutils.showP4TablesForValidation(tc)

            #
            # Validate ERSPAN packets reception
            #
            tc.tcp_erspan_pkts_expected = \
                NUMBER_OF_TCP_ERSPAN_PACKETS_PER_SESSION
            # Expected UDP/ICMP counts are double the trigger counts —
            # presumably both flow directions are mirrored; confirm.
            tc.udp_erspan_pkts_expected = (tc.udp_count << 1)
            tc.icmp_erspan_pkts_expected = (tc.icmp_count << 1)
            if tc.protocol == 'udp' and tc.iterators.proto == 'mixed':
                tc.protocol = 'udp-mixed'
                tc.icmp_erspan_pkts_expected = tc.udp_erspan_pkts_expected
            res_1 = eutils.validateErspanPackets(tc, tc.flow_collector,
                                                 tc.flow_collector_idx)

            #
            # Validate Config-cleanup
            #
            res_2 = eutils.validateConfigCleanup(tc)

            if res_1 == api.types.status.FAILURE or\
               res_2 == api.types.status.FAILURE:
                result = api.types.status.FAILURE

            if result == api.types.status.FAILURE:
                break

            ret_count += 1

        if result == api.types.status.FAILURE:
            break

        count += ret_count

    tc.SetTestCount(count)
    return result
def Trigger(tc):
    """IPsec netcat test: configure IPsec on both workload nodes — via
    netagent JSON objects on Naples nodes, or via `ip xfrm` commands on
    non-Naples workloads — then run an nc file transfer between the two
    workloads and copy the transferred files back for later comparison.

    Returns api.types.status.SUCCESS/FAILURE.

    NOTE(review): if neither workload node is a Naples node, neither
    nc_client_wl nor nc_server_wl is ever bound and the code below raises
    NameError — presumably the testbed guarantees at least one Naples node.
    """
    tc.cmd_cookies = []
    tc.cmd_cookies1 = []
    tc.cmd_cookies2 = []
    nodes = api.GetWorkloadNodeHostnames()
    push_node_0 = [nodes[0]]
    push_node_1 = [nodes[1]]

    # Existing objects in the store; per-kind config is only (re)loaded
    # from JSON when the store has none of that kind.
    encrypt_objects = netagent_cfg_api.QueryConfigs(kind='IPSecSAEncrypt')
    decrypt_objects = netagent_cfg_api.QueryConfigs(kind='IPSecSADecrypt')
    policy_objects = netagent_cfg_api.QueryConfigs(kind='IPSecPolicy')

    # Configure IPsec on Node 1
    if api.IsNaplesNode(nodes[0]):
        if len(encrypt_objects) == 0:
            newObjects = netagent_cfg_api.AddOneConfig(
                api.GetTopologyDirectory() +
                "/ipsec/ipsec_encryption_node1.json")
            if len(newObjects) == 0:
                api.Logger.error(
                    "Adding new objects to store failed for ipsec_encryption_node1.json"
                )
                return api.types.status.FAILURE
            ret = netagent_cfg_api.PushConfigObjects(newObjects,
                                                     node_names=push_node_0,
                                                     ignore_error=True)
            if ret != api.types.status.SUCCESS:
                api.Logger.error(
                    "Unable to push ipsec-encryption objects to node %s" %
                    nodes[0])
                return api.types.status.FAILURE
            get_config_objects = netagent_cfg_api.GetConfigObjects(newObjects)
            if len(get_config_objects) == 0:
                api.Logger.error("Unable to fetch newly pushed objects")
                return api.types.status.FAILURE
        if len(decrypt_objects) == 0:
            newObjects = netagent_cfg_api.AddOneConfig(
                api.GetTopologyDirectory() +
                "/ipsec/ipsec_decryption_node1.json")
            if len(newObjects) == 0:
                api.Logger.error(
                    "Adding new objects to store failed for ipsec_decryption_node1.json"
                )
                return api.types.status.FAILURE
            ret = netagent_cfg_api.PushConfigObjects(newObjects,
                                                     node_names=push_node_0,
                                                     ignore_error=True)
            if ret != api.types.status.SUCCESS:
                api.Logger.error(
                    "Unable to push ipsec-encryption objects to node %s" %
                    nodes[0])
                return api.types.status.FAILURE
            get_config_objects = netagent_cfg_api.GetConfigObjects(newObjects)
            if len(get_config_objects) == 0:
                api.Logger.error("Unable to fetch newly pushed objects")
                return api.types.status.FAILURE
        if len(policy_objects) == 0:
            newObjects = netagent_cfg_api.AddOneConfig(
                api.GetTopologyDirectory() +
                "/ipsec/ipsec_policies_node1.json")
            if len(newObjects) == 0:
                api.Logger.error(
                    "Adding new objects to store failed for ipsec_policies_node1.json"
                )
                return api.types.status.FAILURE
            ret = netagent_cfg_api.PushConfigObjects(newObjects,
                                                     node_names=push_node_0,
                                                     ignore_error=True)
            if ret != api.types.status.SUCCESS:
                api.Logger.error(
                    "Unable to push ipsec-policy objects to node %s" %
                    nodes[0])
                return api.types.status.FAILURE
            get_config_objects = netagent_cfg_api.GetConfigObjects(newObjects)
            if len(get_config_objects) == 0:
                api.Logger.error("Unable to fetch newly pushed objects")
                return api.types.status.FAILURE
    else:
        # Non-Naples node 1: program the Linux kernel IPsec stack directly.
        workloads = api.GetWorkloads(nodes[0])
        w1 = workloads[0]
        req1 = api.Trigger_CreateExecuteCommandsRequest(serial=True)
        api.Trigger_AddCommand(req1, w1.node_name, w1.workload_name,
                               "sudo ip xfrm state flush")
        api.Trigger_AddCommand(req1, w1.node_name, w1.workload_name,
                               "sudo ip xfrm policy flush")
        api.Trigger_AddCommand(
            req1, w1.node_name, w1.workload_name,
            "sudo ip xfrm policy add src 192.168.100.103/32 dst 192.168.100.101/32 proto tcp dport %s dir in tmpl src 192.168.100.103 dst 192.168.100.101 proto esp mode tunnel"
            % (tc.iterators.port))
        api.Trigger_AddCommand(
            req1, w1.node_name, w1.workload_name,
            "sudo ip xfrm policy add src 192.168.100.103/32 dst 192.168.100.101/32 proto tcp dport %s dir fwd tmpl src 192.168.100.103 dst 192.168.100.101 proto esp mode tunnel"
            % (tc.iterators.port))
        api.Trigger_AddCommand(
            req1, w1.node_name, w1.workload_name,
            "sudo ip xfrm policy add src 192.168.100.101/32 dst 192.168.100.103/32 proto tcp sport %s dir out tmpl src 192.168.100.101 dst 192.168.100.103 proto esp mode tunnel"
            % (tc.iterators.port))
        api.Trigger_AddCommand(
            req1, w1.node_name, w1.workload_name,
            "sudo ip xfrm state add src 192.168.100.103 dst 192.168.100.101 proto esp spi 0x01 mode tunnel aead 'rfc4106(gcm(aes))' 0x414141414141414141414141414141414141414141414141414141414141414100000000 128 sel src 192.168.100.103/32 dst 192.168.100.101/32"
        )
        api.Trigger_AddCommand(
            req1, w1.node_name, w1.workload_name,
            "sudo ip xfrm state add src 192.168.100.101 dst 192.168.100.103 proto esp spi 0x01 mode tunnel aead 'rfc4106(gcm(aes))' 0x414141414141414141414141414141414141414141414141414141414141414100000000 128 sel src 192.168.100.101/32 dst 192.168.100.103/32"
        )
        api.Trigger_AddCommand(
            req1, w1.node_name, w1.workload_name,
            "sudo ip xfrm policy add src 192.168.100.103/32 dst 192.168.100.101/32 proto udp dport %s dir in tmpl src 192.168.100.103 dst 192.168.100.101 proto esp mode tunnel"
            % (tc.iterators.port))
        api.Trigger_AddCommand(
            req1, w1.node_name, w1.workload_name,
            "sudo ip xfrm policy add src 192.168.100.103/32 dst 192.168.100.101/32 proto udp dport %s dir fwd tmpl src 192.168.100.103 dst 192.168.100.101 proto esp mode tunnel"
            % (tc.iterators.port))
        api.Trigger_AddCommand(
            req1, w1.node_name, w1.workload_name,
            "sudo ip xfrm policy add src 192.168.100.101/32 dst 192.168.100.103/32 proto udp sport %s dir out tmpl src 192.168.100.101 dst 192.168.100.103 proto esp mode tunnel"
            % (tc.iterators.port))
        api.Trigger_AddCommand(req1, w1.node_name, w1.workload_name,
                               "sudo ip xfrm state list")
        api.Trigger_AddCommand(req1, w1.node_name, w1.workload_name,
                               "sudo ip xfrm policy list")
        trig_resp1 = api.Trigger(req1)
        term_resp1 = api.Trigger_TerminateAllCommands(trig_resp1)

    # Configure IPsec on Node 2
    if api.IsNaplesNode(nodes[1]):
        if len(encrypt_objects) == 0:
            newObjects = netagent_cfg_api.AddOneConfig(
                api.GetTopologyDirectory() +
                "/ipsec/ipsec_encryption_node2.json")
            if len(newObjects) == 0:
                api.Logger.error(
                    "Adding new objects to store failed for ipsec_encryption_node2.json"
                )
                return api.types.status.FAILURE
            ret = netagent_cfg_api.PushConfigObjects(newObjects,
                                                     node_names=push_node_1,
                                                     ignore_error=True)
            if ret != api.types.status.SUCCESS:
                api.Logger.error(
                    "Unable to push ipsec-encryption objects to node %s" %
                    nodes[1])
                return api.types.status.FAILURE
            get_config_objects = netagent_cfg_api.GetConfigObjects(newObjects)
            if len(get_config_objects) == 0:
                # Fetch failure on node 2 is logged but deliberately not
                # fatal (return statement was commented out).
                api.Logger.error("Unable to fetch newly pushed objects")
                #return api.types.status.FAILURE
        if len(decrypt_objects) == 0:
            newObjects = netagent_cfg_api.AddOneConfig(
                api.GetTopologyDirectory() +
                "/ipsec/ipsec_decryption_node2.json")
            if len(newObjects) == 0:
                api.Logger.error(
                    "Adding new objects to store failed for ipsec_decryption_node2.json"
                )
                return api.types.status.FAILURE
            ret = netagent_cfg_api.PushConfigObjects(newObjects,
                                                     node_names=push_node_1,
                                                     ignore_error=True)
            if ret != api.types.status.SUCCESS:
                api.Logger.error(
                    "Unable to push ipsec-encryption objects to node %s" %
                    nodes[1])
                return api.types.status.FAILURE
            get_config_objects = netagent_cfg_api.GetConfigObjects(newObjects)
            if len(get_config_objects) == 0:
                api.Logger.error("Unable to fetch newly pushed objects")
                #return api.types.status.FAILURE
        if len(policy_objects) == 0:
            newObjects = netagent_cfg_api.AddOneConfig(
                api.GetTopologyDirectory() +
                "/ipsec/ipsec_policies_node2.json")
            if len(newObjects) == 0:
                api.Logger.error(
                    "Adding new objects to store failed for ipsec_policies_node2.json"
                )
                return api.types.status.FAILURE
            ret = netagent_cfg_api.PushConfigObjects(newObjects,
                                                     node_names=push_node_1,
                                                     ignore_error=True)
            if ret != api.types.status.SUCCESS:
                api.Logger.error(
                    "Unable to push ipsec-encryption objects to node %s" %
                    nodes[1])
                return api.types.status.FAILURE
            get_config_objects = netagent_cfg_api.GetConfigObjects(newObjects)
            if len(get_config_objects) == 0:
                api.Logger.error("Unable to fetch newly pushed objects")
                #return api.types.status.FAILURE
    else:
        # Non-Naples node 2: mirror of the node-1 xfrm config with the
        # 192.168.100.101/192.168.100.103 endpoints swapped.
        workloads = api.GetWorkloads(nodes[1])
        w2 = workloads[0]
        req2 = api.Trigger_CreateExecuteCommandsRequest(serial=True)
        api.Trigger_AddCommand(req2, w2.node_name, w2.workload_name,
                               "sudo ip xfrm state flush")
        api.Trigger_AddCommand(req2, w2.node_name, w2.workload_name,
                               "sudo ip xfrm policy flush")
        api.Trigger_AddCommand(
            req2, w2.node_name, w2.workload_name,
            "sudo ip xfrm policy add src 192.168.100.101/32 dst 192.168.100.103/32 proto tcp dport %s dir in tmpl src 192.168.100.101 dst 192.168.100.103 proto esp mode tunnel"
            % (tc.iterators.port))
        api.Trigger_AddCommand(
            req2, w2.node_name, w2.workload_name,
            "sudo ip xfrm policy add src 192.168.100.101/32 dst 192.168.100.103/32 proto tcp dport %s dir fwd tmpl src 192.168.100.101 dst 192.168.100.103 proto esp mode tunnel"
            % (tc.iterators.port))
        api.Trigger_AddCommand(
            req2, w2.node_name, w2.workload_name,
            "sudo ip xfrm policy add src 192.168.100.103/32 dst 192.168.100.101/32 proto tcp sport %s dir out tmpl src 192.168.100.103 dst 192.168.100.101 proto esp mode tunnel"
            % (tc.iterators.port))
        api.Trigger_AddCommand(
            req2, w2.node_name, w2.workload_name,
            "sudo ip xfrm state add src 192.168.100.101 dst 192.168.100.103 proto esp spi 0x01 mode tunnel aead 'rfc4106(gcm(aes))' 0x414141414141414141414141414141414141414141414141414141414141414100000000 128 sel src 192.168.100.101/32 dst 192.168.100.103/32"
        )
        api.Trigger_AddCommand(
            req2, w2.node_name, w2.workload_name,
            "sudo ip xfrm state add src 192.168.100.103 dst 192.168.100.101 proto esp spi 0x01 mode tunnel aead 'rfc4106(gcm(aes))' 0x414141414141414141414141414141414141414141414141414141414141414100000000 128 sel src 192.168.100.103/32 dst 192.168.100.101/32"
        )
        api.Trigger_AddCommand(
            req2, w2.node_name, w2.workload_name,
            "sudo ip xfrm policy add src 192.168.100.101/32 dst 192.168.100.103/32 proto udp dport %s dir in tmpl src 192.168.100.101 dst 192.168.100.103 proto esp mode tunnel"
            % (tc.iterators.port))
        api.Trigger_AddCommand(
            req2, w2.node_name, w2.workload_name,
            "sudo ip xfrm policy add src 192.168.100.101/32 dst 192.168.100.103/32 proto udp dport %s dir fwd tmpl src 192.168.100.101 dst 192.168.100.103 proto esp mode tunnel"
            % (tc.iterators.port))
        api.Trigger_AddCommand(
            req2, w2.node_name, w2.workload_name,
            "sudo ip xfrm policy add src 192.168.100.103/32 dst 192.168.100.101/32 proto udp sport %s dir out tmpl src 192.168.100.103 dst 192.168.100.101 proto esp mode tunnel"
            % (tc.iterators.port))
        api.Trigger_AddCommand(req2, w2.node_name, w2.workload_name,
                               "sudo ip xfrm state list")
        api.Trigger_AddCommand(req2, w2.node_name, w2.workload_name,
                               "sudo ip xfrm policy list")
        trig_resp2 = api.Trigger(req2)
        term_resp2 = api.Trigger_TerminateAllCommands(trig_resp2)

    # Pick nc client/server roles; the Naples workload acts as client.
    workloads = api.GetWorkloads(nodes[0])
    w1 = workloads[0]
    workloads = api.GetWorkloads(nodes[1])
    w2 = workloads[0]
    bypass_test = 0
    if w1.IsNaples() and w2.IsNaples():
        # Naples-to-Naples is not exercised; dummy files keep Verify happy.
        api.Logger.info(
            "Both workloads are Naples, %s is nc client, %s is nc server, bypassing test"
            % (w1.node_name, w2.node_name))
        nc_client_wl = w1
        nc_server_wl = w2
        bypass_test = 1
    elif w1.IsNaples():
        api.Logger.info("%s is Naples and nc client, %s is nc server" %
                        (w1.node_name, w2.node_name))
        nc_client_wl = w1
        nc_server_wl = w2
    elif w2.IsNaples():
        api.Logger.info("%s is Naples and nc client, %s is nc server" %
                        (w2.node_name, w1.node_name))
        nc_client_wl = w2
        nc_server_wl = w1

    req = api.Trigger_CreateExecuteCommandsRequest(serial=True)
    tc.cmd_descr = "Server: %s(%s) <--> Client: %s(%s) on %s port %s" %\
                   (nc_server_wl.workload_name, nc_server_wl.ip_address,
                    nc_client_wl.workload_name, nc_client_wl.ip_address,
                    tc.iterators.protocol, tc.iterators.port)
    api.Logger.info("Starting NC test over IPSec from %s" % (tc.cmd_descr))

    if bypass_test == 0:
        cmd_cookie = "Creating test file on %s" % (nc_client_wl.workload_name)
        api.Trigger_AddCommand(
            req, nc_client_wl.node_name, nc_client_wl.workload_name,
            "base64 /dev/urandom | head -1000 > ipsec_client.dat")
        tc.cmd_cookies.append(cmd_cookie)

        # Keep packets below the tunnel overhead to avoid fragmentation —
        # presumably; confirm the 1048 MTU rationale.
        cmd_cookie = "Setting MTU to smaller value on %s" % (
            nc_client_wl.workload_name)
        api.Trigger_AddCommand(req, nc_client_wl.node_name,
                               nc_client_wl.workload_name,
                               "ifconfig %s mtu 1048" % nc_client_wl.interface)
        tc.cmd_cookies.append(cmd_cookie)

        cmd_cookie = "Running nc server on %s" % (nc_server_wl.workload_name)
        if tc.iterators.protocol == "tcp":
            api.Trigger_AddCommand(req, nc_server_wl.node_name,
                                   nc_server_wl.workload_name,
                                   "nc -l %s > ipsec_server.dat" %
                                   (tc.iterators.port),
                                   background=True)
        else:
            api.Trigger_AddCommand(req, nc_server_wl.node_name,
                                   nc_server_wl.workload_name,
                                   "nc --udp -l %s > ipsec_server.dat" %
                                   (tc.iterators.port),
                                   background=True)
        tc.cmd_cookies.append(cmd_cookie)

        cmd_cookie = "Running nc client on %s" % (nc_client_wl.workload_name)
        if tc.iterators.protocol == "tcp":
            api.Trigger_AddCommand(
                req, nc_client_wl.node_name, nc_client_wl.workload_name,
                "nc %s %s < ipsec_client.dat" %
                (nc_server_wl.ip_address, tc.iterators.port))
        else:
            api.Trigger_AddCommand(
                req, nc_client_wl.node_name, nc_client_wl.workload_name,
                "nc --udp %s %s < ipsec_client.dat" %
                (nc_server_wl.ip_address, tc.iterators.port))
        tc.cmd_cookies.append(cmd_cookie)
    else:
        # Bypassed run: create empty, identical files so the later copy and
        # comparison steps still succeed.
        cmd_cookie = "Creating dummy file on %s" % (nc_client_wl.workload_name)
        api.Trigger_AddCommand(
            req, nc_client_wl.node_name, nc_client_wl.workload_name,
            "rm -f ipsec_client.dat ; touch ipsec_client.dat")
        tc.cmd_cookies.append(cmd_cookie)
        cmd_cookie = "Creating dummy file on %s" % (nc_server_wl.workload_name)
        api.Trigger_AddCommand(
            req, nc_server_wl.node_name, nc_server_wl.workload_name,
            "rm -f ipsec_server.dat ; touch ipsec_server.dat")
        tc.cmd_cookies.append(cmd_cookie)

    # Collect IPsec state from both sides for post-run inspection.
    if nc_client_wl.IsNaples():
        cmd_cookie = "IPSec state on %s AFTER running nc test" % (
            nc_client_wl.node_name)
        api.Trigger_AddNaplesCommand(
            req, nc_client_wl.node_name,
            "/nic/bin/halctl show ipsec-global-stats")
        tc.cmd_cookies.append(cmd_cookie)
    else:
        cmd_cookie = "IPSec state on %s AFTER running nc test" % (
            nc_client_wl.node_name)
        api.Trigger_AddCommand(req, nc_client_wl.node_name,
                               nc_client_wl.workload_name,
                               "sudo ip xfrm policy show")
        tc.cmd_cookies.append(cmd_cookie)
    if nc_server_wl.IsNaples():
        cmd_cookie = "IPSec state on %s AFTER running nc test" % (
            nc_server_wl.node_name)
        api.Trigger_AddNaplesCommand(
            req, nc_server_wl.node_name,
            "/nic/bin/halctl show ipsec-global-stats")
        tc.cmd_cookies.append(cmd_cookie)
    else:
        cmd_cookie = "IPSec state on %s AFTER running nc test" % (
            nc_server_wl.node_name)
        api.Trigger_AddCommand(req, nc_server_wl.node_name,
                               nc_server_wl.workload_name,
                               "sudo ip xfrm policy show")
        tc.cmd_cookies.append(cmd_cookie)

    trig_resp = api.Trigger(req)
    term_resp = api.Trigger_TerminateAllCommands(trig_resp)
    tc.resp = api.Trigger_AggregateCommandsResponse(trig_resp, term_resp)

    # Retrieve both data files so Verify can compare client vs server copy.
    resp = api.CopyFromWorkload(nc_client_wl.node_name,
                                nc_client_wl.workload_name,
                                ['ipsec_client.dat'], tc.GetLogsDir())
    if resp is None:
        api.Logger.error("Could not find ipsec_client.dat")
        return api.types.status.FAILURE
    resp = api.CopyFromWorkload(nc_server_wl.node_name,
                                nc_server_wl.workload_name,
                                ['ipsec_server.dat'], tc.GetLogsDir())
    if resp is None:
        api.Logger.error("Could not find ipsec_server.dat")
        return api.types.status.FAILURE
    return api.types.status.SUCCESS
def Trigger(tc):
    """Endpoint-ERSPAN mirror test: load the endpoint mirror templates,
    push the InterfaceMirrorSession (collector) config to the Naples node,
    attach it to the Endpoint objects, capture ERSPAN traffic on the
    collector workloads while triggering traffic, then detach and delete
    the config and record the cleanup state.

    Stashes results on tc (resp_tcpdump_erspan, resp_cleanup) for the
    Verify step; returns api.types.status.SUCCESS/FAILURE.
    """
    if tc.ignore == True:
        return api.types.status.SUCCESS
    if tc.error == True:
        return api.types.status.FAILURE

    tc.resp_tcpdump_erspan = None
    tc.resp_cleanup = None

    protoDir = api.GetTopologyDirectory() +\
               "/gen/telemetry/{}/{}".format('mirror', 'endpoint')
    # NOTE(review): protoDir is passed as a second positional argument
    # rather than interpolated into the message; confirm Logger API intent.
    api.Logger.info("Template Config files location: ", protoDir)

    policies = utils.GetTargetJsons('mirror', 'endpoint')
    for policy_json in policies:
        #
        # Get template-Mirror Config
        #
        api.Logger.info("Adding one config object for {}", format(policy_json))
        newObjects = agent_api.AddOneConfig(policy_json)
        if len(newObjects) == 0:
            api.Logger.error("Adding new objects to store failed")
            tc.error = True
            return api.types.status.FAILURE

        #
        # Modify template-Mirror Config to make sure that Naples-node
        # act as either source or destination
        #
        # Set up Collector in the remote node
        #
        if newObjects[0].kind == 'InterfaceMirrorSession':
            tc.ep_collector_objects = newObjects
            agent_api.RemoveConfigObjects(tc.ep_collector_objects)
        elif newObjects[0].kind == 'Endpoint':
            tc.endpoint_objects = newObjects
            agent_api.RemoveConfigObjects(tc.endpoint_objects)
            updateEndpointObjectTempl(tc, tc.endpoint_objects,
                                      tc.store_endpoint_objects[0])

    # NOTE(review): loop extent below was reconstructed from a collapsed
    # source; the traffic/capture/cleanup phase is assumed to run once per
    # endpoint-config iteration (i >= 1) — confirm against the original.
    for i in range(0, len(policies)):
        #
        # Push Collector object
        #
        if i == 0:
            colObjects = tc.ep_collector_objects
            if tc.iterators.session == 'single':
                ret = eutils.generateEpCollectorConfig(tc, colObjects)
            else:
                ret = eutils.generateEpCollectorConfigForMultiMirrorSession(
                    tc, colObjects)
            if ret != api.types.status.SUCCESS:
                api.Logger.error("Unable to identify Collector Workload")
                tc.error = True
                return api.types.status.FAILURE
            api.Logger.info("Pushing collector objects")
            ret = agent_api.PushConfigObjects(colObjects,
                                              [tc.naples.node_name],
                                              [tc.naples_device_name])
            if ret != api.types.status.SUCCESS:
                api.Logger.error("Unable to push collector objects")
                tc.error = True
                return api.types.status.FAILURE
            continue

        api.Logger.info("Generating Endpoint objects")
        cfg_api.PrintConfigsObjects(colObjects)

        #
        # Update Endpoint objects
        #
        epObjects = tc.endpoint_objects
        ret = eutils.generateEndpointConfig(tc, epObjects, colObjects)
        if ret != api.types.status.SUCCESS:
            agent_api.DeleteConfigObjects(tc.ep_collector_objects,
                                          [tc.naples.node_name],
                                          [tc.naples_device_name])
            api.Logger.error("Unable to identify Endpoints")
            tc.error = True
            return api.types.status.FAILURE
        api.Logger.info("Pushing Endpoint objects")
        ret = agent_api.UpdateConfigObjects(epObjects, [tc.naples.node_name],
                                            [tc.naples_device_name])
        if ret != api.types.status.SUCCESS:
            agent_api.DeleteConfigObjects(tc.ep_collector_objects,
                                          [tc.naples.node_name],
                                          [tc.naples_device_name])
            api.Logger.error("Unable to update endpoint objects")
            tc.error = True
            return api.types.status.FAILURE

        #
        # Establish Forwarding set up between Naples-peer and Collectors
        #
        eutils.establishForwardingSetup(tc)

        req_tcpdump_erspan = api.Trigger_CreateExecuteCommandsRequest(
            serial=True)
        for c in range(0, len(tc.ep_collector)):
            #
            # Set up TCPDUMP's on the collector
            #
            idx = tc.ep_collector_idx[c]
            if tc.ep_collector[c].IsNaples():
                ### TODO - run & revisit for windows case and fix any issues.
                if api.GetNodeOs(tc.naples.node_name) == "windows":
                    intfGuid = ionic_utils.winIntfGuid(
                        tc.ep_collector[c].node_name,
                        tc.ep_collector[c].interface)
                    intfVal = str(
                        ionic_utils.winTcpDumpIdx(tc.ep_collector[c].node_name,
                                                  intfGuid))
                    cmd = "sudo /mnt/c/Windows/System32/tcpdump.exe -c 1000 -XX -vv -i {} ip proto 47 and dst {} -U -w ep-mirror-{}.pcap"\
                        .format(intfVal, tc.collector_ip_address[idx], c)
                else:
                    intfVal = tc.ep_collector[c].interface
                    cmd = "tcpdump -c 1000 -XX -vv -nni {} ip proto gre and dst {} --immediate-mode -U -w ep-mirror-{}.pcap"\
                        .format(intfVal, tc.collector_ip_address[idx], c)
            else:
                # Non-Naples collector: -p disables promiscuous mode.
                cmd = "tcpdump -p -c 1000 -XX -vv -nni {} ip proto gre and dst {} --immediate-mode -U -w ep-mirror-{}.pcap"\
                    .format(tc.ep_collector[c].interface,
                            tc.collector_ip_address[idx], c)
            eutils.add_command(req_tcpdump_erspan, tc.ep_collector[c], cmd,
                               True)

        resp_tcpdump_erspan = api.Trigger(req_tcpdump_erspan)
        for cmd in resp_tcpdump_erspan.commands:
            api.PrintCommandResults(cmd)

        #
        # Classic mode requires a delay to make sure that TCPDUMP background
        # process is fully up
        #
        if tc.classic_mode == True:
            time.sleep(2)

        #
        # Trigger packets for ERSPAN to take effect
        #
        tc.dest_port = '120'
        if api.GetNodeOs(tc.naples.node_name) == 'linux' or api.GetNodeOs(
                tc.naples.node_name) == 'windows':
            eutils.triggerTrafficInClassicModeLinux(tc)
        else:
            eutils.triggerTrafficInHostPinModeOrFreeBSD(tc)

        #
        # Dump sessions/flows/P4-tables for debug purposes
        #
        eutils.showSessionAndP4TablesForDebug(tc, tc.ep_collector,
                                              tc.ep_collector_idx)

        #
        # Terminate TCPDUMP background process
        #
        term_resp_tcpdump_erspan = api.Trigger_TerminateAllCommands(
            resp_tcpdump_erspan)
        tc.resp_tcpdump_erspan = api.Trigger_AggregateCommandsResponse(
            resp_tcpdump_erspan, term_resp_tcpdump_erspan)

        if api.GetNodeOs(tc.naples.node_name) == "windows":
            # Force-stop any lingering tcpdump.exe process on Windows.
            req = api.Trigger_CreateExecuteCommandsRequest(serial=True)
            cmd = api.WINDOWS_POWERSHELL_CMD + \
                " Stop-Process -Name 'tcpdump' -Force"
            api.Trigger_AddCommand(req, tc.naples.node_name,
                                   tc.naples.workload_name, cmd,
                                   background=False)
            resp = api.Trigger(req)

        # Delete the objects
        eutils.deGenerateEndpointConfig(tc, tc.endpoint_objects,
                                        tc.ep_collector_objects)
        agent_api.UpdateConfigObjects(tc.endpoint_objects,
                                      [tc.naples.node_name],
                                      [tc.naples_device_name])
        agent_api.DeleteConfigObjects(tc.ep_collector_objects,
                                      [tc.naples.node_name],
                                      [tc.naples_device_name])

        #
        # Make sure that Mirror-config has been removed
        #
        tc.resp_cleanup = eutils.showP4TablesForValidation(tc)

    return api.types.status.SUCCESS
def Trigger(tc):
    """Provision IPsec on both workload nodes, then run iperf across the tunnel.

    For each of the two workload nodes: if it is a Naples node, the IPsec
    SA-encrypt / SA-decrypt / policy objects are pushed through the netagent
    config API from JSON templates under the topology directory; otherwise
    the equivalent kernel IPsec state is programmed on the node's first
    workload via `ip xfrm` commands.  An iperf server/client pair is then
    run between the two workloads and the aggregated client response is
    stored in tc.resp for later verification.

    Returns api.types.status.SUCCESS or api.types.status.FAILURE.
    """
    tc.cmd_cookies = []
    tc.cmd_cookies1 = []
    tc.cmd_cookies2 = []
    nodes = api.GetWorkloadNodeHostnames()
    push_node_0 = [nodes[0]]
    push_node_1 = [nodes[1]]

    # Objects already present in the config store (e.g. from a previous run);
    # the JSON templates are only added when the store has none of that kind.
    encrypt_objects = netagent_cfg_api.QueryConfigs(kind='IPSecSAEncrypt')
    decrypt_objects = netagent_cfg_api.QueryConfigs(kind='IPSecSADecrypt')
    policy_objects = netagent_cfg_api.QueryConfigs(kind='IPSecPolicy')

    # Configure IPsec on Node 1
    if api.IsNaplesNode(nodes[0]):
        if len(encrypt_objects) == 0:
            newObjects = netagent_cfg_api.AddOneConfig(
                api.GetTopologyDirectory() +
                "/ipsec/ipsec_encryption_node1.json")
            if len(newObjects) == 0:
                api.Logger.error(
                    "Adding new objects to store failed for ipsec_encryption_node1.json"
                )
                return api.types.status.FAILURE
            ret = netagent_cfg_api.PushConfigObjects(newObjects,
                                                     node_names=push_node_0,
                                                     ignore_error=True)
            if ret != api.types.status.SUCCESS:
                api.Logger.error(
                    "Unable to push ipsec-encryption objects to node %s" %
                    nodes[0])
                return api.types.status.FAILURE
            # Read back the pushed objects to confirm the agent accepted them.
            get_config_objects = netagent_cfg_api.GetConfigObjects(newObjects)
            if len(get_config_objects) == 0:
                api.Logger.error("Unable to fetch newly pushed objects")
                return api.types.status.FAILURE
        if len(decrypt_objects) == 0:
            newObjects = netagent_cfg_api.AddOneConfig(
                api.GetTopologyDirectory() +
                "/ipsec/ipsec_decryption_node1.json")
            if len(newObjects) == 0:
                api.Logger.error(
                    "Adding new objects to store failed for ipsec_decryption_node1.json"
                )
                return api.types.status.FAILURE
            ret = netagent_cfg_api.PushConfigObjects(newObjects,
                                                     node_names=push_node_0,
                                                     ignore_error=True)
            if ret != api.types.status.SUCCESS:
                # NOTE(review): message says "ipsec-encryption" but this path
                # pushes the decryption objects — consider correcting.
                api.Logger.error(
                    "Unable to push ipsec-encryption objects to node %s" %
                    nodes[0])
                return api.types.status.FAILURE
            get_config_objects = netagent_cfg_api.GetConfigObjects(newObjects)
            if len(get_config_objects) == 0:
                api.Logger.error("Unable to fetch newly pushed objects")
                return api.types.status.FAILURE
        if len(policy_objects) == 0:
            newObjects = netagent_cfg_api.AddOneConfig(
                api.GetTopologyDirectory() +
                "/ipsec/ipsec_policies_node1.json")
            if len(newObjects) == 0:
                api.Logger.error(
                    "Adding new objects to store failed for ipsec_policies_node1.json"
                )
                return api.types.status.FAILURE
            ret = netagent_cfg_api.PushConfigObjects(newObjects,
                                                     node_names=push_node_0,
                                                     ignore_error=True)
            if ret != api.types.status.SUCCESS:
                api.Logger.error(
                    "Unable to push ipsec-policy objects to node %s" %
                    nodes[0])
                return api.types.status.FAILURE
            get_config_objects = netagent_cfg_api.GetConfigObjects(newObjects)
            if len(get_config_objects) == 0:
                api.Logger.error("Unable to fetch newly pushed objects")
                return api.types.status.FAILURE
    else:
        # Non-Naples node 0: program kernel IPsec (xfrm) directly on its
        # first workload.  One in/fwd/out policy triple plus both SAs are
        # installed per (port, spi, aead-key) tuple, for TCP and UDP.
        workloads = api.GetWorkloads(nodes[0])
        w1 = workloads[0]
        req1 = api.Trigger_CreateExecuteCommandsRequest(serial=True)
        api.Trigger_AddCommand(req1, w1.node_name, w1.workload_name,
                               "sudo ip xfrm state flush")
        api.Trigger_AddCommand(req1, w1.node_name, w1.workload_name,
                               "sudo ip xfrm policy flush")
        for port, spi, aead in zip(tc.args.ports_list, tc.args.spi_list,
                                   tc.args.aead_list):
            api.Trigger_AddCommand(
                req1, w1.node_name, w1.workload_name,
                "sudo ip xfrm policy add src 192.168.100.103/32 dst 192.168.100.101/32 proto tcp dport %s dir in tmpl src 192.168.100.103 dst 192.168.100.101 proto esp mode tunnel"
                % port)
            api.Trigger_AddCommand(
                req1, w1.node_name, w1.workload_name,
                "sudo ip xfrm policy add src 192.168.100.103/32 dst 192.168.100.101/32 proto tcp dport %s dir fwd tmpl src 192.168.100.103 dst 192.168.100.101 proto esp mode tunnel"
                % port)
            api.Trigger_AddCommand(
                req1, w1.node_name, w1.workload_name,
                "sudo ip xfrm policy add src 192.168.100.101/32 dst 192.168.100.103/32 proto tcp sport %s dir out tmpl src 192.168.100.101 dst 192.168.100.103 proto esp mode tunnel"
                % port)
            api.Trigger_AddCommand(
                req1, w1.node_name, w1.workload_name,
                "sudo ip xfrm state add src 192.168.100.103 dst 192.168.100.101 proto esp spi %s mode tunnel aead 'rfc4106(gcm(aes))' %s 128 sel src 192.168.100.103/32 dst 192.168.100.101/32"
                % (spi, aead))
            api.Trigger_AddCommand(
                req1, w1.node_name, w1.workload_name,
                "sudo ip xfrm state add src 192.168.100.101 dst 192.168.100.103 proto esp spi %s mode tunnel aead 'rfc4106(gcm(aes))' %s 128 sel src 192.168.100.101/32 dst 192.168.100.103/32"
                % (spi, aead))
            api.Trigger_AddCommand(
                req1, w1.node_name, w1.workload_name,
                "sudo ip xfrm policy add src 192.168.100.103/32 dst 192.168.100.101/32 proto udp dport %s dir in tmpl src 192.168.100.103 dst 192.168.100.101 proto esp mode tunnel"
                % port)
            api.Trigger_AddCommand(
                req1, w1.node_name, w1.workload_name,
                "sudo ip xfrm policy add src 192.168.100.103/32 dst 192.168.100.101/32 proto udp dport %s dir fwd tmpl src 192.168.100.103 dst 192.168.100.101 proto esp mode tunnel"
                % port)
            api.Trigger_AddCommand(
                req1, w1.node_name, w1.workload_name,
                "sudo ip xfrm policy add src 192.168.100.101/32 dst 192.168.100.103/32 proto udp sport %s dir out tmpl src 192.168.100.101 dst 192.168.100.103 proto esp mode tunnel"
                % port)
        # List the installed state/policy for debugging in the test logs.
        api.Trigger_AddCommand(req1, w1.node_name, w1.workload_name,
                               "sudo ip xfrm state list")
        api.Trigger_AddCommand(req1, w1.node_name, w1.workload_name,
                               "sudo ip xfrm policy list")
        trig_resp1 = api.Trigger(req1)
        term_resp1 = api.Trigger_TerminateAllCommands(trig_resp1)

    # Configure IPsec on Node 2 (mirror image of node 1: addresses swapped)
    if api.IsNaplesNode(nodes[1]):
        if len(encrypt_objects) == 0:
            newObjects = netagent_cfg_api.AddOneConfig(
                api.GetTopologyDirectory() +
                "/ipsec/ipsec_encryption_node2.json")
            if len(newObjects) == 0:
                api.Logger.error(
                    "Adding new objects to store failed for ipsec_encryption_node2.json"
                )
                return api.types.status.FAILURE
            ret = netagent_cfg_api.PushConfigObjects(newObjects,
                                                     node_names=push_node_1,
                                                     ignore_error=True)
            if ret != api.types.status.SUCCESS:
                api.Logger.error(
                    "Unable to push ipsec-encryption objects to node %s" %
                    nodes[1])
                return api.types.status.FAILURE
            get_config_objects = netagent_cfg_api.GetConfigObjects(newObjects)
            if len(get_config_objects) == 0:
                # NOTE(review): unlike the node-1 path, the failure return is
                # commented out here — fetch failure is only logged.
                api.Logger.error("Unable to fetch newly pushed objects")
                #return api.types.status.FAILURE
        if len(decrypt_objects) == 0:
            newObjects = netagent_cfg_api.AddOneConfig(
                api.GetTopologyDirectory() +
                "/ipsec/ipsec_decryption_node2.json")
            if len(newObjects) == 0:
                api.Logger.error(
                    "Adding new objects to store failed for ipsec_decryption_node2.json"
                )
                return api.types.status.FAILURE
            ret = netagent_cfg_api.PushConfigObjects(newObjects,
                                                     node_names=push_node_1,
                                                     ignore_error=True)
            if ret != api.types.status.SUCCESS:
                api.Logger.error(
                    "Unable to push ipsec-encryption objects to node %s" %
                    nodes[1])
                return api.types.status.FAILURE
            get_config_objects = netagent_cfg_api.GetConfigObjects(newObjects)
            if len(get_config_objects) == 0:
                api.Logger.error("Unable to fetch newly pushed objects")
                #return api.types.status.FAILURE
        if len(policy_objects) == 0:
            newObjects = netagent_cfg_api.AddOneConfig(
                api.GetTopologyDirectory() +
                "/ipsec/ipsec_policies_node2.json")
            if len(newObjects) == 0:
                api.Logger.error(
                    "Adding new objects to store failed for ipsec_policies_node2.json"
                )
                return api.types.status.FAILURE
            ret = netagent_cfg_api.PushConfigObjects(newObjects,
                                                     node_names=push_node_1,
                                                     ignore_error=True)
            if ret != api.types.status.SUCCESS:
                api.Logger.error(
                    "Unable to push ipsec-encryption objects to node %s" %
                    nodes[1])
                return api.types.status.FAILURE
            get_config_objects = netagent_cfg_api.GetConfigObjects(newObjects)
            if len(get_config_objects) == 0:
                api.Logger.error("Unable to fetch newly pushed objects")
                #return api.types.status.FAILURE
    else:
        # Non-Naples node 1: program kernel xfrm state on its first workload.
        workloads = api.GetWorkloads(nodes[1])
        w2 = workloads[0]
        req2 = api.Trigger_CreateExecuteCommandsRequest(serial=True)
        api.Trigger_AddCommand(req2, w2.node_name, w2.workload_name,
                               "sudo ip xfrm state flush")
        api.Trigger_AddCommand(req2, w2.node_name, w2.workload_name,
                               "sudo ip xfrm policy flush")
        for port, spi, aead in zip(tc.args.ports_list, tc.args.spi_list,
                                   tc.args.aead_list):
            api.Trigger_AddCommand(
                req2, w2.node_name, w2.workload_name,
                "sudo ip xfrm policy add src 192.168.100.101/32 dst 192.168.100.103/32 proto tcp dport %s dir in tmpl src 192.168.100.101 dst 192.168.100.103 proto esp mode tunnel"
                % port)
            api.Trigger_AddCommand(
                req2, w2.node_name, w2.workload_name,
                "sudo ip xfrm policy add src 192.168.100.101/32 dst 192.168.100.103/32 proto tcp dport %s dir fwd tmpl src 192.168.100.101 dst 192.168.100.103 proto esp mode tunnel"
                % port)
            api.Trigger_AddCommand(
                req2, w2.node_name, w2.workload_name,
                "sudo ip xfrm policy add src 192.168.100.103/32 dst 192.168.100.101/32 proto tcp sport %s dir out tmpl src 192.168.100.103 dst 192.168.100.101 proto esp mode tunnel"
                % port)
            api.Trigger_AddCommand(
                req2, w2.node_name, w2.workload_name,
                "sudo ip xfrm state add src 192.168.100.101 dst 192.168.100.103 proto esp spi %s mode tunnel aead 'rfc4106(gcm(aes))' %s 128 sel src 192.168.100.101/32 dst 192.168.100.103/32"
                % (spi, aead))
            api.Trigger_AddCommand(
                req2, w2.node_name, w2.workload_name,
                "sudo ip xfrm state add src 192.168.100.103 dst 192.168.100.101 proto esp spi %s mode tunnel aead 'rfc4106(gcm(aes))' %s 128 sel src 192.168.100.103/32 dst 192.168.100.101/32"
                % (spi, aead))
            api.Trigger_AddCommand(
                req2, w2.node_name, w2.workload_name,
                "sudo ip xfrm policy add src 192.168.100.101/32 dst 192.168.100.103/32 proto udp dport %s dir in tmpl src 192.168.100.101 dst 192.168.100.103 proto esp mode tunnel"
                % port)
            api.Trigger_AddCommand(
                req2, w2.node_name, w2.workload_name,
                "sudo ip xfrm policy add src 192.168.100.101/32 dst 192.168.100.103/32 proto udp dport %s dir fwd tmpl src 192.168.100.101 dst 192.168.100.103 proto esp mode tunnel"
                % port)
            api.Trigger_AddCommand(
                req2, w2.node_name, w2.workload_name,
                "sudo ip xfrm policy add src 192.168.100.103/32 dst 192.168.100.101/32 proto udp sport %s dir out tmpl src 192.168.100.103 dst 192.168.100.101 proto esp mode tunnel"
                % port)
        api.Trigger_AddCommand(req2, w2.node_name, w2.workload_name,
                               "sudo ip xfrm state list")
        api.Trigger_AddCommand(req2, w2.node_name, w2.workload_name,
                               "sudo ip xfrm policy list")
        trig_resp2 = api.Trigger(req2)
        term_resp2 = api.Trigger_TerminateAllCommands(trig_resp2)

    # Pick iperf client/server roles; the Naples node (if any) is the client.
    workloads = api.GetWorkloads(nodes[0])
    w1 = workloads[0]
    workloads = api.GetWorkloads(nodes[1])
    w2 = workloads[0]

    bypass_test = 0
    if w1.IsNaples() and w2.IsNaples():
        api.Logger.info(
            "Both workloads are Naples, %s is iperf client, %s is iperf server, bypassing test"
            % (w1.node_name, w2.node_name))
        iperf_client_wl = w1
        iperf_server_wl = w2
        bypass_test = 1
    elif w1.IsNaples():
        api.Logger.info("%s is Naples and iperf client, %s is iperf server" %
                        (w1.node_name, w2.node_name))
        iperf_client_wl = w1
        iperf_server_wl = w2
    elif w2.IsNaples():
        # NOTE(review): if NEITHER workload is Naples, iperf_client_wl /
        # iperf_server_wl are never assigned and the code below would raise
        # UnboundLocalError — presumably at least one Naples node is
        # guaranteed by the testsuite; confirm.
        api.Logger.info("%s is Naples and iperf client, %s is iperf server" %
                        (w2.node_name, w1.node_name))
        iperf_client_wl = w2
        iperf_server_wl = w1

    req3 = api.Trigger_CreateExecuteCommandsRequest(serial=True)
    cmd_cookie = "Set rcv socket buffer size on %s" % (w1.workload_name)
    api.Trigger_AddCommand(
        req3, w1.node_name, w1.workload_name,
        "sysctl -w net.ipv4.tcp_rmem='4096 2147483647 2147483647'")
    tc.cmd_cookies.append(cmd_cookie)

    cmd_cookie = "Set rcv socket buffer size on %s" % (w2.workload_name)
    api.Trigger_AddCommand(
        req3, w2.node_name, w2.workload_name,
        "sysctl -w net.ipv4.tcp_rmem='4096 2147483647 2147483647'")
    tc.cmd_cookies.append(cmd_cookie)

    # Shrink the client MTU so encapsulated (ESP) packets still fit.
    cmd_cookie = "Setting MTU to smaller value on %s" % (
        iperf_client_wl.workload_name)
    api.Trigger_AddCommand(req3, iperf_client_wl.node_name,
                           iperf_client_wl.workload_name,
                           "ifconfig %s mtu 1048" % iperf_client_wl.interface)
    tc.cmd_cookies.append(cmd_cookie)

    tc.cmd_descr = "Server: %s(%s) <--> Client: %s(%s) on %s" %\
                   (iperf_server_wl.workload_name, iperf_server_wl.ip_address,
                    iperf_client_wl.workload_name, iperf_client_wl.ip_address,
                    tc.iterators.protocol)
    api.Logger.info("Starting Iperf test over IPSec from %s" % (tc.cmd_descr))

    # Start one background iperf server per test port.
    if bypass_test == 0:
        for port in tc.args.ports_list:
            cmd_cookie = "Running iperf server on %s port %s" % (
                iperf_server_wl.workload_name, port)
            api.Trigger_AddCommand(req3, iperf_server_wl.node_name,
                                   iperf_server_wl.workload_name,
                                   "iperf -s -p %s" % (port),
                                   background=True)
            tc.cmd_cookies.append(cmd_cookie)

    req4 = api.Trigger_CreateExecuteCommandsRequest(serial=False)
    if bypass_test == 0:
        # Give the background servers a moment to come up before clients run.
        cmd_cookie = "Brief Sleep"
        api.Trigger_AddCommand(req4, iperf_client_wl.node_name,
                               iperf_client_wl.workload_name, "sleep 1")
        tc.cmd_cookies.append(cmd_cookie)
        for port in tc.args.ports_list:
            cmd_cookie = "Running iperf client on %s port %s" % (
                iperf_client_wl.workload_name, port)
            if tc.iterators.protocol == "tcp":
                api.Trigger_AddCommand(
                    req4, iperf_client_wl.node_name,
                    iperf_client_wl.workload_name, "iperf -c %s -p %s -M %s" %
                    (iperf_server_wl.ip_address, port, tc.iterators.pktsize))
            else:
                api.Trigger_AddCommand(
                    req4, iperf_client_wl.node_name,
                    iperf_client_wl.workload_name,
                    "iperf --udp -c %s -p %s -M %s" %
                    (iperf_server_wl.ip_address, port, tc.iterators.pktsize))
            tc.cmd_cookies.append(cmd_cookie)

    # Post-traffic IPsec state dump (halctl on Naples, xfrm on workloads).
    req5 = api.Trigger_CreateExecuteCommandsRequest(serial=True)
    if w1.IsNaples():
        cmd_cookie = "IPSec state on %s AFTER running iperf traffic" % (
            w1.node_name)
        api.Trigger_AddNaplesCommand(
            req5, w1.node_name, "/nic/bin/halctl show ipsec-global-stats")
        tc.cmd_cookies.append(cmd_cookie)
    else:
        cmd_cookie = "IPSec state on %s AFTER running iperf traffic" % (
            w1.node_name)
        api.Trigger_AddCommand(req5, w1.node_name, w1.workload_name,
                               "sudo ip xfrm policy show")
        tc.cmd_cookies.append(cmd_cookie)
    if w2.IsNaples():
        cmd_cookie = "IPSec state on %s AFTER running iperf traffic" % (
            w2.node_name)
        api.Trigger_AddNaplesCommand(
            req5, w2.node_name, "/nic/bin/halctl show ipsec-global-stats")
        tc.cmd_cookies.append(cmd_cookie)
    else:
        cmd_cookie = "IPSec state on %s AFTER running iperf traffic" % (
            w2.node_name)
        api.Trigger_AddCommand(req5, w2.node_name, w2.workload_name,
                               "sudo ip xfrm policy show")
        tc.cmd_cookies.append(cmd_cookie)

    trig_resp3 = api.Trigger(req3)
    trig_resp4 = api.Trigger(req4)
    trig_resp5 = api.Trigger(req5)

    term_resp3 = api.Trigger_TerminateAllCommands(trig_resp3)
    term_resp4 = api.Trigger_TerminateAllCommands(trig_resp4)
    term_resp5 = api.Trigger_TerminateAllCommands(trig_resp5)

    # Only the client-side (req4) responses are verified downstream.
    agg_resp4 = api.Trigger_AggregateCommandsResponse(trig_resp4, term_resp4)
    tc.resp = agg_resp4
    return api.types.status.SUCCESS
def __get_topology_config():
    """Load and return the parsed spec from the topology's config.yml."""
    yml_path = "{}/config.yml".format(api.GetTopologyDirectory())
    return parser.YmlParse(yml_path)
def Trigger(tc):
    """Exercise the SecurityProfile config-store CRUD paths.

    Steps: query the store, fetch the pushed copies from the agent, update
    the TCP timeouts and verify the update took effect, restore the original
    timeouts and verify, delete/re-push the store objects, then push and
    delete an extra profile from a test JSON on one Naples node.

    Returns api.types.status.SUCCESS or api.types.status.FAILURE.
    """
    #Query will get the reference of objects on store
    store_profile_objects = netagent_cfg_api.QueryConfigs(kind='SecurityProfile')
    if len(store_profile_objects) == 0:
        api.Logger.error("No security profile objects in store")
        return api.types.status.FAILURE

    #Get will return copy of pushed objects to agent
    get_config_objects = netagent_cfg_api.GetConfigObjects(store_profile_objects)
    if len(get_config_objects) == 0:
        api.Logger.error("Unable to fetch security profile objects")
        return api.types.status.FAILURE

    if len(get_config_objects) != len(store_profile_objects):
        api.Logger.error("Config mismatch, Get Objects : %d, Config store Objects : %d" % (len(get_config_objects), len(store_profile_objects)))
        return api.types.status.FAILURE

    #Now do an update of the objects
    # NOTE(review): `object` shadows the builtin; a rename would be cleaner.
    for object in store_profile_objects:
        object.spec.timeouts.tcp_connection_setup = "1200s"
        object.spec.timeouts.tcp_half_close = "1400s"

    #Now push the update as we modified.
    netagent_cfg_api.UpdateConfigObjects(store_profile_objects)

    #Get will return copy of pushed objects to agent
    new_get_config_objects = netagent_cfg_api.GetConfigObjects(store_profile_objects)
    if len(new_get_config_objects) == 0:
        api.Logger.error("Unable to fetch security profile objects after update")
        return api.types.status.FAILURE

    #Check whether value has changed
    # Agent copy must match the store's new value, and both must differ from
    # the pre-update snapshot (old_object).
    for (get_object,store_object,old_object) in zip(new_get_config_objects, store_profile_objects, get_config_objects):
        if get_object.spec.timeouts.tcp_connection_setup != store_object.spec.timeouts.tcp_connection_setup or \
           old_object.spec.timeouts.tcp_connection_setup == store_object.spec.timeouts.tcp_connection_setup or \
           old_object.spec.timeouts.tcp_connection_setup == get_object.spec.timeouts.tcp_connection_setup:
            api.Logger.error("Update failed")
            return api.types.status.FAILURE

    #Now do restore value to old one
    for object,old_object in zip(store_profile_objects, get_config_objects):
        object.spec.timeouts.tcp_connection_setup = old_object.spec.timeouts.tcp_connection_setup
        object.spec.timeouts.tcp_half_close = old_object.spec.timeouts.tcp_half_close

    #Now push the update as we modified.
    netagent_cfg_api.UpdateConfigObjects(store_profile_objects)

    #Get will return copy of pushed objects to agent
    new_get_config_objects = netagent_cfg_api.GetConfigObjects(store_profile_objects)
    if len(new_get_config_objects) == 0:
        api.Logger.error("Unable to fetch security profile objects after update")
        return api.types.status.FAILURE

    # After the restore, agent, store and snapshot must all agree.
    for (get_object,store_object,old_object) in zip(new_get_config_objects, store_profile_objects, get_config_objects):
        if get_object.spec.timeouts.tcp_connection_setup != store_object.spec.timeouts.tcp_connection_setup or \
           old_object.spec.timeouts.tcp_connection_setup != store_object.spec.timeouts.tcp_connection_setup or \
           old_object.spec.timeouts.tcp_connection_setup != get_object.spec.timeouts.tcp_connection_setup:
            api.Logger.error("Second Update failed")
            return api.types.status.FAILURE

    #Now lets do a delete
    netagent_cfg_api.DeleteConfigObjects(store_profile_objects)

    #Get will return copy of pushed objects to agent
    get_config_objects = netagent_cfg_api.GetConfigObjects(store_profile_objects)
    if len(get_config_objects) != 0:
        api.Logger.error("Delete of objects failed")
        return api.types.status.FAILURE

    # Push the original store objects back so the system is left configured.
    netagent_cfg_api.PushConfigObjects(store_profile_objects)

    #Get will return copy of pushed objects to agent
    get_config_objects = netagent_cfg_api.GetConfigObjects(store_profile_objects)
    if len(get_config_objects) == 0:
        api.Logger.error("Unable to fetch security profile objects")
        return api.types.status.FAILURE

    # Push an additional profile from a test JSON to a single Naples node.
    newObjects = netagent_cfg_api.AddOneConfig(api.GetTopologyDirectory() + "/test_cfg/test_security_profile.json")
    if len(newObjects) == 0:
        api.Logger.error("Adding new objects to store failed")
        return api.types.status.FAILURE

    nodes = api.GetNaplesHostnames()
    push_nodes = [nodes[0]]
    ret = netagent_cfg_api.PushConfigObjects(newObjects, node_names = push_nodes)
    if ret != api.types.status.SUCCESS:
        # NOTE(review): message says "fetch" but this is a push failure.
        api.Logger.error("Unable to fetch security profile objects to node %s" % nodes[0])
        return api.types.status.FAILURE

    #Get will return copy of pushed objects to agent
    get_config_objects = netagent_cfg_api.GetConfigObjects(newObjects, node_names = push_nodes)
    if len(get_config_objects) == 0:
        api.Logger.error("Unable to fetch newly pushed objects")
        return api.types.status.FAILURE

    #Delete the objects that is pushed
    netagent_cfg_api.DeleteConfigObjects(get_config_objects)

    #Get will return copy of pushed objects to agent
    get_config_objects = netagent_cfg_api.GetConfigObjects(newObjects, node_names = push_nodes)
    if len(get_config_objects) != 0:
        api.Logger.error("Delete of new objects failed")
        return api.types.status.FAILURE

    # Remove completely those objects from the store too.
    ret = netagent_cfg_api.RemoveConfigObjects(newObjects)
    if ret != api.types.status.SUCCESS:
        api.Logger.error("Remove of new objects failed")
        return api.types.status.FAILURE

    return api.types.status.SUCCESS
def Teardown(tc):
    """Re-apply the SG policies from the topology's sgpolicy.json on teardown."""
    api.Logger.info("Tearing down ...")
    sgpolicy_path = "{}/sgpolicy.json".format(api.GetTopologyDirectory())
    sgpolicy_cfg = utils.ReadJson(sgpolicy_path)
    agent_api.ConfigureSecurityGroupPolicies(sgpolicy_cfg.sgpolicies,
                                             oper=agent_api.CfgOper.ADD)
    return api.types.status.SUCCESS
def Trigger(tc):
    """Run combined LIF-ERSPAN + flow-ERSPAN + flowmon (IPFIX) telemetry tests.

    Iterates over template mirror/flowmon/LIF-policy JSONs, pushes the
    generated configs to the Naples node, starts tcpdump captures on the
    collector workloads, triggers traffic, validates the captured ERSPAN and
    IPFIX packets against expected counts, then deletes the pushed config
    and validates cleanup.

    Returns api.types.status.SUCCESS or api.types.status.FAILURE; also sets
    the test count via tc.SetTestCount().
    """
    if tc.ignore == True:
        return api.types.status.SUCCESS

    if tc.error == True:
        return api.types.status.FAILURE

    protoDir1 = api.GetTopologyDirectory() +\
                "/gen/telemetry/{}/{}".format('mirror', 'lif')
    # NOTE(review): Logger.info is given two positional args here —
    # presumably the logger concatenates them; confirm intended formatting.
    api.Logger.info("Template Config files location: ", protoDir1)
    protoDir2 = api.GetTopologyDirectory() +\
                "/gen/telemetry/{}/{}".format('mirror', tc.iterators.proto)
    api.Logger.info("Template Config files location: ", protoDir2)
    protoDir3 = api.GetTopologyDirectory() +\
                "/gen/telemetry/{}/{}".format('flowmon', tc.iterators.proto)
    api.Logger.info("Template Config files location: ", protoDir3)

    result = api.types.status.SUCCESS
    count = 0

    MirrorPolicies = utils.GetTargetJsons('mirror', tc.iterators.proto)
    FlowMonPolicies = utils.GetTargetJsons('flowmon', tc.iterators.proto)
    LifPolicies = utils.GetTargetJsons('mirror', 'lif')
    flowmon_policy_idx = 0
    ret_count = 0
    for mirror_json in MirrorPolicies:
        #
        # Get template-Mirror Config
        #
        newMirrorObjects = agent_api.AddOneConfig(mirror_json)
        if len(newMirrorObjects) == 0:
            api.Logger.error("Adding new Mirror objects to store failed")
            tc.error = True
            return api.types.status.FAILURE
        agent_api.RemoveConfigObjects(newMirrorObjects)

        #
        # Ignore Multi-collector template config's, since Expanded-Telemetry
        # testbundle dynamically creates such config's
        #
        if len(newMirrorObjects[0].spec.collectors) > 1:
            continue

        idx = 0
        for flowmon_json in FlowMonPolicies:
            # Skip flowmon templates already consumed in earlier iterations.
            if idx < flowmon_policy_idx:
                idx += 1
                continue

            #
            # Get template-FlowMon Config
            #
            newFlowMonObjects = agent_api.AddOneConfig(flowmon_json)
            if len(newFlowMonObjects) == 0:
                api.Logger.error("Adding new FlowMon objects to store failed")
                tc.error = True
                return api.types.status.FAILURE
            agent_api.RemoveConfigObjects(newFlowMonObjects)

            #
            # Ignore Multi-collector template config's, since Expanded-Telemetry
            # testbundle dynamically creates such config's
            #
            if len(newFlowMonObjects[0].spec.exports) > 1:
                flowmon_policy_idx += 1
                idx += 1
                continue

            #
            # Modify template-Mirror / template-FlowMon Config to make sure
            # that Naples-node act as either source or destination
            #
            # Set up Collector in the remote node
            #
            eutils.generateMirrorConfig(tc, mirror_json, newMirrorObjects)
            eutils.generateFlowMonConfig(tc, flowmon_json, newFlowMonObjects)

            ret_count = 0
            for i in range(0, len(tc.mirror_verif)):
                #
                # If Execution-Optimization is enabled, no need to run the
                # test for the same protocol more than once
                #
                if i > 0 and tc.mirror_verif[i]['protocol'] ==\
                   tc.mirror_verif[i-1]['protocol']:
                    continue

                #
                # Flow-ERSPAN for TCP-traffic is not tested (yet) in
                # Classic-mode until applicable pkt-trigger tools are
                # identified
                #
                if tc.classic_mode == True and\
                   tc.mirror_verif[i]['protocol'] == 'tcp':
                    continue

                for policy_json in LifPolicies:
                    #
                    # Get template-Mirror Config
                    #
                    newObjects = agent_api.AddOneConfig(policy_json)
                    if len(newObjects) == 0:
                        api.Logger.error("Adding new objects to store failed")
                        tc.error = True
                        return api.types.status.FAILURE

                    #
                    # Modify template-Mirror Config to make sure that
                    # Naples-node act as either source or destination
                    #
                    # Set up Collector in the remote node
                    #
                    if newObjects[0].kind == 'InterfaceMirrorSession':
                        tc.lif_collector_objects = newObjects
                        agent_api.RemoveConfigObjects(tc.lif_collector_objects)
                    elif newObjects[0].kind == 'Interface':
                        tc.interface_objects = newObjects
                        agent_api.RemoveConfigObjects(tc.interface_objects)

                #
                # Push Collector Config to Naples
                #
                colObjects = tc.lif_collector_objects
                ret = eutils.generateLifCollectorConfig(tc, colObjects)
                if ret != api.types.status.SUCCESS:
                    api.Logger.error("Unable to identify Collector Workload")
                    tc.error = True
                    return api.types.status.FAILURE

                ret = agent_api.PushConfigObjects(colObjects,
                                                  [tc.naples.node_name],
                                                  [tc.naples_device_name])
                if ret != api.types.status.SUCCESS:
                    api.Logger.error("Unable to push collector objects")
                    tc.error = True
                    return api.types.status.FAILURE

                #
                # Push Mirror / FlowMon Config to Naples; on any failure,
                # roll back whatever was already pushed before bailing out.
                #
                ret = agent_api.PushConfigObjects(newMirrorObjects,
                                                  [tc.naples.node_name],
                                                  [tc.naples_device_name])
                if ret != api.types.status.SUCCESS:
                    agent_api.DeleteConfigObjects(tc.lif_collector_objects,
                                                  [tc.naples.node_name],
                                                  [tc.naples_device_name])
                    api.Logger.error("Unable to push mirror objects")
                    tc.error = True
                    return api.types.status.FAILURE

                ret = agent_api.PushConfigObjects(newFlowMonObjects,
                                                  [tc.naples.node_name],
                                                  [tc.naples_device_name])
                if ret != api.types.status.SUCCESS:
                    agent_api.DeleteConfigObjects(tc.lif_collector_objects,
                                                  [tc.naples.node_name],
                                                  [tc.naples_device_name])
                    agent_api.DeleteConfigObjects(newMirrorObjects,
                                                  [tc.naples.node_name],
                                                  [tc.naples_device_name])
                    api.Logger.error("Unable to push flowmon objects")
                    tc.error = True
                    return api.types.status.FAILURE

                #
                # Update Interface objects
                #
                ifObjects = tc.interface_objects
                ret = eutils.generateLifInterfaceConfig(
                    tc, ifObjects, tc.lif_collector_objects)
                if ret != api.types.status.SUCCESS:
                    agent_api.DeleteConfigObjects(tc.lif_collector_objects,
                                                  [tc.naples.node_name],
                                                  [tc.naples_device_name])
                    agent_api.DeleteConfigObjects(newMirrorObjects,
                                                  [tc.naples.node_name],
                                                  [tc.naples_device_name])
                    agent_api.DeleteConfigObjects(newFlowMonObjects,
                                                  [tc.naples.node_name],
                                                  [tc.naples_device_name])
                    api.Logger.error(
                        "Unable to identify Uplink/LIF Interfaces")
                    tc.error = True
                    return api.types.status.FAILURE

                ret = agent_api.UpdateConfigObjects(ifObjects,
                                                    [tc.naples.node_name],
                                                    [tc.naples_device_name])
                if ret != api.types.status.SUCCESS:
                    agent_api.DeleteConfigObjects(tc.lif_collector_objects,
                                                  [tc.naples.node_name],
                                                  [tc.naples_device_name])
                    agent_api.DeleteConfigObjects(newMirrorObjects,
                                                  [tc.naples.node_name],
                                                  [tc.naples_device_name])
                    agent_api.DeleteConfigObjects(newFlowMonObjects,
                                                  [tc.naples.node_name],
                                                  [tc.naples_device_name])
                    api.Logger.error("Unable to update interface objects")
                    tc.error = True
                    return api.types.status.FAILURE

                #
                # Establish Forwarding set up between Naples-peer and
                # Collectors
                #
                eutils.establishForwardingSetup(tc)

                #
                # Give a little time for flows clean-up to happen so that
                # stale IPFIX records don't show up
                #
                if tc.classic_mode == True:
                    time.sleep(1)

                if tc.collection == 'distinct':
                    req_tcpdump_flow_erspan = \
                        api.Trigger_CreateExecuteCommandsRequest(serial = True)
                    for c in range(0, len(tc.flow_collector)):
                        #
                        # Set up TCPDUMP's on the collector
                        #
                        idx = tc.flow_collector_idx[c]
                        if tc.flow_collector[c].IsNaples():
                            cmd = "tcpdump -c 600 -XX -vv -nni {} ip proto gre and dst {} --immediate-mode -U -w flow-mirror-{}.pcap"\
                                  .format(tc.flow_collector[c].interface,
                                          tc.collector_ip_address[idx], c)
                        else:
                            cmd = "tcpdump -p -c 600 -XX -vv -nni {} ip proto gre and dst {} --immediate-mode -U -w flow-mirror-{}.pcap"\
                                  .format(tc.flow_collector[c].interface,
                                          tc.collector_ip_address[idx], c)
                        eutils.add_command(req_tcpdump_flow_erspan,
                                           tc.flow_collector[c], cmd, True)

                    resp_tcpdump_flow_erspan = api.Trigger(\
                                               req_tcpdump_flow_erspan)
                    for cmd in resp_tcpdump_flow_erspan.commands:
                        api.PrintCommandResults(cmd)

                    #
                    # Classic mode requires a delay to make sure that TCPDUMP
                    # background process is fully up
                    #
                    if tc.classic_mode == True:
                        time.sleep(1)

                req_tcpdump_lif_erspan = \
                    api.Trigger_CreateExecuteCommandsRequest(serial = True)
                req_tcpdump_flowmon = api.Trigger_CreateExecuteCommandsRequest(\
                                      serial = True)
                for c in range(0, len(tc.lif_collector)):
                    #
                    # Set up TCPDUMP's on the collector
                    #
                    idx = tc.lif_collector_idx[c]
                    if tc.lif_collector[c].IsNaples():
                        cmd = "tcpdump -c 1000 -XX -vv -nni {} ip proto gre and dst {} --immediate-mode -U -w lif-mirror-{}.pcap"\
                              .format(tc.lif_collector[c].interface,
                                      tc.collector_ip_address[idx], c)
                    else:
                        cmd = "tcpdump -p -c 1000 -XX -vv -nni {} ip proto gre and dst {} --immediate-mode -U -w lif-mirror-{}.pcap"\
                              .format(tc.lif_collector[c].interface,
                                      tc.collector_ip_address[idx], c)
                    eutils.add_command(req_tcpdump_lif_erspan,
                                       tc.lif_collector[c], cmd, True)

                for c in range(0, len(tc.flowmon_collector)):
                    #
                    # Set up TCPDUMP's on the collector
                    #
                    idx = tc.flowmon_collector_idx[c]
                    if tc.flowmon_collector[c].IsNaples():
                        cmd = "tcpdump -c 600 -XX -vv -nni {} udp and dst port {} and dst {} --immediate-mode -U -w flowmon-{}.pcap"\
                              .format(tc.flowmon_collector[c].interface,
                                      tc.export_port[c],
                                      tc.collector_ip_address[idx], c)
                    else:
                        cmd = "tcpdump -p -c 600 -XX -vv -nni {} udp and dst port {} and dst {} --immediate-mode -U -w flowmon-{}.pcap"\
                              .format(tc.flowmon_collector[c].interface,
                                      tc.export_port[c],
                                      tc.collector_ip_address[idx], c)
                    eutils.add_command(req_tcpdump_flowmon,
                                       tc.flowmon_collector[c], cmd, True)

                resp_tcpdump_lif_erspan = api.Trigger(\
                                          req_tcpdump_lif_erspan)
                for cmd in resp_tcpdump_lif_erspan.commands:
                    api.PrintCommandResults(cmd)

                #
                # Classic mode requires a delay to make sure that TCPDUMP
                # background process is fully up
                #
                if tc.classic_mode == True:
                    time.sleep(2)

                resp_tcpdump_flowmon = api.Trigger(req_tcpdump_flowmon)
                for cmd in resp_tcpdump_flowmon.commands:
                    api.PrintCommandResults(cmd)

                #
                # Classic mode requires a delay to make sure that TCPDUMP
                # background process is fully up
                #
                if tc.classic_mode == True:
                    time.sleep(2)

                #
                # Trigger packets for ERSPAN / FLOWMON to take effect
                #
                tc.protocol = tc.mirror_verif[i]['protocol']
                tc.dest_port = utils.GetDestPort(tc.mirror_verif[i]['port'])
                # Temporarily widen the protocol filter while triggering.
                protocol = tc.protocol
                tc.protocol = 'all'
                if api.GetNodeOs(tc.naples.node_name) == 'linux':
                    eutils.triggerTrafficInClassicModeLinux(tc)
                else:
                    eutils.triggerTrafficInHostPinModeOrFreeBSD(tc)
                tc.protocol = protocol

                #
                # Dump sessions/flows/P4-tables for debug purposes
                #
                eutils.showSessionAndP4TablesForDebug(tc, tc.lif_collector,
                                                      tc.lif_collector_idx)

                #
                # Terminate TCPDUMP background process
                #
                term_resp_tcpdump_lif_erspan = \
                    api.Trigger_TerminateAllCommands(resp_tcpdump_lif_erspan)
                tc.resp_tcpdump_lif_erspan = \
                    api.Trigger_AggregateCommandsResponse(\
                    resp_tcpdump_lif_erspan, term_resp_tcpdump_lif_erspan)
                if tc.collection == 'distinct':
                    term_resp_tcpdump_flow_erspan = \
                        api.Trigger_TerminateAllCommands(resp_tcpdump_flow_erspan)
                    tc.resp_tcpdump_flow_erspan = \
                        api.Trigger_AggregateCommandsResponse(\
                        resp_tcpdump_flow_erspan, term_resp_tcpdump_flow_erspan)
                term_resp_tcpdump_flowmon = api.Trigger_TerminateAllCommands(\
                                            resp_tcpdump_flowmon)
                tc.resp_tcpdump_flowmon = \
                    api.Trigger_AggregateCommandsResponse(resp_tcpdump_flowmon,
                                                          term_resp_tcpdump_flowmon)

                # Delete the objects
                eutils.deGenerateLifInterfaceConfig(tc, tc.interface_objects,
                                                    tc.lif_collector_objects)
                agent_api.UpdateConfigObjects(tc.interface_objects,
                                              [tc.naples.node_name],
                                              [tc.naples_device_name])
                agent_api.DeleteConfigObjects(tc.lif_collector_objects,
                                              [tc.naples.node_name],
                                              [tc.naples_device_name])
                agent_api.DeleteConfigObjects(newMirrorObjects,
                                              [tc.naples.node_name],
                                              [tc.naples_device_name])
                agent_api.DeleteConfigObjects(newFlowMonObjects,
                                              [tc.naples.node_name],
                                              [tc.naples_device_name])

                #
                # Make sure that Mirror-config has been removed
                #
                tc.resp_cleanup = eutils.showP4TablesForValidation(tc)

                #
                # Validate ERSPAN packets reception
                #
                # Expected counts start from the bidirectional baseline and
                # are halved/doubled per direction, dup-check and collection
                # mode below.
                tc.tcp_erspan_pkts_expected = \
                    NUMBER_OF_TCP_ERSPAN_PACKETS_PER_SESSION
                tc.udp_erspan_pkts_expected = (tc.udp_count << 1)
                tc.icmp_erspan_pkts_expected = (tc.udp_count << 1)+\
                                               (tc.icmp_count << 1)
                if tc.iterators.direction != 'both':
                    tc.tcp_erspan_pkts_expected >>= 1
                    tc.udp_erspan_pkts_expected >>= 1
                    tc.icmp_erspan_pkts_expected >>= 1
                if tc.dupcheck == 'disable':
                    tc.tcp_erspan_pkts_expected = \
                        (tc.tcp_erspan_pkts_expected+1) << 1
                    tc.udp_erspan_pkts_expected <<= 1
                    tc.icmp_erspan_pkts_expected <<= 1

                #
                # Adjust Expected-pkt-counts taking into account Flow-ERSPAN
                # Config's
                #
                if tc.collection == 'unified':
                    if (tc.protocol == 'tcp' or tc.iterators.proto == 'mixed')\
                       and tc.iterators.direction != 'both':
                        tc.tcp_erspan_pkts_expected <<= 1
                    if (tc.protocol == 'udp' or tc.iterators.proto == 'mixed')\
                       and tc.iterators.direction != 'both':
                        tc.udp_erspan_pkts_expected <<= 1
                    if tc.protocol == 'icmp' or tc.iterators.proto == 'mixed':
                        if tc.iterators.direction != 'both':
                            tc.icmp_erspan_pkts_expected <<= 1
                        #if tc.iterators.direction != 'egress':
                        #    tc.icmp_erspan_pkts_expected += 1

                protocol = tc.protocol
                tc.protocol = 'all'
                tc.feature = 'lif-erspan'
                tc.resp_tcpdump_erspan = tc.resp_tcpdump_lif_erspan
                res_1 = eutils.validateErspanPackets(tc, tc.lif_collector,
                                                     tc.lif_collector_idx)

                if tc.collection == 'distinct':
                    # Distinct collection: re-compute per-protocol expected
                    # counts and validate the flow-ERSPAN capture separately.
                    tc.tcp_erspan_pkts_expected = 0
                    tc.udp_erspan_pkts_expected = 0
                    tc.icmp_erspan_pkts_expected = 0
                    tc.protocol = protocol
                    if tc.protocol == 'tcp' or tc.iterators.proto == 'mixed':
                        tc.tcp_erspan_pkts_expected = \
                            NUMBER_OF_TCP_ERSPAN_PACKETS_PER_SESSION
                    if tc.protocol == 'udp' or tc.iterators.proto == 'mixed':
                        tc.udp_erspan_pkts_expected = (tc.udp_count << 1)
                    if tc.protocol == 'icmp' or tc.iterators.proto == 'mixed':
                        tc.icmp_erspan_pkts_expected = (tc.udp_count << 1)+\
                                                       (tc.icmp_count << 1)
                    tc.protocol = 'all'
                    tc.feature = 'flow-erspan'
                    tc.resp_tcpdump_erspan = tc.resp_tcpdump_flow_erspan
                    res_f = eutils.validateErspanPackets(
                            tc, tc.flow_collector, tc.flow_collector_idx)
                    if res_f == api.types.status.FAILURE:
                        result = api.types.status.FAILURE

                #
                # Validate IPFIX packets reception
                #
                tc.feature = 'flowmon'
                res_2 = eutils.validateIpFixPackets(tc)
                tc.protocol = protocol

                #
                # Validate Config-cleanup
                #
                res_3 = eutils.validateConfigCleanup(tc)

                if res_1 == api.types.status.FAILURE or\
                   res_2 == api.types.status.FAILURE or\
                   res_3 == api.types.status.FAILURE:
                    result = api.types.status.FAILURE

                if result == api.types.status.FAILURE:
                    break

                ret_count += 1

            flowmon_policy_idx += 1
            break

        if result == api.types.status.FAILURE:
            break

        count += ret_count

    tc.SetTestCount(count)

    return result
def GetProtocolDirectory(feature, proto):
    """Return the generated telemetry config directory for (feature, proto)."""
    subdir = "/gen/telemetry/{}/{}".format(feature, proto)
    return api.GetTopologyDirectory() + subdir
def GetProtocolDirectory(proto):
    """Return the generated config directory for the given protocol."""
    subdir = "/gen/{}".format(proto)
    return api.GetTopologyDirectory() + subdir