Example #1
def Trigger(tc):
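    """Switch the firewall mode from (TRANSPARENT, FLOWAWARE) to
    (TRANSPARENT, ENFORCED) and re-push the security profile and
    network security policy objects."""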

    ret = netagent_api.DeleteBaseConfig(kinds=['SecurityProfile'])
    if ret != api.types.status.SUCCESS:
        api.Logger.error("Failed to delete the security profile.")
        return ret

    print("\t\t\t########################################################################")
    print("\t\t\t#            TRANSPARENT, FLOWAWARE => TRANSPARENT, ENFORCE            #")
    print("\t\t\t########################################################################")

    # Change mode from TRANSPARENT, FLOWAWARE => TRANSPARENT, ENFORCED
    ret = netagent_api.switch_profile(fwd_mode="TRANSPARENT", policy_mode="ENFORCED")
    if ret != api.types.status.SUCCESS:
        api.Logger.error("Failed to switch profile")
        return ret

    #profile_json = api.GetTopologyDirectory() + "/" + "security_profile.json"
    profile_objs = netagent_api.QueryConfigs(kind='SecurityProfile')
    ret = netagent_api.PushConfigObjects(profile_objs)
    if ret != api.types.status.SUCCESS:
        api.Logger.error("Failed to push nwsec profile")
        return ret

    #Push the default policy
    policy_objs = netagent_api.QueryConfigs(kind='NetworkSecurityPolicy')
    ret = netagent_api.PushConfigObjects(policy_objs)
    if ret != api.types.status.SUCCESS:
        api.Logger.error("Failed to push nwsec policy")
        return ret

    api.Logger.info("Successfully changed the mode TRANSPARENT, FLOWAWARE => TRANSPARENT, ENFORCE")
    return api.types.status.SUCCESS
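
Note: the examples in this section repeat the same push-then-check idiom. A
minimal reusable sketch of that idiom (push_and_verify is an assumed helper
name, not part of the framework):

def push_and_verify(objs, what):
    # Sketch: push config objects and log a uniform error on failure.
    ret = netagent_api.PushConfigObjects(objs)
    if ret != api.types.status.SUCCESS:
        api.Logger.error("Failed to push %s" % what)
    return ret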
Example #2
def Trigger(tc):
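    """Delete and re-push the endpoint and network config objects for
    tc.iters iterations, stopping on the first failure."""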
    ret = api.types.status.SUCCESS
    for i in range(tc.iters):
        api.Logger.info(" ################### ITER %s  ###################" %
                        (i + 1))
        ret = agent_api.DeleteConfigObjects(tc.epObjects)
        if ret != api.types.status.SUCCESS:
            api.Logger.error("Iter: %s Failed to delete network object" %
                             (i + 1))
            break
        ret = agent_api.DeleteConfigObjects(tc.nwObjects)
        if ret != api.types.status.SUCCESS:
            api.Logger.error("Iter: %s Failed to delete network object" %
                             (i + 1))
            break

        ret = agent_api.PushConfigObjects(tc.nwObjects)
        if ret != api.types.status.SUCCESS:
            api.Logger.error("Iter: %s Failed to push network object" %
                             (i + 1))
            break
        ret = agent_api.PushConfigObjects(tc.epObjects)
        if ret != api.types.status.SUCCESS:
            api.Logger.error("Iter: %s Failed to push network object" %
                             (i + 1))
            break

    tc.ret = ret
    return ret
Example #3
def configurationChangeEvent(tc):
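    """Churn config until tc.cancel is set: cycle security policies per
    protocol, then cycle mirror/flowmon telemetry objects."""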
    if tc.cancel:
        api.Logger.info("Canceling configurationChangeEvent...")
        sys.exit(0)

    api.Logger.info("Running configurationChangeEvent...")
    for proto in ["tcp", "udp"]:
        policies = utils.GetTargetJsons(proto)
        for policy_json in policies:
            # Delete allow-all policy
            agent_api.DeleteSgPolicies()
            api.Logger.info("Pushing Security policy: %s " % (policy_json))
            newObjects = agent_api.AddOneConfig(policy_json)
            ret = agent_api.PushConfigObjects(newObjects)
            if ret != api.types.status.SUCCESS:
                api.Logger.error("Failed to push policies for %s" %
                                 policy_json)
            if agent_api.DeleteConfigObjects(newObjects):
                api.Logger.error("Failed to delete config object for %s" %
                                 policy_json)
            if agent_api.RemoveConfigObjects(newObjects):
                api.Logger.error("Failed to remove config object for %s" %
                                 policy_json)
            # Restore allow-all policy
            agent_api.PushConfigObjects(
                agent_api.QueryConfigs(kind='NetworkSecurityPolicy'))

            if tc.cancel:
                return api.types.status.SUCCESS

    for proto in ['tcp', 'udp', 'icmp', 'mixed', 'scale']:
        mirrorPolicies = utils.GetTargetJsons('mirror', proto)
        flowmonPolicies = utils.GetTargetJsons('flowmon', proto)
        for mp_json, fp_json in zip(mirrorPolicies, flowmonPolicies):
            mpObjs = agent_api.AddOneConfig(mp_json)
            fpObjs = agent_api.AddOneConfig(fp_json)
            ret = agent_api.PushConfigObjects(mpObjs + fpObjs)
            if ret != api.types.status.SUCCESS:
                api.Logger.error("Failed to push the telemetry objects")
            ret = agent_api.DeleteConfigObjects(fpObjs + mpObjs)
            if ret != api.types.status.SUCCESS:
                api.Logger.error("Failed to delete the telemetry objects")
            ret = agent_api.RemoveConfigObjects(mpObjs + fpObjs)
            if ret != api.types.status.SUCCESS:
                api.Logger.error("Failed to remove the telemetry objects")

            if tc.cancel:
                return api.types.status.SUCCESS

    return api.types.status.SUCCESS
Example #4
def Teardown(tc):
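    """Re-push the NetworkSecurityPolicy objects from the config store."""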
    api.Logger.info("Tearing down ...")
    newObjects = agent_api.QueryConfigs(kind='NetworkSecurityPolicy')
    agent_api.PushConfigObjects(newObjects)

    return api.types.status.SUCCESS
Example #5
def create_ep_info(tc, wl, dest_node, migr_state, src_node):
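    """Create endpoint state on dest_node so the new host (naples) sets up
    flows for the workload being moved from src_node."""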
    # get a naples handle to move to
    ep_filter = "meta.name=" + wl.workload_name + ";"
    if not hasattr(tc, 'dsc_conn_type'):
       api.Logger.info(" seeing dsc_conn_type to oob")
       tc.dsc_conn_type = 'oob'  
    objects = agent_api.QueryConfigs("Endpoint", filter=ep_filter)
    assert(len(objects) == 1)
    obj = copy.deepcopy(objects[0])
    # delete endpoint being moved on new host, TEMP
    agent_api.DeleteConfigObjects([obj], [dest_node], ignore_error=True)

    # sleep to let delete cleanup all sessions/handles
    time.sleep(1)

    obj.spec.node_uuid = tc.uuidMap[dest_node]
    obj.spec.migration = migr_state
    if api.IsNaplesNode(src_node):
        obj.status.node_uuid = tc.uuidMap[src_node]
        if tc.dsc_conn_type == "oob":
            obj.spec.homing_host_address = api.GetNicMgmtIP(src_node)
        else:
            obj.spec.homing_host_address = api.GetBondIp(src_node)
    else:
        obj.status.node_uuid = "0011.2233.4455"  # TEMP
        obj.spec.homing_host_address = "169.169.169.169"  # TEMP
    # this triggers endpoint on new host (naples) to set up flows
    agent_api.PushConfigObjects([obj], [dest_node], ignore_error=True)
Example #6
def Teardown(tc):
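    """Re-push the NetworkSecurityPolicy objects and disable the packet
    filter rule on the endpoint."""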
    api.Logger.info("Tearing down ...")
    #policy_json = "{}/sgpolicy.json".format(api.GetTopologyDirectory())
    #sg_json_obj = utils.ReadJson(policy_json)
    newObjects = agent_api.QueryConfigs(kind='NetworkSecurityPolicy')
    agent_api.PushConfigObjects(newObjects)
    addPktFltrRuleOnEp(tc, enable=False)
    return api.types.status.SUCCESS
Example #7
def Teardown(tc):
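    """Re-push the sgpolicy.json config from the topology directory."""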
    api.Logger.info("Tearing down ...")
    policy_json = "{}/sgpolicy.json".format(api.GetTopologyDirectory())
    newObjects = agent_api.AddOneConfig(policy_json)
    agent_api.PushConfigObjects(newObjects)

    return api.types.status.SUCCESS
Example #8
def deleteEpTrigger(tc, node, wl):
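    """Delete the workload's endpoint on the given node, then push it back
    after a short settle delay."""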
    api.Logger.info("Running delete ep %s on node %s" %
                    (wl.workload_name, node))
    ep_filter = "meta.name=" + wl.workload_name + ";"
    objects = agent_api.QueryConfigs("Endpoint", filter=ep_filter)
    assert (len(objects) == 1)
    obj = copy.deepcopy(objects[0])
    delete_ep_info(tc, wl, node)
    time.sleep(5)
    agent_api.PushConfigObjects([obj], [node], ignore_error=True)
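
Note: Example #8 calls delete_ep_info(), which is not shown here. A minimal
sketch, assuming it mirrors create_ep_info() and simply removes the workload's
endpoint object from the given node:

def delete_ep_info(tc, wl, node):
    # Hypothetical helper: delete the workload's endpoint object on 'node'.
    ep_filter = "meta.name=" + wl.workload_name + ";"
    objects = agent_api.QueryConfigs("Endpoint", filter=ep_filter)
    if objects:
        agent_api.DeleteConfigObjects([copy.deepcopy(objects[0])], [node],
                                      ignore_error=True)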
Example #9
def Trigger(tc):
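    """Replace the existing NetworkSecurityPolicy objects with the ones
    defined in sgpolicy.json."""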
    nwsec_objs = agent_api.QueryConfigs(kind="NetworkSecurityPolicy")
    agent_api.DeleteConfigObjects(nwsec_objs)
    agent_api.RemoveConfigObjects(nwsec_objs)

    nwsec_json = api.GetTopologyDirectory() + "/" + "sgpolicy.json"
    nwsec_objs = agent_api.AddOneConfig(nwsec_json)
    ret = agent_api.PushConfigObjects(nwsec_objs)
    if ret != api.types.status.SUCCESS:
        api.Logger.error("Failed to push nwsec policy")
        return ret
    return api.types.status.SUCCESS
Example #10
def Trigger(tc):
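    """Push and delete the NetworkSecurityPolicy objects 100 times to
    stress config churn, stopping on the first push failure."""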
    newObjects = agent_api.QueryConfigs(kind='NetworkSecurityPolicy')
    ret = api.types.status.SUCCESS

    for _ in range(100):
        ret = agent_api.PushConfigObjects(newObjects)
        agent_api.DeleteConfigObjects(newObjects)

        if ret != api.types.status.SUCCESS:
            break

    agent_api.RemoveConfigObjects(newObjects)
    tc.ret = ret
    return ret
Example #11
def Setup(tc):
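    """Push mirror objects, resolve the collector workloads, and check
    uplink state to decide whether the uplink-flap step should be skipped."""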
    tc.skip_flap = False
    tc.newObjects = None
    tc.collector_ip = []
    tc.collector_wl = []
    tc.collector_type = []
    tc.wl_sec_ip_info = defaultdict(lambda: dict())
    tc.IsBareMetal = utils.IsBareMetal()
    tc.port_down_time = getattr(tc.args, "port_down_time", 60)
    policies = utils.GetTargetJsons('mirror', tc.iterators.proto)
    policy_json = policies[0]
    tc.verif_json = utils.GetVerifJsonFromPolicyJson(policy_json)

    # Push Mirror objects
    tc.newObjects = agent_api.AddOneConfig(policy_json)
    ret = agent_api.PushConfigObjects(tc.newObjects)
    if ret != api.types.status.SUCCESS:
        api.Logger.error("Unable to push mirror objects")
        tc.newObjects = None
        return api.types.status.FAILURE

    # Populate secondary IP
    utils.PopulateSecondaryAddress(tc)

    # Get collector
    ret = GetCollectorWorkloadFromObjects(tc)
    if ret != api.types.status.SUCCESS:
        api.Logger.error("Failed to get collector workload")
        return ret

    ret = utils.DetectUpLinkState(api.GetNaplesHostnames(),
                                  utils.PORT_OPER_STATUS_UP, all)
    if ret != api.types.status.SUCCESS:
        api.Logger.error("All uplink on Nodes are not in UP state.")
        tc.skip_flap = True
        return api.types.status.SUCCESS

    if api.GetConfigNicMode() in ["classic", "unified"]:
        api.Logger.info(
            f"NIC mode: {api.GetConfigNicMode()}, Skipping uplink flap")
        tc.skip_flap = True

    api.Logger.info("All uplink on Nodes are UP!")
    # Bring up inband and reset the active link on bond.
    ret = utils.SetupInbandInterface()
    if ret != api.types.status.SUCCESS:
        return ret
    return api.types.status.SUCCESS
Example #12
def Trigger(tc):
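    """Push, update, delete, and remove mirror and flowmon telemetry
    objects at scale."""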
    result = api.types.status.SUCCESS
    mirrorPolicies = utils.GetTargetJsons('mirror', "scale")
    flowmonPolicies = utils.GetTargetJsons('flowmon', "scale")
    #colPolicies = utils.GetTargetJsons('mirror', "collector")
    iters = getattr(tc.args, "iters", 10)
    mpObjs = fpObjs = []
    for mp_json, fp_json in zip(mirrorPolicies, flowmonPolicies):
        for i in range(iters):
            #
            # Push Mirror Session and Flow Export objects
            #
            mpObjs = agent_api.AddOneConfig(mp_json)
            fpObjs = agent_api.AddOneConfig(fp_json)
            ret = agent_api.PushConfigObjects(mpObjs + fpObjs)
            if ret != api.types.status.SUCCESS:
                api.Logger.error("Failed to push the telemetry objects")
                return api.types.status.FAILURE

            #
            # Update Mirror Session and Flow Export objects
            #
            mpObjs = UpdateMirrorSessionObjects(mpObjs)
            fpObjs = UpdateFlowMonitorObjects(fpObjs)
            ret = agent_api.UpdateConfigObjects(mpObjs + fpObjs)
            if ret != api.types.status.SUCCESS:
                api.Logger.error("Failed to update the telemetry objects")
                return api.types.status.FAILURE

            #
            # Delete Mirror Session and Flow Export objects
            #
            ret = agent_api.DeleteConfigObjects(fpObjs + mpObjs)
            if ret != api.types.status.SUCCESS:
                api.Logger.error("Failed to delete the telemetry objects")
                return api.types.status.FAILURE
            ret = agent_api.RemoveConfigObjects(mpObjs + fpObjs)
            if ret != api.types.status.SUCCESS:
                api.Logger.error("Failed to remove the telemetry objects")
                return api.types.status.FAILURE

    return result
Example #13
def create_ep_info(tc, wl, new_node, migr_state, old_node):
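    """Create endpoint state on new_node so the new host (naples) sets up
    flows for the workload being moved from old_node."""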
    # get a naples handle to move to
    ep_filter = "meta.name=" + wl.workload_name + ";"
    objects = agent_api.QueryConfigs("Endpoint", filter=ep_filter)
    assert (len(objects) == 1)
    obj = copy.deepcopy(objects[0])
    # delete endpoint being moved on new host, TEMP
    agent_api.DeleteConfigObjects([obj], [new_node], ignore_error=True)

    obj.spec.node_uuid = tc.uuidMap[new_node]
    obj.spec.migration = migr_state
    if api.IsNaplesNode(old_node):
        obj.status.node_uuid = tc.uuidMap[old_node]
        obj.spec.homing_host_address = api.GetNicMgmtIP(old_node)
    else:
        obj.status.node_uuid = "0011.2233.4455"  # TEMP
        obj.spec.homing_host_address = "169.169.169.169"  # TEMP
    # this triggers endpoint on new host (naples) to set up flows
    agent_api.PushConfigObjects([obj], [new_node], ignore_error=True)
Example #14
def Trigger(tc):
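    """Push large netagent-expansion policies and fail if any push takes
    longer than 120 seconds."""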
    policies = utils.GetTargetJsons("netagent-expansion")

    for policy_json in policies:
        newObjects = agent_api.AddOneConfig(policy_json)
        start = time.time()
        tc.ret = agent_api.PushConfigObjects(newObjects)
        end = time.time()
        diff = end - start

        if diff > 120:
            api.Logger.info("Time taken to push configs is {} seconds.")
            tc.ret = api.types.status.FAILURE

        agent_api.DeleteConfigObjects(newObjects)
        agent_api.RemoveConfigObjects(newObjects)

        if tc.ret == api.types.status.FAILURE:
            return api.types.status.FAILURE

    return api.types.status.SUCCESS
Example #15
def __create_endpoint_info(tc):
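    """Create endpoint info on each destination host for the workloads
    being moved by vmotion."""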
    # trying to run vmotion and config update concurrently (hack)
    time.sleep(5)

    for dest_host, workloads in tc.vmotion_cntxt.MoveRequest.items():
        api.Logger.debug(
            "Creating endpoint info at %s for workloads being moved" %
            dest_host)
        if not api.IsNaplesNode(dest_host):
            continue
        for wl in workloads:
            api.Logger.debug("Updating ep-info for %s" % wl.workload_name)
            ep_filter = "meta.name=" + wl.workload_name + ";"
            objects = agent_api.QueryConfigs("Endpoint", filter=ep_filter)
            assert (len(objects) == 1)
            obj = copy.deepcopy(objects[0])
            # delete endpoint being moved on new host, TEMP
            resp = agent_api.DeleteConfigObjects([obj], [dest_host],
                                                 ignore_error=True)
            if resp != api.types.status.SUCCESS:
                api.Logger.error("DeleteConfigObjects failed for %s for %s" %
                                 (wl.workload_name, dest_host))

            obj.spec.node_uuid = tc.vmotion_cntxt.UUIDMap[dest_host]
            obj.spec.migration = "START"
            current_host = tc.vmotion_cntxt.CurrentHome[wl]
            if api.IsNaplesNode(current_host):
                obj.status.node_uuid = tc.vmotion_cntxt.UUIDMap[current_host]
                obj.spec.homing_host_address = api.GetNicMgmtIP(current_host)
            else:
                obj.status.node_uuid = "0011.2233.4455"  # TEMP
                obj.spec.homing_host_address = "169.169.169.169"  # TEMP

            # this triggers endpoint on new host(naples) to setup flows
            agent_api.PushConfigObjects([obj], [dest_host], ignore_error=True)
    api.Logger.debug("Completed endpoint info creation at NewHome")
    return
Example #16
def __setup_vmotion_on_hosts(nodes=None):
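    """Create a vmotion endpoint on each hostpin_dvs node and enable
    vmotion on the matching network."""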
    vmotion_workloads = {}
    ep_objs = netagent_api.QueryConfigs(kind='Endpoint')
    ep_ref = None
    for ep in ep_objs:
        node_name = getattr(ep.spec, "_node_name", None)
        if not node_name:
            node_name = ep.spec.node_uuid
        l2seg = __get_l2segment_vlan_for_endpoint(ep)
        if (api.GetTestbedNicMode(node_name) == 'hostpin_dvs'
                and not vmotion_workloads.get(node_name) and l2seg != 0):
            ep_ref = copy.deepcopy(ep)
            ep_ref.spec.mac_address = vmotion_mac_allocator.Alloc().get()
            ep_ref.spec.ipv4_addresses = [str(vmotion_ip_allocator.Alloc())]
            ep_ref.meta.name = "vmotion-" + str(node_name)
            ret = netagent_api.PushConfigObjects([ep_ref], ignore_error=False)
            if ret != api.types.status.SUCCESS:
                api.Logger.info("Failed to push vmotion endpoint")
            vmotion_workloads[node_name] = ep_ref

    workloads = api.GetWorkloads()
    for workload in workloads:
        vm_wl = vmotion_workloads.get(workload.node_name)
        if vm_wl is None or getattr(vm_wl, 'network_name', None):
            continue
        if vm_wl.spec.useg_vlan == workload.encap_vlan:
            vm_wl.network_name = workload.network_name

    for host, workload in vmotion_workloads.items():
        ret = api.EnableVmotionOnNetwork(host, workload.network_name,
                                         workload.spec.mac_address)
        if ret != api.types.status.SUCCESS:
            return ret
    return api.types.status.SUCCESS
Example #17
def Trigger(tc):
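    """Push each policy json and run randomized traffic between workload
    pairs, comparing per-Naples rule stats against a local rule db."""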
    policies = utils.GetTargetJsons(tc.iterators.proto)
    newObjects = None
    # Generate the random seed
    seed = random.randrange(sys.maxsize)
    api.Logger.info("Seed val: %s"%seed)

    try:
        for policy_json in policies:
            newObjects = agent_api.AddOneConfig(policy_json)
            api.Logger.info("Created new object for %s"%policy_json)
            tc.ret = agent_api.PushConfigObjects(newObjects)
            rule_db_map = utils.SetupLocalRuleDbPerNaple(policy_json)
            if tc.ret != api.types.status.SUCCESS:
                return api.types.status.FAILURE

            scale = 0
            while scale < tc.scale:
                for w1, dst_workload_list in tc.workload_dict.items():
                    for w2 in dst_workload_list:
                        seed += 1
                        if scale >= tc.scale:
                            break

                        # If src and dst workload are behind same Naples,
                        # then only one db sees the packet.
                        if w1.node_name == w2.node_name:
                            w1_db = rule_db_map.get(w1.node_name, None)
                            w2_db = None
                        else:
                            w1_db = rule_db_map.get(w1.node_name, None)
                            w2_db = rule_db_map.get(w2.node_name, None)

                        api.Logger.info("(%s/%s) Running between w1: %s(%s) and w2: %s(%s)"%
                                        (scale+1, tc.scale, w1.ip_address, w1.workload_name,
                                         w2.ip_address,
                                         w2.workload_name))
                        tc.ret = utils.RunAll(1, w1, w2, w1_db, w2_db, FilterAndAlter, seed=seed)
                        if tc.ret != api.types.status.SUCCESS:
                            agent_api.DeleteConfigObjects(newObjects)
                            agent_api.RemoveConfigObjects(newObjects)
                            return tc.ret

                        scale += 1
                utils.clearNaplesSessions()

            for node,db in rule_db_map.items():
                result = utils.compareStats(db, node, tc.iterators.proto)
                api.Logger.info("Comparison of rule stats for Node %s - %s"%
                                (node, "SUCCESS" \
                                 if result == api.types.status.SUCCESS \
                                 else "FAIL"))
                if result != api.types.status.SUCCESS:
                    agent_api.DeleteConfigObjects(newObjects)
                    agent_api.RemoveConfigObjects(newObjects)
                    tc.ret = result
                    return tc.ret

            if agent_api.DeleteConfigObjects(newObjects):
                api.Logger.error("Failed to delete config object for %s"%policy_json)

            if agent_api.RemoveConfigObjects(newObjects):
                api.Logger.error("Failed to remove config object for %s"%policy_json)

    except Exception as e:
        if newObjects:
            agent_api.DeleteConfigObjects(newObjects)
            agent_api.RemoveConfigObjects(newObjects)
        api.Logger.error("%s" % e)
        Teardown(tc)
        return api.types.status.FAILURE

    return api.types.status.SUCCESS
Example #18
def Trigger(tc):
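    """Exercise LIF-ERSPAN together with flow-ERSPAN/flowmon: push
    collector, mirror, and flowmon configs, trigger traffic, capture with
    tcpdump, and validate ERSPAN/IPFIX reception and config cleanup."""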
    if tc.ignore:
        return api.types.status.SUCCESS

    if tc.error:
        return api.types.status.FAILURE

    protoDir1 = api.GetTopologyDirectory() +\
                "/gen/telemetry/{}/{}".format('mirror', 'lif')
    api.Logger.info("Template Config files location: %s" % protoDir1)
    protoDir2 = api.GetTopologyDirectory() +\
                "/gen/telemetry/{}/{}".format('mirror', tc.iterators.proto)
    api.Logger.info("Template Config files location: %s" % protoDir2)
    protoDir3 = api.GetTopologyDirectory() +\
                "/gen/telemetry/{}/{}".format('flowmon', tc.iterators.proto)
    api.Logger.info("Template Config files location: %s" % protoDir3)

    result = api.types.status.SUCCESS

    count = 0
    MirrorPolicies = utils.GetTargetJsons('mirror', tc.iterators.proto)
    FlowMonPolicies = utils.GetTargetJsons('flowmon', tc.iterators.proto)
    LifPolicies = utils.GetTargetJsons('mirror', 'lif')
    flowmon_policy_idx = 0
    ret_count = 0
    for mirror_json in MirrorPolicies:
        #
        # Get template-Mirror Config
        #
        newMirrorObjects = agent_api.AddOneConfig(mirror_json)
        if len(newMirrorObjects) == 0:
            api.Logger.error("Adding new Mirror objects to store failed")
            tc.error = True
            return api.types.status.FAILURE
        agent_api.RemoveConfigObjects(newMirrorObjects)

        #
        # Ignore multi-collector template configs, since the Expanded-Telemetry
        # testbundle dynamically creates such configs
        #
        if len(newMirrorObjects[0].spec.collectors) > 1:
            continue

        idx = 0
        for flowmon_json in FlowMonPolicies:
            if idx < flowmon_policy_idx:
                idx += 1
                continue

            #
            # Get template-FlowMon Config
            #
            newFlowMonObjects = agent_api.AddOneConfig(flowmon_json)
            if len(newFlowMonObjects) == 0:
                api.Logger.error("Adding new FlowMon objects to store failed")
                tc.error = True
                return api.types.status.FAILURE
            agent_api.RemoveConfigObjects(newFlowMonObjects)

            #
            # Ignore multi-collector template configs, since the Expanded-Telemetry
            # testbundle dynamically creates such configs
            #
            if len(newFlowMonObjects[0].spec.exports) > 1:
                flowmon_policy_idx += 1
                idx += 1
                continue

            #
            # Modify template-Mirror / template-FlowMon Config to make sure
            # that the Naples node acts as either source or destination
            #
            # Set up Collector in the remote node
            #
            eutils.generateMirrorConfig(tc, mirror_json, newMirrorObjects)
            eutils.generateFlowMonConfig(tc, flowmon_json, newFlowMonObjects)

            ret_count = 0
            for i in range(0, len(tc.mirror_verif)):
                #
                # If Execution-Optimization is enabled, no need to run the
                # test for the same protocol more than once
                #
                if i > 0 and tc.mirror_verif[i]['protocol'] ==\
                             tc.mirror_verif[i-1]['protocol']:
                    continue

                #
                # Flow-ERSPAN for TCP-traffic is not tested (yet) in
                # Classic-mode until applicable pkt-trigger tools are
                # identified
                #
                if tc.classic_mode and \
                   tc.mirror_verif[i]['protocol'] == 'tcp':
                    continue

                for policy_json in LifPolicies:
                    #
                    # Get template-Mirror Config
                    #
                    newObjects = agent_api.AddOneConfig(policy_json)
                    if len(newObjects) == 0:
                        api.Logger.error("Adding new objects to store failed")
                        tc.error = True
                        return api.types.status.FAILURE

                    #
                    # Modify template-Mirror Config to make sure that
                    # the Naples node acts as either source or destination
                    #
                    # Set up Collector in the remote node
                    #
                    if newObjects[0].kind == 'InterfaceMirrorSession':
                        tc.lif_collector_objects = newObjects
                        agent_api.RemoveConfigObjects(tc.lif_collector_objects)
                    elif newObjects[0].kind == 'Interface':
                        tc.interface_objects = newObjects
                        agent_api.RemoveConfigObjects(tc.interface_objects)

                #
                # Push Collector Config to Naples
                #
                colObjects = tc.lif_collector_objects
                ret = eutils.generateLifCollectorConfig(tc, colObjects)
                if ret != api.types.status.SUCCESS:
                    api.Logger.error("Unable to identify Collector Workload")
                    tc.error = True
                    return api.types.status.FAILURE

                ret = agent_api.PushConfigObjects(colObjects,
                                                  [tc.naples.node_name],
                                                  [tc.naples_device_name])
                if ret != api.types.status.SUCCESS:
                    api.Logger.error("Unable to push collector objects")
                    tc.error = True
                    return api.types.status.FAILURE

                #
                # Push Mirror / FlowMon Config to Naples
                #
                ret = agent_api.PushConfigObjects(newMirrorObjects,
                                                  [tc.naples.node_name],
                                                  [tc.naples_device_name])
                if ret != api.types.status.SUCCESS:
                    agent_api.DeleteConfigObjects(tc.lif_collector_objects,
                                                  [tc.naples.node_name],
                                                  [tc.naples_device_name])
                    api.Logger.error("Unable to push mirror objects")
                    tc.error = True
                    return api.types.status.FAILURE

                ret = agent_api.PushConfigObjects(newFlowMonObjects,
                                                  [tc.naples.node_name],
                                                  [tc.naples_device_name])
                if ret != api.types.status.SUCCESS:
                    agent_api.DeleteConfigObjects(tc.lif_collector_objects,
                                                  [tc.naples.node_name],
                                                  [tc.naples_device_name])
                    agent_api.DeleteConfigObjects(newMirrorObjects,
                                                  [tc.naples.node_name],
                                                  [tc.naples_device_name])
                    api.Logger.error("Unable to push flowmon objects")
                    tc.error = True
                    return api.types.status.FAILURE

                #
                # Update Interface objects
                #
                ifObjects = tc.interface_objects
                ret = eutils.generateLifInterfaceConfig(
                    tc, ifObjects, tc.lif_collector_objects)
                if ret != api.types.status.SUCCESS:
                    agent_api.DeleteConfigObjects(tc.lif_collector_objects,
                                                  [tc.naples.node_name],
                                                  [tc.naples_device_name])
                    agent_api.DeleteConfigObjects(newMirrorObjects,
                                                  [tc.naples.node_name],
                                                  [tc.naples_device_name])
                    agent_api.DeleteConfigObjects(newFlowMonObjects,
                                                  [tc.naples.node_name],
                                                  [tc.naples_device_name])
                    api.Logger.error(
                        "Unable to identify Uplink/LIF Interfaces")
                    tc.error = True
                    return api.types.status.FAILURE

                ret = agent_api.UpdateConfigObjects(ifObjects,
                                                    [tc.naples.node_name],
                                                    [tc.naples_device_name])
                if ret != api.types.status.SUCCESS:
                    agent_api.DeleteConfigObjects(tc.lif_collector_objects,
                                                  [tc.naples.node_name],
                                                  [tc.naples_device_name])
                    agent_api.DeleteConfigObjects(newMirrorObjects,
                                                  [tc.naples.node_name],
                                                  [tc.naples_device_name])
                    agent_api.DeleteConfigObjects(newFlowMonObjects,
                                                  [tc.naples.node_name],
                                                  [tc.naples_device_name])
                    api.Logger.error("Unable to update interface objects")
                    tc.error = True
                    return api.types.status.FAILURE

                #
                # Establish Forwarding set up between Naples-peer and
                # Collectors
                #
                eutils.establishForwardingSetup(tc)

                #
                # Give a little time for flows clean-up to happen so that
                # stale IPFIX records don't show up
                #
                if tc.classic_mode:
                    time.sleep(1)

                if tc.collection == 'distinct':
                    req_tcpdump_flow_erspan = \
                        api.Trigger_CreateExecuteCommandsRequest(serial=True)
                    for c in range(0, len(tc.flow_collector)):
                        #
                        # Set up TCPDUMP's on the collector
                        #
                        idx = tc.flow_collector_idx[c]
                        if tc.flow_collector[c].IsNaples():
                            cmd = "tcpdump -c 600 -XX -vv -nni {} ip proto\
                            gre and dst {} --immediate-mode -U\
                            -w flow-mirror-{}.pcap"\
                            .format(tc.flow_collector[c].interface,
                                    tc.collector_ip_address[idx], c)
                        else:
                            cmd = "tcpdump -p -c 600 -XX -vv -nni {} ip\
                            proto gre and dst {} --immediate-mode -U\
                            -w flow-mirror-{}.pcap"\
                            .format(tc.flow_collector[c].interface,
                                    tc.collector_ip_address[idx], c)
                        eutils.add_command(req_tcpdump_flow_erspan,
                                           tc.flow_collector[c], cmd, True)

                    resp_tcpdump_flow_erspan = \
                        api.Trigger(req_tcpdump_flow_erspan)
                    for cmd in resp_tcpdump_flow_erspan.commands:
                        api.PrintCommandResults(cmd)

                    #
                    # Classic mode requires a delay to make sure that TCPDUMP
                    # background process is fully up
                    #
                    if tc.classic_mode:
                        time.sleep(1)

                req_tcpdump_lif_erspan = \
                    api.Trigger_CreateExecuteCommandsRequest(serial=True)
                req_tcpdump_flowmon = \
                    api.Trigger_CreateExecuteCommandsRequest(serial=True)
                for c in range(0, len(tc.lif_collector)):
                    #
                    # Set up TCPDUMP's on the collector
                    #
                    idx = tc.lif_collector_idx[c]
                    if tc.lif_collector[c].IsNaples():
                        cmd = "tcpdump -c 1000 -XX -vv -nni {} ip proto gre\
                              and dst {} --immediate-mode -U\
                              -w lif-mirror-{}.pcap"\
                              .format(tc.lif_collector[c].interface,
                                      tc.collector_ip_address[idx], c)
                    else:
                        cmd = "tcpdump -p -c 1000 -XX -vv -nni {} ip proto\
                              gre and dst {} --immediate-mode -U\
                              -w lif-mirror-{}.pcap"\
                              .format(tc.lif_collector[c].interface,
                                      tc.collector_ip_address[idx], c)
                    eutils.add_command(req_tcpdump_lif_erspan,
                                       tc.lif_collector[c], cmd, True)

                for c in range(0, len(tc.flowmon_collector)):
                    #
                    # Set up TCPDUMP's on the collector
                    #
                    idx = tc.flowmon_collector_idx[c]
                    if tc.flowmon_collector[c].IsNaples():
                        cmd = "tcpdump -c 600 -XX -vv -nni {} udp and\
                               dst port {} and dst {} --immediate-mode\
                               -U -w flowmon-{}.pcap"\
                        .format(tc.flowmon_collector[c].interface,
                        tc.export_port[c], tc.collector_ip_address[idx], c)
                    else:
                        cmd = "tcpdump -p -c 600 -XX -vv -nni {} udp\
                               and dst port {} and dst {}\
                               --immediate-mode -U -w flowmon-{}.pcap"\
                        .format(tc.flowmon_collector[c].interface,
                        tc.export_port[c], tc.collector_ip_address[idx], c)
                    eutils.add_command(req_tcpdump_flowmon,
                                       tc.flowmon_collector[c], cmd, True)

                resp_tcpdump_lif_erspan = \
                    api.Trigger(req_tcpdump_lif_erspan)
                for cmd in resp_tcpdump_lif_erspan.commands:
                    api.PrintCommandResults(cmd)

                #
                # Classic mode requires a delay to make sure that TCPDUMP
                # background process is fully up
                #
                if tc.classic_mode:
                    time.sleep(2)

                resp_tcpdump_flowmon = api.Trigger(req_tcpdump_flowmon)
                for cmd in resp_tcpdump_flowmon.commands:
                    api.PrintCommandResults(cmd)

                #
                # Classic mode requires a delay to make sure that TCPDUMP
                # background process is fully up
                #
                if tc.classic_mode:
                    time.sleep(2)

                #
                # Trigger packets for ERSPAN / FLOWMON to take effect
                #
                tc.protocol = tc.mirror_verif[i]['protocol']
                tc.dest_port = utils.GetDestPort(tc.mirror_verif[i]['port'])

                protocol = tc.protocol
                tc.protocol = 'all'
                if api.GetNodeOs(tc.naples.node_name) == 'linux':
                    eutils.triggerTrafficInClassicModeLinux(tc)
                else:
                    eutils.triggerTrafficInHostPinModeOrFreeBSD(tc)
                tc.protocol = protocol

                #
                # Dump sessions/flows/P4-tables for debug purposes
                #
                eutils.showSessionAndP4TablesForDebug(tc, tc.lif_collector,
                                                      tc.lif_collector_idx)

                #
                # Terminate TCPDUMP background process
                #
                term_resp_tcpdump_lif_erspan = \
                api.Trigger_TerminateAllCommands(resp_tcpdump_lif_erspan)
                tc.resp_tcpdump_lif_erspan = \
                api.Trigger_AggregateCommandsResponse(\
                resp_tcpdump_lif_erspan, term_resp_tcpdump_lif_erspan)

                if tc.collection == 'distinct':
                    term_resp_tcpdump_flow_erspan = \
                    api.Trigger_TerminateAllCommands(resp_tcpdump_flow_erspan)
                    tc.resp_tcpdump_flow_erspan = \
                    api.Trigger_AggregateCommandsResponse(\
                    resp_tcpdump_flow_erspan, term_resp_tcpdump_flow_erspan)

                term_resp_tcpdump_flowmon = api.Trigger_TerminateAllCommands(\
                                            resp_tcpdump_flowmon)
                tc.resp_tcpdump_flowmon = \
                api.Trigger_AggregateCommandsResponse(resp_tcpdump_flowmon,
                                                      term_resp_tcpdump_flowmon)

                # Delete the objects
                eutils.deGenerateLifInterfaceConfig(tc, tc.interface_objects,
                                                    tc.lif_collector_objects)
                agent_api.UpdateConfigObjects(tc.interface_objects,
                                              [tc.naples.node_name],
                                              [tc.naples_device_name])
                agent_api.DeleteConfigObjects(tc.lif_collector_objects,
                                              [tc.naples.node_name],
                                              [tc.naples_device_name])
                agent_api.DeleteConfigObjects(newMirrorObjects,
                                              [tc.naples.node_name],
                                              [tc.naples_device_name])
                agent_api.DeleteConfigObjects(newFlowMonObjects,
                                              [tc.naples.node_name],
                                              [tc.naples_device_name])

                #
                # Make sure that Mirror-config has been removed
                #
                tc.resp_cleanup = eutils.showP4TablesForValidation(tc)

                #
                # Validate ERSPAN packets reception
                #
                tc.tcp_erspan_pkts_expected = \
                               NUMBER_OF_TCP_ERSPAN_PACKETS_PER_SESSION
                tc.udp_erspan_pkts_expected = (tc.udp_count << 1)
                tc.icmp_erspan_pkts_expected = (tc.udp_count << 1)+\
                                               (tc.icmp_count << 1)

                if tc.iterators.direction != 'both':
                    tc.tcp_erspan_pkts_expected >>= 1
                    tc.udp_erspan_pkts_expected >>= 1
                    tc.icmp_erspan_pkts_expected >>= 1

                if tc.dupcheck == 'disable':
                    tc.tcp_erspan_pkts_expected = \
                    (tc.tcp_erspan_pkts_expected+1) << 1
                    tc.udp_erspan_pkts_expected <<= 1
                    tc.icmp_erspan_pkts_expected <<= 1

                #
                # Adjust Expected-pkt-counts taking into account Flow-ERSPAN
                # Config's
                #
                if tc.collection == 'unified':
                    if (tc.protocol == 'tcp' or tc.iterators.proto == 'mixed')\
                        and tc.iterators.direction != 'both':
                        tc.tcp_erspan_pkts_expected <<= 1
                    if (tc.protocol == 'udp' or tc.iterators.proto == 'mixed')\
                        and tc.iterators.direction != 'both':
                        tc.udp_erspan_pkts_expected <<= 1
                    if tc.protocol == 'icmp' or tc.iterators.proto == 'mixed':
                        if tc.iterators.direction != 'both':
                            tc.icmp_erspan_pkts_expected <<= 1
                        #if tc.iterators.direction != 'egress':
                        #    tc.icmp_erspan_pkts_expected += 1

                protocol = tc.protocol
                tc.protocol = 'all'
                tc.feature = 'lif-erspan'
                tc.resp_tcpdump_erspan = tc.resp_tcpdump_lif_erspan
                res_1 = eutils.validateErspanPackets(tc, tc.lif_collector,
                                                     tc.lif_collector_idx)

                if tc.collection == 'distinct':
                    tc.tcp_erspan_pkts_expected = 0
                    tc.udp_erspan_pkts_expected = 0
                    tc.icmp_erspan_pkts_expected = 0
                    tc.protocol = protocol
                    if tc.protocol == 'tcp' or tc.iterators.proto == 'mixed':
                        tc.tcp_erspan_pkts_expected = \
                               NUMBER_OF_TCP_ERSPAN_PACKETS_PER_SESSION
                    if tc.protocol == 'udp' or tc.iterators.proto == 'mixed':
                        tc.udp_erspan_pkts_expected = (tc.udp_count << 1)
                    if tc.protocol == 'icmp' or tc.iterators.proto == 'mixed':
                        tc.icmp_erspan_pkts_expected = (tc.udp_count << 1)+\
                                                       (tc.icmp_count << 1)

                    tc.protocol = 'all'
                    tc.feature = 'flow-erspan'
                    tc.resp_tcpdump_erspan = tc.resp_tcpdump_flow_erspan
                    res_f = eutils.validateErspanPackets(
                        tc, tc.flow_collector, tc.flow_collector_idx)
                    if res_f == api.types.status.FAILURE:
                        result = api.types.status.FAILURE

                #
                # Validate IPFIX packets reception
                #
                tc.feature = 'flowmon'
                res_2 = eutils.validateIpFixPackets(tc)
                tc.protocol = protocol

                #
                # Validate Config-cleanup
                #
                res_3 = eutils.validateConfigCleanup(tc)

                if res_1 == api.types.status.FAILURE or\
                   res_2 == api.types.status.FAILURE or\
                   res_3 == api.types.status.FAILURE:
                    result = api.types.status.FAILURE

                if result == api.types.status.FAILURE:
                    break

                ret_count += 1

            flowmon_policy_idx += 1
            break

        if result == api.types.status.FAILURE:
            break

        count += ret_count

    tc.SetTestCount(count)
    return result
Example #19
def Trigger(tc):
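    """Push TCPProxyPolicy objects and run iperf between a remote workload
    pair, dumping tcp-proxy sessions before and after the traffic."""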
    tc.cmd_cookies = []

    pairs = api.GetRemoteWorkloadPairs()
    w1 = pairs[0][0]
    w2 = pairs[0][1]

    if w1.IsNaples() and w2.IsNaples():
        api.Logger.info("naples-naples unsupported currently for tcp-proxy")
        return api.types.status.DISABLED

    store_proxy_objects = netagent_cfg_api.QueryConfigs(kind='TCPProxyPolicy')
    if len(store_proxy_objects) == 0:
        api.Logger.error("No tcp proxy objects in store")
        return api.types.status.FAILURE

    ret = netagent_cfg_api.PushConfigObjects(store_proxy_objects,
                                             ignore_error=True)
    if ret != api.types.status.SUCCESS:
        api.Logger.error("Unable to push tcp_proxy policies")
        return api.types.status.FAILURE

    get_config_objects = netagent_cfg_api.GetConfigObjects(store_proxy_objects)
    if len(get_config_objects) == 0:
        api.Logger.error("Unable to fetch newly pushed objects")
        return api.types.status.FAILURE

    req = api.Trigger_CreateExecuteCommandsRequest(serial=True)

    if w2.IsNaples():
        cmd_cookie = "tcp proxy sessions on %s BEFORE running iperf traffic" % (
            w2.node_name)
        api.Trigger_AddNaplesCommand(req, w2.node_name,
                                     "/nic/bin/halctl show tcp-proxy session")
        tc.cmd_cookies.append(cmd_cookie)

    if w1.IsNaples():
        cmd_cookie = "tcp proxy sessions on %s BEFORE running iperf traffic" % (
            w1.node_name)
        api.Trigger_AddNaplesCommand(req, w1.node_name,
                                     "/nic/bin/halctl show tcp-proxy session")
        tc.cmd_cookies.append(cmd_cookie)

    tc.cmd_descr = "Client: %s(%s) <--> Server: %s(%s) on tcp proxy port %s pktsize %s" %\
                   (w1.workload_name, w1.ip_address, w2.workload_name, w2.ip_address, tc.iterators.port, tc.iterators.pktsize)

    api.Logger.info("Starting Iperf test from %s" % (tc.cmd_descr))

    cmd_cookie = "Running iperf server on %s" % (w2.workload_name)
    api.Trigger_AddCommand(req,
                           w2.node_name,
                           w2.workload_name,
                           "iperf3 -s -p %s" % (tc.iterators.port),
                           background=True)
    tc.cmd_cookies.append(cmd_cookie)

    cmd_cookie = "Running iperf client on %s" % (w1.workload_name)
    api.Trigger_AddCommand(
        req, w1.node_name, w1.workload_name, "iperf3 -c %s -p %s -M %s" %
        (w2.ip_address, tc.iterators.port, tc.iterators.pktsize))
    tc.cmd_cookies.append(cmd_cookie)

    if w1.IsNaples():
        cmd_cookie = "tcp proxy sessions on %s AFTER running iperf traffic" % (
            w1.node_name)
        api.Trigger_AddNaplesCommand(req, w1.node_name,
                                     "/nic/bin/halctl show tcp-proxy session")
        tc.cmd_cookies.append(cmd_cookie)

    if w2.IsNaples():
        cmd_cookie = "tcp proxy sessions on %s AFTER running iperf traffic" % (
            w2.node_name)
        api.Trigger_AddNaplesCommand(req, w2.node_name,
                                     "/nic/bin/halctl show tcp-proxy session")
        tc.cmd_cookies.append(cmd_cookie)

    trig_resp = api.Trigger(req)
    term_resp = api.Trigger_TerminateAllCommands(trig_resp)

    tc.resp = api.Trigger_AggregateCommandsResponse(trig_resp, term_resp)
    return api.types.status.SUCCESS
Example #20
def Trigger(tc):
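    """Configure IPsec on both nodes: push netagent IPsec objects on
    Naples nodes, or set up equivalent 'ip xfrm' state/policy on
    non-Naples workloads."""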
    tc.cmd_cookies = []
    tc.cmd_cookies1 = []
    tc.cmd_cookies2 = []

    nodes = api.GetWorkloadNodeHostnames()
    push_node_0 = [nodes[0]]
    push_node_1 = [nodes[1]]

    encrypt_objects = netagent_cfg_api.QueryConfigs(kind='IPSecSAEncrypt')
    decrypt_objects = netagent_cfg_api.QueryConfigs(kind='IPSecSADecrypt')
    policy_objects = netagent_cfg_api.QueryConfigs(kind='IPSecPolicy')

    # Configure IPsec on Node 1

    if api.IsNaplesNode(nodes[0]):

        if len(encrypt_objects) == 0:

            newObjects = netagent_cfg_api.AddOneConfig(
                api.GetTopologyDirectory() +
                "/ipsec/ipsec_encryption_node1.json")
            if len(newObjects) == 0:
                api.Logger.error(
                    "Adding new objects to store failed for ipsec_encryption_node1.json"
                )
                return api.types.status.FAILURE

            ret = netagent_cfg_api.PushConfigObjects(newObjects,
                                                     node_names=push_node_0,
                                                     ignore_error=True)
            if ret != api.types.status.SUCCESS:
                api.Logger.error(
                    "Unable to push ipsec-encryption objects to node %s" %
                    nodes[0])
                return api.types.status.FAILURE

            get_config_objects = netagent_cfg_api.GetConfigObjects(newObjects)
            if len(get_config_objects) == 0:
                api.Logger.error("Unable to fetch newly pushed objects")
                return api.types.status.FAILURE

        if len(decrypt_objects) == 0:

            newObjects = netagent_cfg_api.AddOneConfig(
                api.GetTopologyDirectory() +
                "/ipsec/ipsec_decryption_node1.json")
            if len(newObjects) == 0:
                api.Logger.error(
                    "Adding new objects to store failed for ipsec_decryption_node1.json"
                )
                return api.types.status.FAILURE

            ret = netagent_cfg_api.PushConfigObjects(newObjects,
                                                     node_names=push_node_0,
                                                     ignore_error=True)
            if ret != api.types.status.SUCCESS:
                api.Logger.error(
                    "Unable to push ipsec-decryption objects to node %s" %
                    nodes[0])
                return api.types.status.FAILURE

            get_config_objects = netagent_cfg_api.GetConfigObjects(newObjects)
            if len(get_config_objects) == 0:
                api.Logger.error("Unable to fetch newly pushed objects")
                return api.types.status.FAILURE

        if len(policy_objects) == 0:

            newObjects = netagent_cfg_api.AddOneConfig(
                api.GetTopologyDirectory() +
                "/ipsec/ipsec_policies_node1.json")
            if len(newObjects) == 0:
                api.Logger.error(
                    "Adding new objects to store failed for ipsec_policies_node1.json"
                )
                return api.types.status.FAILURE

            ret = netagent_cfg_api.PushConfigObjects(newObjects,
                                                     node_names=push_node_0,
                                                     ignore_error=True)
            if ret != api.types.status.SUCCESS:
                api.Logger.error(
                    "Unable to push ipsec-policy objects to node %s" %
                    nodes[0])
                return api.types.status.FAILURE

            get_config_objects = netagent_cfg_api.GetConfigObjects(newObjects)
            if len(get_config_objects) == 0:
                api.Logger.error("Unable to fetch newly pushed objects")
                return api.types.status.FAILURE

    else:
        workloads = api.GetWorkloads(nodes[0])
        w1 = workloads[0]

        req1 = api.Trigger_CreateExecuteCommandsRequest(serial=True)

        api.Trigger_AddCommand(req1, w1.node_name, w1.workload_name,
                               "sudo ip xfrm state flush")

        api.Trigger_AddCommand(req1, w1.node_name, w1.workload_name,
                               "sudo ip xfrm policy flush")

        api.Trigger_AddCommand(
            req1, w1.node_name, w1.workload_name,
            "sudo ip xfrm policy add src 192.168.100.103/32 dst 192.168.100.101/32 proto tcp dport %s dir in tmpl src 192.168.100.103 dst 192.168.100.101 proto esp mode tunnel"
            % (tc.iterators.port))

        api.Trigger_AddCommand(
            req1, w1.node_name, w1.workload_name,
            "sudo ip xfrm policy add src 192.168.100.103/32 dst 192.168.100.101/32 proto tcp dport %s dir fwd tmpl src 192.168.100.103 dst 192.168.100.101 proto esp mode tunnel"
            % (tc.iterators.port))

        api.Trigger_AddCommand(
            req1, w1.node_name, w1.workload_name,
            "sudo ip xfrm policy add src 192.168.100.101/32 dst 192.168.100.103/32 proto tcp sport %s dir out tmpl src 192.168.100.101 dst 192.168.100.103 proto esp mode tunnel"
            % (tc.iterators.port))

        api.Trigger_AddCommand(
            req1, w1.node_name, w1.workload_name,
            "sudo ip xfrm state add src 192.168.100.103 dst 192.168.100.101 proto esp spi 0x01 mode tunnel aead 'rfc4106(gcm(aes))' 0x414141414141414141414141414141414141414141414141414141414141414100000000 128 sel src 192.168.100.103/32 dst 192.168.100.101/32"
        )

        api.Trigger_AddCommand(
            req1, w1.node_name, w1.workload_name,
            "sudo ip xfrm state add src 192.168.100.101 dst 192.168.100.103 proto esp spi 0x01 mode tunnel aead 'rfc4106(gcm(aes))' 0x414141414141414141414141414141414141414141414141414141414141414100000000 128 sel src 192.168.100.101/32 dst 192.168.100.103/32"
        )

        api.Trigger_AddCommand(
            req1, w1.node_name, w1.workload_name,
            "sudo ip xfrm policy add src 192.168.100.103/32 dst 192.168.100.101/32 proto udp dport %s dir in tmpl src 192.168.100.103 dst 192.168.100.101 proto esp mode tunnel"
            % (tc.iterators.port))

        api.Trigger_AddCommand(
            req1, w1.node_name, w1.workload_name,
            "sudo ip xfrm policy add src 192.168.100.103/32 dst 192.168.100.101/32 proto udp dport %s dir fwd tmpl src 192.168.100.103 dst 192.168.100.101 proto esp mode tunnel"
            % (tc.iterators.port))

        api.Trigger_AddCommand(
            req1, w1.node_name, w1.workload_name,
            "sudo ip xfrm policy add src 192.168.100.101/32 dst 192.168.100.103/32 proto udp sport %s dir out tmpl src 192.168.100.101 dst 192.168.100.103 proto esp mode tunnel"
            % (tc.iterators.port))

        api.Trigger_AddCommand(req1, w1.node_name, w1.workload_name,
                               "sudo ip xfrm state list")

        api.Trigger_AddCommand(req1, w1.node_name, w1.workload_name,
                               "sudo ip xfrm policy list")

        trig_resp1 = api.Trigger(req1)
        term_resp1 = api.Trigger_TerminateAllCommands(trig_resp1)

    # Configure IPsec on Node 2

    if api.IsNaplesNode(nodes[1]):

        if len(encrypt_objects) == 0:

            newObjects = netagent_cfg_api.AddOneConfig(
                api.GetTopologyDirectory() +
                "/ipsec/ipsec_encryption_node2.json")
            if len(newObjects) == 0:
                api.Logger.error(
                    "Adding new objects to store failed for ipsec_encryption_node2.json"
                )
                return api.types.status.FAILURE

            ret = netagent_cfg_api.PushConfigObjects(newObjects,
                                                     node_names=push_node_1,
                                                     ignore_error=True)
            if ret != api.types.status.SUCCESS:
                api.Logger.error(
                    "Unable to push ipsec-encryption objects to node %s" %
                    nodes[1])
                return api.types.status.FAILURE

            get_config_objects = netagent_cfg_api.GetConfigObjects(newObjects)
            if len(get_config_objects) == 0:
                api.Logger.error("Unable to fetch newly pushed objects")
                #return api.types.status.FAILURE

        if len(decrypt_objects) == 0:

            newObjects = netagent_cfg_api.AddOneConfig(
                api.GetTopologyDirectory() +
                "/ipsec/ipsec_decryption_node2.json")
            if len(newObjects) == 0:
                api.Logger.error(
                    "Adding new objects to store failed for ipsec_decryption_node2.json"
                )
                return api.types.status.FAILURE

            ret = netagent_cfg_api.PushConfigObjects(newObjects,
                                                     node_names=push_node_1,
                                                     ignore_error=True)
            if ret != api.types.status.SUCCESS:
                api.Logger.error(
                    "Unable to push ipsec-decryption objects to node %s" %
                    nodes[1])
                return api.types.status.FAILURE

            get_config_objects = netagent_cfg_api.GetConfigObjects(newObjects)
            if len(get_config_objects) == 0:
                api.Logger.error("Unable to fetch newly pushed objects")
                #return api.types.status.FAILURE

        if len(policy_objects) == 0:

            newObjects = netagent_cfg_api.AddOneConfig(
                api.GetTopologyDirectory() +
                "/ipsec/ipsec_policies_node2.json")
            if len(newObjects) == 0:
                api.Logger.error(
                    "Adding new objects to store failed for ipsec_policies_node2.json"
                )
                return api.types.status.FAILURE

            ret = netagent_cfg_api.PushConfigObjects(newObjects,
                                                     node_names=push_node_1,
                                                     ignore_error=True)
            if ret != api.types.status.SUCCESS:
                api.Logger.error(
                    "Unable to push ipsec-policy objects to node %s" %
                    nodes[1])
                return api.types.status.FAILURE

            get_config_objects = netagent_cfg_api.GetConfigObjects(newObjects)
            if len(get_config_objects) == 0:
                api.Logger.error("Unable to fetch newly pushed objects")
                #return api.types.status.FAILURE

    else:
        workloads = api.GetWorkloads(nodes[1])
        w2 = workloads[0]

        req2 = api.Trigger_CreateExecuteCommandsRequest(serial=True)

        api.Trigger_AddCommand(req2, w2.node_name, w2.workload_name,
                               "sudo ip xfrm state flush")

        api.Trigger_AddCommand(req2, w2.node_name, w2.workload_name,
                               "sudo ip xfrm policy flush")

        api.Trigger_AddCommand(
            req2, w2.node_name, w2.workload_name,
            "sudo ip xfrm policy add src 192.168.100.101/32 dst 192.168.100.103/32 proto tcp dport %s dir in tmpl src 192.168.100.101 dst 192.168.100.103 proto esp mode tunnel"
            % (tc.iterators.port))

        api.Trigger_AddCommand(
            req2, w2.node_name, w2.workload_name,
            "sudo ip xfrm policy add src 192.168.100.101/32 dst 192.168.100.103/32 proto tcp dport %s dir fwd tmpl src 192.168.100.101 dst 192.168.100.103 proto esp mode tunnel"
            % (tc.iterators.port))

        api.Trigger_AddCommand(
            req2, w2.node_name, w2.workload_name,
            "sudo ip xfrm policy add src 192.168.100.103/32 dst 192.168.100.101/32 proto tcp sport %s dir out tmpl src 192.168.100.103 dst 192.168.100.101 proto esp mode tunnel"
            % (tc.iterators.port))

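        # The 36-byte rfc4106(gcm(aes)) key blob below is a 32-byte AES key
        # plus a 4-byte salt; the trailing 128 is the ICV length in bits.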
        api.Trigger_AddCommand(
            req2, w2.node_name, w2.workload_name,
            "sudo ip xfrm state add src 192.168.100.101 dst 192.168.100.103 proto esp spi 0x01 mode tunnel aead 'rfc4106(gcm(aes))' 0x414141414141414141414141414141414141414141414141414141414141414100000000 128 sel src 192.168.100.101/32 dst 192.168.100.103/32"
        )

        api.Trigger_AddCommand(
            req2, w2.node_name, w2.workload_name,
            "sudo ip xfrm state add src 192.168.100.103 dst 192.168.100.101 proto esp spi 0x01 mode tunnel aead 'rfc4106(gcm(aes))' 0x414141414141414141414141414141414141414141414141414141414141414100000000 128 sel src 192.168.100.103/32 dst 192.168.100.101/32"
        )

        api.Trigger_AddCommand(
            req2, w2.node_name, w2.workload_name,
            "sudo ip xfrm policy add src 192.168.100.101/32 dst 192.168.100.103/32 proto udp dport %s dir in tmpl src 192.168.100.101 dst 192.168.100.103 proto esp mode tunnel"
            % (tc.iterators.port))

        api.Trigger_AddCommand(
            req2, w2.node_name, w2.workload_name,
            "sudo ip xfrm policy add src 192.168.100.101/32 dst 192.168.100.103/32 proto udp dport %s dir fwd tmpl src 192.168.100.101 dst 192.168.100.103 proto esp mode tunnel"
            % (tc.iterators.port))

        api.Trigger_AddCommand(
            req2, w2.node_name, w2.workload_name,
            "sudo ip xfrm policy add src 192.168.100.103/32 dst 192.168.100.101/32 proto udp sport %s dir out tmpl src 192.168.100.103 dst 192.168.100.101 proto esp mode tunnel"
            % (tc.iterators.port))

        api.Trigger_AddCommand(req2, w2.node_name, w2.workload_name,
                               "sudo ip xfrm state list")

        api.Trigger_AddCommand(req2, w2.node_name, w2.workload_name,
                               "sudo ip xfrm policy list")

        trig_resp2 = api.Trigger(req2)
        term_resp2 = api.Trigger_TerminateAllCommands(trig_resp2)

    workloads = api.GetWorkloads(nodes[0])
    w1 = workloads[0]
    workloads = api.GetWorkloads(nodes[1])
    w2 = workloads[0]
    bypass_test = 0

    if w1.IsNaples() and w2.IsNaples():
        api.Logger.info(
            "Both workloads are Naples, %s is nc client, %s is nc server, bypassing test"
            % (w1.node_name, w2.node_name))
        nc_client_wl = w1
        nc_server_wl = w2
        bypass_test = 1
    elif w1.IsNaples():
        api.Logger.info("%s is Naples and nc client, %s is nc server" %
                        (w1.node_name, w2.node_name))
        nc_client_wl = w1
        nc_server_wl = w2
    elif w2.IsNaples():
        api.Logger.info("%s is Naples and nc client, %s is nc server" %
                        (w2.node_name, w1.node_name))
        nc_client_wl = w2
        nc_server_wl = w1

    req = api.Trigger_CreateExecuteCommandsRequest(serial=True)

    tc.cmd_descr = "Server: %s(%s) <--> Client: %s(%s) on %s port %s" %\
                   (nc_server_wl.workload_name, nc_server_wl.ip_address, nc_client_wl.workload_name, nc_client_wl.ip_address, tc.iterators.protocol, tc.iterators.port)

    api.Logger.info("Starting NC test over IPSec from %s" % (tc.cmd_descr))

    if bypass_test == 0:
        cmd_cookie = "Creating test file on %s" % (nc_client_wl.workload_name)
        api.Trigger_AddCommand(
            req, nc_client_wl.node_name, nc_client_wl.workload_name,
            "base64 /dev/urandom | head -1000 > ipsec_client.dat")
        tc.cmd_cookies.append(cmd_cookie)

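        # Shrink the MTU so that ESP tunnel overhead does not push the
        # encrypted packets past the path MTU.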
        cmd_cookie = "Setting MTU to smaller value on %s" % (
            nc_client_wl.workload_name)
        api.Trigger_AddCommand(req, nc_client_wl.node_name,
                               nc_client_wl.workload_name,
                               "ifconfig %s mtu 1048" % nc_client_wl.interface)
        tc.cmd_cookies.append(cmd_cookie)

        cmd_cookie = "Running nc server on %s" % (nc_server_wl.workload_name)
        if tc.iterators.protocol == "tcp":
            api.Trigger_AddCommand(req,
                                   nc_server_wl.node_name,
                                   nc_server_wl.workload_name,
                                   "nc -l %s > ipsec_server.dat" %
                                   (tc.iterators.port),
                                   background=True)
        else:
            api.Trigger_AddCommand(req,
                                   nc_server_wl.node_name,
                                   nc_server_wl.workload_name,
                                   "nc --udp -l %s > ipsec_server.dat" %
                                   (tc.iterators.port),
                                   background=True)
        tc.cmd_cookies.append(cmd_cookie)

        cmd_cookie = "Running nc client on %s" % (nc_client_wl.workload_name)
        if tc.iterators.protocol == "tcp":
            api.Trigger_AddCommand(
                req, nc_client_wl.node_name, nc_client_wl.workload_name,
                "nc %s %s < ipsec_client.dat" %
                (nc_server_wl.ip_address, tc.iterators.port))
        else:
            api.Trigger_AddCommand(
                req, nc_client_wl.node_name, nc_client_wl.workload_name,
                "nc --udp %s %s < ipsec_client.dat" %
                (nc_server_wl.ip_address, tc.iterators.port))
        tc.cmd_cookies.append(cmd_cookie)
    else:
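        # Test is bypassed: create empty placeholder files so the file-copy
        # and verification steps below still succeed.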
        cmd_cookie = "Creating dummy file on %s" % (nc_client_wl.workload_name)
        api.Trigger_AddCommand(
            req, nc_client_wl.node_name, nc_client_wl.workload_name,
            "rm -f ipsec_client.dat ; touch ipsec_client.dat")
        tc.cmd_cookies.append(cmd_cookie)

        cmd_cookie = "Creating dummy file on %s" % (nc_server_wl.workload_name)
        api.Trigger_AddCommand(
            req, nc_server_wl.node_name, nc_server_wl.workload_name,
            "rm -f ipsec_server.dat ; touch ipsec_server.dat")
        tc.cmd_cookies.append(cmd_cookie)

    if nc_client_wl.IsNaples():
        cmd_cookie = "IPSec state on %s AFTER running nc test" % (
            nc_client_wl.node_name)
        api.Trigger_AddNaplesCommand(
            req, nc_client_wl.node_name,
            "/nic/bin/halctl show ipsec-global-stats")
        tc.cmd_cookies.append(cmd_cookie)
    else:
        cmd_cookie = "IPSec state on %s AFTER running nc test" % (
            nc_client_wl.node_name)
        api.Trigger_AddCommand(req, nc_client_wl.node_name,
                               nc_client_wl.workload_name,
                               "sudo ip xfrm policy show")
        tc.cmd_cookies.append(cmd_cookie)

    if nc_server_wl.IsNaples():
        cmd_cookie = "IPSec state on %s AFTER running nc test" % (
            nc_server_wl.node_name)
        api.Trigger_AddNaplesCommand(
            req, nc_server_wl.node_name,
            "/nic/bin/halctl show ipsec-global-stats")
        tc.cmd_cookies.append(cmd_cookie)
    else:
        cmd_cookie = "IPSec state on %s AFTER running nc test" % (
            nc_server_wl.node_name)
        api.Trigger_AddCommand(req, nc_server_wl.node_name,
                               nc_server_wl.workload_name,
                               "sudo ip xfrm policy show")
        tc.cmd_cookies.append(cmd_cookie)

    trig_resp = api.Trigger(req)
    term_resp = api.Trigger_TerminateAllCommands(trig_resp)

    tc.resp = api.Trigger_AggregateCommandsResponse(trig_resp, term_resp)
    resp = api.CopyFromWorkload(nc_client_wl.node_name,
                                nc_client_wl.workload_name,
                                ['ipsec_client.dat'], tc.GetLogsDir())
    if resp is None:
        api.Logger.error("Could not find ipsec_client.dat")
        return api.types.status.FAILURE
    resp = api.CopyFromWorkload(nc_server_wl.node_name,
                                nc_server_wl.workload_name,
                                ['ipsec_server.dat'], tc.GetLogsDir())
    if resp is None:
        api.Logger.error("Could not find ipsec_server.dat")
        return api.types.status.FAILURE
    return api.types.status.SUCCESS
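The xfrm setup above repeats a fixed pattern per host pair: three policies (dir in, fwd, out) plus one SA per direction. A minimal sketch of a helper that would generate that command set follows; the helper name and argument names are illustrative, not part of the test framework.

def xfrm_tunnel_cmds(local_ip, peer_ip, proto, port, spi, aead_key):
    # Sketch only: builds the same 'ip xfrm' command strings used in the
    # tests above for one encrypted host pair.
    cmds = []
    tmpl_in = "tmpl src %s dst %s proto esp mode tunnel" % (peer_ip, local_ip)
    tmpl_out = "tmpl src %s dst %s proto esp mode tunnel" % (local_ip, peer_ip)
    # 'in' and 'fwd' policies match traffic arriving from the peer.
    for direction in ("in", "fwd"):
        cmds.append("sudo ip xfrm policy add src %s/32 dst %s/32 proto %s "
                    "dport %s dir %s %s"
                    % (peer_ip, local_ip, proto, port, direction, tmpl_in))
    # The 'out' policy matches locally generated traffic toward the peer.
    cmds.append("sudo ip xfrm policy add src %s/32 dst %s/32 proto %s "
                "sport %s dir out %s"
                % (local_ip, peer_ip, proto, port, tmpl_out))
    # One SA per direction; both use the same SPI and AEAD key blob
    # (32-byte key + 4-byte salt) with a 128-bit ICV.
    for src, dst in ((peer_ip, local_ip), (local_ip, peer_ip)):
        cmds.append("sudo ip xfrm state add src %s dst %s proto esp spi %s "
                    "mode tunnel aead 'rfc4106(gcm(aes))' %s 128 "
                    "sel src %s/32 dst %s/32"
                    % (src, dst, spi, aead_key, src, dst))
    return cmds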
Example #21
0
def Trigger(tc):
    if tc.ignore == True:
        return api.types.status.SUCCESS

    if tc.error == True:
        return api.types.status.FAILURE

    tc.resp_tcpdump_erspan = None
    tc.resp_cleanup = None

    protoDir = api.GetTopologyDirectory() +\
               "/gen/telemetry/{}/{}".format('mirror', 'endpoint')
    api.Logger.info("Template Config files location: ", protoDir)

    policies = utils.GetTargetJsons('mirror', 'endpoint')
    for policy_json in policies:
        #
        # Get template-Mirror Config
        #
        api.Logger.info("Adding one config object for {}", format(policy_json))
        newObjects = agent_api.AddOneConfig(policy_json)
        if len(newObjects) == 0:
            api.Logger.error("Adding new objects to store failed")
            tc.error = True
            return api.types.status.FAILURE

        #
        # Modify template-Mirror Config to make sure that the Naples node
        # acts as either the source or the destination
        #
        # Set up the Collector on the remote node
        #
        if newObjects[0].kind == 'InterfaceMirrorSession':
            tc.ep_collector_objects = newObjects
            agent_api.RemoveConfigObjects(tc.ep_collector_objects)
        elif newObjects[0].kind == 'Endpoint':
            tc.endpoint_objects = newObjects
            agent_api.RemoveConfigObjects(tc.endpoint_objects)
            updateEndpointObjectTempl(tc, tc.endpoint_objects,
                                      tc.store_endpoint_objects[0])

    for i in range(0, len(policies)):
        #
        # Push Collector object
        #
        if i == 0:
            colObjects = tc.ep_collector_objects
            if tc.iterators.session == 'single':
                ret = eutils.generateEpCollectorConfig(tc, colObjects)
            else:
                ret = eutils.generateEpCollectorConfigForMultiMirrorSession(
                    tc, colObjects)
            if ret != api.types.status.SUCCESS:
                api.Logger.error("Unable to identify Collector Workload")
                tc.error = True
                return api.types.status.FAILURE

            api.Logger.info("Pushing collector objects")
            ret = agent_api.PushConfigObjects(colObjects,
                                              [tc.naples.node_name],
                                              [tc.naples_device_name])
            if ret != api.types.status.SUCCESS:
                api.Logger.error("Unable to push collector objects")
                tc.error = True
                return api.types.status.FAILURE
            continue

        api.Logger.info("Generating Endpoint objects")
        cfg_api.PrintConfigsObjects(colObjects)
        #
        # Update Endpoint objects
        #
        epObjects = tc.endpoint_objects
        ret = eutils.generateEndpointConfig(tc, epObjects, colObjects)
        if ret != api.types.status.SUCCESS:
            agent_api.DeleteConfigObjects(tc.ep_collector_objects,
                                          [tc.naples.node_name],
                                          [tc.naples_device_name])
            api.Logger.error("Unable to identify Endpoints")
            tc.error = True
            return api.types.status.FAILURE

        api.Logger.info("Pushing Endpoint objects")
        ret = agent_api.UpdateConfigObjects(epObjects, [tc.naples.node_name],
                                            [tc.naples_device_name])
        if ret != api.types.status.SUCCESS:
            agent_api.DeleteConfigObjects(tc.ep_collector_objects,
                                          [tc.naples.node_name],
                                          [tc.naples_device_name])
            api.Logger.error("Unable to update endpoint objects")
            tc.error = True
            return api.types.status.FAILURE

        #
        # Establish Forwarding set up between Naples-peer and Collectors
        #
        eutils.establishForwardingSetup(tc)

        req_tcpdump_erspan = api.Trigger_CreateExecuteCommandsRequest(\
                             serial = True)
        for c in range(0, len(tc.ep_collector)):
            #
            # Set up TCPDUMP's on the collector
            #
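            # ERSPAN is GRE-encapsulated, hence the "ip proto 47" (Windows)
            # and "ip proto gre" (Linux) capture filters below.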
            idx = tc.ep_collector_idx[c]
            if tc.ep_collector[c].IsNaples():
                ### TODO: run and revisit the Windows case and fix any issues.
                if api.GetNodeOs(tc.naples.node_name) == "windows":
                    intfGuid = ionic_utils.winIntfGuid(
                        tc.ep_collector[c].node_name,
                        tc.ep_collector[c].interface)
                    intfVal = str(
                        ionic_utils.winTcpDumpIdx(tc.ep_collector[c].node_name,
                                                  intfGuid))
                    cmd = "sudo /mnt/c/Windows/System32/tcpdump.exe -c 1000 -XX -vv -i {} ip proto 47 and dst {} -U -w ep-mirror-{}.pcap"\
                          .format(intfVal, tc.collector_ip_address[idx], c)
                else:
                    intfVal = tc.ep_collector[c].interface
                    cmd = "tcpdump -c 1000 -XX -vv -nni {} ip proto gre and dst {}\
                           --immediate-mode -U -w ep-mirror-{}.pcap"\
                          .format(intfVal,
                              tc.collector_ip_address[idx], c)
            else:
                cmd = "tcpdump -p -c 1000 -XX -vv -nni {} ip proto gre\
                       and dst {} --immediate-mode -U -w ep-mirror-{}.pcap"\
                      .format(tc.ep_collector[c].interface,
                              tc.collector_ip_address[idx], c)
            eutils.add_command(req_tcpdump_erspan, tc.ep_collector[c], cmd,
                               True)

        resp_tcpdump_erspan = api.Trigger(req_tcpdump_erspan)
        for cmd in resp_tcpdump_erspan.commands:
            api.PrintCommandResults(cmd)

        #
        # Classic mode requires a delay to make sure that TCPDUMP background
        # process is fully up
        #
        if tc.classic_mode == True:
            time.sleep(2)

        #
        # Trigger packets for ERSPAN to take effect
        #
        tc.dest_port = '120'
        if api.GetNodeOs(tc.naples.node_name) in ('linux', 'windows'):
            eutils.triggerTrafficInClassicModeLinux(tc)
        else:
            eutils.triggerTrafficInHostPinModeOrFreeBSD(tc)

        #
        # Dump sessions/flows/P4-tables for debug purposes
        #
        eutils.showSessionAndP4TablesForDebug(tc, tc.ep_collector,
                                              tc.ep_collector_idx)

        #
        # Terminate TCPDUMP background process
        #
        term_resp_tcpdump_erspan = api.Trigger_TerminateAllCommands(\
                                   resp_tcpdump_erspan)
        tc.resp_tcpdump_erspan = api.Trigger_AggregateCommandsResponse(\
                              resp_tcpdump_erspan, term_resp_tcpdump_erspan)
        if api.GetNodeOs(tc.naples.node_name) == "windows":
            req = api.Trigger_CreateExecuteCommandsRequest(serial=True)
            cmd = api.WINDOWS_POWERSHELL_CMD + " Stop-Process -Name 'tcpdump' -Force"
            api.Trigger_AddCommand(req,
                                   tc.naples.node_name,
                                   tc.naples.workload_name,
                                   cmd,
                                   background=False)
            resp = api.Trigger(req)

        # Delete the objects
        eutils.deGenerateEndpointConfig(tc, tc.endpoint_objects,
                                        tc.ep_collector_objects)
        agent_api.UpdateConfigObjects(tc.endpoint_objects,
                                      [tc.naples.node_name],
                                      [tc.naples_device_name])

        agent_api.DeleteConfigObjects(tc.ep_collector_objects,
                                      [tc.naples.node_name],
                                      [tc.naples_device_name])

        #
        # Make sure that Mirror-config has been removed
        #
        tc.resp_cleanup = eutils.showP4TablesForValidation(tc)

    return api.types.status.SUCCESS
Example #22
0
def Trigger(tc):
    tc.cmd_cookies = []
    tc.cmd_cookies1 = []
    tc.cmd_cookies2 = []

    nodes = api.GetWorkloadNodeHostnames()
    push_node_0 = [nodes[0]]
    push_node_1 = [nodes[1]]

    encrypt_objects = netagent_cfg_api.QueryConfigs(kind='IPSecSAEncrypt')
    decrypt_objects = netagent_cfg_api.QueryConfigs(kind='IPSecSADecrypt')
    policy_objects = netagent_cfg_api.QueryConfigs(kind='IPSecPolicy')

    # Configure IPsec on Node 1

    if api.IsNaplesNode(nodes[0]):

        if len(encrypt_objects) == 0:

            newObjects = netagent_cfg_api.AddOneConfig(
                api.GetTopologyDirectory() +
                "/ipsec/ipsec_encryption_node1.json")
            if len(newObjects) == 0:
                api.Logger.error(
                    "Adding new objects to store failed for ipsec_encryption_node1.json"
                )
                return api.types.status.FAILURE

            ret = netagent_cfg_api.PushConfigObjects(newObjects,
                                                     node_names=push_node_0,
                                                     ignore_error=True)
            if ret != api.types.status.SUCCESS:
                api.Logger.error(
                    "Unable to push ipsec-encryption objects to node %s" %
                    nodes[0])
                return api.types.status.FAILURE

            get_config_objects = netagent_cfg_api.GetConfigObjects(newObjects)
            if len(get_config_objects) == 0:
                api.Logger.error("Unable to fetch newly pushed objects")
                return api.types.status.FAILURE

        if len(decrypt_objects) == 0:

            newObjects = netagent_cfg_api.AddOneConfig(
                api.GetTopologyDirectory() +
                "/ipsec/ipsec_decryption_node1.json")
            if len(newObjects) == 0:
                api.Logger.error(
                    "Adding new objects to store failed for ipsec_decryption_node1.json"
                )
                return api.types.status.FAILURE

            ret = netagent_cfg_api.PushConfigObjects(newObjects,
                                                     node_names=push_node_0,
                                                     ignore_error=True)
            if ret != api.types.status.SUCCESS:
                api.Logger.error(
                    "Unable to push ipsec-encryption objects to node %s" %
                    nodes[0])
                return api.types.status.FAILURE

            get_config_objects = netagent_cfg_api.GetConfigObjects(newObjects)
            if len(get_config_objects) == 0:
                api.Logger.error("Unable to fetch newly pushed objects")
                return api.types.status.FAILURE

        if len(policy_objects) == 0:

            newObjects = netagent_cfg_api.AddOneConfig(
                api.GetTopologyDirectory() +
                "/ipsec/ipsec_policies_node1.json")
            if len(newObjects) == 0:
                api.Logger.error(
                    "Adding new objects to store failed for ipsec_policies_node1.json"
                )
                return api.types.status.FAILURE

            ret = netagent_cfg_api.PushConfigObjects(newObjects,
                                                     node_names=push_node_0,
                                                     ignore_error=True)
            if ret != api.types.status.SUCCESS:
                api.Logger.error(
                    "Unable to push ipsec-policy objects to node %s" %
                    nodes[0])
                return api.types.status.FAILURE

            get_config_objects = netagent_cfg_api.GetConfigObjects(newObjects)
            if len(get_config_objects) == 0:
                api.Logger.error("Unable to fetch newly pushed objects")
                return api.types.status.FAILURE

    else:
        workloads = api.GetWorkloads(nodes[0])
        w1 = workloads[0]

        req1 = api.Trigger_CreateExecuteCommandsRequest(serial=True)

        api.Trigger_AddCommand(req1, w1.node_name, w1.workload_name,
                               "sudo ip xfrm state flush")

        api.Trigger_AddCommand(req1, w1.node_name, w1.workload_name,
                               "sudo ip xfrm policy flush")

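        # Program one set of xfrm policies (in/fwd/out) and one SA pair per
        # (port, SPI, AEAD-key) tuple.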
        for port, spi, aead in zip(tc.args.ports_list, tc.args.spi_list,
                                   tc.args.aead_list):
            api.Trigger_AddCommand(
                req1, w1.node_name, w1.workload_name,
                "sudo ip xfrm policy add src 192.168.100.103/32 dst 192.168.100.101/32 proto tcp dport %s dir in tmpl src 192.168.100.103 dst 192.168.100.101 proto esp mode tunnel"
                % port)

            api.Trigger_AddCommand(
                req1, w1.node_name, w1.workload_name,
                "sudo ip xfrm policy add src 192.168.100.103/32 dst 192.168.100.101/32 proto tcp dport %s dir fwd tmpl src 192.168.100.103 dst 192.168.100.101 proto esp mode tunnel"
                % port)

            api.Trigger_AddCommand(
                req1, w1.node_name, w1.workload_name,
                "sudo ip xfrm policy add src 192.168.100.101/32 dst 192.168.100.103/32 proto tcp sport %s dir out tmpl src 192.168.100.101 dst 192.168.100.103 proto esp mode tunnel"
                % port)

            api.Trigger_AddCommand(
                req1, w1.node_name, w1.workload_name,
                "sudo ip xfrm state add src 192.168.100.103 dst 192.168.100.101 proto esp spi %s mode tunnel aead 'rfc4106(gcm(aes))' %s 128 sel src 192.168.100.103/32 dst 192.168.100.101/32"
                % (spi, aead))

            api.Trigger_AddCommand(
                req1, w1.node_name, w1.workload_name,
                "sudo ip xfrm state add src 192.168.100.101 dst 192.168.100.103 proto esp spi %s mode tunnel aead 'rfc4106(gcm(aes))' %s 128 sel src 192.168.100.101/32 dst 192.168.100.103/32"
                % (spi, aead))

            api.Trigger_AddCommand(
                req1, w1.node_name, w1.workload_name,
                "sudo ip xfrm policy add src 192.168.100.103/32 dst 192.168.100.101/32 proto udp dport %s dir in tmpl src 192.168.100.103 dst 192.168.100.101 proto esp mode tunnel"
                % port)

            api.Trigger_AddCommand(
                req1, w1.node_name, w1.workload_name,
                "sudo ip xfrm policy add src 192.168.100.103/32 dst 192.168.100.101/32 proto udp dport %s dir fwd tmpl src 192.168.100.103 dst 192.168.100.101 proto esp mode tunnel"
                % port)

            api.Trigger_AddCommand(
                req1, w1.node_name, w1.workload_name,
                "sudo ip xfrm policy add src 192.168.100.101/32 dst 192.168.100.103/32 proto udp sport %s dir out tmpl src 192.168.100.101 dst 192.168.100.103 proto esp mode tunnel"
                % port)

        api.Trigger_AddCommand(req1, w1.node_name, w1.workload_name,
                               "sudo ip xfrm state list")

        api.Trigger_AddCommand(req1, w1.node_name, w1.workload_name,
                               "sudo ip xfrm policy list")

        trig_resp1 = api.Trigger(req1)
        term_resp1 = api.Trigger_TerminateAllCommands(trig_resp1)

    # Configure IPsec on Node 2

    if api.IsNaplesNode(nodes[1]):

        if len(encrypt_objects) == 0:

            newObjects = netagent_cfg_api.AddOneConfig(
                api.GetTopologyDirectory() +
                "/ipsec/ipsec_encryption_node2.json")
            if len(newObjects) == 0:
                api.Logger.error(
                    "Adding new objects to store failed for ipsec_encryption_node2.json"
                )
                return api.types.status.FAILURE

            ret = netagent_cfg_api.PushConfigObjects(newObjects,
                                                     node_names=push_node_1,
                                                     ignore_error=True)
            if ret != api.types.status.SUCCESS:
                api.Logger.error(
                    "Unable to push ipsec-encryption objects to node %s" %
                    nodes[1])
                return api.types.status.FAILURE

            get_config_objects = netagent_cfg_api.GetConfigObjects(newObjects)
            if len(get_config_objects) == 0:
                api.Logger.error("Unable to fetch newly pushed objects")
                #return api.types.status.FAILURE

        if len(decrypt_objects) == 0:

            newObjects = netagent_cfg_api.AddOneConfig(
                api.GetTopologyDirectory() +
                "/ipsec/ipsec_decryption_node2.json")
            if len(newObjects) == 0:
                api.Logger.error(
                    "Adding new objects to store failed for ipsec_decryption_node2.json"
                )
                return api.types.status.FAILURE

            ret = netagent_cfg_api.PushConfigObjects(newObjects,
                                                     node_names=push_node_1,
                                                     ignore_error=True)
            if ret != api.types.status.SUCCESS:
                api.Logger.error(
                    "Unable to push ipsec-encryption objects to node %s" %
                    nodes[1])
                return api.types.status.FAILURE

            get_config_objects = netagent_cfg_api.GetConfigObjects(newObjects)
            if len(get_config_objects) == 0:
                api.Logger.error("Unable to fetch newly pushed objects")
                #return api.types.status.FAILURE

        if len(policy_objects) == 0:

            newObjects = netagent_cfg_api.AddOneConfig(
                api.GetTopologyDirectory() +
                "/ipsec/ipsec_policies_node2.json")
            if len(newObjects) == 0:
                api.Logger.error(
                    "Adding new objects to store failed for ipsec_policies_node2.json"
                )
                return api.types.status.FAILURE

            ret = netagent_cfg_api.PushConfigObjects(newObjects,
                                                     node_names=push_node_1,
                                                     ignore_error=True)
            if ret != api.types.status.SUCCESS:
                api.Logger.error(
                    "Unable to push ipsec-encryption objects to node %s" %
                    nodes[1])
                return api.types.status.FAILURE

            get_config_objects = netagent_cfg_api.GetConfigObjects(newObjects)
            if len(get_config_objects) == 0:
                api.Logger.error("Unable to fetch newly pushed objects")
                #return api.types.status.FAILURE

    else:
        workloads = api.GetWorkloads(nodes[1])
        w2 = workloads[0]

        req2 = api.Trigger_CreateExecuteCommandsRequest(serial=True)

        api.Trigger_AddCommand(req2, w2.node_name, w2.workload_name,
                               "sudo ip xfrm state flush")

        api.Trigger_AddCommand(req2, w2.node_name, w2.workload_name,
                               "sudo ip xfrm policy flush")

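        # Program one set of xfrm policies (in/fwd/out) and one SA pair per
        # (port, SPI, AEAD-key) tuple.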
        for port, spi, aead in zip(tc.args.ports_list, tc.args.spi_list,
                                   tc.args.aead_list):
            api.Trigger_AddCommand(
                req2, w2.node_name, w2.workload_name,
                "sudo ip xfrm policy add src 192.168.100.101/32 dst 192.168.100.103/32 proto tcp dport %s dir in tmpl src 192.168.100.101 dst 192.168.100.103 proto esp mode tunnel"
                % port)

            api.Trigger_AddCommand(
                req2, w2.node_name, w2.workload_name,
                "sudo ip xfrm policy add src 192.168.100.101/32 dst 192.168.100.103/32 proto tcp dport %s dir fwd tmpl src 192.168.100.101 dst 192.168.100.103 proto esp mode tunnel"
                % port)

            api.Trigger_AddCommand(
                req2, w2.node_name, w2.workload_name,
                "sudo ip xfrm policy add src 192.168.100.103/32 dst 192.168.100.101/32 proto tcp sport %s dir out tmpl src 192.168.100.103 dst 192.168.100.101 proto esp mode tunnel"
                % port)

            api.Trigger_AddCommand(
                req2, w2.node_name, w2.workload_name,
                "sudo ip xfrm state add src 192.168.100.101 dst 192.168.100.103 proto esp spi %s mode tunnel aead 'rfc4106(gcm(aes))' %s 128 sel src 192.168.100.101/32 dst 192.168.100.103/32"
                % (spi, aead))

            api.Trigger_AddCommand(
                req2, w2.node_name, w2.workload_name,
                "sudo ip xfrm state add src 192.168.100.103 dst 192.168.100.101 proto esp spi %s mode tunnel aead 'rfc4106(gcm(aes))' %s 128 sel src 192.168.100.103/32 dst 192.168.100.101/32"
                % (spi, aead))

            api.Trigger_AddCommand(
                req2, w2.node_name, w2.workload_name,
                "sudo ip xfrm policy add src 192.168.100.101/32 dst 192.168.100.103/32 proto udp dport %s dir in tmpl src 192.168.100.101 dst 192.168.100.103 proto esp mode tunnel"
                % port)

            api.Trigger_AddCommand(
                req2, w2.node_name, w2.workload_name,
                "sudo ip xfrm policy add src 192.168.100.101/32 dst 192.168.100.103/32 proto udp dport %s dir fwd tmpl src 192.168.100.101 dst 192.168.100.103 proto esp mode tunnel"
                % port)

            api.Trigger_AddCommand(
                req2, w2.node_name, w2.workload_name,
                "sudo ip xfrm policy add src 192.168.100.103/32 dst 192.168.100.101/32 proto udp sport %s dir out tmpl src 192.168.100.103 dst 192.168.100.101 proto esp mode tunnel"
                % port)

        api.Trigger_AddCommand(req2, w2.node_name, w2.workload_name,
                               "sudo ip xfrm state list")

        api.Trigger_AddCommand(req2, w2.node_name, w2.workload_name,
                               "sudo ip xfrm policy list")

        trig_resp2 = api.Trigger(req2)
        term_resp2 = api.Trigger_TerminateAllCommands(trig_resp2)

    workloads = api.GetWorkloads(nodes[0])
    w1 = workloads[0]
    workloads = api.GetWorkloads(nodes[1])
    w2 = workloads[0]
    bypass_test = 0

    if w1.IsNaples() and w2.IsNaples():
        api.Logger.info(
            "Both workloads are Naples, %s is iperf client, %s is iperf server, bypassing test"
            % (w1.node_name, w2.node_name))
        iperf_client_wl = w1
        iperf_server_wl = w2
        bypass_test = 1
    elif w1.IsNaples():
        api.Logger.info("%s is Naples and iperf client, %s is iperf server" %
                        (w1.node_name, w2.node_name))
        iperf_client_wl = w1
        iperf_server_wl = w2
    elif w2.IsNaples():
        api.Logger.info("%s is Naples and iperf client, %s is iperf server" %
                        (w2.node_name, w1.node_name))
        iperf_client_wl = w2
        iperf_server_wl = w1

    req3 = api.Trigger_CreateExecuteCommandsRequest(serial=True)

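    # Raise the TCP receive-buffer limits (min/default/max) so the iperf
    # transfer is not window-limited.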
    cmd_cookie = "Set rcv socket buffer size on %s" % (w1.workload_name)
    api.Trigger_AddCommand(
        req3, w1.node_name, w1.workload_name,
        "sysctl -w net.ipv4.tcp_rmem='4096 2147483647 2147483647'")
    tc.cmd_cookies.append(cmd_cookie)

    cmd_cookie = "Set rcv socket buffer size on %s" % (w2.workload_name)
    api.Trigger_AddCommand(
        req3, w2.node_name, w2.workload_name,
        "sysctl -w net.ipv4.tcp_rmem='4096 2147483647 2147483647'")
    tc.cmd_cookies.append(cmd_cookie)

    cmd_cookie = "Setting MTU to smaller value on %s" % (
        iperf_client_wl.workload_name)
    api.Trigger_AddCommand(req3, iperf_client_wl.node_name,
                           iperf_client_wl.workload_name,
                           "ifconfig %s mtu 1048" % iperf_client_wl.interface)
    tc.cmd_cookies.append(cmd_cookie)

    tc.cmd_descr = "Server: %s(%s) <--> Client: %s(%s) on %s" %\
                   (iperf_server_wl.workload_name, iperf_server_wl.ip_address, iperf_client_wl.workload_name, iperf_client_wl.ip_address, tc.iterators.protocol)
    api.Logger.info("Starting Iperf test over IPSec from %s" % (tc.cmd_descr))

    if bypass_test == 0:
        for port in tc.args.ports_list:
            cmd_cookie = "Running iperf server on %s port %s" % (
                iperf_server_wl.workload_name, port)
            api.Trigger_AddCommand(req3,
                                   iperf_server_wl.node_name,
                                   iperf_server_wl.workload_name,
                                   "iperf -s -p %s" % (port),
                                   background=True)
            tc.cmd_cookies.append(cmd_cookie)

    req4 = api.Trigger_CreateExecuteCommandsRequest(serial=False)
    if bypass_test == 0:
        cmd_cookie = "Brief Sleep"
        api.Trigger_AddCommand(req4, iperf_client_wl.node_name,
                               iperf_client_wl.workload_name, "sleep 1")
        tc.cmd_cookies.append(cmd_cookie)
        for port in tc.args.ports_list:
            cmd_cookie = "Running iperf client on %s port %s" % (
                iperf_client_wl.workload_name, port)
            if tc.iterators.protocol == "tcp":
                api.Trigger_AddCommand(
                    req4, iperf_client_wl.node_name,
                    iperf_client_wl.workload_name, "iperf -c %s -p %s -M %s" %
                    (iperf_server_wl.ip_address, port, tc.iterators.pktsize))
            else:
                api.Trigger_AddCommand(
                    req4, iperf_client_wl.node_name,
                    iperf_client_wl.workload_name,
                    "iperf --udp -c %s -p %s -M %s" %
                    (iperf_server_wl.ip_address, port, tc.iterators.pktsize))
            tc.cmd_cookies.append(cmd_cookie)

    req5 = api.Trigger_CreateExecuteCommandsRequest(serial=True)
    if w1.IsNaples():
        cmd_cookie = "IPSec state on %s AFTER running iperf traffic" % (
            w1.node_name)
        api.Trigger_AddNaplesCommand(
            req5, w1.node_name, "/nic/bin/halctl show ipsec-global-stats")
        tc.cmd_cookies.append(cmd_cookie)
    else:
        cmd_cookie = "IPSec state on %s AFTER running iperf traffic" % (
            w1.node_name)
        api.Trigger_AddCommand(req5, w1.node_name, w1.workload_name,
                               "sudo ip xfrm policy show")
        tc.cmd_cookies.append(cmd_cookie)

    if w2.IsNaples():
        cmd_cookie = "IPSec state on %s AFTER running iperf traffic" % (
            w2.node_name)
        api.Trigger_AddNaplesCommand(
            req5, w2.node_name, "/nic/bin/halctl show ipsec-global-stats")
        tc.cmd_cookies.append(cmd_cookie)
    else:
        cmd_cookie = "IPSec state on %s AFTER running iperf traffic" % (
            w2.node_name)
        api.Trigger_AddCommand(req5, w2.node_name, w2.workload_name,
                               "sudo ip xfrm policy show")
        tc.cmd_cookies.append(cmd_cookie)

    trig_resp3 = api.Trigger(req3)
    trig_resp4 = api.Trigger(req4)
    trig_resp5 = api.Trigger(req5)

    term_resp3 = api.Trigger_TerminateAllCommands(trig_resp3)
    term_resp4 = api.Trigger_TerminateAllCommands(trig_resp4)
    term_resp5 = api.Trigger_TerminateAllCommands(trig_resp5)

    agg_resp4 = api.Trigger_AggregateCommandsResponse(trig_resp4, term_resp4)
    tc.resp = agg_resp4

    return api.types.status.SUCCESS
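Every Trigger function above follows the same command lifecycle: build a request, queue commands (optionally in the background), execute, terminate any background commands, and aggregate the two responses. A condensed sketch of that flow with an illustrative nc server/client pair (file names are placeholders):

def run_nc_pair(server_wl, client_wl, port):
    req = api.Trigger_CreateExecuteCommandsRequest(serial=True)
    # background=True keeps a long-running command (the server) alive until
    # Trigger_TerminateAllCommands is called.
    api.Trigger_AddCommand(req, server_wl.node_name, server_wl.workload_name,
                           "nc -l %s > out.dat" % port, background=True)
    api.Trigger_AddCommand(req, client_wl.node_name, client_wl.workload_name,
                           "nc %s %s < in.dat" % (server_wl.ip_address, port))
    trig_resp = api.Trigger(req)
    term_resp = api.Trigger_TerminateAllCommands(trig_resp)
    # The aggregate response combines foreground output with the output
    # collected when the background commands were terminated.
    return api.Trigger_AggregateCommandsResponse(trig_resp, term_resp)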
Example #23
0
def ConfigFlowmonSession(tc, num_exports, flowmon_spec_objects):
    result = api.types.status.SUCCESS

    iteration = 0
    tc.test_iterator_data = defaultdict(lambda: dict())

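    # Pre-pick one random destination port per export target; all flowmon
    # sessions reuse the same collector ports.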
    coll_dst_port_list = {}
    for idx in range(num_exports):
        coll_dst_port_list[idx] = random.randint(100, 10000)

    for wl in tc.workload_pairs:
        w1 = wl[0]
        w2 = wl[1]

        peer_wl = w1
        local_wl = w2
        if api.IsNaplesNode(w1.node_name):
            peer_wl = w2
            local_wl = w1
        elif not api.IsNaplesNode(w2.node_name):
            # At least one of the workloads has to be Naples.
            continue
        if not utils.checkIfWorkloadsInSameSubnet(w1, w2):
            # Workloads have to be in the same subnet to trigger ping.
            continue

        #api.Logger.info("updated Match rule spec for W1: {} W2: {}, proto: {}, port: {}".format(
        #            w1.ip_address, w2.ip_address, tc.iterators.proto, tc.port))

        clonedObject = agent_api.CloneConfigObjects(flowmon_spec_objects)
        if len(clonedObject) == 0:
            api.Logger.error(
                "Failed to clone objects for iteration: {}".format(iteration))
            result = api.types.status.FAILURE
            break

        utils.generateFlowmonSpecConfig(iteration, w1, w2, tc.iterators.proto,
                                        tc.port, clonedObject)

        (coll_wl_list, coll_ip_list,
         export_cfg_list) = utils.updateFlowmonExportConfig(
             tc.workloads, num_exports, local_wl, wl_sec_ip_info,
             coll_dst_port_list, clonedObject)

        if len(coll_ip_list) == 0:
            api.Logger.error("Unable to find a valid collector")
            result = api.types.status.FAILURE
            break

        result = agent_api.PushConfigObjects(clonedObject,
                                             [local_wl.node_name])

        #agent_api.PrintConfigObjects(clonedObject)
        if result != api.types.status.SUCCESS:
            api.Logger.error("Unable to push flowmon objects")
            result = api.types.status.FAILURE
            break

        api.Logger.info("collect_wl len: {} export_cfg_list len: {} ".format(
            len(coll_wl_list), len(export_cfg_list)))

        tc.test_iterator_data[iteration] = defaultdict(lambda: dict())
        tc.test_iterator_data[iteration]['local_wl'] = local_wl
        tc.test_iterator_data[iteration]['peer_wl'] = peer_wl
        tc.test_iterator_data[iteration]['coll_wl_list'] = coll_wl_list
        tc.test_iterator_data[iteration]['coll_ip_list'] = coll_ip_list
        tc.test_iterator_data[iteration]['export_cfg_list'] = export_cfg_list
        tc.test_iterator_data[iteration]['del_obj'] = clonedObject

        iteration = iteration + 1
        if iteration == tc.iterators.num_flowmon_sessions:
            break

    return result
Example #24
0
def Trigger(tc):
    #if tc.skip: return api.types.status.SUCCESS

    policies = utils.GetTargetJsons('flowmon', tc.iterators.proto)
    result = api.types.status.SUCCESS

    count = 0
    ret_count = 0
    export_cfg = []
    collector_wl = []
    for policy_json in policies:
        export_cfg.clear()
        collector_wl.clear()
        #pdb.set_trace()
        verif_json = utils.GetVerifJsonFromPolicyJson(policy_json)
        api.Logger.info("Using policy_json = {}".format(policy_json))
        newObjects = agent_api.AddOneConfig(policy_json)
        if len(newObjects) == 0:
            api.Logger.error("Adding new objects to store failed")
            return api.types.status.FAILURE
        ret = agent_api.PushConfigObjects(newObjects)
        if ret != api.types.status.SUCCESS:
            api.Logger.error("Unable to push flowmon objects")
            return api.types.status.FAILURE

        # Get collector to find the workload
        for obj in newObjects:
            for obj_export_cfg in obj.spec.exports:
                export_cfg.append(obj_export_cfg)
                api.Logger.info("export-dest: {} proto: {} port: {}".format(
                    obj_export_cfg.destination,
                    obj_export_cfg.proto_port.protocol,
                    obj_export_cfg.proto_port.port))

        for coll_dst in export_cfg:
            for wl in tc.workloads:
                if (wl.ip_address == coll_dst.destination) or (
                        coll_dst.destination
                        in wl_sec_ip_info[wl.workload_name]):
                    collector_wl.append(wl)

        api.Logger.info("collect_dest len: {} ".format(len(export_cfg)))
        api.Logger.info("collect_wl len: {} ".format(len(collector_wl)))
        collector_info = utils.GetFlowmonCollectorsInfo(
            collector_wl, export_cfg)
        utils.DumpFlowmonSessions()
        ret = utils.RunAll(tc, verif_json, 'flowmon', collector_info,
                           is_wl_type_bm)
        result = ret['res']
        ret_count = ret['count']
        count = count + ret_count
        if result != api.types.status.SUCCESS:
            api.Logger.error("Failed in Traffic validation")
            utils.DumpFlowmonSessions()

        agent_api.DeleteConfigObjects(newObjects)
        agent_api.RemoveConfigObjects(newObjects)
        api.Logger.info(
            "policy_json = {}, count = {}, total_count = {}".format(
                policy_json, ret_count, count))
        if result != api.types.status.SUCCESS:
            api.Logger.info(
                "policy_json = {}, Encountered FAILURE, stopping".format(
                    policy_json))
            break
    tc.SetTestCount(count)
    export_cfg.clear()
    collector_wl.clear()
    return result
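Both the flowmon and mirror examples resolve each export/collector destination IP back to the owning workload by checking the primary address first and then any secondary IPs (the module-level wl_sec_ip_info map used above). A condensed sketch of that lookup, with illustrative names:

def find_collector_workloads(workloads, destinations, sec_ip_map):
    # Map each collector destination IP to the workload that owns it; a
    # workload matches on its primary address or any of its secondary IPs.
    collectors = []
    for dst in destinations:
        for wl in workloads:
            if wl.ip_address == dst or dst in sec_ip_map[wl.workload_name]:
                collectors.append(wl)
    return collectors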
Example #25
0
def Trigger(tc):
    if tc.ignore == True:
        return api.types.status.SUCCESS

    if tc.error == True:
        return api.types.status.FAILURE

    protoDir = api.GetTopologyDirectory() +\
               "/gen/telemetry/{}/{}".format('mirror', tc.iterators.proto)
    api.Logger.info("Template Config files location: ", protoDir)

    result = api.types.status.SUCCESS

    count = 0
    policies = utils.GetTargetJsons('mirror', tc.iterators.proto)
    for policy_json in policies:
        #
        # Get template-Mirror Config
        #
        newObjects = agent_api.AddOneConfig(policy_json)
        if len(newObjects) == 0:
            api.Logger.error("Adding new objects to store failed")
            tc.error = True
            return api.types.status.FAILURE
        agent_api.RemoveConfigObjects(newObjects)

        #
        # Ignore multi-collector template configs, since the Expanded-Telemetry
        # testbundle dynamically creates such configs
        #
        if len(newObjects[0].spec.collectors) > 1:
            continue

        #
        # Modify template-Mirror Config to make sure that the Naples node
        # acts as either the source or the destination
        #
        # Set up the Collector on the remote node
        #
        eutils.generateMirrorConfig(tc, policy_json, newObjects)

        ret_count = 0
        for i in range(0, len(tc.mirror_verif)):
            #
            # If Execution-Optimization is enabled, no need to run the test
            # for the same protocol more than once
            #
            if i > 0 and tc.mirror_verif[i]['protocol'] ==\
                         tc.mirror_verif[i-1]['protocol']:
                continue

            #
            # Flow-ERSPAN for TCP-traffic is not tested (yet) in
            # Classic-mode until applicable pkt-trigger tools are identified
            #
            if tc.classic_mode == True and\
               tc.mirror_verif[i]['protocol'] == 'tcp':
                continue

            #
            # Push Mirror Config to Naples
            #
            ret = agent_api.PushConfigObjects(newObjects,
                                              [tc.naples.node_name],
                                              [tc.naples_device_name])
            if ret != api.types.status.SUCCESS:
                api.Logger.error("Unable to push mirror objects")
                tc.error = True
                return api.types.status.FAILURE

            #
            # Establish Forwarding set up between Naples-peer and Collectors
            #
            eutils.establishForwardingSetup(tc)

            req_tcpdump_erspan = api.Trigger_CreateExecuteCommandsRequest(\
                                 serial = True)
            for c in range(0, len(tc.flow_collector)):
                #
                # Set up TCPDUMP's on the collector
                #
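                # ERSPAN is GRE-encapsulated, hence the "ip proto gre"
                # capture filter.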
                idx = tc.flow_collector_idx[c]
                if tc.flow_collector[c].IsNaples():
                    cmd = "tcpdump -c 600 -XX -vv -nni {} ip proto gre and\
                           dst {} --immediate-mode -U -w flow-mirror-{}.pcap"\
                          .format(tc.flow_collector[c].interface,
                                  tc.collector_ip_address[idx], c)
                else:
                    cmd = "tcpdump -p -c 600 -XX -vv -nni {} ip proto gre and\
                           dst {} --immediate-mode -U -w flow-mirror-{}.pcap"\
                          .format(tc.flow_collector[c].interface,
                                  tc.collector_ip_address[idx], c)
                eutils.add_command(req_tcpdump_erspan, tc.flow_collector[c],
                                   cmd, True)

            resp_tcpdump_erspan = api.Trigger(req_tcpdump_erspan)
            for cmd in resp_tcpdump_erspan.commands:
                api.PrintCommandResults(cmd)

            #
            # Classic mode requires a delay to make sure that TCPDUMP background
            # process is fully up
            #
            if tc.classic_mode == True:
                time.sleep(2)

            #
            # Trigger packets for ERSPAN to take effect
            #
            tc.protocol = tc.mirror_verif[i]['protocol']
            tc.dest_port = utils.GetDestPort(tc.mirror_verif[i]['port'])
            if api.GetNodeOs(tc.naples.node_name) == 'linux':
                eutils.triggerTrafficInClassicModeLinux(tc)
            else:
                eutils.triggerTrafficInHostPinModeOrFreeBSD(tc)

            #
            # Dump sessions/flows/P4-tables for debug purposes
            #
            eutils.showSessionAndP4TablesForDebug(tc, tc.flow_collector,
                                                  tc.flow_collector_idx)

            #
            # Terminate TCPDUMP background process
            #
            term_resp_tcpdump_erspan = api.Trigger_TerminateAllCommands(\
                                       resp_tcpdump_erspan)
            tc.resp_tcpdump_erspan = api.Trigger_AggregateCommandsResponse(\
                              resp_tcpdump_erspan, term_resp_tcpdump_erspan)

            # Delete the objects
            agent_api.DeleteConfigObjects(newObjects, [tc.naples.node_name],
                                          [tc.naples_device_name])

            #
            # Make sure that Mirror-config has been removed
            #
            tc.resp_cleanup = eutils.showP4TablesForValidation(tc)

            #
            # Validate ERSPAN packets reception
            #
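            # UDP/ICMP expected counts are doubled (<< 1) since both
            # directions of each flow are mirrored.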
            tc.tcp_erspan_pkts_expected = \
                               NUMBER_OF_TCP_ERSPAN_PACKETS_PER_SESSION
            tc.udp_erspan_pkts_expected = (tc.udp_count << 1)
            tc.icmp_erspan_pkts_expected = (tc.icmp_count << 1)

            if tc.protocol == 'udp' and tc.iterators.proto == 'mixed':
                tc.protocol = 'udp-mixed'
                tc.icmp_erspan_pkts_expected = tc.udp_erspan_pkts_expected

            res_1 = eutils.validateErspanPackets(tc, tc.flow_collector,
                                                 tc.flow_collector_idx)

            #
            # Validate Config-cleanup
            #
            res_2 = eutils.validateConfigCleanup(tc)

            if res_1 == api.types.status.FAILURE or\
               res_2 == api.types.status.FAILURE:
                result = api.types.status.FAILURE

            if result == api.types.status.FAILURE:
                break

            ret_count += 1

        if result == api.types.status.FAILURE:
            break

        count += ret_count

    tc.SetTestCount(count)

    return result
Example #26
0
def Trigger(tc):

    # Query returns references to the objects in the store.
    store_profile_objects = netagent_cfg_api.QueryConfigs(kind='SecurityProfile')
    if len(store_profile_objects) == 0:
        api.Logger.error("No security profile objects in store")
        return api.types.status.FAILURE

    # Get returns a copy of the objects pushed to the agent.
    get_config_objects = netagent_cfg_api.GetConfigObjects(store_profile_objects)
    if len(get_config_objects) == 0:
        api.Logger.error("Unable to fetch security profile objects")
        return api.types.status.FAILURE

    if len(get_config_objects) != len(store_profile_objects):
        api.Logger.error("Config mismatch, Get Objects : %d, Config store Objects : %d"
        % (len(get_config_objects), len(store_profile_objects)))
        return api.types.status.FAILURE

    # Now update the objects.
    for obj in store_profile_objects:
        obj.spec.timeouts.tcp_connection_setup = "1200s"
        obj.spec.timeouts.tcp_half_close = "1400s"

    # Push the update we just made.
    netagent_cfg_api.UpdateConfigObjects(store_profile_objects)
    # Get returns a copy of the objects pushed to the agent.
    new_get_config_objects = netagent_cfg_api.GetConfigObjects(store_profile_objects)
    if len(new_get_config_objects) == 0:
        api.Logger.error("Unable to fetch security profile objects after update")
        return api.types.status.FAILURE

    # Verify the update took effect: the fetched values must match the
    # updated store values and differ from the originals.
    for (get_object,store_object,old_object) in zip(new_get_config_objects, store_profile_objects, get_config_objects):
        if get_object.spec.timeouts.tcp_connection_setup != store_object.spec.timeouts.tcp_connection_setup or \
            old_object.spec.timeouts.tcp_connection_setup == store_object.spec.timeouts.tcp_connection_setup or \
            old_object.spec.timeouts.tcp_connection_setup ==  get_object.spec.timeouts.tcp_connection_setup:
            api.Logger.error("Update failed")
            return api.types.status.FAILURE


    # Now restore the values to the old ones.
    for obj, old_object in zip(store_profile_objects, get_config_objects):
        obj.spec.timeouts.tcp_connection_setup = old_object.spec.timeouts.tcp_connection_setup
        obj.spec.timeouts.tcp_half_close = old_object.spec.timeouts.tcp_half_close


    # Push the restore update.
    netagent_cfg_api.UpdateConfigObjects(store_profile_objects)
    # Get returns a copy of the objects pushed to the agent.
    new_get_config_objects = netagent_cfg_api.GetConfigObjects(store_profile_objects)
    if len(new_get_config_objects) == 0:
        api.Logger.error("Unable to fetch security profile objects after update")
        return api.types.status.FAILURE

    for (get_object,store_object,old_object) in zip(new_get_config_objects, store_profile_objects, get_config_objects):
        if get_object.spec.timeouts.tcp_connection_setup != store_object.spec.timeouts.tcp_connection_setup or \
            old_object.spec.timeouts.tcp_connection_setup != store_object.spec.timeouts.tcp_connection_setup or \
            old_object.spec.timeouts.tcp_connection_setup !=  get_object.spec.timeouts.tcp_connection_setup:
            api.Logger.error("Second Update failed")
            return api.types.status.FAILURE

    # Now delete the objects.

    netagent_cfg_api.DeleteConfigObjects(store_profile_objects)
    # Get returns a copy of the objects pushed to the agent.
    get_config_objects = netagent_cfg_api.GetConfigObjects(store_profile_objects)
    if len(get_config_objects) != 0:
        api.Logger.error("Delete of objects failed")
        return api.types.status.FAILURE

    netagent_cfg_api.PushConfigObjects(store_profile_objects)
    # Get returns a copy of the objects pushed to the agent.
    get_config_objects = netagent_cfg_api.GetConfigObjects(store_profile_objects)
    if len(get_config_objects) == 0:
        api.Logger.error("Unable to fetch security profile objects")
        return api.types.status.FAILURE


    newObjects = netagent_cfg_api.AddOneConfig(api.GetTopologyDirectory() + "/test_cfg/test_security_profile.json")
    if len(newObjects) == 0:
        api.Logger.error("Adding new objects to store failed")
        return api.types.status.FAILURE

    nodes = api.GetNaplesHostnames()
    push_nodes = [nodes[0]]
    ret = netagent_cfg_api.PushConfigObjects(newObjects, node_names = push_nodes)
    if ret != api.types.status.SUCCESS:
        api.Logger.error("Unable to fetch security profile objects to node %s" % nodes[0])
        return api.types.status.FAILURE

    # Get returns a copy of the objects pushed to the agent.
    get_config_objects = netagent_cfg_api.GetConfigObjects(newObjects, node_names = push_nodes)
    if len(get_config_objects) == 0:
        api.Logger.error("Unable to fetch newly pushed objects")
        return api.types.status.FAILURE

    # Delete the objects that were pushed.
    netagent_cfg_api.DeleteConfigObjects(get_config_objects)
    # Get returns a copy of the objects pushed to the agent.
    get_config_objects = netagent_cfg_api.GetConfigObjects(newObjects, node_names = push_nodes)
    if len(get_config_objects) != 0:
        api.Logger.error("Delete of new objects failed")
        return api.types.status.FAILURE

    # Remove those objects completely from the store too.
    ret = netagent_cfg_api.RemoveConfigObjects(newObjects)
    if ret != api.types.status.SUCCESS:
        api.Logger.error("Remove of new objects failed")
        return api.types.status.FAILURE

    return api.types.status.SUCCESS
Example #27
0
def Trigger(tc):
    #if tc.skip: return api.types.status.SUCCESS

    policies = utils.GetTargetJsons('mirror', tc.iterators.proto)
    result = api.types.status.SUCCESS
    
    count = 0
    ret_count = 0
    collector_dest = []
    collector_wl = []
    collector_type = []
    for policy_json in policies:
        collector_dest.clear()
        collector_wl.clear()
        collector_type.clear()

        verif_json = utils.GetVerifJsonFromPolicyJson(policy_json)
        api.Logger.info("Using policy_json = {}".format(policy_json))
        newObjects = agent_api.AddOneConfig(policy_json)
        if len(newObjects) == 0:
            api.Logger.error("Adding new objects to store failed")
            return api.types.status.FAILURE
        ret = agent_api.PushConfigObjects(newObjects)
        if ret != api.types.status.SUCCESS:
            api.Logger.error("Unable to push mirror objects")
            return api.types.status.FAILURE
        utils.DumpMirrorSessions()

        # Get collector to find the workload
        for obj in newObjects:
            for obj_collector in obj.spec.collectors:
                coll_dst = obj_collector.export_config.destination
                coll_type = obj_collector.type
                collector_dest.append(coll_dst)
                collector_type.append(coll_type)
                api.Logger.info(f"export-dest: {coll_dst}, erspan-type: {coll_type}")

        for coll_dst in collector_dest:
            for wl in tc.workloads:
                if (wl.ip_address == coll_dst) or (coll_dst in wl_sec_ip_info[wl.workload_name]):
                    collector_wl.append(wl)

        api.Logger.info("collect_dest len: {} collect_wl len: {}".format(len(collector_dest), len(collector_wl)))
        collector_info = utils.GetMirrorCollectorsInfo(collector_wl, collector_dest, collector_type)
        ret = utils.RunAll(tc, verif_json, 'mirror', collector_info, is_wl_type_bm)
        result = ret['res']
        ret_count = ret['count']
        count = count + ret_count

        if result != api.types.status.SUCCESS:
            api.Logger.info("policy_json = {}, Encountered FAILURE, stopping".format(policy_json))
            # Delete the objects
            agent_api.DeleteConfigObjects(newObjects)
            agent_api.RemoveConfigObjects(newObjects)
            break

        # Update collector
        newObjects = agent_api.QueryConfigs(kind='MirrorSession')
        # mirror config update to local collector is applicable only for ESX topology
        if is_wl_type_bm is False:
            for obj in newObjects:
                if obj.spec.collectors[0].type == utils.ERSPAN_TYPE_2:
                    obj.spec.collectors[0].type = utils.ERSPAN_TYPE_3
                    collector_info[0]['type'] = utils.ERSPAN_TYPE_3
                else:
                    obj.spec.collectors[0].type = utils.ERSPAN_TYPE_2
                    collector_info[0]['type'] = utils.ERSPAN_TYPE_2
                break

            # Now push the update as we modified
            agent_api.UpdateConfigObjects(newObjects)
            utils.DumpMirrorSessions()

            # Rerun the tests
            ret = utils.RunAll(tc, verif_json, 'mirror', collector_info, is_wl_type_bm)
            result = ret['res']
            ret_count = ret['count']
            count = count + ret_count

        # Delete the objects
        agent_api.DeleteConfigObjects(newObjects)
        agent_api.RemoveConfigObjects(newObjects)
        api.Logger.info("policy_json = {}, count = {}, total_count = {}".format(policy_json, ret_count, count))
        if result != api.types.status.SUCCESS:
            api.Logger.info("policy_json = {}, Encountered FAILURE, stopping".format(policy_json))
            break
    tc.SetTestCount(count)
    collector_dest.clear()
    collector_wl.clear()
    return result
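
# A minimal sketch (not part of the original test): the collector-type flip
# above could be factored out. Assumes MirrorSession objects expose
# spec.collectors[0].type as in this example; _toggle_erspan_type is a
# hypothetical helper name.
def _toggle_erspan_type(mirror_objs, collector_info):
    for obj in mirror_objs:
        # Flip the first collector between ERSPAN type 2 and type 3 so the
        # rerun exercises the other encapsulation
        new_type = (utils.ERSPAN_TYPE_3
                    if obj.spec.collectors[0].type == utils.ERSPAN_TYPE_2
                    else utils.ERSPAN_TYPE_2)
        obj.spec.collectors[0].type = new_type
        collector_info[0]['type'] = new_type
        break
    return agent_api.UpdateConfigObjects(mirror_objs)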
Example #28
0
def Trigger(tc):
    tc.cmd_cookies = []

    pairs = api.GetRemoteWorkloadPairs()
    if not pairs:
        api.Logger.error("No remote workload pairs available")
        return api.types.status.FAILURE
    w1 = pairs[0][0]
    w2 = pairs[0][1]

    if w1.IsNaples() and w2.IsNaples():
        api.Logger.info("naples-naples unsupported currently for tcp-proxy")
        return api.types.status.DISABLED

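    # Pull any TCPProxyPolicy objects already staged in the config store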
    store_proxy_objects = netagent_cfg_api.QueryConfigs(kind='TCPProxyPolicy')
    if len(store_proxy_objects) == 0:
        api.Logger.error("No tcp proxy objects in store")
        return api.types.status.FAILURE

    ret = netagent_cfg_api.PushConfigObjects(store_proxy_objects,
                                             ignore_error=True)
    if ret != api.types.status.SUCCESS:
        api.Logger.error("Unable to push tcp_proxy policies")
        return api.types.status.FAILURE

    get_config_objects = netagent_cfg_api.GetConfigObjects(store_proxy_objects)
    if len(get_config_objects) == 0:
        api.Logger.error("Unable to fetch newly pushed objects")
        return api.types.status.FAILURE

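    # Queue 'before' session dumps, the nc transfer, then 'after' dumps;
    # serial=True makes the commands run in that order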
    req = api.Trigger_CreateExecuteCommandsRequest(serial=True)

    if w2.IsNaples():
        cmd_cookie = "tcp proxy sessions on %s BEFORE running nc" % (
            w2.node_name)
        api.Trigger_AddNaplesCommand(req, w2.node_name,
                                     "/nic/bin/halctl show tcp-proxy session")
        tc.cmd_cookies.append(cmd_cookie)

    if w1.IsNaples():
        cmd_cookie = "tcp proxy sessions on %s BEFORE running nc" % (
            w1.node_name)
        api.Trigger_AddNaplesCommand(req, w1.node_name,
                                     "/nic/bin/halctl show tcp-proxy session")
        tc.cmd_cookies.append(cmd_cookie)

    tc.cmd_descr = "Client: %s(%s) <--> Server: %s(%s) on tcp proxy port %s" %\
                   (w1.workload_name, w1.ip_address, w2.workload_name, w2.ip_address, tc.iterators.port)

    api.Logger.info("Starting netcat test from %s" % (tc.cmd_descr))

    cmd_cookie = "Creating test file on %s" % (w1.workload_name)
    api.Trigger_AddCommand(
        req, w1.node_name, w1.workload_name,
        "base64 /dev/urandom | head -1000 > tcp_proxy_client.dat")
    tc.cmd_cookies.append(cmd_cookie)

    cmd_cookie = "Running nc server on %s" % (w2.workload_name)
    api.Trigger_AddCommand(req,
                           w2.node_name,
                           w2.workload_name,
                           "nc -l %s > tcp_proxy_server.dat" %
                           (tc.iterators.port),
                           background=True)
    tc.cmd_cookies.append(cmd_cookie)

    cmd_cookie = "Running nc client on %s" % (w1.workload_name)
    api.Trigger_AddCommand(
        req, w1.node_name, w1.workload_name,
        "nc %s %s < tcp_proxy_client.dat" % (w2.ip_address, tc.iterators.port))
    tc.cmd_cookies.append(cmd_cookie)

    if w1.IsNaples():
        cmd_cookie = "tcp proxy sessions on %s AFTER running nc" % (
            w1.node_name)
        api.Trigger_AddNaplesCommand(req, w1.node_name,
                                     "/nic/bin/halctl show tcp-proxy session")
        tc.cmd_cookies.append(cmd_cookie)

    if w2.IsNaples():
        cmd_cookie = "tcp proxy sessions on %s AFTER running nc" % (
            w2.node_name)
        api.Trigger_AddNaplesCommand(req, w2.node_name,
                                     "/nic/bin/halctl show tcp-proxy session")
        tc.cmd_cookies.append(cmd_cookie)

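    # Execute everything queued above, then terminate the background nc server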
    trig_resp = api.Trigger(req)
    term_resp = api.Trigger_TerminateAllCommands(trig_resp)

    tc.resp = api.Trigger_AggregateCommandsResponse(trig_resp, term_resp)

    resp = api.CopyFromWorkload(w1.node_name, w1.workload_name,
                                ['tcp_proxy_client.dat'], tc.GetLogsDir())
    if resp is None:
        api.Logger.error("Could not find tcp_proxy_client.dat")
        return api.types.status.FAILURE
    resp = api.CopyFromWorkload(w2.node_name, w2.workload_name,
                                ['tcp_proxy_server.dat'], tc.GetLogsDir())
    if resp is None:
        api.Logger.error("Could not find tcp_proxy_server.dat")
        return api.types.status.FAILURE
    return api.types.status.SUCCESS
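
# A minimal sketch (not part of the original test): a matching Verify step
# could compare the two files copied into the logs directory by Trigger.
# Assumes tc.GetLogsDir() holds tcp_proxy_client.dat and tcp_proxy_server.dat
# as populated above.
import filecmp
import os

def Verify(tc):
    client = os.path.join(tc.GetLogsDir(), 'tcp_proxy_client.dat')
    server = os.path.join(tc.GetLogsDir(), 'tcp_proxy_server.dat')
    # The proxied transfer is correct only if both files match byte-for-byte
    if not filecmp.cmp(client, server, shallow=False):
        api.Logger.error("tcp proxy data mismatch between client and server")
        return api.types.status.FAILURE
    return api.types.status.SUCCESS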