Exemplo n.º 1
0
def Trigger(tc):
    """Stress-cycle the testcase config: delete then re-push the endpoint
    and network objects for tc.iters iterations.

    Stops at the first failing agent operation.  The last agent status is
    stored in tc.ret and returned.
    """
    # Start as SUCCESS so tc.ret / the return value are defined even when
    # tc.iters == 0 (the original left `ret` unbound in that case).
    ret = api.types.status.SUCCESS
    for i in range(0, tc.iters):
        api.Logger.info(" ################### ITER %s  ###################" %
                        (i + 1))
        # Endpoints are deleted before networks and pushed after them --
        # presumably endpoints depend on networks (TODO confirm).
        ret = agent_api.DeleteConfigObjects(tc.epObjects)
        if ret != api.types.status.SUCCESS:
            # Fixed copy-paste error: this branch handles endpoint objects.
            api.Logger.error("Iter: %s Failed to delete endpoint object" %
                             (i + 1))
            break
        ret = agent_api.DeleteConfigObjects(tc.nwObjects)
        if ret != api.types.status.SUCCESS:
            api.Logger.error("Iter: %s Failed to delete network object" %
                             (i + 1))
            break

        ret = agent_api.PushConfigObjects(tc.nwObjects)
        if ret != api.types.status.SUCCESS:
            api.Logger.error("Iter: %s Failed to push network object" %
                             (i + 1))
            break
        ret = agent_api.PushConfigObjects(tc.epObjects)
        if ret != api.types.status.SUCCESS:
            # Fixed copy-paste error: this branch handles endpoint objects.
            api.Logger.error("Iter: %s Failed to push endpoint object" %
                             (i + 1))
            break

    tc.ret = ret
    return ret
Exemplo n.º 2
0
def configurationChangeEvent(tc):
    """Exercise config churn: cycle security policies and then telemetry
    (mirror/flowmon) policies through push/delete/remove.

    Honors tc.cancel both on entry (process exit) and between iterations
    (clean SUCCESS return).  Individual agent failures are logged but do
    not abort the run.
    """
    if tc.cancel:
        api.Logger.info("Canceling configurationChangeEvent...")
        sys.exit(0)

    api.Logger.info("Running configurationChangeEvent...")
    for proto in ["tcp", "udp"]:
        policies = utils.GetTargetJsons(proto)
        for policy_json in policies:
            # Delete allow-all policy
            agent_api.DeleteSgPolicies()
            api.Logger.info("Pushing Security policy: %s " % (policy_json))
            newObjects = agent_api.AddOneConfig(policy_json)
            ret = agent_api.PushConfigObjects(newObjects)
            if ret != api.types.status.SUCCESS:
                api.Logger.error("Failed to push policies for %s" %
                                 policy_json)
            # Compare against the status enum explicitly, matching the push
            # check above (a bare truthiness test mis-reports when the
            # SUCCESS status value is truthy).
            if agent_api.DeleteConfigObjects(newObjects) != \
               api.types.status.SUCCESS:
                api.Logger.error("Failed to delete config object for %s" %
                                 policy_json)
            if agent_api.RemoveConfigObjects(newObjects) != \
               api.types.status.SUCCESS:
                api.Logger.error("Failed to remove config object for %s" %
                                 policy_json)
            # Restore allow-all policy
            agent_api.PushConfigObjects(
                agent_api.QueryConfigs(kind='NetworkSecurityPolicy'))

            if tc.cancel:
                return api.types.status.SUCCESS

    for proto in ['tcp', 'udp', 'icmp', 'mixed', 'scale']:
        # NOTE(review): bare GetTargetJsons (no utils. prefix, unlike the
        # loop above) -- assumed to be a module-local helper taking
        # (kind, proto); confirm it resolves at module scope.
        mirrorPolicies = GetTargetJsons('mirror', proto)
        flowmonPolicies = GetTargetJsons('flowmon', proto)
        for mp_json, fp_json in zip(mirrorPolicies, flowmonPolicies):
            mpObjs = agent_api.AddOneConfig(mp_json)
            fpObjs = agent_api.AddOneConfig(fp_json)
            ret = agent_api.PushConfigObjects(mpObjs + fpObjs)
            if ret != api.types.status.SUCCESS:
                api.Logger.error("Failed to push the telemetry objects")
            ret = agent_api.DeleteConfigObjects(fpObjs + mpObjs)
            if ret != api.types.status.SUCCESS:
                api.Logger.error("Failed to delete the telemetry objects")
            ret = agent_api.RemoveConfigObjects(mpObjs + fpObjs)
            if ret != api.types.status.SUCCESS:
                api.Logger.error("Failed to remove the telemetry objects")

            if tc.cancel:
                return api.types.status.SUCCESS

    return api.types.status.SUCCESS
Exemplo n.º 3
0
def create_ep_info(tc, wl, dest_node, migr_state, src_node):
    """Seed *dest_node* with an Endpoint object for workload *wl* ahead of
    a migration, pointing its status back at *src_node*.

    Defaults tc.dsc_conn_type to 'oob' when unset, which selects the NIC
    management IP (vs. the bond IP) as the homing host address for a
    Naples source node.  Pushing the object triggers the new host to set
    up flows.
    """
    # get a naples handle to move to
    ep_filter = "meta.name=" + wl.workload_name + ";"
    if not hasattr(tc, 'dsc_conn_type'):
        # Fixed log typo ("seeing" -> "setting").
        api.Logger.info(" setting dsc_conn_type to oob")
        tc.dsc_conn_type = 'oob'
    objects = agent_api.QueryConfigs("Endpoint", filter=ep_filter)
    assert (len(objects) == 1)
    # Renamed from `object` to avoid shadowing the builtin; deep-copy so
    # the stored config object is left untouched.
    ep_obj = copy.deepcopy(objects[0])
    # delete endpoint being moved on new host, TEMP
    agent_api.DeleteConfigObjects([ep_obj], [dest_node], ignore_error=True)

    # sleep to let delete cleanup all sessions/handles
    time.sleep(1)

    ep_obj.spec.node_uuid = tc.uuidMap[dest_node]
    ep_obj.spec.migration = migr_state
    if api.IsNaplesNode(src_node):
        ep_obj.status.node_uuid = tc.uuidMap[src_node]
        if tc.dsc_conn_type == "oob":
            ep_obj.spec.homing_host_address = api.GetNicMgmtIP(src_node)
        else:
            ep_obj.spec.homing_host_address = api.GetBondIp(src_node)
    else:
        ep_obj.status.node_uuid = "0011.2233.4455"  # TEMP
        ep_obj.spec.homing_host_address = "169.169.169.169"  # TEMP
    # this triggers endpoint on new host(naples) to setup flows
    agent_api.PushConfigObjects([ep_obj], [dest_node], ignore_error=True)
Exemplo n.º 4
0
def Teardown(tc):
    """Best-effort cleanup: delete and drop any objects this testcase pushed."""
    api.Logger.info("Tearing down ...")
    pushed = tc.newObjects
    if pushed:
        agent_api.DeleteConfigObjects(pushed)
        agent_api.RemoveConfigObjects(pushed)

    return api.types.status.SUCCESS
Exemplo n.º 5
0
def Trigger(tc):
    """Replace whatever NetworkSecurityPolicy objects exist with the
    topology's sgpolicy.json; return the push status."""
    stale = agent_api.QueryConfigs(kind="NetworkSecurityPolicy")
    agent_api.DeleteConfigObjects(stale)
    agent_api.RemoveConfigObjects(stale)

    policy_path = "/".join([api.GetTopologyDirectory(), "sgpolicy.json"])
    fresh = agent_api.AddOneConfig(policy_path)
    status = agent_api.PushConfigObjects(fresh)
    if status != api.types.status.SUCCESS:
        api.Logger.error("Failed to push nwsec policy")
        return status
    return api.types.status.SUCCESS
Exemplo n.º 6
0
def Teardown(tc):
    """Undo testcase setup: drop the secondary IP, then delete any pushed
    config objects."""
    api.Logger.info("Tearing down ...")

    # Remove secondary IP address
    utils.RemoveSecondaryAddress(tc)

    # Delete the objects
    pushed = tc.newObjects
    if pushed:
        agent_api.DeleteConfigObjects(pushed)
        agent_api.RemoveConfigObjects(pushed)

    return api.types.status.SUCCESS
Exemplo n.º 7
0
def Trigger(tc):
    """Hammer push/delete of the current NetworkSecurityPolicy set 100
    times, stopping at the first failed push.

    The last push status is stored in tc.ret and returned.
    """
    policy_objs = agent_api.QueryConfigs(kind='NetworkSecurityPolicy')
    status = api.types.status.SUCCESS

    for _ in range(100):
        status = agent_api.PushConfigObjects(policy_objs)
        agent_api.DeleteConfigObjects(policy_objs)
        if status != api.types.status.SUCCESS:
            break

    agent_api.RemoveConfigObjects(policy_objs)
    tc.ret = status
    return status
Exemplo n.º 8
0
def __delete_endpoint_info(tc):
    """Delete the Endpoint object from each moved workload's previous
    (Naples) home host; non-Naples homes are skipped."""
    api.Logger.debug(
        "Deleting endpoint info from CurrentHome of moved workloads")
    for wload, host in tc.vmotion_cntxt.CurrentHome.items():
        if api.IsNaplesNode(host):
            api.Logger.debug("Deleting ep-info at %s for wload: %s" %
                             (host, wload.workload_name))
            ep_query = "meta.name=" + wload.workload_name + ";"
            matches = agent_api.QueryConfigs("Endpoint", filter=ep_query)
            # Exactly one Endpoint is expected per workload name.
            assert (len(matches) == 1)
            agent_api.DeleteConfigObjects(matches[:1], [host],
                                          ignore_error=True)
    return
Exemplo n.º 9
0
def Trigger(tc):
    """Push, update, delete and remove mirror+flowmon telemetry objects
    built from the 'scale' policy JSONs.

    Returns SUCCESS when every iteration completes, FAILURE on the first
    failing agent operation.
    """
    result = api.types.status.SUCCESS
    mirrorPolicies = utils.GetTargetJsons('mirror', "scale")
    flowmonPolicies = utils.GetTargetJsons('flowmon', "scale")
    #colPolicies = utils.GetTargetJsons('mirror', "collector")
    iters = getattr(tc.args, "iters", 10)
    # NOTE(review): this hard-coded override makes the getattr above dead
    # code -- looks like a leftover debug setting; confirm before removing.
    iters = 1
    # Both names are rebound inside the loop before use; the shared empty
    # list here only matters if the zip below is empty.
    mpObjs = fpObjs = []
    for mp_json, fp_json in zip(mirrorPolicies, flowmonPolicies):
        for i in range(iters):
            #
            # Push Mirror Session and Flow Export objects
            #
            mpObjs = agent_api.AddOneConfig(mp_json)
            fpObjs = agent_api.AddOneConfig(fp_json)
            ret = agent_api.PushConfigObjects(mpObjs + fpObjs)
            if ret != api.types.status.SUCCESS:
                api.Logger.error("Failed to push the telemetry objects")
                return api.types.status.FAILURE

            #
            # Update Mirror Session and Flow Export objects
            #
            mpObjs = UpdateMirrorSessionObjects(mpObjs)
            fpObjs = UpdateFlowMonitorObjects(fpObjs)
            ret = agent_api.UpdateConfigObjects(mpObjs + fpObjs)
            if ret != api.types.status.SUCCESS:
                api.Logger.error("Failed to update the telemetry objects")
                return api.types.status.FAILURE

            #
            # Delete Mirror Session and Flow Export objects
            #
            ret = agent_api.DeleteConfigObjects(fpObjs + mpObjs)
            if ret != api.types.status.SUCCESS:
                api.Logger.error("Failed to delete the telemetry objects")
                return api.types.status.FAILURE
            ret = agent_api.RemoveConfigObjects(mpObjs + fpObjs)
            if ret != api.types.status.SUCCESS:
                api.Logger.error("Failed to remove the telemetry objects")
                return api.types.status.FAILURE

    return result
Exemplo n.º 10
0
def create_ep_info(tc, wl, new_node, migr_state, old_node):
    """Seed *new_node* with an Endpoint object for workload *wl* ahead of
    a migration, pointing its status back at *old_node*.

    Pushing the object triggers the new host (Naples) to set up flows.
    """
    # get a naples handle to move to
    ep_filter = "meta.name=" + wl.workload_name + ";"
    objects = agent_api.QueryConfigs("Endpoint", filter=ep_filter)
    assert (len(objects) == 1)
    # Renamed from `object` to avoid shadowing the builtin; deep-copy so
    # the stored config object is left untouched.
    ep_obj = copy.deepcopy(objects[0])
    # delete endpoint being moved on new host, TEMP
    agent_api.DeleteConfigObjects([ep_obj], [new_node], ignore_error=True)

    ep_obj.spec.node_uuid = tc.uuidMap[new_node]
    ep_obj.spec.migration = migr_state
    if api.IsNaplesNode(old_node):
        ep_obj.status.node_uuid = tc.uuidMap[old_node]
        ep_obj.spec.homing_host_address = api.GetNicMgmtIP(old_node)
    else:
        ep_obj.status.node_uuid = "0011.2233.4455"  # TEMP
        ep_obj.spec.homing_host_address = "169.169.169.169"  # TEMP
    # this triggers endpoint on new host(naples) to setup flows
    agent_api.PushConfigObjects([ep_obj], [new_node], ignore_error=True)
Exemplo n.º 11
0
def Trigger(tc):
    """Push each netagent-expansion policy, failing the test if any push
    takes longer than 120 seconds.

    Objects are deleted and removed after each push regardless of outcome.
    """
    policies = utils.GetTargetJsons("netagent-expansion")
    sg_json_obj = None

    for policy_json in policies:
        # NOTE(review): the parsed JSON is never used beyond proving the
        # file is readable -- confirm before removing.
        sg_json_obj = utils.ReadJson(policy_json)
        newObjects = agent_api.AddOneConfig(policy_json)
        start = time.time()
        tc.ret = agent_api.PushConfigObjects(newObjects)
        end = time.time()
        diff = end - start

        if diff > 120:
            # Bug fix: the elapsed time was never passed to format(), so
            # the literal "{}" was logged.
            api.Logger.info(
                "Time taken to push configs is {} seconds.".format(diff))
            tc.ret = api.types.status.FAILURE

        agent_api.DeleteConfigObjects(newObjects)
        agent_api.RemoveConfigObjects(newObjects)

        if tc.ret == api.types.status.FAILURE:
            return api.types.status.FAILURE

    return api.types.status.SUCCESS
Exemplo n.º 12
0
def __create_endpoint_info(tc):
    """Pre-create endpoint state on each vMotion destination Naples host.

    For every workload in tc.vmotion_cntxt.MoveRequest: deletes any stale
    Endpoint object on the destination, then pushes a copy with migration
    set to "START" and its status pointed back at the workload's current
    home host.  Non-Naples destinations are skipped.
    """
    time.sleep(
        5)  # trying to run vmotion and config update concurrently (hack)

    for dest_host, workloads in tc.vmotion_cntxt.MoveRequest.items():
        api.Logger.debug(
            "Creating endpoint info at %s for workloads being moved" %
            dest_host)
        if not api.IsNaplesNode(dest_host):
            continue
        for wl in workloads:
            api.Logger.debug("Updating ep-info for %s" % wl.workload_name)
            ep_filter = "meta.name=" + wl.workload_name + ";"
            objects = agent_api.QueryConfigs("Endpoint", filter=ep_filter)
            # Exactly one Endpoint is expected per workload name.
            assert (len(objects) == 1)
            # Deep-copy so the stored config object is left untouched.
            obj = copy.deepcopy(objects[0])
            # delete endpoint being moved on new host, TEMP
            resp = agent_api.DeleteConfigObjects([obj], [dest_host],
                                                 ignore_error=True)
            # Failure is logged but deliberately not fatal (best-effort).
            if resp != api.types.status.SUCCESS:
                api.Logger.error("DeleteConfigObjects failed for %s for %s" %
                                 (wl.workload_name, dest_host))

            obj.spec.node_uuid = tc.vmotion_cntxt.UUIDMap[dest_host]
            obj.spec.migration = "START"
            current_host = tc.vmotion_cntxt.CurrentHome[wl]
            if (api.IsNaplesNode(current_host)):
                obj.status.node_uuid = tc.vmotion_cntxt.UUIDMap[current_host]
                obj.spec.homing_host_address = api.GetNicMgmtIP(current_host)
            else:
                # Placeholder values for non-Naples current homes.
                obj.status.node_uuid = "0011.2233.4455"  # TEMP
                obj.spec.homing_host_address = "169.169.169.169"  # TEMP

            # this triggers endpoint on new host(naples) to setup flows
            agent_api.PushConfigObjects([obj], [dest_host], ignore_error=True)
    api.Logger.debug("Completed endpoint info creation at NewHome")
    return
Exemplo n.º 13
0
def Trigger(tc):
    """Flow-ERSPAN mirror test: for each single-collector mirror policy,
    push the config to the Naples node, capture GRE/ERSPAN packets on the
    collectors with tcpdump, trigger traffic per protocol, then delete the
    config and validate both the captured packets and the cleanup.

    Counts successful per-protocol runs into tc.SetTestCount(); returns
    SUCCESS or FAILURE.
    """
    if tc.ignore == True:
        return api.types.status.SUCCESS

    if tc.error == True:
        return api.types.status.FAILURE

    protoDir = api.GetTopologyDirectory() +\
               "/gen/telemetry/{}/{}".format('mirror', tc.iterators.proto)
    # NOTE(review): Logger.info is given two positional args here, not a
    # formatted string -- confirm the project logger concatenates them.
    api.Logger.info("Template Config files location: ", protoDir)

    result = api.types.status.SUCCESS

    count = 0
    policies = utils.GetTargetJsons('mirror', tc.iterators.proto)
    for policy_json in policies:
        #
        # Get template-Mirror Config
        #
        newObjects = agent_api.AddOneConfig(policy_json)
        if len(newObjects) == 0:
            api.Logger.error("Adding new objects to store failed")
            tc.error = True
            return api.types.status.FAILURE
        agent_api.RemoveConfigObjects(newObjects)

        #
        # Ignore Multi-collector template config's, since Expanded-Telemetry
        # testbundle dynamically creates such config's
        #
        if len(newObjects[0].spec.collectors) > 1:
            continue

        #
        # Modify template-Mirror Config to make sure that Naples-node
        # act as either source or destination
        #
        # Set up Collector in the remote node
        #
        eutils.generateMirrorConfig(tc, policy_json, newObjects)

        ret_count = 0
        for i in range(0, len(tc.mirror_verif)):
            #
            # If Execution-Optimization is enabled, no need to run the test
            # for the same protocol more than once
            #
            if i > 0 and tc.mirror_verif[i]['protocol'] ==\
                         tc.mirror_verif[i-1]['protocol']:
                continue

            #
            # Flow-ERSPAN for TCP-traffic is not tested (yet) in
            # Classic-mode until applicable pkt-trigger tools are identified
            #
            if tc.classic_mode == True and\
               tc.mirror_verif[i]['protocol'] == 'tcp':
                continue

            #
            # Push Mirror Config to Naples
            #
            ret = agent_api.PushConfigObjects(newObjects,
                                              [tc.naples.node_name],
                                              [tc.naples_device_name])
            if ret != api.types.status.SUCCESS:
                api.Logger.error("Unable to push mirror objects")
                tc.error = True
                return api.types.status.FAILURE

            #
            # Establish Forwarding set up between Naples-peer and Collectors
            #
            eutils.establishForwardingSetup(tc)

            req_tcpdump_erspan = api.Trigger_CreateExecuteCommandsRequest(\
                                 serial = True)
            for c in range(0, len(tc.flow_collector)):
                #
                # Set up TCPDUMP's on the collector
                #
                idx = tc.flow_collector_idx[c]
                if tc.flow_collector[c].IsNaples():
                    cmd = "tcpdump -c 600 -XX -vv -nni {} ip proto gre and\
                           dst {} --immediate-mode -U -w flow-mirror-{}.pcap"\
                          .format(tc.flow_collector[c].interface,
                                  tc.collector_ip_address[idx], c)
                else:
                    cmd = "tcpdump -p -c 600 -XX -vv -nni {} ip proto gre and\
                           dst {} --immediate-mode -U -w flow-mirror-{}.pcap"\
                          .format(tc.flow_collector[c].interface,
                                  tc.collector_ip_address[idx], c)
                eutils.add_command(req_tcpdump_erspan, tc.flow_collector[c],
                                   cmd, True)

            resp_tcpdump_erspan = api.Trigger(req_tcpdump_erspan)
            for cmd in resp_tcpdump_erspan.commands:
                api.PrintCommandResults(cmd)

            #
            # Classic mode requires a delay to make sure that TCPDUMP background
            # process is fully up
            #
            if tc.classic_mode == True:
                time.sleep(2)

            #
            # Trigger packets for ERSPAN to take effect
            #
            tc.protocol = tc.mirror_verif[i]['protocol']
            tc.dest_port = utils.GetDestPort(tc.mirror_verif[i]['port'])
            if api.GetNodeOs(tc.naples.node_name) == 'linux':
                eutils.triggerTrafficInClassicModeLinux(tc)
            else:
                eutils.triggerTrafficInHostPinModeOrFreeBSD(tc)

            #
            # Dump sessions/flows/P4-tables for debug purposes
            #
            eutils.showSessionAndP4TablesForDebug(tc, tc.flow_collector,
                                                  tc.flow_collector_idx)

            #
            # Terminate TCPDUMP background process
            #
            term_resp_tcpdump_erspan = api.Trigger_TerminateAllCommands(\
                                       resp_tcpdump_erspan)
            tc.resp_tcpdump_erspan = api.Trigger_AggregateCommandsResponse(\
                              resp_tcpdump_erspan, term_resp_tcpdump_erspan)

            # Delete the objects
            agent_api.DeleteConfigObjects(newObjects, [tc.naples.node_name],
                                          [tc.naples_device_name])

            #
            # Make sure that Mirror-config has been removed
            #
            tc.resp_cleanup = eutils.showP4TablesForValidation(tc)

            #
            # Validate ERSPAN packets reception
            #
            # UDP/ICMP expect both directions of each flow (count doubled).
            tc.tcp_erspan_pkts_expected = \
                               NUMBER_OF_TCP_ERSPAN_PACKETS_PER_SESSION
            tc.udp_erspan_pkts_expected = (tc.udp_count << 1)
            tc.icmp_erspan_pkts_expected = (tc.icmp_count << 1)

            if tc.protocol == 'udp' and tc.iterators.proto == 'mixed':
                tc.protocol = 'udp-mixed'
                tc.icmp_erspan_pkts_expected = tc.udp_erspan_pkts_expected

            res_1 = eutils.validateErspanPackets(tc, tc.flow_collector,
                                                 tc.flow_collector_idx)

            #
            # Validate Config-cleanup
            #
            res_2 = eutils.validateConfigCleanup(tc)

            if res_1 == api.types.status.FAILURE or\
               res_2 == api.types.status.FAILURE:
                result = api.types.status.FAILURE

            if result == api.types.status.FAILURE:
                break

            ret_count += 1

        if result == api.types.status.FAILURE:
            break

        count += ret_count

    tc.SetTestCount(count)

    return result
Exemplo n.º 14
0
def Trigger(tc):
    """FlowMon CRUD test: create flowmon sessions from each 'crud' policy
    JSON, inject traffic and validate capture, optionally grow the number
    of export collectors, then shrink back to one session and verify the
    collectors survive, finally deleting everything.

    Relies on module-level state not visible here: num_exports_at_create
    and the tc.test_iterator_data[...]['del_obj'] entries populated by
    ConfigFlowmonSession -- TODO confirm.  Accumulates per-policy counts
    into tc.SetTestCount(); returns the last result status.
    """
    #if tc.skip: return api.types.status.SUCCESS

    result = api.types.status.SUCCESS
    count = 0

    policies = utils.GetTargetJsons('flowmon', 'crud')
    for policy_json in policies:
        api.Logger.info("Policy File: {}".format(policy_json))
        flowmon_spec_objects = agent_api.AddOneConfig(policy_json)
        if len(flowmon_spec_objects) == 0:
            api.Logger.info("Policy object len {}".format(
                len(flowmon_spec_objects)))
            continue
        # NOTE(review): verif_json is assigned but never used in this
        # function -- confirm whether it can be dropped.
        verif_json = utils.GetVerifJsonFromPolicyJson(policy_json)
        #create flowexport rules with 1 export cfg
        utils.generateFlowmonCollectorConfig(flowmon_spec_objects,
                                             num_exports_at_create)
        # Randomized port for this run's export config.
        port = random.randint(100, 10000)
        tc.port = port

        result = ConfigFlowmonSession(tc, num_exports_at_create,
                                      flowmon_spec_objects)
        if result != api.types.status.SUCCESS:
            api.Logger.error("Failed in Flowmon session configuration")
            agent_api.RemoveConfigObjects(flowmon_spec_objects)
            break

        api.Logger.info(
            "Test for FlowMon with {} sessions {} collectors".format(
                tc.iterators.num_flowmon_sessions, num_exports_at_create))
        utils.DumpFlowmonSessions()
        ret = InjectTestTrafficAndValidateCapture(
            tc, tc.iterators.num_flowmon_sessions, num_exports_at_create)
        result = ret['res']
        ret_count = ret['count']
        count = count + ret_count

        if result != api.types.status.SUCCESS:
            api.Logger.error("Failed in Traffic validation")
            utils.DumpFlowmonSessions()
        elif tc.iterators.num_exports > num_exports_at_create:
            #update flowexport sessions with num_exports
            result = updateFlowmonCollectors(tc, tc.iterators.num_exports)
            if result != api.types.status.SUCCESS:
                api.Logger.info("Failed in Flowmon Collector configuration")
            else:
                api.Logger.info(
                    "Test for FlowMon with {} sessions {} collectors".format(
                        tc.iterators.num_flowmon_sessions,
                        tc.iterators.num_exports))
                utils.DumpFlowmonSessions()
                ret = InjectTestTrafficAndValidateCapture(
                    tc, tc.iterators.num_flowmon_sessions,
                    tc.iterators.num_exports)
                result = ret['res']
                ret_count = ret['count']
                count = count + ret_count
                if result != api.types.status.SUCCESS:
                    api.Logger.error("Failed in Traffic validation")
                    utils.DumpFlowmonSessions()

        #remove all but one flowmon session and check the collectors are not deleted
        if (result == api.types.status.SUCCESS):
            for iteration in range(1, tc.iterators.num_flowmon_sessions):
                obj = tc.test_iterator_data[iteration]['del_obj']
                agent_api.DeleteConfigObjects(obj)
                agent_api.RemoveConfigObjects(obj)
                tc.test_iterator_data[iteration] = {}

            api.Logger.info(
                "Test for FlowMon with {} sessions {} collectors".format(
                    1, tc.iterators.num_exports))
            utils.DumpFlowmonSessions()
            ret = InjectTestTrafficAndValidateCapture(tc, 1,
                                                      tc.iterators.num_exports)
            result = ret['res']
            ret_count = ret['count']
            count = count + ret_count
            if result != api.types.status.SUCCESS:
                api.Logger.error("Failed in Traffic validation")
                utils.DumpFlowmonSessions()

        # Final cleanup: delete any sessions not already removed above.
        for iteration in range(tc.iterators.num_flowmon_sessions):
            if tc.test_iterator_data[iteration]:
                obj = tc.test_iterator_data[iteration]['del_obj']
                agent_api.DeleteConfigObjects(obj)
                agent_api.RemoveConfigObjects(obj)
                tc.test_iterator_data[iteration] = {}

        agent_api.RemoveConfigObjects(flowmon_spec_objects)

        api.Logger.info(
            "policy_json = {}, count = {}, total_count = {}".format(
                policy_json, ret_count, count))
        if result != api.types.status.SUCCESS:
            api.Logger.info(
                "policy_json = {}, Encountered FAILURE, stopping".format(
                    policy_json))
            break

    tc.SetTestCount(count)
    return result
Exemplo n.º 15
0
def delete_ep_info(tc, wl, node):
    """Best-effort delete of workload *wl*'s Endpoint object on *node*.

    Exactly one Endpoint is expected to match the workload name.  *tc* is
    unused but kept for call-site compatibility.
    """
    ep_filter = "meta.name=" + wl.workload_name + ";"
    objects = agent_api.QueryConfigs("Endpoint", filter=ep_filter)
    assert (len(objects) == 1)
    # Renamed from `object` to avoid shadowing the builtin.
    ep_obj = objects[0]
    agent_api.DeleteConfigObjects([ep_obj], [node], ignore_error=True)
Exemplo n.º 16
0
def Trigger(tc):
    """FlowMon traffic test: push each flowmon policy, map its export
    destinations to the matching workloads, run traffic validation, then
    delete the config.

    Relies on module-level state not visible here: wl_sec_ip_info
    (workload secondary-IP map) and is_wl_type_bm -- TODO confirm.
    Accumulates per-policy counts into tc.SetTestCount(); returns the
    last result status.
    """
    #if tc.skip: return api.types.status.SUCCESS

    policies = utils.GetTargetJsons('flowmon', tc.iterators.proto)
    result = api.types.status.SUCCESS

    count = 0
    ret_count = 0
    export_cfg = []
    collector_wl = []
    for policy_json in policies:
        # Reset per-policy scratch lists (reused across iterations).
        export_cfg.clear()
        collector_wl.clear()
        #pdb.set_trace()
        verif_json = utils.GetVerifJsonFromPolicyJson(policy_json)
        api.Logger.info("Using policy_json = {}".format(policy_json))
        newObjects = agent_api.AddOneConfig(policy_json)
        if len(newObjects) == 0:
            api.Logger.error("Adding new objects to store failed")
            return api.types.status.FAILURE
        ret = agent_api.PushConfigObjects(newObjects)
        if ret != api.types.status.SUCCESS:
            api.Logger.error("Unable to push flowmon objects")
            return api.types.status.FAILURE

        # Get collector to find the workload
        for obj in newObjects:
            for obj_export_cfg in obj.spec.exports:
                export_cfg.append(obj_export_cfg)
                api.Logger.info("export-dest: {} proto: {} port: {}".format(
                    obj_export_cfg.destination,
                    obj_export_cfg.proto_port.protocol,
                    obj_export_cfg.proto_port.port))

        # Match each export destination against workload primary IPs and
        # their secondary IPs.
        for coll_dst in export_cfg:
            for wl in tc.workloads:
                if (wl.ip_address == coll_dst.destination) or (
                        coll_dst.destination
                        in wl_sec_ip_info[wl.workload_name]):
                    collector_wl.append(wl)

        api.Logger.info("collect_dest len: {} ".format(len(export_cfg)))
        api.Logger.info("collect_wl len: {} ".format(len(collector_wl)))
        collector_info = utils.GetFlowmonCollectorsInfo(
            collector_wl, export_cfg)
        utils.DumpFlowmonSessions()
        ret = utils.RunAll(tc, verif_json, 'flowmon', collector_info,
                           is_wl_type_bm)
        result = ret['res']
        ret_count = ret['count']
        count = count + ret_count
        if result != api.types.status.SUCCESS:
            api.Logger.error("Failed in Traffic validation")
            utils.DumpFlowmonSessions()

        agent_api.DeleteConfigObjects(newObjects)
        agent_api.RemoveConfigObjects(newObjects)
        api.Logger.info(
            "policy_json = {}, count = {}, total_count = {}".format(
                policy_json, ret_count, count))
        if result != api.types.status.SUCCESS:
            api.Logger.info(
                "policy_json = {}, Encountered FAILURE, stopping".format(
                    policy_json))
            break
    tc.SetTestCount(count)
    export_cfg.clear()
    collector_wl.clear()
    return result
Exemplo n.º 17
0
def Trigger(tc):
    """Run LIF-ERSPAN mirror + FlowMon (IPFIX) telemetry validation.

    Iterates over single-collector Mirror/FlowMon template JSON pairs and,
    for each protocol entry in tc.mirror_verif: pushes collector, mirror,
    flowmon and interface configs to the Naples node, starts tcpdump
    captures on the collector workloads, triggers traffic, then validates
    ERSPAN/IPFIX packet reception and config cleanup.

    Returns api.types.status.SUCCESS or FAILURE.  On fatal config-push
    errors also sets tc.error = True; on exit records the number of
    executed sub-tests via tc.SetTestCount().
    """
    # Bundle asked us to skip this test, or a previous stage already failed.
    if tc.ignore == True:
        return api.types.status.SUCCESS

    if tc.error == True:
        return api.types.status.FAILURE

    # Template config JSON locations (logged for debugging only).
    protoDir1 = api.GetTopologyDirectory() +\
                "/gen/telemetry/{}/{}".format('mirror', 'lif')
    api.Logger.info("Template Config files location: ", protoDir1)
    protoDir2 = api.GetTopologyDirectory() +\
                "/gen/telemetry/{}/{}".format('mirror', tc.iterators.proto)
    api.Logger.info("Template Config files location: ", protoDir2)
    protoDir3 = api.GetTopologyDirectory() +\
                "/gen/telemetry/{}/{}".format('flowmon', tc.iterators.proto)
    api.Logger.info("Template Config files location: ", protoDir3)

    result = api.types.status.SUCCESS

    count = 0
    MirrorPolicies = utils.GetTargetJsons('mirror', tc.iterators.proto)
    FlowMonPolicies = utils.GetTargetJsons('flowmon', tc.iterators.proto)
    LifPolicies = utils.GetTargetJsons('mirror', 'lif')
    # flowmon_policy_idx pairs each mirror policy with the next unused
    # single-collector flowmon policy (the inner loop breaks after one).
    flowmon_policy_idx = 0
    ret_count = 0
    for mirror_json in MirrorPolicies:
        #
        # Get template-Mirror Config
        #
        newMirrorObjects = agent_api.AddOneConfig(mirror_json)
        if len(newMirrorObjects) == 0:
            api.Logger.error("Adding new Mirror objects to store failed")
            tc.error = True
            return api.types.status.FAILURE
        agent_api.RemoveConfigObjects(newMirrorObjects)

        #
        # Ignore Multi-collector template config's, since Expanded-Telemetry
        # testbundle dynamically creates such config's
        #
        if len(newMirrorObjects[0].spec.collectors) > 1:
            continue

        idx = 0
        for flowmon_json in FlowMonPolicies:
            # Skip flowmon policies already consumed by earlier mirror
            # policies.
            if idx < flowmon_policy_idx:
                idx += 1
                continue

            #
            # Get template-FlowMon Config
            #
            newFlowMonObjects = agent_api.AddOneConfig(flowmon_json)
            if len(newFlowMonObjects) == 0:
                api.Logger.error("Adding new FlowMon objects to store failed")
                tc.error = True
                return api.types.status.FAILURE
            agent_api.RemoveConfigObjects(newFlowMonObjects)

            #
            # Ignore Multi-collector template config's, since Expanded-Telemetry
            # testbundle dynamically creates such config's
            #
            if len(newFlowMonObjects[0].spec.exports) > 1:
                flowmon_policy_idx += 1
                idx += 1
                continue

            #
            # Modify template-Mirror / template-FlowMon Config to make sure
            # that Naples-node # act as either source or destination
            #
            # Set up Collector in the remote node
            #
            eutils.generateMirrorConfig(tc, mirror_json, newMirrorObjects)
            eutils.generateFlowMonConfig(tc, flowmon_json, newFlowMonObjects)

            ret_count = 0
            for i in range(0, len(tc.mirror_verif)):
                #
                # If Execution-Optimization is enabled, no need to run the
                # test for the same protocol more than once
                #
                if i > 0 and tc.mirror_verif[i]['protocol'] ==\
                             tc.mirror_verif[i-1]['protocol']:
                    continue

                #
                # Flow-ERSPAN for TCP-traffic is not tested (yet) in
                # Classic-mode until applicable pkt-trigger tools are
                # identified
                #
                if tc.classic_mode == True and\
                   tc.mirror_verif[i]['protocol'] == 'tcp':
                    continue

                for policy_json in LifPolicies:
                    #
                    # Get template-Mirror Config
                    #
                    newObjects = agent_api.AddOneConfig(policy_json)
                    if len(newObjects) == 0:
                        api.Logger.error("Adding new objects to store failed")
                        tc.error = True
                        return api.types.status.FAILURE

                    #
                    # Modify template-Mirror Config to make sure that
                    # Naples-node act as either source or destination
                    #
                    # Set up Collector in the remote node
                    #
                    # Stash each object by kind; both are used right after
                    # this loop.
                    if newObjects[0].kind == 'InterfaceMirrorSession':
                        tc.lif_collector_objects = newObjects
                        agent_api.RemoveConfigObjects(tc.lif_collector_objects)
                    elif newObjects[0].kind == 'Interface':
                        tc.interface_objects = newObjects
                        agent_api.RemoveConfigObjects(tc.interface_objects)

                #
                # Push Collector Config to Naples
                #
                colObjects = tc.lif_collector_objects
                ret = eutils.generateLifCollectorConfig(tc, colObjects)
                if ret != api.types.status.SUCCESS:
                    api.Logger.error("Unable to identify Collector Workload")
                    tc.error = True
                    return api.types.status.FAILURE

                ret = agent_api.PushConfigObjects(colObjects,
                                                  [tc.naples.node_name],
                                                  [tc.naples_device_name])
                if ret != api.types.status.SUCCESS:
                    api.Logger.error("Unable to push collector objects")
                    tc.error = True
                    return api.types.status.FAILURE

                #
                # Push Mirror / FlowMon Config to Naples
                #
                # NOTE: on each push failure below, everything pushed so far
                # is rolled back before bailing out.
                ret = agent_api.PushConfigObjects(newMirrorObjects,
                                                  [tc.naples.node_name],
                                                  [tc.naples_device_name])
                if ret != api.types.status.SUCCESS:
                    agent_api.DeleteConfigObjects(tc.lif_collector_objects,
                                                  [tc.naples.node_name],
                                                  [tc.naples_device_name])
                    api.Logger.error("Unable to push mirror objects")
                    tc.error = True
                    return api.types.status.FAILURE

                ret = agent_api.PushConfigObjects(newFlowMonObjects,
                                                  [tc.naples.node_name],
                                                  [tc.naples_device_name])
                if ret != api.types.status.SUCCESS:
                    agent_api.DeleteConfigObjects(tc.lif_collector_objects,
                                                  [tc.naples.node_name],
                                                  [tc.naples_device_name])
                    agent_api.DeleteConfigObjects(newMirrorObjects,
                                                  [tc.naples.node_name],
                                                  [tc.naples_device_name])
                    api.Logger.error("Unable to push flowmon objects")
                    tc.error = True
                    return api.types.status.FAILURE

                #
                # Update Interface objects
                #
                ifObjects = tc.interface_objects
                ret = eutils.generateLifInterfaceConfig(
                    tc, ifObjects, tc.lif_collector_objects)
                if ret != api.types.status.SUCCESS:
                    agent_api.DeleteConfigObjects(tc.lif_collector_objects,
                                                  [tc.naples.node_name],
                                                  [tc.naples_device_name])
                    agent_api.DeleteConfigObjects(newMirrorObjects,
                                                  [tc.naples.node_name],
                                                  [tc.naples_device_name])
                    agent_api.DeleteConfigObjects(newFlowMonObjects,
                                                  [tc.naples.node_name],
                                                  [tc.naples_device_name])
                    api.Logger.error(
                        "Unable to identify Uplink/LIF Interfaces")
                    tc.error = True
                    return api.types.status.FAILURE

                ret = agent_api.UpdateConfigObjects(ifObjects,
                                                    [tc.naples.node_name],
                                                    [tc.naples_device_name])
                if ret != api.types.status.SUCCESS:
                    agent_api.DeleteConfigObjects(tc.lif_collector_objects,
                                                  [tc.naples.node_name],
                                                  [tc.naples_device_name])
                    agent_api.DeleteConfigObjects(newMirrorObjects,
                                                  [tc.naples.node_name],
                                                  [tc.naples_device_name])
                    agent_api.DeleteConfigObjects(newFlowMonObjects,
                                                  [tc.naples.node_name],
                                                  [tc.naples_device_name])
                    api.Logger.error("Unable to update interface objects")
                    tc.error = True
                    return api.types.status.FAILURE

                #
                # Establish Forwarding set up between Naples-peer and
                # Collectors
                #
                eutils.establishForwardingSetup(tc)

                #
                # Give a little time for flows clean-up to happen so that
                # stale IPFIX records don't show up
                #
                if tc.classic_mode == True:
                    time.sleep(1)

                # 'distinct' collection mode uses a separate set of
                # flow-ERSPAN collectors with their own captures.
                if tc.collection == 'distinct':
                    req_tcpdump_flow_erspan = \
                    api.Trigger_CreateExecuteCommandsRequest(serial = True)
                    for c in range(0, len(tc.flow_collector)):
                        #
                        # Set up TCPDUMP's on the collector
                        #
                        idx = tc.flow_collector_idx[c]
                        if tc.flow_collector[c].IsNaples():
                            cmd = "tcpdump -c 600 -XX -vv -nni {} ip proto\
                            gre and dst {} --immediate-mode -U\
                            -w flow-mirror-{}.pcap"\
                            .format(tc.flow_collector[c].interface,
                                    tc.collector_ip_address[idx], c)
                        else:
                            cmd = "tcpdump -p -c 600 -XX -vv -nni {} ip\
                            proto gre and dst {} --immediate-mode -U\
                            -w flow-mirror-{}.pcap"\
                            .format(tc.flow_collector[c].interface,
                                    tc.collector_ip_address[idx], c)
                        eutils.add_command(req_tcpdump_flow_erspan,
                                           tc.flow_collector[c], cmd, True)

                    resp_tcpdump_flow_erspan = api.Trigger(\
                                               req_tcpdump_flow_erspan)
                    for cmd in resp_tcpdump_flow_erspan.commands:
                        api.PrintCommandResults(cmd)

                    #
                    # Classic mode requires a delay to make sure that TCPDUMP
                    # background process is fully up
                    #
                    if tc.classic_mode == True:
                        time.sleep(1)

                req_tcpdump_lif_erspan = \
                api.Trigger_CreateExecuteCommandsRequest(serial = True)
                req_tcpdump_flowmon = api.Trigger_CreateExecuteCommandsRequest(\
                                      serial = True)
                for c in range(0, len(tc.lif_collector)):
                    #
                    # Set up TCPDUMP's on the collector
                    #
                    idx = tc.lif_collector_idx[c]
                    if tc.lif_collector[c].IsNaples():
                        cmd = "tcpdump -c 1000 -XX -vv -nni {} ip proto gre\
                              and dst {} --immediate-mode -U\
                              -w lif-mirror-{}.pcap"\
                              .format(tc.lif_collector[c].interface,
                                      tc.collector_ip_address[idx], c)
                    else:
                        cmd = "tcpdump -p -c 1000 -XX -vv -nni {} ip proto\
                              gre and dst {} --immediate-mode -U\
                              -w lif-mirror-{}.pcap"\
                              .format(tc.lif_collector[c].interface,
                                      tc.collector_ip_address[idx], c)
                    eutils.add_command(req_tcpdump_lif_erspan,
                                       tc.lif_collector[c], cmd, True)

                for c in range(0, len(tc.flowmon_collector)):
                    #
                    # Set up TCPDUMP's on the collector
                    #
                    idx = tc.flowmon_collector_idx[c]
                    if tc.flowmon_collector[c].IsNaples():
                        cmd = "tcpdump -c 600 -XX -vv -nni {} udp and\
                               dst port {} and dst {} --immediate-mode\
                               -U -w flowmon-{}.pcap"\
                        .format(tc.flowmon_collector[c].interface,
                        tc.export_port[c], tc.collector_ip_address[idx], c)
                    else:
                        cmd = "tcpdump -p -c 600 -XX -vv -nni {} udp\
                               and dst port {} and dst {}\
                               --immediate-mode -U -w flowmon-{}.pcap"\
                        .format(tc.flowmon_collector[c].interface,
                        tc.export_port[c], tc.collector_ip_address[idx], c)
                    eutils.add_command(req_tcpdump_flowmon,
                                       tc.flowmon_collector[c], cmd, True)

                resp_tcpdump_lif_erspan = api.Trigger(\
                                          req_tcpdump_lif_erspan)
                for cmd in resp_tcpdump_lif_erspan.commands:
                    api.PrintCommandResults(cmd)

                #
                # Classic mode requires a delay to make sure that TCPDUMP
                # background process is fully up
                #
                if tc.classic_mode == True:
                    time.sleep(2)

                resp_tcpdump_flowmon = api.Trigger(req_tcpdump_flowmon)
                for cmd in resp_tcpdump_flowmon.commands:
                    api.PrintCommandResults(cmd)

                #
                # Classic mode requires a delay to make sure that TCPDUMP
                # background process is fully up
                #
                if tc.classic_mode == True:
                    time.sleep(2)

                #
                # Trigger packets for ERSPAN / FLOWMON to take effect
                #
                tc.protocol = tc.mirror_verif[i]['protocol']
                tc.dest_port = utils.GetDestPort(tc.mirror_verif[i]['port'])

                # Temporarily widen tc.protocol to 'all' so the traffic
                # trigger exercises every protocol; restored right after.
                protocol = tc.protocol
                tc.protocol = 'all'
                if api.GetNodeOs(tc.naples.node_name) == 'linux':
                    eutils.triggerTrafficInClassicModeLinux(tc)
                else:
                    eutils.triggerTrafficInHostPinModeOrFreeBSD(tc)
                tc.protocol = protocol

                #
                # Dump sessions/flows/P4-tables for debug purposes
                #
                eutils.showSessionAndP4TablesForDebug(tc, tc.lif_collector,
                                                      tc.lif_collector_idx)

                #
                # Terminate TCPDUMP background process
                #
                term_resp_tcpdump_lif_erspan = \
                api.Trigger_TerminateAllCommands(resp_tcpdump_lif_erspan)
                tc.resp_tcpdump_lif_erspan = \
                api.Trigger_AggregateCommandsResponse(\
                resp_tcpdump_lif_erspan, term_resp_tcpdump_lif_erspan)

                if tc.collection == 'distinct':
                    term_resp_tcpdump_flow_erspan = \
                    api.Trigger_TerminateAllCommands(resp_tcpdump_flow_erspan)
                    tc.resp_tcpdump_flow_erspan = \
                    api.Trigger_AggregateCommandsResponse(\
                    resp_tcpdump_flow_erspan, term_resp_tcpdump_flow_erspan)

                term_resp_tcpdump_flowmon = api.Trigger_TerminateAllCommands(\
                                            resp_tcpdump_flowmon)
                tc.resp_tcpdump_flowmon = \
                api.Trigger_AggregateCommandsResponse(resp_tcpdump_flowmon,
                                                      term_resp_tcpdump_flowmon)

                # Delete the objects
                eutils.deGenerateLifInterfaceConfig(tc, tc.interface_objects,
                                                    tc.lif_collector_objects)
                agent_api.UpdateConfigObjects(tc.interface_objects,
                                              [tc.naples.node_name],
                                              [tc.naples_device_name])
                agent_api.DeleteConfigObjects(tc.lif_collector_objects,
                                              [tc.naples.node_name],
                                              [tc.naples_device_name])
                agent_api.DeleteConfigObjects(newMirrorObjects,
                                              [tc.naples.node_name],
                                              [tc.naples_device_name])
                agent_api.DeleteConfigObjects(newFlowMonObjects,
                                              [tc.naples.node_name],
                                              [tc.naples_device_name])

                #
                # Make sure that Mirror-config has been removed
                #
                tc.resp_cleanup = eutils.showP4TablesForValidation(tc)

                #
                # Validate ERSPAN packets reception
                #
                # Expected counts assume bidirectional traffic (hence the
                # << 1 doubling); halved below for unidirectional runs.
                tc.tcp_erspan_pkts_expected = \
                               NUMBER_OF_TCP_ERSPAN_PACKETS_PER_SESSION
                tc.udp_erspan_pkts_expected = (tc.udp_count << 1)
                tc.icmp_erspan_pkts_expected = (tc.udp_count << 1)+\
                                               (tc.icmp_count << 1)

                if tc.iterators.direction != 'both':
                    tc.tcp_erspan_pkts_expected >>= 1
                    tc.udp_erspan_pkts_expected >>= 1
                    tc.icmp_erspan_pkts_expected >>= 1

                # With duplicate-check disabled each packet is mirrored
                # twice.
                if tc.dupcheck == 'disable':
                    tc.tcp_erspan_pkts_expected = \
                    (tc.tcp_erspan_pkts_expected+1) << 1
                    tc.udp_erspan_pkts_expected <<= 1
                    tc.icmp_erspan_pkts_expected <<= 1

                #
                # Adjust Expected-pkt-counts taking into account Flow-ERSPAN
                # Config's
                #
                if tc.collection == 'unified':
                    if (tc.protocol == 'tcp' or tc.iterators.proto == 'mixed')\
                        and tc.iterators.direction != 'both':
                        tc.tcp_erspan_pkts_expected <<= 1
                    if (tc.protocol == 'udp' or tc.iterators.proto == 'mixed')\
                        and tc.iterators.direction != 'both':
                        tc.udp_erspan_pkts_expected <<= 1
                    if tc.protocol == 'icmp' or tc.iterators.proto == 'mixed':
                        if tc.iterators.direction != 'both':
                            tc.icmp_erspan_pkts_expected <<= 1
                        #if tc.iterators.direction != 'egress':
                        #    tc.icmp_erspan_pkts_expected += 1

                protocol = tc.protocol
                tc.protocol = 'all'
                tc.feature = 'lif-erspan'
                tc.resp_tcpdump_erspan = tc.resp_tcpdump_lif_erspan
                res_1 = eutils.validateErspanPackets(tc, tc.lif_collector,
                                                     tc.lif_collector_idx)

                # In 'distinct' mode, flow-ERSPAN packets are validated
                # separately against the flow collectors.
                if tc.collection == 'distinct':
                    tc.tcp_erspan_pkts_expected = 0
                    tc.udp_erspan_pkts_expected = 0
                    tc.icmp_erspan_pkts_expected = 0
                    tc.protocol = protocol
                    if tc.protocol == 'tcp' or tc.iterators.proto == 'mixed':
                        tc.tcp_erspan_pkts_expected = \
                               NUMBER_OF_TCP_ERSPAN_PACKETS_PER_SESSION
                    if tc.protocol == 'udp' or tc.iterators.proto == 'mixed':
                        tc.udp_erspan_pkts_expected = (tc.udp_count << 1)
                    if tc.protocol == 'icmp' or tc.iterators.proto == 'mixed':
                        tc.icmp_erspan_pkts_expected = (tc.udp_count << 1)+\
                                                       (tc.icmp_count << 1)

                    tc.protocol = 'all'
                    tc.feature = 'flow-erspan'
                    tc.resp_tcpdump_erspan = tc.resp_tcpdump_flow_erspan
                    res_f = eutils.validateErspanPackets(
                        tc, tc.flow_collector, tc.flow_collector_idx)
                    if res_f == api.types.status.FAILURE:
                        result = api.types.status.FAILURE

                #
                # Validate IPFIX packets reception
                #
                tc.feature = 'flowmon'
                res_2 = eutils.validateIpFixPackets(tc)
                tc.protocol = protocol

                #
                # Validate Config-cleanup
                #
                res_3 = eutils.validateConfigCleanup(tc)

                if res_1 == api.types.status.FAILURE or\
                   res_2 == api.types.status.FAILURE or\
                   res_3 == api.types.status.FAILURE:
                    result = api.types.status.FAILURE

                if result == api.types.status.FAILURE:
                    break

                ret_count += 1

            # Pair consumed: advance to the next flowmon policy and move on
            # to the next mirror policy.
            flowmon_policy_idx += 1
            break

        if result == api.types.status.FAILURE:
            break

        count += ret_count

    tc.SetTestCount(count)
    return result
Exemplo n.º 18
0
def Trigger(tc):
    """Validate endpoint-level ERSPAN mirroring.

    Loads the 'mirror/endpoint' template JSONs, pushes the collector config
    to the Naples node, attaches the mirror session to the endpoint objects,
    captures the GRE/ERSPAN stream on each collector workload with tcpdump,
    triggers traffic, then tears the config down and snapshots the P4 tables
    for cleanup validation.

    Stores results on tc (resp_tcpdump_erspan, resp_cleanup) for the Verify
    stage; returns api.types.status.SUCCESS, or FAILURE on config errors
    (also setting tc.error = True).
    """
    if tc.ignore == True:
        return api.types.status.SUCCESS

    if tc.error == True:
        return api.types.status.FAILURE

    tc.resp_tcpdump_erspan = None
    tc.resp_cleanup = None

    # Template config JSON location (logged for debugging only).
    protoDir = api.GetTopologyDirectory() +\
               "/gen/telemetry/{}/{}".format('mirror', 'endpoint')
    api.Logger.info("Template Config files location: ", protoDir)

    policies = utils.GetTargetJsons('mirror', 'endpoint')
    for policy_json in policies:
        #
        # Get template-Mirror Config
        #
        # BUGFIX: was `("... {}", format(policy_json))`, which never
        # substituted the placeholder and called the builtin format().
        api.Logger.info("Adding one config object for {}".format(policy_json))
        newObjects = agent_api.AddOneConfig(policy_json)
        if len(newObjects) == 0:
            api.Logger.error("Adding new objects to store failed")
            tc.error = True
            return api.types.status.FAILURE

        #
        # Modify template-Mirror Config to make sure that Naples-node
        # act as either source or destination
        #
        # Set up Collector in the remote node
        #
        if newObjects[0].kind == 'InterfaceMirrorSession':
            tc.ep_collector_objects = newObjects
            agent_api.RemoveConfigObjects(tc.ep_collector_objects)
        elif newObjects[0].kind == 'Endpoint':
            tc.endpoint_objects = newObjects
            agent_api.RemoveConfigObjects(tc.endpoint_objects)
            updateEndpointObjectTempl(tc, tc.endpoint_objects,
                                      tc.store_endpoint_objects[0])

    # First iteration (i == 0) pushes the collector session; subsequent
    # iterations bind the endpoints to it and run the capture/traffic cycle.
    for i in range(0, len(policies)):
        #
        # Push Collector object
        #
        if i == 0:
            colObjects = tc.ep_collector_objects
            if tc.iterators.session == 'single':
                ret = eutils.generateEpCollectorConfig(tc, colObjects)
            else:
                ret = eutils.generateEpCollectorConfigForMultiMirrorSession(
                    tc, colObjects)
            if ret != api.types.status.SUCCESS:
                api.Logger.error("Unable to identify Collector Workload")
                tc.error = True
                return api.types.status.FAILURE

            api.Logger.info("Pushing collector objects")
            ret = agent_api.PushConfigObjects(colObjects,
                                              [tc.naples.node_name],
                                              [tc.naples_device_name])
            if ret != api.types.status.SUCCESS:
                api.Logger.error("Unable to push collector objects")
                tc.error = True
                return api.types.status.FAILURE
            continue

        api.Logger.info("Generating Endpoint objects")
        cfg_api.PrintConfigsObjects(colObjects)
        #
        # Update Endpoint objects
        #
        epObjects = tc.endpoint_objects
        ret = eutils.generateEndpointConfig(tc, epObjects, colObjects)
        if ret != api.types.status.SUCCESS:
            # Roll back the collector pushed in iteration 0.
            agent_api.DeleteConfigObjects(tc.ep_collector_objects,
                                          [tc.naples.node_name],
                                          [tc.naples_device_name])
            api.Logger.error("Unable to identify Endpoints")
            tc.error = True
            return api.types.status.FAILURE

        api.Logger.info("Pushing Endpoint objects")
        ret = agent_api.UpdateConfigObjects(epObjects, [tc.naples.node_name],
                                            [tc.naples_device_name])
        if ret != api.types.status.SUCCESS:
            agent_api.DeleteConfigObjects(tc.ep_collector_objects,
                                          [tc.naples.node_name],
                                          [tc.naples_device_name])
            api.Logger.error("Unable to update endpoint objects")
            tc.error = True
            return api.types.status.FAILURE

        #
        # Establish Forwarding set up between Naples-peer and Collectors
        #
        eutils.establishForwardingSetup(tc)

        req_tcpdump_erspan = api.Trigger_CreateExecuteCommandsRequest(\
                             serial = True)
        for c in range(0, len(tc.ep_collector)):
            #
            # Set up TCPDUMP's on the collector
            #
            idx = tc.ep_collector_idx[c]
            if tc.ep_collector[c].IsNaples():
                ### TODO - run & revisit for windows case and fix any issues.
                if api.GetNodeOs(tc.naples.node_name) == "windows":
                    intfGuid = ionic_utils.winIntfGuid(
                        tc.ep_collector[c].node_name,
                        tc.ep_collector[c].interface)
                    intfVal = str(
                        ionic_utils.winTcpDumpIdx(tc.ep_collector[c].node_name,
                                                  intfGuid))
                    cmd = "sudo /mnt/c/Windows/System32/tcpdump.exe -c 1000 -XX -vv -i {} ip proto 47 and dst {} -U -w ep-mirror-{}.pcap"\
                          .format(intfVal, tc.collector_ip_address[idx], c)
                else:
                    intfVal = tc.ep_collector[c].interface
                    cmd = "tcpdump -c 1000 -XX -vv -nni {} ip proto gre and dst {}\
                           --immediate-mode -U -w ep-mirror-{}.pcap"\
                          .format(intfVal,
                              tc.collector_ip_address[idx], c)
            else:
                cmd = "tcpdump -p -c 1000 -XX -vv -nni {} ip proto gre\
                       and dst {} --immediate-mode -U -w ep-mirror-{}.pcap"\
                      .format(tc.ep_collector[c].interface,
                              tc.collector_ip_address[idx], c)
            eutils.add_command(req_tcpdump_erspan, tc.ep_collector[c], cmd,
                               True)

        resp_tcpdump_erspan = api.Trigger(req_tcpdump_erspan)
        for cmd in resp_tcpdump_erspan.commands:
            api.PrintCommandResults(cmd)

        #
        # Classic mode requires a delay to make sure that TCPDUMP background
        # process is fully up
        #
        if tc.classic_mode == True:
            time.sleep(2)

        #
        # Trigger packets for ERSPAN to take effect
        #
        tc.dest_port = '120'
        if api.GetNodeOs(tc.naples.node_name) == 'linux' or api.GetNodeOs(
                tc.naples.node_name) == 'windows':
            eutils.triggerTrafficInClassicModeLinux(tc)
        else:
            eutils.triggerTrafficInHostPinModeOrFreeBSD(tc)

        #
        # Dump sessions/flows/P4-tables for debug purposes
        #
        eutils.showSessionAndP4TablesForDebug(tc, tc.ep_collector,
                                              tc.ep_collector_idx)

        #
        # Terminate TCPDUMP background process
        #
        term_resp_tcpdump_erspan = api.Trigger_TerminateAllCommands(\
                                   resp_tcpdump_erspan)
        tc.resp_tcpdump_erspan = api.Trigger_AggregateCommandsResponse(\
                              resp_tcpdump_erspan, term_resp_tcpdump_erspan)
        # Windows tcpdump does not exit on terminate; kill it explicitly.
        if api.GetNodeOs(tc.naples.node_name) == "windows":
            req = api.Trigger_CreateExecuteCommandsRequest(serial=True)
            cmd = api.WINDOWS_POWERSHELL_CMD + " Stop-Process -Name 'tcpdump' -Force"
            api.Trigger_AddCommand(req,
                                   tc.naples.node_name,
                                   tc.naples.workload_name,
                                   cmd,
                                   background=False)
            api.Trigger(req)

        # Delete the objects
        eutils.deGenerateEndpointConfig(tc, tc.endpoint_objects,
                                        tc.ep_collector_objects)
        agent_api.UpdateConfigObjects(tc.endpoint_objects,
                                      [tc.naples.node_name],
                                      [tc.naples_device_name])

        agent_api.DeleteConfigObjects(tc.ep_collector_objects,
                                      [tc.naples.node_name],
                                      [tc.naples_device_name])

        #
        # Make sure that Mirror-config has been removed
        #
        tc.resp_cleanup = eutils.showP4TablesForValidation(tc)

    return api.types.status.SUCCESS
Exemplo n.º 19
0
def Trigger(tc):
    """Validate the security-profile config lifecycle against the agent.

    Flow: query the store objects, fetch the agent's copies, update TCP
    timeouts and confirm the change took effect, restore the original
    values, delete and re-push the whole set, and finally push/get/delete
    a brand-new profile on a single node before removing it from the
    store.  Returns api.types.status.SUCCESS or FAILURE.
    """

    # Query returns references to the objects held in the config store.
    store_profile_objects = netagent_cfg_api.QueryConfigs(kind='SecurityProfile')
    if len(store_profile_objects) == 0:
        api.Logger.error("No security profile objects in store")
        return api.types.status.FAILURE

    # Get returns copies of the objects that were pushed to the agent.
    get_config_objects = netagent_cfg_api.GetConfigObjects(store_profile_objects)
    if len(get_config_objects) == 0:
        api.Logger.error("Unable to fetch security profile objects")
        return api.types.status.FAILURE

    if len(get_config_objects) != len(store_profile_objects):
        api.Logger.error("Config mismatch, Get Objects : %d, Config store Objects : %d"
        % (len(get_config_objects), len(store_profile_objects)))
        return api.types.status.FAILURE

    # Modify the store references ('obj' rather than shadowing the builtin
    # name 'object').
    for obj in store_profile_objects:
        obj.spec.timeouts.tcp_connection_setup = "1200s"
        obj.spec.timeouts.tcp_half_close = "1400s"

    # Push the modification to the agent and re-fetch its copies.
    netagent_cfg_api.UpdateConfigObjects(store_profile_objects)
    new_get_config_objects = netagent_cfg_api.GetConfigObjects(store_profile_objects)
    if len(new_get_config_objects) == 0:
        api.Logger.error("Unable to fetch security profile objects after update")
        return api.types.status.FAILURE

    # The agent copy must match the store value, and both must differ from
    # the pre-update copy; otherwise the update did not take effect.
    for (get_object, store_object, old_object) in zip(new_get_config_objects, store_profile_objects, get_config_objects):
        if get_object.spec.timeouts.tcp_connection_setup != store_object.spec.timeouts.tcp_connection_setup or \
            old_object.spec.timeouts.tcp_connection_setup == store_object.spec.timeouts.tcp_connection_setup or \
            old_object.spec.timeouts.tcp_connection_setup == get_object.spec.timeouts.tcp_connection_setup:
            api.Logger.error("Update failed")
            return api.types.status.FAILURE

    # Restore the original timeout values from the pre-update copies.
    for obj, old_object in zip(store_profile_objects, get_config_objects):
        obj.spec.timeouts.tcp_connection_setup = old_object.spec.timeouts.tcp_connection_setup
        obj.spec.timeouts.tcp_half_close = old_object.spec.timeouts.tcp_half_close

    # Push the restore and verify store, agent and original copies agree.
    netagent_cfg_api.UpdateConfigObjects(store_profile_objects)
    new_get_config_objects = netagent_cfg_api.GetConfigObjects(store_profile_objects)
    if len(new_get_config_objects) == 0:
        api.Logger.error("Unable to fetch security profile objects after update")
        return api.types.status.FAILURE

    for (get_object, store_object, old_object) in zip(new_get_config_objects, store_profile_objects, get_config_objects):
        if get_object.spec.timeouts.tcp_connection_setup != store_object.spec.timeouts.tcp_connection_setup or \
            old_object.spec.timeouts.tcp_connection_setup != store_object.spec.timeouts.tcp_connection_setup or \
            old_object.spec.timeouts.tcp_connection_setup != get_object.spec.timeouts.tcp_connection_setup:
            api.Logger.error("Second Update failed")
            return api.types.status.FAILURE

    # Delete the objects from the agent; a subsequent Get must be empty.
    netagent_cfg_api.DeleteConfigObjects(store_profile_objects)
    get_config_objects = netagent_cfg_api.GetConfigObjects(store_profile_objects)
    if len(get_config_objects) != 0:
        api.Logger.error("Delete of objects failed")
        return api.types.status.FAILURE

    # Re-push and make sure the objects are visible on the agent again.
    netagent_cfg_api.PushConfigObjects(store_profile_objects)
    get_config_objects = netagent_cfg_api.GetConfigObjects(store_profile_objects)
    if len(get_config_objects) == 0:
        api.Logger.error("Unable to fetch security profile objects")
        return api.types.status.FAILURE

    # Add a brand-new profile to the store from a topology json file.
    newObjects = netagent_cfg_api.AddOneConfig(api.GetTopologyDirectory() + "/test_cfg/test_security_profile.json")
    if len(newObjects) == 0:
        api.Logger.error("Adding new objects to store failed")
        return api.types.status.FAILURE

    # Push the new profile to a single node only.
    nodes = api.GetNaplesHostnames()
    push_nodes = [nodes[0]]
    ret = netagent_cfg_api.PushConfigObjects(newObjects, node_names=push_nodes)
    if ret != api.types.status.SUCCESS:
        # Message corrected: this branch is a failed *push*, not a fetch.
        api.Logger.error("Unable to push security profile objects to node %s" % nodes[0])
        return api.types.status.FAILURE

    # Get returns copies of pushed objects scoped to that node.
    get_config_objects = netagent_cfg_api.GetConfigObjects(newObjects, node_names=push_nodes)
    if len(get_config_objects) == 0:
        api.Logger.error("Unable to fetch newly pushed objects")
        return api.types.status.FAILURE

    # Delete the objects that were pushed and verify they are gone.
    netagent_cfg_api.DeleteConfigObjects(get_config_objects)
    get_config_objects = netagent_cfg_api.GetConfigObjects(newObjects, node_names=push_nodes)
    if len(get_config_objects) != 0:
        api.Logger.error("Delete of new objects failed")
        return api.types.status.FAILURE

    # Remove the new objects completely from the store too.
    ret = netagent_cfg_api.RemoveConfigObjects(newObjects)
    if ret != api.types.status.SUCCESS:
        api.Logger.error("Remove of new objects failed")
        return api.types.status.FAILURE

    return api.types.status.SUCCESS
Exemplo n.º 20
0
def Trigger(tc):
    """Push each mirror-session policy json, verify mirrored (ERSPAN)
    traffic to its collectors, flip the ERSPAN type via a config update
    and re-verify (ESX topologies only), then delete/remove the config.

    Accumulates the per-policy test count into tc and stops at the first
    failing policy.  Returns the last RunAll result status.
    """
    #if tc.skip: return api.types.status.SUCCESS

    policies = utils.GetTargetJsons('mirror', tc.iterators.proto)
    result = api.types.status.SUCCESS
    
    count = 0
    ret_count = 0
    collector_dest = []   # export destination IPs from the pushed objects
    collector_wl = []     # workloads owning those destination IPs
    collector_type = []   # ERSPAN type per collector
    for policy_json in policies:
        # Reset per-policy collector bookkeeping.
        collector_dest.clear()
        collector_wl.clear()
        collector_type.clear()

        verif_json = utils.GetVerifJsonFromPolicyJson(policy_json)
        api.Logger.info("Using policy_json = {}".format(policy_json))
        newObjects = agent_api.AddOneConfig(policy_json)
        if len (newObjects) == 0:
            api.Logger.error("Adding new objects to store failed")
            return api.types.status.FAILURE
        ret = agent_api.PushConfigObjects(newObjects)
        if ret != api.types.status.SUCCESS:
            api.Logger.error("Unable to push mirror objects")
            return api.types.status.FAILURE
        utils.DumpMirrorSessions()

        # Record every collector's export destination and ERSPAN type from
        # the objects just pushed, to locate the collector workloads.
        for obj in newObjects:
            for obj_collector in obj.spec.collectors:
                coll_dst = obj_collector.export_config.destination
                coll_type = obj_collector.type
                collector_dest.append(coll_dst)
                collector_type.append(coll_type)
                api.Logger.info(f"export-dest: {coll_dst}, erspan-type: {coll_type}")

        # Map each destination IP back to the workload owning it, matching
        # the primary address or a secondary IP (wl_sec_ip_info is a module
        # global -- presumably workload_name -> secondary IPs; confirm at
        # file top, outside this view).
        for coll_dst in collector_dest:
            for wl in tc.workloads:
                if (wl.ip_address == coll_dst) or (coll_dst in wl_sec_ip_info[wl.workload_name]):
                    collector_wl.append(wl)

        api.Logger.info("collect_dest len: {} collect_wl len: {}".format(len(collector_dest), len(collector_wl)))
        collector_info = utils.GetMirrorCollectorsInfo(collector_wl, collector_dest, collector_type)
        # is_wl_type_bm is a module global (bare-metal topology flag,
        # presumably -- defined outside this view).
        ret = utils.RunAll(tc, verif_json, 'mirror', collector_info, is_wl_type_bm)
        result = ret['res']
        ret_count = ret['count']
        count = count + ret_count

        if result != api.types.status.SUCCESS:
            api.Logger.info("policy_json = {}, Encountered FAILURE, stopping".format(policy_json))
            # Delete the objects
            agent_api.DeleteConfigObjects(newObjects)
            agent_api.RemoveConfigObjects(newObjects)
            break

        # Update collector
        newObjects = agent_api.QueryConfigs(kind='MirrorSession')
        # mirror config update to local collector is applicable only for ESX topology
        if is_wl_type_bm is False:
            # Toggle the ERSPAN type on the first collector of the first
            # session only (note the unconditional break), keeping
            # collector_info in sync for the re-run below.
            for obj in newObjects:
                if obj.spec.collectors[0].type == utils.ERSPAN_TYPE_2:
                    obj.spec.collectors[0].type = utils.ERSPAN_TYPE_3
                    collector_info[0]['type'] = utils.ERSPAN_TYPE_3
                else:
                    obj.spec.collectors[0].type = utils.ERSPAN_TYPE_2
                    collector_info[0]['type'] = utils.ERSPAN_TYPE_2
                break

            # Now push the update as we modified
            agent_api.UpdateConfigObjects(newObjects)
            utils.DumpMirrorSessions()

            # Rerun the tests
            ret = utils.RunAll(tc, verif_json, 'mirror', collector_info, is_wl_type_bm)
            result = ret['res']
            ret_count = ret['count']
            count = count + ret_count

        # Delete the objects
        agent_api.DeleteConfigObjects(newObjects)
        agent_api.RemoveConfigObjects(newObjects)
        api.Logger.info("policy_json = {}, count = {}, total_count = {}".format(policy_json, ret_count, count))
        if result != api.types.status.SUCCESS:
            api.Logger.info("policy_json = {}, Encountered FAILURE, stopping".format(policy_json))
            break
    tc.SetTestCount(count)
    collector_dest.clear()
    collector_wl.clear()
    return result
Exemplo n.º 21
0
def Trigger(tc):
    """Fuzz-test security policies between workload pairs.

    For each policy json: push the config, build per-Naples rule DBs,
    run randomized traffic (seeded so failures are reproducible from the
    log) between every workload pair up to tc.scale iterations, then
    compare observed rule statistics per node.  Config objects are
    deleted/removed on failure and exception paths.
    """
    policies = utils.GetTargetJsons(tc.iterators.proto)
    # Generate and log the random seed so a failing run can be replayed.
    seed = random.randrange(sys.maxsize)
    api.Logger.info("Seed val: %s"%seed)

    # Pre-initialize so the except handler below cannot hit an
    # UnboundLocalError when AddOneConfig itself raises.
    newObjects = None
    try:
        for policy_json in policies:
            newObjects = agent_api.AddOneConfig(policy_json)
            api.Logger.info("Created new object for %s"%policy_json)
            tc.ret = agent_api.PushConfigObjects(newObjects)
            rule_db_map = utils.SetupLocalRuleDbPerNaple(policy_json)
            if tc.ret != api.types.status.SUCCESS:
                return api.types.status.FAILURE

            scale = 0
            while scale < tc.scale:
                for w1, dst_workload_list in tc.workload_dict.items():
                    for w2 in dst_workload_list:
                        seed += 1
                        if scale >= tc.scale:
                            break

                        # If src and dst workload are behind the same
                        # Naples, only one rule db sees the packet.
                        if w1.node_name == w2.node_name:
                            w1_db = rule_db_map.get(w1.node_name, None)
                            w2_db = None
                        else:
                            w1_db = rule_db_map.get(w1.node_name, None)
                            w2_db = rule_db_map.get(w2.node_name, None)

                        api.Logger.info("(%s/%s) Running between w1: %s(%s) and w2: %s(%s)"%
                                        (scale+1, tc.scale, w1.ip_address, w1.workload_name,
                                         w2.ip_address,
                                         w2.workload_name))
                        tc.ret = utils.RunAll(1, w1, w2, w1_db, w2_db, FilterAndAlter, seed=seed)
                        if tc.ret != api.types.status.SUCCESS:
                            agent_api.DeleteConfigObjects(newObjects)
                            agent_api.RemoveConfigObjects(newObjects)
                            return tc.ret

                        scale += 1
                utils.clearNaplesSessions()

            # Verify per-node rule statistics against the local db.
            for node, db in rule_db_map.items():
                result = utils.compareStats(db, node, tc.iterators.proto)
                api.Logger.info("Comparison of rule stats for Node %s - %s"%
                                (node, "SUCCESS" \
                                 if result == api.types.status.SUCCESS \
                                 else "FAIL"))
                if result != api.types.status.SUCCESS:
                    agent_api.DeleteConfigObjects(newObjects)
                    agent_api.RemoveConfigObjects(newObjects)
                    tc.ret = result
                    return tc.ret

            if agent_api.DeleteConfigObjects(newObjects):
                api.Logger.error("Failed to delete config object for %s"%policy_json)

            if agent_api.RemoveConfigObjects(newObjects):
                api.Logger.error("Failed to remove config object for %s"%policy_json)

    except Exception as e:
        # Best-effort cleanup; newObjects is None if AddOneConfig never ran.
        if newObjects is not None:
            agent_api.DeleteConfigObjects(newObjects)
            agent_api.RemoveConfigObjects(newObjects)
        api.Logger.error("%s"%e)
        Teardown(tc)
        return api.types.status.FAILURE

    return api.types.status.SUCCESS