Example #1
def __update_endpoint_info(tc):
    for dest_host, workloads in tc.vmotion_cntxt.MoveRequest.items():
        api.Logger.debug(
            "Creating endpoint info at %s for workloads being moved" %
            dest_host)
        if not api.IsNaplesNode(dest_host):
            continue
        for wl in workloads:
            api.Logger.debug("Updating ep-info for %s" % wl.workload_name)
            ep_filter = "meta.name=" + wl.workload_name + ";"
            objects = agent_api.QueryConfigs("Endpoint", filter=ep_filter)
            assert (len(objects) == 1)
            obj = copy.deepcopy(objects[0])
            # update to indicate completion of vmotion
            obj.spec.migration = "DONE"
            obj.spec.node_uuid = tc.vmotion_cntxt.UUIDMap[dest_host]
            resp = agent_api.UpdateConfigObjects([obj], [dest_host],
                                                 ignore_error=True)
            if resp != api.types.status.SUCCESS:
                api.Logger.error(
                    "Failed to update migration status to DONE for %s on %s" %
                    (wl.workload_name, dest_host))

            # update to keep new node happy, only in iota
            obj.spec.migration = None
            obj.spec.node_uuid = tc.vmotion_cntxt.UUIDMap[dest_host]
            resp = agent_api.UpdateConfigObjects([obj], [dest_host],
                                                 ignore_error=True)
            if resp != api.types.status.SUCCESS:
                api.Logger.error(
                    "Failed to reset migration state to None for %s on %s" %
                    (wl.workload_name, dest_host))
    api.Logger.debug("Completed endpoint update at NewHome")
    return
Example #2
def update_ep_migr_status(tc, wl, node, migr_state):
    ep_filter = "meta.name=" + wl.workload_name + ";"
    objects = agent_api.QueryConfigs("Endpoint", filter=ep_filter)
    assert len(objects) == 1
    # update to indicate completion of vmotion
    obj = copy.deepcopy(objects[0])
    obj.spec.migration = migr_state
    obj.spec.node_uuid = tc.uuidMap[node]
    agent_api.UpdateConfigObjects([obj], [node], ignore_error=True)
    # update to keep new node happy, only in iota
    obj.spec.migration = None
    obj.spec.node_uuid = tc.uuidMap[node]
    agent_api.UpdateConfigObjects([obj], [node], ignore_error=True)
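Example #2 discards the status returned by UpdateConfigObjects. A minimal sketch of the same two-step update with the return codes checked, mirroring the status handling of Example #1 and assuming the same copy/agent_api/api imports the surrounding examples rely on (the helper name is illustrative):

def update_ep_migr_status_checked(tc, wl, node, migr_state):
    ep_filter = "meta.name=" + wl.workload_name + ";"
    objects = agent_api.QueryConfigs("Endpoint", filter=ep_filter)
    assert len(objects) == 1
    obj = copy.deepcopy(objects[0])
    # step 1: record the requested migration state on the target node
    obj.spec.migration = migr_state
    obj.spec.node_uuid = tc.uuidMap[node]
    resp = agent_api.UpdateConfigObjects([obj], [node], ignore_error=True)
    if resp != api.types.status.SUCCESS:
        api.Logger.error("Migration state update failed for %s on %s" %
                         (wl.workload_name, node))
        return api.types.status.FAILURE
    # step 2: clear the field so the new node's config stays consistent
    obj.spec.migration = None
    resp = agent_api.UpdateConfigObjects([obj], [node], ignore_error=True)
    if resp != api.types.status.SUCCESS:
        return api.types.status.FAILURE
    return api.types.status.SUCCESS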
Example #3
def SetSessionLimit(session, limit):
    #The query returns references to the objects in the store
    store_profile_objects = netagent_cfg_api.QueryConfigs(
        kind='SecurityProfile')

    if len(store_profile_objects) == 0:
        api.Logger.error("No security profile objects in store")
        return api.types.status.FAILURE

    for object in store_profile_objects:
        if (session == 'tcp'):
            object.spec.rate_limits.tcp_half_open_session_limit = limit
        elif (session == 'udp'):
            object.spec.rate_limits.udp_active_session_limit = limit
        elif (session == 'icmp'):
            object.spec.rate_limits.icmp_active_session_limit = limit
        elif (session == 'other'):
            object.spec.rate_limits.other_session_limit = limit
        elif (session == 'all'):
            object.spec.rate_limits.tcp_half_open_session_limit = limit
            object.spec.rate_limits.udp_active_session_limit = limit
            object.spec.rate_limits.icmp_active_session_limit = limit
            object.spec.rate_limits.other_session_limit = limit
        else:
            api.Logger.error("unsupported security profile session type %s" %
                             session)
            return api.types.status.FAILURE

    #Push the update now that the objects have been modified.
    netagent_cfg_api.UpdateConfigObjects(store_profile_objects)

    time.sleep(5)

    return api.types.status.SUCCESS
Example #4
def update_sgpolicy(app_name, allowDefault=False):
    #The query returns references to the objects in the store
    store_policy_objects = netagent_cfg_api.QueryConfigs(
        kind='NetworkSecurityPolicy')
    if len(store_policy_objects) == 0:
        api.Logger.error("No SG Policy objects in store")
        return api.types.status.FAILURE

    for object in store_policy_objects:
        rules = len(object.spec.policy_rules)
        if (rules == 0):
            continue
        #We don't want to keep updating the same policy
        defaultRule = object.spec.policy_rules.pop()
        if app_name is not None:
            if (hasattr(object.spec.policy_rules[rules-2], 'app_name') and \
                object.spec.policy_rules[rules-2].app_name == app_name):
                continue
            newRule = copy.deepcopy(object.spec.policy_rules[0])
            newRule.source.addresses = ['any']
            newRule.destination.addresses = ['any']
            newRule.app_name = app_name
            newRule.destination.app_configs = None
            newRule.destination.proto_ports = None
            newRule.action = 'PERMIT'
            object.spec.policy_rules.append(newRule)

        if not allowDefault:
            defaultRule.action = 'DENY'
        else:
            defaultRule.action = 'PERMIT'
        object.spec.policy_rules.append(defaultRule)

    #Push the update now that the objects have been modified.
    netagent_cfg_api.UpdateConfigObjects(store_policy_objects)
    return api.types.status.SUCCESS
Example #5
def update_timeout(timeout, val):
    #The query returns references to the objects in the store
    store_profile_objects = netagent_cfg_api.QueryConfigs(
        kind='SecurityProfile')
    if len(store_profile_objects) == 0:
        api.Logger.error("No security profile objects in store")
        return api.types.status.FAILURE

    for obj in store_profile_objects:
        if timeout == 'tcp-timeout':
            obj.spec.timeouts.tcp = val
        elif timeout == 'udp-timeout':
            obj.spec.timeouts.udp = val
        elif timeout == 'icmp-timeout':
            obj.spec.timeouts.icmp = val
        elif timeout == 'tcp-half-close':
            obj.spec.timeouts.tcp_half_close = val
        elif timeout == 'tcp-close':
            obj.spec.timeouts.tcp_close = val
        elif timeout == 'tcp-connection-setup':
            obj.spec.timeouts.tcp_connection_setup = val
        elif timeout == 'tcp-drop':
            obj.spec.timeouts.tcp_drop = val
        elif timeout == 'udp-drop':
            obj.spec.timeouts.udp_drop = val
        elif timeout == 'icmp-drop':
            obj.spec.timeouts.icmp_drop = val

    #Push the update now that the objects have been modified.
    netagent_cfg_api.UpdateConfigObjects(store_profile_objects)

    return api.types.status.SUCCESS
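The timeout names above map one-to-one onto attributes of spec.timeouts, so the same update can also be written as a table lookup. A hedged sketch of that variant (the TIMEOUT_ATTRS table and the function name are illustrative, not part of the original suite):

TIMEOUT_ATTRS = {
    'tcp-timeout': 'tcp',
    'udp-timeout': 'udp',
    'icmp-timeout': 'icmp',
    'tcp-half-close': 'tcp_half_close',
    'tcp-close': 'tcp_close',
    'tcp-connection-setup': 'tcp_connection_setup',
    'tcp-drop': 'tcp_drop',
    'udp-drop': 'udp_drop',
    'icmp-drop': 'icmp_drop',
}

def update_timeout_via_table(timeout, val):
    store_profile_objects = netagent_cfg_api.QueryConfigs(
        kind='SecurityProfile')
    if len(store_profile_objects) == 0:
        api.Logger.error("No security profile objects in store")
        return api.types.status.FAILURE
    attr = TIMEOUT_ATTRS.get(timeout)
    if attr is None:
        api.Logger.error("unsupported timeout type %s" % timeout)
        return api.types.status.FAILURE
    for obj in store_profile_objects:
        # setattr writes the same spec.timeouts field the if-chain would
        setattr(obj.spec.timeouts, attr, val)
    netagent_cfg_api.UpdateConfigObjects(store_profile_objects)
    return api.types.status.SUCCESS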
Example #6
def __modify_security_profile(tc):
    sp_objects = netagent_api.QueryConfigs(kind='SecurityProfile')
    tc.cloned_sp_objects = netagent_api.CloneConfigObjects(sp_objects)
    for obj in sp_objects:
        obj.spec.timeouts.tcp = "1s"
        obj.spec.timeouts.udp = "1s"
        obj.spec.timeouts.tcp_half_close = "1s"
        obj.spec.timeouts.tcp_close = "1s"
        obj.spec.timeouts.tcp_connection_setup = "10s"
    return netagent_api.UpdateConfigObjects(sp_objects)
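This helper clones the profiles before tightening the timeouts; Example #19's __restore_security_profile pushes those clones back to undo the change. A sketch of how the pair is typically wired into a testcase's Setup/Teardown hooks (the hook usage is assumed, not shown in the original):

def Setup(tc):
    # clone-then-modify: pristine copies land on tc.cloned_sp_objects
    return __modify_security_profile(tc)

def Teardown(tc):
    # push the pre-modification clones back (see Example #19)
    return __restore_security_profile(tc)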
Example #7
def updateFlowmonCollectors(tc, num_exports):
    result = api.types.status.SUCCESS

    coll_dst_port_list = {}
    for idx in range(num_exports_at_create, num_exports):
        coll_dst_port_list[idx] = random.randint(100, 10000)

    for iteration in range(tc.iterators.num_flowmon_sessions):
        flowmon_objects = tc.test_iterator_data[iteration]['del_obj']
        local_wl = tc.test_iterator_data[iteration]['local_wl']
        peer_wl = tc.test_iterator_data[iteration]['peer_wl']
        coll_wl_list = tc.test_iterator_data[iteration]['coll_wl_list']
        coll_ip_list = tc.test_iterator_data[iteration]['coll_ip_list']
        export_cfg_list = tc.test_iterator_data[iteration]['export_cfg_list']

        utils.generateFlowmonCollectorConfig(flowmon_objects, num_exports)

        if num_exports > len(coll_ip_list):
            api.Logger.info(
                "Collector IP list {} is smaller than requested collectors {} "
                "for this test! bailing out!!!".format(len(coll_ip_list),
                                                       num_exports))
            result = api.types.status.FAILURE
            break

        for obj in flowmon_objects:
            old_export_count = num_exports_at_create
            for c in range(old_export_count, num_exports):
                obj.spec.exports[c].destination = "{}".format(coll_ip_list[c])
                obj.spec.exports[c].proto_port.port = "{}".format(
                    coll_dst_port_list[c])
                export_cfg_list.append(obj.spec.exports[c])

                #api.Logger.info("updating export idx: {} to dst: {} port: {} from collector_WL: {}".format(c,
                #        obj.spec.exports[c].destination, obj.spec.exports[c].proto_port.port, coll_wl_list[c]))

        if num_exports > len(export_cfg_list):
            api.Logger.info(
                "Export cfg list {} is smaller than requested exports {} "
                "for this test! bailing out!!!".format(len(export_cfg_list),
                                                       num_exports))
            result = api.types.status.FAILURE
            break

        result = agent_api.UpdateConfigObjects(flowmon_objects,
                                               [local_wl.node_name])
        #agent_api.PrintConfigObjects(flowmon_objects)

        if result != api.types.status.SUCCESS:
            api.Logger.error("Unable to push updates to flowmon objects")
            result = api.types.status.FAILURE
            break
    return result
Example #8
def AddRemoveCollectorsOnInterface(tc, coObjs):
    node_name = random.choice(api.GetNaplesHostnames())
    api.Logger.info("Pushing lif telemetry on %s" % node_name)
    for obj in tc.node_intf_obj_map[node_name]:
        del obj.spec.TxCollectors[:]
        del obj.spec.RxCollectors[:]
        for coObj in coObjs:
            obj.spec.RxCollectors.append(coObj.meta.name)
            obj.spec.TxCollectors.append(coObj.meta.name)
        ret = agent_api.UpdateConfigObjects(tc.node_intf_obj_map[node_name],
                                            [node_name])
        if ret != api.types.status.SUCCESS:
            api.Logger.error("Failed to push the interface objects")
            return api.types.status.FAILURE

    for obj in tc.node_intf_obj_map[node_name]:
        del obj.spec.TxCollectors[:]
        del obj.spec.RxCollectors[:]
        ret = agent_api.UpdateConfigObjects(tc.node_intf_obj_map[node_name],
                                            [node_name])
        if ret != api.types.status.SUCCESS:
            api.Logger.error("Failed to push the interface objects")
            return api.types.status.FAILURE
    return api.types.status.SUCCESS
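Note that Example #8 pushes the node's full interface list once per object in the loop, so every iteration re-sends all objects. If a single push after mutating all objects is sufficient (an assumption about the intended semantics, not stated in the original), the add phase condenses to the following sketch; the helper name is illustrative:

def _attach_collectors(tc, node_name, coObjs):
    # rewrite the Tx/Rx collector lists on every interface object first...
    for obj in tc.node_intf_obj_map[node_name]:
        del obj.spec.TxCollectors[:]
        del obj.spec.RxCollectors[:]
        for coObj in coObjs:
            obj.spec.RxCollectors.append(coObj.meta.name)
            obj.spec.TxCollectors.append(coObj.meta.name)
    # ...then push the whole list once
    return agent_api.UpdateConfigObjects(tc.node_intf_obj_map[node_name],
                                         [node_name])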
Example #9
def Trigger(tc):

    store_policy_objects = netagent_api.QueryConfigs(
        kind='NetworkSecurityPolicy')

    wait = getattr(tc.args, "wait", 30)

    time.sleep(int(wait))
    action = str(getattr(tc.args, "action"))
    __update_policy_actions(store_policy_objects, action)
    ret = netagent_api.UpdateConfigObjects(store_policy_objects)
    if ret != api.types.status.SUCCESS:
        return api.types.status.FAILURE

    return api.types.status.SUCCESS
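Example #9 relies on a helper __update_policy_actions that is not shown here. Based on how Examples #4 and #12 walk spec.policy_rules, a plausible (hypothetical) reconstruction is:

def __update_policy_actions(policy_objects, action):
    # set every rule of every queried policy to the requested action,
    # e.g. 'PERMIT' or 'DENY' (hypothetical reconstruction)
    for obj in policy_objects:
        for rule in obj.spec.policy_rules:
            rule.action = action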
Example #10
def update_field(field, val):
    #The query returns references to the objects in the store
    store_profile_objects = netagent_cfg_api.QueryConfigs(
        kind='SecurityProfile')
    if len(store_profile_objects) == 0:
        api.Logger.error("No security profile objects in store")
        return api.types.status.FAILURE

    for object in store_profile_objects:
        if (field == 'enable-connection-tracking'):
            object.spec.enable_connection_tracking = val

    #Push the update now that the objects have been modified.
    netagent_cfg_api.UpdateConfigObjects(store_profile_objects)

    return api.types.status.SUCCESS
Example #11
def Trigger(tc):
    result = api.types.status.SUCCESS
    mirrorPolicies = utils.GetTargetJsons('mirror', "scale")
    flowmonPolicies = utils.GetTargetJsons('flowmon', "scale")
    #colPolicies = utils.GetTargetJsons('mirror', "collector")
    iters = getattr(tc.args, "iters", 10)
    iters = 1  # overrides the arg above; only a single iteration is run
    mpObjs = fpObjs = []  # placeholders; both are rebound inside the loop
    for mp_json, fp_json in zip(mirrorPolicies, flowmonPolicies):
        for i in range(iters):
            #
            # Push Mirror Session and Flow Export objects
            #
            mpObjs = agent_api.AddOneConfig(mp_json)
            fpObjs = agent_api.AddOneConfig(fp_json)
            ret = agent_api.PushConfigObjects(mpObjs + fpObjs)
            if ret != api.types.status.SUCCESS:
                api.Logger.error("Failed to push the telemetry objects")
                return api.types.status.FAILURE

            #
            # Update Mirror Session and Flow Export objects
            #
            mpObjs = UpdateMirrorSessionObjects(mpObjs)
            fpObjs = UpdateFlowMonitorObjects(fpObjs)
            ret = agent_api.UpdateConfigObjects(mpObjs + fpObjs)
            if ret != api.types.status.SUCCESS:
                api.Logger.error("Failed to update the telemetry objects")
                return api.types.status.FAILURE

            #
            # Delete Mirror Session and Flow Export objects
            #
            ret = agent_api.DeleteConfigObjects(fpObjs + mpObjs)
            if ret != api.types.status.SUCCESS:
                api.Logger.error("Failed to delete the telemetry objects")
                return api.types.status.FAILURE
            ret = agent_api.RemoveConfigObjects(mpObjs + fpObjs)
            if ret != api.types.status.SUCCESS:
                api.Logger.error("Failed to remove the telemetry objects")
                return api.types.status.FAILURE

    return result
Example #12
def update_sgpolicy(src, dst, proto, dport, action="DENY"):
    #The query returns references to the objects in the store
    store_policy_objects = netagent_cfg_api.QueryConfigs(
        kind='NetworkSecurityPolicy')
    if len(store_policy_objects) == 0:
        api.Logger.error("No NetworkSecurityPolicy objects in store")
        return api.types.status.FAILURE

    for object in store_policy_objects:
        for rule in object.spec.policy_rules:
            if (rule.action == action
                    and rule.destination.proto_ports is not None):
                for app_config in rule.destination.proto_ports:
                    if app_config.protocol == proto:
                        app_config.port = str(dport)

    #Push the update now that the objects have been modified.
    netagent_cfg_api.UpdateConfigObjects(store_policy_objects)

    return api.types.status.SUCCESS
Example #13
def update_app(app, timeout, field=None, val=None, isstring=False):
    #The query returns references to the objects in the store
    store_app_objects = netagent_cfg_api.QueryConfigs(kind='App')
    if len(store_app_objects) == 0:
        api.Logger.error("No App objects in store")
        return api.types.status.FAILURE

    for object in store_app_objects:
        if object.meta.name == app:
            object.spec.app_idle_timeout = timeout
            if field is not None:
                # builds and executes "object.spec.alg.<app>.<field> = <val>"
                target = 'object.spec.alg' + '.' + app + '.' + field
                if isstring:
                    exec(target + "=" + "'%s'" % (val))
                else:
                    exec(target + "=" + val)

    #Push the update now that the objects have been modified.
    netagent_cfg_api.UpdateConfigObjects(store_app_objects)

    return api.types.status.SUCCESS
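Building assignment strings for exec is fragile and invisible to linters. A hedged getattr/setattr equivalent (helper name illustrative); the eval in the non-string branch mirrors what the exec form does and is only safe for trusted val inputs:

def _set_alg_field(app_obj, app, field, val, isstring):
    # navigate to <app_obj>.spec.alg.<app> and assign <field> on it
    alg_cfg = getattr(app_obj.spec.alg, app)
    setattr(alg_cfg, field, val if isstring else eval(val))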
Example #14
def increase_timeout():
    #The query returns references to the objects in the store
    store_profile_objects = agent_api.QueryConfigs(kind='SecurityProfile')
    if len(store_profile_objects) == 0:
        api.Logger.error("No security profile objects in store")
        return api.types.status.FAILURE

    for object in store_profile_objects:
        object.spec.timeouts.session_idle = "360s"
        object.spec.timeouts.tcp = "360s"
        object.spec.timeouts.udp = "360s"
        object.spec.timeouts.icmp = "120s"
        object.spec.timeouts.tcp_half_close = "360s"
        object.spec.timeouts.tcp_close = "360s"
        object.spec.timeouts.tcp_connection_setup = "60s"
        object.spec.timeouts.tcp_drop = "360s"
        object.spec.timeouts.udp_drop = "60s"
        object.spec.timeouts.icmp_drop = "300s"

    #Push the update now that the objects have been modified.
    agent_api.UpdateConfigObjects(store_profile_objects)
    return api.types.status.SUCCESS
Example #15
def Trigger(tc):
    if tc.ignore == True:
        return api.types.status.SUCCESS

    if tc.error == True:
        return api.types.status.FAILURE

    protoDir1 = api.GetTopologyDirectory() +\
                "/gen/telemetry/{}/{}".format('mirror', 'lif')
    api.Logger.info("Template Config files location: ", protoDir1)
    protoDir2 = api.GetTopologyDirectory() +\
                "/gen/telemetry/{}/{}".format('mirror', tc.iterators.proto)
    api.Logger.info("Template Config files location: ", protoDir2)
    protoDir3 = api.GetTopologyDirectory() +\
                "/gen/telemetry/{}/{}".format('flowmon', tc.iterators.proto)
    api.Logger.info("Template Config files location: ", protoDir3)

    result = api.types.status.SUCCESS

    count = 0
    MirrorPolicies = utils.GetTargetJsons('mirror', tc.iterators.proto)
    FlowMonPolicies = utils.GetTargetJsons('flowmon', tc.iterators.proto)
    LifPolicies = utils.GetTargetJsons('mirror', 'lif')
    flowmon_policy_idx = 0
    ret_count = 0
    for mirror_json in MirrorPolicies:
        #
        # Get template-Mirror Config
        #
        newMirrorObjects = agent_api.AddOneConfig(mirror_json)
        if len(newMirrorObjects) == 0:
            api.Logger.error("Adding new Mirror objects to store failed")
            tc.error = True
            return api.types.status.FAILURE
        agent_api.RemoveConfigObjects(newMirrorObjects)

        #
        # Ignore multi-collector template configs, since the Expanded-Telemetry
        # testbundle dynamically creates such configs
        #
        if len(newMirrorObjects[0].spec.collectors) > 1:
            continue

        idx = 0
        for flowmon_json in FlowMonPolicies:
            if idx < flowmon_policy_idx:
                idx += 1
                continue

            #
            # Get template-FlowMon Config
            #
            newFlowMonObjects = agent_api.AddOneConfig(flowmon_json)
            if len(newFlowMonObjects) == 0:
                api.Logger.error("Adding new FlowMon objects to store failed")
                tc.error = True
                return api.types.status.FAILURE
            agent_api.RemoveConfigObjects(newFlowMonObjects)

            #
            # Ignore multi-collector template configs, since the
            # Expanded-Telemetry testbundle dynamically creates such configs
            #
            if len(newFlowMonObjects[0].spec.exports) > 1:
                flowmon_policy_idx += 1
                idx += 1
                continue

            #
            # Modify template-Mirror / template-FlowMon Config to make sure
            # that the Naples node acts as either source or destination
            #
            # Set up Collector in the remote node
            #
            eutils.generateMirrorConfig(tc, mirror_json, newMirrorObjects)
            eutils.generateFlowMonConfig(tc, flowmon_json, newFlowMonObjects)

            ret_count = 0
            for i in range(0, len(tc.mirror_verif)):
                #
                # If Execution-Optimization is enabled, no need to run the
                # test for the same protocol more than once
                #
                if i > 0 and tc.mirror_verif[i]['protocol'] ==\
                             tc.mirror_verif[i-1]['protocol']:
                    continue

                #
                # Flow-ERSPAN for TCP-traffic is not tested (yet) in
                # Classic-mode until applicable pkt-trigger tools are
                # identified
                #
                if tc.classic_mode == True and\
                   tc.mirror_verif[i]['protocol'] == 'tcp':
                    continue

                for policy_json in LifPolicies:
                    #
                    # Get template-Mirror Config
                    #
                    newObjects = agent_api.AddOneConfig(policy_json)
                    if len(newObjects) == 0:
                        api.Logger.error("Adding new objects to store failed")
                        tc.error = True
                        return api.types.status.FAILURE

                    #
                    # Modify template-Mirror Config to make sure that
                    # the Naples node acts as either source or destination
                    #
                    # Set up Collector in the remote node
                    #
                    if newObjects[0].kind == 'InterfaceMirrorSession':
                        tc.lif_collector_objects = newObjects
                        agent_api.RemoveConfigObjects(tc.lif_collector_objects)
                    elif newObjects[0].kind == 'Interface':
                        tc.interface_objects = newObjects
                        agent_api.RemoveConfigObjects(tc.interface_objects)

                #
                # Push Collector Config to Naples
                #
                colObjects = tc.lif_collector_objects
                ret = eutils.generateLifCollectorConfig(tc, colObjects)
                if ret != api.types.status.SUCCESS:
                    api.Logger.error("Unable to identify Collector Workload")
                    tc.error = True
                    return api.types.status.FAILURE

                ret = agent_api.PushConfigObjects(colObjects,
                                                  [tc.naples.node_name],
                                                  [tc.naples_device_name])
                if ret != api.types.status.SUCCESS:
                    api.Logger.error("Unable to push collector objects")
                    tc.error = True
                    return api.types.status.FAILURE

                #
                # Push Mirror / FlowMon Config to Naples
                #
                ret = agent_api.PushConfigObjects(newMirrorObjects,
                                                  [tc.naples.node_name],
                                                  [tc.naples_device_name])
                if ret != api.types.status.SUCCESS:
                    agent_api.DeleteConfigObjects(tc.lif_collector_objects,
                                                  [tc.naples.node_name],
                                                  [tc.naples_device_name])
                    api.Logger.error("Unable to push mirror objects")
                    tc.error = True
                    return api.types.status.FAILURE

                ret = agent_api.PushConfigObjects(newFlowMonObjects,
                                                  [tc.naples.node_name],
                                                  [tc.naples_device_name])
                if ret != api.types.status.SUCCESS:
                    agent_api.DeleteConfigObjects(tc.lif_collector_objects,
                                                  [tc.naples.node_name],
                                                  [tc.naples_device_name])
                    agent_api.DeleteConfigObjects(newMirrorObjects,
                                                  [tc.naples.node_name],
                                                  [tc.naples_device_name])
                    api.Logger.error("Unable to push flowmon objects")
                    tc.error = True
                    return api.types.status.FAILURE

                #
                # Update Interface objects
                #
                ifObjects = tc.interface_objects
                ret = eutils.generateLifInterfaceConfig(
                    tc, ifObjects, tc.lif_collector_objects)
                if ret != api.types.status.SUCCESS:
                    agent_api.DeleteConfigObjects(tc.lif_collector_objects,
                                                  [tc.naples.node_name],
                                                  [tc.naples_device_name])
                    agent_api.DeleteConfigObjects(newMirrorObjects,
                                                  [tc.naples.node_name],
                                                  [tc.naples_device_name])
                    agent_api.DeleteConfigObjects(newFlowMonObjects,
                                                  [tc.naples.node_name],
                                                  [tc.naples_device_name])
                    api.Logger.error(
                        "Unable to identify Uplink/LIF Interfaces")
                    tc.error = True
                    return api.types.status.FAILURE

                ret = agent_api.UpdateConfigObjects(ifObjects,
                                                    [tc.naples.node_name],
                                                    [tc.naples_device_name])
                if ret != api.types.status.SUCCESS:
                    agent_api.DeleteConfigObjects(tc.lif_collector_objects,
                                                  [tc.naples.node_name],
                                                  [tc.naples_device_name])
                    agent_api.DeleteConfigObjects(newMirrorObjects,
                                                  [tc.naples.node_name],
                                                  [tc.naples_device_name])
                    agent_api.DeleteConfigObjects(newFlowMonObjects,
                                                  [tc.naples.node_name],
                                                  [tc.naples_device_name])
                    api.Logger.error("Unable to update interface objects")
                    tc.error = True
                    return api.types.status.FAILURE

                #
                # Establish Forwarding set up between Naples-peer and
                # Collectors
                #
                eutils.establishForwardingSetup(tc)

                #
                # Give a little time for flows clean-up to happen so that
                # stale IPFIX records don't show up
                #
                if tc.classic_mode == True:
                    time.sleep(1)

                if tc.collection == 'distinct':
                    req_tcpdump_flow_erspan = \
                    api.Trigger_CreateExecuteCommandsRequest(serial = True)
                    for c in range(0, len(tc.flow_collector)):
                        #
                        # Set up TCPDUMP's on the collector
                        #
                        idx = tc.flow_collector_idx[c]
                        if tc.flow_collector[c].IsNaples():
                            cmd = "tcpdump -c 600 -XX -vv -nni {} ip proto\
                            gre and dst {} --immediate-mode -U\
                            -w flow-mirror-{}.pcap"\
                            .format(tc.flow_collector[c].interface,
                                    tc.collector_ip_address[idx], c)
                        else:
                            cmd = "tcpdump -p -c 600 -XX -vv -nni {} ip\
                            proto gre and dst {} --immediate-mode -U\
                            -w flow-mirror-{}.pcap"\
                            .format(tc.flow_collector[c].interface,
                                    tc.collector_ip_address[idx], c)
                        eutils.add_command(req_tcpdump_flow_erspan,
                                           tc.flow_collector[c], cmd, True)

                    resp_tcpdump_flow_erspan = api.Trigger(\
                                               req_tcpdump_flow_erspan)
                    for cmd in resp_tcpdump_flow_erspan.commands:
                        api.PrintCommandResults(cmd)

                    #
                    # Classic mode requires a delay to make sure that TCPDUMP
                    # background process is fully up
                    #
                    if tc.classic_mode == True:
                        time.sleep(1)

                req_tcpdump_lif_erspan = \
                api.Trigger_CreateExecuteCommandsRequest(serial = True)
                req_tcpdump_flowmon = api.Trigger_CreateExecuteCommandsRequest(\
                                      serial = True)
                for c in range(0, len(tc.lif_collector)):
                    #
                    # Set up TCPDUMP's on the collector
                    #
                    idx = tc.lif_collector_idx[c]
                    if tc.lif_collector[c].IsNaples():
                        cmd = "tcpdump -c 1000 -XX -vv -nni {} ip proto gre\
                              and dst {} --immediate-mode -U\
                              -w lif-mirror-{}.pcap"\
                              .format(tc.lif_collector[c].interface,
                                      tc.collector_ip_address[idx], c)
                    else:
                        cmd = "tcpdump -p -c 1000 -XX -vv -nni {} ip proto\
                              gre and dst {} --immediate-mode -U\
                              -w lif-mirror-{}.pcap"\
                              .format(tc.lif_collector[c].interface,
                                      tc.collector_ip_address[idx], c)
                    eutils.add_command(req_tcpdump_lif_erspan,
                                       tc.lif_collector[c], cmd, True)

                for c in range(0, len(tc.flowmon_collector)):
                    #
                    # Set up TCPDUMP's on the collector
                    #
                    idx = tc.flowmon_collector_idx[c]
                    if tc.flowmon_collector[c].IsNaples():
                        cmd = "tcpdump -c 600 -XX -vv -nni {} udp and\
                               dst port {} and dst {} --immediate-mode\
                               -U -w flowmon-{}.pcap"\
                        .format(tc.flowmon_collector[c].interface,
                        tc.export_port[c], tc.collector_ip_address[idx], c)
                    else:
                        cmd = "tcpdump -p -c 600 -XX -vv -nni {} udp\
                               and dst port {} and dst {}\
                               --immediate-mode -U -w flowmon-{}.pcap"\
                        .format(tc.flowmon_collector[c].interface,
                        tc.export_port[c], tc.collector_ip_address[idx], c)
                    eutils.add_command(req_tcpdump_flowmon,
                                       tc.flowmon_collector[c], cmd, True)

                resp_tcpdump_lif_erspan = api.Trigger(\
                                          req_tcpdump_lif_erspan)
                for cmd in resp_tcpdump_lif_erspan.commands:
                    api.PrintCommandResults(cmd)

                #
                # Classic mode requires a delay to make sure that TCPDUMP
                # background process is fully up
                #
                if tc.classic_mode == True:
                    time.sleep(2)

                resp_tcpdump_flowmon = api.Trigger(req_tcpdump_flowmon)
                for cmd in resp_tcpdump_flowmon.commands:
                    api.PrintCommandResults(cmd)

                #
                # Classic mode requires a delay to make sure that TCPDUMP
                # background process is fully up
                #
                if tc.classic_mode == True:
                    time.sleep(2)

                #
                # Trigger packets for ERSPAN / FLOWMON to take effect
                #
                tc.protocol = tc.mirror_verif[i]['protocol']
                tc.dest_port = utils.GetDestPort(tc.mirror_verif[i]['port'])

                protocol = tc.protocol
                tc.protocol = 'all'
                if api.GetNodeOs(tc.naples.node_name) == 'linux':
                    eutils.triggerTrafficInClassicModeLinux(tc)
                else:
                    eutils.triggerTrafficInHostPinModeOrFreeBSD(tc)
                tc.protocol = protocol

                #
                # Dump sessions/flows/P4-tables for debug purposes
                #
                eutils.showSessionAndP4TablesForDebug(tc, tc.lif_collector,
                                                      tc.lif_collector_idx)

                #
                # Terminate TCPDUMP background process
                #
                term_resp_tcpdump_lif_erspan = \
                api.Trigger_TerminateAllCommands(resp_tcpdump_lif_erspan)
                tc.resp_tcpdump_lif_erspan = \
                api.Trigger_AggregateCommandsResponse(\
                resp_tcpdump_lif_erspan, term_resp_tcpdump_lif_erspan)

                if tc.collection == 'distinct':
                    term_resp_tcpdump_flow_erspan = \
                    api.Trigger_TerminateAllCommands(resp_tcpdump_flow_erspan)
                    tc.resp_tcpdump_flow_erspan = \
                    api.Trigger_AggregateCommandsResponse(\
                    resp_tcpdump_flow_erspan, term_resp_tcpdump_flow_erspan)

                term_resp_tcpdump_flowmon = api.Trigger_TerminateAllCommands(\
                                            resp_tcpdump_flowmon)
                tc.resp_tcpdump_flowmon = \
                api.Trigger_AggregateCommandsResponse(resp_tcpdump_flowmon,
                                                      term_resp_tcpdump_flowmon)

                # Delete the objects
                eutils.deGenerateLifInterfaceConfig(tc, tc.interface_objects,
                                                    tc.lif_collector_objects)
                agent_api.UpdateConfigObjects(tc.interface_objects,
                                              [tc.naples.node_name],
                                              [tc.naples_device_name])
                agent_api.DeleteConfigObjects(tc.lif_collector_objects,
                                              [tc.naples.node_name],
                                              [tc.naples_device_name])
                agent_api.DeleteConfigObjects(newMirrorObjects,
                                              [tc.naples.node_name],
                                              [tc.naples_device_name])
                agent_api.DeleteConfigObjects(newFlowMonObjects,
                                              [tc.naples.node_name],
                                              [tc.naples_device_name])

                #
                # Make sure that Mirror-config has been removed
                #
                tc.resp_cleanup = eutils.showP4TablesForValidation(tc)

                #
                # Validate ERSPAN packets reception
                #
                tc.tcp_erspan_pkts_expected = \
                               NUMBER_OF_TCP_ERSPAN_PACKETS_PER_SESSION
                tc.udp_erspan_pkts_expected = (tc.udp_count << 1)
                tc.icmp_erspan_pkts_expected = (tc.udp_count << 1)+\
                                               (tc.icmp_count << 1)

                if tc.iterators.direction != 'both':
                    tc.tcp_erspan_pkts_expected >>= 1
                    tc.udp_erspan_pkts_expected >>= 1
                    tc.icmp_erspan_pkts_expected >>= 1

                if tc.dupcheck == 'disable':
                    tc.tcp_erspan_pkts_expected = \
                    (tc.tcp_erspan_pkts_expected+1) << 1
                    tc.udp_erspan_pkts_expected <<= 1
                    tc.icmp_erspan_pkts_expected <<= 1

                #
                # Adjust Expected-pkt-counts taking into account Flow-ERSPAN
                # Config's
                #
                if tc.collection == 'unified':
                    if (tc.protocol == 'tcp' or tc.iterators.proto == 'mixed')\
                        and tc.iterators.direction != 'both':
                        tc.tcp_erspan_pkts_expected <<= 1
                    if (tc.protocol == 'udp' or tc.iterators.proto == 'mixed')\
                        and tc.iterators.direction != 'both':
                        tc.udp_erspan_pkts_expected <<= 1
                    if tc.protocol == 'icmp' or tc.iterators.proto == 'mixed':
                        if tc.iterators.direction != 'both':
                            tc.icmp_erspan_pkts_expected <<= 1
                        #if tc.iterators.direction != 'egress':
                        #    tc.icmp_erspan_pkts_expected += 1

                protocol = tc.protocol
                tc.protocol = 'all'
                tc.feature = 'lif-erspan'
                tc.resp_tcpdump_erspan = tc.resp_tcpdump_lif_erspan
                res_1 = eutils.validateErspanPackets(tc, tc.lif_collector,
                                                     tc.lif_collector_idx)

                if tc.collection == 'distinct':
                    tc.tcp_erspan_pkts_expected = 0
                    tc.udp_erspan_pkts_expected = 0
                    tc.icmp_erspan_pkts_expected = 0
                    tc.protocol = protocol
                    if tc.protocol == 'tcp' or tc.iterators.proto == 'mixed':
                        tc.tcp_erspan_pkts_expected = \
                               NUMBER_OF_TCP_ERSPAN_PACKETS_PER_SESSION
                    if tc.protocol == 'udp' or tc.iterators.proto == 'mixed':
                        tc.udp_erspan_pkts_expected = (tc.udp_count << 1)
                    if tc.protocol == 'icmp' or tc.iterators.proto == 'mixed':
                        tc.icmp_erspan_pkts_expected = (tc.udp_count << 1)+\
                                                       (tc.icmp_count << 1)

                    tc.protocol = 'all'
                    tc.feature = 'flow-erspan'
                    tc.resp_tcpdump_erspan = tc.resp_tcpdump_flow_erspan
                    res_f = eutils.validateErspanPackets(
                        tc, tc.flow_collector, tc.flow_collector_idx)
                    if res_f == api.types.status.FAILURE:
                        result = api.types.status.FAILURE

                #
                # Validate IPFIX packets reception
                #
                tc.feature = 'flowmon'
                res_2 = eutils.validateIpFixPackets(tc)
                tc.protocol = protocol

                #
                # Validate Config-cleanup
                #
                res_3 = eutils.validateConfigCleanup(tc)

                if res_1 == api.types.status.FAILURE or\
                   res_2 == api.types.status.FAILURE or\
                   res_3 == api.types.status.FAILURE:
                    result = api.types.status.FAILURE

                if result == api.types.status.FAILURE:
                    break

                ret_count += 1

            flowmon_policy_idx += 1
            break

        if result == api.types.status.FAILURE:
            break

        count += ret_count

    tc.SetTestCount(count)
    return result
Example #16
def Trigger(tc):
    if tc.ignore == True:
        return api.types.status.SUCCESS

    if tc.error == True:
        return api.types.status.FAILURE

    tc.resp_tcpdump_erspan = None
    tc.resp_cleanup = None

    protoDir = api.GetTopologyDirectory() +\
               "/gen/telemetry/{}/{}".format('mirror', 'endpoint')
    api.Logger.info("Template Config files location: ", protoDir)

    policies = utils.GetTargetJsons('mirror', 'endpoint')
    for policy_json in policies:
        #
        # Get template-Mirror Config
        #
        api.Logger.info("Adding one config object for {}", format(policy_json))
        newObjects = agent_api.AddOneConfig(policy_json)
        if len(newObjects) == 0:
            api.Logger.error("Adding new objects to store failed")
            tc.error = True
            return api.types.status.FAILURE

        #
        # Modify template-Mirror Config to make sure that the Naples node
        # acts as either source or destination
        #
        # Set up Collector in the remote node
        #
        if newObjects[0].kind == 'InterfaceMirrorSession':
            tc.ep_collector_objects = newObjects
            agent_api.RemoveConfigObjects(tc.ep_collector_objects)
        elif newObjects[0].kind == 'Endpoint':
            tc.endpoint_objects = newObjects
            agent_api.RemoveConfigObjects(tc.endpoint_objects)
            updateEndpointObjectTempl(tc, tc.endpoint_objects,
                                      tc.store_endpoint_objects[0])

    for i in range(0, len(policies)):
        #
        # Push Collector object
        #
        if i == 0:
            colObjects = tc.ep_collector_objects
            if tc.iterators.session == 'single':
                ret = eutils.generateEpCollectorConfig(tc, colObjects)
            else:
                ret = eutils.generateEpCollectorConfigForMultiMirrorSession(
                    tc, colObjects)
            if ret != api.types.status.SUCCESS:
                api.Logger.error("Unable to identify Collector Workload")
                tc.error = True
                return api.types.status.FAILURE

            api.Logger.info("Pushing collector objects")
            ret = agent_api.PushConfigObjects(colObjects,
                                              [tc.naples.node_name],
                                              [tc.naples_device_name])
            if ret != api.types.status.SUCCESS:
                api.Logger.error("Unable to push collector objects")
                tc.error = True
                return api.types.status.FAILURE
            continue

        api.Logger.info("Generating Endpoint objects")
        agent_api.PrintConfigObjects(colObjects)
        #
        # Update Endpoint objects
        #
        epObjects = tc.endpoint_objects
        ret = eutils.generateEndpointConfig(tc, epObjects, colObjects)
        if ret != api.types.status.SUCCESS:
            agent_api.DeleteConfigObjects(tc.ep_collector_objects,
                                          [tc.naples.node_name],
                                          [tc.naples_device_name])
            api.Logger.error("Unable to identify Endpoints")
            tc.error = True
            return api.types.status.FAILURE

        api.Logger.info("Pushing Endpoint objects")
        ret = agent_api.UpdateConfigObjects(epObjects, [tc.naples.node_name],
                                            [tc.naples_device_name])
        if ret != api.types.status.SUCCESS:
            agent_api.DeleteConfigObjects(tc.ep_collector_objects,
                                          [tc.naples.node_name],
                                          [tc.naples_device_name])
            api.Logger.error("Unable to update endpoint objects")
            tc.error = True
            return api.types.status.FAILURE

        #
        # Establish Forwarding set up between Naples-peer and Collectors
        #
        eutils.establishForwardingSetup(tc)

        req_tcpdump_erspan = api.Trigger_CreateExecuteCommandsRequest(\
                             serial = True)
        for c in range(0, len(tc.ep_collector)):
            #
            # Set up TCPDUMP's on the collector
            #
            idx = tc.ep_collector_idx[c]
            if tc.ep_collector[c].IsNaples():
                ### TODO - run & revisit for windows case and fix any issues.
                if api.GetNodeOs(tc.naples.node_name) == "windows":
                    intfGuid = ionic_utils.winIntfGuid(
                        tc.ep_collector[c].node_name,
                        tc.ep_collector[c].interface)
                    intfVal = str(
                        ionic_utils.winTcpDumpIdx(tc.ep_collector[c].node_name,
                                                  intfGuid))
                    cmd = "sudo /mnt/c/Windows/System32/tcpdump.exe -c 1000 -XX -vv -i {} ip proto 47 and dst {} -U -w ep-mirror-{}.pcap"\
                          .format(intfVal, tc.collector_ip_address[idx], c)
                else:
                    intfVal = tc.ep_collector[c].interface
                    cmd = "tcpdump -c 1000 -XX -vv -nni {} ip proto gre and dst {}\
                           --immediate-mode -U -w ep-mirror-{}.pcap"\
                          .format(intfVal,
                              tc.collector_ip_address[idx], c)
            else:
                cmd = "tcpdump -p -c 1000 -XX -vv -nni {} ip proto gre\
                       and dst {} --immediate-mode -U -w ep-mirror-{}.pcap"\
                      .format(tc.ep_collector[c].interface,
                              tc.collector_ip_address[idx], c)
            eutils.add_command(req_tcpdump_erspan, tc.ep_collector[c], cmd,
                               True)

        resp_tcpdump_erspan = api.Trigger(req_tcpdump_erspan)
        for cmd in resp_tcpdump_erspan.commands:
            api.PrintCommandResults(cmd)

        #
        # Classic mode requires a delay to make sure that TCPDUMP background
        # process is fully up
        #
        if tc.classic_mode == True:
            time.sleep(2)

        #
        # Trigger packets for ERSPAN to take effect
        #
        tc.dest_port = '120'
        if api.GetNodeOs(tc.naples.node_name) == 'linux' or api.GetNodeOs(
                tc.naples.node_name) == 'windows':
            eutils.triggerTrafficInClassicModeLinux(tc)
        else:
            eutils.triggerTrafficInHostPinModeOrFreeBSD(tc)

        #
        # Dump sessions/flows/P4-tables for debug purposes
        #
        eutils.showSessionAndP4TablesForDebug(tc, tc.ep_collector,
                                              tc.ep_collector_idx)

        #
        # Terminate TCPDUMP background process
        #
        term_resp_tcpdump_erspan = api.Trigger_TerminateAllCommands(\
                                   resp_tcpdump_erspan)
        tc.resp_tcpdump_erspan = api.Trigger_AggregateCommandsResponse(\
                              resp_tcpdump_erspan, term_resp_tcpdump_erspan)
        if api.GetNodeOs(tc.naples.node_name) == "windows":
            req = api.Trigger_CreateExecuteCommandsRequest(serial=True)
            cmd = api.WINDOWS_POWERSHELL_CMD + " Stop-Process -Name 'tcpdump' -Force"
            api.Trigger_AddCommand(req,
                                   tc.naples.node_name,
                                   tc.naples.workload_name,
                                   cmd,
                                   background=False)
            resp = api.Trigger(req)

        # Delete the objects
        eutils.deGenerateEndpointConfig(tc, tc.endpoint_objects,
                                        tc.ep_collector_objects)
        agent_api.UpdateConfigObjects(tc.endpoint_objects,
                                      [tc.naples.node_name],
                                      [tc.naples_device_name])

        agent_api.DeleteConfigObjects(tc.ep_collector_objects,
                                      [tc.naples.node_name],
                                      [tc.naples_device_name])

        #
        # Make sure that Mirror-config has been removed
        #
        tc.resp_cleanup = eutils.showP4TablesForValidation(tc)

    return api.types.status.SUCCESS
Example #17
def Trigger(tc):
    #if tc.skip: return api.types.status.SUCCESS

    policies = utils.GetTargetJsons('mirror', tc.iterators.proto)
    result = api.types.status.SUCCESS
    
    count = 0
    ret_count = 0
    collector_dest = []
    collector_wl = []
    collector_type = []
    for policy_json in policies:
        collector_dest.clear()
        collector_wl.clear()
        collector_type.clear()

        verif_json = utils.GetVerifJsonFromPolicyJson(policy_json)
        api.Logger.info("Using policy_json = {}".format(policy_json))
        newObjects = agent_api.AddOneConfig(policy_json)
        if len(newObjects) == 0:
            api.Logger.error("Adding new objects to store failed")
            return api.types.status.FAILURE
        ret = agent_api.PushConfigObjects(newObjects)
        if ret != api.types.status.SUCCESS:
            api.Logger.error("Unable to push mirror objects")
            return api.types.status.FAILURE
        utils.DumpMirrorSessions()

        # Get collector to find the workload
        for obj in newObjects:
            for obj_collector in obj.spec.collectors:
                coll_dst = obj_collector.export_config.destination
                coll_type = obj_collector.type
                collector_dest.append(coll_dst)
                collector_type.append(coll_type)
                api.Logger.info(f"export-dest: {coll_dst}, erspan-type: {coll_type}")

        for coll_dst in collector_dest:
            for wl in tc.workloads:
                if (wl.ip_address == coll_dst) or (coll_dst in wl_sec_ip_info[wl.workload_name]):
                    collector_wl.append(wl)

        api.Logger.info("collect_dest len: {} collect_wl len: {}".format(len(collector_dest), len(collector_wl)))
        collector_info = utils.GetMirrorCollectorsInfo(collector_wl, collector_dest, collector_type)
        ret = utils.RunAll(tc, verif_json, 'mirror', collector_info, is_wl_type_bm)
        result = ret['res']
        ret_count = ret['count']
        count = count + ret_count

        if result != api.types.status.SUCCESS:
            api.Logger.info("policy_json = {}, Encountered FAILURE, stopping".format(policy_json))
            # Delete the objects
            agent_api.DeleteConfigObjects(newObjects)
            agent_api.RemoveConfigObjects(newObjects)
            break

        # Update collector
        newObjects = agent_api.QueryConfigs(kind='MirrorSession')
        # mirror config update to local collector is applicable only for ESX topology
        if is_wl_type_bm is False:
            for obj in newObjects:
                if obj.spec.collectors[0].type == utils.ERSPAN_TYPE_2:
                    obj.spec.collectors[0].type = utils.ERSPAN_TYPE_3
                    collector_info[0]['type'] = utils.ERSPAN_TYPE_3
                else:
                    obj.spec.collectors[0].type = utils.ERSPAN_TYPE_2
                    collector_info[0]['type'] = utils.ERSPAN_TYPE_2
                break

            # Now push the update as we modified
            agent_api.UpdateConfigObjects(newObjects)
            utils.DumpMirrorSessions()

            # Rerun the tests
            ret = utils.RunAll(tc, verif_json, 'mirror', collector_info, is_wl_type_bm)
            result = ret['res']
            ret_count = ret['count']
            count = count + ret_count

        # Delete the objects
        agent_api.DeleteConfigObjects(newObjects)
        agent_api.RemoveConfigObjects(newObjects)
        api.Logger.info("policy_json = {}, count = {}, total_count = {}".format(policy_json, ret_count, count))
        if result != api.types.status.SUCCESS:
            api.Logger.info("policy_json = {}, Encountered FAILURE, stopping".format(policy_json))
            break
    tc.SetTestCount(count)
    collector_dest.clear()
    collector_wl.clear()
    return result
Example #18
def Trigger(tc):

    #The query returns references to the objects in the store
    store_profile_objects = netagent_cfg_api.QueryConfigs(kind='SecurityProfile')
    if len(store_profile_objects) == 0:
        api.Logger.error("No security profile objects in store")
        return api.types.status.FAILURE

    #Get returns a copy of the objects pushed to the agent
    get_config_objects = netagent_cfg_api.GetConfigObjects(store_profile_objects)
    if len(get_config_objects) == 0:
        api.Logger.error("Unable to fetch security profile objects")
        return api.types.status.FAILURE

    if len(get_config_objects) != len(store_profile_objects):
        api.Logger.error("Config mismatch, Get Objects : %d, Config store Objects : %d"
        % (len(get_config_objects), len(store_profile_objects)))
        return api.types.status.FAILURE

    #Now do an update of the objects
    for object in store_profile_objects:
        object.spec.timeouts.tcp_connection_setup = "1200s"
        object.spec.timeouts.tcp_half_close = "1400s"

    #Push the update now that the objects have been modified.
    netagent_cfg_api.UpdateConfigObjects(store_profile_objects)
    #Get returns a copy of the objects pushed to the agent
    new_get_config_objects = netagent_cfg_api.GetConfigObjects(store_profile_objects)
    if len(new_get_config_objects) == 0:
        api.Logger.error("Unable to fetch security profile objects after update")
        return api.types.status.FAILURE

    #Check whether the values have changed
    for (get_object,store_object,old_object) in zip(new_get_config_objects, store_profile_objects, get_config_objects):
        if get_object.spec.timeouts.tcp_connection_setup != store_object.spec.timeouts.tcp_connection_setup or \
            old_object.spec.timeouts.tcp_connection_setup == store_object.spec.timeouts.tcp_connection_setup or \
            old_object.spec.timeouts.tcp_connection_setup ==  get_object.spec.timeouts.tcp_connection_setup:
            api.Logger.error("Update failed")
            return api.types.status.FAILURE


    #Now restore the values to the old ones
    for object,old_object in zip(store_profile_objects, get_config_objects):
        object.spec.timeouts.tcp_connection_setup = old_object.spec.timeouts.tcp_connection_setup
        object.spec.timeouts.tcp_half_close = old_object.spec.timeouts.tcp_half_close


    #Push the update now that the objects have been modified.
    netagent_cfg_api.UpdateConfigObjects(store_profile_objects)
    #Get returns a copy of the objects pushed to the agent
    new_get_config_objects = netagent_cfg_api.GetConfigObjects(store_profile_objects)
    if len(new_get_config_objects) == 0:
        api.Logger.error("Unable to fetch security profile objects after update")
        return api.types.status.FAILURE

    for (get_object,store_object,old_object) in zip(new_get_config_objects, store_profile_objects, get_config_objects):
        if get_object.spec.timeouts.tcp_connection_setup != store_object.spec.timeouts.tcp_connection_setup or \
            old_object.spec.timeouts.tcp_connection_setup != store_object.spec.timeouts.tcp_connection_setup or \
            old_object.spec.timeouts.tcp_connection_setup !=  get_object.spec.timeouts.tcp_connection_setup:
            api.Logger.error("Second Update failed")
            return api.types.status.FAILURE

    #Now let's do a delete

    netagent_cfg_api.DeleteConfigObjects(store_profile_objects)
    #Get returns a copy of the objects pushed to the agent
    get_config_objects = netagent_cfg_api.GetConfigObjects(store_profile_objects)
    if len(get_config_objects) != 0:
        api.Logger.error("Delete of objects failed")
        return api.types.status.FAILURE

    netagent_cfg_api.PushConfigObjects(store_profile_objects)
    #Get returns a copy of the objects pushed to the agent
    get_config_objects = netagent_cfg_api.GetConfigObjects(store_profile_objects)
    if len(get_config_objects) == 0:
        api.Logger.error("Unable to fetch security profile objects")
        return api.types.status.FAILURE


    newObjects = netagent_cfg_api.AddOneConfig(api.GetTopologyDirectory() + "/test_cfg/test_security_profile.json")
    if len(newObjects) == 0:
        api.Logger.error("Adding new objects to store failed")
        return api.types.status.FAILURE

    nodes = api.GetNaplesHostnames()
    push_nodes = [nodes[0]]
    ret = netagent_cfg_api.PushConfigObjects(newObjects, node_names = push_nodes)
    if ret != api.types.status.SUCCESS:
        api.Logger.error("Unable to fetch security profile objects to node %s" % nodes[0])
        return api.types.status.FAILURE

    #Get returns a copy of the objects pushed to the agent
    get_config_objects = netagent_cfg_api.GetConfigObjects(newObjects, node_names = push_nodes)
    if len(get_config_objects) == 0:
        api.Logger.error("Unable to fetch newly pushed objects")
        return api.types.status.FAILURE

    #Delete the objects that were pushed
    netagent_cfg_api.DeleteConfigObjects(get_config_objects)
    #Get returns a copy of the objects pushed to the agent
    get_config_objects = netagent_cfg_api.GetConfigObjects(newObjects, node_names = push_nodes)
    if len(get_config_objects) != 0:
        api.Logger.error("Delete of new objects failed")
        return api.types.status.FAILURE

    #Remove those objects completely from the store too.
    ret = netagent_cfg_api.RemoveConfigObjects(newObjects)
    if ret != api.types.status.SUCCESS:
        api.Logger.error("Remove of new objects failed")
        return api.types.status.FAILURE

    return api.types.status.SUCCESS
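Example #18 verifies each update by comparing three views of an object: the fresh Get copy, the store copy, and the pre-update Get copy. A small hedged helper (the name and the attr parameter are illustrative) that factors out the first verification loop:

def _update_applied(get_obj, store_obj, old_obj, attr):
    # the agent must reflect the store value, and it must differ from old
    new_val = getattr(get_obj.spec.timeouts, attr)
    store_val = getattr(store_obj.spec.timeouts, attr)
    old_val = getattr(old_obj.spec.timeouts, attr)
    return new_val == store_val and old_val != new_val

With this helper, the first check reduces to failing unless _update_applied(get_object, store_object, old_object, 'tcp_connection_setup') holds for every zipped triple.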
Example #19
def __restore_security_profile(tc):
    return netagent_api.UpdateConfigObjects(tc.cloned_sp_objects)