コード例 #1
0
ファイル: test_api_external.py プロジェクト: rubiruchi/nmeta
def test_flow_normalise_direction():
    """
    Test normalising direction of flow.
    Pass a dictionary of an identity record check return a similar
    dictionary that has sources and destinations normalised to the
    direction of the first observed packet in the flow
    """
    #*** Instantiate a flow object:
    flow = flows_module.Flow(config)

    #*** Test Flow 1 Packet 0 (Client TCP SYN):
    flow.ingest_packet(DPID1, INPORT1, pkts.RAW[0], datetime.datetime.now())
    record = flow.packet.dbdict()
    #*** First packet defines flow direction, so fields must be unchanged
    #*** both before and after normalisation:
    expected = {'ip_src': pkts.IP_SRC[0], 'ip_dst': pkts.IP_DST[0],
                'tp_src': pkts.TP_SRC[0], 'tp_dst': pkts.TP_DST[0]}
    for field, value in expected.items():
        assert record[field] == value
    normalised = api.flow_normalise_direction(record)
    for field, value in expected.items():
        assert normalised[field] == value

    #*** Test Flow 1 Packet 1 (Server TCP SYN ACK). This should be transposed:
    flow.ingest_packet(DPID1, INPORT1, pkts.RAW[1], datetime.datetime.now())
    record = flow.packet.dbdict()
    assert record['ip_src'] == pkts.IP_SRC[1]
    assert record['ip_dst'] == pkts.IP_DST[1]
    assert record['tp_src'] == pkts.TP_SRC[1]
    assert record['tp_dst'] == pkts.TP_DST[1]
    normalised = api.flow_normalise_direction(record)
    #*** Sources and destinations should have been swapped:
    assert normalised['ip_src'] == pkts.IP_DST[1]
    assert normalised['ip_dst'] == pkts.IP_SRC[1]
    assert normalised['tp_src'] == pkts.TP_DST[1]
    assert normalised['tp_dst'] == pkts.TP_SRC[1]
コード例 #2
0
def test_record_suppression():
    """
    Test the recording of a flow suppression event
    """
    #*** Instantiate Flow class:
    flow = flows_module.Flow(config)

    #*** Build a sample traffic-classification result to use:
    src_ip = '10.1.0.1'
    dst_ip = '10.1.0.2'
    forward_match = {
        'eth_type': 0x0800,
        'ipv4_src': src_ip,
        'ipv4_dst': dst_ip,
        'ip_proto': 6
    }
    result = {
        'match_type': 'single',
        'forward_cookie': 1,
        'forward_match': forward_match,
        'reverse_cookie': 0,
        'reverse_match': {},
        'client_ip': src_ip
    }

    #*** Ingest a packet from pc1:
    # 10.1.0.1 10.1.0.2 TCP 74 43297 > http [SYN]
    flow.ingest_packet(DPID1, INPORT1, pkts.RAW[0], datetime.datetime.now())

    #*** Record suppressing this flow
    flow.record_suppression(DPID1, 'forward', result)
コード例 #3
0
def test_record_removal():
    """
    Test the recording of an idle-timeout flow removal message
    sent by a switch into the flow_rems database collection

    Synthesise flow removal messages to test with.
    """
    #*** Supports OpenFlow version 1.3:
    OFP_VERSION = ofproto_v1_3.OFP_VERSION

    #*** Instantiate Flow class:
    flow = flows_module.Flow(config)

    def _synthesise_flow_removed(filename, datapath):
        """Load a JSON flow-removed message and build an OF message object"""
        with open(filename, 'r') as json_file:
            json_dict = json.loads(json_file.read())
        return ofproto_parser.ofp_msg_from_jsondict(datapath, json_dict)

    def _latest_removal(query):
        """Return the most recent flow_rems document matching query"""
        cursor = flow.flow_rems.find(query).sort('$natural', -1).limit(1)
        return list(cursor)[0]

    #*** Set up fake datapath and synthesise messages:
    datapath = ofproto_protocol.ProtocolDesc(version=OFP_VERSION)
    datapath.id = 1
    msg_tx = _synthesise_flow_removed('OFPMsgs/OFPFlowRemoved_1.json',
                                      datapath)
    msg_rx = _synthesise_flow_removed('OFPMsgs/OFPFlowRemoved_2.json',
                                      datapath)

    logger.debug("msg_tx=%s", msg_tx)

    #*** Call our method that we're testing with the synthesised flow rems:
    flow.record_removal(msg_tx)
    flow.record_removal(msg_rx)

    #*** Check forward leg recorded correctly in database collection:
    result_tx = _latest_removal({'ip_A': '10.1.0.1', 'tp_B': 80})
    logger.debug("result=%s", result_tx)
    assert result_tx['table_id'] == 1
    assert result_tx['ip_B'] == '10.1.0.2'
    assert result_tx['tp_A'] == 43297
    assert result_tx['packet_count'] == 10
    assert result_tx['flow_hash'] == nethash.hash_flow(
        ('10.1.0.1', '10.1.0.2', 43297, 80, 6))
    assert result_tx['cookie'] == 23
    assert result_tx['direction'] == 'forward'

    #*** Return leg of flow:
    result_rx = _latest_removal({'ip_B': '10.1.0.1', 'tp_A': 80})
    logger.debug("result=%s", result_rx)
    assert result_rx['table_id'] == 1
    assert result_rx['ip_A'] == '10.1.0.2'
    assert result_rx['tp_B'] == 43297
    assert result_rx['packet_count'] == 9
    assert result_rx['flow_hash'] == nethash.hash_flow(
        ('10.1.0.2', '10.1.0.1', 80, 43297, 6))
    assert result_rx['cookie'] == 1000000023
    assert result_rx['direction'] == 'reverse'
コード例 #4
0
def test_flow_ipv4_http2():
    """
    Test ingesting packets from an IPv4 HTTP flow, with a packet
    from a different flow ingested mid-stream. This flow is a
    successful retrieval of an HTTP object with connection close
    so TCP session nicely torn down with FINs
    """
    #*** Instantiate a flow object:
    flow = flows_module.Flow(config)

    #*** (ingress port, description) for each Flow 2 packet, in order.
    #*** Client packets arrive on INPORT1, server packets on INPORT2:
    packet_plan = [
        (INPORT1, 'Client TCP SYN'),
        (INPORT2, 'Server TCP SYN ACK'),
        (INPORT1, 'Client ACK'),
        (INPORT1, 'Client HTTP GET'),
        (INPORT2, 'Server ACK'),
        (INPORT2, 'Server HTTP 200 OK'),
        (INPORT1, 'Client ACK'),
        (INPORT2, 'Server sends HTML Page to Client'),
        (INPORT1, 'Client ACK'),
        (INPORT2, 'Server FIN ACK'),
        (INPORT1, 'Client FIN ACK'),
        (INPORT2, 'Final ACK from Server'),
    ]
    for index, (in_port, _desc) in enumerate(packet_plan):
        if index == 3:
            #*** Random packet (different flow) to ensure it doesn't count
            #*** against flow 2:
            flow.ingest_packet(DPID1, INPORT1, pkts.RAW[1],
                               datetime.datetime.now())
        #*** Ingest next flow 2 packet and validate flow packet counts:
        flow.ingest_packet(DPID1, in_port, pkts2.RAW[index],
                           datetime.datetime.now())
        pkt_test(flow, pkts2, index + 1, index + 1)
コード例 #5
0
ファイル: test_api_external.py プロジェクト: rubiruchi/nmeta
def test_indexing_get_pi_rate():
    """
    Test indexing of database collections for api queries
    to ensure that they run efficiently
    """
    #*** Instantiate classes:
    flow = flows_module.Flow(config)

    #*** Timestamp older than the flow time limit (computed once, replacing
    #*** duplicated backslash-continued expressions):
    expired_time = datetime.datetime.now() - datetime.timedelta(
        seconds=config.get_value("flow_time_limit") + 1)

    #*** Ingest packets older than flow timeout:
    flow.ingest_packet(DPID1, INPORT1, pkts_ARP_2.RAW[0], expired_time)
    flow.ingest_packet(DPID1, INPORT1, pkts_ARP_2.RAW[1], expired_time)

    #*** Ingest packets:
    flow.ingest_packet(DPID1, INPORT1, pkts.RAW[0], datetime.datetime.now())
    flow.ingest_packet(DPID1, INPORT1, pkts.RAW[1], datetime.datetime.now())
    flow.ingest_packet(DPID1, INPORT1, pkts.RAW[2], datetime.datetime.now())

    #*** Test packet_ins collection indexing...
    #*** Should be 5 documents in packet_ins collection:
    assert flow.packet_ins.count() == 5
    #*** Get query execution statistics:
    explain = api.get_pi_rate(test=1)

    #*** Check an index is used:
    assert explain['queryPlanner']['winningPlan']['inputStage'][
        'stage'] == 'IXSCAN'
    #*** Check how query ran (truthiness assert, not '== True'):
    assert explain['executionStats']['executionSuccess']
    assert explain['executionStats']['nReturned'] == 3
    assert explain['executionStats']['totalKeysExamined'] == 3
    assert explain['executionStats']['totalDocsExamined'] == 3
コード例 #6
0
ファイル: test_api_external.py プロジェクト: rubiruchi/nmeta
def test_flows_removed_stats_count():
    """
    Test the flows_removed API stats count by ingesting flow removal messages
    then checking that the API response correctly specifies message count
    """
    #*** Start api_external as separate process:
    logger.info("Starting api_external")
    api_ps = multiprocessing.Process(target=api.run, args=())
    api_ps.start()
    #*** try/finally so the api_external sub-process is always terminated,
    #*** even when an assertion fails mid-test (previously it leaked):
    try:
        #*** Supports OpenFlow version 1.3:
        OFP_VERSION = ofproto_v1_3.OFP_VERSION

        #*** Instantiate Flow class:
        flow = flows_module.Flow(config)

        #*** Load JSON representations of flow removed messages:
        with open('OFPMsgs/OFPFlowRemoved_1.json', 'r') as json_file:
            json_dict_tx = json.loads(json_file.read())
        with open('OFPMsgs/OFPFlowRemoved_2.json', 'r') as json_file:
            json_dict_rx = json.loads(json_file.read())

        #*** Set up fake datapath and synthesise messages:
        datapath = ofproto_protocol.ProtocolDesc(version=OFP_VERSION)
        datapath.id = 1
        msg_tx = ofproto_parser.ofp_msg_from_jsondict(datapath, json_dict_tx)
        msg_rx = ofproto_parser.ofp_msg_from_jsondict(datapath, json_dict_rx)

        def _flows_removed_count():
            """Query the external API and return the flows_removed count"""
            api_result = get_api_result(URL_TEST_FLOWS_REMOVED_STATS_COUNT)
            logger.debug("api_result=%s", api_result)
            return api_result['flows_removed']

        #*** Count should increment as each removal message is recorded:
        assert _flows_removed_count() == 0
        flow.record_removal(msg_tx)
        assert _flows_removed_count() == 1
        flow.record_removal(msg_rx)
        assert _flows_removed_count() == 2
    finally:
        #*** Stop api_external sub-process:
        api_ps.terminate()
コード例 #7
0
def test_not_suppressed():
    """
    Test this query that checks to see if a flow mod to a switch
    is not suppressed (preventing possible duplicate flow mods)
    """
    #*** Instantiate Flow class:
    flow = flows_module.Flow(config)

    #*** Build a sample traffic-classification result to use:
    src_ip = '10.1.0.1'
    dst_ip = '10.1.0.2'
    result = {
        'match_type': 'single',
        'forward_cookie': 1,
        'forward_match': {
            'eth_type': 0x0800,
            'ipv4_src': src_ip,
            'ipv4_dst': dst_ip,
            'ip_proto': 6
        },
        'reverse_cookie': 0,
        'reverse_match': {},
        'client_ip': src_ip
    }

    #*** Ingest a packet from pc1:
    # 10.1.0.1 10.1.0.2 TCP 74 43297 > http [SYN]
    flow.ingest_packet(DPID1, INPORT1, pkts.RAW[0], datetime.datetime.now())

    #*** For each (dpid, suppress_type) combination the flow should start
    #*** out not suppressed (1), then report suppressed (0) once recorded.
    #*** Each fresh combination also shows suppression state is tracked
    #*** separately per DPID and per suppress_type:
    for dpid, suppress_type in ((DPID1, 'suppress'),
                                (DPID2, 'suppress'),
                                (DPID1, 'drop')):
        assert flow.not_suppressed(dpid, suppress_type) == 1
        flow.record_suppression(dpid, suppress_type, result)
        assert flow.not_suppressed(dpid, suppress_type) == 0
コード例 #8
0
ファイル: test_identities.py プロジェクト: rubiruchi/nmeta
def test_harvest_DHCP():
    """
    Test harvesting identity metadata from an IPv4 DHCP request
    Note: this test is very basic and does not cover much...
    TBD: cover more scenarios and DHCP message types
    """
    #*** Instantiate flow, policy and identities objects:
    flow = flows_module.Flow(config)
    policy = policy_module.Policy(config)
    identities = identities_module.Identities(config, policy)

    def _check_common_fields(dpid, in_port):
        """Check DHCP message fields shared with the last ingested packet"""
        dhcp_msg = identities.dhcp_msg
        flow_pkt = flow.packet
        assert dhcp_msg.dpid == dpid
        assert dhcp_msg.in_port == in_port
        assert dhcp_msg.eth_src == flow_pkt.eth_src
        assert dhcp_msg.eth_dst == flow_pkt.eth_dst
        assert dhcp_msg.ip_src == flow_pkt.ip_src
        assert dhcp_msg.ip_dst == flow_pkt.ip_dst
        assert dhcp_msg.tp_src == flow_pkt.tp_src
        assert dhcp_msg.tp_dst == flow_pkt.tp_dst
        assert dhcp_msg.transaction_id == '0xabc5667f'

    #*** Client to Server DHCP Request:
    flow.ingest_packet(DPID1, INPORT1, pkts_dhcp.RAW[2],
                       datetime.datetime.now())
    identities.harvest(pkts_dhcp.RAW[2], flow.packet)
    _check_common_fields(DPID1, INPORT1)
    assert identities.dhcp_msg.host_name == 'pc1'
    assert identities.dhcp_msg.message_type == 'DHCPREQUEST'

    #*** Server to Client DHCP ACK:
    #*** Set ingest time so we can check validity based on lease
    ingest_time = datetime.datetime.now()
    flow.ingest_packet(DPID1, INPORT2, pkts_dhcp.RAW[3], ingest_time)
    identities.harvest(pkts_dhcp.RAW[3], flow.packet)
    _check_common_fields(DPID1, INPORT2)
    assert identities.dhcp_msg.host_name == ''
    assert identities.dhcp_msg.ip_assigned == '10.1.0.1'
    assert identities.dhcp_msg.message_type == 'DHCPACK'
    assert identities.dhcp_msg.lease_time == 300

    #*** Harvest should have bound host name pc1 to its MAC and IP:
    result_identity = identities.findbynode('pc1')
    logger.debug("result_identity=%s", result_identity)
    assert result_identity['mac_address'] == pkts_dhcp.ETH_SRC[2]
    assert result_identity['ip_address'] == '10.1.0.1'
コード例 #9
0
ファイル: test_api_external.py プロジェクト: rubiruchi/nmeta
def test_get_flow_data_xfer():
    """
    Test the get_flow_data_xfer method.

    Synthesise flow removal messages to test with.
    """
    #*** Supports OpenFlow version 1.3:
    OFP_VERSION = ofproto_v1_3.OFP_VERSION

    #*** Instantiate Flow class and ingest both legs of the flow:
    flow = flows_module.Flow(config)
    flow.ingest_packet(DPID1, INPORT1, pkts.RAW[0], datetime.datetime.now())
    flow.ingest_packet(DPID1, INPORT2, pkts.RAW[1], datetime.datetime.now())

    def _synthesise_flow_removed(filename, datapath):
        """Load a JSON flow-removed message and build an OF message object"""
        with open(filename, 'r') as json_file:
            json_dict = json.loads(json_file.read())
        return ofproto_parser.ofp_msg_from_jsondict(datapath, json_dict)

    #*** Set up fake datapath and synthesise messages:
    datapath = ofproto_protocol.ProtocolDesc(version=OFP_VERSION)
    datapath.id = 1
    msg_tx = _synthesise_flow_removed('OFPMsgs/OFPFlowRemoved_1.json',
                                      datapath)
    msg_rx = _synthesise_flow_removed('OFPMsgs/OFPFlowRemoved_2.json',
                                      datapath)

    logger.debug("msg_tx=%s", msg_tx)

    #*** Record flow removals to flow_rems database collection:
    flow.record_removal(msg_tx)
    flow.record_removal(msg_rx)

    #*** Now, test the get_flow_data_xfer method:
    record = {
        'ip_src': '10.1.0.1',
        'ip_dst': '10.1.0.2',
        'tp_src': 43297,
        'tp_dst': 80,
        'proto': 6,
        'flow_hash': '9822b2867652ee0957892482b9f004c3'
    }
    xfer = api.get_flow_data_xfer(record)
    logger.debug("xfer=%s", xfer)

    #*** Byte/packet counts come from the synthesised removal messages:
    assert xfer['tx_found'] == 1
    assert xfer['tx_bytes'] == 744
    assert xfer['tx_pkts'] == 10
    assert xfer['rx_found'] == 1
    assert xfer['rx_bytes'] == 6644
    assert xfer['rx_pkts'] == 9
コード例 #10
0
ファイル: test_policy.py プロジェクト: rubiruchi/nmeta
def test_check_policy():
    """
    Test that packet match against policy works correctly
    """
    #*** Instantiate tc, flows and identities classes, specifying
    #*** a particular main_policy file to use:
    policy = policy_module.Policy(
        config,
        pol_dir_default="config/tests/regression",
        pol_dir_user="******",
        pol_filename="main_policy_regression_static.yaml")
    flow = flows_module.Flow(config)
    ident = identities.Identities(config, policy)

    #*** Note: cannot query a classification until a packet has been
    #*** ingested - will throw error

    #*** Ingest Test Flow 1 Packet 1 (Client TCP SYN) and check policy:
    # 10.1.0.1 10.1.0.2 TCP 74 43297 > http [SYN]
    flow.ingest_packet(DPID1, INPORT1, pkts.RAW[0], datetime.datetime.now())
    policy.check_policy(flow, ident)
    logger.debug("flow.classification.classified=%s",
                 flow.classification.classified)
    #*** Should not match any rules in that policy:
    assert flow.classification.classified == 1
    assert flow.classification.classification_tag == ""
    assert flow.classification.actions == {}

    #*** Re-instantiate policy with different policy that should classify:
    policy = policy_module.Policy(
        config,
        pol_dir_default="config/tests/regression",
        pol_dir_user="******",
        pol_filename="main_policy_regression_static_3.yaml")

    #*** Re-ingest the same packet and check policy again:
    # 10.1.0.1 10.1.0.2 TCP 74 43297 > http [SYN]
    flow.ingest_packet(DPID1, INPORT1, pkts.RAW[0], datetime.datetime.now())
    policy.check_policy(flow, ident)
    #*** Should match policy:
    assert flow.classification.classified == 1
    assert flow.classification.classification_tag == \
        "Constrained Bandwidth Traffic"
    logger.debug("flow.classification.actions=%s", flow.classification.actions)
    assert flow.classification.actions == {
        'set_desc': 'Constrained Bandwidth Traffic',
        'qos_treatment': 'constrained_bw'
    }
コード例 #11
0
def test_flow_ipv4_tcp_reset():
    """
    Test ingesting packets from an IPv4 TCP flow that is immediately
    shutdown with a TCP RST
    """
    #*** Instantiate a flow object:
    flow = flows_module.Flow(config)

    #*** Ingest both flow packets, validating counts after each one.
    #*** Packet 1 is the Client SYN on TCP-81 (INPORT1); packet 2 is the
    #*** Server RST (INPORT2):
    for seq, in_port in enumerate((INPORT1, INPORT2), start=1):
        flow.ingest_packet(DPID1, in_port, pkts3.RAW[seq - 1],
                           datetime.datetime.now())
        pkt_test(flow, pkts3, seq, seq)
コード例 #12
0
def test_flow_ipv4_http():
    """
    Test ingesting packets from an IPv4 HTTP flow, with a packet
    from a different flow ingested mid-stream.
    This flow is not torn down.
    """
    #*** Sanity check can read into dpkt:
    eth = dpkt.ethernet.Ethernet(pkts.RAW[0])
    eth_src = mac_addr(eth.src)
    assert eth_src == '08:00:27:2a:d6:dd'

    #*** Instantiate a flow object:
    flow = flows_module.Flow(config)

    #*** (ingress port, description) for each Flow 1 packet, in order.
    #*** Client packets arrive on INPORT1, server packets on INPORT2:
    packet_plan = [
        (INPORT1, 'Client TCP SYN'),
        (INPORT2, 'Server TCP SYN ACK'),
        (INPORT1, 'Client ACK'),
        (INPORT1, 'Client to Server HTTP GET'),
        (INPORT2, 'Server ACK'),
        (INPORT2, 'Server to Client HTTP 400 Bad Request'),
        (INPORT1, 'Client ACK'),
    ]
    for index, (in_port, _desc) in enumerate(packet_plan):
        if index == 3:
            #*** Random packet (different flow) to ensure it doesn't count
            #*** against flow 1:
            flow.ingest_packet(DPID1, INPORT1, pkts2.RAW[1],
                               datetime.datetime.now())
        #*** Ingest next flow 1 packet and validate flow packet counts:
        flow.ingest_packet(DPID1, in_port, pkts.RAW[index],
                           datetime.datetime.now())
        pkt_test(flow, pkts, index + 1, index + 1)

    #*** Test Flow 1 Packet 7 (Client ACK) - different DPID:
    flow.ingest_packet(DPID2, INPORT1, pkts.RAW[6], datetime.datetime.now())
    pkt_test(flow, pkts, 7, 7)
コード例 #13
0
ファイル: test_identities.py プロジェクト: rubiruchi/nmeta
def test_harvest_LLDP():
    """
    Test harvesting identity metadata from LLDP packets
    """
    #*** Instantiate flow, policy and identities objects:
    flow = flows_module.Flow(config)
    policy = policy_module.Policy(config)
    identities = identities_module.Identities(config, policy)

    #*** Test no result found by checking before LLDP ingestion:
    result_identity = identities.findbynode(pkts_lldp.LLDP_SYSTEM_NAME[0])
    assert result_identity == 0

    #*** Ingest and harvest each LLDP packet, then verify its harvested
    #*** identity record (replaces three copy-pasted blocks):
    for index in range(3):
        flow.ingest_packet(DPID1, INPORT1, pkts_lldp.RAW[index],
                           datetime.datetime.now())
        identities.harvest(pkts_lldp.RAW[index], flow.packet)
        result_identity = identities.findbynode(
            pkts_lldp.LLDP_SYSTEM_NAME[index])
        assert result_identity['host_name'] == \
            pkts_lldp.LLDP_SYSTEM_NAME[index]
        assert result_identity['host_desc'] == \
            pkts_lldp.LLDP_SYSTEM_DESC[index]
        assert result_identity['dpid'] == DPID1
        assert result_identity['in_port'] == INPORT1
        assert result_identity['mac_address'] == pkts_lldp.ETH_SRC[index]
        assert result_identity['harvest_type'] == 'LLDP'
コード例 #14
0
def test_hash_packet():
    """
    Test that same flow packet (i.e. TCP) retx adds to count whereas
    retx of non-flow packet has count of 1
    """
    #*** Create a flows packet object:
    flow = flows.Flow(config)

    #*** Ingest Flow 1 Packet 0 (Client TCP SYN) then Packet 2 (Client TCP
    #*** ACK), hashing the flow packet after each ingestion:
    packet_hashes = []
    for raw_index in (0, 2):
        flow.ingest_packet(DPID1, INPORT1, pkts.RAW[raw_index],
                           datetime.datetime.now())
        packet_hashes.append(nethash.hash_packet(flow.packet))

    #*** The two packet hashes must be different even though have same 5-tuple:
    assert packet_hashes[0] != packet_hashes[1]
コード例 #15
0
ファイル: test_identities.py プロジェクト: rubiruchi/nmeta
def test_harvest_ARP():
    """
    Test harvesting identity metadata from an IPv4 ARP reply.
    """
    #*** Instantiate flow, policy and identities objects:
    flow = flows_module.Flow(config)
    policy = policy_module.Policy(config)
    identities = identities_module.Identities(config, policy)

    #*** Ingest and harvest the server's ARP reply packet:
    arp_reply = pkts_arp.RAW[1]
    flow.ingest_packet(DPID1, INPORT1, arp_reply, datetime.datetime.now())
    identities.harvest(arp_reply, flow.packet)

    #*** Look up identity by the server's MAC and verify the MAC/IP binding:
    result_identity = identities.findbymac(pkts_arp.ETH_SRC[1])
    assert result_identity['mac_address'] == pkts_arp.ETH_SRC[1]
    assert result_identity['ip_address'] == '10.1.0.2'
コード例 #16
0
ファイル: test_api_external.py プロジェクト: rubiruchi/nmeta
def test_get_dns_ip():
    """
    Test looking up a DNS CNAME to get an IP address
    """
    #*** Instantiate flow, policy and identities objects:
    flow = flows_module.Flow(config)
    policy = policy_module.Policy(config)
    identities = identities_module.Identities(config, policy)

    #*** DNS packet 1 (NAME to CNAME, then second answer with IP for CNAME):
    dns_pkt = pkts_dns.RAW[1]
    flow.ingest_packet(DPID1, INPORT1, dns_pkt, datetime.datetime.now())
    identities.harvest(dns_pkt, flow.packet)

    #*** Resolve the harvested CNAME to its IP via the API:
    logger.debug("Testing lookup of CNAME=%s", pkts_dns.DNS_CNAME[1])
    result_ip = api.get_dns_ip(pkts_dns.DNS_CNAME[1])
    assert result_ip == pkts_dns.DNS_IP[1]
コード例 #17
0
ファイル: test_api_external.py プロジェクト: rubiruchi/nmeta
def test_get_host_by_ip():
    """
    Test get_host_by_ip
    """
    #*** Instantiate flow, policy and identities objects:
    flow = flows_module.Flow(config)
    policy = policy_module.Policy(config)
    identities = identities_module.Identities(config, policy)

    def _ingest_and_harvest(dpid, in_port, raw_pkt):
        """Ingest a packet and harvest identity metadata from it"""
        flow.ingest_packet(dpid, in_port, raw_pkt, datetime.datetime.now())
        identities.harvest(raw_pkt, flow.packet)

    def _lookup_host(ip_addr):
        """Resolve an IP to a host name via the API, with debug logging"""
        result = api.get_host_by_ip(ip_addr)
        logger.debug("get_host_by_ip_result=%s", result)
        return result

    #*** Ingest ARP reply for MAC of pc1 so can ref later:
    _ingest_and_harvest(DPID1, INPORT1, pkts_arp.RAW[3])

    #*** Ingest LLDP from pc1:
    _ingest_and_harvest(DPID1, INPORT1, pkts_lldp.RAW[0])

    #*** Call the get_host_by_ip:
    assert _lookup_host('10.1.0.1') == 'pc1.example.com'

    #*** Test DHCP to host by IP

    #*** Client to Server DHCP Request:
    _ingest_and_harvest(DPID1, INPORT1, pkts_dhcp.RAW[2])

    #*** Server to Client DHCP ACK:
    _ingest_and_harvest(DPID1, INPORT2, pkts_dhcp.RAW[3])

    #*** Call the get_host_by_ip again, now resolving to the DHCP host name:
    assert _lookup_host('10.1.0.1') == 'pc1'
コード例 #18
0
ファイル: test_identities.py プロジェクト: rubiruchi/nmeta
def test_harvest_DNS():
    """
    Test harvesting identity metadata from DNS packets
    """
    #*** Instantiate flow, policy and identities objects:
    flow = flows_module.Flow(config)
    policy = policy_module.Policy(config)
    identities = identities_module.Identities(config, policy)

    #*** DNS packet 1 (NAME to CNAME, then second answer with IP for CNAME):
    dns_pkt = pkts_dns.RAW[1]
    flow.ingest_packet(DPID1, INPORT1, dns_pkt, datetime.datetime.now())
    identities.harvest(dns_pkt, flow.packet)

    #*** NAME record should alias to the CNAME:
    by_name = identities.findbyservice(pkts_dns.DNS_NAME[1])
    assert by_name['service_name'] == pkts_dns.DNS_NAME[1]
    assert by_name['service_alias'] == pkts_dns.DNS_CNAME[1]

    #*** CNAME record should carry the resolved IP address:
    by_cname = identities.findbyservice(pkts_dns.DNS_CNAME[1])
    assert by_cname['service_name'] == pkts_dns.DNS_CNAME[1]
    assert by_cname['ip_address'] == pkts_dns.DNS_IP[1]
コード例 #19
0
ファイル: test_policy.py プロジェクト: rubiruchi/nmeta
def test_check_tc_rule():
    """
    Test matching a packet against an individual traffic
    classification (TC) rule from regression policy files
    """
    #*** Instantiate classes:
    policy = policy_module.Policy(
        config,
        pol_dir_default="config/tests/regression",
        pol_dir_user="******",
        pol_filename="main_policy_regression_static.yaml")
    flow = flows_module.Flow(config)
    ident = identities.Identities(config, policy)

    #*** Test Flow 1 Packet 1 (Client TCP SYN):
    # 10.1.0.1 10.1.0.2 TCP 74 43297 > http [SYN]
    flow.ingest_packet(DPID1, INPORT1, pkts.RAW[0], datetime.datetime.now())
    #*** Set policy.pkt as work around for not calling parent method that sets it:
    policy.pkt = flow.packet

    #*** main_policy_regression_static.yaml shouldn't match HTTP (rule 0).
    #*** Truthiness asserts used rather than '== True/False' comparisons:
    tc_rules = policy_module.TCRules(policy)
    tc_rule = policy_module.TCRule(tc_rules, policy, 0)
    tc_rule_result = tc_rule.check_tc_rule(flow, ident)
    assert not tc_rule_result.match
    assert not tc_rule_result.continue_to_inspect
    assert tc_rule_result.classification_tag == ""
    assert tc_rule_result.actions == {}

    #*** main_policy_regression_static_3.yaml should match HTTP (rule 0):
    policy = policy_module.Policy(
        config,
        pol_dir_default="config/tests/regression",
        pol_dir_user="******",
        pol_filename="main_policy_regression_static_3.yaml")
    ident = identities.Identities(config, policy)
    tc_rules = policy_module.TCRules(policy)
    tc_rule = policy_module.TCRule(tc_rules, policy, 0)
    tc_rule_result = tc_rule.check_tc_rule(flow, ident)
    assert tc_rule_result.match
    assert not tc_rule_result.continue_to_inspect
    assert tc_rule_result.classification_tag == "Constrained Bandwidth Traffic"
    assert tc_rule_result.actions == {
        'qos_treatment': 'constrained_bw',
        'set_desc': 'Constrained Bandwidth Traffic'
    }
コード例 #20
0
def test_packet_sizes():
    """
    Test packet_sizes method
    """
    #*** Instantiate a flow object:
    flow = flows_module.Flow(config)

    #*** Ingest plan as (dpid, raw packet index) tuples. The second copy of
    #*** RAW[1] arrives via a different DPID so should be ignored:
    ingest_plan = [
        (DPID1, 0),   # forward
        (DPID1, 1),   # reverse
        (DPID2, 1),   # duplicate on different DPID - ignored
        (DPID1, 2),   # forward
        (DPID1, 3),   # forward
        (DPID1, 4),   # reverse
    ]
    for dpid, raw_index in ingest_plan:
        flow.ingest_packet(dpid, INPORT1, pkts2.RAW[raw_index],
                           datetime.datetime.now())

    #*** Check packet sizes:
    assert flow.packet_sizes() == [74, 74, 66, 321, 66]
コード例 #21
0
ファイル: test_api_external.py プロジェクト: rubiruchi/nmeta
def test_get_service_by_ip():
    """
    Test that get_service_by_ip resolves an IP address back to the
    service name learnt from a harvested DNS answer chain.
    """
    #*** Set up flow, policy and identities objects:
    flow = flows_module.Flow(config)
    policy = policy_module.Policy(config)
    identities = identities_module.Identities(config, policy)

    tc_ident = tc_identity.IdentityInspect(config)
    #*** Ingest and harvest DNS packet 1 (NAME to CNAME, then a second
    #*** answer carrying the IP for the CNAME):
    # A www.facebook.com CNAME star-mini.c10r.facebook.com A 179.60.193.36
    dns_raw = pkts_dns.RAW[1]
    flow.ingest_packet(DPID1, INPORT1, dns_raw, datetime.datetime.now())
    identities.harvest(dns_raw, flow.packet)

    #*** The IP from the answer should resolve to the original name:
    result = api.get_service_by_ip('179.60.193.36')
    logger.debug("get_service_by_ip_result=%s", result)

    assert result == 'www.facebook.com'
コード例 #22
0
def test_min_interpacket_interval():
    """
    Test the min_interpacket_interval method.

    Remember: the interval is assessed per direction in the flow.
    """
    #*** Instantiate a flow object:
    flow = flows_module.Flow(config)

    #*** Packet times are offsets from now, as stale timestamps would
    #*** fall outside the db search time limits:
    base_time = datetime.datetime.now()

    def offset(milliseconds):
        #*** Return base_time shifted forward by the given milliseconds:
        return base_time + datetime.timedelta(milliseconds=milliseconds)

    #*** Ingest packets; the 3rd is a duplicate from a different DPID
    #*** and should be ignored.
    #*** Forward direction:
    flow.ingest_packet(DPID1, INPORT1, pkts2.RAW[0], base_time)
    #*** Reverse direction (plus ignored duplicate from DPID2):
    flow.ingest_packet(DPID1, INPORT1, pkts2.RAW[1], offset(10))
    flow.ingest_packet(DPID2, INPORT1, pkts2.RAW[1], offset(30))
    #*** Forward direction:
    flow.ingest_packet(DPID1, INPORT1, pkts2.RAW[2], offset(80))

    #*** Smallest interval so far is forward: base_time to +80ms:
    assert flow.min_interpacket_interval() == 0.080

    #*** Forward direction:
    flow.ingest_packet(DPID1, INPORT1, pkts2.RAW[3], offset(90))
    #*** Reverse direction:
    flow.ingest_packet(DPID1, INPORT1, pkts2.RAW[4], offset(190))

    #*** Now the smallest interval is forward: +80ms to +90ms:
    assert flow.min_interpacket_interval() == 0.010
コード例 #23
0
ファイル: test_api_external.py プロジェクト: rubiruchi/nmeta
def test_get_classification():
    """
    Test get_classification, which takes a flow_hash and returns a
    dictionary of a classification object for that flow_hash (if
    found), otherwise a dictionary of an empty classification object.
    """
    #*** Build flow, policy and identities objects:
    flow = flows_module.Flow(config)
    policy = policy_module.Policy(config)
    ident = identities_module.Identities(config, policy)

    #*** Swap in a main_policy that matches tcp-80:
    policy = policy_module.Policy(
        config,
        pol_dir_default="config/tests/regression",
        pol_dir_user="******",
        pol_filename="main_policy_regression_static_3.yaml")

    #*** Ingest Flow 1 Packet 0 (Client TCP SYN) and classify it:
    flow.ingest_packet(DPID1, INPORT1, pkts.RAW[0], datetime.datetime.now())
    policy.check_policy(flow, ident)

    logger.debug("pkt0 flow classification is %s",
                 flow.classification.dbdict())

    #*** Persist the classification to the classifications collection:
    flow.classification.commit()

    #*** Retrieve it back via get_classification and verify contents:
    clasfn_result = api.get_classification(flow.classification.flow_hash)
    assert clasfn_result['classified'] == 1
    assert clasfn_result['classification_tag'] == \
        "Constrained Bandwidth Traffic"
    actions = clasfn_result['actions']
    assert actions['set_desc'] == "Constrained Bandwidth Traffic"
    assert actions['qos_treatment'] == "constrained_bw"
コード例 #24
0
def test_origin():
    """
    Test the origin method, which returns a tuple of client IP and
    first DPID. We ingest multiple packets on the flow, but origin
    should always report the first-seen source IP and DPID.
    """
    #*** Instantiate a flow object:
    flow = flows_module.Flow(config)

    def check_origin_locked():
        #*** Origin must still report the first-seen source IP and DPID:
        assert flow.origin()[0] == pkts2.IP_SRC[0]
        assert flow.origin()[1] == DPID1

    #*** First packet locks in the source IP and DPID:
    flow.ingest_packet(DPID1, INPORT1, pkts2.RAW[0], datetime.datetime.now())
    check_origin_locked()

    #*** Same packet from a different DPID should be ignored:
    flow.ingest_packet(DPID2, INPORT1, pkts2.RAW[0], datetime.datetime.now())
    check_origin_locked()

    #*** A further packet should also leave the origin unchanged:
    flow.ingest_packet(DPID2, INPORT1, pkts2.RAW[1], datetime.datetime.now())
    check_origin_locked()
コード例 #25
0
def test_flow_LLDP():
    """
    Test ingesting LLDP (non-IP) packets.
    """
    #*** Instantiate a flow object:
    flow = flows_module.Flow(config)

    def check_lldp_state():
        #*** LLDP isn't a flow, so the count must stay at 1 and the
        #*** packet context must reflect the LLDP frame:
        assert flow.packet_count() == 1
        assert flow.packet.length == pkts_lldp.LEN[0]
        assert flow.packet.eth_src == pkts_lldp.ETH_SRC[0]
        assert flow.packet.eth_dst == pkts_lldp.ETH_DST[0]

    #*** Ingest the same LLDP packet twice; the second ingestion must
    #*** not bump the packet count because LLDP is not a flow:
    for _ in range(2):
        flow.ingest_packet(DPID1, INPORT1, pkts_lldp.RAW[0],
                           datetime.datetime.now())
        check_lldp_state()
コード例 #26
0
ファイル: test_api_external.py プロジェクト: rubiruchi/nmeta
def test_response_pi_rate():
    """
    Test ingesting packets from an IPv4 HTTP flow, and check packet-in
    rate is as expected at various points.

    Starts api_external in a separate process and always terminates it
    in a finally block so a failing assertion cannot leak the
    sub-process into subsequent tests.
    """
    #*** Start api_external as separate process:
    logger.info("Starting api_external")
    api_ps = multiprocessing.Process(target=api.run, args=())
    api_ps.start()

    #*** Everything after start() runs under try/finally so the
    #*** sub-process is terminated even if an assertion fails:
    try:
        #*** Sleep to allow api_external to start fully:
        time.sleep(.5)

        #*** Instantiate a flow object:
        flow = flows_module.Flow(config)

        #*** Test Flow 1 Packet 1 (Client TCP SYN):
        flow.ingest_packet(DPID1, INPORT1, pkts.RAW[0],
                           datetime.datetime.now())

        #*** Call the external API:
        api_result = get_api_result(URL_TEST_I_C_PI_RATE)

        #*** Assumes pi_rate calculated as 10 second average rate:
        assert api_result['pi_rate'] == 0.1

        #*** Ingest two more packets:
        flow.ingest_packet(DPID1, INPORT1, pkts.RAW[1],
                           datetime.datetime.now())
        flow.ingest_packet(DPID1, INPORT1, pkts.RAW[2],
                           datetime.datetime.now())

        #*** Call the external API:
        api_result = get_api_result(URL_TEST_I_C_PI_RATE)

        #*** Assumes pi_rate calculated as 10 second average rate:
        assert api_result['pi_rate'] == 0.3
    finally:
        #*** Stop api_external sub-process unconditionally:
        api_ps.terminate()
コード例 #27
0
def test_indexing():
    """
    Test indexing of packet_ins and classification database collections

    Packets are ingested from 3 flows.

    Packets from one of the flows are too old to be significant

    The most recent packet is the one that the flow context is in
    and it only has one other packet ingested (i.e. packet_count == 2)
    """
    #*** Initial main_policy won't match as looking for tcp-1234:
    policy = policy_module.Policy(
        config,
        pol_dir_default="config/tests/regression",
        pol_dir_user="******",
        pol_filename="main_policy_regression_static.yaml")

    #*** Instantiate flow and identities objects:
    flow = flows_module.Flow(config)
    ident = identities_module.Identities(config, policy)

    #*** Ingest packets older than flow timeout:
    #*** These two are backdated beyond flow_time_limit so that
    #*** time-bounded queries later in the test must exclude them:
    flow.ingest_packet(DPID1, INPORT1, pkts_ARP_2.RAW[0], datetime.datetime.now() - datetime.timedelta \
                                (seconds=config.get_value("flow_time_limit")+1))
    flow.ingest_packet(DPID1, INPORT1, pkts_ARP_2.RAW[1], datetime.datetime.now() - datetime.timedelta \
                                (seconds=config.get_value("flow_time_limit")+1))
    #*** Ingest current packets from two different flows:
    flow.ingest_packet(DPID1, INPORT1, pkts2.RAW[0], datetime.datetime.now())
    #*** Classify the packet:
    policy.check_policy(flow, ident)
    flow.classification.commit()
    flow.ingest_packet(DPID1, INPORT2, pkts2.RAW[1], datetime.datetime.now())
    flow.ingest_packet(DPID1, INPORT1, pkts2.RAW[2], datetime.datetime.now())
    flow.ingest_packet(DPID1, INPORT1, pkts.RAW[1], datetime.datetime.now())
    #*** Classify the packet:
    policy.check_policy(flow, ident)
    flow.classification.commit()
    flow.ingest_packet(DPID1, INPORT1, pkts2.RAW[3], datetime.datetime.now())
    flow.ingest_packet(DPID1, INPORT2, pkts2.RAW[4], datetime.datetime.now())
    flow.ingest_packet(DPID1, INPORT2, pkts2.RAW[5], datetime.datetime.now())
    flow.ingest_packet(DPID1, INPORT1, pkts2.RAW[6], datetime.datetime.now())
    #*** Classify the packet:
    policy.check_policy(flow, ident)
    flow.classification.commit()
    flow.ingest_packet(DPID1, INPORT2, pkts2.RAW[7], datetime.datetime.now())
    flow.ingest_packet(DPID1, INPORT1, pkts2.RAW[8], datetime.datetime.now())
    flow.ingest_packet(DPID1, INPORT2, pkts2.RAW[9], datetime.datetime.now())
    flow.ingest_packet(DPID1, INPORT1, pkts2.RAW[10], datetime.datetime.now())
    flow.ingest_packet(DPID1, INPORT2, pkts2.RAW[11], datetime.datetime.now())
    #*** Final ingest leaves the flow context on pkts.RAW[2], whose flow
    #*** has exactly one other (current) packet ingested:
    flow.ingest_packet(DPID1, INPORT1, pkts.RAW[2], datetime.datetime.now())
    #*** Classify the packet:
    policy.check_policy(flow, ident)
    flow.classification.commit()

    #*** Test packet_ins collection indexing...
    #*** Should be 16 documents in packet_ins collection:
    assert flow.packet_ins.count() == 16
    #*** Get query execution statistics:
    #*** packet_count(test=1) returns the MongoDB explain plan for the
    #*** packet-count query instead of the count itself:
    explain = flow.packet_count(test=1)
    #*** Check an index is used:
    assert explain['queryPlanner']['winningPlan']['inputStage'][
        'stage'] == 'IXSCAN'
    #*** Check how query ran:
    #*** Only the 2 current packets of the context flow should be
    #*** returned; the 2 backdated packets are outside the time window:
    assert explain['executionStats']['executionSuccess'] == True
    assert explain['executionStats']['nReturned'] == 2
    #*** MongoDB returns 2 or 3 for this, not sure why...???:
    assert explain['executionStats']['totalKeysExamined'] > 1
    assert explain['executionStats']['totalKeysExamined'] < 4
    assert explain['executionStats']['totalDocsExamined'] == 2

    #*** Test classifications collection indexing...
    #*** Should be 4 documents in classifications collection:
    assert flow.classifications.count() == 4
    #*** Get query execution statistics:
    #*** test_query() returns the explain plan for the classification
    #*** lookup by flow_hash:
    explain2 = flow.classification.test_query()
    #*** Check an index is used:
    assert explain2['queryPlanner']['winningPlan']['inputStage'][
        'stage'] == 'FETCH'
    #*** Check how query ran:
    assert explain2['executionStats']['executionSuccess'] == True
    assert explain2['executionStats']['nReturned'] == 1
    assert explain2['executionStats']['totalKeysExamined'] == 1
    assert explain2['executionStats']['totalDocsExamined'] == 1
コード例 #28
0
def test_classification_identity():
    """
    Test that classification returns correct information for an
    identity classification.
    Create a classification object, record it to DB, then check that
    the classification can be retrieved.
    """
    #*** Load main_policy that matches identity pc1
    #*** and has action to constrain its bandwidth:
    policy = policy_module.Policy(
        config,
        pol_dir_default="config/tests/regression",
        pol_dir_user="******",
        pol_filename="main_policy_regression_identity_2.yaml")

    #*** Build flow and identities objects:
    flow = flows_module.Flow(config)
    ident = identities_module.Identities(config, policy)

    def ingest_and_harvest(raw):
        #*** Ingest a packet, then harvest identity metadata from it:
        flow.ingest_packet(DPID1, INPORT1, raw, datetime.datetime.now())
        ident.harvest(raw, flow.packet)

    #*** LLDP Packet 2 (lg1) that shouldn't match the policy:
    # 206 08:00:27:21:4f:ea 01:80:c2:00:00:0e LLDP NoS = 08:00:27:21:4f:ea
    # TTL = 120 System Name = lg1.example.com
    ingest_and_harvest(pkts_lldp.RAW[2])

    #*** Ingest a packet from pc1 and classify it:
    # 10.1.0.1 10.1.0.2 TCP 74 43297 > http [SYN]
    flow.ingest_packet(DPID1, INPORT1, pkts.RAW[0], datetime.datetime.now())
    policy.check_policy(flow, ident)

    #*** Classified, but unmatched, so tag and actions are empty:
    assert flow.classification.flow_hash == flow.packet.flow_hash
    assert flow.classification.classified == 1
    assert flow.classification.classification_tag == ""
    assert flow.classification.actions == {}

    #*** ARP response for pc1 provides the MAC to IP mapping:
    ingest_and_harvest(pkts_ARP_2.RAW[1])

    #*** LLDP Packet 0 (pc1) that should match the policy:
    # 206 08:00:27:2a:d6:dd 01:80:c2:00:00:0e LLDP NoS = 08:00:27:2a:d6:dd
    # TTL = 120 System Name = pc1.example.com
    ingest_and_harvest(pkts_lldp.RAW[0])

    #*** Ingest another packet from pc1 and classify it:
    # 10.1.0.1 10.1.0.2 TCP 74 43297 > http [SYN]
    flow.ingest_packet(DPID1, INPORT1, pkts.RAW[0], datetime.datetime.now())
    policy.check_policy(flow, ident)

    #*** This time the identity matches, so tag and actions are set:
    assert flow.classification.flow_hash == flow.packet.flow_hash
    assert flow.classification.classified == 1
    assert flow.classification.classification_tag == \
        "Constrained Bandwidth Traffic"
    assert flow.classification.actions == {
        'qos_treatment': 'constrained_bw',
        'set_desc': 'Constrained Bandwidth Traffic'
    }
コード例 #29
0
def test_classification_static():
    """
    Test that classification returns correct information for a static
    classification.
    Create a classification object, record it to DB, then check that
    the classification can be retrieved.
    """
    matched_tag = "Constrained Bandwidth Traffic"
    matched_actions = {
        'qos_treatment': 'constrained_bw',
        'set_desc': 'Constrained Bandwidth Traffic'
    }

    def load_policy(filename):
        #*** Load a regression-test main policy by filename:
        return policy_module.Policy(
            config,
            pol_dir_default="config/tests/regression",
            pol_dir_user="******",
            pol_filename=filename)

    def assert_matched():
        #*** Flow must be classified with the constrained bandwidth
        #*** tag and actions:
        assert flow.classification.flow_hash == flow.packet.flow_hash
        assert flow.classification.classified == 1
        assert flow.classification.classification_tag == matched_tag
        assert flow.classification.actions == matched_actions

    #*** Initial main_policy won't match as looking for tcp-1234:
    policy = load_policy("main_policy_regression_static.yaml")

    #*** Build flow and identities objects:
    flow = flows_module.Flow(config)
    ident = identities_module.Identities(config, policy)

    #*** Ingest Flow 2 Packet 0 (Client TCP SYN):
    flow.ingest_packet(DPID1, INPORT1, pkts2.RAW[0], datetime.datetime.now())

    #*** Base (not yet classified) state:
    assert flow.classification.flow_hash == flow.packet.flow_hash
    assert flow.classification.classified == 0
    assert flow.classification.classification_tag == ""
    assert flow.classification.classification_time == 0
    assert flow.classification.actions == {}

    #*** Classify; the policy doesn't match, so tag/actions stay empty:
    policy.check_policy(flow, ident)
    assert flow.classification.flow_hash == flow.packet.flow_hash
    assert flow.classification.classified == 1
    assert flow.classification.classification_tag == ""
    assert flow.classification.classification_time == 0
    assert flow.classification.actions == {}

    #*** Policy that matches tcp-80; classify again and check match:
    policy = load_policy("main_policy_regression_static_3.yaml")
    policy.check_policy(flow, ident)
    assert_matched()

    #*** Now test that classification remains after ingesting more
    #*** packets on same flow.
    #*** Load main_policy that matches dst tcp-80:
    policy = load_policy("main_policy_regression_static_4.yaml")

    #*** Ingest Flow 1 Packet 0 (Client TCP SYN) and classify it:
    flow.ingest_packet(DPID1, INPORT1, pkts.RAW[0], datetime.datetime.now())
    policy.check_policy(flow, ident)

    logger.debug("pkt0 flow classification is %s",
                 flow.classification.dbdict())

    assert_matched()

    #*** Write classification result to classifications collection:
    flow.classification.commit()

    #*** Ingest Flow 1 Packet 1 (Client TCP SYN+ACK):
    flow.ingest_packet(DPID1, INPORT1, pkts.RAW[1], datetime.datetime.now())

    logger.debug("pkt1a flow classification is %s",
                 flow.classification.dbdict())

    assert flow.classification.classified == 1

    #*** We would never run this as otherwise above test would have
    #*** failed. Left in here to make the point that you shouldn't
    #*** classify if classified is set:
    if not flow.classification.classified:
        #*** Classify the packet:
        policy.check_policy(flow, ident)

    logger.debug("pkt1b flow classification is %s",
                 flow.classification.dbdict())

    #*** Classification shouldn't be changed by the second packet:
    assert_matched()
コード例 #30
0
 def test_flow(self):
     """Apply each check in the tuple (currently only
     ``returns_correct_shape``) to a freshly built Flow composed of a
     Reverse and a Normal flow.
     """
     for test in (returns_correct_shape, ):
         test(self, flows.Flow(flows.Reverse(), flows.Normal()))