Example #1
def parse_eth_header(header_string):
    """Get MAC addresses from Ethernet header string"""

    mac_parser_class = mac_address()  # MAC parser (class defined elsewhere in the project)

    # Destination MAC
    ord_dest_mac = [ord(x) for x in header_string[0:6]]
    dest_mac = mac_parser_class.mac_parse(ord_dest_mac)  # Get MAC and MAC OUI

    # Source MAC
    ord_src_mac = [ord(x) for x in header_string[6:12]]
    src_mac = mac_parser_class.mac_parse(ord_src_mac)  # Get MAC and MAC OUI

    return (dest_mac[0], src_mac[0], dest_mac[1], src_mac[1])  # DST MAC, SRC MAC, DST MAC OUI, SRC MAC OUI
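
For reference, a minimal self-contained sketch of the same parsing without the project's mac_address helper (the colon-separated formatting is an assumption, and the OUI lookup that mac_parse performs is skipped here):

def parse_eth_header_simple(frame):
    """Return (dst_mac, src_mac) from the first 12 bytes of a raw Ethernet frame."""
    def fmt(chunk):
        return ":".join("{0:02x}".format(ord(x)) for x in chunk)
    return fmt(frame[0:6]), fmt(frame[6:12])

print(parse_eth_header_simple("\x00\x11\x22\x33\x44\x55\x66\x77\x88\x99\xaa\xbb"))
# -> ('00:11:22:33:44:55', '66:77:88:99:aa:bb')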
Example #2
def host_adapter(
        data,  # type: "XDR Data"
        agent,  # type: int
        subagent  # type: int
):
    """Host Adapter Counter - Type: Counter, Enterprise: 0, Format: 2001"""
    mac_parser_class = mac_address()  # MAC parser class
    sample_data = {}  # Parsed results, keyed by interface hash
    num_adapters = int(data.unpack_uint())
    for _ in range(num_adapters):
        interface_index = int(data.unpack_uint())
        interface_hash = hash((agent, subagent, interface_index))  # Unambiguous composite key
        sample_data[interface_hash] = {}
        sample_data[interface_hash]["Index"] = interface_index
        mac_count = int(data.unpack_uint())
        for _ in range(mac_count):
            a = data.unpack_fopaque(6)  # 6 raw MAC bytes
            ord_mac = [ord(x) for x in a]
            parsed_mac = mac_parser_class.mac_parse(ord_mac)
            sample_data[interface_hash]["MAC"] = parsed_mac[0]
            sample_data[interface_hash]["MAC OUI"] = parsed_mac[1]
    data.done()  # Verify all data unpacked
    return sample_data
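
The data argument is consumed exactly like the standard library's xdrlib.Unpacker (unpack_uint, unpack_fopaque, done). Assuming that class, a round-trip sketch of the record layout host_adapter() expects, packing one adapter that carries a single MAC:

import xdrlib  # note: deprecated since Python 3.11, removed in 3.13

packer = xdrlib.Packer()
packer.pack_uint(1)  # num_adapters
packer.pack_uint(7)  # interface_index
packer.pack_uint(1)  # mac_count
packer.pack_fopaque(6, b"\x00\x11\x22\x33\x44\x55")  # one 6-byte MAC

data = xdrlib.Unpacker(packer.get_buffer())
assert data.unpack_uint() == 1  # num_adapters
assert data.unpack_uint() == 7  # interface_index
assert data.unpack_uint() == 1  # mac_count
assert data.unpack_fopaque(6) == b"\x00\x11\x22\x33\x44\x55"
data.done()  # raises xdrlib.Error if any bytes remain unread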
Example #3
from kafka import KafkaProducer  # kafka-python client (import assumed; not shown in the excerpt)

# Stage the flows for the bulk API index operation
flow_dic = []

# Cache the Netflow v9 templates in received order to decode the data flows. ORDER MATTERS FOR TEMPLATES.
template_list = {}

# Record counter for Elasticsearch bulk API upload trigger
record_num = 0

### Netflow v9 Collector ###
if __name__ == "__main__":

    icmp_parser = icmp_parse()  # ICMP Types and Codes
    ip_parser = ip_parse()  # Unpacking and parsing IPv4 and IPv6 addresses
    mac = mac_address()  # Unpacking and parsing MAC addresses and OUIs
    netflow_v9_parser = netflowv9_parse()  # Parsing Netflow v9 structures
    int_un = int_parse()  # Unpacking and parsing integers
    ports_protocols_parser = ports_and_protocols()  # Ports and Protocols
    name_lookup = name_lookups()  # DNS reverse lookups

    # Continually collect packets
    producer = KafkaProducer(
        #value_serializer=lambda m: pickle.dumps(m).encode('utf-8'),
        bootstrap_servers=['localhost:9092'],
        send_buffer_bytes=131072)
    while True:

        pointer = 0  # Tracking location in the packet
        flow_counter = 0  # For debug purposes only
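
A hedged sketch of how a parsed flow record might then be published through the producer created above (the "netflow" topic name and the JSON payload are assumptions, not the project's actual values):

import json
from kafka import KafkaProducer

producer = KafkaProducer(bootstrap_servers=["localhost:9092"])
flow = {"Sensor": "192.0.2.1", "Flows": 3}  # hypothetical parsed record
producer.send("netflow", json.dumps(flow).encode("utf-8"))  # topic name is an assumption
producer.flush()  # block until buffered records are delivered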
Example #4
# Cache the Netflow v9 templates in received order to decode the data flows. ORDER MATTERS FOR TEMPLATES.
template_list = {}

# Record counter for Elasticsearch bulk API upload trigger
record_num = 0

### Netflow v9 Collector ###
if __name__ == '__main__':
    # ICMP Types and Codes
    icmp_parser = icmp_parse()
    # Unpacking and parsing IPv4 and IPv6 addresses
    ip_parser = ip_parse()
    # Unpacking and parsing MAC addresses and OUIs
    mac = mac_address()
    # Parsing Netflow v9 structures
    netflow_v9_parser = netflowv9_parse()
    # Unpacking and parsing integers
    int_un = int_parse()
    # Ports and Protocols
    ports_protocols_parser = ports_and_protocols()
    # DNS reverse lookups
    name_lookup = name_lookups()

    # Continually collect packets
    while True:
        # Tracking location in the packet
        pointer = 0
        # For debug purposes only
        flow_counter = 0
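
Because both collectors stress that template order matters, here is a hedged sketch of the caching idea behind template_list: a NetFlow v9 data FlowSet can only be decoded after the template with the matching ID has been received (the field layout below is hypothetical):

from collections import OrderedDict

template_list = OrderedDict()  # template ID -> [(field_type, field_length), ...]

def cache_template(template_id, fields):
    """Store a template's field layout so later data flows can be decoded."""
    template_list[template_id] = fields

def can_decode(template_id):
    """A data FlowSet is decodable only once its template has been cached."""
    return template_id in template_list

cache_template(256, [(8, 4), (12, 4)])  # e.g. IPv4 source and destination address
print(can_decode(256))  # True
print(can_decode(257))  # False: template 257 has not arrived yet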
Example #5
from elasticsearch import Elasticsearch  # elasticsearch-py client (import assumed; not shown in the excerpt)

es = Elasticsearch([elasticsearch_host])  # elasticsearch_host is defined in the project's configuration

# IPFIX server
if __name__ == "__main__":

    flow_dic = []  # Stage the flows for the bulk API index operation

    template_list = {}  # Cache the IPFIX templates (order matters) to decode the data flows

    record_num = 0  # Record counter for Elasticsearch bulk upload API
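
    # Hedged sketch of the flush this counter drives; the threshold name and the
    # elasticsearch-py bulk helper call are assumptions, not the original code:
    #
    #     from elasticsearch import helpers
    #     if record_num >= bulk_insert_count:  # bulk_insert_count is hypothetical
    #         helpers.bulk(es, flow_dic)       # index all staged flows in one call
    #         flow_dic = []                    # reset the staging list
    #         record_num = 0                   # reset the counter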

    # Classes for parsing fields
    icmp_parser = icmp_parse()  # Class for parsing ICMP Types and Codes
    ip_parser = ip_parse()  # Class for unpacking IPv4 and IPv6 addresses
    mac = mac_address()  # Class for parsing MAC addresses and OUIs
    int_un = int_parse()  # Class for parsing integers
    ports_protocols_parser = ports_and_protocols()  # Class for parsing ports and protocols
    name_lookup = name_lookups()  # Class for DNS lookups

    while True:  # Continually collect packets

        # Listen for packets inbound (netflow_sock is the UDP socket bound earlier, not shown)
        flow_packet_contents, sensor_address = netflow_sock.recvfrom(65565)

        ### Unpack the flow packet header ###
        try:
            packet_attributes = {}  # Flow header attributes cache

            (packet_attributes["netflow_version"],