Example #1
# Imports and config this excerpt needs (the original module defines them elsewhere)
import logging
import socket
import sys
from elasticsearch import Elasticsearch
from kafka import KafkaConsumer
import parser_modules

netflow_v5_port = 2055            # Assumed: the conventional NetFlow port
elasticsearch_host = 'localhost'  # Assumed: a local Elasticsearch node

# Set up the socket listener
try:
    netflow_sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    netflow_sock.bind(('0.0.0.0', netflow_v5_port))
    logging.info("Bound to port " + str(netflow_v5_port) + " - OK")
except Exception as socket_error:
    logging.critical("Could not open or bind a socket on port " + str(netflow_v5_port))
    logging.critical(str(socket_error))
    sys.exit(1)

# Elasticsearch client
es = Elasticsearch([elasticsearch_host])

# DNS lookup class
name_lookups = parser_modules.name_lookups()

# TCP / UDP identification class
tcp_udp = parser_modules.ports_and_protocols()

### Netflow v5 Collector ###
if __name__ == "__main__":

    # Stage the flows for the bulk API index operation
    flow_dic = []

    # Number of cached records
    record_num = 0

    # Consume staged flow records from Kafka (broker address assumed to match Example #3)
    consumer = KafkaConsumer('my-topic',
                             bootstrap_servers=['localhost:9092'])
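Example #1 is cut off at the consumer loop. For orientation, here is a minimal sketch of how that loop could drain Kafka and trigger the Elasticsearch bulk upload that flow_dic and record_num are staged for; the batch threshold, the "flows" index name, and the assumption that the consumer yields ready-to-index documents are all illustrative, not taken from the excerpt.

from elasticsearch import helpers

for message in consumer:
    # Stage each flow as a bulk action (index name "flows" is an assumption)
    flow_dic.append({"_index": "flows", "_source": message.value})
    record_num += 1
    if record_num >= 700:  # Assumed batch threshold before flushing
        helpers.bulk(es, flow_dic)
        flow_dic = []
        record_num = 0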
Example #2
### Netflow v9 Collector ###
if __name__ == '__main__':
    # ICMP Types and Codes
    icmp_parser = icmp_parse()
    # Unpacking and parsing IPv4 and IPv6 addresses
    ip_parser = ip_parse()
    # Unpacking and parsing MAC addresses and OUIs
    mac = mac_address()
    # Parsing Netflow v9 structures
    netflow_v9_parser = netflowv9_parse()
    # Unpacking and parsing integers
    int_un = int_parse()
    # Ports and Protocols
    ports_protocols_parser = ports_and_protocols()
    # DNS reverse lookups
    name_lookup = name_lookups()

    # Continually collect packets
    while True:
        # Tracking location in the packet
        pointer = 0
        # For debug purposes only
        flow_counter = 0

        # Listen for packets inbound
        flow_packet_contents, sensor_address = netflow_sock.recvfrom(65565)

        ### Unpack the flow packet header ###
        try:
            logging.info('Unpacking header from {}'.format(sensor_address[0]))
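Example #2 ends just as the header unpack begins. A minimal sketch of that step, assuming the standard 20-byte NetFlow v9 packet header from RFC 3954; the helper and its field names are illustrative, not the repository's own.

import struct

def unpack_v9_header(flow_packet_contents):
    # The fixed NetFlow v9 header: version, record count, uptime,
    # export timestamp, sequence number, and source ID (RFC 3954)
    (version,
     flow_count,
     sys_uptime,
     unix_secs,
     sequence_number,
     source_id) = struct.unpack('!HHIIII', flow_packet_contents[0:20])
    return {"netflow_version": version,
            "flow_count": flow_count,
            "sys_uptime": sys_uptime,
            "unix_secs": unix_secs,
            "sequence_number": sequence_number,
            "source_id": source_id}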
Example #3
# Cache of Netflow v9 templates (a `global` statement is redundant at module scope)
template_list = {}

# Record counter for Elasticsearch bulk API upload trigger
record_num = 0

### Netflow v9 Collector ###
if __name__ == "__main__":

    icmp_parser = icmp_parse()  # ICMP Types and Codes
    ip_parser = ip_parse()  # Unpacking and parsing IPv4 and IPv6 addresses
    mac = mac_address()  # Unpacking and parsing MAC addresses and OUIs
    netflow_v9_parser = netflowv9_parse()  # Parsing Netflow v9 structures
    int_un = int_parse()  # Unpacking and parsing integers
    ports_protocols_parser = ports_and_protocols()  # Ports and Protocols
    name_lookups = name_lookups()  # DNS reverse lookups (note: this rebinds and shadows the class name)

    # Kafka producer used to ship parsed flows downstream
    producer = KafkaProducer(
        #value_serializer=lambda m: pickle.dumps(m).encode('utf-8'),
        bootstrap_servers=['localhost:9092'],
        send_buffer_bytes=131072)

    # Continually collect packets
    while True:

        pointer = 0  # Tracking location in the packet
        flow_counter = 0  # For debug purposes only

        flow_packet_contents, sensor_address = netflow_sock.recvfrom(65565)  # Listen for inbound packets

        #print "No of records {}".format(record_num)
Example #4
    flow_dic = []  # Stage the flows for the bulk API index operation

    template_list = {}  # Cache of IPFIX templates (OrderedDicts) used to decode the data flows

    record_num = 0  # Record counter for Elasticsearch bulk upload API

    # Classes for parsing fields
    icmp_parser = icmp_parse()  # Class for parsing ICMP Types and Codes
    ip_parser = ip_parse()  # Class for unpacking IPv4 and IPv6 addresses
    mac = mac_address()  # Class for parsing MAC addresses and OUIs
    int_un = int_parse()  # Class for parsing integers
    ports_protocols_parser = ports_and_protocols()  # Class for parsing ports and protocols
    name_lookups = name_lookups()  # Class for DNS lookups (note: this rebinds and shadows the class name)

    while True:  # Continually collect packets

        flow_packet_contents, sensor_address = netflow_sock.recvfrom(65565)  # Listen for inbound packets

        ### Unpack the flow packet header ###
        try:
            packet_attributes = {}  # Flow header attributes cache

            (packet_attributes["netflow_version"],
             packet_attributes["ipfix_flow_bytes"],
             packet_attributes["export_time"],
             packet_attributes["sequence_number"],
             packet_attributes["observation_id"]) = struct.unpack(