# Stage the parsed flows for the Elasticsearch bulk index upload
flow_dic = []

# Cache the Netflow v9 templates, in received order, to decode the data flows.
# ORDER MATTERS FOR TEMPLATES.
template_list = {}

# Record counter for the Elasticsearch bulk API upload trigger
record_num = 0

### Netflow v9 Collector ###
if __name__ == "__main__":

    # ICMP Types and Codes
    icmp_parser = icmp_parse()

    # Unpacking and parsing IPv4 and IPv6 addresses
    ip_parser = ip_parse()

    # Unpacking and parsing MAC addresses and OUIs
    mac = mac_address()

    # Parsing Netflow v9 structures
    netflow_v9_parser = netflowv9_parse()

    # Unpacking and parsing integers
    int_un = int_parse()

    # Ports and Protocols
    ports_protocols_parser = ports_and_protocols()

    # DNS reverse lookups
    name_lookups = name_lookups()

    # Kafka producer (kafka-python) for publishing the parsed flows
    producer = KafkaProducer(
        #value_serializer=lambda m: pickle.dumps(m).encode('utf-8'),
        bootstrap_servers=['localhost:9092'],
        send_buffer_bytes=131072)

    # Continually collect packets
    while True:

        # Tracking location in the packet
        pointer = 0

        # For debug purposes only
        flow_counter = 0

        # Listen for packets inbound (buffer size here is an assumption)
        flow_packet_contents, sensor_address = netflow_sock.recvfrom(65535)
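# The rest of the loop body is not shown in this excerpt. As a rough,
# illustrative sketch only (not this project's actual decoder), the
# hypothetical helper below shows how each received datagram is typically
# handled: unpack the 20-byte Netflow v9 header, cache template FlowSets
# (FlowSet ID 0) in the template cache, and treat data FlowSets
# (FlowSet ID > 255) as records to decode against previously cached
# templates before publishing them to Kafka. The helper name, the template
# cache key, and the "netflow" topic are assumptions, not names from this code.
def process_netflow_v9_packet(packet, sensor, templates, kafka_producer):
    import struct
    import json

    # Netflow v9 header: version, count, sysUptime, UNIX secs, sequence, source ID
    version, count, sys_uptime, unix_secs, sequence, source_id = struct.unpack(
        '!HHIIII', packet[0:20])
    pointer = 20  # Start of the first FlowSet

    while pointer < len(packet):
        # Each FlowSet starts with a 4-byte header: FlowSet ID and length
        flowset_id, flowset_length = struct.unpack('!HH', packet[pointer:pointer + 4])
        if flowset_length == 0:
            break  # Malformed packet; avoid looping forever

        if flowset_id == 0:
            # Template FlowSet: cache the raw bytes so later data FlowSets can be
            # decoded. ORDER MATTERS - data arriving before its template cannot be
            # decoded. (A real decoder keys on sensor plus template ID.)
            templates[(sensor, source_id)] = packet[pointer:pointer + flowset_length]
        elif flowset_id > 255:
            # Data FlowSet: decode against the cached template (decoding omitted
            # here), then publish the record to Kafka.
            record = {"sensor": sensor, "flowset_id": flowset_id, "sequence": sequence}
            kafka_producer.send('netflow', json.dumps(record).encode('utf-8'))

        pointer += flowset_length  # Advance to the next FlowSet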
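# netflow_sock is referenced above but never created in this excerpt. The
# hypothetical helper below sketches the UDP socket it is assumed to be; the
# helper name, bind address, and port 2055 (a common Netflow export port) are
# assumptions, not values taken from this code.
def open_netflow_socket(host='0.0.0.0', port=2055):
    import socket

    # Plain UDP socket listening for exported Netflow datagrams
    sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    sock.bind((host, port))
    return sock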