# Stage the flows for the bulk Elasticsearch API index operation
global flow_dic
flow_dic = []

# Cache the Netflow v9 templates in received order to decode the data flows. ORDER MATTERS FOR TEMPLATES.
global template_list
template_list = {}

# Record counter for Elasticsearch bulk API upload trigger
record_num = 0

### Netflow v9 Collector ###
if __name__ == '__main__':

    # ICMP Types and Codes
    icmp_parser = icmp_parse()

    # Unpacking and parsing IPv4 and IPv6 addresses
    ip_parser = ip_parse()

    # Unpacking and parsing MAC addresses and OUIs
    mac = mac_address()

    # Parsing Netflow v9 structures
    netflow_v9_parser = netflowv9_parse()

    # Unpacking and parsing integers
    int_un = int_parse()

    # Ports and Protocols
    ports_protocols_parser = ports_and_protocols()

    # DNS reverse lookups
    name_lookup = name_lookups()

    # Continually collect packets
    while True:

        # Tracking location in the packet
        pointer = 0
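
        # Illustrative sketch only (not part of the original collector): a minimal
        # next step that reads one datagram and unpacks the fixed 20-byte Netflow v9
        # packet header. It assumes a UDP socket named netflow_sock is already bound
        # earlier in the file (as in the IPFIX collector) and that `struct` is
        # imported at the top; the local variable names are assumptions, not the
        # author's.
        flow_packet_contents, sensor_address = netflow_sock.recvfrom(65565)

        (
            netflow_version,   # Should be 9 for Netflow v9
            total_flow_count,  # Number of flow records in this export packet
            sys_uptime,        # Milliseconds since the exporter booted
            unix_secs,         # Export timestamp (seconds since epoch)
            sequence_number,   # Export packet sequence counter
            source_id,         # Exporter Source ID / observation domain
        ) = struct.unpack('!HHIIII', flow_packet_contents[0:20])

        pointer += 20  # Header consumed, FlowSets begin here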
# Stage multiple flows for the bulk Elasticsearch API index operation
global flow_dic
flow_dic = []

# Cache the Netflow v9 templates in received order to decode the data flows. ORDER MATTERS FOR TEMPLATES.
global template_list
template_list = {}

# Record counter for Elasticsearch bulk API upload trigger
record_num = 0

### Netflow v9 Collector ###
if __name__ == "__main__":

    icmp_parser = icmp_parse()  # ICMP Types and Codes
    ip_parser = ip_parse()  # Unpacking and parsing IPv4 and IPv6 addresses
    mac = mac_address()  # Unpacking and parsing MAC addresses and OUIs
    netflow_v9_parser = netflowv9_parse()  # Parsing Netflow v9 structures
    int_un = int_parse()  # Unpacking and parsing integers
    ports_protocols_parser = ports_and_protocols()  # Ports and Protocols
    name_lookups = name_lookups()  # DNS reverse lookups

    # Kafka producer for shipping parsed flows
    producer = KafkaProducer(
        #value_serializer=lambda m: pickle.dumps(m).encode('utf-8'),
        bootstrap_servers=['localhost:9092'],
        send_buffer_bytes=131072)

    # Continually collect packets
    while True:
        pointer = 0  # Tracking location in the packet
        flow_counter = 0  # For debug purposes only
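
        # Illustrative sketch only (not from the original file): once a data flow
        # has been decoded into a dict, it could be shipped through the
        # KafkaProducer built above. The topic name 'flows' and the JSON-based
        # serialization are assumptions (the value_serializer above is commented
        # out, so the value must already be bytes), and `json` would need to be
        # imported at the top of the file.
        flow_record = {'netflow_version': 9, 'sensor': '127.0.0.1'}  # Placeholder record
        producer.send('flows', json.dumps(flow_record).encode('utf-8'))
        producer.flush()  # Block until the buffered record has been sent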
### Elasticsearch instance ###
es = Elasticsearch([elasticsearch_host])

# IPFIX server
if __name__ == "__main__":

    flow_dic = []  # Stage the flows for the bulk API index operation
    template_list = {}  # Cache the IPFIX templates in an OrderedDict to decode the data flows
    record_num = 0  # Record counter for Elasticsearch bulk upload API

    # Classes for parsing fields
    icmp_parser = icmp_parse()  # Class for parsing ICMP Types and Codes
    ip_parser = ip_parse()  # Class for unpacking IPv4 and IPv6 addresses
    mac = mac_address()  # Class for parsing MAC addresses and OUIs
    int_un = int_parse()  # Class for parsing integers
    ports_protocols_parser = ports_and_protocols()  # Class for parsing ports and protocols
    name_lookups = name_lookups()  # Class for DNS lookups

    # Continually collect packets
    while True:

        # Listen for inbound packets
        flow_packet_contents, sensor_address = netflow_sock.recvfrom(65565)

        ### Unpack the flow packet header ###
        try:
            packet_attributes = {}  # Flow header attributes cache
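
            # Illustrative sketch only (not part of the original file): unpacking
            # the fixed 16-byte IPFIX message header (RFC 7011) into the
            # packet_attributes cache. The dictionary keys are assumptions, not
            # necessarily the author's, and `struct` is assumed to be imported at
            # the top of the file.
            (
                packet_attributes['ipfix_version'],       # Should be 10 for IPFIX
                packet_attributes['message_length'],      # Total message length in bytes
                packet_attributes['export_time'],         # Export timestamp (seconds since epoch)
                packet_attributes['sequence_number'],     # Exporter sequence counter
                packet_attributes['observation_domain'],  # Observation Domain ID
            ) = struct.unpack('!HHIII', flow_packet_contents[0:16])

            pointer = 16  # Header consumed, sets begin here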