def get_the_model_of_a_connection(self, group_of_model_id, connection_id):
     """ Given a connection_id and group of model id, get the model """
     # Get the dataset id, then the dataset and the group of models
     dataset_id = int(group_of_model_id.split('-')[0])
     dataset = __datasets__.get_dataset(dataset_id)
     group = __groupofgroupofmodels__.get_group(group_of_model_id)
     try:
         if group.has_model(connection_id):
             model = group.get_model(connection_id)
             return model
     except AttributeError:
         print_error('The connection does not have a model. Probably deleted.')
         return False
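The group_of_model_id encodes the dataset id before its first dash, which is why the method splits on '-'. A minimal usage sketch follows; the manager name and ids are illustrative assumptions, not from the source:

 # Hypothetical call: '2-1' would be group of models 1 inside dataset 2,
 # and the connection id is a 4-tuple string as used elsewhere in this code.
 model = models_manager.get_the_model_of_a_connection('2-1', '10.0.0.1-10.0.0.2-80-tcp')
 if model:
     print('Found a model for that connection')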
 def delete_group_of_connections(self, conn_id):
     group = self.get_group(conn_id)
     if group:
         # First delete all the connections inside the group of connections
         group.delete_all_connections()
         # Now delete the group of connections
         self.group_of_connections.pop(conn_id)
         print_info('Deleted group of connections with id {}'.format(conn_id))
         # Delete the reference to this group from the dataset
         # (the group of connections id doubles as its dataset id)
         dataset = __datasets__.get_dataset(conn_id)
         dataset.remove_group_of_connections_id(conn_id)
     else:
         print_error('No such group of connections exists.')
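A hedged usage sketch; the manager name is an assumption and the id is illustrative:

 # Hypothetical: deleting group of connections 3 also cleans the reference
 # held by dataset 3, since the two share the same id in this code.
 connections_manager.delete_group_of_connections(3)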
 def get_the_model_of_a_connection(self, connection_id):
     """ Given a connection_id and current dataset, get the model """
     if __datasets__.current:
         # Get the current dataset and its groups of models
         dataset = __datasets__.get_dataset(__datasets__.current.get_id())
         group_of_models = dataset.get_group_of_models()
         for group_id in group_of_models: # Usually there is only one group...
             group = __groupofgroupofmodels__.get_group(group_id)
             if group.has_model(connection_id):
                 model = group.get_model(connection_id)
                 return model
     else:
         print_error('There is no dataset selected.')
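This variant resolves the group through the currently selected dataset instead of an explicit group id. A minimal sketch, where the selection call and manager name are assumptions:

 # Hypothetical: a dataset must be selected before the lookup works.
 __datasets__.select(1)  # assumed selection call, not shown in this code
 model = models_manager.get_the_model_of_a_connection('10.0.0.1-10.0.0.2-80-tcp')
 if model is None:
     print('No model for that connection in the current dataset.')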
 def list_groups(self):
     print_info('Groups of Models')
     # If we selected a dataset, just print the one belonging to the dataset
     if __datasets__.current:
         rows = []
         for group in self.group_of_models.values():
             if group.get_dataset_id() == __datasets__.current.get_id():
                 rows.append([group.get_id(), len(group.get_models()), __datasets__.current.get_id(), __datasets__.current.get_name() ])
          print(table(header=['Group of Model Id', 'Number of Models', 'Dataset Id', 'Dataset Name'], rows=rows))
     # Otherwise print them all
     else:
         rows = []
         for group in self.group_of_models.values():
              # Get the dataset based on the dataset id stored in this group
              dataset = __datasets__.get_dataset(group.get_dataset_id())
              rows.append([group.get_id(), len(group.get_models()), dataset.get_id(), dataset.get_name()])
          print(table(header=['Group of Model Id', 'Number of Models', 'Dataset Id', 'Dataset Name'], rows=rows))
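A variant of list_groups that also reports the constructor id of each group of models: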
 def list_groups(self):
     print_info('Groups of Models')
     # If we selected a dataset, just print the one belonging to the dataset
     if __datasets__.current:
         rows = []
         for group in self.group_of_models.values():
             if group.get_dataset_id() == __datasets__.current.get_id():
                 rows.append([group.get_id(), group.get_constructor_id(), len(group.get_models()), __datasets__.current.get_id(), __datasets__.current.get_name() ])
          print(table(header=['Group of Model Id', 'Constructor Id', 'Number of Models', 'Dataset Id', 'Dataset Name'], rows=rows))
     # Otherwise print them all
     else:
         rows = []
         for group in self.group_of_models.values():
              # Get the dataset based on the dataset id stored in this group
              dataset = __datasets__.get_dataset(group.get_dataset_id())
              rows.append([group.get_id(), group.get_constructor_id(), len(group.get_models()), dataset.get_id(), dataset.get_name()])
          print(table(header=['Group of Model Id', 'Constructor Id', 'Number of Models', 'Dataset Id', 'Dataset Name'], rows=rows))
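Both listings rely on a table helper that is not shown here. A minimal stand-in, assuming its interface is simply a header plus rows and that it returns a printable string:

 def table(header, rows):
     # Assumed interface: pad every column to its widest cell.
     all_rows = [list(map(str, header))] + [list(map(str, r)) for r in rows]
     widths = [max(len(r[i]) for r in all_rows) for i in range(len(header))]
     lines = [' | '.join(r[i].ljust(widths[i]) for i in range(len(header))) for r in all_rows]
     lines.insert(1, '-+-'.join('-' * w for w in widths))
     return '\n'.join(lines)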
 def visualize_dataset(self, dataset_id, multiplier, filter):
     # Get the netflow file
     self.dataset = __datasets__.get_dataset(dataset_id)
     try:
         self.binetflow_file = self.dataset.get_file_type('binetflow')
     except AttributeError:
          print_error('That testing dataset does not seem to exist.')
         return False
     # Open the file
     try:
         file = open(self.binetflow_file.get_name(), 'r')
         self.setup_screen()
     except IOError:
         print_error('The binetflow file is not present in the system.')
         return False
     # construct filter
     self.construct_filter(filter)
     # Clean the previous models from the constructor
     __modelsconstructors__.get_default_constructor().clean_models()
      # Consume the header line so it is not parsed as a flow
      header_line = file.readline().strip()
     # Find the separation character
     self.find_separator(header_line)
     # Extract the columns names
     self.find_columns_names(header_line)
      # Read the first flow line, keeping only the first 14 columns
      line = ','.join(file.readline().strip().split(',')[:14])
     while line:
         # Using our own extract_columns function makes this module more independent
         column_values = self.extract_columns_values(line)
         # Extract its 4-tuple. Find (or create) the tuple object
         tuple4 = column_values['SrcAddr']+'-'+column_values['DstAddr']+'-'+column_values['Dport']+'-'+column_values['Proto']
         # Get the _local_ model. We don't want to mess with the real models in the database, but we need the structure to get the state
         model = self.get_model(tuple4)
         # filter
         if not self.apply_filter(tuple4):
             line = ','.join(file.readline().strip().split(',')[:14])
             continue
         if not model:
             model = Model(tuple4)
             self.set_model(model)
             constructor_id = __modelsconstructors__.get_default_constructor().get_id()
              # Warning: here we depend on the models constructor
             model.set_constructor(__modelsconstructors__.get_constructor(constructor_id))
         flow = Flow(0) # Fake flow id
         flow.add_starttime(column_values['StartTime'])
         flow.add_duration(column_values['Dur'])
         flow.add_proto(column_values['Proto'])
         flow.add_scraddr(column_values['SrcAddr'])
         flow.add_dir(column_values['Dir'])
         flow.add_dstaddr(column_values['DstAddr'])
         flow.add_dport(column_values['Dport'])
         flow.add_state(column_values['State'])
         flow.add_stos(column_values['sTos'])
         flow.add_dtos(column_values['dTos'])
         flow.add_totpkts(column_values['TotPkts'])
         flow.add_totbytes(column_values['TotBytes'])
         try:
             flow.add_srcbytes(column_values['SrcBytes'])
         except KeyError:
             # It can happen that we don't have the SrcBytes column
             pass
         try:
             flow.add_srcUdata(column_values['srcUdata'])
         except KeyError:
             # It can happen that we don't have the srcUdata column
             pass
         try:
             flow.add_dstUdata(column_values['dstUdata'])
         except KeyError:
             # It can happen that we don't have the dstUdata column
             pass
         try:
             flow.add_label(column_values['Label'])
         except KeyError:
             # It can happen that we don't have the label column
             pass
         # Add the flow
         model.add_flow(flow)
          # As fast as we can or with some delay?
          if multiplier != 0.0 and multiplier != -1:
              # Replay the real inter-flow gap, scaled down by the multiplier
              last_time = model.get_last_flow_time()
              current_time = datetime.strptime(column_values['StartTime'], '%Y/%m/%d %H:%M:%S.%f')
              if last_time:
                  diff = current_time - last_time
                  wait_time = diff.total_seconds()
                  time.sleep(wait_time / multiplier)
              model.add_last_flow_time(current_time)
          elif multiplier == -1:
              # Fixed small delay between flows
              time.sleep(0.1)
         # Visualize this model
         self.qscreen.put(model)
         line = ','.join(file.readline().strip().split(',')[:14])
     self.qscreen.put('Stop')
     file.close()
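The pacing rule above can be read on its own: -1 replays with a fixed 0.1s step, 0.0 replays as fast as possible, and any other multiplier divides the real inter-flow gap. A standalone sketch of that rule, assuming StartTime strings in the same format:

 from datetime import datetime
 import time

 def replay_delay(last_time, starttime_str, multiplier):
     # Restates the delay logic of visualize_dataset for a single flow.
     current_time = datetime.strptime(starttime_str, '%Y/%m/%d %H:%M:%S.%f')
     if multiplier == -1:
         time.sleep(0.1)  # fixed small step
     elif multiplier != 0.0 and last_time:
         # Real gap between flows, scaled down by the multiplier
         time.sleep((current_time - last_time).total_seconds() / multiplier)
     return current_time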