# Example #1
# 0
    def generate_models(self):
        """ Generate all the individual models. We are related with only one dataset and connection group.

        Builds one Model per connection in our connection group, using the
        state constructor configured for this group. If any flow cannot be
        added (flows out of order), the partially built models are deleted.

        Returns:
            True on success, False if a flow could not be added.
        """
        # Get the group of connections from the id
        group_of_connections = __group_of_group_of_connections__.get_group(self.get_group_connection_id())

        # For each connection
        for connection in group_of_connections.get_connections():
            # Create its model. Remember that the connection id and the model id is the 4-tuple
            model_id = connection.get_id()
            new_model = Model(model_id)
            # Set the constructor for this model. Each model has a specific way of constructing the states
            constructor_id = self.get_constructor_id()
            new_model.set_constructor(__modelsconstructors__.get_constructor(constructor_id))
            for flow in connection.get_flows():
                # Try to add the flow
                if not new_model.add_flow(flow):
                    self.delete_model_by_id(new_model.get_id())
                    # The flows are not ordered. Delete the truncated models
                    __groupofgroupofmodels__.delete_group_of_models(self.get_id())
                    return False
            self.models[model_id] = new_model
        # Bug fix: the method previously fell off the end returning None on
        # success while returning False on failure; make the success path an
        # explicit True so callers can safely test the result's truthiness.
        return True
    def generate_models(self):
        """ Generate all the individual models. We are related with only one dataset and connection group.

        Builds one Model per connection in our connection group, using the
        state constructor configured for this group. If any flow cannot be
        added (flows out of order), the partially built models are deleted.

        Returns:
            True on success, False if a flow could not be added.
        """
        # Get the group of connections from the id
        group_of_connections = __group_of_group_of_connections__.get_group(self.get_group_connection_id())

        # For each connection
        for connection in group_of_connections.get_connections():
            # Create its model. Remember that the connection id and the model id is the 4-tuple
            model_id = connection.get_id()
            new_model = Model(model_id)
            # Set the constructor for this model. Each model has a specific way of constructing the states
            constructor_id = self.get_constructor_id()
            new_model.set_constructor(__modelsconstructors__.get_constructor(constructor_id))
            for flow in connection.get_flows():
                # Try to add the flow
                if not new_model.add_flow(flow):
                    self.delete_model_by_id(new_model.get_id())
                    # The flows are not ordered. Delete the truncated models
                    __groupofgroupofmodels__.delete_group_of_models(self.get_id())
                    return False
            self.models[model_id] = new_model
        # Bug fix: the method previously fell off the end returning None on
        # success while returning False on failure; make the success path an
        # explicit True so callers can safely test the result's truthiness.
        return True
 def _read_netflow_line(self, input_file):
     """ Read the next binetflow line, keeping only the first 14 comma-separated columns. """
     return ','.join(input_file.readline().strip().split(',')[:14])

 def _build_flow(self, column_values):
     """ Build a Flow object from the extracted column values.

     Mandatory columns raise KeyError if absent; optional columns
     (SrcBytes, srcUdata, dstUdata, Label) are silently skipped.
     """
     flow = Flow(0)  # Fake flow id
     flow.add_starttime(column_values['StartTime'])
     flow.add_duration(column_values['Dur'])
     flow.add_proto(column_values['Proto'])
     # NOTE(review): 'add_scraddr' looks like a typo for 'add_srcaddr' but
     # must match the Flow API as declared elsewhere — verify before renaming.
     flow.add_scraddr(column_values['SrcAddr'])
     flow.add_dir(column_values['Dir'])
     flow.add_dstaddr(column_values['DstAddr'])
     flow.add_dport(column_values['Dport'])
     flow.add_state(column_values['State'])
     flow.add_stos(column_values['sTos'])
     flow.add_dtos(column_values['dTos'])
     flow.add_totpkts(column_values['TotPkts'])
     flow.add_totbytes(column_values['TotBytes'])
     # Optional columns: not every binetflow file has them
     for column, setter in (('SrcBytes', flow.add_srcbytes),
                            ('srcUdata', flow.add_srcUdata),
                            ('dstUdata', flow.add_dstUdata),
                            ('Label', flow.add_label)):
         try:
             setter(column_values[column])
         except KeyError:
             pass
     return flow

 def _wait_between_flows(self, model, column_values, multiplier):
     """ Sleep between flows according to the replay multiplier.

     multiplier == 0.0 replays as fast as possible (no sleep),
     multiplier == -1 sleeps a fixed 0.1s per flow, any other value
     divides the real inter-flow time (2.0 = twice as fast).
     """
     if multiplier != 0.0 and multiplier != -1:
         # Wait the real time elapsed between flows, scaled by the multiplier
         last_time = model.get_last_flow_time()
         current_time = datetime.strptime(column_values['StartTime'], '%Y/%m/%d %H:%M:%S.%f')
         if last_time:
             diff = current_time - last_time
             time.sleep(diff.total_seconds() / multiplier)
         model.add_last_flow_time(current_time)
     elif multiplier == -1:
         # Fixed small delay between flows
         time.sleep(0.1)

 def visualize_dataset(self, dataset_id, multiplier, filter):
     """ Replay the binetflow file of the given dataset, pushing each updated model to the screen queue.

     dataset_id: id of the dataset to visualize.
     multiplier: replay pacing — see _wait_between_flows.
     filter: textual filter handed to construct_filter().
     Returns False when the dataset or its binetflow file is missing.
     """
     # Get the netflow file
     self.dataset = __datasets__.get_dataset(dataset_id)
     try:
         self.binetflow_file = self.dataset.get_file_type('binetflow')
     except AttributeError:
         # Bug fix: corrected typo in the error message ("does no seem")
         print_error('That testing dataset does not seem to exist.')
         return False
     # Open the file
     try:
         input_file = open(self.binetflow_file.get_name(), 'r')
         self.setup_screen()
     except IOError:
         print_error('The binetflow file is not present in the system.')
         return False
     # Bug fix: ensure the file is closed even if parsing raises mid-way
     # (the original leaked the handle on any exception inside the loop).
     try:
         # construct filter
         self.construct_filter(filter)
         # Clean the previous models from the constructor
         __modelsconstructors__.get_default_constructor().clean_models()
         # Remove the header
         header_line = input_file.readline().strip()
         # Find the separation character
         self.find_separator(header_line)
         # Extract the columns names
         self.find_columns_names(header_line)
         line = self._read_netflow_line(input_file)
         while line:
             # Using our own extract_columns function makes this module more independent
             column_values = self.extract_columns_values(line)
             # Extract its 4-tuple. Find (or create) the tuple object
             tuple4 = column_values['SrcAddr'] + '-' + column_values['DstAddr'] + '-' + column_values['Dport'] + '-' + column_values['Proto']
             # Get the _local_ model. We don't want to mess with the real models in the database, but we need the structure to get the state
             model = self.get_model(tuple4)
             # filter
             if not self.apply_filter(tuple4):
                 line = self._read_netflow_line(input_file)
                 continue
             if not model:
                 model = Model(tuple4)
                 self.set_model(model)
                 # Warning, here we depend on the models constructor
                 constructor_id = __modelsconstructors__.get_default_constructor().get_id()
                 model.set_constructor(__modelsconstructors__.get_constructor(constructor_id))
             # Add the flow
             model.add_flow(self._build_flow(column_values))
             # As fast as we can or with some delay?
             self._wait_between_flows(model, column_values, multiplier)
             # Visualize this model
             self.qscreen.put(model)
             line = self._read_netflow_line(input_file)
         self.qscreen.put('Stop')
     finally:
         input_file.close()
# Example #4
# 0
 def _read_netflow_line(self, input_file):
     """ Read the next binetflow line, keeping only the first 14 comma-separated columns. """
     return ','.join(input_file.readline().strip().split(',')[:14])

 def _build_flow(self, column_values):
     """ Build a Flow object from the extracted column values.

     Mandatory columns raise KeyError if absent; optional columns
     (SrcBytes, srcUdata, dstUdata, Label) are silently skipped.
     """
     flow = Flow(0)  # Fake flow id
     flow.add_starttime(column_values['StartTime'])
     flow.add_duration(column_values['Dur'])
     flow.add_proto(column_values['Proto'])
     # NOTE(review): 'add_scraddr' looks like a typo for 'add_srcaddr' but
     # must match the Flow API as declared elsewhere — verify before renaming.
     flow.add_scraddr(column_values['SrcAddr'])
     flow.add_dir(column_values['Dir'])
     flow.add_dstaddr(column_values['DstAddr'])
     flow.add_dport(column_values['Dport'])
     flow.add_state(column_values['State'])
     flow.add_stos(column_values['sTos'])
     flow.add_dtos(column_values['dTos'])
     flow.add_totpkts(column_values['TotPkts'])
     flow.add_totbytes(column_values['TotBytes'])
     # Optional columns: not every binetflow file has them
     for column, setter in (('SrcBytes', flow.add_srcbytes),
                            ('srcUdata', flow.add_srcUdata),
                            ('dstUdata', flow.add_dstUdata),
                            ('Label', flow.add_label)):
         try:
             setter(column_values[column])
         except KeyError:
             pass
     return flow

 def _wait_between_flows(self, model, column_values, multiplier):
     """ Sleep between flows according to the replay multiplier.

     multiplier == 0.0 replays as fast as possible (no sleep),
     multiplier == -1 sleeps a fixed 0.1s per flow, any other value
     divides the real inter-flow time (2.0 = twice as fast).
     """
     if multiplier != 0.0 and multiplier != -1:
         # Wait the real time elapsed between flows, scaled by the multiplier
         last_time = model.get_last_flow_time()
         current_time = datetime.strptime(column_values['StartTime'], '%Y/%m/%d %H:%M:%S.%f')
         if last_time:
             diff = current_time - last_time
             time.sleep(diff.total_seconds() / multiplier)
         model.add_last_flow_time(current_time)
     elif multiplier == -1:
         # Fixed small delay between flows
         time.sleep(0.1)

 def visualize_dataset(self, dataset_id, multiplier, filter):
     """ Replay the binetflow file of the given dataset, pushing each updated model to the screen queue.

     dataset_id: id of the dataset to visualize.
     multiplier: replay pacing — see _wait_between_flows.
     filter: textual filter handed to construct_filter().
     Returns False when the dataset or its binetflow file is missing.
     """
     # Get the netflow file
     self.dataset = __datasets__.get_dataset(dataset_id)
     try:
         self.binetflow_file = self.dataset.get_file_type('binetflow')
     except AttributeError:
         # Bug fix: corrected typo in the error message ("does no seem")
         print_error('That testing dataset does not seem to exist.')
         return False
     # Open the file
     try:
         input_file = open(self.binetflow_file.get_name(), 'r')
         self.setup_screen()
     except IOError:
         print_error('The binetflow file is not present in the system.')
         return False
     # Bug fix: ensure the file is closed even if parsing raises mid-way
     # (the original leaked the handle on any exception inside the loop).
     try:
         # construct filter
         self.construct_filter(filter)
         # Clean the previous models from the constructor
         __modelsconstructors__.get_default_constructor().clean_models()
         # Remove the header
         header_line = input_file.readline().strip()
         # Find the separation character
         self.find_separator(header_line)
         # Extract the columns names
         self.find_columns_names(header_line)
         line = self._read_netflow_line(input_file)
         while line:
             # Using our own extract_columns function makes this module more independent
             column_values = self.extract_columns_values(line)
             # Extract its 4-tuple. Find (or create) the tuple object
             tuple4 = column_values['SrcAddr'] + '-' + column_values['DstAddr'] + '-' + column_values['Dport'] + '-' + column_values['Proto']
             # Get the _local_ model. We don't want to mess with the real models in the database, but we need the structure to get the state
             model = self.get_model(tuple4)
             # filter
             if not self.apply_filter(tuple4):
                 line = self._read_netflow_line(input_file)
                 continue
             if not model:
                 model = Model(tuple4)
                 self.set_model(model)
                 # Warning, here we depend on the models constructor
                 constructor_id = __modelsconstructors__.get_default_constructor().get_id()
                 model.set_constructor(__modelsconstructors__.get_constructor(constructor_id))
             # Add the flow
             model.add_flow(self._build_flow(column_values))
             # As fast as we can or with some delay?
             self._wait_between_flows(model, column_values, multiplier)
             # Visualize this model
             self.qscreen.put(model)
             line = self._read_netflow_line(input_file)
         self.qscreen.put('Stop')
     finally:
         input_file.close()