def group_hits(self, iocs, already_alarmed, already_checked):
    """ Returns all hits grouped by md5 hash """
    md5_dict = {}
    md5_should_check = {}

    # Group all hits per md5 hash value
    for ioc in iocs:
        md5 = get_value('_source.file.hash.md5', ioc)

        if md5 in md5_dict:
            md5_dict[md5].append(ioc)
        else:
            md5_dict[md5] = [ioc]

        should_check = True
        # Check if the IOC has already been alarmed
        if md5 in already_alarmed:
            # Skip it
            should_check = False
            # Set the last checked date
            add_alarm_data(ioc, {}, info['submodule'], False)
            # Tag the doc as alarmed
            set_tags(info['submodule'], [ioc])

        # Check if the IOC has already been checked within 'interval'
        if md5 in already_checked:
            # Skip it for now
            should_check = False

        if md5 in md5_should_check:
            md5_should_check[md5] = should_check and md5_should_check[md5]
        else:
            md5_should_check[md5] = should_check

    # Iterate over a copy so we can safely delete entries from md5_dict
    for md5 in list(md5_dict):
        # If we should not check the hash, remove it from the list
        if md5 in md5_should_check and not md5_should_check[md5]:
            self.logger.debug(
                '[%s] md5 hash already checked within interval or already alarmed previously, skipping', md5)
            del md5_dict[md5]

    return md5_dict
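
# Usage sketch (illustrative, not part of the module): the IOC shape below is
# an assumption based on the '_source.file.hash.md5' lookup above, and the md5
# values and doc ids are made up.
#
#   iocs = [
#       {'_id': 'doc1', '_source': {'file': {'hash': {'md5': 'aaaa'}}}},
#       {'_id': 'doc2', '_source': {'file': {'hash': {'md5': 'aaaa'}}}},
#       {'_id': 'doc3', '_source': {'file': {'hash': {'md5': 'bbbb'}}}},
#   ]
#   grouped = self.group_hits(iocs, already_alarmed=['bbbb'], already_checked=[])
#   # grouped == {'aaaa': [doc1, doc2]}  ('bbbb' is dropped as already alarmed)
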
def build_report(self, md5_dict, alarmed_hashes):
    """ Build report to be returned by the alarm """
    # Prepare the object to be returned
    report = {'mutations': {}, 'hits': []}

    # Loop through all hashes
    for md5 in md5_dict:
        # Loop through all related ES docs
        for ioc in md5_dict[md5]:
            # Hash has been found in one of the engines and should be alarmed
            if md5 in alarmed_hashes:
                report['mutations'][ioc['_id']] = alarmed_hashes[md5]
                report['hits'].append(ioc)
            # Hash was not found, so we update the last_checked date
            else:
                self.logger.debug(
                    'md5 hash not alarmed, updating last_checked date: [%s]', md5)
                add_alarm_data(ioc, {}, info['submodule'], False)

    return report
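
# Shape of the returned report (illustrative; the keys follow the assignments
# above, and the mutation value is whatever alarmed_hashes maps the md5 to):
#
#   {
#       'mutations': {'<es_doc_id>': <alarmed_hashes[md5] for that doc>},
#       'hits': [<ES docs whose md5 appeared in alarmed_hashes>],
#   }
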
def process_alarms(connector_dict, alarm_dict):
    """ Process the alarm results and send notifications via connector modules """
    logger.info('Processing alarms')

    # Now we can loop over the modules once again and log the lines
    for alarm in alarm_dict:
        if alarm in alarms and alarms[alarm]['enabled']:
            # If the alarm failed to run, skip the notification and tagging as we are not sure of the results
            if alarm_dict[alarm]['status'] != 'success':
                logger.warning('Alarm %s did not run (correctly), skipping processing', alarm)
                continue

            logger.debug('Alarm %s enabled, processing hits', alarm)
            result = alarm_dict[alarm]['result']
            alarm_name = alarm_dict[alarm]['info']['submodule']

            for result_hits in result['hits']['hits']:
                logger.debug(result_hits)
                # First check if there is mutation data to add
                if result_hits['_id'] in result['mutations']:
                    mutations = result['mutations'][result_hits['_id']]
                else:
                    mutations = {}

                # Add the mutation data to the doc and write the updated hit back
                result_hits = add_alarm_data(result_hits, mutations, alarm_name)

            # Tag the docs with the alarm name
            logger.debug('calling set_tags %s (%d hits)', alarm_name, result['hits']['total'])
            set_tags(alarm_name, result['hits']['hits'])

            # Needed as group_hits() will change result['hits']['hits'] and different alarms might do different grouping
            result = copy.deepcopy(alarm_dict[alarm]['result'])

            if result['hits']['total'] > 0:
                # Group the hits before sending them to the connector, based on the 'groupby' list returned by the alarm
                group_by = list(result['groupby'])
                result['hits']['hits'] = group_hits(result['hits']['hits'], group_by)

                for connector in connector_dict:
                    # The connector will process ['hits']['hits'], a list of JSON docs shaped like ES hits,
                    # and report the fields listed in ['hits']['fields'] for each entry in the list
                    if connector in notifications and notifications[connector]['enabled']:
                        connector_mod = connector_dict[connector]['m'].Module()
                        logger.info('connector %s enabled, sending alarm (%d hits)', connector, result['hits']['total'])
                        connector_mod.send_alarm(result)
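
# Minimal connector sketch (an assumption: process_alarms() only requires that
# the loaded connector module exposes a Module class with a send_alarm(result)
# method; the body below is illustrative, not a real connector):
#
#   class Module:
#       """Hypothetical notification connector."""
#
#       def send_alarm(self, alarm):
#           # alarm['hits']['hits'] holds the grouped ES-style docs,
#           # alarm['hits']['total'] the overall hit count
#           print('received alarm with %d hit(s)' % alarm['hits']['total'])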