Example #1
    def get_initial_beacon_doc(self, implant_id):
        """ Get the initial beacon document from cobaltstrike or return False if none found """
        query = f'implant.id:{implant_id} AND c2.program: cobaltstrike AND c2.log.type:implant_newimplant'
        initial_beacon_doc = get_query(query, size=1, index='rtops-*')
        initial_beacon_doc = initial_beacon_doc[0] if len(initial_beacon_doc) > 0 else False
        self.logger.debug('Initial beacon line [%s]: %s', implant_id, initial_beacon_doc)
        return initial_beacon_doc
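
A minimal way to exercise the "first hit or False" idiom above outside Elasticsearch: the sketch below stubs get_query with a hard-coded hit list. The stub and the sample document are hypothetical, not part of RedELK.

# Hypothetical stand-in for RedELK's get_query helper, returning a list of hit dicts
def get_query(query, size=1, index='rtops-*'):
    return [{'_source': {'implant': {'id': 'demo01'}}}]

hits = get_query('implant.id:demo01 AND c2.log.type:implant_newimplant', size=1)
initial_beacon_doc = hits[0] if hits else False  # first hit, or False if none found
print(initial_beacon_doc)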
Example #2
    def enrich_beacon_data(self):
        """ Get all lines in rtops that have not been enriched yet (for CS) """
        es_query = f'implant.id:* AND c2.program: cobaltstrike AND NOT c2.log.type:implant_newimplant AND NOT tags:{info["submodule"]}'
        not_enriched_results = get_query(es_query, size=10000, index='rtops-*')

        # Create a dict grouping the results by implant ID
        implant_ids = {}
        for not_enriched in not_enriched_results:
            implant_id = get_value('_source.implant.id', not_enriched)
            if implant_id in implant_ids:
                implant_ids[implant_id].append(not_enriched)
            else:
                implant_ids[implant_id] = [not_enriched]

        hits = []
        # For each implant ID, get the initial beacon line
        for implant_id, implant_val in implant_ids.items():
            initial_beacon_doc = self.get_initial_beacon_doc(implant_id)

            # If no initial beacon line was found, skip this implant ID
            if not initial_beacon_doc:
                continue

            for doc in implant_val:
                # Fields to copy: host.*, implant.*, process.*, user.*
                res = self.copy_data_fields(initial_beacon_doc, doc, ['host', 'implant', 'user', 'process'])
                if res:
                    hits.append(res)

        return hits
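
The manual "append or create" grouping above can also be written with collections.defaultdict. A self-contained sketch of the same pattern follows; the sample documents are made up, shaped like the _source.implant.id hits the method processes.

from collections import defaultdict

# Made-up documents shaped like the Elasticsearch hits grouped above
docs = [
    {'_source': {'implant': {'id': 'A'}}},
    {'_source': {'implant': {'id': 'B'}}},
    {'_source': {'implant': {'id': 'A'}}},
]

implant_ids = defaultdict(list)
for doc in docs:
    implant_ids[doc['_source']['implant']['id']].append(doc)

print({implant_id: len(hits) for implant_id, hits in implant_ids.items()})  # {'A': 2, 'B': 1}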
Example #3
    def alarm_dummy(self):
        """ This check returns the last IOC in rtops-* that have not been alarmed yet """
        es_query = 'c2.log.type:ioc AND NOT tags:alarm_*'
        es_results = get_query(es_query, 1, index='rtops-*')
        self.logger.debug(es_results)

        return es_results
Example #4
    def alarm_check(self):  # pylint: disable=no-self-use
        """ This check queries for UA's that are listed in any blacklist_useragents.conf and do talk to c2* paths on redirectors
        We will dig trough ALL data finding specific IP related lines and tag them reading the useragents we trigger on. """
        file_name = '/etc/redelk/rogue_useragents.conf'
        with open(file_name, encoding='utf-8') as file:
            content = file.readlines()
        ua_list = []
        for line in content:
            if not line.startswith('#'):
                ua_list.append(line.strip())
        keywords = ua_list
        es_subquery = ''
        # add keywords (UA's) to query
        for keyword in keywords:
            if es_subquery == '':
                es_subquery = f'(http.headers.useragent:{keyword}'
            else:
                es_subquery = es_subquery + f' OR http.headers.useragent:{keyword}'
        es_subquery = es_subquery + ') '
        # q = "%s AND redir.backendname:c2* AND tags:enrich_* AND NOT tags:alarm_* "%qSub
        es_query = f'{es_subquery} AND redir.backend.name:c2* AND NOT tags:alarm_useragent'

        es_results = get_query(es_query, 10000)
        report = {}
        report['hits'] = es_results
        return report
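
The incremental string building above leaves a dangling '() ' if the UA list is ever empty. Below is a standalone sketch of the same Lucene subquery built with str.join and guarded against that edge case; the sample keywords are made up.

ua_list = ['curl*', 'python-requests*', 'Wget*']  # made-up sample keywords

if ua_list:
    # '(http.headers.useragent:a OR http.headers.useragent:b ...)' in one step
    es_subquery = '(' + ' OR '.join(f'http.headers.useragent:{ua}' for ua in ua_list) + ')'
    es_query = f'{es_subquery} AND redir.backend.name:c2* AND NOT tags:alarm_useragent'
    print(es_query)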
Example #5
    def alarm_check(self):
        """ This check queries for calls to backends that have *alarm* in their name """
        es_query = f'redir.backend.name:*alarm* AND NOT tags:{info["submodule"]}'
        es_results = get_query(es_query, 10000)
        report = {
            'hits': es_results
        }
        return report
Example #6
    def alarm_check(self):
        """ This check queries for IP's that aren't listed in any iplist* but do talk to c2* paths on redirectors """
        es_query = '*'
        count = get_hits_count(es_query)
        count = min(count, 10000)
        es_result = get_query(es_query, count)
        report = {}
        # Slice instead of indexing so fewer than two results won't raise IndexError
        report['hits'] = es_result[:2]
        return report
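
A standalone illustration of the cap-and-slice pattern used here, with get_hits_count and get_query stubbed out as hypothetical helpers returning canned data:

# Hypothetical stubs standing in for the RedELK query helpers
def get_hits_count(query):
    return 12345

def get_query(query, size):
    return [{'hit': n} for n in range(min(size, 3))]  # pretend only 3 docs exist

count = min(get_hits_count('*'), 10000)  # never request more than 10k docs
es_result = get_query('*', count)
report = {'hits': es_result[:2]}         # slicing never raises IndexError
print(report)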
Example #7
    def sync_iplist(self, iplist='redteam'):
        """ Sync data between ES iplist and config files """
        # Get data from config file iplist
        cfg_iplist = self.get_cfg_ips(iplist)

        # If the config file doesn't exist, skip the sync
        if cfg_iplist is None:
            return []

        # Get data from ES iplist
        query = f'iplist.name:{iplist}'
        es_iplist_docs = get_query(query, size=10000, index='redelk-*')

        # Build a list of (ip, doc) tuples from the ES iplist docs
        es_iplist = []
        for doc in es_iplist_docs:
            ip = get_value('_source.iplist.ip', doc)  # pylint: disable=invalid-name
            if ip:
                es_iplist.append((ip, doc))

        for ipc, comment in cfg_iplist:
            found = [item for item in es_iplist if ipc in item]
            if not found:
                self.logger.debug('IP not found in ES: %s', ipc)
                # if not, add it
                self.add_es_ip(ipc, iplist, comment)

        toadd = []
        for ipe, doc in es_iplist:
            # Check if ES IP is in config file
            found = [item for item in cfg_iplist if ipe in item]
            if not found:
                # if not, check if source = config_file
                if get_value('_source.iplist.source', doc) == 'config_file':
                    # if yes, remove IP from ES
                    self.remove_es_ip(doc, iplist)
                else:
                    # if not, add it
                    comment = get_value('_source.iplist.comment', doc)
                    if comment:
                        ipa = f'{ipe} # From ES -- {comment}'
                    else:
                        ipa = f'{ipe} # From ES'
                    toadd.append(ipa)

        self.add_cfg_ips(toadd, iplist)

        return toadd
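
At its core, this two-way sync is a membership diff between the config-file list and the ES list. A reduced sketch with plain sets follows (the sample IPs are made up); the real method additionally consults iplist.source to decide whether an ES-only entry is removed from ES or written back to the config file.

cfg_ips = {'10.0.0.1', '10.0.0.2'}  # IPs from the config file
es_ips = {'10.0.0.2', '192.0.2.9'}  # IPs from the ES iplist

to_add_to_es = cfg_ips - es_ips     # in config but missing from ES
to_add_to_cfg = es_ips - cfg_ips    # in ES but missing from config

print(sorted(to_add_to_es))   # ['10.0.0.1']
print(sorted(to_add_to_cfg))  # ['192.0.2.9']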
Example #8
    def alarm_check(self):
        """ This check queries public sources given a list of md5 hashes. If a hash was seen we set an alarm """
        es_query = 'c2.log.type:ioc AND NOT tags:alarm_filehash AND ioc.type:file'
        alarmed_md5_q = {
            'aggs': {
                'interval_filter': {
                    'filter': {
                        'range': {
                            'alarm.last_checked': {
                                'gte': f'now-{self.interval}s',
                                'lt': 'now'
                            }
                        }
                    },
                    'aggs': {
                        'md5_interval': {
                            'terms': {
                                'field': 'file.hash.md5'
                            }
                        }
                    }
                },
                'alarmed_filter': {
                    'filter': {
                        'terms': {
                            'tags': ['alarm_filehash']
                        }
                    },
                    'aggs': {
                        'md5_alarmed': {
                            'terms': {
                                'field': 'file.hash.md5'
                            }
                        }
                    }
                }
            }
        }
        self.logger.debug('Running query %s', es_query)

        # First, get all IOCs of type 'file' that have not been alarmed yet
        iocs = get_query(es_query, 10000, index='rtops-*')
        self.logger.debug('found ioc: %s', iocs)

        # Then get aggregations of md5 hashes checked within the last 'interval' seconds and of those already alarmed
        self.logger.debug('Running query %s', alarmed_md5_q)
        already_alarmed_result = raw_search(alarmed_md5_q, index='rtops-*')

        already_checked = []
        already_alarmed = []

        if already_alarmed_result:
            self.logger.debug(already_alarmed_result['aggregations'])

            # Add md5 hashes that were checked within the 'interval' to 'already_checked'
            for hit in already_alarmed_result['aggregations'][
                    'interval_filter']['md5_interval']['buckets']:
                already_checked.append(hit['key'])

            # Add md5 hashes that were alarmed previously to 'already_alarmed'
            for hit in already_alarmed_result['aggregations'][
                    'alarmed_filter']['md5_alarmed']['buckets']:
                already_alarmed.append(hit['key'])

        # Group all hits per md5 hash
        md5_dict = self.group_hits(iocs, already_alarmed, already_checked)

        # Collect the unique md5 hashes to send to the different providers
        md5_list = list(md5_dict)

        self.logger.debug('md5 hashes to check: %s', md5_list)

        # Run the checks
        check_results = self.check_hashes(md5_list)

        # Get the alarmed hashes with their corresponding mutations
        alarmed_hashes = self.get_mutations(check_results)

        # Get the report
        report = self.build_report(md5_dict, alarmed_hashes)

        return report
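
Pulling the bucket keys out of the two aggregations is plain dict navigation. Below is a sketch against a hand-built response of the same shape as the raw_search result above; the sample hashes are made up.

# Hand-built response shaped like the raw_search result used above
already_alarmed_result = {
    'aggregations': {
        'interval_filter': {'md5_interval': {'buckets': [{'key': 'aaa111'}]}},
        'alarmed_filter': {'md5_alarmed': {'buckets': [{'key': 'bbb222'}]}},
    }
}

aggs = already_alarmed_result['aggregations']
already_checked = [b['key'] for b in aggs['interval_filter']['md5_interval']['buckets']]
already_alarmed = [b['key'] for b in aggs['alarmed_filter']['md5_alarmed']['buckets']]
print(already_checked, already_alarmed)  # ['aaa111'] ['bbb222']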