Example #1
    def makerequest(self, query, stime, etime, maxid):
        payload = {
            'min_time': calendar.timegm(stime.utctimetuple()),
            'max_time': calendar.timegm(etime.utctimetuple()),
            'q': query
        }
        if maxid is not None:
            payload['max_id'] = maxid
        hdrs = {'X-Papertrail-Token': self._apikey}

        max_retries = 3
        total_retries = 0
        while True:
            logger.debug("Sending request to papertrail API")
            resp = requests.get(self._papertrail_api, headers=hdrs, params=payload)
            if resp.status_code == 200:
                break
            else:
                logger.debug("Received invalid status code: {0}: {1}".format(resp.status_code, resp.text))
                total_retries += 1
                if total_retries < max_retries:
                    logger.debug("Sleeping a bit then retrying")
                    time.sleep(2)
                else:
                    logger.error("Received too many error messages...exiting")
                    logger.error("Last malformed response: {0}: {1}".format(resp.status_code, resp.text))
                    sys.exit(1)

        return self.parse_events(resp.json())
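A note on the payload above: calendar.timegm converts a naive UTC datetime into the epoch seconds the API expects; a minimal standalone sketch (values hypothetical):

import calendar
from datetime import datetime

stime = datetime(2019, 1, 1, 0, 0, 0)          # naive datetime, treated as UTC
epoch = calendar.timegm(stime.utctimetuple())  # 1546300800, seconds since the UTC epoch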
Example #2
    def run(self):
        self.taskQueue.set_message_class(RawMessage)

        while True:
            try:
                records = self.taskQueue.get_messages(self.options.prefetch)
                for msg in records:
                    msg_body = msg.get_body()
                    try:
                        # get_body() should be json
                        message_json = json.loads(msg_body)
                        self.on_message(message_json)
                        # delete message from queue
                        self.taskQueue.delete_message(msg)
                    except ValueError:
                        logger.error('Invalid message, not JSON <dropping message and continuing>: %r' % msg_body)
                        self.taskQueue.delete_message(msg)
                        continue
                time.sleep(.1)
            except (SSLEOFError, SSLError, socket.error):
                logger.info('Received network related error...reconnecting')
                time.sleep(5)
                self.connection, self.taskQueue = connect_sqs(
                    options.region,
                    options.accesskey,
                    options.secretkey,
                    options.taskexchange
                )
                self.taskQueue.set_message_class(RawMessage)
Example #3
def clearESCache():
    es = esConnect(None)
    indexes = es.get_indices()
    # assumes index names like events-YYYYMMDD etc.
    # used to avoid operating on current indexes
    dtNow = datetime.utcnow()
    indexSuffix = date.strftime(dtNow, '%Y%m%d')
    previousSuffix = date.strftime(dtNow - timedelta(days=1), '%Y%m%d')
    for targetindex in sorted(indexes):
        if indexSuffix not in targetindex and previousSuffix not in targetindex:
            url = '{0}/{1}/_stats'.format(random.choice(options.esservers), targetindex)
            r = requests.get(url)
            if r.status_code == 200:
                indexstats = json.loads(r.text)
                if indexstats['_all']['total']['search']['query_current'] == 0:
                    fielddata = indexstats['_all']['total']['fielddata']['memory_size_in_bytes']
                    if fielddata > 0:
                        logger.info('target: {0}: field data {1}'.format(targetindex, indexstats['_all']['total']['fielddata']['memory_size_in_bytes']))
                        clearurl = '{0}/{1}/_cache/clear'.format(random.choice(options.esservers), targetindex)
                        clearRequest = requests.post(clearurl)
                        logger.info(clearRequest.text)
                        # stop at one?
                        if options.conservative:
                            return
                else:
                    logger.debug('{0}: <ignoring due to current search > field data {1}'.format(targetindex, indexstats['_all']['total']['fielddata']['memory_size_in_bytes']))
            else:
                logger.error('{0} returned {1}'.format(url, r.status_code))
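For reference, a trimmed sketch of the _stats response shape the code above reads, inferred from the key accesses (values hypothetical):

indexstats = {
    '_all': {
        'total': {
            'search': {'query_current': 0},                  # in-flight queries against the index
            'fielddata': {'memory_size_in_bytes': 1048576},  # field data cache held in heap
        }
    }
}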
Example #4
def main():
    '''
    Get aggregated statistics on incoming events
    to use in alerting/notices/queries about event patterns over time
    '''
    logger.debug('starting')
    logger.debug(options)
    es = ElasticsearchClient((list('{0}'.format(s) for s in options.esservers)))
    index = options.index
    stats = esSearch(es)
    logger.debug(json.dumps(stats))
    sleepcycles = 0
    try:
        while not es.index_exists(index):
            sleep(3)
            if sleepcycles == 3:
                logger.debug("The index is not created. Terminating eventStats.py cron job.")
                exit(1)
            sleepcycles += 1
        if es.index_exists(index):
            # post to elastic search servers directly without going through
            # message queues in case there is an availability issue
            es.save_event(index=index, body=json.dumps(stats))

    except Exception as e:
        logger.error("Exception %r when gathering statistics " % e)

    logger.debug('finished')
Example #5
def esCloseIndices():
    logger.debug('started')
    try:
        es = ElasticsearchClient((list('{0}'.format(s) for s in options.esservers)))
        indices = es.get_indices()
    except Exception as e:
        logger.error("Unhandled exception while connecting to ES, terminating: %r" % (e))
        return

    # examine each index pulled from get_indices
    # to determine if it meets aging criteria
    month_ago_date = toUTC(datetime.now()) - timedelta(days=int(options.index_age))
    month_ago_date = month_ago_date.replace(tzinfo=None)
    for index in indices:
        if 'events' in index:
            index_date = index.rsplit('-', 1)[1]
            logger.debug("Checking to see if Index: %s can be closed." % (index))
            if len(index_date) == 8:
                index_date_obj = datetime.strptime(index_date, '%Y%m%d')
                try:
                    if month_ago_date > index_date_obj:
                        logger.debug("Index: %s will be closed." % (index))
                        es.close_index(index)
                    else:
                        logger.debug("Index: %s  does not meet aging criteria and will not be closed." % (index))
                except Exception as e:
                    logger.error("Unhandled exception while closing indices, terminating: %r" % (e))
Example #6
def esSearch(es, macassignments=None):
    '''
    Search ES for an event that ties a username to a mac address
    This example searches for junos wifi correlations on authentication success
    Expecting an event like: user: [email protected]; mac: 5c:f9:38:b1:de:cf; author reason: roamed session; ssid: ANSSID; AP 46/2\n
    '''
    usermacre = re.compile(r'''user: (?P<username>.*?); mac: (?P<macaddress>.*?); ''', re.IGNORECASE)
    correlations = {}

    search_query = SearchQuery(minutes=options.correlationminutes)
    search_query.add_must(TermMatch('details.program', 'AUTHORIZATION-SUCCESS'))
    search_query.add_must_not(PhraseMatch('summary', 'last-resort'))

    try:
        full_results = search_query.execute(es)
        results = full_results['hits']

        for r in results:
            fields = re.search(usermacre,r['_source']['summary'])
            if fields:
                if '{0} {1}'.format(fields.group('username'),fields.group('macaddress')) not in correlations:
                    if fields.group('macaddress')[0:8].lower() in macassignments:
                        entity=macassignments[fields.group('macaddress')[0:8].lower()]
                    else:
                        entity='unknown'
                    correlations['{0} {1}'.format(fields.group('username'),fields.group('macaddress'))]=dict(username=fields.group('username'),
                                                                                                             macaddress=fields.group('macaddress'),
                                                                                                             entity=entity,
                                                                                                             utctimestamp=r['_source']['utctimestamp'])
        return correlations

    except ElasticsearchBadServer:
        logger.error('Elastic Search server could not be reached, check network connectivity')
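The docstring's sample line can be used to sanity-check the regex; a minimal sketch (the [email protected] placeholder is kept verbatim from the docstring):

import re

usermacre = re.compile(r'''user: (?P<username>.*?); mac: (?P<macaddress>.*?); ''', re.IGNORECASE)
sample = 'user: [email protected]; mac: 5c:f9:38:b1:de:cf; author reason: roamed session; ssid: ANSSID; AP 46/2'
fields = re.search(usermacre, sample)
print(fields.group('username'), fields.group('macaddress'))  # [email protected] 5c:f9:38:b1:de:cf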
Example #7
 def run(self):
     if self.slack_client.rtm_connect():
         logger.info("Bot connected to slack")
         self.post_welcome_message(random.choice(greetings))
         self.listen_for_messages()
     else:
         logger.error("Unable to connect to slack")
         sys.exit(1)
Example #8
 def read_state_file(self):
     '''Populate self.data by reading and parsing the state file'''
     try:
         with open(self.filename, 'r') as f:
             self.data = json.load(f)
     except IOError:
         self.data = {}
     except ValueError:
         logger.error("%s state file found but isn't a recognized json format" % self.filename)
         raise
     except TypeError:
         logger.error("%s state file found and parsed but it doesn't contain an iterable object" % self.filename)
         raise
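A minimal sketch of the state file this expects: a single JSON object (filename and keys are hypothetical, though Example #27 reads state['bearer'] and state['fromid'] from a file loaded the same way):

import json

with open('example.state', 'w') as f:
    json.dump({'bearer': None, 'fromid': ''}, f)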
Example #9
def main():
    logger.debug('starting')
    logger.debug(options)
    try:
        es = ElasticsearchClient((list('{0}'.format(s) for s in options.esservers)))
        client = MongoClient(options.mongohost, options.mongoport)
        mozdefdb = client.meteor
        ensureIndexes(mozdefdb)
        esResults = getESAlerts(es)
        updateMongo(mozdefdb, esResults)

    except Exception as e:
        logger.error("Exception %r sending health to mongo" % e)
Example #10
 def onMessage(self, message, metadata):
     if metadata['doc_type'] != 'vulnerability':
         return (message, metadata)
     handler = self.get_handler(message)
     if handler is None:
         return (None, None)
     if not handler.validate(message):
         logger.error('Invalid format for vulnerability {0}'.format(message))
         return (None, None)
     metadata['id'] = handler.calculate_id(message)
     metadata['doc_type'] = 'vulnerability_state'
     metadata['index'] = 'vulnerabilities'
     return (message, metadata)
Example #11
def esStoreCorrelations(es, correlations):
    for c in correlations:
        event = dict(utctimestamp=correlations[c]['utctimestamp'],
                     summary=c,
                     details=dict(username=correlations[c]['username'],
                                  macaddress=correlations[c]['macaddress'],
                                  entity=correlations[c]['entity']),
                     category='indicators')
        try:
            es.save_object(index='intelligence',
                           doc_id=getDocID(c),
                           body=json.dumps(event))
        except Exception as e:
            logger.error("Exception %r when posting correlation " % e)
Example #12
def main():
    logger.debug('starting')
    logger.debug(options)
    try:
        es = ElasticsearchClient((list('{0}'.format(s) for s in options.esservers)))
        client = MongoClient(options.mongohost, options.mongoport)
        # use meteor db
        mozdefdb = client.meteor
        esResults = searchESForBROAttackers(es, 100)
        updateMongoWithESEvents(mozdefdb, esResults)
        searchMongoAlerts(mozdefdb)

    except ValueError as e:
        logger.error("Exception %r collecting attackers to mongo" % e)
Example #13
    def watchItem(self,
                  watchcontent=None,
                  comment=None,
                  duration=None,
                  referenceID=None,
                  userID=None):
        try:
            # DB connection/table
            mongoclient = MongoClient(self.options.mongohost,
                                      self.options.mongoport)
            watchlist = mongoclient.meteor['watchlist']

            # already in the table?
            watched = watchlist.find_one({'watchcontent': str(watchcontent)})
            if watched is None:
                # insert
                watched = dict()
                watched['_id'] = genMeteorID()
                watched['watchcontent'] = str(watchcontent)
                watched['dateAdded'] = datetime.utcnow()
                # Compute start and end dates
                # default
                end_date = datetime.utcnow() + timedelta(hours=1)
                if duration == '12hr':
                    end_date = datetime.utcnow() + timedelta(hours=12)
                elif duration == '1d':
                    end_date = datetime.utcnow() + timedelta(days=1)
                elif duration == '2d':
                    end_date = datetime.utcnow() + timedelta(days=2)
                elif duration == '3d':
                    end_date = datetime.utcnow() + timedelta(days=3)
                elif duration == '1w':
                    end_date = datetime.utcnow() + timedelta(days=7)
                elif duration == '30d':
                    end_date = datetime.utcnow() + timedelta(days=30)
                watched['dateExpiring'] = end_date
                watched['comment'] = comment
                watched['creator'] = userID
                watched['reference'] = referenceID
                ref = watchlist.insert(watched)
                logger.debug('{0} written to db.\n'.format(ref))
                logger.debug('%s added to the watchlist table.\n' %
                             (watchcontent))

            else:
                logger.error('%s is already present in the watchlist table\n' %
                             (str(watchcontent)))
        except Exception as e:
            logger.error('Error while watching %s: %s\n' % (watchcontent, e))
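The duration ladder above could equally be table-driven; a hedged sketch of that alternative (not the project's code):

from datetime import datetime, timedelta

DURATIONS = {
    '12hr': timedelta(hours=12),
    '1d': timedelta(days=1),
    '2d': timedelta(days=2),
    '3d': timedelta(days=3),
    '1w': timedelta(days=7),
    '30d': timedelta(days=30),
}
duration = '12hr'  # example input
# fall back to the one-hour default when the duration string is missing or unrecognized
end_date = datetime.utcnow() + DURATIONS.get(duration, timedelta(hours=1))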
Example #14
    def parse_json_alert_config(self, config_file):
        """
        Helper function to parse an alert config file
        """
        alert_dir = os.path.join(os.path.dirname(__file__), "..")
        config_file_path = os.path.abspath(os.path.join(
            alert_dir, config_file))
        json_obj = {}
        with open(config_file_path, "r") as fd:
            try:
                json_obj = json.load(fd)
            except ValueError:
                logger.error("FAILED to open the configuration file\n")

        return json_obj
Example #15
 def __init__(self, region_name='us-east-1', aws_access_key_id=None, aws_secret_access_key=None):
     self.aws_access_key_id = aws_access_key_id
     self.aws_secret_access_key = aws_secret_access_key
     self.credentials = {}
     self.session_credentials = None
     self.session_conn_sts = None
     try:
         self.local_conn_sts = boto.sts.connect_to_region(
             **get_aws_credentials(
                 region_name,
                 self.aws_access_key_id,
                 self.aws_secret_access_key))
     except Exception as e:
         logger.error("Unable to connect to STS due to exception %s" % e)
         raise
Example #16
def main():
    logger.debug('starting')
    logger.debug(options)
    try:
        es = ElasticsearchClient(
            (list('{0}'.format(s) for s in options.esservers)))
        client = MongoClient(options.mongohost, options.mongoport)
        # use meteor db
        mozdefdb = client.meteor
        esResults = searchESForBROAttackers(es, 100)
        updateMongoWithESEvents(mozdefdb, esResults)
        searchMongoAlerts(mozdefdb)

    except ValueError as e:
        logger.error("Exception %r collecting attackers to mongo" % e)
Example #17
 def run(self):
     # XXX: fetch from the config file
     subscriber = pubsub.SubscriberClient.from_service_account_file(
         self.options.credentials_file)
     res = subscriber.subscribe(self.options.resource_name,
                                callback=self.onMessage)
     try:
         res.result()
     except Exception as e:
         logger.exception(e)
         logger.error(
             "Received error during subscribing - killing self and my background thread in 5 seconds for uwsgi to bring me back"
         )
         time.sleep(5)
         res.cancel()
         sys.exit(1)
Example #18
def main():
    if hasUWSGI:
        logger.info("started as uwsgi mule {0}".format(uwsgi.mule_id()))
    else:
        logger.info('started without uwsgi')

    if options.mqprotocol not in ('sqs',):
        logger.error('Can only process SQS queues, terminating')
        sys.exit(1)

    sqs_queue = connect_sqs(region_name=options.region,
                            aws_access_key_id=options.accesskey,
                            aws_secret_access_key=options.secretkey,
                            task_exchange=options.taskexchange)
    # consume our queue
    taskConsumer(sqs_queue, es, options).run()
Example #19
 def load_configs(self):
     '''Load all configured rules'''
     self.configs = []
     rules_location = os.path.join(self.config.alert_data_location, "rules")
     files = glob.glob(rules_location + "/*.json")
     for f in files:
         with open(f) as fd:
             try:
                 cfg = DotDict(hjson.load(fd))
                 self.validate_alert(cfg)
                 # We set the alert name to the filename (excluding .json)
                 alert_name = basename(f).replace('.json', '')
                 cfg['custom_alert_name'] = alert_name
                 self.configs.append(cfg)
             except Exception:
                 logger.error("Loading rule file {} failed".format(f))
Example #20
def esStoreCorrelations(es, correlations):
    for c in correlations:
        event=dict(
            utctimestamp=correlations[c]['utctimestamp'],
            summary=c,
            details=dict(
                username=correlations[c]['username'],
                macaddress=correlations[c]['macaddress'],
                entity=correlations[c]['entity']
            ),
            category='indicators'
        )
        try:
            es.save_object(index='intelligence', doc_id=getDocID(c), body=json.dumps(event))
        except Exception as e:
            logger.error("Exception %r when posting correlation " % e)
Example #21
def main():
    if hasUWSGI:
        logger.info("started as uwsgi mule {0}".format(uwsgi.mule_id()))
    else:
        logger.info('started without uwsgi')

    if options.mqprotocol not in ('sqs',):
        logger.error('Can only process SQS queues, terminating')
        sys.exit(1)

    mqConn, eventTaskQueue = connect_sqs(options.region, options.accesskey,
                                         options.secretkey,
                                         options.taskexchange)

    # consume our queue
    taskConsumer(mqConn, eventTaskQueue, es, options).run()
Example #22
def broadcastAttacker(attacker):
    '''
    send this attacker info to our message queue
    '''
    try:
        connString = 'amqp://{0}:{1}@{2}:{3}/{4}'.format(options.mquser,
                                                         options.mqpassword,
                                                         options.mqserver,
                                                         options.mqport,
                                                         options.mqvhost)
        if options.mqprotocol == 'amqps':
            mqSSL = True
        else:
            mqSSL = False
        mqConn = Connection(connString, ssl=mqSSL)

        alertExchange = Exchange(
            name=options.alertexchange,
            type='topic',
            durable=True)
        alertExchange(mqConn).declare()
        mqproducer = mqConn.Producer(serializer='json')

        logger.debug('Kombu configured')
    except Exception as e:
        logger.error('Exception while configuring kombu for alerts: {0}'.format(e))
    try:
        # generate an 'alert' structure for this attacker:
        mqAlert = dict(severity='NOTICE', category='attacker')

        if 'datecreated' in attacker:
            mqAlert['utctimestamp'] = attacker['datecreated'].isoformat()

        mqAlert['summary'] = 'New Attacker: {0} events: {1}, alerts: {2}'.format(attacker['indicators'], attacker['eventscount'], attacker['alertscount'])
        logger.debug(mqAlert)
        ensurePublish = mqConn.ensure(
            mqproducer,
            mqproducer.publish,
            max_retries=10)
        ensurePublish(
            mqAlert,
            exchange=alertExchange,
            routing_key=options.routingkey
        )
    except Exception as e:
        logger.error('Exception while publishing attacker: {0}'.format(e))
Example #23
 def __init__(self,
              region_name='us-east-1',
              aws_access_key_id=None,
              aws_secret_access_key=None):
     self.aws_access_key_id = aws_access_key_id
     self.aws_secret_access_key = aws_secret_access_key
     self.credentials = {}
     self.session_credentials = None
     self.session_conn_sts = None
     try:
         self.local_conn_sts = boto.sts.connect_to_region(
             **get_aws_credentials(region_name, self.aws_access_key_id,
                                   self.aws_secret_access_key))
      except Exception as e:
          logger.error("Unable to connect to STS due to exception %s" % e)
          raise
Example #24
    def _oauth_handshake(self):
        self._person_api_session = authenticate(
            self._config.oauth_url,
            AuthParams(
                client_id=self._config.person_api_client_id,
                client_secret=self._config.person_api_client_secret,
                audience=self._config.person_api_audience,
                scope=self._config.person_api_scope,
                grants=self._config.person_api_grants,
            ),
        )

        if self._person_api_session is None:
            logger.error("Failed to establish OAuth session")
            raise AuthFailure()

        self._last_authenticated = datetime.now()
Example #25
 def read_state_file(self):
     '''Populate self.data by reading and parsing the state file'''
     try:
         with open(self.filename, 'r') as f:
             self.data = json.load(f)
     except IOError:
         self.data = {}
     except ValueError:
         logger.error(
             "%s state file found but isn't a recognized json format" %
             self.filename)
         raise
     except TypeError:
         logger.error(
             "%s state file found and parsed but it doesn't contain an iterable object"
             % self.filename)
         raise
Example #26
def esSearch(es, macassignments=None):
    '''
    Search ES for an event that ties a username to a mac address
    This example searches for junos wifi correlations on authentication success
    Expecting an event like: user: [email protected]; mac: 5c:f9:38:b1:de:cf; author reason: roamed session; ssid: ANSSID; AP 46/2\n
    '''
    usermacre = re.compile(
        r'''user: (?P<username>.*?); mac: (?P<macaddress>.*?); ''',
        re.IGNORECASE)
    correlations = {}

    search_query = SearchQuery(minutes=options.correlationminutes)
    search_query.add_must(TermMatch('details.program',
                                    'AUTHORIZATION-SUCCESS'))
    search_query.add_must_not(PhraseMatch('summary', 'last-resort'))

    try:
        full_results = search_query.execute(es)
        results = full_results['hits']

        for r in results:
            fields = re.search(usermacre, r['_source']['summary'])
            if fields:
                if '{0} {1}'.format(
                        fields.group('username'),
                        fields.group('macaddress')) not in correlations:
                    if fields.group(
                            'macaddress')[0:8].lower() in macassignments:
                        entity = macassignments[fields.group('macaddress')
                                                [0:8].lower()]
                    else:
                        entity = 'unknown'
                    correlations['{0} {1}'.format(
                        fields.group('username'),
                        fields.group('macaddress'))] = dict(
                            username=fields.group('username'),
                            macaddress=fields.group('macaddress'),
                            entity=entity,
                            utctimestamp=r['_source']['utctimestamp'])
        return correlations

    except ElasticsearchBadServer:
        logger.error(
            'Elastic Search server could not be reached, check network connectivity'
        )
Example #27
def main():
    # Configuration loading
    config_location = os.path.dirname(sys.argv[0]) + "/" + "auth02mozdef.json"
    with open(config_location) as fd:
        config = DotDict(hjson.load(fd))

    if config is None:
        logger.error("No configuration file 'auth02mozdef.json' found.")
        sys.exit(1)

    state = load_state(config.state_file)
    # If bearer isn't set, reach out to auth0 for it
    if state['bearer'] is None:
        state['bearer'] = fetch_new_bearer(config)
    else:
        # Verify bearer token is still valid
        if not verify_bearer(state['bearer']):
            state['bearer'] = fetch_new_bearer(config)

    headers = {
        "Authorization": "Bearer {}".format(state['bearer']),
        "Accept": "application/json"
    }

    fromid = state['fromid']
    # Auth0 will interpret a 0 state as an error on our hosted instance, but will accept an empty parameter "as if it was 0"
    if fromid == 0 or fromid == "0":
        fromid = ""
    totals = 1
    start = 0
    length = 0

    # Fetch until we've gotten all messages
    while totals > start + length:
        (totals, start, length,
         lastid) = fetch_auth0_logs(config, headers, fromid)

        if totals == -1:
            if fromid == lastid:
                # We got everything, we're done!
                break
        fromid = lastid

    state['fromid'] = lastid
    save_state(config.state_file, state)
Example #28
    def main(self):
        self.parse_config('get_watchlist.conf', ['api_url', 'jwt_secret', 'use_auth'])

        jwt_token = None
        if self.config.use_auth.lower() != 'false':
            jwt_token = JWTAuth(self.config.jwt_secret)
            jwt_token.set_header_format('Bearer %s')

        r = requests.get(self.config.api_url, auth=jwt_token)
        # Connect to rest api and grab response
        if r.ok:
            response = r.text
            terms_list = json.loads(response)
            for term in terms_list:
                self.watchterm = term
                self.process_alert()
        else:
            logger.error('The watchlist request failed. Status {0}.\n'.format(r.status_code))
Example #30
def main():
    if hasUWSGI:
        logger.info("started as uwsgi mule {0}".format(uwsgi.mule_id()))
    else:
        logger.info('started without uwsgi')

    if options.mqprotocol not in ('sqs',):
        logger.error('Can only process SQS queues, terminating')
        sys.exit(1)

    sqs_conn, eventTaskQueue = connect_sqs(
        task_exchange=options.taskexchange,
        **get_aws_credentials(
            options.region,
            options.accesskey,
            options.secretkey))
    # consume our queue
    taskConsumer(sqs_conn, eventTaskQueue, es, options).run()
Example #31
def sendEventToPlugins(anevent, metadata, pluginList):
    '''compare the event to the plugin registrations.
       plugins register with a list of keys or values
       or values they want to match on
       this function compares that registration list
       to the current event and sends the event to plugins
       in order
    '''
    if not isinstance(anevent, dict):
        raise TypeError('event is type {0}, should be a dict'.format(
            type(anevent)))

    # expecting tuple of module,criteria,priority in pluginList
    # sort the plugin list by priority
    executed_plugins = []
    for plugin in sorted(pluginList, key=itemgetter(2), reverse=False):
        # assume we don't run this event through the plugin
        send = False
        if isinstance(plugin[1], list):
            try:
                plugin_matching_keys = set(
                    [item.lower() for item in plugin[1]])
                event_tokens = [e for e in dict2List(anevent)]
                if plugin_matching_keys.intersection(event_tokens):
                    send = True
            except TypeError:
                logger.error(
                    'TypeError on set intersection for dict {0}'.format(
                        anevent))
                return (anevent, metadata)
        if send:
            (anevent, metadata) = plugin[0].onMessage(anevent, metadata)
            if anevent is None:
                # plug-in is signalling to drop this message
                # early exit
                return (anevent, metadata)
            plugin_name = plugin[0].__module__.replace('plugins.', '')
            executed_plugins.append(plugin_name)
    # Tag all events with what plugins ran on it
    if 'mozdef' not in anevent:
        anevent['mozdef'] = {}
    anevent['mozdef']['plugins'] = executed_plugins

    return (anevent, metadata)
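Per the comment above, pluginList entries are (module, criteria, priority) tuples; a hypothetical sketch of a registration this dispatcher would match against events containing 'sourceipaddress':

class ExamplePlugin(object):
    '''hypothetical plugin stub; onMessage returns the (possibly modified) event'''
    registration = ['sourceipaddress']
    priority = 50

    def onMessage(self, message, metadata):
        return (message, metadata)

plugin = ExamplePlugin()
pluginList = [(plugin, plugin.registration, plugin.priority)]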
Example #32
def broadcastAttacker(attacker):
    '''
    send this attacker info to our message queue
    '''
    try:
        connString = 'amqp://{0}:{1}@{2}:{3}/{4}'.format(
            options.mquser, options.mqpassword, options.mqserver,
            options.mqport, options.mqvhost)
        if options.mqprotocol == 'amqps':
            mqSSL = True
        else:
            mqSSL = False
        mqConn = Connection(connString, ssl=mqSSL)

        alertExchange = Exchange(name=options.alertexchange,
                                 type='topic',
                                 durable=True)
        alertExchange(mqConn).declare()
        mqproducer = mqConn.Producer(serializer='json')

        logger.debug('Kombu configured')
    except Exception as e:
        logger.error(
            'Exception while configuring kombu for alerts: {0}'.format(e))
    try:
        # generate an 'alert' structure for this attacker:
        mqAlert = dict(severity='NOTICE', category='attacker')

        if 'datecreated' in attacker:
            mqAlert['utctimestamp'] = attacker['datecreated'].isoformat()

        mqAlert[
            'summary'] = 'New Attacker: {0} events: {1}, alerts: {2}'.format(
                attacker['indicators'], attacker['eventscount'],
                attacker['alertscount'])
        logger.debug(mqAlert)
        ensurePublish = mqConn.ensure(mqproducer,
                                      mqproducer.publish,
                                      max_retries=10)
        ensurePublish(mqAlert,
                      exchange=alertExchange,
                      routing_key=options.routingkey)
    except Exception as e:
        logger.error('Exception while publishing attacker: {0}'.format(e))
Example #33
    def onMessage(self, request, response):
        '''
        request: http://bottlepy.org/docs/dev/api.html#the-request-object
        response: http://bottlepy.org/docs/dev/api.html#the-response-object
        '''
        response.headers['X-PLUGIN'] = self.description

        watchcontent = None
        comment = None
        duration = None
        referenceID = None
        userid = None
        watchitem = False

        # loop through the fields of the form
        # and fill in our values
        try:
            for field in request.json:
                if self.name in field:
                    watchitem = field[self.name]
                if 'watchcontent' in field:
                    watchcontent = field['watchcontent']
                if 'duration' in field:
                    duration = field['duration']
                if 'comment' in field:
                    comment = field['comment']
                if 'referenceid' in field:
                    referenceID = field['referenceid']
                if 'userid' in field:
                    userid = field['userid']
            if watchitem and watchcontent is not None:
                if len(watchcontent) < 2:
                    logger.error(
                        '{0} does not meet requirements. Not added. \n'.format(
                            watchcontent))

                else:
                    self.watchItem(str(watchcontent), comment, duration,
                                   referenceID, userid)

        except Exception as e:
            logger.error('Error handling request.json %r \n' % (e))

        return (request, response)
Example #34
 def onMessage(self, message, metadata):
     """
         The complianceitems plugin is called when an event
         is posted with a doctype 'complianceitems'.
         Compliance items are stored in the complianceitems
         index, with doctype last_known_state
     """
     if not self.validate(message['details']):
         logger.error('Invalid format for complianceitem {0}'.format(message))
         return (None, None)
     item = self.cleanup_item(message['details'])
     docidstr = 'complianceitems'
     docidstr += item['check']['ref']
     docidstr += item['check']['test']['value']
     docidstr += item['target']
     metadata['id'] = hashlib.md5(docidstr).hexdigest()
     metadata['doc_type'] = 'last_known_state'
     metadata['index'] = 'complianceitems'
     return (item, metadata)
Example #35
class RoleManager:
    def __init__(self,
                 region_name='us-east-1',
                 aws_access_key_id=None,
                 aws_secret_access_key=None):
        self.aws_access_key_id = aws_access_key_id
        self.aws_secret_access_key = aws_secret_access_key
        self.credentials = {}
        self.session_credentials = None
        self.session_conn_sts = None
        try:
            self.local_conn_sts = boto.sts.connect_to_region(
                **get_aws_credentials(region_name, self.aws_access_key_id,
                                      self.aws_secret_access_key))
        except Exception as e:
            logger.error("Unable to connect to STS due to exception %s" % e)
            raise

        if self.aws_access_key_id is not None or self.aws_secret_access_key is not None:
            # We're using API credentials not an IAM Role
            try:
                if self.session_credentials is None or self.session_credentials.is_expired():
                    self.session_credentials = self.local_conn_sts.get_session_token()
            except Exception as e:
                logger.error("Unable to get session token due to exception %s" % e)
                raise
            try:
                creds = get_aws_credentials(
                    region_name, self.session_credentials.access_key,
                    self.session_credentials.secret_key,
                    self.session_credentials.session_token
                ) if self.session_credentials else {}
                self.session_conn_sts = boto.sts.connect_to_region(**creds)
            except Exception as e:
                logger.error("Unable to connect to STS with session token due to exception %s" % e)
                raise
Example #36
    def __init__(self):
        '''
        sends geomodel alert to SSO dashboard
        '''
        self.alert_classname = 'AlertGeomodel'

        config_file_path = os.path.join(os.path.dirname(__file__), 'dashboard_geomodel.json')
        json_obj = {}
        with open(config_file_path, "r") as fd:
            try:
                json_obj = hjson.load(fd)
            except ValueError:
                logger.error("FAILED to open the configuration file" + str(config_file_path))
        self.config = json_obj

        self.connect_db()

        self.registration = 'geomodel'
        self.priority = 1
Example #37
def main():
    # meant only to talk to SQS using boto
    # and process events as json.

    if hasUWSGI:
        logger.info("started as uwsgi mule {0}".format(uwsgi.mule_id()))
    else:
        logger.info('started without uwsgi')

    if options.mqprotocol not in ('sqs',):
        logger.error('Can only process SQS queues, terminating')
        sys.exit(1)

    sqs_conn, eventTaskQueue = connect_sqs(
        task_exchange=options.taskexchange,
        **get_aws_credentials(options.region, options.accesskey,
                              options.secretkey))
    # consume our queue
    taskConsumer(sqs_conn, eventTaskQueue, es).run()
Example #39
    def onMessage(self, alert):
        source = alert['_source']

        # Find the self.option that contains one of the message tags
        selected_option = self.identify_option(source['tags'])
        if selected_option is None:
            logger.error("Unable to find config option for alert tags: {0}".format(source['tags']))

        if 'summary' in source:
            headers = {
                'Content-type': 'application/json',
            }

            payload = hjson.dumpsJSON({
                "service_key": "{0}".format(selected_option['service_key']),
                "event_type": "trigger",
                "description": source['summary'],
                "client": "MozDef",
                "client_url": "{0}/alert/{1}".format(self.options['web_url'], alert['_id']),
                "contexts": [
                    {
                        "type": "link",
                        "href": "{0}".format(selected_option['doc']),
                        "text": "View runbook on mana"
                    }
                ]
            })

            resp = requests.post(
                'https://events.pagerduty.com/generic/2010-04-15/create_event.json',
                headers=headers,
                data=payload,
            )
            if not resp.ok:
                logger.exception("Received invalid response from pagerduty: {0} - {1}".format(resp.status_code, resp.text))
            else:
                logger.info("Triggered pagerduty notification for alert - {0}".format(alert['_id']))

        return alert
Example #40
def isJVMMemoryHigh():
    url = "{0}/_nodes/stats?pretty=true".format(random.choice(options.esservers))
    r = requests.get(url)
    logger.debug(r)
    if r.status_code == 200:
        nodestats = r.json()

        for node in nodestats['nodes']:
            loadaverage = nodestats['nodes'][node]['os']['cpu']['load_average']
            cpuusage = nodestats['nodes'][node]['os']['cpu']['percent']
            nodename = nodestats['nodes'][node]['name']
            jvmused = nodestats['nodes'][node]['jvm']['mem']['heap_used_percent']
            logger.debug('{0}: cpu {1}%  jvm {2}% load average: {3}'.format(nodename, cpuusage, jvmused, loadaverage))
            if jvmused > options.jvmlimit:
                logger.info('{0}: cpu {1}%  jvm {2}% load average: {3} recommending cache clear'.format(nodename, cpuusage, jvmused, loadaverage))
                return True
        return False
    else:
        logger.error(r)
        return False
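For reference, a trimmed sketch of the _nodes/stats response shape the loop above expects, with field paths taken from the code (values hypothetical):

nodestats = {
    'nodes': {
        'node_id_abc123': {
            'name': 'es-node-1',
            'os': {'cpu': {'percent': 12, 'load_average': {'1m': 0.42}}},
            'jvm': {'mem': {'heap_used_percent': 71}},
        }
    }
}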
Example #41
 def onMessage(self, message, metadata):
     """
         The complianceitems plugin is called when an event
         is posted with a doctype 'complianceitems'.
         Compliance items are stored in the complianceitems
         index, with doctype last_known_state
     """
     if not self.validate(message['details']):
         logger.error(
             'Invalid format for complianceitem {0}'.format(message))
         return (None, None)
     item = self.cleanup_item(message['details'])
     docidstr = 'complianceitems'
     docidstr += item['check']['ref']
     docidstr += item['check']['test']['value']
     docidstr += item['target']
     metadata['id'] = hashlib.md5(docidstr).hexdigest()
     metadata['doc_type'] = 'last_known_state'
     metadata['index'] = 'complianceitems'
     return (item, metadata)
Example #43
def verify_events(options):
    es_client = ElasticsearchClient(options.esservers)
    for required_field in options.required_fields:
        logger.debug('Looking for events without ' + required_field)
        search_query = SearchQuery(hours=12)
        search_query.add_must_not(ExistsMatch(required_field))

        # Exclude all events that are mozdef related health and stats
        search_query.add_must_not(TermMatch('_type', 'mozdefstats'))
        search_query.add_must_not(TermMatch('_type', 'mozdefhealth'))

        search_query.add_aggregation(Aggregation('_type'))
        # We don't care about the actual events, we only want the numbers
        results = search_query.execute(es_client, size=1)
        for aggreg_term in results['aggregations']['_type']['terms']:
            count = aggreg_term['count']
            category = aggreg_term['key']
            logger.error(
                "Found {0} bad events of _type '{1}' missing '{2}' field".
                format(count, category, required_field))
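The aggregation result shape the final loop expects, inferred from the key accesses (values hypothetical):

results = {
    'aggregations': {
        '_type': {
            'terms': [
                {'key': 'event', 'count': 42},  # doc _type and the number of offending events
            ]
        }
    }
}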
Example #44
def main():
    # meant only to talk to SQS using boto
    # and process events as json.

    if hasUWSGI:
        logger.info("started as uwsgi mule {0}".format(uwsgi.mule_id()))
    else:
        logger.info("started without uwsgi")

    if options.mqprotocol not in ("sqs",):
        logger.error("Can only process SQS queues, terminating")
        sys.exit(1)

    sqs_queue = connect_sqs(
        region_name=options.region,
        aws_access_key_id=options.accesskey,
        aws_secret_access_key=options.secretkey,
        task_exchange=options.taskexchange,
    )
    # consume our queue
    taskConsumer(sqs_queue, es).run()
Example #45
 def run(self):
     while True:
         try:
             records = self.sqs_queue.receive_messages(MaxNumberOfMessages=self.options.prefetch)
             for msg in records:
                 msg_body = msg.body
                 try:
                     # msg.body should be json
                     message_json = json.loads(msg_body)
                     self.on_message(message_json)
                     # delete message from queue
                     msg.delete()
                 except ValueError:
                     logger.error("Invalid message, not JSON <dropping message and continuing>: %r" % msg_body)
                     msg.delete()
                     continue
             time.sleep(self.options.sleep_time)
         except (SSLEOFError, SSLError, socket.error):
             logger.info("Received network related error...reconnecting")
             time.sleep(5)
             self.sqs_queue = connect_sqs(options.region, options.accesskey, options.secretkey, options.taskexchange)
Example #46
def esPruneIndexes():
    logger.debug('started')
    try:
        es = ElasticsearchClient((list('{0}'.format(s) for s in options.esservers)))
        indices = es.get_indices()
        # do the pruning
        for (index, dobackup, rotation, pruning) in zip(options.indices, options.dobackup, options.rotation, options.pruning):
            try:
                if pruning != '0':
                    index_to_prune = index
                    if rotation == 'daily':
                        idate = date.strftime(toUTC(datetime.now()) - timedelta(days=int(pruning)), '%Y%m%d')
                        index_to_prune += '-%s' % idate
                    elif rotation == 'monthly':
                        idate = date.strftime(datetime.utcnow() - timedelta(days=31 * int(pruning)), '%Y%m')
                        index_to_prune += '-%s' % idate

                    if index_to_prune in indices:
                        logger.debug('Deleting index: %s' % index_to_prune)
                        es.delete_index(index_to_prune, True)
                    else:
                        logger.error('Error deleting index %s, index missing' % index_to_prune)
            except Exception as e:
                logger.error("Unhandled exception while deleting %s, terminating: %r" % (index_to_prune, e))

    except Exception as e:
        logger.error("Unhandled exception, terminating: %r" % e)
Example #47
    def save_event(self, event, metadata):
        try:
            # drop the message if a plug in set it to None
            # signaling a discard
            if event is None:
                return

            # make a json version for posting to elastic search
            jbody = json.JSONEncoder().encode(event)

            try:
                bulk = False
                if self.options.esbulksize != 0:
                    bulk = True

                self.esConnection.save_event(index=metadata["index"],
                                             doc_id=metadata["id"],
                                             body=jbody,
                                             bulk=bulk)

            except (ElasticsearchBadServer, ElasticsearchInvalidIndex) as e:
                # handle loss of server or race condition with index rotation/creation/aliasing
                try:
                    self.esConnection = esConnect()
                    return
                except (ElasticsearchBadServer, ElasticsearchInvalidIndex,
                        ElasticsearchException):
                    logger.exception(
                        "ElasticSearchException: {0} reported while indexing event, messages lost"
                        .format(e))
                    return
            except ElasticsearchException as e:
                logger.exception(
                    "ElasticSearchException: {0} reported while indexing event, messages lost"
                    .format(e))
                logger.error("Malformed jbody: %r" % jbody)
                return
        except Exception as e:
            logger.exception(e)
            logger.error("Malformed message: %r" % event)
Example #48
def main():
    logger.debug('started')

    json_headers = {
        'Content-Type': 'application/json',
    }
    try:
        esserver = options.esservers[0]
        idate = date.strftime(datetime.utcnow() - timedelta(days=1), '%Y%m%d')
        bucketdate = date.strftime(datetime.utcnow() - timedelta(days=1),
                                   '%Y-%m')
        hostname = socket.gethostname()

        # Create or update snapshot configuration
        logger.debug('Configuring snapshot repository')
        snapshot_config = {
            "type": "s3",
            "settings": {
                "bucket": options.aws_bucket,
                "base_path":
                "elasticsearch/{0}/{1}".format(bucketdate, hostname)
            }
        }
        r = requests.put('%s/_snapshot/s3backup' % esserver,
                         headers=json_headers,
                         data=json.dumps(snapshot_config))
        if 'status' in r.json():
            logger.error("Error while registering snapshot repo: %s" % r.text)
        else:
            logger.debug('snapshot repo registered')

        # do the actual snapshotting
        for (index, dobackup, rotation,
             pruning) in zip(options.indices, options.dobackup,
                             options.rotation, options.pruning):
            if dobackup == '1':
                index_to_snapshot = index
                if rotation == 'daily':
                    index_to_snapshot += '-%s' % idate
                elif rotation == 'monthly':
                    index_to_snapshot += '-%s' % idate[:6]

                logger.debug(
                    'Creating %s snapshot (this may take a while)...' %
                    index_to_snapshot)
                snapshot_config = {'indices': index_to_snapshot}
                epoch = calendar.timegm(datetime.utcnow().utctimetuple())
                r = requests.put(
                    '{0}/_snapshot/s3backup/{1}-{2}?wait_for_completion=true'.
                    format(esserver, index_to_snapshot, epoch),
                    headers=json_headers,
                    data=json.dumps(snapshot_config))
                if 'status' in r.json():
                    logger.error('Error snapshotting %s: %s' %
                                 (index_to_snapshot, r.json()))
                else:
                    logger.debug('snapshot %s finished' % index_to_snapshot)

    except Exception as e:
        logger.error("Unhandled exception, terminating: %r" % e)
Example #49
def verify_events(options):
    es_client = ElasticsearchClient(options.esservers)
    for required_field in options.required_fields:
        logger.debug('Looking for events without ' + required_field)
        search_query = SearchQuery(hours=12)
        search_query.add_must_not(ExistsMatch(required_field))

        # Exclude all events that are mozdef related health and stats
        search_query.add_must_not(TermMatch('_type', 'mozdefstats'))
        search_query.add_must_not(TermMatch('_type', 'mozdefhealth'))

        search_query.add_aggregation(Aggregation('_type'))
        # We don't care about the actual events, we only want the numbers
        results = search_query.execute(es_client, size=1)
        for aggreg_term in results['aggregations']['_type']['terms']:
            count = aggreg_term['count']
            category = aggreg_term['key']
            logger.error("Found {0} bad events of _type '{1}' missing '{2}' field".format(
                count,
                category,
                required_field
            ))
Example #50
    def onMessage(self, request, response):
        '''
        request: http://bottlepy.org/docs/dev/api.html#the-request-object
        response: http://bottlepy.org/docs/dev/api.html#the-response-object

        '''
        # format/validate request.json:
        ipaddress = None
        sendToBHVPC = False

        # loop through the fields of the form
        # and fill in our values
        try:
            for field in request.json:
                # were we checked?
                if self.name in field:
                    sendToBHVPC = field[self.name]
                if 'ipaddress' in field:
                    ipaddress = field['ipaddress']
            # are we configured?
            if self.multioptions is None:
                logger.error(
                    "Customs server blockip requested but not configured\n")
                sendToBHVPC = False

            if sendToBHVPC and ipaddress is not None:
                # figure out the CIDR mask
                if isIPv4(ipaddress) or isIPv6(ipaddress):
                    ipcidr = netaddr.IPNetwork(ipaddress)
                    if not ipcidr.ip.is_loopback() \
                       and not ipcidr.ip.is_private() \
                       and not ipcidr.ip.is_reserved():
                        ipaddress = str(ipcidr.cidr)
                        self.addBlackholeEntry(ipaddress)
                        logger.info('Blackholed {0}\n'.format(ipaddress))
        except Exception as e:
            logger.error('Error handling request.json %r \n' % (e))

        return (request, response)
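netaddr normalizes a bare address into a host CIDR, which is what addBlackholeEntry receives above; a minimal sketch (the address is from a documentation range, used as a stand-in):

import netaddr

ipcidr = netaddr.IPNetwork('203.0.113.7')
print(str(ipcidr.cidr))  # 203.0.113.7/32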
Example #51
    def run(self):
        self.taskQueue.set_message_class(RawMessage)
        while True:
            try:
                records = self.taskQueue.get_messages(options.prefetch)
                for msg in records:
                    body_message = msg.get_body()
                    event = json.loads(body_message)

                    if not event['Message']:
                        logger.error(
                            'Invalid message format for cloudtrail SQS messages'
                        )
                        logger.error('Malformed Message: %r' % body_message)
                        continue

                    if event['Message'] == 'CloudTrail validation message.':
                        # We don't care about these messages
                        continue

                    message_json = json.loads(event['Message'])

                    if 's3ObjectKey' not in message_json:
                        logger.error(
                            'Invalid message format, expecting an s3ObjectKey in Message'
                        )
                        logger.error('Malformed Message: %r' % body_message)
                        continue

                    s3_log_files = message_json['s3ObjectKey']
                    for log_file in s3_log_files:
                        logger.debug('Downloading and parsing ' + log_file)
                        bucket = self.s3_connection.get_bucket(
                            message_json['s3Bucket'])

                        log_file_lookup = bucket.lookup(log_file)
                        events = self.process_file(log_file_lookup)
                        for event in events:
                            self.on_message(event)

                    self.taskQueue.delete_message(msg)
            except (SSLEOFError, SSLError, socket.error):
                logger.info('Received network related error...reconnecting')
                time.sleep(5)
                self.connection, self.taskQueue = connect_sqs(
                    task_exchange=options.taskexchange,
                    **get_aws_credentials(options.region, options.accesskey,
                                          options.secretkey))
                self.taskQueue.set_message_class(RawMessage)
Example #52
File: plugins.py Project: IFGHou/MozDef
def sendEventToPlugins(anevent, metadata, pluginList):
    '''compare the event to the plugin registrations.
       plugins register with a list of keys or values
       or values they want to match on
       this function compares that registration list
       to the current event and sends the event to plugins
       in order
    '''
    if not isinstance(anevent, dict):
        raise TypeError('event is type {0}, should be a dict'.format(type(anevent)))

    # expecting tuple of module,criteria,priority in pluginList
    # sort the plugin list by priority
    executed_plugins = []
    for plugin in sorted(pluginList, key=itemgetter(2), reverse=False):
        # assume we don't run this event through the plugin
        send = False
        if isinstance(plugin[1], list):
            try:
                plugin_matching_keys = set([item.lower() for item in plugin[1]])
                event_tokens = [e for e in dict2List(anevent)]
                if plugin_matching_keys.intersection(event_tokens):
                    send = True
            except TypeError:
                logger.error('TypeError on set intersection for dict {0}'.format(anevent))
                return (anevent, metadata)
        if send:
            (anevent, metadata) = plugin[0].onMessage(anevent, metadata)
            if anevent is None:
                # plug-in is signalling to drop this message
                # early exit
                return (anevent, metadata)
            plugin_name = plugin[0].__module__.replace('plugins.', '')
            executed_plugins.append(plugin_name)
    # Tag all events with what plugins ran on it
    anevent['plugins'] = executed_plugins

    return (anevent, metadata)
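A minimal usage sketch: the plugin class, registration list and event below are invented for illustration, but follow the (module, criteria, priority) tuple shape the function expects.

# A hypothetical plugin whose criteria match any event containing the token
# 'sourceipaddress' (dict2List flattens the event into lowercase tokens).
class TagPlugin(object):
    def onMessage(self, message, metadata):
        message.setdefault('tags', []).append('tagged')
        return (message, metadata)

pluginList = [(TagPlugin(), ['sourceipaddress'], 1)]  # (module, criteria, priority)
event = {'summary': 'login', 'details': {'sourceipaddress': '10.0.0.1'}}
(event, metadata) = sendEventToPlugins(event, {'index': 'events', 'id': None}, pluginList)
# event['plugins'] now records the module names of the plugins that ran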
Example #53
    def save_event(self, event, metadata):
        try:
            # drop the message if a plug in set it to None
            # signaling a discard
            if event is None:
                return

            # make a json version for posting to elastic search
            jbody = json.JSONEncoder().encode(event)

            try:
                bulk = False
                if self.options.esbulksize != 0:
                    bulk = True

                self.esConnection.save_event(
                    index=metadata['index'],
                    doc_id=metadata['id'],
                    body=jbody,
                    bulk=bulk
                )

            except (ElasticsearchBadServer, ElasticsearchInvalidIndex) as e:
                # handle loss of server or race condition with index rotation/creation/aliasing
                try:
                    self.esConnection = esConnect()
                    return
                except kombu.exceptions.MessageStateError:
                    return
            except ElasticsearchException as e:
                logger.exception('ElasticSearchException: {0} reported while indexing event'.format(e))
                logger.error('Malformed jbody: %r' % jbody)
                return
        except Exception as e:
            logger.exception(e)
            logger.error('Malformed message: %r' % event)
Example #54
    def assume_role(self,
                    role_arn,
                    role_session_name='unknown',
                    policy=None):
        '''Return a boto.sts.credential.Credential object given a role_arn.
        First check if a Credential object exists in the local self.credentials
        cache that is not expired. If there isn't one, assume the role of role_arn,
        store the Credential in the credentials cache and return it'''
        logger.debug("Connecting to sts")
        if role_arn in self.credentials:
            if not self.credentials[role_arn] or not self.credentials[role_arn].is_expired():
                # Return the cached value if it's False (indicating a permissions issue) or if
                # it hasn't expired.
                return self.credentials[role_arn]
        try:
            self.credentials[role_arn] = self.conn_sts.assume_role(
                role_arn=role_arn,
                role_session_name=role_session_name,
                policy=policy).credentials
            logger.debug("Assumed new role with credential %s" % self.credentials[role_arn].to_dict())
        except Exception as e:
            print(e)
            logger.error("Unable to assume role %s due to exception %s" % (role_arn, e.message))
            self.credentials[role_arn] = False
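A usage sketch, assuming an already-constructed instance (called manager here) of the surrounding class, with conn_sts holding a boto STS connection and credentials starting as an empty dict; the role ARN is hypothetical.

role_arn = 'arn:aws:iam::123456789012:role/example-role'
creds = manager.assume_role(role_arn, role_session_name='mozdef')
if creds:
    # temporary STS credentials, cached until is_expired() turns True
    access_key = creds.access_key
else:
    # False is cached too, so a role we cannot assume is not retried on every call
    logger.error('unable to assume %s' % role_arn)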
Example #55
    def run(self):
        self.taskQueue.set_message_class(RawMessage)
        while True:
            try:
                records = self.taskQueue.get_messages(options.prefetch)
                for msg in records:
                    body_message = msg.get_body()
                    event = json.loads(body_message)

                    if 'Message' not in event or not event['Message']:
                        logger.error('Invalid message format for cloudtrail SQS messages')
                        logger.error('Malformed Message: %r' % body_message)
                        continue

                    if event['Message'] == 'CloudTrail validation message.':
                        # We don't care about these messages
                        continue

                    message_json = json.loads(event['Message'])

                    if 's3ObjectKey' not in message_json:
                        logger.error('Invalid message format, expecting an s3ObjectKey in Message')
                        logger.error('Malformed Message: %r' % body_message)
                        continue

                    s3_log_files = message_json['s3ObjectKey']
                    for log_file in s3_log_files:
                        logger.debug('Downloading and parsing ' + log_file)
                        bucket = self.s3_connection.get_bucket(message_json['s3Bucket'])

                        log_file_lookup = bucket.lookup(log_file)
                        events = self.process_file(log_file_lookup)
                        for event in events:
                            self.on_message(event)

                    self.taskQueue.delete_message(msg)
            except (SSLEOFError, SSLError, socket.error):
                logger.info('Received network related error...reconnecting')
                time.sleep(5)
                self.connection, self.taskQueue = connect_sqs(
                    task_exchange=options.taskexchange,
                    **get_aws_credentials(
                        options.region,
                        options.accesskey,
                        options.secretkey))
                self.taskQueue.set_message_class(RawMessage)
Example #56
def keyMapping(aDict):
    '''map common key/fields to a normalized structure,
       explicitly typed when possible to avoid schema changes for upstream consumers
       Special accommodations made for logstash, nxlog, beaver, heka and CEF
       Some shippers attempt to conform to the logstash-style @fieldname convention.
       This strips the leading at symbol since it breaks some elastic search
       libraries like elasticutils.
    '''
    returndict = dict()

    # uncomment to save the source event for debugging, or chain of custody/forensics
    # returndict['original']=aDict

    # set the timestamp when we received it, i.e. now
    returndict['receivedtimestamp'] = toUTC(datetime.now()).isoformat()
    returndict['mozdefhostname'] = options.mozdefhostname
    returndict[u'details'] = {}
    try:
        for k, v in aDict.iteritems():
            k = removeAt(k).lower()

            if k == 'sourceip':
                returndict[u'details']['eventsourceipaddress'] = v

            if k in ('facility', 'source'):
                returndict[u'source'] = v

            if k in ('message', 'summary'):
                returndict[u'summary'] = toUnicode(v)

            if k == 'payload' and 'summary' not in aDict:
                # special case for heka if it sends payload as well as a summary, keep both but move payload to the details section.
                returndict[u'summary'] = toUnicode(v)
            elif k == 'payload':
                returndict[u'details']['payload'] = toUnicode(v)

            if k in ('eventtime', 'timestamp', 'utctimestamp', 'date'):
                returndict[u'utctimestamp'] = toUTC(v).isoformat()
                returndict[u'timestamp'] = toUTC(v).isoformat()

            if k in ('hostname', 'source_host', 'host'):
                returndict[u'hostname'] = toUnicode(v)

            if k == 'tags':
                if 'tags' not in returndict:
                    returndict[u'tags'] = []
                if type(v) == list:
                    returndict[u'tags'] += v
                else:
                    if len(v) > 0:
                        returndict[u'tags'].append(v)

            # nxlog keeps the severity name in syslogseverity, everyone else should use severity or level.
            if k in ('syslogseverity', 'severity', 'severityvalue', 'level', 'priority'):
                returndict[u'severity'] = toUnicode(v).upper()

            if k in ('facility', 'syslogfacility'):
                returndict[u'facility'] = toUnicode(v)

            if k in ('pid', 'processid'):
                returndict[u'processid'] = toUnicode(v)

            # nxlog sets sourcename to the processname (i.e. sshd), everyone else should call it process name or pname
            if k in ('pname', 'processname', 'sourcename', 'program'):
                returndict[u'processname'] = toUnicode(v)

            # the file, or source
            if k in ('path', 'logger', 'file'):
                returndict[u'eventsource'] = toUnicode(v)

            if k in ('type', 'eventtype', 'category'):
                returndict[u'category'] = toUnicode(v)

            # custom fields as a list/array
            if k in ('fields', 'details'):
                if type(v) is not dict:
                    returndict[u'details'][u'message'] = v
                else:
                    if len(v) > 0:
                        for details_key, details_value in v.iteritems():
                            returndict[u'details'][details_key] = details_value

            # custom fields/details as a one off, not in an array
            # i.e. fields.something=value or details.something=value
            # move them to a dict for consistency in querying
            if k.startswith('fields.') or k.startswith('details.'):
                newName = k.replace('fields.', '')
                newName = newName.lower().replace('details.', '')
                # add field with a special case for shippers that
                # don't send details
                # in an array as int/floats/strings
                # we let them dictate the data type with field_datatype
                # convention
                if newName.endswith('_int'):
                    returndict[u'details'][unicode(newName)] = int(v)
                elif newName.endswith('_float'):
                    returndict[u'details'][unicode(newName)] = float(v)
                else:
                    returndict[u'details'][unicode(newName)] = toUnicode(v)

        # nxlog windows log handling
        if 'Domain' in aDict and 'SourceModuleType' in aDict:
            # nxlog parses all windows event fields very well
            # copy all fields to details
            for k, v in aDict.iteritems():
                returndict[u'details'][removeAt(k).lower()] = v

        if 'utctimestamp' not in returndict:
            # default in case we don't find a reasonable timestamp
            returndict['utctimestamp'] = toUTC(datetime.now()).isoformat()

    except Exception as e:
        logger.exception('Received exception while normalizing message: %r' % e)
        logger.error('Malformed message: %r' % aDict)
        return None

    return returndict
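A small illustration of the normalization above, in particular the fields./details. flattening and the _int/_float datatype convention; the input event is invented and assumes the module context (toUTC, removeAt, options) is available.

raw = {
    '@timestamp': '2017-01-01T12:00:00Z',  # leading @ stripped by removeAt()
    'hostname': 'web1.example.com',
    'summary': 'sshd session opened',
    'details.duration_float': '0.25',      # *_float coerced to float
    'fields.retries_int': '3',             # *_int coerced to int
}
normalized = keyMapping(raw)
# normalized['utctimestamp'] and ['timestamp'] hold ISO8601 UTC times;
# normalized['details'] == {'duration_float': 0.25, 'retries_int': 3}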
Example #57
    def on_message(self, body):
        # print("RECEIVED MESSAGE: %r" % (body, ))
        try:
            # default elastic search metadata for an event
            metadata = {
                'index': 'events',
                'id': None
            }
            # just to be safe..check what we were sent.
            if isinstance(body, dict):
                bodyDict = body
            elif isinstance(body, str) or isinstance(body, unicode):
                try:
                    bodyDict = json.loads(body)   # let's assume it's json
                except ValueError:
                    # not json..log the message and drop it
                    logger.error("Unknown body type received %r" % body)
                    return
            else:
                logger.error("Unknown body type received %r\n" % body)
                return

            if 'customendpoint' in bodyDict and bodyDict['customendpoint']:
                # custom document
                # send to plugins to allow them to modify it if needed
                (normalizedDict, metadata) = sendEventToPlugins(bodyDict, metadata, pluginList)
            else:
                # normalize the dict
                # to the mozdef events standard
                normalizedDict = keyMapping(bodyDict)

                # send to plugins to allow them to modify it if needed
                if normalizedDict is not None and isinstance(normalizedDict, dict):
                    (normalizedDict, metadata) = sendEventToPlugins(normalizedDict, metadata, pluginList)

            # drop the message if a plug in set it to None
            # signaling a discard
            if normalizedDict is None:
                return

            # make a json version for posting to elastic search
            jbody = json.JSONEncoder().encode(normalizedDict)

            try:
                bulk = False
                if options.esbulksize != 0:
                    bulk = True

                self.esConnection.save_event(
                    index=metadata['index'],
                    doc_id=metadata['id'],
                    body=jbody,
                    bulk=bulk
                )

            except (ElasticsearchBadServer, ElasticsearchInvalidIndex) as e:
                # handle loss of server or race condition with index rotation/creation/aliasing
                try:
                    self.esConnection = esConnect()
                    return
                except kombu.exceptions.MessageStateError:
                    # state may be already set.
                    return
            except ElasticsearchException as e:
                # exception target for queue capacity issues reported by elastic search so catch the error, report it and retry the message
                try:
                    logger.exception('ElasticSearchException: {0} reported while indexing event'.format(e))
                    return
                except kombu.exceptions.MessageStateError:
                    # state may be already set.
                    return
        except Exception as e:
            logger.exception(e)
            logger.error('Malformed message: %r' % body)
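Two illustrative message bodies for the branching above; the values are invented.

# A JSON string body: parsed, normalized via keyMapping(), then sent to plugins.
raw_body = '{"hostname": "web1", "summary": "auth ok"}'

# A pre-formed custom document: the customendpoint flag bypasses keyMapping()
# and hands the dict straight to the plugins.
custom_body = {'customendpoint': True, 'summary': 'vuln scan result'}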
Example #58
    def on_message(self, message):
        try:
            # default elastic search metadata for an event
            metadata = {
                'index': 'events',
                'id': None
            }
            event = {}

            event['receivedtimestamp'] = toUTC(datetime.now()).isoformat()
            event['mozdefhostname'] = self.options.mozdefhostname

            if 'tags' in event:
                event['tags'].extend([self.options.taskexchange])
            else:
                event['tags'] = [self.options.taskexchange]

            event['severity'] = 'INFO'
            event['details'] = {}

            for message_key, message_value in message.iteritems():
                if 'Message' == message_key:
                    try:
                        message_json = json.loads(message_value)
                        for inside_message_key, inside_message_value in message_json.iteritems():
                            if inside_message_key in ('type', 'category'):
                                event['category'] = inside_message_value
                                # add type subcategory for filtering after
                                # original type field is rewritten as category
                                event['type'] = 'event'
                            elif inside_message_key in ('processid', 'pid'):
                                processid = str(inside_message_value)
                                processid = processid.replace('[', '')
                                processid = processid.replace(']', '')
                                event['processid'] = processid
                            elif inside_message_key in ('processname', 'pname'):
                                event['processname'] = inside_message_value
                            elif inside_message_key == 'hostname':
                                event['hostname'] = inside_message_value
                            elif inside_message_key in ('time', 'timestamp'):
                                event['timestamp'] = toUTC(inside_message_value).isoformat()
                                event['utctimestamp'] = toUTC(event['timestamp']).astimezone(pytz.utc).isoformat()
                            elif inside_message_key in ('summary', 'payload', 'message'):
                                event['summary'] = inside_message_value.lstrip()
                            elif inside_message_key == 'source':
                                event['source'] = inside_message_value
                            elif inside_message_key in ('fields', 'details'):
                                if type(inside_message_value) is not dict:
                                    event[u'details'][u'message'] = inside_message_value
                                else:
                                    if len(inside_message_value) > 0:
                                        for details_key, details_value in inside_message_value.iteritems():
                                            event[u'details'][details_key] = details_value
                            else:
                                event['details'][inside_message_key] = inside_message_value
                    except ValueError:
                        event['summary'] = message_value
            (event, metadata) = sendEventToPlugins(event, metadata, self.pluginList)
            # Drop message if plugins set to None
            if event is None:
                return
            self.save_event(event, metadata)
        except Exception as e:
            logger.exception(e)
            logger.error('Malformed message: %r' % message)
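A sample message (invented values) of the shape the loop above unpacks: the interesting event arrives JSON-encoded under the 'Message' key.

import json

message = {
    'Type': 'Notification',
    'Message': json.dumps({
        'category': 'syslog',                        # becomes event['category']
        'hostname': 'web1',
        'timestamp': '2017-01-01T12:00:00Z',         # normalized via toUTC()
        'summary': ' sshd: accepted publickey',      # leading space removed by lstrip()
        'details': {'sourceipaddress': '10.0.0.5'},  # merged into event['details']
    }),
}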
Example #59
def keyMapping(aDict):
    '''map common key/fields to a normalized structure,
       explicitly typed when possible to avoid schema changes for upstream consumers
       Special accommodations made for logstash, nxlog, beaver, heka and CEF
       Some shippers attempt to conform to the logstash-style @fieldname convention.
       This strips the leading at symbol since it breaks some elastic search
       libraries like elasticutils.
    '''
    returndict = dict()

    returndict['source'] = 'cloudtrail'
    returndict['details'] = {}
    returndict['category'] = 'cloudtrail'
    returndict['processid'] = str(os.getpid())
    returndict['processname'] = sys.argv[0]
    returndict['severity'] = 'INFO'
    if 'sourceIPAddress' in aDict and 'eventName' in aDict and 'eventSource' in aDict:
        summary_str = "{0} performed {1} in {2}".format(
            aDict['sourceIPAddress'],
            aDict['eventName'],
            aDict['eventSource']
        )
        returndict['summary'] = summary_str

    if 'eventName' in aDict:
        # Uppercase first character
        aDict['eventName'] = aDict['eventName'][0].upper() + aDict['eventName'][1:]
        returndict['details']['eventVerb'] = CLOUDTRAIL_VERB_REGEX.findall(aDict['eventName'])[0]
        returndict['details']['eventReadOnly'] = (returndict['details']['eventVerb'] in ['Describe', 'Get', 'List'])
    # set the timestamp when we received it, i.e. now
    returndict['receivedtimestamp'] = toUTC(datetime.now()).isoformat()
    returndict['mozdefhostname'] = options.mozdefhostname
    try:
        for k, v in aDict.iteritems():
            k = removeAt(k).lower()

            if k == 'sourceip':
                returndict[u'details']['sourceipaddress'] = v

            elif k == 'sourceipaddress':
                returndict[u'details']['sourceipaddress'] = v

            elif k in ('facility', 'source'):
                returndict[u'source'] = v

            elif k == 'eventsource':
                returndict[u'hostname'] = v

            elif k in ('message', 'summary'):
                returndict[u'summary'] = toUnicode(v)

            elif k == 'payload' and 'summary' not in aDict:
                # special case for heka if it sends payload as well as a summary, keep both but move payload to the details section.
                returndict[u'summary'] = toUnicode(v)
            elif k == 'payload':
                returndict[u'details']['payload'] = toUnicode(v)

            elif k in ('eventtime', 'timestamp', 'utctimestamp', 'date'):
                returndict[u'utctimestamp'] = toUTC(v).isoformat()
                returndict[u'timestamp'] = toUTC(v).isoformat()

            elif k in ('hostname', 'source_host', 'host'):
                returndict[u'hostname'] = toUnicode(v)

            elif k == 'tags':
                if 'tags' not in returndict:
                    returndict[u'tags'] = []
                if type(v) == list:
                    returndict[u'tags'] += v
                else:
                    if len(v) > 0:
                        returndict[u'tags'].append(v)

            # nxlog keeps the severity name in syslogseverity, everyone else should use severity or level.
            elif k in ('syslogseverity', 'severity', 'severityvalue', 'level', 'priority'):
                returndict[u'severity'] = toUnicode(v).upper()

            elif k in ('facility', 'syslogfacility'):
                returndict[u'facility'] = toUnicode(v)

            elif k in ('pid', 'processid'):
                returndict[u'processid'] = toUnicode(v)

            # nxlog sets sourcename to the processname (i.e. sshd), everyone else should call it process name or pname
            elif k in ('pname', 'processname', 'sourcename', 'program'):
                returndict[u'processname'] = toUnicode(v)

            # the file, or source
            elif k in ('path', 'logger', 'file'):
                returndict[u'eventsource'] = toUnicode(v)

            elif k in ('type', 'eventtype', 'category'):
                returndict[u'category'] = toUnicode(v)
                returndict[u'type'] = 'cloudtrail'

            # custom fields as a list/array
            elif k in ('fields', 'details'):
                if type(v) is not dict:
                    returndict[u'details'][u'message'] = v
                else:
                    if len(v) > 0:
                        for details_key, details_value in v.iteritems():
                            returndict[u'details'][details_key] = details_value

            # custom fields/details as a one off, not in an array
            # i.e. fields.something=value or details.something=value
            # move them to a dict for consistency in querying
            elif k.startswith('fields.') or k.startswith('details.'):
                newName = k.replace('fields.', '')
                newName = newName.lower().replace('details.', '')
                # add a dict to hold the details if it doesn't exist
                if 'details' not in returndict:
                    returndict[u'details'] = dict()
                # add field with a special case for shippers that
                # don't send details
                # in an array as int/floats/strings
                # we let them dictate the data type with field_datatype
                # convention
                if newName.endswith('_int'):
                    returndict[u'details'][unicode(newName)] = int(v)
                elif newName.endswith('_float'):
                    returndict[u'details'][unicode(newName)] = float(v)
                else:
                    returndict[u'details'][unicode(newName)] = toUnicode(v)
            else:
                returndict[u'details'][k] = v

        if 'utctimestamp' not in returndict:
            # default in case we don't find a reasonable timestamp
            returndict['utctimestamp'] = toUTC(datetime.now()).isoformat()

    except Exception as e:
        logger.exception(e)
        logger.error('Malformed message: %r' % aDict)

    return returndict
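The verb extraction above relies on CLOUDTRAIL_VERB_REGEX, defined elsewhere in the module; a plausible definition (an assumption, not shown in this excerpt) captures the leading camel-case word of the event name.

import re

CLOUDTRAIL_VERB_REGEX = re.compile(r'^([A-Z][^A-Z]*)')  # assumed definition

CLOUDTRAIL_VERB_REGEX.findall('DescribeInstances')[0]  # 'Describe' -> read-only
CLOUDTRAIL_VERB_REGEX.findall('RunInstances')[0]       # 'Run' -> not read-only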
Example #60
    def on_message(self, body, message):
        # print("RECEIVED MESSAGE: %r" % (body, ))
        try:
            # default elastic search metadata for an event
            metadata = {
                'index': 'events',
                'doc_type': 'event',
                'id': None
            }
            # just to be safe..check what we were sent.
            if isinstance(body, dict):
                bodyDict = body
            elif isinstance(body, str) or isinstance(body, unicode):
                try:
                    bodyDict = json.loads(body)   # let's assume it's json
                except ValueError:
                    # not json..ack but log the message
                    logger.error("Exception: unknown body type received: %r" % body)
                    message.ack()
                    return
            else:
                logger.error("Exception: unknown body type received: %r" % body)
                message.ack()
                return

            if 'customendpoint' in bodyDict and bodyDict['customendpoint']:
                # custom document
                # send to plugins to allow them to modify it if needed
                (normalizedDict, metadata) = sendEventToPlugins(bodyDict, metadata, pluginList)
            else:
                # normalize the dict
                # to the mozdef events standard
                normalizedDict = keyMapping(bodyDict)

                # send to plugins to allow them to modify it if needed
                if normalizedDict is not None and isinstance(normalizedDict, dict):
                    (normalizedDict, metadata) = sendEventToPlugins(normalizedDict, metadata, pluginList)

            # drop the message if a plug in set it to None
            # signaling a discard
            if normalizedDict is None:
                message.ack()
                return

            # make a json version for posting to elastic search
            jbody = json.JSONEncoder().encode(normalizedDict)

            if isCEF(normalizedDict):
                # cef records are set to the 'deviceproduct' field value.
                metadata['doc_type'] = 'cef'
                if 'details' in normalizedDict and 'deviceproduct' in normalizedDict['details']:
                    # don't create strange doc types..
                    if ' ' not in normalizedDict['details']['deviceproduct'] and '.' not in normalizedDict['details']['deviceproduct']:
                        metadata['doc_type'] = normalizedDict['details']['deviceproduct']

            try:
                bulk = False
                if options.esbulksize != 0:
                    bulk = True

                self.esConnection.save_event(
                    index=metadata['index'],
                    doc_id=metadata['id'],
                    doc_type=metadata['doc_type'],
                    body=jbody,
                    bulk=bulk
                )

            except (ElasticsearchBadServer, ElasticsearchInvalidIndex) as e:
                # handle loss of server or race condition with index rotation/creation/aliasing
                try:
                    self.esConnection = esConnect()
                    message.requeue()
                    return
                except kombu.exceptions.MessageStateError:
                    # state may be already set.
                    return
            except ElasticsearchException as e:
                # exception target for queue capacity issues reported by elastic search so catch the error, report it and retry the message
                try:
                    logger.exception('ElasticSearchException while indexing event: %r' % e)
                    logger.error('Malformed message body: %r' % body)
                    message.requeue()
                    return
                except kombu.exceptions.MessageStateError:
                    # state may be already set.
                    return
            # post the dict (kombu serializes it to json) to the events topic queue
            # using the ensure function to shortcut connection/queue drops/stalls, etc.
            # ensurePublish = self.connection.ensure(self.mqproducer, self.mqproducer.publish, max_retries=10)
            # ensurePublish(normalizedDict, exchange=self.topicExchange, routing_key='mozdef.event')
            message.ack()
        except Exception as e:
            logger.exception(e)
            logger.error('Malformed message body: %r' % body)
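For clarity, the CEF doc_type selection above reduces to the following; the deviceproduct value is invented.

# Illustrative only: deriving the Elasticsearch doc_type for a CEF event.
normalizedDict = {'details': {'deviceproduct': 'WAF'}}
metadata = {'doc_type': 'cef'}
product = normalizedDict['details'].get('deviceproduct', '')
# values containing spaces or dots are rejected to avoid strange doc types
if product and ' ' not in product and '.' not in product:
    metadata['doc_type'] = product  # -> 'WAF'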