def verify_defaults(self, result):
    assert result['category'] == 'suricata'
    assert result['eventsource'] == 'nsm'
    assert toUTC(result['receivedtimestamp']).isoformat() == result['receivedtimestamp']
    assert result['severity'] == 'INFO'
    assert 'event_type' in result
    assert 'source' in result
    assert toUTC(result['timestamp']).isoformat() == result['timestamp']
    assert toUTC(result['utctimestamp']).isoformat() == result['utctimestamp']

def verify_defaults(self, result):
    assert result['category'] == 'proxy'
    assert result['customendpoint'] == ' '
    assert result['eventsource'] == 'squid'
    assert toUTC(result['receivedtimestamp']).isoformat() == result['receivedtimestamp']
    assert result['severity'] == 'INFO'

def authenticate(self):
    # This value controls how long we sleep
    # between reauthenticating and getting a new set of creds
    # eventually this gets set by aws response
    self.flush_wait_time = 1800
    if options.cloudtrail_arn not in ['<cloudtrail_arn>', 'cloudtrail_arn']:
        client = boto3.client(
            'sts',
            aws_access_key_id=options.accesskey,
            aws_secret_access_key=options.secretkey)
        response = client.assume_role(
            RoleArn=options.cloudtrail_arn,
            RoleSessionName='MozDef-CloudTrail-Reader',
        )
        role_creds = {
            'aws_access_key_id': response['Credentials']['AccessKeyId'],
            'aws_secret_access_key': response['Credentials']['SecretAccessKey'],
            'aws_session_token': response['Credentials']['SessionToken']
        }
        current_time = toUTC(datetime.now())
        # Let's remove 3 seconds from the flush wait time just in case
        self.flush_wait_time = (response['Credentials']['Expiration'] - current_time).seconds - 3
    else:
        role_creds = {}
    role_creds['region_name'] = options.region
    self.s3_client = boto3.client('s3', **get_aws_credentials(**role_creds))

def authenticate(self):
    # This value controls how long we sleep
    # between reauthenticating and getting a new set of creds
    # eventually this gets set by aws response
    self.flush_wait_time = 1800
    if options.cloudtrail_arn not in ["<cloudtrail_arn>", "cloudtrail_arn"]:
        client = boto3.client(
            "sts",
            aws_access_key_id=options.accesskey,
            aws_secret_access_key=options.secretkey)
        response = client.assume_role(
            RoleArn=options.cloudtrail_arn,
            RoleSessionName="MozDef-CloudTrail-Reader")
        role_creds = {
            "aws_access_key_id": response["Credentials"]["AccessKeyId"],
            "aws_secret_access_key": response["Credentials"]["SecretAccessKey"],
            "aws_session_token": response["Credentials"]["SessionToken"],
        }
        current_time = toUTC(datetime.now())
        # Let's remove 3 seconds from the flush wait time just in case
        self.flush_wait_time = (response["Credentials"]["Expiration"] - current_time).seconds - 3
    else:
        role_creds = {}
    role_creds["region_name"] = options.region
    self.s3_client = boto3.client("s3", **get_aws_credentials(**role_creds))

def wrapper(query: SearchQuery, esindex: str) -> Optional[Entry]:
    results = query.execute(client, indices=[esindex]).get('hits', [])

    if len(results) == 0:
        return None

    state_dict = results[0].get('_source', {})

    try:
        state_dict['localities'] = [
            # Convert dictionary localities into `Locality`s after
            # parsing the `datetime` from `lastaction`.
            Locality(**_dict_take(
                {
                    k: v if k != 'lastaction' else toUTC(v)
                    for k, v in loc.items()
                },
                Locality._fields))
            for loc in state_dict['localities']
        ]

        eid = results[0]['_id']
        state = State(**_dict_take(state_dict, State._fields))

        return Entry(eid, state)
    except TypeError:
        return None
    except KeyError:
        return None

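# Hedged usage sketch for the lookup above, mirroring the test further down
# that drives locality.wrap_query()/locality.find(); the index name and
# username here are illustrative assumptions, not fixed values.
#
#   query_iface = locality.wrap_query(es_client)
#   entry = locality.find(query_iface, 'tester1', 'localities')
#   if entry is not None:
#       newest = max(loc.lastaction for loc in entry.state.localities)
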
def sync_alert_schedules():
    '''an endpoint to sync alert schedules'''
    if not request.body:
        response.status = 503
        return response

    alert_schedules = json.loads(request.body.read())
    request.body.close()

    response.content_type = "application/json"
    mongoclient = MongoClient(options.mongohost, options.mongoport)
    schedulers_db = mongoclient.meteor['alertschedules'].with_options(
        codec_options=CodecOptions(tz_aware=True))
    results = schedulers_db.find()
    for result in results:
        if result['name'] in alert_schedules:
            new_sched = alert_schedules[result['name']]
            result['total_run_count'] = new_sched['total_run_count']
            result['last_run_at'] = new_sched['last_run_at']
            if result['last_run_at']:
                result['last_run_at'] = toUTC(result['last_run_at'])
            logger.debug("Inserting schedule for {0} into mongodb".format(result['name']))
            schedulers_db.save(result)

    response.status = 200
    return response

def esCloseIndices():
    logger.debug('started')
    try:
        es = ElasticsearchClient((list('{0}'.format(s) for s in options.esservers)))
        indices = es.get_indices()
    except Exception as e:
        logger.error("Unhandled exception while connecting to ES, terminating: %r" % (e))
        # actually terminate, otherwise `indices` is undefined below
        return

    # examine each index pulled from get_indices
    # to determine if it meets aging criteria
    month_ago_date = toUTC(datetime.now()) - timedelta(days=int(options.index_age))
    month_ago_date = month_ago_date.replace(tzinfo=None)
    for index in indices:
        if 'events' in index:
            index_date = index.rsplit('-', 1)[1]
            logger.debug("Checking to see if Index: %s can be closed." % (index))
            if len(index_date) == 8:
                index_date_obj = datetime.strptime(index_date, '%Y%m%d')
                try:
                    if month_ago_date > index_date_obj:
                        logger.debug("Index: %s will be closed." % (index))
                        es.close_index(index)
                    else:
                        logger.debug("Index: %s does not meet aging criteria and will not be closed." % (index))
                except Exception as e:
                    logger.error("Unhandled exception while closing indices, terminating: %r" % (e))

def createAlertDict(self, summary, category, tags, events, severity='NOTICE', url=None, ircchannel=None):
    """
    Create an alert dict
    """
    alert = {
        'utctimestamp': toUTC(datetime.now()).isoformat(),
        'severity': severity,
        'summary': summary,
        'category': category,
        'tags': tags,
        'events': [],
        'ircchannel': ircchannel,
    }
    if url:
        alert['url'] = url

    for e in events:
        alert['events'].append({
            'documentindex': e['_index'],
            'documenttype': e['_type'],
            'documentsource': e['_source'],
            'documentid': e['_id']
        })
    self.log.debug(alert)
    return alert

def test_simple_query(self):
    objs = [{
        'type_': 'locality',
        'username': '******',
        'localities': [{
            'sourceipaddress': '1.2.3.4',
            'city': 'Toronto',
            'country': 'CA',
            'lastaction': toUTC(datetime.now()),
            'latitude': 43.6529,
            'longitude': -79.3849,
            'radius': 50
        }]
    }]
    for obj in objs:
        self.populate_test_event(obj)

    self.refresh(self.event_index_name)

    query_iface = locality.wrap_query(self.es_client)
    loc_cfg = config.Localities(self.event_index_name, 30, 50.0)
    entry = locality.find(query_iface, 'tester1', loc_cfg.es_index)

    assert entry is not None
    assert entry.state.username == 'tester1'

def createAlertDict(
    self,
    summary,
    category,
    tags,
    events,
    severity="NOTICE",
    url=None,
    ircchannel=None,
):
    """
    Create an alert dict
    """
    alert = {
        "utctimestamp": toUTC(datetime.now()).isoformat(),
        "severity": severity,
        "summary": summary,
        "category": category,
        "tags": tags,
        "events": [],
        "ircchannel": ircchannel,
    }
    if url:
        alert["url"] = url

    for e in events:
        alert["events"].append(
            {
                "documentindex": e["_index"],
                "documentsource": e["_source"],
                "documentid": e["_id"],
            }
        )
    self.log.debug(alert)
    return alert

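# Hedged usage sketch: `events` is expected to be a list of raw Elasticsearch
# hits (dicts carrying '_index', '_source' and '_id'), such as the 'hits' list
# returned by a SearchQuery; the summary/category/tags values are illustrative.
#
#   hits = search_query.execute(es_client)['hits']
#   alert = self.createAlertDict('example summary', 'bruteforce', ['ssh'], hits)
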
def update_alert_schedules():
    '''an endpoint to update alert schedules'''
    if not request.body:
        response.status = 503
        return response

    alert_schedules = json.loads(request.body.read())
    request.body.close()

    response.content_type = "application/json"
    mongoclient = MongoClient(options.mongohost, options.mongoport)
    schedulers_db = mongoclient.meteor['alertschedules'].with_options(
        codec_options=CodecOptions(tz_aware=True))
    schedulers_db.remove()

    for alert_name, alert_schedule in alert_schedules.items():
        if alert_schedule['last_run_at']:
            alert_schedule['last_run_at'] = toUTC(alert_schedule['last_run_at'])
        logger.debug("Inserting schedule for {0} into mongodb".format(alert_name))
        schedulers_db.insert(alert_schedule)

    response.status = 200
    return response

def onMessage(self, message):
    try:
        # default elastic search metadata for an event
        metadata = {"index": "events", "id": None}
        event = {}

        event["receivedtimestamp"] = toUTC(datetime.now()).isoformat()
        event["mozdefhostname"] = self.options.mozdefhostname
        event["details"] = json.loads(message.data.decode("UTF-8"))

        if "tags" in event["details"]:
            # list.extend() returns None, so build the tag list explicitly
            # instead of assigning the result of extend()
            event["tags"] = event["details"]["tags"] + [self.options.resource_name]
        else:
            event["tags"] = [self.options.resource_name]
        event["tags"].extend(["pubsub"])

        (event, metadata) = sendEventToPlugins(event, metadata, self.pluginList)
        # Drop message if plugins set to None
        if event is None:
            message.ack()
            return
        self.save_event(event, metadata)
        message.ack()
    except Exception as e:
        logger.exception(e)
        logger.error("Malformed message: %r" % message)
        message.ack()

def esPruneIndexes():
    if options.output == 'syslog':
        logger.addHandler(SysLogHandler(address=(options.sysloghostname, options.syslogport)))
    else:
        sh = logging.StreamHandler(sys.stderr)
        sh.setFormatter(formatter)
        logger.addHandler(sh)

    logger.debug('started')
    try:
        es = ElasticsearchClient((list('{0}'.format(s) for s in options.esservers)))
        indices = es.get_indices()
        # do the pruning
        for (index, dobackup, rotation, pruning) in zip(options.indices, options.dobackup, options.rotation, options.pruning):
            try:
                if pruning != '0':
                    index_to_prune = index
                    if rotation == 'daily':
                        idate = date.strftime(toUTC(datetime.now()) - timedelta(days=int(pruning)), '%Y%m%d')
                        index_to_prune += '-%s' % idate
                    elif rotation == 'monthly':
                        idate = date.strftime(datetime.utcnow() - timedelta(days=31 * int(pruning)), '%Y%m')
                        index_to_prune += '-%s' % idate

                    if index_to_prune in indices:
                        logger.debug('Deleting index: %s' % index_to_prune)
                        es.delete_index(index_to_prune, True)
                    else:
                        logger.error('Error deleting index %s, index missing' % index_to_prune)
            except Exception as e:
                logger.error("Unhandled exception while deleting %s, terminating: %r" % (index_to_prune, e))
    except Exception as e:
        logger.error("Unhandled exception, terminating: %r" % e)

def getQueueSizes():
    logger.debug('starting')
    logger.debug(options)
    es = ElasticsearchClient(options.esservers)
    sqs_client = boto3.client(
        "sqs",
        region_name=options.region,
        aws_access_key_id=options.accesskey,
        aws_secret_access_key=options.secretkey)

    queues_stats = {
        'queues': [],
        'total_feeds': len(options.taskexchange),
        'total_messages_ready': 0,
        'username': '******'
    }
    for queue_name in options.taskexchange:
        logger.debug('Looking for sqs queue stats in queue ' + queue_name)
        queue_url = sqs_client.get_queue_url(QueueName=queue_name)['QueueUrl']
        queue_attributes = sqs_client.get_queue_attributes(
            QueueUrl=queue_url,
            AttributeNames=['All'])['Attributes']
        queue_stats = {
            'queue': queue_name,
        }
        if 'ApproximateNumberOfMessages' in queue_attributes:
            queue_stats['messages_ready'] = int(queue_attributes['ApproximateNumberOfMessages'])
            queues_stats['total_messages_ready'] += queue_stats['messages_ready']
        if 'ApproximateNumberOfMessagesNotVisible' in queue_attributes:
            queue_stats['messages_inflight'] = int(queue_attributes['ApproximateNumberOfMessagesNotVisible'])
        if 'ApproximateNumberOfMessagesDelayed' in queue_attributes:
            queue_stats['messages_delayed'] = int(queue_attributes['ApproximateNumberOfMessagesDelayed'])
        queues_stats['queues'].append(queue_stats)

    # setup a log entry for health/status.
    sqsid = '{0}-{1}'.format(options.account, options.region)
    healthlog = dict(
        utctimestamp=toUTC(datetime.now()).isoformat(),
        hostname=sqsid,
        processid=os.getpid(),
        processname=sys.argv[0],
        severity='INFO',
        summary='mozdef health/status',
        category='mozdef',
        source='aws-sqs',
        tags=[],
        details=queues_stats)
    healthlog['tags'] = ['mozdef', 'status', 'sqs']
    healthlog['type'] = 'mozdefhealth'
    # post to elasticsearch servers directly without going through
    # message queues in case there is an availability issue
    es.save_event(index=options.index, body=json.dumps(healthlog))
    # post another doc with a static docid and tag
    # for use when querying for the latest sqs status
    healthlog['tags'] = ['mozdef', 'status', 'sqs-latest']
    es.save_event(index=options.index, doc_id=getDocID(sqsid), body=json.dumps(healthlog))

def on_message(self, body, message):
    try:
        # just to be safe..check what we were sent.
        if isinstance(body, dict):
            body_dict = body
        elif isinstance(body, str) or isinstance(body, unicode):
            try:
                body_dict = json.loads(body)  # lets assume it's json
            except ValueError as e:
                # not json..ack but log the message
                logger.exception("mozdefbot_slack exception: unknown body type received %r" % body)
                return
        else:
            logger.exception("mozdefbot_slack exception: unknown body type received %r" % body)
            return

        if 'notify_mozdefbot' in body_dict and body_dict['notify_mozdefbot'] is False:
            # If the alert tells us to not notify, then don't post message
            message.ack()
            return

        # process valid message
        # see where we send this alert
        channel = options.default_alert_channel
        if 'ircchannel' in body_dict:
            if body_dict['ircchannel'] in options.channels:
                channel = body_dict['ircchannel']

        # see if we need to delay a bit before sending the alert, to avoid
        # flooding the channel
        if self.lastalert is not None:
            delta = toUTC(datetime.now()) - self.lastalert
            logger.info('new alert, delta since last is {}\n'.format(delta))
            if delta.seconds < 2:
                logger.info('throttling before writing next alert\n')
                time.sleep(1)
        self.lastalert = toUTC(datetime.now())
        if len(body_dict['summary']) > 450:
            logger.info('alert is more than 450 bytes, truncating\n')
            body_dict['summary'] = body_dict['summary'][:450] + ' truncated...'

        logger.info("Posting alert: {0}".format(body_dict['summary']))
        self.bot.post_alert_message(body_dict, channel)
        message.ack()
    except ValueError as e:
        logger.exception("mozdefbot_slack exception while processing events queue %r" % e)

def _retrieve_duplicate_chain(
    api: RESTConfig,
    label: AlertLabel,
    email: Email,
) -> types.Optional[DuplicateChain]:
    url = "{}/alerttriagechain".format(api.url)
    payload = {
        "alert": label.value,
        "user": email,
    }

    jwt_auth = None
    if api.token is not None:
        jwt_auth = JWTAuth(api.token)
        jwt_auth.set_header_format("Bearer %s")

    try:
        resp = requests.get(url, params=payload, auth=jwt_auth)
        resp_data = resp.json()
    except json.JSONDecodeError as ex:
        raise APIError("Did not receive JSON response: {}".format(ex))
    except requests.exceptions.RequestException as ex:
        raise APIError("Failed to make request: {}".format(ex))

    error = resp_data.get("error")
    if error is not None:
        if resp.status_code != 200:
            raise APIError(error)
        return None  # No duplicate chain found

    ids = resp_data.get("identifiers", [])
    if len(ids) == 0:
        return None

    try:
        created = toUTC(resp_data["created"])
        modified = toUTC(resp_data["modified"])
    except KeyError:
        raise APIError("Duplicate chain data missing created or modified field")
    except ValueError:
        raise APIError("Duplicate chain data contains unexpected timestamps")

    return DuplicateChain(ids, created, modified)

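# Hedged usage sketch: RESTConfig is assumed to expose `url` and an optional
# `token`; the endpoint URL and the way `label` is obtained are hypothetical.
#
#   api = RESTConfig(url='https://restapi.example.com', token=None)
#   try:
#       chain = _retrieve_duplicate_chain(api, label, 'user@example.com')
#   except APIError:
#       chain = None  # treat API failures the same as "no chain found"
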
def on_message(self, body, message):
    try:
        # just to be safe..check what we were sent.
        if isinstance(body, dict):
            body_dict = body
        elif isinstance(body, str):
            try:
                body_dict = json.loads(body)  # lets assume it's json
            except ValueError as e:
                # not json..ack but log the message
                logger.exception("mozdefbot_slack exception: unknown body type received %r" % body)
                return
        else:
            logger.exception("mozdefbot_slack exception: unknown body type received %r" % body)
            return

        if 'notify_mozdefbot' in body_dict and body_dict['notify_mozdefbot'] is False:
            # If the alert tells us to not notify, then don't post message
            message.ack()
            return

        # process valid message
        # see where we send this alert
        channel = options.default_alert_channel
        if 'ircchannel' in body_dict:
            if body_dict['ircchannel'] in options.channels:
                channel = body_dict['ircchannel']

        # see if we need to delay a bit before sending the alert, to avoid
        # flooding the channel
        if self.lastalert is not None:
            delta = toUTC(datetime.now()) - self.lastalert
            logger.info('new alert, delta since last is {}\n'.format(delta))
            if delta.seconds < 2:
                logger.info('throttling before writing next alert\n')
                time.sleep(1)
        self.lastalert = toUTC(datetime.now())
        if len(body_dict['summary']) > 450:
            logger.info('alert is more than 450 bytes, truncating\n')
            body_dict['summary'] = body_dict['summary'][:450] + ' truncated...'

        logger.info("Posting alert: {0}".format(body_dict['summary']))
        self.bot.post_alert_message(body_dict, channel)
        message.ack()
    except ValueError as e:
        logger.exception("mozdefbot_slack exception while processing events queue %r" % e)

def run(self):
    while True:
        try:
            curRequestTime = toUTC(datetime.now()) - timedelta(seconds=options.ptbackoff)
            records = self.ptrequestor.request(options.ptquery, self.lastRequestTime, curRequestTime)
            # update last request time for the next request
            self.lastRequestTime = curRequestTime

            for msgid in records:
                msgdict = records[msgid]
                # strip any line feeds from the message itself, we just convert them
                # into spaces
                msgdict["message"] = msgdict["message"].replace("\n", " ").replace("\r", "")

                event = dict()
                event["tags"] = ["papertrail", options.ptacctname]
                event["details"] = msgdict

                if "generated_at" in event["details"]:
                    event["utctimestamp"] = toUTC(event["details"]["generated_at"]).isoformat()
                if "hostname" in event["details"]:
                    event["hostname"] = event["details"]["hostname"]
                if "message" in event["details"]:
                    event["summary"] = event["details"]["message"]
                if "severity" in event["details"]:
                    event["severity"] = event["details"]["severity"]
                    if "source_ip" in event["details"]:
                        event["sourceipaddress"] = event["details"]["source_ip"]
                else:
                    event["severity"] = "INFO"
                event["category"] = "syslog"

                # process message
                self.on_message(event, msgdict)

            time.sleep(options.sleep_time)
        except ValueError as e:
            logger.exception("Exception while handling message: %r" % e)

def run(self):
    while True:
        try:
            curRequestTime = toUTC(datetime.now()) - timedelta(seconds=options.ptbackoff)
            records = self.ptrequestor.request(options.ptquery, self.lastRequestTime, curRequestTime)
            # update last request time for the next request
            self.lastRequestTime = curRequestTime

            for msgid in records:
                msgdict = records[msgid]
                # strip any line feeds from the message itself, we just convert them
                # into spaces
                msgdict['message'] = msgdict['message'].replace('\n', ' ').replace('\r', '')

                event = dict()
                event['tags'] = ['papertrail', options.ptacctname]
                event['details'] = msgdict

                if 'generated_at' in event['details']:
                    event['utctimestamp'] = toUTC(event['details']['generated_at']).isoformat()
                if 'hostname' in event['details']:
                    event['hostname'] = event['details']['hostname']
                if 'message' in event['details']:
                    event['summary'] = event['details']['message']
                if 'severity' in event['details']:
                    event['severity'] = event['details']['severity']
                    if 'source_ip' in event['details']:
                        event['sourceipaddress'] = event['details']['source_ip']
                else:
                    event['severity'] = 'INFO'
                event['category'] = 'syslog'

                # process message
                self.on_message(event, msgdict)

            time.sleep(options.ptinterval)
        except ValueError as e:
            logger.exception('Exception while handling message: %r' % e)

def main(self):
    search_query = SearchQuery(hours=6)
    day_old_date = toUTC(datetime.now() - timedelta(days=1)).isoformat()
    search_query.add_must(LessThanMatch('utctimestamp', day_old_date))

    self.filtersManual(search_query)
    self.searchEventsAggregated('mozdefhostname', samplesLimit=1000)
    self.walkAggregations(threshold=1)

def auth_headers(self):
    headers = {}
    utcnow = toUTC(datetime.now())
    date = utcnow.strftime("%a, %d %b %Y %H:%M:%S GMT")
    authVar = jwt.encode({'iss': self.key}, self.secret, algorithm='HS256')
    authorization = "Bearer %s" % (authVar.decode('utf-8'))
    headers['date'] = date
    headers['Authorization'] = authorization
    return headers

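# Hedged usage sketch: the dict built above is meant to be handed straight to
# an HTTP client; the URL is a placeholder.
#
#   resp = requests.get('https://api.example.com/v1/events',
#                       headers=self.auth_headers())
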
def genNewAttacker():
    newAttacker = dict()
    newAttacker['_id'] = genMeteorID()
    newAttacker['lastseentimestamp'] = toUTC(datetime.now())
    newAttacker['firstseentimestamp'] = toUTC(datetime.now())
    newAttacker['eventscount'] = 0
    newAttacker['alerts'] = list()
    newAttacker['alertscount'] = 0
    newAttacker['category'] = 'unknown'
    newAttacker['score'] = 0
    newAttacker['geocoordinates'] = dict(countrycode='', longitude=0, latitude=0)
    newAttacker['tags'] = list()
    newAttacker['notes'] = list()
    newAttacker['indicators'] = list()
    newAttacker['attackphase'] = 'unknown'
    newAttacker['datecreated'] = toUTC(datetime.now())
    newAttacker['creator'] = sys.argv[0]
    return newAttacker

def __init__(self, ptRequestor, esConnection):
    self.ptrequestor = ptRequestor
    self.esConnection = esConnection

    # calculate our initial request window
    self.lastRequestTime = toUTC(datetime.now()) - \
        timedelta(seconds=options.ptinterval) - \
        timedelta(seconds=options.ptbackoff)

    if options.esbulksize != 0:
        # if we are bulk posting enable a timer to occasionally flush the bulker even if it's not full
        # to prevent events from sticking around an idle worker
        self.esConnection.start_bulk_timer()

def handle_event(event):
    timestamp = toUTC(datetime.now()).isoformat()
    event['timestamp'] = timestamp
    event['receivedtimestamp'] = timestamp
    event['utctimestamp'] = timestamp

    # add demo to the tags so it's clear it's not real data.
    if 'tags' not in event:
        event['tags'] = list()
    # append as a one-element list; `+= 'demodata'` would extend the
    # tag list with the individual characters of the string
    event['tags'] += ['demodata']
    return event

def new(executed_at: Optional[datetime] = None) -> 'ExecutionState':
    '''Construct a new `ExecutionState` representing the execution of an
    alert at a specific time.  By default, the execution time will be set
    to when this function is called if not explicitly provided.
    '''
    if executed_at is None:
        executed_at = toUTC(datetime.now())

    return ExecutionState(_TYPE_NAME, executed_at)

def test_add_required_fields_default(self):
    mock_class = MockHostname()
    socket.gethostname = mock_class.hostname

    self.event.add_required_fields()
    assert self.event['receivedtimestamp'] is not None
    assert toUTC(self.event['receivedtimestamp']).isoformat() == self.event['receivedtimestamp']
    assert self.event['utctimestamp'] is not None
    assert toUTC(self.event['utctimestamp']).isoformat() == self.event['utctimestamp']
    assert self.event['timestamp'] is not None
    assert toUTC(self.event['timestamp']).isoformat() == self.event['timestamp']
    assert self.event['mozdefhostname'] == 'randomhostname'
    assert self.event['tags'] == []
    assert self.event['category'] == 'UNKNOWN'
    assert self.event['hostname'] == 'UNKNOWN'
    assert self.event['processid'] == 'UNKNOWN'
    assert self.event['processname'] == 'UNKNOWN'
    assert self.event['severity'] == 'UNKNOWN'
    assert self.event['source'] == 'UNKNOWN'
    assert self.event['summary'] == 'example summary'
    assert self.event['tags'] == []
    assert self.event['details'] == {}

def execute(self, elasticsearch_client, indices=['events', 'events-previous'], size=1000, request_timeout=30):
    if self.must == [] and self.must_not == [] and self.should == [] and self.aggregation == []:
        raise AttributeError('Must define a must, must_not, should query, or aggregation')

    if self.date_timedelta:
        end_date = toUTC(datetime.now())
        begin_date = toUTC(datetime.now() - timedelta(**self.date_timedelta))
        utc_range_query = RangeMatch('utctimestamp', begin_date, end_date)
        received_range_query = RangeMatch('receivedtimestamp', begin_date, end_date)
        range_query = utc_range_query | received_range_query
        self.add_must(range_query)

    search_query = BooleanMatch(must=self.must, must_not=self.must_not, should=self.should)

    results = []
    if len(self.aggregation) == 0:
        results = elasticsearch_client.search(search_query, indices, size, request_timeout)
    else:
        results = elasticsearch_client.aggregated_search(search_query, indices, self.aggregation, size, request_timeout)

    return results

def execute(self, elasticsearch_client, indices=['events-*'], size=1000, request_timeout=30):
    if self.must == [] and self.must_not == [] and self.should == [] and self.aggregation == []:
        raise AttributeError('Must define a must, must_not, should query, or aggregation')

    if self.date_timedelta:
        end_date = toUTC(datetime.now())
        begin_date = toUTC(datetime.now() - timedelta(**self.date_timedelta))
        utc_range_query = RangeMatch('utctimestamp', begin_date, end_date)
        received_range_query = RangeMatch('receivedtimestamp', begin_date, end_date)
        range_query = utc_range_query | received_range_query
        self.add_must(range_query)

    search_query = BooleanMatch(must=self.must, must_not=self.must_not, should=self.should)

    results = []
    if len(self.aggregation) == 0:
        results = elasticsearch_client.search(search_query, indices, size, request_timeout)
    else:
        results = elasticsearch_client.aggregated_search(search_query, indices, self.aggregation, size, request_timeout)

    return results

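# Hedged usage sketch mirroring callers elsewhere in this section (a
# SearchQuery with a relative window, a term filter and an aggregation);
# the field and category values are illustrative.
#
#   search_query = SearchQuery(minutes=15)
#   search_query.add_must(TermMatch('category', 'syslog'))
#   search_query.add_aggregation(Aggregation('hostname'))
#   results = search_query.execute(es_client, indices=['events-*'])
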
def setup(self):
    super().setup()

    index = 'localities'
    if self.config_delete_indexes:
        self.es_client.delete_index(index, True)
        self.es_client.create_index(index)

    journal = geomodel.wrap_journal(self.es_client)

    def state(username, locs):
        return geomodel.State('locality', username, locs)

    def locality(cfg):
        return geomodel.Locality(**cfg)

    test_state = state('tester1', [
        locality({
            'sourceipaddress': '53.12.88.76',
            'city': 'Portland',
            'country': 'US',
            'lastaction': toUTC(datetime.now()) - timedelta(minutes=2),
            'latitude': 45.5234,
            'longitude': -122.6762,
            'radius': 50,
        }),
        locality({
            'sourceipaddress': '1.2.3.4',
            'city': 'Portland',
            'country': 'US',
            'lastaction': toUTC(datetime.now()) - timedelta(minutes=3),
            'latitude': 45.5234,
            'longitude': -122.6762,
            'radius': 50,
        })
    ])

    journal(geomodel.Entry.new(test_state), index)
    self.refresh(index)

def convert_key_date_format(self, needle, haystack):
    num_levels = needle.split(".")
    if len(num_levels) == 0:
        return False
    current_pointer = haystack
    for updated_key in num_levels:
        if updated_key == num_levels[-1]:
            current_pointer[updated_key] = toUTC(current_pointer[updated_key]).isoformat()
            return haystack
        if updated_key in current_pointer:
            current_pointer = current_pointer[updated_key]
        else:
            return haystack

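# Worked example (illustrative values) for the dotted-key walk above:
#
#   needle   = 'details.event_time'
#   haystack = {'details': {'event_time': '2019-07-04 10:33:31+00:00'}}
#   convert_key_date_format(needle, haystack)
#   # haystack['details']['event_time'] is rewritten in place to the
#   # toUTC(...).isoformat() form; a missing intermediate key leaves the
#   # haystack untouched.
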
class TestAlreadyProcessedEvents(GeoModelTest):
    alert_filename = 'geomodel_location'
    alert_classname = 'AlertGeoModel'

    default_event = {
        '_source': {
            'details': {
                'sourceipaddress': '1.2.3.4',
                'username': '******',
                'sourceipgeolocation': {
                    'city': 'Portland',
                    'country_code': 'US',
                    'latitude': 45.5234,
                    'longitude': -122.6762,
                },
            },
            'tags': ['auth0'],
        }
    }

    events = [AlertTestSuite.create_event(default_event)]
    events[0]['_source']['utctimestamp'] = AlertTestSuite.subtract_from_timestamp_lambda({'minutes': 4})

    test_cases = [
        NegativeAlertTestCase(
            description='Should not fire if it encounters events older than the state file',
            events=events,
        )
    ]

    test_states = [
        state(
            'tester1',
            [
                locality(
                    {
                        'sourceipaddress': '1.2.3.4',
                        'city': 'Portland',
                        'country': 'US',
                        'lastaction': toUTC(datetime.now()),
                        'latitude': 45.5234,
                        'longitude': -122.6762,
                        'radius': 50,
                    }
                )
            ],
        )
    ]

def enrich(
    alert: dict,
    search_window_hours: int,
    vpn_ip_cidrs: types.List[str],
    search_fn: types.Callable[[SearchQuery], types.List[dict]],
) -> dict:
    '''Search for events that describe an assignment of a VPN IP address
    to the sourceipaddress in an alert.
    '''
    details = alert.get('details', {})
    source_ip = details.get('sourceipaddress')

    if source_ip is None:
        return alert

    if netaddr.IPAddress(source_ip) not in netaddr.IPSet(vpn_ip_cidrs):
        return alert

    search_vpn_assignment = SearchQuery({
        'hours': search_window_hours,
    })
    search_vpn_assignment.add_must([
        TermMatch('tags', 'vpn'),
        TermMatch('tags', 'netfilter'),
        TermMatch('details.success', 'true'),
        TermMatch('details.vpnip', source_ip),
        PhraseMatch('summary', 'netfilter add upon connection'),
    ])

    assign_events = sorted(
        [hit.get('_source', {}) for hit in search_fn(search_vpn_assignment)],
        key=lambda evt: toUTC(evt['utctimestamp']),
        reverse=True,  # Sort into descending order from most recent to least.
    )

    if len(assign_events) == 0:
        return alert

    event = assign_events[0]

    details['vpnassignment'] = {
        'username': event['details']['username'],
        'originalip': event['details']['sourceipaddress'],
    }
    alert['details'] = details

    return alert

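# Hedged usage sketch: `search_fn` only has to accept a SearchQuery and return
# a list of raw hits, so callers can pass a thin closure over a client (or a
# stub in tests); the CIDR below is an example value.
#
#   def search_fn(query):
#       return query.execute(es_client).get('hits', [])
#
#   enriched = enrich(alert, 24, ['10.48.240.0/24'], search_fn)
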
def test_do_not_alert_when_travel_possible(self):
    evts = [
        locality.Locality(
            sourceipaddress='1.2.3.123',
            city='Toronto',
            country='CA',
            lastaction=toUTC(datetime.now()) - timedelta(minutes=5),
            latitude=43.6529,
            longitude=-79.3849,
            radius=50),
        locality.Locality(
            sourceipaddress='123.3.2.1',
            city='San Francisco',
            country='US',
            lastaction=toUTC(datetime.now()) - timedelta(hours=10),
            latitude=37.773972,
            longitude=-122.431297,
            radius=50)
    ]

    alert_produced = alert('tester1', evts, [], 'INFO')
    assert alert_produced is None

def process_alerts(mozmsg, uptycs_alerts):
    for alert in uptycs_alerts:
        details = {}
        mozmsg.timestamp = toUTC(alert["alertTime"]).isoformat()
        mozmsg.log["hostname"] = alert["asset"]["hostName"]
        for item in alert:
            details[item] = alert[item]
        localdetails = normalize(details)
        mozmsg.details = localdetails
        mozmsg.summary = "{} severity {} on {}".format(
            alert["severity"], alert["displayName"], alert["asset"]["hostName"])
        mozmsg.send()

def updateMongo(mozdefdb, esAlerts):
    alerts = mozdefdb['alerts']
    for a in esAlerts['hits']:
        # insert alert into mongo if we don't already have it
        alertrecord = alerts.find_one({'esmetadata.id': a['_id']})
        if alertrecord is None:
            # new record
            mrecord = a['_source']
            # generate a meteor-compatible ID
            mrecord['_id'] = genMeteorID()
            # capture the elastic search meta data (index/id/type)
            # set the date back to a datetime from unicode, so mongo/meteor can properly sort, select.
            mrecord['utctimestamp'] = toUTC(mrecord['utctimestamp'])
            # also set an epoch time field so minimongo can sort
            mrecord['utcepoch'] = calendar.timegm(mrecord['utctimestamp'].utctimetuple())
            mrecord['esmetadata'] = dict()
            mrecord['esmetadata']['id'] = a['_id']
            mrecord['esmetadata']['index'] = a['_index']
            alerts.insert(mrecord)

def esSearch(es):
    search_query = SearchQuery(minutes=options.aggregationminutes)
    search_query.add_aggregation(Aggregation('category'))
    results = search_query.execute(es)

    mozdefstats = dict(utctimestamp=toUTC(datetime.now()).isoformat())
    mozdefstats['category'] = 'stats'
    mozdefstats['hostname'] = socket.gethostname()
    mozdefstats['mozdefhostname'] = mozdefstats['hostname']
    mozdefstats['severity'] = 'INFO'
    mozdefstats['source'] = 'mozdef'
    mozdefstats['tags'] = ['mozdef', 'stats']
    mozdefstats['summary'] = 'Aggregated category counts'
    mozdefstats['processid'] = os.getpid()
    mozdefstats['processname'] = sys.argv[0]
    mozdefstats['details'] = dict(counts=list())

    for bucket in results['aggregations']['category']['terms']:
        entry = dict()
        entry[bucket['key']] = bucket['count']
        mozdefstats['details']['counts'].append(entry)
    return mozdefstats

def createAlertDict(self, summary, category, tags, events, severity='NOTICE', url=None, ircchannel=None):
    """
    Create an alert dict
    """
    alert = {
        'utctimestamp': toUTC(datetime.now()).isoformat(),
        'severity': severity,
        'summary': summary,
        'category': category,
        'tags': tags,
        'events': [],
        'ircchannel': ircchannel,
    }
    if url:
        alert['url'] = url

    for e in events:
        alert['events'].append({
            'documentindex': e['_index'],
            'documentsource': e['_source'],
            'documentid': e['_id']})
    self.log.debug(alert)
    return alert

def onMessage(self, message, metadata):
    if 'tags' not in message:
        return (message, metadata)
    if 'githubeventsqs' not in message['tags']:
        return (message, metadata)

    newmessage = {}
    newmessage['details'] = {}

    newmessage['category'] = 'github'
    newmessage['tags'] = ['github', 'webhook']
    newmessage['eventsource'] = 'githubeventsqs'
    if 'event' in message['details']:
        newmessage['source'] = message['details']['event']
    else:
        newmessage['source'] = 'UNKNOWN'
    if 'request_id' in message['details']:
        newmessage['details']['request_id'] = message['details']['request_id']
    else:
        newmessage['details']['request_id'] = 'UNKNOWN'

    # iterate through top level keys - push, etc
    if newmessage['source'] in self.eventtypes:
        for key in self.yap[newmessage['source']]:
            mappedvalue = jmespath.search(self.yap[newmessage['source']][key], message)
            # JMESPath likes to silently return a None object
            if mappedvalue is not None:
                newmessage['details'][key] = mappedvalue
        if 'commit_ts' in newmessage['details']:
            newmessage['timestamp'] = newmessage['details']['commit_ts']
            newmessage['utctimestamp'] = toUTC(newmessage['details']['commit_ts']).isoformat()
    else:
        newmessage = None

    return (newmessage, metadata)

def onMessage(self, message, metadata):
    # make sure I really wanted to see this message
    # bail out early if not
    if u'customendpoint' not in message:
        return message, metadata
    if u'category' not in message:
        return message, metadata
    if message['category'] != 'suricata':
        return message, metadata

    # move Suricata specific fields under 'details' while preserving metadata
    newmessage = dict()

    # Set NSM as type for categorical filtering of events.
    newmessage["type"] = "nsm"

    try:
        newmessage['details'] = json.loads(message['message'])
    except:
        newmessage['details'] = {}
        newmessage['rawdetails'] = message

    # move some fields that are expected at the event 'root' where they belong
    if 'host_from' in message:
        newmessage['hostname'] = message['host_from']
    if 'tags' in message:
        newmessage['tags'] = message['tags']
    if 'category' in message:
        newmessage['category'] = message['category']
    newmessage[u'source'] = u'unknown'
    if 'source' in message:
        newmessage[u'source'] = message['source']
    logtype = newmessage['source']
    newmessage[u'event_type'] = u'unknown'
    if 'event_type' in message:
        newmessage[u'event_type'] = message['event_type']
    eventtype = newmessage['event_type']

    # add mandatory fields
    if 'flow' in newmessage['details']:
        if 'start' in newmessage['details']['flow']:
            newmessage[u'utctimestamp'] = toUTC(newmessage['details']['flow']['start']).isoformat()
            newmessage[u'timestamp'] = toUTC(newmessage['details']['flow']['start']).isoformat()
    else:
        # a malformed message somehow managed to crawl to us, let's put it somewhat together
        newmessage[u'utctimestamp'] = toUTC(datetime.now()).isoformat()
        newmessage[u'timestamp'] = toUTC(datetime.now()).isoformat()

    newmessage[u'receivedtimestamp'] = toUTC(datetime.now()).isoformat()
    newmessage[u'eventsource'] = u'nsm'
    newmessage[u'severity'] = u'INFO'
    newmessage[u'mozdefhostname'] = self.mozdefhostname

    if 'details' in newmessage:
        newmessage[u'details'][u'sourceipaddress'] = "0.0.0.0"
        newmessage[u'details'][u'destinationipaddress'] = "0.0.0.0"
        newmessage[u'details'][u'sourceport'] = 0
        newmessage[u'details'][u'destinationport'] = 0
        if 'alert' in newmessage[u'details']:
            newmessage[u'details'][u'suricata_alert'] = newmessage[u'details'][u'alert']
            del(newmessage[u'details'][u'alert'])
        if 'src_ip' in newmessage['details']:
            newmessage[u'details'][u'sourceipaddress'] = newmessage['details']['src_ip']
            del(newmessage['details']['src_ip'])
        if 'src_port' in newmessage['details']:
            newmessage[u'details'][u'sourceport'] = newmessage['details']['src_port']
            del(newmessage['details']['src_port'])
        if 'dest_ip' in newmessage['details']:
            newmessage[u'details'][u'destinationipaddress'] = newmessage['details']['dest_ip']
            del(newmessage['details']['dest_ip'])
        if 'dest_port' in newmessage['details']:
            newmessage[u'details'][u'destinationport'] = newmessage['details']['dest_port']
            del(newmessage['details']['dest_port'])
        if 'file_name' in newmessage['details']:
            del(newmessage['details']['file_name'])
        if 'message' in newmessage['details']:
            del(newmessage['details']['message'])
        if 'source' in newmessage['details']:
            del(newmessage['details']['source'])

    if logtype == 'eve-log':
        if eventtype == 'alert':
            # Truncate packet, payload and payload_printable to reasonable sizes
            if 'packet' in newmessage[u'details']:
                newmessage[u'details'][u'packet'] = newmessage[u'details'][u'packet'][0:4095]
            if 'payload' in newmessage[u'details']:
                newmessage[u'details'][u'payload'] = newmessage[u'details'][u'payload'][0:4095]
            if 'payload_printable' in newmessage[u'details']:
                newmessage[u'details'][u'payload_printable'] = newmessage[u'details'][u'payload_printable'][0:4095]

            # Match names to Bro
            newmessage[u'details'][u'originipbytes'] = 0
            newmessage[u'details'][u'responseipbytes'] = 0
            newmessage[u'details'][u'orig_pkts'] = 0
            newmessage[u'details'][u'resp_pkts'] = 0
            if 'flow' in newmessage[u'details']:
                if 'bytes_toserver' in newmessage[u'details'][u'flow']:
                    newmessage[u'details'][u'originipbytes'] = newmessage['details']['flow']['bytes_toserver']
                    del(newmessage['details']['flow']['bytes_toserver'])
                if 'bytes_toclient' in newmessage[u'details'][u'flow']:
                    newmessage[u'details'][u'responseipbytes'] = newmessage['details']['flow']['bytes_toclient']
                    del(newmessage['details']['flow']['bytes_toclient'])
                if 'pkts_toserver' in newmessage[u'details'][u'flow']:
                    newmessage[u'details'][u'orig_pkts'] = newmessage['details']['flow']['pkts_toserver']
                    del(newmessage['details']['flow']['pkts_toserver'])
                if 'pkts_toclient' in newmessage[u'details'][u'flow']:
                    newmessage[u'details'][u'resp_pkts'] = newmessage['details']['flow']['pkts_toclient']
                    del(newmessage['details']['flow']['pkts_toclient'])
            if 'http' in newmessage[u'details']:
                if 'hostname' in newmessage[u'details'][u'http']:
                    newmessage[u'details'][u'host'] = newmessage[u'details'][u'http'][u'hostname']
                    del(newmessage[u'details'][u'http'][u'hostname'])
                if 'http_method' in newmessage[u'details'][u'http']:
                    newmessage[u'details'][u'method'] = newmessage[u'details'][u'http'][u'http_method']
                    del(newmessage[u'details'][u'http'][u'http_method'])
                if 'http_user_agent' in newmessage[u'details'][u'http']:
                    newmessage[u'details'][u'user_agent'] = newmessage[u'details'][u'http'][u'http_user_agent']
                    del(newmessage[u'details'][u'http'][u'http_user_agent'])
                if 'status' in newmessage[u'details'][u'http']:
                    newmessage[u'details'][u'status_code'] = newmessage[u'details'][u'http'][u'status']
                    del(newmessage[u'details'][u'http'][u'status'])
                if 'url' in newmessage[u'details'][u'http']:
                    newmessage[u'details'][u'uri'] = newmessage[u'details'][u'http'][u'url']
                    del(newmessage[u'details'][u'http'][u'url'])
                if 'redirect' in newmessage[u'details'][u'http']:
                    newmessage[u'details'][u'redirect_dst'] = newmessage[u'details'][u'http'][u'redirect']
                    del(newmessage[u'details'][u'http'][u'redirect'])
                if 'length' in newmessage[u'details'][u'http']:
                    newmessage[u'details'][u'request_body_len'] = newmessage[u'details'][u'http'][u'length']
                    del(newmessage[u'details'][u'http'][u'length'])
                if 'http_response_body' in newmessage[u'details'][u'http']:
                    newmessage[u'details'][u'http_response_body'] = newmessage[u'details'][u'http'][u'http_response_body'][0:4095]
                    del(newmessage[u'details'][u'http'][u'http_response_body'])
                if 'http_response_body_printable' in newmessage[u'details'][u'http']:
                    newmessage[u'details'][u'http_response_body_printable'] = newmessage[u'details'][u'http'][u'http_response_body_printable'][0:4095]
                    del(newmessage[u'details'][u'http'][u'http_response_body_printable'])
            if 'app_proto' in newmessage[u'details']:
                newmessage['details']['service'] = newmessage['details']['app_proto']
                del(newmessage['details']['app_proto'])
            # Make sure details.vars.flowbits exceptions are handled
            if 'vars' in newmessage['details']:
                if 'flowbits' in newmessage['details']['vars']:
                    if 'ET.http.javaclient' in newmessage['details']['vars']['flowbits']:
                        # rewrite the dotted flowbit name; the original guarded this
                        # with `if 'ET.http.javaclient.vulnerable':`, a non-empty
                        # string literal that is always truthy, so the check is dropped
                        del(newmessage['details']['vars']['flowbits']['ET.http.javaclient'])
                        newmessage['details']['vars']['flowbits']['ET.http.javaclient.vulnerable'] = "True"
            newmessage[u'summary'] = (
                u'{sourceipaddress}:'
                u'{sourceport} -> '
                u'{destinationipaddress}:'
                u'{destinationport}'
            ).format(**newmessage['details'])
            return (newmessage, metadata)

    return (newmessage, metadata)

def searchMongoAlerts(mozdefdb):
    attackers = mozdefdb['attackers']
    alerts = mozdefdb['alerts']
    # search the last X alerts for IP addresses
    # aggregated by CIDR mask/24

    # aggregate IPv4 addresses in the most recent alerts
    # to find common attackers.
    ipv4TopHits = alerts.aggregate([
        # reverse sort the current alerts
        {"$sort": {"utcepoch": -1}},
        # most recent 100
        {"$limit": 100},
        # must have an ip address
        {"$match": {"events.documentsource.details.sourceipaddress": {"$exists": True}}},
        # must not be already related to an attacker
        {"$match": {"attackerid": {"$exists": False}}},
        # make each event into it's own doc
        {"$unwind": "$events"},
        {"$project": {
            "_id": 0,
            # emit the source ip only
            "sourceip": "$events.documentsource.details.sourceipaddress"
        }},
        # count by ip
        {"$group": {"_id": "$sourceip", "hitcount": {"$sum": 1}}},
        # limit to those with X observances
        {"$match": {"hitcount": {"$gt": options.ipv4attackerhitcount}}},
        # sort
        {"$sort": SON([("hitcount", -1), ("_id", -1)])},
        # top 10
        {"$limit": 10}
    ])
    for ip in ipv4TopHits:
        # sanity check ip['_id'] which should be the ipv4 address
        if isIPv4(ip['_id']) and ip['_id'] not in netaddr.IPSet(['0.0.0.0']):
            ipcidr = netaddr.IPNetwork(ip['_id'])
            # set CIDR
            # todo: lookup ipwhois for asn_cidr value
            # potentially with a max mask value (i.e. asn is /8, limit attackers to /24)
            ipcidr.prefixlen = options.ipv4attackerprefixlength

            # append to or create attacker.
            # does this match an existing attacker's indicators
            if not ipcidr.ip.is_loopback() and not ipcidr.ip.is_private() and not ipcidr.ip.is_reserved():
                logger.debug('Searching for existing attacker with ip ' + str(ipcidr))
                attacker = attackers.find_one({'indicators.ipv4address': str(ipcidr)})

                if attacker is None:
                    logger.debug('Attacker not found, creating new one')
                    # new attacker
                    # generate a meteor-compatible ID
                    # save the ES document type, index, id
                    newAttacker = genNewAttacker()

                    # str to get the ip/cidr rather than netblock cidr.
                    # i.e. '1.2.3.4/24' not '1.2.3.0/24'
                    newAttacker['indicators'].append(dict(ipv4address=str(ipcidr)))
                    matchingalerts = alerts.find(
                        {"events.documentsource.details.sourceipaddress": str(ipcidr.ip)})
                    total_events = 0
                    if matchingalerts is not None:
                        # update list of alerts this attacker matched.
                        for alert in matchingalerts:
                            newAttacker['alerts'].append(dict(alertid=alert['_id']))
                            # update alert with attackerID
                            alert['attackerid'] = newAttacker['_id']
                            alerts.save(alert)

                            total_events += len(alert['events'])
                            if len(alert['events']) > 0:
                                newAttacker['lastseentimestamp'] = toUTC(alert['events'][-1]['documentsource']['utctimestamp'])
                    newAttacker['alertscount'] = len(newAttacker['alerts'])
                    newAttacker['eventscount'] = total_events
                    attackers.insert(newAttacker)

                    # update geoIP info
                    latestGeoIP = [a['events'] for a in alerts.find(
                        {"events.documentsource.details.sourceipaddress": str(ipcidr.ip)})][-1][0]['documentsource']
                    updateAttackerGeoIP(mozdefdb, newAttacker['_id'], latestGeoIP)

                    if options.broadcastattackers:
                        broadcastAttacker(newAttacker)
                else:
                    logger.debug('Found existing attacker')
                    # if alert not present in this attackers list
                    # append this to the list
                    # todo: trim the list at X (i.e. last 100)
                    # search alerts without attackerid
                    matchingalerts = alerts.find(
                        {"events.documentsource.details.sourceipaddress": str(ipcidr.ip),
                         "attackerid": {"$exists": False}})
                    if matchingalerts is not None:
                        logger.debug('Matched alert with attacker')

                        # update list of alerts this attacker matched.
                        for alert in matchingalerts:
                            attacker['alerts'].append(dict(alertid=alert['_id']))
                            # update alert with attackerID
                            alert['attackerid'] = attacker['_id']
                            alerts.save(alert)

                            attacker['eventscount'] += len(alert['events'])
                            attacker['lastseentimestamp'] = toUTC(alert['events'][-1]['documentsource']['utctimestamp'])

                            # geo ip could have changed, update it to the latest
                            updateAttackerGeoIP(mozdefdb, attacker['_id'], alert['events'][-1]['documentsource'])

                        # update counts
                        attacker['alertscount'] = len(attacker['alerts'])
                        attackers.save(attacker)

                        # should we autocategorize the attacker
                        # based on their alerts?
                        if attacker['category'] == 'unknown' and options.autocategorize:
                            # take a look at recent alerts for this attacker
                            # and if they are all the same category
                            # auto-categorize the attacker
                            matchingalerts = alerts.find(
                                {"attackerid": attacker['_id']}
                            ).sort('utcepoch', -1).limit(50)
                            # summarize the alert categories
                            # returns list of tuples: [(u'bruteforce', 8)]
                            categoryCounts = mostCommon(matchingalerts, 'category')
                            # are the alerts all the same category?
                            if len(categoryCounts) == 1:
                                # is the alert category mapped to an attacker category?
                                for category in options.categorymapping:
                                    if category.keys()[0] == categoryCounts[0][0]:
                                        attacker['category'] = category[category.keys()[0]]
                                        attackers.save(attacker)

def keyMapping(aDict):
    '''map common key/fields to a normalized structure,
    explicitly typed when possible to avoid schema changes for upstream consumers
    Special accommodations made for logstash, nxlog, beaver, heka and CEF
    Some shippers attempt to conform to logstash-style @fieldname convention.
    This strips the leading at symbol since it breaks some elastic search
    libraries like elasticutils.
    '''
    returndict = dict()

    # uncomment to save the source event for debugging, or chain of custody/forensics
    # returndict['original'] = aDict

    # set the timestamp when we received it, i.e. now
    returndict['receivedtimestamp'] = toUTC(datetime.now()).isoformat()
    returndict['mozdefhostname'] = options.mozdefhostname
    returndict[u'details'] = {}
    try:
        for k, v in aDict.iteritems():
            k = removeAt(k).lower()

            if k == 'sourceip':
                returndict[u'details']['eventsourceipaddress'] = v

            if k in ('facility', 'source'):
                returndict[u'source'] = v

            if k in ('message', 'summary'):
                returndict[u'summary'] = toUnicode(v)

            if k in ('payload') and 'summary' not in aDict:
                # special case for heka if it sends payload as well as a summary, keep both but move payload to the details section.
                returndict[u'summary'] = toUnicode(v)
            elif k in ('payload'):
                returndict[u'details']['payload'] = toUnicode(v)

            if k in ('eventtime', 'timestamp', 'utctimestamp', 'date'):
                returndict[u'utctimestamp'] = toUTC(v).isoformat()
                returndict[u'timestamp'] = toUTC(v).isoformat()

            if k in ('hostname', 'source_host', 'host'):
                returndict[u'hostname'] = toUnicode(v)

            if k in ('tags'):
                if 'tags' not in returndict:
                    returndict[u'tags'] = []
                if type(v) == list:
                    returndict[u'tags'] += v
                else:
                    if len(v) > 0:
                        returndict[u'tags'].append(v)

            # nxlog keeps the severity name in syslogseverity, everyone else should use severity or level.
            if k in ('syslogseverity', 'severity', 'severityvalue', 'level', 'priority'):
                returndict[u'severity'] = toUnicode(v).upper()

            if k in ('facility', 'syslogfacility'):
                returndict[u'facility'] = toUnicode(v)

            if k in ('pid', 'processid'):
                returndict[u'processid'] = toUnicode(v)

            # nxlog sets sourcename to the processname (i.e. sshd), everyone else should call it process name or pname
            if k in ('pname', 'processname', 'sourcename', 'program'):
                returndict[u'processname'] = toUnicode(v)

            # the file, or source
            if k in ('path', 'logger', 'file'):
                returndict[u'eventsource'] = toUnicode(v)

            if k in ('type', 'eventtype', 'category'):
                returndict[u'category'] = toUnicode(v)

            # custom fields as a list/array
            if k in ('fields', 'details'):
                if type(v) is not dict:
                    returndict[u'details'][u'message'] = v
                else:
                    if len(v) > 0:
                        for details_key, details_value in v.iteritems():
                            returndict[u'details'][details_key] = details_value

            # custom fields/details as a one off, not in an array
            # i.e. fields.something=value or details.something=value
            # move them to a dict for consistency in querying
            if k.startswith('fields.') or k.startswith('details.'):
                newName = k.replace('fields.', '')
                newName = newName.lower().replace('details.', '')
                # add field with a special case for shippers that
                # don't send details
                # in an array as int/floats/strings
                # we let them dictate the data type with field_datatype
                # convention
                if newName.endswith('_int'):
                    returndict[u'details'][unicode(newName)] = int(v)
                elif newName.endswith('_float'):
                    returndict[u'details'][unicode(newName)] = float(v)
                else:
                    returndict[u'details'][unicode(newName)] = toUnicode(v)

            # nxlog windows log handling
            if 'Domain' in aDict and 'SourceModuleType' in aDict:
                # nxlog parses all windows event fields very well
                # copy all fields to details
                returndict[u'details'][k] = v

        if 'utctimestamp' not in returndict:
            # default in case we don't find a reasonable timestamp
            returndict['utctimestamp'] = toUTC(datetime.now()).isoformat()
    except Exception as e:
        logger.exception('Received exception while normalizing message: %r' % e)
        logger.error('Malformed message: %r' % aDict)
        return None

    return returndict

def main():
    if options.output == 'syslog':
        logger.addHandler(SysLogHandler(address=(options.sysloghostname, options.syslogport)))
    else:
        sh = logging.StreamHandler(sys.stderr)
        sh.setFormatter(formatter)
        logger.addHandler(sh)

    logger.debug('started')
    # logger.debug(options)
    try:
        es = ElasticsearchClient((list('{0}'.format(s) for s in options.esservers)))
        s = requests.Session()
        s.headers.update({'Accept': 'application/json'})
        s.headers.update({'Content-type': 'application/json'})
        s.headers.update({'Authorization': 'SSWS {0}'.format(options.apikey)})

        # capture the time we start running so next time we catch any events created while we run.
        state = State(options.state_file)
        lastrun = toUTC(datetime.now()).isoformat()

        r = s.get('https://{0}/api/v1/events?startDate={1}&limit={2}'.format(
            options.oktadomain,
            toUTC(state.data['lastrun']).strftime('%Y-%m-%dT%H:%M:%S.000Z'),
            options.recordlimit
        ))

        if r.status_code == 200:
            oktaevents = json.loads(r.text)
            for event in oktaevents:
                if 'published' in event:
                    if toUTC(event['published']) > toUTC(state.data['lastrun']):
                        try:
                            mozdefEvent = dict()
                            mozdefEvent['utctimestamp'] = toUTC(event['published']).isoformat()
                            mozdefEvent['receivedtimestamp'] = toUTC(datetime.now()).isoformat()
                            mozdefEvent['category'] = 'okta'
                            mozdefEvent['tags'] = ['okta']
                            if 'action' in event and 'message' in event['action']:
                                mozdefEvent['summary'] = event['action']['message']
                            mozdefEvent['details'] = event

                            # Actor parsing
                            # While there are various objectTypes attributes, we just take any attribute that matches
                            # in case Okta changes its structure around a bit
                            # This means the last instance of each attribute in all actors will be recorded in mozdef
                            # while others will be discarded
                            # Which ends up working out well in Okta's case.
                            if 'actors' in event:
                                for actor in event['actors']:
                                    if 'ipAddress' in actor:
                                        if netaddr.valid_ipv4(actor['ipAddress']):
                                            mozdefEvent['details']['sourceipaddress'] = actor['ipAddress']
                                    if 'login' in actor:
                                        mozdefEvent['details']['username'] = actor['login']
                                    if 'requestUri' in actor:
                                        mozdefEvent['details']['source_uri'] = actor['requestUri']

                            # We are renaming action to activity because there are
                            # currently mapping problems with the details.action field
                            mozdefEvent['details']['activity'] = mozdefEvent['details']['action']
                            mozdefEvent['details'].pop('action')

                            jbody = json.dumps(mozdefEvent)
                            res = es.save_event(doc_type='okta', body=jbody)
                            logger.debug(res)
                        except Exception as e:
                            logger.error('Error handling log record {0} {1}'.format(r, e))
                            continue
                else:
                    logger.error('Okta event does not contain published date: {0}'.format(event))
            state.data['lastrun'] = lastrun
            state.write_state_file()
        else:
            logger.error('Could not get Okta events HTTP error code {} reason {}'.format(r.status_code, r.reason))
    except Exception as e:
        logger.error("Unhandled exception, terminating: %r" % e)

def loggerTimeStamp(self, record, datefmt=None):
    return toUTC(datetime.now()).isoformat()

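# Hedged wiring sketch: the helper above ignores its arguments and always
# returns an ISO-8601 UTC stamp, so one plausible hookup is grafting it onto a
# logging formatter in place of Formatter.formatTime.
#
#   formatter = logging.Formatter('%(asctime)s - %(levelname)s - %(message)s')
#   formatter.formatTime = loggerTimeStamp
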
def on_message(self, message):
    try:
        # default elastic search metadata for an event
        metadata = {
            'index': 'events',
            'id': None
        }
        event = {}

        event['receivedtimestamp'] = toUTC(datetime.now()).isoformat()
        event['mozdefhostname'] = self.options.mozdefhostname

        if 'tags' in event:
            event['tags'].extend([self.options.taskexchange])
        else:
            event['tags'] = [self.options.taskexchange]

        event['severity'] = 'INFO'
        event['details'] = {}

        for message_key, message_value in message.iteritems():
            if 'Message' == message_key:
                try:
                    message_json = json.loads(message_value)
                    for inside_message_key, inside_message_value in message_json.iteritems():
                        if inside_message_key in ('type', 'category'):
                            event['category'] = inside_message_value
                            # add type subcategory for filtering after
                            # original type field is rewritten as category
                            event['type'] = 'event'
                        elif inside_message_key in ('processid', 'pid'):
                            processid = str(inside_message_value)
                            processid = processid.replace('[', '')
                            processid = processid.replace(']', '')
                            event['processid'] = processid
                        elif inside_message_key in ('processname', 'pname'):
                            event['processname'] = inside_message_value
                        elif inside_message_key in ('hostname'):
                            event['hostname'] = inside_message_value
                        elif inside_message_key in ('time', 'timestamp'):
                            event['timestamp'] = toUTC(inside_message_value).isoformat()
                            event['utctimestamp'] = toUTC(event['timestamp']).astimezone(pytz.utc).isoformat()
                        elif inside_message_key in ('summary', 'payload', 'message'):
                            event['summary'] = inside_message_value.lstrip()
                        elif inside_message_key in ('source'):
                            event['source'] = inside_message_value
                        elif inside_message_key in ('fields', 'details'):
                            if type(inside_message_value) is not dict:
                                event[u'details'][u'message'] = inside_message_value
                            else:
                                if len(inside_message_value) > 0:
                                    for details_key, details_value in inside_message_value.iteritems():
                                        event[u'details'][details_key] = details_value
                        else:
                            event['details'][inside_message_key] = inside_message_value
                except ValueError:
                    event['summary'] = message_value
        (event, metadata) = sendEventToPlugins(event, metadata, self.pluginList)
        # Drop message if plugins set to None
        if event is None:
            return
        self.save_event(event, metadata)
    except Exception as e:
        logger.exception(e)
        logger.error('Malformed message: %r' % message)

def onMessage(self, message, metadata):
    # make sure I really wanted to see this message; bail out early if not
    if u'customendpoint' not in message:
        return message, metadata
    if u'category' not in message:
        return message, metadata
    if u'SOURCE' not in message:
        return message, metadata
    if message['category'] != 'bro':
        return message, metadata

    # set the doc type to bro to avoid data type conflicts with
    # other doc types (int v string, etc)
    # the index holds documents of type 'type': index -> type -> doc
    metadata['doc_type'] = 'nsm'

    # move Bro specific fields under 'details' while preserving metadata
    newmessage = dict()
    try:
        newmessage['details'] = json.loads(message['MESSAGE'])
    except (ValueError, KeyError):
        newmessage['details'] = {}
    newmessage['rawdetails'] = message
    newmessage['customendpoint'] = 'bro'

    # move some fields that are expected at the event 'root' where they belong
    if 'HOST_FROM' in message:
        newmessage['hostname'] = message['HOST_FROM']
    if 'tags' in message:
        newmessage['tags'] = message['tags']
    if 'category' in message:
        newmessage['category'] = message['category']
    if 'SOURCE' in message:
        # transform bro_files into files, fast
        newmessage['source'] = message['SOURCE'][4:]
    if 'resp_cc' in newmessage['details']:
        del(newmessage['details']['resp_cc'])

    # add mandatory fields
    if 'ts' in newmessage['details']:
        newmessage[u'utctimestamp'] = toUTC(float(newmessage['details']['ts'])).isoformat()
        newmessage[u'timestamp'] = toUTC(float(newmessage['details']['ts'])).isoformat()
    else:
        # a malformed message somehow managed to crawl to us,
        # let's put it somewhat together
        newmessage[u'utctimestamp'] = toUTC(datetime.now()).isoformat()
        newmessage[u'timestamp'] = toUTC(datetime.now()).isoformat()

    newmessage[u'receivedtimestamp'] = toUTC(datetime.now()).isoformat()
    newmessage[u'eventsource'] = u'nsm'
    newmessage[u'severity'] = u'INFO'
    newmessage[u'mozdefhostname'] = self.mozdefhostname

    if 'id.orig_h' in newmessage['details']:
        newmessage[u'details'][u'sourceipaddress'] = newmessage['details']['id.orig_h']
        del(newmessage['details']['id.orig_h'])
    if 'id.orig_p' in newmessage['details']:
        newmessage[u'details'][u'sourceport'] = newmessage['details']['id.orig_p']
        del(newmessage['details']['id.orig_p'])
    if 'id.resp_h' in newmessage['details']:
        newmessage[u'details'][u'destinationipaddress'] = newmessage['details']['id.resp_h']
        del(newmessage['details']['id.resp_h'])
    if 'id.resp_p' in newmessage['details']:
        newmessage[u'details'][u'destinationport'] = newmessage['details']['id.resp_p']
        del(newmessage['details']['id.resp_p'])

    if 'details' in newmessage:
        if 'FILE_NAME' in newmessage['details']:
            del(newmessage['details']['FILE_NAME'])
        if 'MESSAGE' in newmessage['details']:
            del(newmessage['details']['MESSAGE'])
        if 'SOURCE' in newmessage['details']:
            del(newmessage['details']['SOURCE'])

    # All Bro logs need special treatment, so we provide it.
    # Not a known log source? Mark it as such and return.
    if 'source' not in newmessage:
        newmessage['source'] = u'unknown'
        return newmessage, metadata

    logtype = newmessage['source']

    if logtype == 'conn':
        newmessage[u'details'][u'originipbytes'] = newmessage['details']['orig_ip_bytes']
        newmessage[u'details'][u'responseipbytes'] = newmessage['details']['resp_ip_bytes']
        del(newmessage['details']['orig_ip_bytes'])
        del(newmessage['details']['resp_ip_bytes'])
        if 'history' not in newmessage['details']:
            newmessage['details'][u'history'] = ''
        newmessage[u'summary'] = (
            u'{sourceipaddress}:{sourceport} -> '
            u'{destinationipaddress}:{destinationport} '
            u'{history} {originipbytes} bytes / {responseipbytes} bytes'
        ).format(**newmessage['details'])
        return (newmessage, metadata)

    if logtype == 'files':
        if 'rx_hosts' in newmessage['details']:
            newmessage['details'][u'sourceipaddress'] = u'{0}'.format(newmessage['details']['rx_hosts'][0])
        if 'tx_hosts' in newmessage['details']:
            newmessage['details'][u'destinationipaddress'] = u'{0}'.format(newmessage['details']['tx_hosts'][0])
        if 'mime_type' not in newmessage['details']:
            newmessage['details'][u'mime_type'] = u'unknown'
        if 'filename' not in newmessage['details']:
            newmessage['details'][u'filename'] = u'unknown'
        if 'total_bytes' not in newmessage['details']:
            newmessage['details'][u'total_bytes'] = u'0'
        if 'md5' not in newmessage['details']:
            newmessage['details'][u'md5'] = u'None'
        if 'filesource' not in newmessage['details']:
            newmessage['details'][u'filesource'] = u'None'
        newmessage[u'summary'] = (
            u'{rx_hosts[0]} downloaded (MD5) {md5} '
            u'MIME {mime_type} ({total_bytes} bytes) '
            u'from {tx_hosts[0]} via {filesource}'
        ).format(**newmessage['details'])
        return (newmessage, metadata)

    if logtype == 'dns':
        if 'qtype_name' not in newmessage['details']:
            newmessage['details'][u'qtype_name'] = u'unknown'
        if 'query' not in newmessage['details']:
            newmessage['details'][u'query'] = u''
        if 'rcode_name' not in newmessage['details']:
            newmessage['details'][u'rcode_name'] = u''
        newmessage[u'summary'] = (
            u'DNS {qtype_name} type query '
            u'{sourceipaddress} -> {destinationipaddress}:{destinationport}'
        ).format(**newmessage['details'])
        return (newmessage, metadata)

    if logtype == 'http':
        if 'method' not in newmessage['details']:
            newmessage['details'][u'method'] = u''
        if 'host' not in newmessage['details']:
            newmessage['details'][u'host'] = u''
        if 'uri' not in newmessage['details']:
            newmessage['details'][u'uri'] = u''
        newmessage['details'][u'uri'] = newmessage['details'][u'uri'][:1024]
        if 'status_code' not in newmessage['details']:
            newmessage['details'][u'status_code'] = u''
        newmessage[u'summary'] = (
            u'HTTP {method} '
            u'{sourceipaddress} -> {destinationipaddress}:{destinationport}'
        ).format(**newmessage['details'])
        return (newmessage, metadata)

    if logtype == 'ssl':
        if 'server_name' not in newmessage['details']:
            # fake it till you make it
            newmessage['details'][u'server_name'] = newmessage['details']['destinationipaddress']
        newmessage[u'summary'] = (
            u'SSL: {sourceipaddress} -> {destinationipaddress}:{destinationport}'
        ).format(**newmessage['details'])
        return (newmessage, metadata)

    if logtype == 'dhcp':
        newmessage[u'summary'] = (
            u'{assigned_ip} assigned to {mac}'
        ).format(**newmessage['details'])
        return (newmessage, metadata)

    if logtype == 'ftp':
        if 'command' not in newmessage['details']:
            newmessage['details'][u'command'] = u''
        if 'user' not in newmessage['details']:
            newmessage['details'][u'user'] = u''
        newmessage[u'summary'] = (
            u'FTP: {sourceipaddress} -> {destinationipaddress}:{destinationport}'
        ).format(**newmessage['details'])
        return (newmessage, metadata)

    if logtype == 'pe':
        if 'os' not in newmessage['details']:
            newmessage['details']['os'] = ''
        if 'subsystem' not in newmessage['details']:
            newmessage['details']['subsystem'] = ''
        newmessage[u'summary'] = (
            u'PE file: {os} {subsystem}'
        ).format(**newmessage['details'])
        return (newmessage, metadata)

    if logtype == 'smtp':
        newmessage[u'summary'] = (
            u'SMTP: {sourceipaddress} -> {destinationipaddress}:{destinationport}'
        ).format(**newmessage['details'])
        return (newmessage, metadata)

    if logtype == 'ssh':
        summary = (
            u'SSH: {sourceipaddress} -> {destinationipaddress}:{destinationport}'
        ).format(**newmessage['details'])
        if 'auth_success' in newmessage['details']:
            summary += u' success {0}'.format(newmessage['details']['auth_success'])
        newmessage[u'summary'] = summary
        return (newmessage, metadata)

    if logtype == 'tunnel':
        if 'tunnel_type' not in newmessage['details']:
            newmessage['details'][u'tunnel_type'] = u''
        if 'action' not in newmessage['details']:
            newmessage['details'][u'action'] = u''
        newmessage[u'summary'] = (
            u'{sourceipaddress} -> {destinationipaddress}:{destinationport} '
            u'{tunnel_type} {action}'
        ).format(**newmessage['details'])
        return (newmessage, metadata)

    if logtype == 'intel':
        if 'seen.indicator' in newmessage['details']:
            newmessage['details']['seenindicator'] = newmessage['details']['seen.indicator']
            del(newmessage['details']['seen.indicator'])
        else:
            newmessage['details'][u'seenindicator'] = u''
        if 'seen.node' in newmessage['details']:
            newmessage['details'][u'seennode'] = newmessage['details']['seen.node']
            del(newmessage['details']['seen.node'])
        if 'seen.where' in newmessage['details']:
            newmessage['details'][u'seenwhere'] = newmessage['details']['seen.where']
            del(newmessage['details']['seen.where'])
        else:
            # default the field so the summary format below cannot KeyError
            newmessage['details'][u'seenwhere'] = u''
        if 'seen.indicator_type' in newmessage['details']:
            newmessage['details'][u'seenindicatortype'] = newmessage['details']['seen.indicator_type']
            del(newmessage['details']['seen.indicator_type'])
        else:
            newmessage['details'][u'seenindicatortype'] = u''
        newmessage[u'summary'] = (
            u'Bro intel match of {seenindicatortype} in {seenwhere}'
        ).format(**newmessage['details'])
        return (newmessage, metadata)

    if logtype == 'known_certs':
        if 'serial' not in newmessage['details']:
            newmessage['details'][u'serial'] = u'0'
        newmessage[u'summary'] = (
            u'Certificate X509 seen from: {host}:{port_num}'
        ).format(**newmessage['details'])
        return (newmessage, metadata)

    if logtype == 'known_devices':
        if 'mac' not in newmessage['details']:
            newmessage['details'][u'mac'] = u''
        if 'dhcp_host_name' not in newmessage['details']:
            newmessage['details'][u'dhcp_host_name'] = u''
        newmessage[u'summary'] = (
            u'New host: {mac}'
        ).format(**newmessage['details'])
        return (newmessage, metadata)

    if logtype == 'known_hosts':
        if 'host' not in newmessage['details']:
            newmessage['details'][u'host'] = u''
        newmessage[u'summary'] = (
            u'New host: {host}'
        ).format(**newmessage['details'])
        return (newmessage, metadata)

    if logtype == 'known_services':
        if 'service' not in newmessage['details']:
            newmessage['details']['service'] = []
        if not newmessage['details']['service']:
            newmessage['details'][u'service'] = [u'Unknown']
        if 'host' not in newmessage['details']:
            newmessage['details'][u'host'] = u'unknown'
        if 'port_num' not in newmessage['details']:
            newmessage['details'][u'port_num'] = u'0'
        if 'port_proto' not in newmessage['details']:
            newmessage['details'][u'port_proto'] = u''
        newmessage[u'summary'] = (
            u'New service: {service[0]} on host {host}:{port_num} / {port_proto}'
        ).format(**newmessage['details'])
        return (newmessage, metadata)

    if logtype == 'notice':
        newmessage['details'][u'indicators'] = []
        if 'sub' not in newmessage['details']:
            newmessage['details'][u'sub'] = u''
        if 'msg' not in newmessage['details']:
            newmessage['details'][u'msg'] = u''
        if 'note' not in newmessage['details']:
            newmessage['details'][u'note'] = u''
        # clean up the action notice IP addresses
        if 'actions' in newmessage['details']:
            if newmessage['details']['actions'] == "Notice::ACTION_LOG":
                # retrieve indicator ip addresses from the sub field
                # "sub": "Indicator: 1.2.3.4, Indicator: 5.6.7.8"
                newmessage['details']['indicators'] = [ip for ip in findIPv4(newmessage['details']['sub'])]
        # remove the details.src field and add it to indicators
        # as it may not be the actual source
        if 'src' in newmessage['details']:
            if isIPv4(newmessage[u'details'][u'src']):
                newmessage[u'details'][u'indicators'].append(newmessage[u'details'][u'src'])
                # if details.src is present, overwrite the source IP address with it
                newmessage[u'details'][u'sourceipaddress'] = newmessage[u'details'][u'src']
                newmessage[u'details'][u'sourceipv4address'] = newmessage[u'details'][u'src']
            if isIPv6(newmessage[u'details'][u'src']):
                newmessage[u'details'][u'indicators'].append(newmessage[u'details'][u'src'])
                # if details.src is present, overwrite the source IP address with it
                newmessage[u'details'][u'sourceipv6address'] = newmessage[u'details'][u'src']
            del newmessage[u'details'][u'src']
        sumstruct = {}
        sumstruct['note'] = newmessage['details'][u'note']
        if 'sourceipv6address' in newmessage['details']:
            sumstruct['src'] = newmessage['details']['sourceipv6address']
        else:
            if 'sourceipv4address' in newmessage['details']:
                sumstruct['src'] = newmessage['details']['sourceipv4address']
            else:
                sumstruct['src'] = u'unknown'
        if 'dst' in newmessage['details']:
            sumstruct['dst'] = newmessage['details']['dst']
            del(newmessage[u'details'][u'dst'])
            if isIPv4(sumstruct[u'dst']):
                newmessage['details'][u'destinationipaddress'] = sumstruct['dst']
                newmessage['details'][u'destinationipv4address'] = sumstruct['dst']
            if isIPv6(sumstruct[u'dst']):
                newmessage['details'][u'destinationipv6address'] = sumstruct['dst']
        else:
            sumstruct['dst'] = u'unknown'
        if 'p' in newmessage['details']:
            sumstruct['p'] = newmessage['details']['p']
        else:
            sumstruct['p'] = u'unknown'
        newmessage[u'summary'] = (
            u'{note} source {src} destination {dst} port {p}'
        ).format(**sumstruct)
        # Thank you for your service
        return (newmessage, metadata)

    if logtype == 'rdp':
        if 'cookie' not in newmessage['details']:
            newmessage['details'][u'cookie'] = u'unknown'
        newmessage[u'summary'] = (
            u'RDP: {sourceipaddress} -> {destinationipaddress}:{destinationport}'
        ).format(**newmessage['details'])
        return (newmessage, metadata)

    if logtype == 'sip':
        if 'status_msg' not in newmessage['details']:
            newmessage['details'][u'status_msg'] = u'unknown'
        if 'uri' not in newmessage['details']:
            newmessage['details'][u'uri'] = u'unknown'
        if 'method' not in newmessage['details']:
            newmessage['details'][u'method'] = u'unknown'
        newmessage[u'summary'] = (
            u'SIP: {sourceipaddress} -> {destinationipaddress}:{destinationport} '
            u'method {method} status {status_msg}'
        ).format(**newmessage['details'])
        return (newmessage, metadata)

    if logtype == 'software':
        newmessage['details']['parsed_version'] = {}
        if 'name' not in newmessage['details']:
            newmessage['details'][u'name'] = u'unparsed'
        if 'software_type' not in newmessage['details']:
            newmessage['details'][u'software_type'] = u'unknown'
        if 'host' not in newmessage['details']:
            newmessage['details'][u'host'] = u''
        if 'version.addl' in newmessage['details']:
            newmessage['details']['parsed_version']['addl'] = newmessage['details']['version.addl']
            del(newmessage['details']['version.addl'])
        if 'version.major' in newmessage['details']:
            newmessage['details']['parsed_version']['major'] = newmessage['details']['version.major']
            del(newmessage['details']['version.major'])
        if 'version.minor' in newmessage['details']:
            newmessage['details']['parsed_version']['minor'] = newmessage['details']['version.minor']
            del(newmessage['details']['version.minor'])
        if 'version.minor2' in newmessage['details']:
            newmessage['details']['parsed_version']['minor2'] = newmessage['details']['version.minor2']
            del(newmessage['details']['version.minor2'])
        if 'version.minor3' in newmessage['details']:
            newmessage['details']['parsed_version']['minor3'] = newmessage['details']['version.minor3']
            del(newmessage['details']['version.minor3'])
        newmessage[u'summary'] = (
            u'Found {software_type} software on {host}'
        ).format(**newmessage['details'])
        return (newmessage, metadata)

    if logtype == 'socks':
        if 'version' not in newmessage['details']:
            newmessage['details'][u'version'] = u'0'
        if 'status' not in newmessage['details']:
            newmessage['details'][u'status'] = u'unknown'
        newmessage[u'summary'] = (
            u'SOCKSv{version}: {sourceipaddress} -> '
            u'{destinationipaddress}:{destinationport} status {status}'
        ).format(**newmessage['details'])
        return (newmessage, metadata)

    if logtype == 'dce_rpc':
        if 'endpoint' not in newmessage['details']:
            newmessage['details'][u'endpoint'] = u'unknown'
        if 'operation' not in newmessage['details']:
            newmessage['details'][u'operation'] = u'unknown'
        newmessage[u'summary'] = (
            u'DCERPC: {sourceipaddress} -> {destinationipaddress}:{destinationport}'
        ).format(**newmessage['details'])
        return (newmessage, metadata)

    if logtype == 'kerberos':
        if 'request_type' not in newmessage['details']:
            newmessage['details'][u'request_type'] = u'unknown'
        if 'client' not in newmessage['details']:
            newmessage['details'][u'client'] = u'unknown'
        if 'service' not in newmessage['details']:
            newmessage['details'][u'service'] = u'unknown'
        if 'success' not in newmessage['details']:
            newmessage['details'][u'success'] = u'unknown'
        if 'error_msg' not in newmessage['details']:
            newmessage['details'][u'error_msg'] = u''
        newmessage[u'summary'] = (
            u'{sourceipaddress} -> {destinationipaddress}:{destinationport} '
            u'request {request_type} success {success}'
        ).format(**newmessage['details'])
        return (newmessage, metadata)

    if logtype == 'ntlm':
        newmessage['details'][u'ntlm'] = {}
        if 'domainname' in newmessage['details']:
            newmessage['details'][u'ntlm'][u'domainname'] = newmessage['details']['domainname']
            del(newmessage['details']['domainname'])
        else:
            newmessage['details'][u'ntlm'][u'domainname'] = u'unknown'
        if 'hostname' in newmessage['details']:
            newmessage['details'][u'ntlm'][u'hostname'] = newmessage['details']['hostname']
            del(newmessage['details']['hostname'])
        else:
            newmessage['details'][u'ntlm'][u'hostname'] = u'unknown'
        if 'username' in newmessage['details']:
            newmessage['details'][u'ntlm'][u'username'] = newmessage['details']['username']
            del(newmessage['details']['username'])
        else:
            newmessage['details'][u'ntlm'][u'username'] = u'unknown'
        if 'success' not in newmessage['details']:
            newmessage['details'][u'success'] = u'unknown'
        if 'status' not in newmessage['details']:
            newmessage['details'][u'status'] = u'unknown'
        newmessage[u'summary'] = (
            u'NTLM: {sourceipaddress} -> {destinationipaddress}:{destinationport} '
            u'success {success} status {status}'
        ).format(**newmessage['details'])
        return (newmessage, metadata)

    if logtype == 'smb_files':
        newmessage['details']['smbtimes'] = {}
        if 'path' not in newmessage['details']:
            newmessage['details'][u'path'] = u''
        if 'name' not in newmessage['details']:
            newmessage['details'][u'name'] = u''
        if 'action' not in newmessage['details']:
            newmessage['details'][u'action'] = u''
        if 'times.accessed' in newmessage['details']:
            newmessage['details']['smbtimes']['accessed'] = toUTC(float(newmessage['details']['times.accessed'])).isoformat()
            del(newmessage['details']['times.accessed'])
        if 'times.changed' in newmessage['details']:
            newmessage['details']['smbtimes']['changed'] = toUTC(float(newmessage['details']['times.changed'])).isoformat()
            del(newmessage['details']['times.changed'])
        if 'times.created' in newmessage['details']:
            newmessage['details']['smbtimes']['created'] = toUTC(float(newmessage['details']['times.created'])).isoformat()
            del(newmessage['details']['times.created'])
        if 'times.modified' in newmessage['details']:
            newmessage['details']['smbtimes']['modified'] = toUTC(float(newmessage['details']['times.modified'])).isoformat()
            del(newmessage['details']['times.modified'])
        newmessage[u'summary'] = (
            u'SMB file: {sourceipaddress} -> '
            u'{destinationipaddress}:{destinationport} {action}'
        ).format(**newmessage['details'])
        return (newmessage, metadata)

    if logtype == 'smb_mapping':
        if 'share_type' not in newmessage['details']:
            newmessage['details'][u'share_type'] = u''
        if 'path' not in newmessage['details']:
            newmessage['details'][u'path'] = u''
        newmessage[u'summary'] = (
            u'SMB mapping: {sourceipaddress} -> '
            u'{destinationipaddress}:{destinationport} {share_type}'
        ).format(**newmessage['details'])
        return (newmessage, metadata)

    if logtype == 'snmp':
        if 'version' not in newmessage['details']:
            newmessage['details'][u'version'] = u'Unknown'
        if 'get_bulk_requests' not in newmessage['details']:
            newmessage['details']['get_bulk_requests'] = 0
        if 'get_requests' not in newmessage['details']:
            newmessage['details']['get_requests'] = 0
        if 'set_requests' not in newmessage['details']:
            newmessage['details']['set_requests'] = 0
        if 'get_responses' not in newmessage['details']:
            newmessage['details']['get_responses'] = 0
        newmessage['details']['getrequestssum'] = u'{0}'.format(
            newmessage['details']['get_bulk_requests'] + newmessage['details']['get_requests'])
        newmessage[u'summary'] = (
            u'SNMPv{version}: {sourceipaddress} -> '
            u'{destinationipaddress}:{destinationport} '
            u'({getrequestssum} get / {set_requests} set requests '
            u'{get_responses} get responses)'
        ).format(**newmessage['details'])
        return (newmessage, metadata)

    if logtype == 'x509':
        newmessage['details'][u'certificate'] = {}
        if 'basic_constraints.ca' in newmessage['details']:
            newmessage['details'][u'certificate'][u'basic_constraints_ca'] = newmessage['details'][u'basic_constraints.ca']
            del(newmessage['details'][u'basic_constraints.ca'])
        if 'basic_constraints.path_len' in newmessage['details']:
            newmessage['details'][u'certificate'][u'basic_constraints_path_len'] = newmessage['details'][u'basic_constraints.path_len']
            del(newmessage['details'][u'basic_constraints.path_len'])
        if 'certificate.exponent' in newmessage['details']:
            newmessage['details'][u'certificate'][u'exponent'] = newmessage['details'][u'certificate.exponent']
            del(newmessage['details'][u'certificate.exponent'])
        if 'certificate.issuer' in newmessage['details']:
            newmessage['details'][u'certificate'][u'issuer'] = newmessage['details'][u'certificate.issuer']
            del(newmessage['details'][u'certificate.issuer'])
        if 'certificate.key_alg' in newmessage['details']:
            newmessage['details'][u'certificate'][u'key_alg'] = newmessage['details'][u'certificate.key_alg']
            del(newmessage['details'][u'certificate.key_alg'])
        if 'certificate.key_length' in newmessage['details']:
            newmessage['details'][u'certificate'][u'key_length'] = newmessage['details'][u'certificate.key_length']
            del(newmessage['details'][u'certificate.key_length'])
        if 'certificate.key_type' in newmessage['details']:
            newmessage['details'][u'certificate'][u'key_type'] = newmessage['details'][u'certificate.key_type']
            del(newmessage['details'][u'certificate.key_type'])
        if 'certificate.not_valid_after' in newmessage['details']:
            newmessage['details'][u'certificate'][u'not_valid_after'] = toUTC(float(newmessage['details'][u'certificate.not_valid_after'])).isoformat()
            del(newmessage['details'][u'certificate.not_valid_after'])
        if 'certificate.not_valid_before' in newmessage['details']:
            newmessage['details'][u'certificate'][u'not_valid_before'] = toUTC(float(newmessage['details'][u'certificate.not_valid_before'])).isoformat()
            del(newmessage['details'][u'certificate.not_valid_before'])
        if 'certificate.sig_alg' in newmessage['details']:
            newmessage['details'][u'certificate'][u'sig_alg'] = newmessage['details'][u'certificate.sig_alg']
            del(newmessage['details'][u'certificate.sig_alg'])
        if 'certificate.subject' in newmessage['details']:
            newmessage['details'][u'certificate'][u'subject'] = newmessage['details'][u'certificate.subject']
            del(newmessage['details'][u'certificate.subject'])
        if 'certificate.version' in newmessage['details']:
            newmessage['details'][u'certificate'][u'version'] = newmessage['details'][u'certificate.version']
            del(newmessage['details'][u'certificate.version'])
        if 'certificate.serial' in newmessage['details']:
            newmessage['details'][u'certificate'][u'serial'] = newmessage['details'][u'certificate.serial']
            del(newmessage['details'][u'certificate.serial'])
        else:
            newmessage['details'][u'certificate'][u'serial'] = u'0'
        newmessage[u'summary'] = u'X509 certificate seen'
        return (newmessage, metadata)

    return (newmessage, metadata)
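# A quick standalone illustration (not part of the plugin above) of what the
# 'conn' branch produces for a toy Bro connection record; all field values
# here are made up.
details = {
    'sourceipaddress': '10.1.2.3', 'sourceport': 52422,
    'destinationipaddress': '10.4.5.6', 'destinationport': 443,
    'history': 'ShADad', 'originipbytes': 1500, 'responseipbytes': 9000,
}
summary = (
    u'{sourceipaddress}:{sourceport} -> '
    u'{destinationipaddress}:{destinationport} '
    u'{history} {originipbytes} bytes / {responseipbytes} bytes'
).format(**details)
# -> u'10.1.2.3:52422 -> 10.4.5.6:443 ShADad 1500 bytes / 9000 bytes'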
def __init__(self, ptRequestor, esConnection):
    self.ptrequestor = ptRequestor
    self.esConnection = esConnection
    # calculate our initial request window
    self.lastRequestTime = toUTC(datetime.now()) - \
        timedelta(seconds=options.ptinterval) - \
        timedelta(seconds=options.ptbackoff)
def onMessage(self, message, metadata):
    # make sure I really wanted to see this message; bail out early if not
    if u"customendpoint" not in message:
        return message, metadata
    if u"category" not in message:
        return message, metadata
    if message["category"] != "proxy":
        return message, metadata

    # move Squid specific fields under 'details' while preserving metadata
    newmessage = dict()

    # set the type to squid for categorical filtering of events
    newmessage["type"] = "squid"
    newmessage[u"mozdefhostname"] = self.mozdefhostname
    newmessage["details"] = {}

    # move some fields that are expected at the event 'root' where they belong
    if "HOST_FROM" in message:
        newmessage["hostname"] = message["HOST_FROM"]
    if "TAGS" in message:
        newmessage["tags"] = message["TAGS"]
    if "category" in message:
        newmessage["category"] = message["category"]
    newmessage[u"customendpoint"] = message["customendpoint"]
    newmessage[u"source"] = u"unknown"
    if "source" in message:
        newmessage[u"source"] = message["source"]
    if "MESSAGE" in message:
        newmessage[u"summary"] = message["MESSAGE"]

    if newmessage["source"] == "access":
        # http://www.squid-cache.org/Doc/config/logformat/
        # https://wiki.squid-cache.org/Features/LogFormat
        # logformat squid %ts.%03tu %6tr %>a %>p %<a %<p %Ss %<Hs %>st %<st %rm %ru %>rs %<A %mt
        line = message["MESSAGE"].strip()
        tokens = line.split()

        newmessage[u"details"][u"duration"] = float(tokens[1]) / 1000.0
        newmessage[u"details"][u"sourceipaddress"] = tokens[2]
        newmessage[u"details"][u"sourceport"] = int(self.create_int(tokens[3]))
        if self.isIPv4(tokens[4]):
            newmessage[u"details"][u"destinationipaddress"] = tokens[4]
        else:
            newmessage[u"details"][u"destinationipaddress"] = u"0.0.0.0"
        newmessage[u"details"][u"proxyaction"] = tokens[6]
        if newmessage[u"details"][u"proxyaction"] != "TCP_DENIED":
            newmessage[u"details"][u"destinationport"] = int(self.create_int(tokens[5]))
            newmessage[u"details"][u"host"] = tokens[13]
        else:
            (fqdn, dstport) = self.tokenize_url(tokens[11])
            newmessage[u"details"][u"destinationport"] = dstport
            newmessage[u"details"][u"host"] = fqdn
        newmessage[u"details"][u"status"] = tokens[7]
        newmessage[u"details"][u"requestsize"] = int(tokens[8])
        newmessage[u"details"][u"responsesize"] = int(tokens[9])
        method = tokens[10]
        newmessage[u"details"][u"method"] = method
        newmessage[u"details"][u"destination"] = tokens[11]
        proto = tokens[12]
        if proto == "-" and method == "CONNECT":
            proto = "ssl"
        newmessage[u"details"][u"proto"] = proto
        newmessage[u"details"][u"mimetype"] = tokens[14]
        # the squid timestamp marks the end of the request, so subtract
        # the duration to get the start time
        newmessage[u"utctimestamp"] = (toUTC(float(tokens[0])) - timedelta(milliseconds=float(tokens[1]))).isoformat()
        newmessage[u"timestamp"] = (toUTC(float(tokens[0])) - timedelta(milliseconds=float(tokens[1]))).isoformat()

    # add mandatory fields
    newmessage[u"receivedtimestamp"] = toUTC(datetime.now()).isoformat()
    newmessage[u"eventsource"] = u"squid"
    newmessage[u"severity"] = u"INFO"
    return (newmessage, metadata)
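# Sketch: parsing one access log line in the squid logformat documented in
# the plugin above. The sample line is fabricated to match the token layout;
# the class helpers (create_int, tokenize_url) are not needed for this demo.
sample = ('1547731620.123 2345 10.0.0.1 44300 93.184.216.34 443 TCP_TUNNEL '
          '200 512 4096 CONNECT example.com:443 - example.com -')
tokens = sample.strip().split()
assert len(tokens) == 15
parsed = {
    'duration': float(tokens[1]) / 1000.0,   # %6tr is in milliseconds
    'sourceipaddress': tokens[2],
    'sourceport': int(tokens[3]),
    'method': tokens[10],
    'destination': tokens[11],
}
print(parsed)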
def main():
    if options.output == 'syslog':
        logger.addHandler(SysLogHandler(address=(options.sysloghostname, options.syslogport)))
    else:
        sh = logging.StreamHandler(sys.stderr)
        sh.setFormatter(formatter)
        logger.addHandler(sh)
    logger.debug('started')

    state = State(options.state_file_name)
    try:
        # capture the time we start running so next time we catch any events
        # created while we run
        lastrun = toUTC(datetime.now()).isoformat()

        # get our credentials
        mozdefClient = json.loads(open(options.jsoncredentialfile).read())
        client_email = mozdefClient['client_email']
        private_key = mozdefClient['private_key']

        # set the oauth scope we will request
        scope = [
            'https://www.googleapis.com/auth/admin.reports.audit.readonly',
            'https://www.googleapis.com/auth/admin.reports.usage.readonly'
        ]

        # authorize our http object
        # we do this as a 'service account' so it's important
        # to specify the correct 'sub' option
        # or you will get access denied even with correct delegations/scope
        credentials = SignedJwtAssertionCredentials(client_email,
                                                    private_key,
                                                    scope=scope,
                                                    sub=options.impersonate)
        http = Http()
        credentials.authorize(http)

        # build a request to the admin sdk
        api = build('admin', 'reports_v1', http=http)
        response = api.activities().list(userKey='all',
                                         applicationName='login',
                                         startTime=toUTC(state.data['lastrun']).strftime('%Y-%m-%dT%H:%M:%S.000Z'),
                                         maxResults=options.recordlimit).execute()

        # flatten the returned events into a simpler format
        events = []
        if 'items' in response:
            for i in response['items']:
                # flatten the sub dict/lists to pull out the good parts
                event = dict(category='google')
                event['tags'] = ['google', 'authentication']
                event['severity'] = 'INFO'
                event['summary'] = 'google authentication: '

                details = dict()
                for keyValue in flattenDict(i):
                    # change key/values like:
                    # actor.email=value
                    # to actor_email=value
                    key, value = keyValue.split('=')
                    key = key.replace('.', '_').lower()
                    details[key] = value

                # find important keys and adjust their location/name
                if 'ipaddress' in details:
                    # it's the source ip
                    details['sourceipaddress'] = details['ipaddress']
                    del details['ipaddress']
                if 'id_time' in details:
                    event['timestamp'] = details['id_time']
                    event['utctimestamp'] = details['id_time']
                if 'events_name' in details:
                    event['summary'] += details['events_name'] + ' '
                if 'actor_email' in details:
                    event['summary'] += details['actor_email'] + ' '

                event['details'] = details
                events.append(event)

        # post events to mozdef
        logger.debug('posting {0} google events to mozdef'.format(len(events)))
        for e in events:
            requests.post(options.url, data=json.dumps(e))

        # record the time we started as the start time for next time
        state.data['lastrun'] = lastrun
        state.write_state_file()
    except Exception as e:
        logger.error("Unhandled exception, terminating: %r" % e)
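# Sketch of the key normalization applied to each flattened 'key=value' pair
# above (flattenDict itself is a MozDef helper defined elsewhere; the pairs
# below are illustrative stand-ins for its output):
for keyValue in ['actor.email=jane@example.com', 'id.time=2018-01-01T00:00:00Z']:
    key, value = keyValue.split('=')
    key = key.replace('.', '_').lower()
    print(key, value)   # actor_email jane@example.com / id_time 2018-01-01T00:00:00Z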
def onMessage(self, request, response):
    '''
    request: http://bottlepy.org/docs/dev/api.html#the-request-object
    response: http://bottlepy.org/docs/dev/api.html#the-response-object
    '''
    # an ES query/facet to count success/failed logins
    # oriented to the data having
    #   category: authentication
    #   details.success marked true/false for success/failed auth
    #   details.username as the user
    begindateUTC = None
    enddateUTC = None
    resultsList = list()
    if begindateUTC is None:
        begindateUTC = datetime.now() - timedelta(hours=12)
        begindateUTC = toUTC(begindateUTC)
    if enddateUTC is None:
        enddateUTC = datetime.now()
        enddateUTC = toUTC(enddateUTC)

    es_client = ElasticsearchClient(list('{0}'.format(s) for s in self.restoptions['esservers']))
    search_query = SearchQuery()

    # a query to tally users with failed logins
    date_range_match = RangeMatch('utctimestamp', begindateUTC, enddateUTC)
    search_query.add_must(date_range_match)
    search_query.add_must(PhraseMatch('category', 'authentication'))
    search_query.add_must(PhraseMatch('details.success', 'false'))
    search_query.add_must(ExistsMatch('details.username'))
    search_query.add_aggregation(Aggregation('details.success'))
    search_query.add_aggregation(Aggregation('details.username'))

    results = search_query.execute(es_client, indices=['events', 'events-previous'])

    # any usernames or words to ignore; especially useful if ES is analyzing
    # the username field and breaking apart user@somewhere.com
    # into user, somewhere and .com
    stoplist = self.options.ignoreusernames.split(',')

    # walk the aggregated failed users and look for successes/failures
    for t in results['aggregations']['details.username']['terms']:
        if t['key'] in stoplist:
            continue
        failures = 0
        success = 0
        username = t['key']

        details_query = SearchQuery()
        details_query.add_must(date_range_match)
        details_query.add_must(PhraseMatch('category', 'authentication'))
        details_query.add_must(PhraseMatch('details.username', username))
        details_query.add_aggregation(Aggregation('details.success'))
        details_results = details_query.execute(es_client)

        # details.success is boolean; as an aggregate key it is an int (0/1)
        for details_term in details_results['aggregations']['details.success']['terms']:
            if details_term['key'] == 1:
                success = details_term['count']
            if details_term['key'] == 0:
                failures = details_term['count']
        resultsList.append(
            dict(
                username=username,
                failures=failures,
                success=success,
                begin=begindateUTC.isoformat(),
                end=enddateUTC.isoformat()
            )
        )

    response.body = json.dumps(resultsList)
    response.status = 200
    return (request, response)
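# Sketch of the success/failure tally above, run against a toy aggregation
# result. The bucket layout mirrors what the endpoint expects from its
# SearchQuery results; the counts are made up.
details_results = {
    'aggregations': {'details.success': {'terms': [
        {'key': 1, 'count': 7},
        {'key': 0, 'count': 3},
    ]}}}
success = failures = 0
for term in details_results['aggregations']['details.success']['terms']:
    if term['key'] == 1:
        success = term['count']
    if term['key'] == 0:
        failures = term['count']
assert (success, failures) == (7, 3)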
def getQueueSizes():
    logger.debug('starting')
    logger.debug(options)
    es = ElasticsearchClient(options.esservers)
    sqslist = {}
    sqslist['queue_stats'] = {}
    qcount = len(options.taskexchange)
    qcounter = qcount - 1

    mqConn = boto.sqs.connect_to_region(
        options.region,
        aws_access_key_id=options.accesskey,
        aws_secret_access_key=options.secretkey
    )

    for exchange in options.taskexchange:
        logger.debug('Looking for sqs queue stats in queue ' + exchange)
        eventTaskQueue = mqConn.get_queue(exchange)
        # get queue stats
        taskQueueStats = eventTaskQueue.get_attributes('All')
        sqslist['queue_stats'][qcounter] = taskQueueStats
        sqslist['queue_stats'][qcounter]['name'] = exchange
        qcounter -= 1

    # set up a log entry for health/status
    sqsid = '{0}-{1}'.format(options.account, options.region)
    healthlog = dict(
        utctimestamp=toUTC(datetime.now()).isoformat(),
        hostname=sqsid,
        processid=os.getpid(),
        processname=sys.argv[0],
        severity='INFO',
        summary='mozdef health/status',
        category='mozdef',
        source='aws-sqs',
        tags=[],
        details=[])
    healthlog['details'] = dict(username='******')
    healthlog['details']['queues'] = list()
    healthlog['details']['total_messages_ready'] = 0
    healthlog['details']['total_feeds'] = qcount
    healthlog['tags'] = ['mozdef', 'status', 'sqs']
    ready = 0
    qcounter = qcount - 1
    for q in sqslist['queue_stats'].keys():
        queuelist = sqslist['queue_stats'][qcounter]
        # default the per-queue values in case an attribute is missing
        messages = 0
        inflight = 0
        delayed = 0
        name = ''
        if 'ApproximateNumberOfMessages' in queuelist:
            ready1 = int(queuelist['ApproximateNumberOfMessages'])
            ready = ready1 + ready
            healthlog['details']['total_messages_ready'] = ready
            messages = int(queuelist['ApproximateNumberOfMessages'])
        if 'ApproximateNumberOfMessagesNotVisible' in queuelist:
            inflight = int(queuelist['ApproximateNumberOfMessagesNotVisible'])
        if 'ApproximateNumberOfMessagesDelayed' in queuelist:
            delayed = int(queuelist['ApproximateNumberOfMessagesDelayed'])
        if 'name' in queuelist:
            name = queuelist['name']
        queueinfo = dict(
            queue=name,
            messages_delayed=delayed,
            messages_ready=messages,
            messages_inflight=inflight)
        healthlog['details']['queues'].append(queueinfo)
        qcounter -= 1

    # post to elasticsearch servers directly without going through
    # message queues in case there is an availability issue
    es.save_event(index=options.index, doc_type='mozdefhealth', body=json.dumps(healthlog))
    # post another doc with a static docid and tag
    # for use when querying for the latest sqs status
    healthlog['tags'] = ['mozdef', 'status', 'sqs-latest']
    es.save_event(index=options.index, doc_type='mozdefhealth', doc_id=getDocID(sqsid), body=json.dumps(healthlog))
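# Sketch of the ready-message roll-up above with a toy queue_stats dict.
# The attribute name matches the SQS attribute the code reads; queue names
# and counts are made up.
queue_stats = {
    0: {'name': 'eventtask1', 'ApproximateNumberOfMessages': '12'},
    1: {'name': 'eventtask2', 'ApproximateNumberOfMessages': '30'},
}
total_ready = sum(int(q['ApproximateNumberOfMessages'])
                  for q in queue_stats.values()
                  if 'ApproximateNumberOfMessages' in q)
assert total_ready == 42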
def test_eve_log_alert_http(self):
    event = {
        'customendpoint': '',
        'category': 'suricata',
        'source': 'eve-log',
        'event_type': 'alert'
    }
    MESSAGE = {
        "timestamp": "2018-09-12T22:24:09.546736+0000",
        "flow_id": 1484802709084080,
        "in_iface": "enp216s0f0",
        "event_type": "alert",
        "vlan": 75,
        "src_ip": "10.48.240.19",
        "src_port": 44741,
        "dest_ip": "10.48.74.17",
        "dest_port": 3128,
        "proto": "017",
        "alert": {
            "action": "allowed",
            "gid": 1,
            "signature_id": 2024897,
            "rev": 1,
            "signature": "ET USER_AGENTS Go HTTP Client User-Agent",
            "category": "",
            "severity": 3
        },
        "app_proto": "http",
        "flow": {
            "pkts_toserver": 555,
            "pkts_toclient": 20,
            "bytes_toserver": 350,
            "bytes_toclient": 4444,
            "start": "2018-10-12T22:24:09.546736+0000"
        },
        "payload": "Q09OTkVDVCBzZWN1cml0eS10cmFja2VyLmRlYmlhbi5vcmc6NDQzIEhUVFAvMS4xDQpIb3N0OiBzZWN1cml0eS10cmFja2VyLmRlYmlhbi5vcmc6NDQzDQpVc2VyLUFnZW50OiBHby1odHRwLWNsaWVudC8xLjENCg0K",
        "payload_printable": "CONNECT security-tracker.debian.org:443 HTTP\/1.1\r\nHost: security-tracker.debian.org:443\r\nUser-Agent: Go-http-client\/1.1\r\n\r\n",
        "stream": 0,
        "packet": "RQAAKAAAAABABgAACjBLMAowShHR6Aw4ClEmlrx\/mcdQEgoAAAAAAA==",
        "packet_info": {
            "linktype": 12
        },
        "http": {
            "hostname": "security-tracker.debian.org",
            "url": "security-tracker.debian.org:443",
            "http_user_agent": "Go-http-client\/1.1",
            "http_method": "CONNECT",
            "protocol": "HTTP\/1.1",
            "status": 200,
            "length": 0,
            "redirect": "afakedestination"
        },
    }
    event['message'] = json.dumps(MESSAGE)

    result, metadata = self.plugin.onMessage(event, self.metadata)
    self.verify_defaults(result)
    self.verify_metadata(metadata)
    assert toUTC(MESSAGE['flow']['start']).isoformat() == result['utctimestamp']
    assert toUTC(MESSAGE['flow']['start']).isoformat() == result['timestamp']
    assert result['details']['host'] == MESSAGE['http']['hostname']
    assert result['details']['method'] == MESSAGE['http']['http_method']
    assert result['details']['user_agent'] == MESSAGE['http']['http_user_agent']
    assert result['details']['status_code'] == MESSAGE['http']['status']
    assert result['details']['uri'] == MESSAGE['http']['url']
    assert result['details']['redirect_dst'] == MESSAGE['http']['redirect']
    assert result['details']['request_body_len'] == MESSAGE['http']['length']
def keyMapping(aDict):
    '''map common key/fields to a normalized structure,
    explicitly typed when possible to avoid schema changes for
    upstream consumers

    Special accommodations made for logstash, nxlog, beaver, heka and CEF.
    Some shippers attempt to conform to the logstash-style @fieldname
    convention. This strips the leading at symbol since it breaks some
    elasticsearch libraries like elasticutils.
    '''
    returndict = dict()

    returndict['source'] = 'cloudtrail'
    returndict['details'] = {}
    returndict['category'] = 'cloudtrail'
    returndict['processid'] = str(os.getpid())
    returndict['processname'] = sys.argv[0]
    returndict['severity'] = 'INFO'
    if 'sourceIPAddress' in aDict and 'eventName' in aDict and 'eventSource' in aDict:
        summary_str = "{0} performed {1} in {2}".format(
            aDict['sourceIPAddress'],
            aDict['eventName'],
            aDict['eventSource']
        )
        returndict['summary'] = summary_str

    if 'eventName' in aDict:
        # uppercase the first character
        aDict['eventName'] = aDict['eventName'][0].upper() + aDict['eventName'][1:]
        returndict['details']['eventVerb'] = CLOUDTRAIL_VERB_REGEX.findall(aDict['eventName'])[0]
        returndict['details']['eventReadOnly'] = (returndict['details']['eventVerb'] in ['Describe', 'Get', 'List'])

    # set the timestamp when we received it, i.e. now
    returndict['receivedtimestamp'] = toUTC(datetime.now()).isoformat()
    returndict['mozdefhostname'] = options.mozdefhostname
    try:
        for k, v in aDict.iteritems():
            k = removeAt(k).lower()

            if k == 'sourceip':
                returndict[u'details']['sourceipaddress'] = v
            elif k == 'sourceipaddress':
                returndict[u'details']['sourceipaddress'] = v
            elif k in ('facility', 'source'):
                returndict[u'source'] = v
            elif k in ('eventsource',):
                returndict[u'hostname'] = v
            elif k in ('message', 'summary'):
                returndict[u'summary'] = toUnicode(v)
            elif k in ('payload',) and 'summary' not in aDict:
                # special case for heka: if it sends a payload as well as a
                # summary, keep both but move the payload to the details section
                returndict[u'summary'] = toUnicode(v)
            elif k in ('payload',):
                returndict[u'details']['payload'] = toUnicode(v)
            elif k in ('eventtime', 'timestamp', 'utctimestamp', 'date'):
                returndict[u'utctimestamp'] = toUTC(v).isoformat()
                returndict[u'timestamp'] = toUTC(v).isoformat()
            elif k in ('hostname', 'source_host', 'host'):
                returndict[u'hostname'] = toUnicode(v)
            elif k in ('tags',):
                if 'tags' not in returndict:
                    returndict[u'tags'] = []
                if type(v) == list:
                    returndict[u'tags'] += v
                else:
                    if len(v) > 0:
                        returndict[u'tags'].append(v)
            # nxlog keeps the severity name in syslogseverity;
            # everyone else should use severity or level
            elif k in ('syslogseverity', 'severity', 'severityvalue', 'level', 'priority'):
                returndict[u'severity'] = toUnicode(v).upper()
            elif k in ('facility', 'syslogfacility'):
                returndict[u'facility'] = toUnicode(v)
            elif k in ('pid', 'processid'):
                returndict[u'processid'] = toUnicode(v)
            # nxlog sets sourcename to the processname (i.e. sshd);
            # everyone else should call it processname or pname
            elif k in ('pname', 'processname', 'sourcename', 'program'):
                returndict[u'processname'] = toUnicode(v)
            # the file, or source
            elif k in ('path', 'logger', 'file'):
                returndict[u'eventsource'] = toUnicode(v)
            elif k in ('type', 'eventtype', 'category'):
                returndict[u'category'] = toUnicode(v)
                returndict[u'type'] = 'cloudtrail'
            # custom fields as a list/array
            elif k in ('fields', 'details'):
                if type(v) is not dict:
                    returndict[u'details'][u'message'] = v
                else:
                    if len(v) > 0:
                        for details_key, details_value in v.iteritems():
                            returndict[u'details'][details_key] = details_value
            # custom fields/details as a one off, not in an array
            # i.e. fields.something=value or details.something=value
            # move them to a dict for consistency in querying
            elif k.startswith('fields.') or k.startswith('details.'):
                newName = k.replace('fields.', '')
                newName = newName.lower().replace('details.', '')
                # add a dict to hold the details if it doesn't exist
                if 'details' not in returndict:
                    returndict[u'details'] = dict()
                # add the field, with a special case for shippers that don't
                # send details in an array as int/floats/strings; we let them
                # dictate the data type with the field_datatype convention
                if newName.endswith('_int'):
                    returndict[u'details'][unicode(newName)] = int(v)
                elif newName.endswith('_float'):
                    returndict[u'details'][unicode(newName)] = float(v)
                else:
                    returndict[u'details'][unicode(newName)] = toUnicode(v)
            else:
                returndict[u'details'][k] = v

        if 'utctimestamp' not in returndict:
            # default in case we don't find a reasonable timestamp
            returndict['utctimestamp'] = toUTC(datetime.now()).isoformat()
    except Exception as e:
        logger.exception(e)
        logger.error('Malformed message: %r' % aDict)

    return returndict
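# Sketch of the 'fields.name_datatype' convention handled above: a one-off
# key like fields.x_int=5 is folded into details, with the suffix dictating
# the data type. Toy inputs, same folding logic.
def fold(key, value, details):
    name = key.replace('fields.', '').lower().replace('details.', '')
    if name.endswith('_int'):
        details[name] = int(value)
    elif name.endswith('_float'):
        details[name] = float(value)
    else:
        details[name] = value

details = {}
fold('fields.retries_int', '3', details)
fold('details.load_float', '0.75', details)
print(details)   # {'retries_int': 3, 'load_float': 0.75}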