def handler(event, context):
    # get the api key
    meraki_api_key = secrets_manager.get_secret_value(
        SecretId=MERAKI_API_KEY_NAME)["SecretString"]

    # set up a session
    session = requests.Session()
    session.headers = {
        'Content-Type': 'application/json',
        'X-Cisco-Meraki-API-Key': meraki_api_key
    }

    # get the org id
    url = 'https://api.meraki.com/api/v0/organizations'
    result = session.get(url)
    organizationId = result.json()[0]['id']

    # get the networks associated with this org
    url = f'https://api.meraki.com/api/v0/organizations/{organizationId}/networks'
    result = session.get(url)
    networks = result.json()

    last_run_time = utcnow().isoformat()
    records_retrieved = False
    per_page = 1000

    # Meraki has 'productTypes': ['appliance', 'camera', 'switch', 'wireless']
    # what event/product types do we want?
    event_set = set(MERAKI_PRODUCT_TYPES.split(","))
    for network in networks:
        network_id = network["id"]
        # get events since the last time we ran (default an hour ago)
        start_after = get_parameter('/meraki-events/lastquerytime',
                                    (utcnow() - timedelta(minutes=60)).isoformat())
        # reformat the date string to what Meraki likes:
        # Z instead of +00:00
        start_after = start_after.replace('+00:00', 'Z')
        logger.info(f"Looking for records since: {start_after}")

        product_set = set(network["productTypes"])
        # only query product types this network has
        # that we were also configured to collect
        for event_type in event_set.intersection(product_set):
            next_page = True
            while next_page:
                url = f'https://api.meraki.com/api/v0/networks/{network_id}/events?productType={event_type}&perPage={per_page}&startingAfter={start_after}'
                result = session.get(url)
                events = result.json()['events']
                start_after = result.json()['pageEndAt']
                if len(events) == 0:
                    next_page = False
                else:
                    logger.info(
                        f"sending: {len(events)} meraki records to firehose")
                    send_to_firehose(events)
                    records_retrieved = True

    # sometimes the activity log lags behind realtime,
    # so regardless of the time we request, records may not be available until later.
    # only move the time forward if we received records.
    if records_retrieved:
        put_parameter('/meraki-events/lastquerytime', last_run_time)
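# The get_parameter/put_parameter watermark helpers aren't shown in this
# excerpt. A minimal sketch of what they plausibly look like, assuming AWS SSM
# Parameter Store via boto3 (an assumption, not the project's actual code):

import boto3

ssm = boto3.client('ssm')

def get_parameter(name, default):
    # return the stored watermark, or the supplied default on first run
    try:
        return ssm.get_parameter(Name=name)['Parameter']['Value']
    except ssm.exceptions.ParameterNotFound:
        return default

def put_parameter(name, value):
    # advance the watermark so the next run picks up where this one ended
    ssm.put_parameter(Name=name, Value=value, Type='String', Overwrite=True)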
def onMessage(self, message, metadata):
    # our target shell
    event_shell = {
        "utctimestamp": utcnow().isoformat(),
        "severity": "INFO",
        "summary": "UNKNOWN",
        "category": "UNKNOWN",
        "source": "UNKNOWN",
        "tags": [],
        "plugins": [],
        "details": {},
    }

    # maybe the shell elements are already there?
    event_set = set(enum_keys(event_shell))
    message_set = set(enum_keys(message))
    if not event_set.issubset(message_set):
        # we have work to do:
        # merge the dicts letting any message values win;
        # if the message lacks any keys, our shell values win
        message = merge(event_shell, message)

    # move any non-shell keys to 'details'
    for item in message_set:
        # enum_keys traverses sub dicts and we only move the top level,
        # so check that the key is not a core element,
        # is present in the top level, and move it to details
        if item not in event_shell and item in message:
            message["details"][item] = message.get(item)
            del message[item]
    return (message, metadata)
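# Usage sketch (illustrative): a bare message gains the shell fields and its
# non-shell keys move under 'details'. `MessageShellPlugin` is a hypothetical
# name for the class holding onMessage above.

plugin = MessageShellPlugin()
msg, meta = plugin.onMessage({"eventname": "login", "hostname": "web1"}, {})
assert msg["severity"] == "INFO"              # filled in from the shell
assert msg["details"]["hostname"] == "web1"   # non-shell key moved to details
assert "eventname" not in msg                 # now lives only under details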
def expire_sequence_alerts(db):
    # iterate inflight sequence alerts;
    # for any that are past their expiration date,
    # remove the inflight entry
    inflight_alerts = db["inflight_alerts"]
    alerts = inflight_alerts.find({}).sort("utcepoch", pymongo.DESCENDING)
    for alert in alerts:
        if toUTC(alert["expiration"]) < utcnow():
            inflight_alerts.delete_one({"_id": alert["_id"]})
def handle(self, *args, **options):
    scheduler = django_rq.get_scheduler('default', interval=5)
    print('updating scheduled jobs')
    now = dates.utcnow()
    self.current_jobs = []
    self.cancel_old_periodic_jobs(scheduler)
    for job_info in settings.REPEATING_JOBS:
        self.schedule_job(now, scheduler, job_info)
def determine_deadman_trigger(alert_params, events):
    """Given a deadman alert's params and a set of events (or lack thereof),
    determine if it should fire and resolve summary/snippets, etc.

    Largely the same as a threshold alert, except this accounts for a lack
    of events (altogether missing, or below a count) as the trigger.
    """
    counts = mostCommon(events, alert_params["aggregation_key"])
    if not events:
        # deadman alerts are built to notice
        # when expected events are missing,
        # but that means we have no events to pass on;
        # make a meta event for the fact that events are missing
        events = []
        meta_event = {
            "utctimestamp": utcnow().isoformat(),
            "severity": "INFO",
            "summary": "Expected event not found",
            "category": "deadman",
            "source": "deadman",
            "tags": ["deadman"],
            "plugins": [],
            "details": {},
        }
        events.append(meta_event)
    if not counts:
        # make up a metadata count
        counts = [(alert_params["aggregation_key"], 0)]
    for i in counts:
        # a lack of events, or an event count below the threshold, is a trigger
        if i[1] <= alert_params["threshold"]:
            alert = alert_params
            alert["triggered"] = True
            # set the summary via chevron/mustache template
            # with the alert plus metadata
            metadata = {"metadata": {"value": i[0], "count": i[1]}}
            alert = merge(alert, metadata)
            # limit events to those matching the aggregation_key value
            # so the alert only gets events that match the mostCommon counts
            alert["events"] = []
            for event in events:
                dotted_event = DotDict(event)
                if i[0] == dotted_event.get(alert_params["aggregation_key"]):
                    alert["events"].append(dotted_event)
            alert["summary"] = chevron.render(alert["summary"], alert)
            # walk the alert events for any requested event snippets
            for event in alert["events"][:alert_params["event_sample_count"]]:
                alert["summary"] += " " + chevron.render(
                    alert_params["event_snippet"], event)
            yield alert
def validate_outlook_token_lifetime(payload):
    if 'nbf' not in payload:
        raise APIException('Missing valid from date')
    if 'exp' not in payload:
        raise APIException('Missing valid to date')
    valid_from_epoch = payload['nbf']
    valid_until_epoch = payload['exp']
    valid_from_datetime = datetime.utcfromtimestamp(valid_from_epoch).replace(
        tzinfo=utc)
    valid_until_datetime = datetime.utcfromtimestamp(
        valid_until_epoch).replace(tzinfo=utc)
    # 5 minute padding to accommodate time difference b/t server & client
    padding = timedelta(minutes=5)
    if valid_from_datetime - utcnow() > padding:
        raise APIException('Token not yet valid')
    if utcnow() - valid_until_datetime > padding:
        raise APIException('Token expired')
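# Worked example (illustrative): a token whose `nbf` is three minutes in the
# future still validates, because that skew sits inside the five minute
# padding; one that expired ten minutes ago does not.

import time

now_epoch = int(time.time())
validate_outlook_token_lifetime({'nbf': now_epoch + 180, 'exp': now_epoch + 3600})  # ok
try:
    validate_outlook_token_lifetime({'nbf': now_epoch - 3600, 'exp': now_epoch - 600})
except APIException:
    pass  # 'Token expired': 10 minutes past exp exceeds the 5 minute padding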
def test_expire_sequence_alerts(self, mongo_connection):
    # setup
    db = mongo_connection.test_alerta
    inflight_alerts = db["inflight_alerts"]
    inflight_alerts.delete_many({})
    assert inflight_alerts.count_documents({}) == 0

    # create an expired sequence alert and see if the routine removes it
    offset = pd.Timedelta("7 days").to_pytimedelta()
    last_week = utcnow() - offset
    alert_shell = {"utctimestamp": last_week.isoformat(), "lifespan": "1 day"}
    alert_shell = get_sequence_alert_shell(alert_shell)
    save_inflight_alert(db, alert_shell)
    assert inflight_alerts.count_documents({}) == 1

    expire_sequence_alerts(db)
    assert inflight_alerts.count_documents({}) == 0

    # tear down
    inflight_alerts.delete_many({})
    assert inflight_alerts.count_documents({}) == 0
def get_sequence_alert_shell(alert_params):
    """default dict for a sequence alert"""
    alert = {
        "alert_name": alert_params.get("alert_name", "unnamed"),
        "alert_type": alert_params.get("alert_type", "sequence"),
        "utctimestamp": alert_params.get("utctimestamp", utcnow().isoformat()),
        "lifespan": alert_params.get("lifespan", "3 days"),
        "severity": alert_params.get("severity", "INFO"),
        "summary": alert_params.get("summary", "sequence alert!"),
        "debug": alert_params.get("debug", True),
        "slots": alert_params.get("slots", []),
    }
    # calculate expiration in date format
    offset = pd.Timedelta(alert["lifespan"]).to_pytimedelta()
    alert["expiration"] = alert_params.get(
        "expiration", (toUTC(alert["utctimestamp"]) + offset).isoformat())
    return merge(alert_params, alert)
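# Worked example (illustrative): how a lifespan string becomes an expiration
# timestamp. pandas parses "3 days" into a Timedelta, which converts to a
# stdlib timedelta and is added to the alert's utctimestamp.

import pandas as pd
from datetime import datetime, timezone

ts = datetime(2020, 3, 16, 12, 0, tzinfo=timezone.utc)
offset = pd.Timedelta("3 days").to_pytimedelta()
print((ts + offset).isoformat())  # 2020-03-19T12:00:00+00:00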
def get_deadman_alert_shell(alert_params):
    """default dict for a deadman alert"""
    alert = {
        "alert_name": alert_params.get("alert_name", "unnamed"),
        "alert_type": alert_params.get("alert_type", "deadman"),
        "utctimestamp": utcnow().isoformat(),
        "severity": alert_params.get("severity", "INFO"),
        "summary": alert_params.get("summary", "deadman alert!"),
        "event_snippet": alert_params.get("event_snippet", ""),
        "event_sample_count": alert_params.get("event_sample_count", 0),
        "category": alert_params.get("category", "deadman"),
        "tags": alert_params.get("tags", ["deadman"]),
        "threshold": alert_params.get("threshold", 0),
        "aggregation_key": alert_params.get("aggregation_key", "none"),
        "criteria": alert_params.get("criteria", ""),
        "debug": alert_params.get("debug", True),
        "events": [],
    }
    return merge(alert_params, alert)
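# Usage sketch (illustrative): build a deadman shell and run the trigger with
# no events at all. With the default threshold of 0 and an empty result set,
# the made-up count of 0 satisfies `count <= threshold` and the alert fires.

params = get_deadman_alert_shell({
    "alert_name": "expected-heartbeat-missing",
    "summary": "heartbeat events seen: {{metadata.count}}",
    "aggregation_key": "source",
})
for alert in determine_deadman_trigger(params, []):
    assert alert["triggered"]
    assert alert["summary"] == "heartbeat events seen: 0"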
def handler(event, context):
    EVENTS_V1 = '/siem/v1/events'
    ALERTS_V1 = '/siem/v1/alerts'
    ENDPOINTS = [EVENTS_V1, ALERTS_V1]
    NOISY_EVENTTYPES = [
        "Event::Endpoint::UpdateFailure",
        "Event::Endpoint::UpdateSuccess",
        "Event::Endpoint::SavDisabled",
        "Event::Endpoint::SavEnabled",
        "Event::Endpoint::Enc::DiskEncryptionStatusChanged"
    ]
    exclude_types = 'exclude_types=' + ','.join(NOISY_EVENTTYPES)

    sophos_api_key = secrets_manager.get_secret_value(
        SecretId=SOPHOS_API_KEY_NAME)["SecretString"]
    sophos_basic_auth_string = secrets_manager.get_secret_value(
        SecretId=SOPHOS_BASIC_AUTH_STRING_NAME)["SecretString"]

    url = "https://api1.central.sophos.com/gateway"
    last_run_time = utcnow().isoformat()
    records_retrieved = False

    # set up a session
    session = requests.Session()
    session.headers = {
        'Content-Type': 'application/json; charset=utf-8',
        'Accept': 'application/json',
        'X-Locale': 'en',
        'Authorization': f'Basic {sophos_basic_auth_string}',
        'x-api-key': sophos_api_key
    }

    # figure out the last time we checked for records
    since = get_parameter('/sophos-events/lastquerytime',
                          (utcnow() - timedelta(hours=12)).isoformat())
    # 'since' is an iso formatted utc string;
    # sophos wants epoch seconds
    since = int(calendar.timegm(toUTC(since).timetuple()))
    params = {'limit': 1000, 'from_date': since}

    for endpoint in ENDPOINTS:
        if 'cursor' in params:
            # rm any cursor left over from the last endpoint
            del params['cursor']
        next_page = True
        while next_page:
            # build an ending URL like
            # https://api1.central.sophos.com/gateway/siem/v1/events?limit=1000&from_date=1584374987&exclude_types=Event::Endpoint::UpdateFailure,Event::Endpoint::UpdateSuccess,Event::Endpoint::SavDisabled,Event::Endpoint::SavEnabled,Event::Endpoint::Enc::DiskEncryptionStatusChanged
            args = '&'.join(['%s=%s' % (k, v) for k, v in params.items()] + [exclude_types])
            events_request_url = '%s%s?%s' % (url, endpoint, args)
            response = session.get(events_request_url)
            result = response.json()
            events = result['items']
            if events:
                logger.info(
                    f"sending: {len(events)} sophos records to firehose")
                send_to_firehose(events)
                records_retrieved = True
            if result['has_more']:
                # pick up the next page where this one ended
                params['cursor'] = result['next_cursor']
            else:
                next_page = False

    # sometimes the activity log lags behind realtime,
    # so regardless of the time we request, records may not be available until later.
    # only move the time forward if we received records.
    if records_retrieved:
        put_parameter('/sophos-events/lastquerytime', last_run_time)
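# Worked example (illustrative): the iso-string-to-epoch conversion used
# above. calendar.timegm treats the struct_time as UTC (unlike time.mktime,
# which assumes local time), so a UTC datetime converts correctly.

import calendar
from datetime import datetime, timezone

since = datetime(2020, 3, 16, 15, 29, 47, tzinfo=timezone.utc)
print(int(calendar.timegm(since.timetuple())))  # 1584372587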
def setup_schedule(self, scheduler):
    now = dates.utcnow()
    self.cancel_old_periodic_jobs(scheduler)
    for job_info in settings.REPEATING_JOBS:
        self.schedule_job(now, scheduler, job_info)
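# Hypothetical example of the settings.REPEATING_JOBS structure consumed by
# the loops above; the real field names live in the project's settings module
# and may differ. Shown only to illustrate the scheduling loop's input.

REPEATING_JOBS = [
    {'func': 'myapp.tasks.sync_events', 'interval_minutes': 15},       # hypothetical
    {'func': 'myapp.tasks.prune_old_alerts', 'interval_minutes': 60},  # hypothetical
]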
def test_save_resolved_sequence_alert(self, mongo_connection):
    # sequence alerts are just a series of alerts
    # which should all be resolved (in order) before the alert
    # is created.
    # the alerts are carried in 'slots' in the sequence alert;
    # once all slots are full of events, the alert fires.

    # setup
    db = mongo_connection.test_alerta
    inflight_alerts = db["inflight_alerts"]
    alerts = db["alerts"]
    alerts.delete_many({})
    inflight_alerts.delete_many({})
    assert alerts.count_documents({}) == 0
    assert inflight_alerts.count_documents({}) == 0

    # create a fulfilled sequence alert and see if
    # it triggers an alert creation
    alert_shell = {"utctimestamp": utcnow().isoformat(), "lifespan": "7 days"}
    alert_shell = get_sequence_alert_shell(alert_shell)

    # create some events that satisfy the sequence:
    # root user with no mfa
    events = []
    for file in glob.glob("./tests/samples/sample_cloudtrail_login_no_mfa.json"):
        events += json.load(open(file))
    assert len(events) > 0

    # a summary that will get resolved by the events in the slots
    alert_shell[
        "summary"
    ] = "{{slots.0.events.0.eventname}} by {{slots.0.events.0.useridentity.type}} {{metadata.count}} mfa:{{slots.0.events.0.additionaleventdata.mfaused}}"
    alert_shell["slots"] = []

    # make a slot of a threshold alert + events that trigger it
    alert_slot = get_threshold_alert_shell({})
    alert_slot[
        "event_snippet"
    ] = "{{eventname}}/{{responseelements.consolelogin}} mfa:{{additionaleventdata.mfaused}} from {{sourceipaddress}}"
    alert_slot["aggregation_key"] = "additionaleventdata.mfaused"
    alert_slot["events"] = events

    # since we are injecting alerts instead of querying athena,
    # resolve the slot threshold alert manually
    for alert in determine_threshold_trigger(alert_slot, events):
        # did the snippet get resolved?
        assert "ConsoleLogin/Success" in alert["summary"]
        # did events get copied into the resulting alert?
        assert len(alert["events"]) > 0
        # add this resolved threshold alert as a slot in the sequence alert
        alert_shell["slots"].append(alert)

    # save this inflight sequence alert
    save_inflight_alert(db, alert_shell)
    assert inflight_alerts.count_documents({}) == 1

    # run the routine resolving sequence alerts: create_sequence_alerts
    create_sequence_alerts(db)

    # assert there is a new alert created
    assert alerts.count_documents({}) == 1
    for alert in alerts.find({}):
        logger.info(f"found db alert: {alert['summary']}")
        # ensure the summary description was
        # resolved correctly by chevron
        assert "ConsoleLogin by Root" in alert["summary"]
        # ensure event snippets are preserved
        assert "ConsoleLogin/Success" in alert["slots"][0]["summary"]
        logger.info(f"found slot in sequence: {alert['slots'][0]['summary']}")

    # assert the inflight alert is removed
    assert inflight_alerts.count_documents({}) == 0