def alert(self, matches):
    body = ''
    for match in matches:
        body += unicode(BasicMatchString(self.rule, match))
        # Separate text of aggregated alerts with dashes
        if len(matches) > 1:
            body += '\n----------------------------------------\n'

    # post to hipchat
    headers = {'content-type': 'application/json'}
    payload = {
        'color': 'red',
        'message': body.replace('\n', '<br />'),
        'notify': True
    }

    try:
        if self.hipchat_ignore_ssl_errors:
            requests.packages.urllib3.disable_warnings()
        response = requests.post(self.url, data=json.dumps(payload), headers=headers,
                                 verify=not self.hipchat_ignore_ssl_errors)
        warnings.resetwarnings()
        response.raise_for_status()
    except RequestException as e:
        raise EAException("Error posting to hipchat: %s" % e)
    elastalert_logger.info("Alert sent to HipChat room %s" % self.hipchat_room_id)

def alert(self, matches):
    body = u'⚠ *%s* ⚠ ```\n' % (self.create_title(matches))
    for match in matches:
        body += unicode(BasicMatchString(self.rule, match))
        # Separate text of aggregated alerts with dashes
        if len(matches) > 1:
            body += '\n----------------------------------------\n'
    body += u' ```'

    headers = {'content-type': 'application/json'}
    # set https proxy, if it was provided
    proxies = {'https': self.telegram_proxy} if self.telegram_proxy else None
    payload = {
        'chat_id': self.telegram_room_id,
        'text': body,
        'parse_mode': 'markdown',
        'disable_web_page_preview': True
    }

    try:
        response = requests.post(self.url, data=json.dumps(payload, cls=DateTimeEncoder),
                                 headers=headers, proxies=proxies)
        warnings.resetwarnings()
        response.raise_for_status()
    except RequestException as e:
        raise EAException("Error posting to Telegram: %s" % e)
    elastalert_logger.info("Alert sent to Telegram room %s" % self.telegram_room_id)

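# A hedged sketch (not part of the alerter above; all values hypothetical) of
# the Telegram Bot API payload it assembles. 'parse_mode': 'markdown' is why
# the body is wrapped in ``` fences: match details render as a code block.
import json

example_payload = {
    'chat_id': '@example_room',  # hypothetical room id
    'text': u'\u26a0 *Example rule* \u26a0 ```\nmatch details here\n```',
    'parse_mode': 'markdown',
    'disable_web_page_preview': True,
}
print(json.dumps(example_payload))
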
def alert(self, matches):
    body = ''
    for match in matches:
        body += unicode(BasicMatchString(self.rule, match))
        # Separate text of aggregated alerts with dashes
        if len(matches) > 1:
            body += '\n----------------------------------------\n'
    body = self.format_body(body)

    # post to slack
    headers = {'content-type': 'application/json'}
    # set https proxy, if it was provided
    proxies = {'https': self.slack_proxy} if self.slack_proxy else None
    payload = {
        'username': self.slack_username_override,
        'icon_emoji': self.slack_emoji_override,
        'attachments': [{
            'color': self.slack_msg_color,
            'title': self.rule['name'],
            'text': body,
            'fields': []
        }]
    }

    try:
        response = requests.post(self.slack_webhook_url, data=json.dumps(payload),
                                 headers=headers, proxies=proxies)
        response.raise_for_status()
    except RequestException as e:
        raise EAException("Error posting to slack: %s" % e)
    elastalert_logger.info("Alert sent to Slack")

def alert(self, matches): body = "" for match in matches: body += unicode(BasicMatchString(self.rule, match)) # Separate text of aggregated alerts with dashes if len(matches) > 1: body += "\n----------------------------------------\n" # Hipchat sends 400 bad request on messages longer than 10000 characters if len(body) > 9999: body = body[:9980] + "..(truncated)" # post to hipchat headers = {"content-type": "application/json"} payload = {"color": "red", "message": body.replace("\n", "<br />"), "notify": True} try: if self.hipchat_ignore_ssl_errors: requests.packages.urllib3.disable_warnings() response = requests.post( self.url, data=json.dumps(payload), headers=headers, verify=not self.hipchat_ignore_ssl_errors ) warnings.resetwarnings() response.raise_for_status() except RequestException as e: raise EAException("Error posting to hipchat: %s" % e) elastalert_logger.info("Alert sent to HipChat room %s" % self.hipchat_room_id)
def alert(self, matches):
    body = ''
    for match in matches:
        body += unicode(BasicMatchString(self.rule, match))
        # Separate text of aggregated alerts with dashes
        if len(matches) > 1:
            body += '\n----------------------------------------\n'

    post = {}
    post['apiKey'] = self.api_key
    post['message'] = self.create_default_title(matches)
    if self.account:
        post['user'] = self.account
    if self.recipients:
        post['recipients'] = self.recipients
    if self.teams:
        post['teams'] = self.teams
    post['description'] = body
    post['source'] = 'ElastAlert'
    post['tags'] = self.tags

    logging.debug(json.dumps(post))

    try:
        r = requests.post(self.to_addr, json=post)
        logging.debug('request response: {0}'.format(r))
        if r.status_code != 200:
            elastalert_logger.info("Error response from {0} \n "
                                   "API Response: {1}".format(self.to_addr, r))
            r.raise_for_status()
        logging.info("Alert sent to OpsGenie")
    except Exception as err:
        raise EAException("Error sending alert: {0}".format(err))

def alert(self, matches):
    title = self.create_title(matches)

    if self.bump_tickets:
        ticket = self.find_existing_ticket(matches)
        if ticket:
            elastalert_logger.info('Commenting on existing ticket %s' % (ticket.key))
            for match in matches:
                try:
                    self.comment_on_ticket(ticket, match)
                except JIRAError as e:
                    logging.exception("Error while commenting on ticket %s: %s" % (ticket, e))
            if self.pipeline is not None:
                self.pipeline['jira_ticket'] = ticket
                self.pipeline['jira_server'] = self.server
            return None

    self.jira_args['summary'] = title
    self.jira_args['description'] = self.create_alert_body(matches)

    try:
        self.issue = self.client.create_issue(**self.jira_args)
    except JIRAError as e:
        raise EAException("Error creating JIRA ticket: %s" % (e))
    elastalert_logger.info("Opened Jira ticket: %s" % (self.issue))

    if self.pipeline is not None:
        self.pipeline['jira_ticket'] = self.issue
        self.pipeline['jira_server'] = self.server

def alert(self, matches):
    body = ''
    for match in matches:
        body += unicode(BasicMatchString(self.rule, match))
        # Separate text of aggregated alerts with dashes
        if len(matches) > 1:
            body += '\n----------------------------------------\n'

    # post to pagerduty
    headers = {'content-type': 'application/json'}
    payload = {
        'service_key': self.pagerduty_service_key,
        'description': self.rule['name'],
        'event_type': 'trigger',
        'client': self.pagerduty_client_name,
        'details': {
            "information": body.encode('UTF-8'),
        },
    }

    try:
        response = requests.post(self.url, data=json.dumps(payload, ensure_ascii=False), headers=headers)
        response.raise_for_status()
    except RequestException as e:
        raise EAException("Error posting to pagerduty: %s" % e)
    elastalert_logger.info("Trigger sent to PagerDuty")

def alert(self, matches):
    body = self.create_alert_body(matches)

    # HipChat sends 400 bad request on messages longer than 10000 characters
    if len(body) > 9999:
        body = body[:9980] + '..(truncated)'

    # Use appropriate line ending for text/html
    if self.hipchat_message_format == 'html':
        body = body.replace('\n', '<br />')

    # Post to HipChat
    headers = {'content-type': 'application/json'}
    # set https proxy, if it was provided
    proxies = {'https': self.hipchat_proxy} if self.hipchat_proxy else None
    payload = {
        'color': self.hipchat_msg_color,
        'message': body,
        'message_format': self.hipchat_message_format,
        'notify': self.hipchat_notify,
        'from': self.hipchat_from
    }

    try:
        if self.hipchat_ignore_ssl_errors:
            requests.packages.urllib3.disable_warnings()
        response = requests.post(self.url, data=json.dumps(payload, cls=DateTimeEncoder),
                                 headers=headers, verify=not self.hipchat_ignore_ssl_errors,
                                 proxies=proxies)
        warnings.resetwarnings()
        response.raise_for_status()
    except RequestException as e:
        raise EAException("Error posting to HipChat: %s" % e)
    elastalert_logger.info("Alert sent to HipChat room %s" % self.hipchat_room_id)

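# Standalone sketch of the truncation-plus-payload logic above, assuming the
# same 10,000-character HipChat limit; build_hipchat_payload is a hypothetical
# helper, not part of the alerter.
import json

def build_hipchat_payload(body, color='red', message_format='html'):
    # HipChat rejects messages longer than 10000 characters with a 400
    if len(body) > 9999:
        body = body[:9980] + '..(truncated)'
    if message_format == 'html':
        body = body.replace('\n', '<br />')
    return {'color': color, 'message': body, 'message_format': message_format,
            'notify': True, 'from': 'ElastAlert'}

print(json.dumps(build_hipchat_payload('example alert\nsecond line')))
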
def alert(self, matches):
    body = ''
    host = ''
    for match in matches:
        host = match['host'][0]
        body += unicode(BasicMatchString(self.rule, match))
        # Separate text of aggregated alerts with dashes
        if len(matches) > 1:
            body += '\n----------------------------------------\n'

    dashboard_url = self.rule['kibana4_dashboard_url']

    # post to hipchat
    headers = {'content-type': 'application/json'}
    payload = {
        'color': 'green',
        'message': self.rule['name'] + ' from host {} <a href="{}">Details</a>'.format(host, dashboard_url),
        'notify': True,
        'message_format': 'html'
    }
    # 'message': body.replace('\n', '<br />'),

    try:
        if self.hipchat_ignore_ssl_errors:
            requests.packages.urllib3.disable_warnings()
        response = requests.post(self.url, data=json.dumps(payload), headers=headers,
                                 verify=not self.hipchat_ignore_ssl_errors)
        warnings.resetwarnings()
        response.raise_for_status()
    except RequestException as e:
        raise EAException("Error posting to hipchat: %s" % e)
    elastalert_logger.info("Alert sent to HipChat room %s" % self.hipchat_room_id)

def alert(self, matches):
    body = self.create_alert_body(matches)
    body = self.format_body(body)

    # post to slack
    headers = {'content-type': 'application/json'}
    # set https proxy, if it was provided
    proxies = {'https': self.slack_proxy} if self.slack_proxy else None
    payload = {
        'username': self.slack_username_override,
        'channel': self.slack_channel_override,
        'parse': self.slack_parse_override,
        'text': self.slack_text_string,
        'attachments': [
            {
                'color': self.slack_msg_color,
                'title': self.create_title(matches),
                'text': body,
                'fields': []
            }
        ]
    }
    if self.slack_icon_url_override != '':
        payload['icon_url'] = self.slack_icon_url_override
    else:
        payload['icon_emoji'] = self.slack_emoji_override

    for url in self.slack_webhook_url:
        try:
            response = requests.post(url, data=json.dumps(payload, cls=DateTimeEncoder),
                                     headers=headers, proxies=proxies)
            response.raise_for_status()
        except RequestException as e:
            raise EAException("Error posting to slack: %s" % e)
    elastalert_logger.info("Alert sent to Slack")

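# Sketch of the icon branch above (hypothetical values): Slack webhooks accept
# either icon_url or icon_emoji, so the payload carries exactly one of them.
example_payload = {
    'username': 'elastalert-bot',
    'channel': '#alerts',
    'attachments': [{'color': 'danger', 'title': 'Example rule',
                     'text': 'match details', 'fields': []}],
}
icon_url_override = ''  # empty string means "fall back to the emoji"
if icon_url_override != '':
    example_payload['icon_url'] = icon_url_override
else:
    example_payload['icon_emoji'] = ':ghost:'
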
def alert(self, matches):
    body = self.create_alert_body(matches)

    # post to victorops
    headers = {'content-type': 'application/json'}
    # set https proxy, if it was provided
    proxies = {'https': self.victorops_proxy} if self.victorops_proxy else None
    payload = {
        "message_type": self.victorops_message_type,
        "entity_display_name": self.victorops_entity_display_name,
        "monitoring_tool": "Elastalert",
        "state_message": body
    }

    try:
        response = requests.post(self.url, data=json.dumps(payload), headers=headers, proxies=proxies)
        response.raise_for_status()
    except RequestException as e:
        raise EAException("Error posting to VictorOps: %s" % e)
    elastalert_logger.info("Trigger sent to VictorOps")

def alert(self, matches):
    body = self.create_alert_body(matches)

    # HipChat sends 400 bad request on messages longer than 10000 characters
    if len(body) > 9999:
        body = body[:9980] + '..(truncated)'

    # post to hipchat
    headers = {'content-type': 'application/json'}
    payload = {
        'color': self.hipchat_msg_color,
        'message': body.replace('\n', '<br />'),
        'notify': True
    }

    try:
        if self.hipchat_ignore_ssl_errors:
            requests.packages.urllib3.disable_warnings()
        response = requests.post(self.url, data=json.dumps(payload), headers=headers,
                                 verify=not self.hipchat_ignore_ssl_errors)
        warnings.resetwarnings()
        response.raise_for_status()
    except RequestException as e:
        raise EAException("Error posting to hipchat: %s" % e)
    elastalert_logger.info("Alert sent to HipChat room %s" % self.hipchat_room_id)

def alert(self, matches): title = self.create_title(matches) if self.bump_tickets: ticket = self.find_existing_ticket(matches) if ticket: elastalert_logger.info("Commenting on existing ticket %s" % (ticket.key)) for match in matches: self.comment_on_ticket(ticket, match) if self.pipeline is not None: self.pipeline["jira_ticket"] = ticket self.pipeline["jira_server"] = self.server return description = self.description + "\n" for match in matches: description += str(JiraFormattedMatchString(self.rule, match)) if len(matches) > 1: description += "\n----------------------------------------\n" self.jira_args["summary"] = title self.jira_args["description"] = description try: self.issue = self.client.create_issue(**self.jira_args) except JIRAError as e: raise EAException("Error creating JIRA ticket: %s" % (e)) elastalert_logger.info("Opened Jira ticket: %s" % (self.issue)) if self.pipeline is not None: self.pipeline["jira_ticket"] = self.issue self.pipeline["jira_server"] = self.server
def alert(self, matches): title = self.create_title(matches) if self.bump_tickets: ticket = self.find_existing_ticket(matches) if ticket: elastalert_logger.info('Commenting on existing ticket %s' % (ticket.key)) for match in matches: try: self.comment_on_ticket(ticket, match) except JIRAError as e: logging.exception("Error while commenting on ticket %s: %s" % (ticket, e)) if self.pipeline is not None: self.pipeline['jira_ticket'] = ticket self.pipeline['jira_server'] = self.server return description = self.description + '\n' for match in matches: description += unicode(JiraFormattedMatchString(self.rule, match)) if len(matches) > 1: description += '\n----------------------------------------\n' self.jira_args['summary'] = title self.jira_args['description'] = description try: self.issue = self.client.create_issue(**self.jira_args) except JIRAError as e: raise EAException("Error creating JIRA ticket: %s" % (e)) elastalert_logger.info("Opened Jira ticket: %s" % (self.issue)) if self.pipeline is not None: self.pipeline['jira_ticket'] = self.issue self.pipeline['jira_server'] = self.server
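# Hedged sketch of the jira_args dict that the create path expands into
# client.create_issue(**jira_args); the project and issuetype keys are
# assumptions based on the python-jira API and are not shown above.
example_jira_args = {
    'project': {'key': 'OPS'},      # hypothetical project key
    'issuetype': {'name': 'Task'},  # hypothetical issue type
    'summary': 'Example rule triggered',
    'description': 'match details\n',
}
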
def alert(self, matches):
    body = self.create_alert_body(matches)
    self.senddata(body)
    elastalert_logger.info("send message to admin")

def alert(self, matches):
    body = ''
    for match in matches:
        body += unicode(BasicMatchString(self.rule, match))
        # Separate text of aggregated alerts with dashes
        if len(matches) > 1:
            body += '\n----------------------------------------\n'
    body = self.format_body(body)

    # post to slack
    headers = {'content-type': 'application/json'}
    # set https proxy, if it was provided
    proxies = {'https': self.slack_proxy} if self.slack_proxy else None
    payload = {
        'username': self.slack_username_override,
        'icon_emoji': self.slack_emoji_override,
        'attachments': [
            {
                'color': self.slack_msg_color,
                'title': self.rule['name'],
                'text': body,
                'fields': []
            }
        ]
    }

    try:
        response = requests.post(self.slack_webhook_url, data=json.dumps(payload),
                                 headers=headers, proxies=proxies)
        response.raise_for_status()
    except RequestException as e:
        raise EAException("Error posting to slack: %s" % e)
    elastalert_logger.info("Alert sent to Slack")

def get_all_terms(self, args):
    """ Performs a terms aggregation for each field to get every existing term. """
    self.es = Elasticsearch(host=self.rules["es_host"], port=self.rules["es_port"])
    window_size = datetime.timedelta(**self.rules.get("terms_window_size", {"days": 30}))
    field_name = {"field": "", "size": 2147483647}  # Integer.MAX_VALUE
    query_template = {"aggs": {"values": {"terms": field_name}}}
    if args and args.start:
        end = ts_to_dt(args.start)
    else:
        end = ts_now()
    start = end - window_size
    if self.rules.get("use_strftime_index"):
        index = format_index(self.rules["index"], start, end)
    else:
        index = self.rules["index"]
    time_filter = {self.rules["timestamp_field"]: {"lte": dt_to_ts(end), "gte": dt_to_ts(start)}}
    query_template["filter"] = {"bool": {"must": [{"range": time_filter}]}}
    query = {"aggs": {"filtered": query_template}}

    for field in self.fields:
        field_name["field"] = field
        res = self.es.search(body=query, index=index, ignore_unavailable=True, timeout=50)
        if "aggregations" in res:
            buckets = res["aggregations"]["filtered"]["values"]["buckets"]
            keys = [bucket["key"] for bucket in buckets]
            self.seen_values[field] = keys
            elastalert_logger.info("Found %s unique values for %s" % (len(keys), field))
        else:
            self.seen_values[field] = []
            elastalert_logger.info("Found no values for %s" % (field))

def alert(self, matches):
    body = ''
    for match in matches:
        body += unicode(BasicMatchString(self.rule, match))
        # Separate text of aggregated alerts with dashes
        if len(matches) > 1:
            body += '\n----------------------------------------\n'

    post = {}
    post['apiKey'] = self.api_key
    post['message'] = self.create_default_title(matches)
    post['recipients'] = self.recipients
    post['description'] = body
    post['source'] = 'ElastAlert'
    post['tags'] = ['ElastAlert', self.rule['name']]

    logging.debug(json.dumps(post))

    try:
        r = requests.post(self.to_addr, json=post)
        logging.debug('request response: {0}'.format(r))
        if r.status_code != 200:
            elastalert_logger.info("Error response from {0} \n "
                                   "API Response: {1}".format(self.to_addr, r))
            r.raise_for_status()
        logging.info("Alert sent to OpsGenie")
    except Exception as err:
        raise EAException("Error sending alert: {0}".format(err))

def configure_logging(args, conf):
    # configure logging from config file if provided
    if 'logging' in conf:
        # load new logging config
        logging.config.dictConfig(conf['logging'])

    if args.verbose and args.debug:
        elastalert_logger.info(
            "Note: --debug and --verbose flags are set. --debug takes precedence."
        )

    # re-enable INFO log level on elastalert_logger in verbose/debug mode
    # (but don't touch it if it is already set to INFO or below by config)
    if args.verbose or args.debug:
        if elastalert_logger.level > logging.INFO or elastalert_logger.level == logging.NOTSET:
            elastalert_logger.setLevel(logging.INFO)

    if args.debug:
        elastalert_logger.info(
            "Note: In debug mode, alerts will be logged to console but NOT actually sent. "
            "To send them but remain verbose, use --verbose instead.")

    if not args.es_debug and 'logging' not in conf:
        logging.getLogger('elasticsearch').setLevel(logging.WARNING)

    if args.es_debug_trace:
        tracer = logging.getLogger('elasticsearch.trace')
        tracer.setLevel(logging.INFO)
        tracer.addHandler(logging.FileHandler(args.es_debug_trace))

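# Hedged sketch of a minimal 'logging' section that the dictConfig call above
# would accept; the keys follow the stdlib logging.config schema, and the
# handler name is hypothetical.
import logging.config

example_conf = {
    'logging': {
        'version': 1,
        'handlers': {'console': {'class': 'logging.StreamHandler'}},
        'root': {'level': 'INFO', 'handlers': ['console']},
    }
}
logging.config.dictConfig(example_conf['logging'])
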
def alert(self, matches):
    title = self.create_title(matches)

    if self.bump_tickets:
        ticket = self.find_existing_ticket(matches)
        if ticket:
            elastalert_logger.info('Commenting on existing ticket %s' % (ticket.key))
            for match in matches:
                self.comment_on_ticket(ticket, match)
            if self.pipeline is not None:
                self.pipeline['jira_ticket'] = ticket
                self.pipeline['jira_server'] = self.server
            return

    description = self.description + '\n'
    for match in matches:
        description += str(JiraFormattedMatchString(self.rule, match))
        if len(matches) > 1:
            description += '\n----------------------------------------\n'

    self.jira_args['summary'] = title
    self.jira_args['description'] = description

    try:
        self.issue = self.client.create_issue(**self.jira_args)
    except JIRAError as e:
        raise EAException("Error creating JIRA ticket: %s" % (e))
    elastalert_logger.info("Opened Jira ticket: %s" % (self.issue))

    if self.pipeline is not None:
        self.pipeline['jira_ticket'] = self.issue

def alert(self, matches):
    body = self.create_alert_body(matches)

    # post to pagerduty
    headers = {'content-type': 'application/json'}
    payload = {
        'service_key': self.pagerduty_service_key,
        'description': self.rule['name'],
        'event_type': 'trigger',
        'incident_key': self.pagerduty_incident_key,
        'client': self.pagerduty_client_name,
        'details': {
            "information": body.encode('UTF-8'),
        },
    }

    try:
        response = requests.post(self.url, data=json.dumps(payload, ensure_ascii=False), headers=headers)
        response.raise_for_status()
    except RequestException as e:
        raise EAException("Error posting to pagerduty: %s" % e)
    elastalert_logger.info("Trigger sent to PagerDuty")

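# Sketch of the PagerDuty v1 trigger payload above with hypothetical values;
# json.dumps is called with ensure_ascii=False so non-ASCII match data
# survives intact alongside the UTF-8 encoded body.
example_payload = {
    'service_key': 'abc123',  # hypothetical integration key
    'description': 'Example rule',
    'event_type': 'trigger',
    'incident_key': 'example-rule',
    'client': 'ElastAlert',
    'details': {'information': 'match details'},
}
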
def alert(self, matches):
    body = ''
    for match in matches:
        body += str(BasicMatchString(self.rule, match))
        # Separate text of aggregated alerts with dashes
        if len(matches) > 1:
            body += '\n----------------------------------------\n'

    # post to slack
    headers = {'content-type': 'application/json'}
    payload = {
        'username': self.slack_username_override,
        'icon_emoji': self.slack_emoji_override,
        'attachments': [{
            'color': self.slack_msg_color,
            'title': self.rule['name'],
            'text': body,
            'fields': []
        }]
    }

    try:
        response = requests.post(self.slack_webhook_url, json=payload, headers=headers)
        response.raise_for_status()
    except RequestException as e:
        raise EAException("Error posting to slack: %s" % e)
    elastalert_logger.info("Alert sent to Slack")

def check_matches(self, timestamp, query_key, aggregation_data):
    elastalert_logger.info(
        "===============================check_matches aggregation_data: %s" % str(aggregation_data))
    metric_val = aggregation_data[self.metric_key]['value']
    self.handle_event(aggregation_data, metric_val)

def senddata(self, content):
    mysql = Mysql(self.rule)
    now = datetime.datetime.now()
    now = now.strftime("%Y-%m-%d %H:%M:%S")
    insert_sql = 'insert into link_alert(' \
                 'alert_ruleid, ' \
                 'alert_rule, ' \
                 'alert_userid,' \
                 'alert_username,' \
                 'alert_channel,' \
                 'alert_account,' \
                 'alert_message,' \
                 'alert_time,' \
                 'alert_status' \
                 ') values ' \
                 '(%s,%s,%s,%s,%s,%s,%s,%s,"0")'
    for alertperson in self.rule['alertpersons']:
        insert_data = [
            self.rule['rule_id'],
            self.rule['name'],
            alertperson['user_id'],
            alertperson['user_name'],
            self.rule['alert_way'],
            alertperson['user_email'],
            content,
            now
        ]
        mysql.insertOne(insert_sql, insert_data)
    mysql.dispose()
    elastalert_logger.info("send msg and response: %s" % content)

def alert(self, matches):
    for match in matches:
        # Parse everything into description.
        description = str(BasicMatchString(self.rule, match))

    # Set proper headers
    headers = {
        "Content-Type": "application/json",
        "Accept": "application/json;charset=utf-8"
    }
    proxies = {'https': self.servicenow_proxy} if self.servicenow_proxy else None
    payload = {
        "description": description,
        "short_description": self.rule['short_description'],
        "comments": self.rule['comments'],
        "assignment_group": self.rule['assignment_group'],
        "category": self.rule['category'],
        "subcategory": self.rule['subcategory'],
        "cmdb_ci": self.rule['cmdb_ci'],
        "caller_id": self.rule["caller_id"]
    }

    try:
        response = requests.post(self.servicenow_rest_url,
                                 auth=(self.rule['username'], self.rule['password']),
                                 headers=headers,
                                 data=json.dumps(payload, cls=DateTimeEncoder),
                                 proxies=proxies)
        response.raise_for_status()
    except RequestException as e:
        raise EAException("Error posting to ServiceNow: %s" % e)
    elastalert_logger.info("Alert sent to ServiceNow")

def alert(self, matches): body = "" for match in matches: body += unicode(BasicMatchString(self.rule, match)) # Separate text of aggregated alerts with dashes if len(matches) > 1: body += "\n----------------------------------------\n" body = self.format_body(body) # post to slack headers = {"content-type": "application/json"} # set https proxy, if it was provided proxies = {"https": self.slack_proxy} if self.slack_proxy else None payload = { "username": self.slack_username_override, "icon_emoji": self.slack_emoji_override, "attachments": [{"color": self.slack_msg_color, "title": self.rule["name"], "text": body, "fields": []}], } try: response = requests.post(self.slack_webhook_url, data=json.dumps(payload), headers=headers, proxies=proxies) response.raise_for_status() except RequestException as e: raise EAException("Error posting to slack: %s" % e) elastalert_logger.info("Alert sent to Slack")
def alert(self, matches):
    body = ''
    for match in matches:
        body += unicode(BasicMatchString(self.rule, match))
        # Separate text of aggregated alerts with dashes
        if len(matches) > 1:
            body += '\n----------------------------------------\n'

    # post to victorops
    headers = {'content-type': 'application/json'}
    payload = {
        "message_type": self.victorops_message_type,
        "entity_display_name": self.victorops_entity_display_name,
        "monitoring_tool": "Elastalert",
        "state_message": body
    }

    try:
        response = requests.post(self.url, data=json.dumps(payload), headers=headers)
        response.raise_for_status()
    except RequestException as e:
        raise EAException("Error posting to VictorOps: %s" % e)
    elastalert_logger.info("Trigger sent to VictorOps")

def alert(self, matches):
    body = ''
    for match in matches:
        body += unicode(BasicMatchString(self.rule, match))
        # Separate text of aggregated alerts with dashes
        if len(matches) > 1:
            body += '\n----------------------------------------\n'

    if self.custom_message is None:
        self.message = self.create_title(matches)
    else:
        self.message = self.custom_message.format(**matches[0])
    self.recipients = self._parse_responders(self.recipients, self.recipients_args, matches, self.default_reciepients)
    self.teams = self._parse_responders(self.teams, self.teams_args, matches, self.default_teams)

    post = {}
    post['message'] = self.message
    if self.account:
        post['user'] = self.account
    if self.recipients:
        post['responders'] = [{'username': r, 'type': 'user'} for r in self.recipients]
    if self.teams:
        post['teams'] = [{'name': r, 'type': 'team'} for r in self.teams]
    post['description'] = body
    post['source'] = 'ElastAlert'
    for i, tag in enumerate(self.tags):
        self.tags[i] = tag.format(**matches[0])
    post['tags'] = self.tags
    if self.priority and self.priority not in ('P1', 'P2', 'P3', 'P4', 'P5'):
        logging.warning("Priority level does not appear to be specified correctly. "
                        "Please make sure to set it to a value between P1 and P5")
    else:
        post['priority'] = self.priority
    if self.alias is not None:
        post['alias'] = self.alias.format(**matches[0])

    logging.debug(json.dumps(post))

    headers = {
        'Content-Type': 'application/json',
        'Authorization': 'GenieKey {}'.format(self.api_key),
    }
    # set https proxy, if it was provided
    proxies = {'https': self.opsgenie_proxy} if self.opsgenie_proxy else None

    try:
        r = requests.post(self.to_addr, json=post, headers=headers, proxies=proxies)
        logging.debug('request response: {0}'.format(r))
        if r.status_code != 202:
            elastalert_logger.info("Error response from {0} \n "
                                   "API Response: {1}".format(self.to_addr, r))
            r.raise_for_status()
        logging.info("Alert sent to OpsGenie")
    except Exception as err:
        raise EAException("Error sending alert: {0}".format(err))

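# Sketch of the OpsGenie v2 responders structure built above (hypothetical
# names): users and teams become typed objects rather than the bare string
# lists the older v1 alerters post.
recipients = ['alice', 'bob']
teams = ['sre']
example_post = {
    'responders': [{'username': r, 'type': 'user'} for r in recipients],
    'teams': [{'name': t, 'type': 'team'} for t in teams],
}
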
def get_all_terms(self, args):
    """ Performs a terms aggregation for each field to get every existing term. """
    self.es = Elasticsearch(host=self.rules['es_host'], port=self.rules['es_port'],
                            timeout=self.rules.get('es_conn_timeout', 50))
    window_size = datetime.timedelta(**self.rules.get('terms_window_size', {'days': 30}))
    field_name = {"field": "", "size": 2147483647}  # Integer.MAX_VALUE
    query_template = {"aggs": {"values": {"terms": field_name}}}
    if args and args.start:
        end = ts_to_dt(args.start)
    else:
        end = ts_now()
    start = end - window_size
    if self.rules.get('use_strftime_index'):
        index = format_index(self.rules['index'], start, end)
    else:
        index = self.rules['index']
    time_filter = {self.rules['timestamp_field']: {'lte': dt_to_ts(end), 'gte': dt_to_ts(start)}}
    query_template['filter'] = {'bool': {'must': [{'range': time_filter}]}}
    query = {'aggs': {'filtered': query_template}}

    for field in self.fields:
        # For composite keys, we will need to perform sub-aggregations
        if type(field) == list:
            level = query_template['aggs']
            # Iterate on each part of the composite key and add a sub aggs clause to the elastic search query
            for i, sub_field in enumerate(field):
                level['values']['terms']['field'] = sub_field
                if i < len(field) - 1:
                    # If we have more fields after the current one, then set up the next nested structure
                    level['values']['aggs'] = {'values': {'terms': copy.deepcopy(field_name)}}
                    level = level['values']['aggs']
        else:
            # For non-composite keys, only a single agg is needed
            field_name['field'] = field
        res = self.es.search(body=query, index=index, ignore_unavailable=True, timeout='50s')
        if 'aggregations' in res:
            buckets = res['aggregations']['filtered']['values']['buckets']
            if type(field) == list:
                # For composite keys, make the lookup based on all fields
                # Make it a tuple since it can be hashed and used in dictionary lookups
                self.seen_values[tuple(field)] = []
                for bucket in buckets:
                    # We need to walk down the hierarchy and obtain the value at each level
                    self.seen_values[tuple(field)] += self.flatten_aggregation_hierarchy(bucket)
                # If we don't have any results, it could either be because of the absence of any baseline data
                # OR it may be because the composite key contained a non-primitive type. Either way, give the
                # end-users a heads up to help them debug what might be going on.
                if not self.seen_values[tuple(field)]:
                    elastalert_logger.warning((
                        'No results were found from all sub-aggregations. This can either indicate that there is '
                        'no baseline data OR that a non-primitive field was used in a composite key.'
                    ))
            else:
                keys = [bucket['key'] for bucket in buckets]
                self.seen_values[field] = keys
                elastalert_logger.info('Found %s unique values for %s' % (len(keys), field))
        else:
            self.seen_values[field] = []
            elastalert_logger.info('Found no values for %s' % (field))

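# Sketch of the nested query the composite-key branch above builds for a
# hypothetical two-part key ['src_ip', 'dest_port']: each part of the key
# becomes one more level of 'values' terms sub-aggregation.
example_query = {
    'aggs': {
        'filtered': {
            'filter': {'bool': {'must': [{'range': {'@timestamp': {'gte': '...', 'lte': '...'}}}]}},
            'aggs': {
                'values': {
                    'terms': {'field': 'src_ip', 'size': 2147483647},
                    'aggs': {'values': {'terms': {'field': 'dest_port', 'size': 2147483647}}},
                }
            }
        }
    }
}
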
def start(self): self.running = True elastalert_logger.info("Starting up") self.scheduler = schedule.Manager() for rule in self.rules: rule_interval = intervalInSecond(rule.get("trigger").get("schedule").get("interval")) self.scheduler.add_operation(self.run_rule, rule_interval, [rule])
def compare(self, event):
    elastalert_logger.info("Comparing value: %s with max: %s, min: %s",
                           self.aggregation_num['agg_num'],
                           self.rules.get('maxvalue'),
                           self.rules.get('minvalue'))
    if self.aggregation_num['agg_num'] is None:
        return not self.rules['ignore_null']
    if 'minvalue' not in self.rules:
        return not self.aggregation_num['agg_num'] <= self.rules['maxvalue']
    if 'maxvalue' not in self.rules:
        return not self.rules['minvalue'] <= self.aggregation_num['agg_num']
    return not self.rules['minvalue'] <= self.aggregation_num['agg_num'] <= self.rules['maxvalue']

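# Worked example of the threshold logic above: with minvalue=10 and
# maxvalue=100, an aggregated value of 7 falls outside [10, 100], so
# not (10 <= 7 <= 100) is True and the rule matches.
minvalue, maxvalue, agg_num = 10, 100, 7
assert not (minvalue <= agg_num <= maxvalue)
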
def start(self): self.running = True elastalert_logger.info("Starting up") self.scheduler = schedule.Manager() for rule in self.rules: rule_interval = intervalInSecond( rule.get("trigger").get("schedule").get("interval")) self.scheduler.add_operation(self.run_rule, rule_interval, [rule])
def handle_uncaught_exception(self, exception, rule):
    """ Disables a rule and sends a notification. """
    logging.error(traceback.format_exc())
    self.handle_error('Uncaught exception running rule %s: %s' % (rule['name'], exception),
                      {'rule': rule['name']})
    if self.disable_rules_on_error:
        self.rules = [running_rule for running_rule in self.rules
                      if running_rule['name'] != rule['name']]
        self.disabled_rules.append(rule)
        elastalert_logger.info('Rule %s disabled', rule['name'])
    if self.notify_email:
        self.send_notification_email(exception=exception, rule=rule)

def __init__(self, args):
    self.parse_args(args)
    self.debug = self.args.debug
    self.verbose = self.args.verbose
    self.rule_jobs = []

    if self.verbose or self.debug:
        elastalert_logger.setLevel(logging.INFO)

    if self.debug:
        elastalert_logger.info(
            "Note: In debug mode, alerts will be logged to console but NOT actually sent. "
            "To send them, use --verbose."
        )

    if not self.args.es_debug:
        logging.getLogger('elasticsearch').setLevel(logging.WARNING)

    if self.args.es_debug_trace:
        tracer = logging.getLogger('elasticsearch.trace')
        tracer.setLevel(logging.INFO)
        tracer.addHandler(logging.FileHandler(self.args.es_debug_trace))

    self.conf = load_rules(self.args)
    self.global_config = load_global_config()
    # for key, value in self.conf.items():
    #     elastalert_logger.info("%s => %s", key, value)
    # self.max_query_size = self.conf['max_query_size']
    self.rules = self.conf['rules']
    # self.writeback_index = self.conf['writeback_index']
    # self.run_every = self.conf['run_every']
    # self.alert_time_limit = self.conf['alert_time_limit']
    # self.old_query_limit = self.conf['old_query_limit']
    # self.disable_rules_on_error = self.conf['disable_rules_on_error']
    # self.notify_email = self.conf.get('notify_email', [])
    # self.from_addr = self.conf.get('from_addr', 'ElastAlert')
    # self.smtp_host = self.conf.get('smtp_host', 'localhost')
    # self.max_aggregation = self.conf.get('max_aggregation', 10000)
    self.alerts_sent = 0
    self.num_hits = 0
    self.current_es = None
    self.current_es_addr = None
    # self.buffer_time = self.conf['buffer_time']
    self.silence_cache = {}
    self.rule_hashes = get_rule_hashes(self.conf, self.args.rule)
    self.starttime = self.args.start
    self.disabled_rules = []
    # self.es_conn_config = self.build_es_conn_config(self.conf)
    # self.writeback_es = self.new_elasticsearch(self.es_conn_config)

    if self.args.silence:
        self.silence()

def alert(self, matches):
    body = ''
    for match in matches:
        body += unicode(BasicMatchString(self.rule, match))
        # Separate text of aggregated alerts with dashes
        if len(matches) > 1:
            body += '\n----------------------------------------\n'

    if self.custom_message is None:
        self.message = self.create_title(matches)
    else:
        self.message = self.custom_message.format(**matches[0])
    self.recipients = self._parse_responders(self.recipients, self.recipients_args, matches, self.default_reciepients)
    self.teams = self._parse_responders(self.teams, self.teams_args, matches, self.default_teams)

    post = {}
    post['message'] = self.message
    if self.account:
        post['user'] = self.account
    if self.recipients:
        post['responders'] = [{'username': r, 'type': 'user'} for r in self.recipients]
    if self.teams:
        post['teams'] = [{'name': r, 'type': 'team'} for r in self.teams]
    post['description'] = body
    post['source'] = 'ElastAlert'
    post['tags'] = self.tags
    if self.priority and self.priority not in ('P1', 'P2', 'P3', 'P4', 'P5'):
        logging.warning("Priority level does not appear to be specified correctly. "
                        "Please make sure to set it to a value between P1 and P5")
    else:
        post['priority'] = self.priority
    if self.alias is not None:
        post['alias'] = self.alias.format(**matches[0])

    logging.debug(json.dumps(post))

    headers = {
        'Content-Type': 'application/json',
        'Authorization': 'GenieKey {}'.format(self.api_key),
    }
    # set https proxy, if it was provided
    proxies = {'https': self.opsgenie_proxy} if self.opsgenie_proxy else None

    try:
        r = requests.post(self.to_addr, json=post, headers=headers, proxies=proxies)
        logging.debug('request response: {0}'.format(r))
        if r.status_code != 202:
            elastalert_logger.info("Error response from {0} \n "
                                   "API Response: {1}".format(self.to_addr, r))
            r.raise_for_status()
        logging.info("Alert sent to OpsGenie")
    except Exception as err:
        raise EAException("Error sending alert: {0}".format(err))

def run_rule(self, rule):
    """ Run a rule including querying and alerting on results.

    :param rule: The rule configuration.
    :return: The number of matches that the rule produced.
    """
    elastalert_logger.info('Start to run rule: %s', rule.get('name'))
    # Run the rule. If querying over a large time period, split it up into segments
    self.num_hits = 0
    rule_request = rule.get("input").get("search").get("request")
    if rule_request.get("elastic_host", None) is not None and rule_request.get("elastic_port", None) is not None:
        self.current_es = Elasticsearch(host=rule_request.get("elastic_host"),
                                        port=rule_request.get("elastic_port"))
    else:
        self.current_es = self.new_elasticsearch(self.global_config)

    self.run_query(rule)

    # Process any new matches
    num_matches = len(rule['type'].matches)
    while rule['type'].matches:
        match = rule['type'].matches.pop(0)
        # if self.is_silenced(rule['name'] + key) or self.is_silenced(rule['name']):
        #     elastalert_logger.info('Ignoring match for silenced rule %s%s' % (rule['name'], key))
        #     continue
        if rule.get('realert'):
            key = ''  # assumed default: no query_key in this stripped-down runner
            next_alert, exponent = self.next_alert_time(rule, rule['name'] + key, ts_now())
            self.set_realert(rule['name'] + key, next_alert, exponent)

        # If no aggregation, alert immediately
        # if not rule['aggregation']:
        #     self.alert([match], rule)
        #     continue
        self.alert([match], rule)
        # Add it as an aggregated match
        # self.add_aggregated_alert(match, rule)

    # Mark this endtime for next run's start
    # rule['previous_endtime'] = endtime
    # time_taken = time.time() - run_start
    return num_matches

def alert(self, matches):
    body = ''
    for match in matches:
        body += unicode(BasicMatchString(self.rule, match))
        # Separate text of aggregated alerts with dashes
        if len(matches) > 1:
            body += '\n----------------------------------------\n'

    if self.custom_message is None:
        self.message = self.create_title(matches)
    else:
        self.message = self.custom_message.format(**matches[0])

    post = {}
    post['message'] = self.message
    if self.account:
        post['user'] = self.account
    if self.recipients:
        post['responders'] = self._fill_responders(self.recipients, 'user')
    if self.teams:
        post['teams'] = self._fill_responders(self.teams, 'team')
    post['description'] = body
    post['source'] = 'ElastAlert'
    post['tags'] = self.tags
    if self.alias is not None:
        post['alias'] = self.alias.format(**matches[0])

    logging.debug(json.dumps(post))

    headers = {
        'Content-Type': 'application/json',
        'Authorization': 'GenieKey {}'.format(self.api_key),
    }
    # set https proxy, if it was provided
    proxies = {'https': self.opsgenie_proxy} if self.opsgenie_proxy else None

    try:
        r = requests.post(self.to_addr, json=post, headers=headers, proxies=proxies)
        logging.debug('request response: {0}'.format(r))
        if r.status_code != 202:
            elastalert_logger.info("Error response from {0} \n "
                                   "API Response: {1}".format(self.to_addr, r))
            r.raise_for_status()
        logging.info("Alert sent to OpsGenie")
    except Exception as err:
        raise EAException("Error sending alert: {0}".format(err))

def __init__(self, args): self.parse_args(args) self.debug = self.args.debug self.verbose = self.args.verbose self.rule_jobs = [] if self.verbose or self.debug: elastalert_logger.setLevel(logging.INFO) if self.debug: elastalert_logger.info("Note: In debug mode, alerts will be logged to console but NOT actually sent. To send them, use --verbose.") if not self.args.es_debug: logging.getLogger('elasticsearch').setLevel(logging.WARNING) if self.args.es_debug_trace: tracer = logging.getLogger('elasticsearch.trace') tracer.setLevel(logging.INFO) tracer.addHandler(logging.FileHandler(self.args.es_debug_trace)) self.conf = load_rules(self.args) self.global_config = load_global_config() #for key,value in self.conf.items(): # elastalert_logger.info("%s => %s", key, value) #self.max_query_size = self.conf['max_query_size'] self.rules = self.conf['rules'] #self.writeback_index = self.conf['writeback_index'] #self.run_every = self.conf['run_every'] #self.alert_time_limit = self.conf['alert_time_limit'] #self.old_query_limit = self.conf['old_query_limit'] #self.disable_rules_on_error = self.conf['disable_rules_on_error'] #self.notify_email = self.conf.get('notify_email', []) #self.from_addr = self.conf.get('from_addr', 'ElastAlert') #self.smtp_host = self.conf.get('smtp_host', 'localhost') #self.max_aggregation = self.conf.get('max_aggregation', 10000) self.alerts_sent = 0 self.num_hits = 0 self.current_es = None self.current_es_addr = None #self.buffer_time = self.conf['buffer_time'] self.silence_cache = {} self.rule_hashes = get_rule_hashes(self.conf, self.args.rule) self.starttime = self.args.start self.disabled_rules = [] #self.es_conn_config = self.build_es_conn_config(self.conf) #self.writeback_es = self.new_elasticsearch(self.es_conn_config) if self.args.silence: self.silence()
def generate_aggregation_query(self):
    self.query_str = {
        self.metric_key: {
            self.rules['metric_agg_type']: {
                'field': self.rules['metric_agg_key']
            }
        }
    }
    elastalert_logger.info(
        "===============================str self.query_str: %s" % str(self.query_str))
    return self.query_str

def add_aggregation_data(self, payload):
    elastalert_logger.info(
        "===============================str payload: %s" % str(payload))
    for timestamp, payload_data in payload.iteritems():
        if 'interval_aggs' in payload_data:
            self.unwrap_interval_buckets(timestamp, None, payload_data['interval_aggs']['buckets'])
        elif 'bucket_aggs' in payload_data:
            self.unwrap_term_buckets(timestamp, payload_data['bucket_aggs']['buckets'])
        else:
            self.check_matches(timestamp, None, payload_data)

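# Hedged sketch of the three payload shapes the dispatcher above routes on,
# keyed by timestamp (field names and values are hypothetical):
example_payload = {
    '2017-01-01T00:00:00Z': {'metric_cpu_avg': {'value': 93.5}},  # plain metric -> check_matches
    '2017-01-01T01:00:00Z': {'interval_aggs': {'buckets': []}},   # -> unwrap_interval_buckets
    '2017-01-01T02:00:00Z': {'bucket_aggs': {'buckets': []}},     # -> unwrap_term_buckets
}
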
def alert(self, matches):
    body = ''
    for match in matches:
        body += unicode(BasicMatchString(self.rule, match))
        # Separate text of aggregated alerts with dashes
        if len(matches) > 1:
            body += '\n----------------------------------------\n'

    # Add JIRA ticket if it exists
    if self.pipeline is not None and 'jira_ticket' in self.pipeline:
        url = '%s/browse/%s' % (self.pipeline['jira_server'], self.pipeline['jira_ticket'])
        body += '\nJIRA ticket: %s' % (url)

    to_addr = self.rule['email']
    email_msg = MIMEText(body.encode('UTF-8'), _charset='UTF-8')
    email_msg['Subject'] = self.create_title(matches)
    email_msg['To'] = ', '.join(self.rule['email'])
    email_msg['From'] = self.from_addr
    email_msg['Reply-To'] = self.rule.get('email_reply_to', email_msg['To'])
    if self.rule.get('cc'):
        email_msg['CC'] = ','.join(self.rule['cc'])
        to_addr = to_addr + self.rule['cc']
    if self.rule.get('bcc'):
        to_addr = to_addr + self.rule['bcc']

    try:
        if self.smtp_ssl:
            if self.smtp_port:
                self.smtp = SMTP_SSL(self.smtp_host, self.smtp_port)
            else:
                self.smtp = SMTP_SSL(self.smtp_host)
        else:
            if self.smtp_port:
                self.smtp = SMTP(self.smtp_host, self.smtp_port)
            else:
                self.smtp = SMTP(self.smtp_host)
            self.smtp.ehlo()
            if self.smtp.has_extn('STARTTLS'):
                self.smtp.starttls()
        if 'smtp_auth_file' in self.rule:
            self.smtp.login(self.user, self.password)
    except SMTPAuthenticationError as e:
        # Must be caught before SMTPException, which it subclasses
        raise EAException("SMTP username/password rejected: %s" % (e))
    except (SMTPException, error) as e:
        raise EAException("Error connecting to SMTP host: %s" % (e))
    self.smtp.sendmail(self.from_addr, to_addr, email_msg.as_string())
    self.smtp.close()
    elastalert_logger.info("Sent email to %s" % (self.rule['email']))

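# Runnable sketch of the header assembly above (addresses hypothetical): 'To'
# is only the displayed header, while the envelope list to_addr also carries
# CC and BCC recipients; BCC addresses never appear in any header.
from email.mime.text import MIMEText

to_addr = ['ops@example.com']
msg = MIMEText('body', _charset='UTF-8')
msg['Subject'] = 'Example rule'
msg['To'] = ', '.join(to_addr)
msg['CC'] = ','.join(['lead@example.com'])
to_addr = to_addr + ['lead@example.com'] + ['audit@example.com']  # CC + BCC join the envelope
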
def alert(self, matches):
    body = self.create_alert_body(matches)
    body = self.fit_body(body)  # limit body size
    json_content = self.content_factory(body)
    try:
        response = requests.post(self.url, headers=self.headers, json=json_content)
    except requests.RequestException as e:
        raise EAException("Error while contacting Ryver: {}".format(e))
    self.check_ryver_response(response)
    elastalert_logger.info(self.log_message)

def alert(self, matches):
    body = ''
    for match in matches:
        body += str(BasicMatchString(self.rule, match))
        # Separate text of aggregated alerts with dashes
        if len(matches) > 1:
            body += '\n----------------------------------------\n'

    # Add JIRA ticket if it exists
    if self.pipeline is not None and 'jira_ticket' in self.pipeline:
        url = '%s/browse/%s' % (self.rule['jira_server'], self.pipeline['jira_ticket'])
        body += '\nJIRA ticket: %s' % (url)

    slack.chat.post_message(self.channel, body, username=self.bot_name)
    elastalert_logger.info("Sent slack notification to %s" % (self.rule['slack_channel']))

def alert(self, matches): body = "" for match in matches: body += unicode(BasicMatchString(self.rule, match)) # Separate text of aggregated alerts with dashes if len(matches) > 1: body += "\n----------------------------------------\n" # Add JIRA ticket if it exists if self.pipeline is not None and "jira_ticket" in self.pipeline: url = "%s/browse/%s" % (self.pipeline["jira_server"], self.pipeline["jira_ticket"]) body += "\nJIRA ticket: %s" % (url) to_addr = self.rule["email"] email_msg = MIMEText(body.encode("UTF-8"), _charset="UTF-8") email_msg["Subject"] = self.create_title(matches) email_msg["To"] = ", ".join(self.rule["email"]) email_msg["From"] = self.from_addr email_msg["Reply-To"] = self.rule.get("email_reply_to", email_msg["To"]) if self.rule.get("cc"): email_msg["CC"] = ",".join(self.rule["cc"]) to_addr = to_addr + self.rule["cc"] if self.rule.get("bcc"): to_addr = to_addr + self.rule["bcc"] try: if self.smtp_ssl: if self.smtp_port: self.smtp = SMTP_SSL(self.smtp_host, self.smtp_port) else: self.smtp = SMTP_SSL(self.smtp_host) else: if self.smtp_port: self.smtp = SMTP(self.smtp_host, self.smtp_port) else: self.smtp = SMTP(self.smtp_host) self.smtp.ehlo() if self.smtp.has_extn("STARTTLS"): self.smtp.starttls() if "smtp_auth_file" in self.rule: self.smtp.login(self.user, self.password) except (SMTPException, error) as e: raise EAException("Error connecting to SMTP host: %s" % (e)) except SMTPAuthenticationError as e: raise EAException("SMTP username/password rejected: %s" % (e)) self.smtp.sendmail(self.from_addr, to_addr, email_msg.as_string()) self.smtp.close() elastalert_logger.info("Sent email to %s" % (self.rule["email"]))
def alert(self, matches):
    body = ''
    for match in matches:
        body += str(BasicMatchString(self.rule, match))
        # Separate text of aggregated alerts with dashes
        if len(matches) > 1:
            body += '\n----------------------------------------\n'

    # use instance role if aws_access_key and aws_secret_key are not specified
    if not self.aws_access_key and not self.aws_secret_key:
        sns_client = sns.connect_to_region(self.aws_region)
    else:
        sns_client = sns.connect_to_region(self.aws_region,
                                           aws_access_key_id=self.aws_access_key,
                                           aws_secret_access_key=self.aws_secret_key)
    sns_client.publish(self.sns_topic_arn, body, subject=self.create_default_title(matches))
    elastalert_logger.info("Sent sns notification to %s" % (self.sns_topic_arn))

def alert(self, matches):
    body = ''
    for match in matches:
        body += unicode(BasicMatchString(self.rule, match))
        # Separate text of aggregated alerts with dashes
        if len(matches) > 1:
            body += '\n----------------------------------------\n'

    if self.custom_message is None:
        self.message = self.create_default_title(matches)
    else:
        self.message = self.custom_message.format(**matches[0])

    post = {}
    post['apiKey'] = self.api_key
    post['message'] = self.message
    if self.account:
        post['user'] = self.account
    if self.recipients:
        post['recipients'] = self.recipients
    if self.teams:
        post['teams'] = self.teams
    post['description'] = body
    post['source'] = 'ElastAlert'
    post['tags'] = self.tags
    if self.alias is not None:
        post['alias'] = self.alias.format(**matches[0])

    logging.debug(json.dumps(post))

    headers = {'content-type': 'application/json'}
    # set https proxy, if it was provided
    proxies = {'https': self.opsgenie_proxy} if self.opsgenie_proxy else None

    try:
        r = requests.post(self.to_addr, json=post, headers=headers, proxies=proxies)
        logging.debug('request response: {0}'.format(r))
        if r.status_code != 200:
            elastalert_logger.info("Error response from {0} \n "
                                   "API Response: {1}".format(self.to_addr, r))
            r.raise_for_status()
        logging.info("Alert sent to OpsGenie")
    except Exception as err:
        raise EAException("Error sending alert: {0}".format(err))

def alert(self, matches):
    body = self.create_alert_body(matches)

    # use aws_access_key and aws_secret_key if specified; then use boto profile if specified;
    # otherwise use instance role
    if not self.aws_access_key and not self.aws_secret_key:
        if not self.boto_profile:
            sns_client = sns.connect_to_region(self.aws_region)
        else:
            sns_client = sns.connect_to_region(self.aws_region, profile_name=self.boto_profile)
    else:
        sns_client = sns.connect_to_region(self.aws_region,
                                           aws_access_key_id=self.aws_access_key,
                                           aws_secret_access_key=self.aws_secret_key)
    sns_client.publish(self.sns_topic_arn, body, subject=self.create_title(matches))
    elastalert_logger.info("Sent sns notification to %s" % (self.sns_topic_arn))

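# Decision-order sketch of the credential fallback above (no AWS calls are
# made; values are hypothetical): explicit keys win, then a named boto
# profile, then the instance role.
aws_access_key, aws_secret_key, boto_profile = None, None, 'alerts'
if not aws_access_key and not aws_secret_key:
    choice = ('profile: %s' % boto_profile) if boto_profile else 'instance role'
else:
    choice = 'explicit keys'
print(choice)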