Example #1
    def get_query(filters,
                  starttime=None,
                  endtime=None,
                  sort=True,
                  timestamp_field='@timestamp'):
        """ Returns a query dict that will apply a list of filters, filter by
        start and end time, and sort results by timestamp.

        :param filters: A list of elasticsearch filters to use.
        :param starttime: A timestamp to use as the start time of the query.
        :param endtime: A timestamp to use as the end time of the query.
        :param sort: If true, sort results by timestamp. (Default True)
        :param timestamp_field: The field to use for the range filter and sort. (Default '@timestamp')
        :return: A query dictionary to pass to elasticsearch.
        """
        starttime = dt_to_ts(starttime)
        endtime = dt_to_ts(endtime)
        filters = copy.copy(filters)
        query = {'filter': {'bool': {'must': filters}}}
        if starttime and endtime:
            query['filter']['bool']['must'].append({
                'range': {
                    timestamp_field: {
                        'from': starttime,
                        'to': endtime
                    }
                }
            })
        if sort:
            query['sort'] = [{timestamp_field: {'order': 'asc'}}]
        return query
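
A minimal usage sketch for get_query; the filter and timestamps below are illustrative, not from the original source:

    import datetime

    # Hypothetical inputs: one term filter and a one-hour window.
    filters = [{'term': {'status': 'error'}}]
    end = datetime.datetime(2015, 1, 1, 13, 0, 0)
    start = end - datetime.timedelta(hours=1)

    query = get_query(filters, starttime=start, endtime=end)
    # query now holds the term filter plus a range clause on '@timestamp',
    # and a 'sort' key ordering results by ascending timestamp.
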
Example #2
def kibana4_context_args(match,
                         context_time=30,
                         query=None,
                         sort_field='timestamp_micros',
                         order='asc',
                         columns="host,program,message"):

    if not query:
        query = 'host:"%s" AND program:"%s"' % (match['host'], match['program'])

    query = urllib.quote(query)
    match_dt = ts_to_dt(match['@timestamp'])

    context_timedelta = datetime.timedelta(seconds=context_time)
    q_start = dt_to_ts(match_dt - context_timedelta)
    q_end = dt_to_ts(match_dt + context_timedelta)

    r = kibana4_uri_fmt.format(q_start=q_start,
                               q_end=q_end,
                               columns=columns,
                               query=query,
                               sort_field=sort_field,
                               order=order)

    return r
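
kibana4_uri_fmt is referenced but not shown. A hypothetical template with the same placeholder names might look like the following; the host and URL layout are assumptions, not the actual ElastAlert constant:

    # Hypothetical format string consumed by kibana4_context_args above.
    kibana4_uri_fmt = (
        "https://kibana.example.com/#/discover"
        "?_g=(time:(from:'{q_start}',to:'{q_end}'))"
        "&_a=(columns:!({columns}),"
        "query:(query_string:(query:'{query}')),"
        "sort:!({sort_field},{order}))"
    )
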
Example #3
    def get_all_terms(self, args):
        """ Performs a terms aggregation for each field to get every existing term. """
        self.es = Elasticsearch(host=self.rules['es_host'], port=self.rules['es_port'])
        window_size = datetime.timedelta(**self.rules.get('terms_window_size', {'days': 30}))
        field_name = {"field": "", "size": 2147483647}  # Integer.MAX_VALUE
        query_template = {"aggs": {"values": {"terms": field_name}}}
        if args and args.start:
            end = ts_to_dt(args.start)
        else:
            end = ts_now()
        start = end - window_size
        if self.rules.get('use_strftime_index'):
            index = format_index(self.rules['index'], start, end)
        else:
            index = self.rules['index']
        time_filter = {self.rules['timestamp_field']: {'lte': dt_to_ts(end), 'gte': dt_to_ts(start)}}
        query_template['filter'] = {'bool': {'must': [{'range': time_filter}]}}
        query = {'aggs': {'filtered': query_template}}

        for field in self.fields:
            field_name['field'] = field
            res = self.es.search(body=query, index=index, ignore_unavailable=True, timeout=50)
            if 'aggregations' in res:
                buckets = res['aggregations']['filtered']['values']['buckets']
                keys = [bucket['key'] for bucket in buckets]
                self.seen_values[field] = keys
                elastalert_logger.info('Found %s unique values for %s' % (len(keys), field))
            else:
                self.seen_values[field] = []
                elastalert_logger.info('Found no values for %s' % (field))
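
For one field, the assembled request body has the nested shape below. Note that field_name is mutated inside the loop and query holds a reference to it, which is how each iteration retargets the aggregation. The variable name query_shape and the field 'username' are illustrative:

    # Shape of `query` for a single string field (timestamps abbreviated):
    query_shape = {
        'aggs': {
            'filtered': {
                'aggs': {'values': {'terms': {'field': 'username',
                                              'size': 2147483647}}},
                'filter': {'bool': {'must': [
                    {'range': {'@timestamp': {'gte': '...', 'lte': '...'}}}
                ]}}
            }
        }
    }
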
Example #4
 def find_recent_pending_alerts(self, time_limit):
     """ Queries writeback_es to find alerts that did not send
     and are newer than time_limit """
     query = {
         'query': {
             'query_string': {
                 'query': 'alert_sent:false'
             }
         },
         'filter': {
             'range': {
                 'alert_time': {
                     'from': dt_to_ts(ts_now() - time_limit),
                     'to': dt_to_ts(ts_now())
                 }
             }
         }
     }
     if self.writeback_es:
         try:
             res = self.writeback_es.search(index=self.writeback_index,
                                            doc_type='elastalert',
                                            body=query,
                                            size=1000)
             if res['hits']['hits']:
                 return res['hits']['hits']
         except Exception:
             pass
     return []
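
A hypothetical caller sketch; runner stands in for an instance of the surrounding class and send_alert is an illustrative helper, neither of which appears in the example:

    import datetime

    pending = runner.find_recent_pending_alerts(datetime.timedelta(hours=1))
    for hit in pending:
        alert_body = hit['_source']  # the writeback document of the unsent alert
        send_alert(alert_body)       # hypothetical re-send step
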
Example #5
    def get_all_terms(self, args):
        """ Performs a terms aggregation for each field to get every existing term. """
        self.es = Elasticsearch(host=self.rules["es_host"], port=self.rules["es_port"])
        window_size = datetime.timedelta(**self.rules.get("terms_window_size", {"days": 30}))
        field_name = {"field": "", "size": 2147483647}  # Integer.MAX_VALUE
        query_template = {"aggs": {"values": {"terms": field_name}}}
        if args and args.start:
            end = ts_to_dt(args.start)
        else:
            end = ts_now()
        start = end - window_size
        if self.rules.get("use_strftime_index"):
            index = format_index(self.rules["index"], start, end)
        else:
            index = self.rules["index"]
        time_filter = {self.rules["timestamp_field"]: {"lte": dt_to_ts(end), "gte": dt_to_ts(start)}}
        query_template["filter"] = {"bool": {"must": [{"range": time_filter}]}}
        query = {"aggs": {"filtered": query_template}}

        for field in self.fields:
            field_name["field"] = field
            res = self.es.search(body=query, index=index, ignore_unavailable=True, timeout=50)
            if "aggregations" in res:
                buckets = res["aggregations"]["filtered"]["values"]["buckets"]
                keys = [bucket["key"] for bucket in buckets]
                self.seen_values[field] = keys
                elastalert_logger.info("Found %s unique values for %s" % (len(keys), field))
            else:
                self.seen_values[field] = []
                elastalert_logger.info("Found no values for %s" % (field))
Example #6
    def get_all_terms(self, args):
        """ Performs a terms aggregation for each field to get every existing term. """
        self.es = Elasticsearch(host=self.rules['es_host'], port=self.rules['es_port'], timeout=self.rules.get('es_conn_timeout', 50))
        window_size = datetime.timedelta(**self.rules.get('terms_window_size', {'days': 30}))
        field_name = {"field": "", "size": 2147483647}  # Integer.MAX_VALUE
        query_template = {"aggs": {"values": {"terms": field_name}}}
        if args and args.start:
            end = ts_to_dt(args.start)
        else:
            end = ts_now()
        start = end - window_size
        if self.rules.get('use_strftime_index'):
            index = format_index(self.rules['index'], start, end)
        else:
            index = self.rules['index']
        time_filter = {self.rules['timestamp_field']: {'lte': dt_to_ts(end), 'gte': dt_to_ts(start)}}
        query_template['filter'] = {'bool': {'must': [{'range': time_filter}]}}
        query = {'aggs': {'filtered': query_template}}

        for field in self.fields:
            # For composite keys, we will need to perform sub-aggregations
            if type(field) == list:
                level = query_template['aggs']
                # Iterate on each part of the composite key and add a sub aggs clause to the elastic search query
                for i, sub_field in enumerate(field):
                    level['values']['terms']['field'] = sub_field
                    if i < len(field) - 1:
                        # If we have more fields after the current one, then set up the next nested structure
                        level['values']['aggs'] = {'values': {'terms': copy.deepcopy(field_name)}}
                        level = level['values']['aggs']
            else:
                # For non-composite keys, only a single agg is needed
                field_name['field'] = field
            res = self.es.search(body=query, index=index, ignore_unavailable=True, timeout='50s')
            if 'aggregations' in res:
                buckets = res['aggregations']['filtered']['values']['buckets']
                if type(field) == list:
                    # For composite keys, make the lookup based on all fields
                    # Make it a tuple since it can be hashed and used in dictionary lookups
                    self.seen_values[tuple(field)] = []
                    for bucket in buckets:
                        # We need to walk down the hierarchy and obtain the value at each level
                        self.seen_values[tuple(field)] += self.flatten_aggregation_hierarchy(bucket)
                    # If we don't have any results, it could either be because of the absence of any baseline data
                    # OR it may be because the composite key contained a non-primitive type.  Either way, give the
                    # end-users a heads up to help them debug what might be going on.
                    if not self.seen_values[tuple(field)]:
                        elastalert_logger.warning((
                            'No results were found from all sub-aggregations.  This can either indicate that there is '
                            'no baseline data OR that a non-primitive field was used in a composite key.'
                        ))
                else:
                    keys = [bucket['key'] for bucket in buckets]
                    self.seen_values[field] = keys
                    elastalert_logger.info('Found %s unique values for %s' % (len(keys), field))
            else:
                self.seen_values[field] = []
                elastalert_logger.info('Found no values for %s' % (field))
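
flatten_aggregation_hierarchy is called but not shown. A minimal sketch of what such a walk might do, assuming every nested level is keyed 'values' as in the query built above (an illustration, not the actual ElastAlert implementation):

    def flatten_aggregation_hierarchy(bucket, keys=()):
        # Accumulate this level's key, then recurse into any nested buckets.
        keys = keys + (bucket['key'],)
        if 'values' not in bucket:
            return [keys]  # leaf: one composite-key tuple
        flattened = []
        for sub_bucket in bucket['values']['buckets']:
            flattened += flatten_aggregation_hierarchy(sub_bucket, keys)
        return flattened
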
Example #7
    def silence(self):
        """ Silence an alert for a period of time. --silence and --rule must be passed as args. """
        if self.debug:
            logging.error('--silence not compatible with --debug')
            exit(1)

        if not self.args.rule:
            logging.error('--silence must be used with --rule')
            exit(1)

        # With --rule, self.rules will only contain that specific rule
        rule_name = self.rules[0]['name']

        try:
            unit, num = self.args.silence.split('=')
            silence_time = datetime.timedelta(**{unit: int(num)})
            # Double conversion to add tzinfo
            silence_ts = ts_to_dt(dt_to_ts(silence_time + datetime.datetime.utcnow()))
        except (ValueError, TypeError):
            logging.error('%s is not a valid time period' % (self.args.silence))
            exit(1)

        if not self.set_realert(rule_name, silence_ts, 0):
            logging.error('Failed to save silence command to elasticsearch')
            exit(1)

        logging.info('Success. %s will be silenced until %s' % (rule_name, silence_ts))
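
The --silence argument is parsed as '<unit>=<number>', where the unit must be a valid datetime.timedelta keyword. For example:

    import datetime

    unit, num = 'hours=4'.split('=')
    silence_time = datetime.timedelta(**{unit: int(num)})
    # silence_time == datetime.timedelta(hours=4); 'minutes=30', 'days=1',
    # etc. work the same way, while an unknown unit raises TypeError and is
    # reported as an invalid time period.
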
Example #8
 def get_match_str(self, match):
     lt = self.rules.get('use_local_time')
     starttime = pretty_ts(dt_to_ts(ts_to_dt(match[self.ts_field]) - self.rules['timeframe']), lt)
     endtime = pretty_ts(match[self.ts_field], lt)
     message = 'At least %d events occurred between %s and %s\n\n' % (self.rules['num_events'],
                                                                      starttime,
                                                                      endtime)
     return message
Example #9
 def find_recent_pending_alerts(self, time_limit):
     """ Queries writeback_es to find alerts that did not send
     and are newer than time_limit """
     query = {'query': {'query_string': {'query': 'alert_sent:false'}},
              'filter': {'range': {'alert_time': {'from': dt_to_ts(ts_now() - time_limit),
                                                  'to': dt_to_ts(ts_now())}}}}
     if self.writeback_es:
         try:
             res = self.writeback_es.search(index=self.writeback_index,
                                            doc_type='elastalert',
                                            body=query,
                                            size=1000)
             if res['hits']['hits']:
                 return res['hits']['hits']
         except Exception:
             pass
     return []
Example #10
 def get_match_str(self, match):
     lt = self.rules.get('use_local_time')
     starttime = pretty_ts(dt_to_ts(ts_to_dt(match[self.ts_field]) - self.rules['timeframe']), lt)
     endtime = pretty_ts(match[self.ts_field], lt)
     message = ('A maximum of %d unique %s(s) occurred since last alert or '
                'between %s and %s\n\n' % (self.rules['max_cardinality'],
                                           self.rules['cardinality_field'],
                                           starttime, endtime))
     return message
Example #11
 def get_match_str(self, match):
     ts = match[self.rules['timestamp_field']]
     lt = self.rules.get('use_local_time')
     message = 'An abnormally low number of events occurred around %s.\n' % (pretty_ts(ts, lt))
     message += 'Between %s and %s, there were less than %s events.\n\n' % (
         pretty_ts(dt_to_ts(ts_to_dt(ts) - self.rules['timeframe']), lt),
         pretty_ts(ts, lt),
         self.rules['threshold'])
     return message
Example #12
 def get_match_str(self, match):
     lt = self.rules.get('use_local_time')
     match_ts = lookup_es_key(match, self.ts_field)
     starttime = pretty_ts(dt_to_ts(ts_to_dt(match_ts) - self.rules['timeframe']), lt)
     endtime = pretty_ts(match_ts, lt)
     message = 'At least %d(%d) events occurred between %s and %s\n\n' % (self.rules['num_events'],
                                                                          match['count'],
                                                                          starttime,
                                                                          endtime)
     return message
Example #13
 def get_match_str(self, match):
     ts = match[self.rules['timestamp_field']]
     lt = self.rules.get('use_local_time')
     message = 'An abnormally low number of events occurred around %s.\n' % (
         pretty_ts(ts, lt))
     message += 'Between %s and %s, there were less than %s events.\n\n' % (
         pretty_ts(dt_to_ts(ts_to_dt(ts) - self.rules['timeframe']),
                   lt), pretty_ts(ts, lt), self.rules['threshold'])
     return message
Example #14
    def writeback(self, doc_type, body):
        # Convert any datetime objects to timestamps
        for key in body.keys():
            if isinstance(body[key], datetime.datetime):
                body[key] = dt_to_ts(body[key])
        if self.debug:
            elastalert_logger.info("Skipping writing to ES: %s" % (body))
            return None

        if '@timestamp' not in body:
            body['@timestamp'] = dt_to_ts(ts_now())
        if self.writeback_es:
            try:
                res = self.writeback_es.create(index=self.writeback_index,
                                               doc_type=doc_type, body=body)
                return res
            except ElasticsearchException as e:
                logging.exception("Error writing alert info to elasticsearch: %s" % (e))
                self.writeback_es = None
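
A hypothetical call sketch; runner stands in for an instance of the surrounding class and the body values are illustrative. Any datetime values in the body are serialized before indexing, and '@timestamp' is filled in when missing:

    import datetime

    body = {'rule_name': 'example_rule',
            'alert_time': datetime.datetime.utcnow(),
            'alert_sent': True}
    runner.writeback('elastalert', body)
    # body['alert_time'] is now a timestamp string, and body['@timestamp']
    # has been added automatically.
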
Example #15
    def get_all_terms(self, args):
        """ Performs a terms aggregation for each field to get every existing term. """
        self.es = Elasticsearch(host=self.rules['es_host'],
                                port=self.rules['es_port'],
                                use_ssl=self.rules['use_ssl'],
                                timeout=self.rules.get('es_conn_timeout', 50))
        window_size = datetime.timedelta(
            **self.rules.get('terms_window_size', {'days': 30}))
        field_name = {"field": "", "size": 2147483647}  # Integer.MAX_VALUE
        query_template = {"aggs": {"values": {"terms": field_name}}}
        if args and args.start:
            end = ts_to_dt(args.start)
        else:
            end = ts_now()
        start = end - window_size
        if self.rules.get('use_strftime_index'):
            index = format_index(self.rules['index'], start, end)
        else:
            index = self.rules['index']
        time_filter = {
            self.rules['timestamp_field']: {
                'lte': dt_to_ts(end),
                'gte': dt_to_ts(start)
            }
        }
        query_template['filter'] = {'bool': {'must': [{'range': time_filter}]}}
        query = {'aggs': {'filtered': query_template}}

        for field in self.fields:
            field_name['field'] = field
            res = self.es.search(body=query,
                                 index=index,
                                 ignore_unavailable=True,
                                 timeout='50s')
            if 'aggregations' in res:
                buckets = res['aggregations']['filtered']['values']['buckets']
                keys = [bucket['key'] for bucket in buckets]
                self.seen_values[field] = keys
                elastalert_logger.info('Found %s unique values for %s' %
                                       (len(keys), field))
            else:
                self.seen_values[field] = []
                elastalert_logger.info('Found no values for %s' % (field))
Example #16
 def get_match_str(self, match):
     lt = self.rules.get('use_local_time')
     starttime = pretty_ts(
         dt_to_ts(ts_to_dt(match[self.ts_field]) - self.rules['timeframe']),
         lt)
     endtime = pretty_ts(match[self.ts_field], lt)
     message = ('A maximum of %d unique %s(s) occurred since last alert or '
                'between %s and %s\n\n' %
                (self.rules['max_cardinality'],
                 self.rules['cardinality_field'], starttime, endtime))
     return message
Example #17
    def add_match(self, event):
        """ This function is called on all matching events. Rules use it to add
        extra information about the context of a match. Event is a dictionary
        containing terms directly from elasticsearch and alerts will report
        all of the information.

        :param event: The matching event, a dictionary of terms.
        """
        if '@timestamp' in event:
            event['@timestamp'] = dt_to_ts(event['@timestamp'])
        self.matches.append(event)
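
An illustrative sequence showing the serialization side effect (rule stands in for an instance of the rule type):

    import datetime

    event = {'@timestamp': datetime.datetime(2015, 1, 1, 12, 0, 0),
             'host': 'web-1'}  # illustrative event
    rule.add_match(event)
    # rule.matches[-1]['@timestamp'] is now a serialized timestamp string,
    # so the stored match is safe to JSON-encode.
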
Example #18
 def get_match_str(self, match):
     lt = self.rules.get("use_local_time")
     starttime = pretty_ts(dt_to_ts(ts_to_dt(match[self.ts_field]) - self.rules["timeframe"]), lt)
     endtime = pretty_ts(match[self.ts_field], lt)
     message = "A maximum of %d unique %s(s) occurred since last alert or " "between %s and %s\n\n" % (
         self.rules["max_cardinality"],
         self.rules["cardinality_field"],
         starttime,
         endtime,
     )
     return message
Example #19
    def get_query(filters, starttime=None, endtime=None, sort=True, timestamp_field='@timestamp'):
        """ Returns a query dict that will apply a list of filters, filter by
        start and end time, and sort results by timestamp.

        :param filters: A list of elasticsearch filters to use.
        :param starttime: A timestamp to use as the start time of the query.
        :param endtime: A timestamp to use as the end time of the query.
        :param sort: If true, sort results by timestamp. (Default True)
        :param timestamp_field: The field to use for the range filter and sort. (Default '@timestamp')
        :return: A query dictionary to pass to elasticsearch.
        """
        starttime = dt_to_ts(starttime)
        endtime = dt_to_ts(endtime)
        filters = copy.copy(filters)
        query = {'filter': {'bool': {'must': filters}}}
        if starttime and endtime:
            query['filter']['bool']['must'].append({'range': {timestamp_field: {'from': starttime,
                                                                                'to': endtime}}})
        if sort:
            query['sort'] = [{timestamp_field: {'order': 'asc'}}]
        return query
Example #20
 def get_match_str(self, match):
     lt = self.rules.get('use_local_time')
     match_ts = lookup_es_key(match, self.ts_field)
     starttime = pretty_ts(
         dt_to_ts(ts_to_dt(match_ts) - self.rules['timeframe']), lt)
     endtime = pretty_ts(match_ts, lt)
     if 'email_type' in self.rules and self.rules['email_type'] == "html":
         message = ''
     else:
         message = 'At least %d events occurred between %s and %s\n\n' % (
             self.rules['num_events'], starttime, endtime)
     return message
Example #21
    def writeback(self, doc_type, body):
        # Convert any datetime objects to timestamps
        for key in body.keys():
            if isinstance(body[key], datetime.datetime):
                body[key] = dt_to_ts(body[key])
        if self.debug:
            elastalert_logger.info("Skipping writing to ES: %s" % (body))
            return None

        if '@timestamp' not in body:
            body['@timestamp'] = dt_to_ts(ts_now())
        if self.writeback_es:
            try:
                res = self.writeback_es.create(index=self.writeback_index,
                                               doc_type=doc_type,
                                               body=body)
                return res
            except ElasticsearchException as e:
                logging.exception(
                    "Error writing alert info to elasticsearch: %s" % (e))
                self.writeback_es = None
Example #22
    def add_match(self, event):
        """ This function is called on all matching events. Rules use it to add
        extra information about the context of a match. Event is a dictionary
        containing terms directly from elasticsearch and alerts will report
        all of the information.

        :param event: The matching event, a dictionary of terms.
        """
        # Convert datetime's back to timestamps
        ts = self.rules.get('timestamp_field')
        if ts in event:
            event[ts] = dt_to_ts(event[ts])
        self.matches.append(event)
Example #23
    def find_recent_pending_alerts(self, time_limit):
        """ Queries writeback_es to find alerts that did not send
        and are newer than time_limit """

        # XXX only fetches 1000 results. If limit is reached, next loop will catch them
        # unless there is constantly more than 1000 alerts to send.

        # Fetch recent, unsent alerts that aren't part of an aggregate, earlier alerts first.
        query = {
            'query': {
                'query_string': {
                    'query': '!_exists_:aggregate_id AND alert_sent:false'
                }
            },
            'filter': {
                'range': {
                    'alert_time': {
                        'from': dt_to_ts(ts_now() - time_limit),
                        'to': dt_to_ts(ts_now())
                    }
                }
            },
            'sort': {
                'alert_time': {
                    'order': 'asc'
                }
            }
        }
        if self.writeback_es:
            try:
                res = self.writeback_es.search(index=self.writeback_index,
                                               doc_type='elastalert',
                                               body=query,
                                               size=1000)
                if res['hits']['hits']:
                    return res['hits']['hits']
            except Exception:  # TODO: Give this a more relevant exception; a blanket handler hides real errors.
                pass
        return []
Example #24
    def find_recent_pending_alerts(self, time_limit):
        """ Queries writeback_es to find alerts that did not send
        and are newer than time_limit """

        # XXX only fetches 1000 results. If limit is reached, next loop will catch them
        # unless there is constantly more than 1000 alerts to send.

        # Fetch recent, unsent alerts that aren't part of an aggregate, earlier alerts first.
        query = {'query': {'query_string': {'query': '!_exists_:aggregate_id AND alert_sent:false'}},
                 'filter': {'range': {'alert_time': {'from': dt_to_ts(ts_now() - time_limit),
                                                     'to': dt_to_ts(ts_now())}}},
                 'sort': {'alert_time': {'order': 'asc'}}}
        if self.writeback_es:
            try:
                res = self.writeback_es.search(index=self.writeback_index,
                                               doc_type='elastalert',
                                               body=query,
                                               size=1000)
                if res['hits']['hits']:
                    return res['hits']['hits']
            except Exception:  # TODO: Give this a more relevant exception; a blanket handler hides real errors.
                pass
        return []
Example #25
    def get_match_str(self, match):
        ts = match[self.rules['timestamp_field']]
        lt = self.rules.get('use_local_time')

        try:
            match_value = self.match_value[-1][:5]
        except Exception:
            match_value = []

        message = "Between %s and %s\n" % (pretty_ts(dt_to_ts(ts_to_dt(ts) - self.rules['timeframe']), lt), pretty_ts(ts, lt))
        message += "%s(%s) %s %s\nmatch value:\n\t%s...\n\n" % (
            self.rules['stat'],
            self.rules['stat_field'],
            self.rules['stat_type'],
            self.rules['threshold'],
            '\n\t'.join(match_value)
        )
        return message
Example #26
    def add_match(self, event):
        """ This function is called on all matching events. This rule uses it
        to add events related to the matched event, if requested by
        configuration of the rule (related_events flag)

        :param event: The matching event, a dictionary of terms.
        """
        # Convert datetime's back to timestamps
        ts = self.rules.get('timestamp_field')
        if ts in event:
            event[ts] = dt_to_ts(event[ts])

        if self.attach_related and len(self.matches):
            # we need to add related events, and matches array already
            # has a field - then add a given event into related_events array
            if not self.matches[0].get('related_events'):
                self.matches[0]['related_events'] = []

            self.matches[0]['related_events'].append(event)
        else:
            self.matches.append(event)
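
An illustrative sequence with attach_related enabled (rule and the events are stand-ins): the first match anchors the group, and later events accumulate under its related_events key:

    rule.add_match(first_event)    # becomes rule.matches[0]
    rule.add_match(second_event)   # appended to rule.matches[0]['related_events']
    rule.add_match(third_event)    # appended to the same list
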
Example #27
    def silence(self):
        """ Silence an alert for a period of time. --silence and --rule must be passed as args. """
        if not self.args.rule:
            logging.error('--silence must be used with --rule')
            exit(1)

        # With --rule, self.rules will only contain that specific rule
        rule_name = self.rules[0]['name']

        try:
            unit, num = self.args.silence.split('=')
            silence_time = datetime.timedelta(**{unit: int(num)})
            silence_ts = dt_to_ts(silence_time + datetime.datetime.utcnow())
        except (ValueError, TypeError):
            logging.error('%s is not a valid time period' %
                          (self.args.silence))
            exit(1)

        if not self.set_realert(rule_name, silence_ts):
            logging.error('Failed to save silence command to elasticsearch')
            exit(1)

        logging.info('Success. %s will be silenced until %s' %
                     (rule_name, silence_ts))
Example #28
    def run_rule(self, rule, endtime, starttime=None):
        """ Run a rule for a given time period, including querying and alerting on results.

        :param rule: The rule configuration.
        :param starttime: The earliest timestamp to query.
        :param endtime: The latest timestamp to query.
        :return: The number of matches that the rule produced.
        """
        run_start = time.time()
        self.current_es = Elasticsearch(host=rule['es_host'],
                                        port=rule['es_port'])
        self.current_es_addr = (rule['es_host'], rule['es_port'])

        # If there are pending aggregate matches, try processing them
        for x in range(len(rule['agg_matches'])):
            match = rule['agg_matches'].pop()
            self.add_aggregated_alert(match, rule)

        # Start from provided time if it's given
        if starttime:
            rule['starttime'] = starttime
        else:
            self.set_starttime(rule, endtime)
        rule['original_starttime'] = rule['starttime']

        # Don't run if starttime was set to the future
        if ts_now() <= rule['starttime']:
            logging.warning(
                "Attempted to use query start time in the future (%s), sleeping instead"
                % (starttime))
            return 0

        # Run the rule
        # If querying over a large time period, split it up into chunks
        self.num_hits = 0
        tmp_endtime = endtime
        buffer_time = rule.get('buffer_time', self.buffer_time)
        while endtime - rule['starttime'] > buffer_time:
            tmp_endtime = rule['starttime'] + self.run_every
            if not self.run_query(rule, rule['starttime'], tmp_endtime):
                return 0
            rule['starttime'] = tmp_endtime
        if not self.run_query(rule, rule['starttime'], endtime):
            return 0

        rule['type'].garbage_collect(endtime)

        # Process any new matches
        num_matches = len(rule['type'].matches)
        while rule['type'].matches:
            match = rule['type'].matches.pop(0)

            # If realert is set, silence the rule for that duration
            # Silence is cached by query_key, if it exists
            # Default realert time is 0 seconds

            # concatenate query_key (or none) with rule_name to form silence_cache key
            if 'query_key' in rule:
                try:
                    key = '.' + match[rule['query_key']]
                except KeyError:
                    # Some matches may not have a query key
                    key = ''
            else:
                key = ''

            if self.is_silenced(rule['name'] + key) or self.is_silenced(
                    rule['name']):
                logging.info('Ignoring match for silenced rule %s%s' %
                             (rule['name'], key))
                continue

            if rule['realert']:
                self.set_realert(
                    rule['name'] + key,
                    dt_to_ts(datetime.datetime.utcnow() + rule['realert']))

            # If no aggregation, alert immediately
            if not rule['aggregation']:
                self.alert([match], rule)
                continue

            # Add it as an aggregated match
            self.add_aggregated_alert(match, rule)

        time_taken = time.time() - run_start
        # Write to ES that we've run this rule against this time period
        body = {
            'rule_name': rule['name'],
            'endtime': endtime,
            'starttime': rule['starttime'],
            'matches': num_matches,
            'hits': self.num_hits,
            '@timestamp': ts_now(),
            'time_taken': time_taken
        }
        self.writeback('elastalert_status', body)

        return num_matches
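
The chunking loop above can be read in isolation: whenever the requested span exceeds buffer_time, the rule is queried in run_every-sized steps. A standalone sketch of that walk (a hypothetical helper, not part of the original code):

    def iter_query_windows(starttime, endtime, buffer_time, run_every):
        # Yield (start, end) pairs mirroring the while-loop in run_rule:
        # fixed-size steps until the remaining span fits within buffer_time.
        while endtime - starttime > buffer_time:
            tmp_endtime = starttime + run_every
            yield starttime, tmp_endtime
            starttime = tmp_endtime
        yield starttime, endtime
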