Example #1
    def run(self, records):
        """Evaluate all applicable rules against classified records.

        Args:
            records (list): Dictionaries of records sent from the classifier
                function. Record format:
                    {
                        'cluster': 'prod',
                        'log_schema_type': 'cloudwatch:cloudtrail',
                        'record': {
                            'key': 'value'
                        },
                        'service': 'kinesis',
                        'resource': 'kinesis_stream_name',
                        'data_type': 'json'
                    }

        Returns:
            list: Alerts that have been triggered by this data
        """
        LOGGER.info('Processing %d records', len(records))

        # Pull any threat intelligence matches out of the batch up front
        self._extract_threat_intel(records)

        alerts = []
        for payload in records:
            applicable_rules = Rule.rules_for_log_type(payload['log_schema_type'])
            if not applicable_rules:
                LOGGER.debug('No rules to process for %s', payload)
                continue

            record = payload['record']
            for rule in applicable_rules:
                # Rules whose required subkeys are absent are skipped
                if not self._process_subkeys(record, rule):
                    continue

                # Rules whose matchers reject the record are skipped
                if not rule.check_matchers(record):
                    continue

                alert = self._rule_analysis(payload, rule)
                if alert:
                    alerts.append(alert)

        self._alert_forwarder.send_alerts(alerts)

        # Rule stats are only logged inline when deployed in Lambda; during
        # testing they get logged once at the end instead, since stress
        # testing calls this method repeatedly and inline output would be
        # confusing
        if self._in_lambda:
            LOGGER.info(get_rule_stats(True))

        MetricLogger.log_metric(FUNCTION_NAME, MetricLogger.TRIGGERED_ALERTS,
                                len(alerts))

        return alerts
Example #2
    def run(self, input_payload):
        """Run every applicable rule over the payload's records.

        Rules are looked up by the payload's log source. Each record is
        normalized up front (once, for the union of the datatypes declared
        by all matched rules), then checked against each rule's required
        subkeys and matchers before the rule itself is evaluated.

        Returns:
            A tuple(list, list).
                First return is a list of Alert instances.
                Second return is a list of payload instance with normalized records.
        """
        triggered = []
        # Normalized records are kept for later Threat Intel processing
        normalized_records = []
        payload = copy(input_payload)

        rules = Rule.rules_for_log_type(payload.log_source)
        if not rules:
            LOGGER.debug('No rules to process for %s', payload)
            return triggered, normalized_records

        # Collect the union of every datatype declared across all matched
        # rules so normalization runs once per record rather than once per
        # rule, which keeps per-record cost constant as rule count grows
        all_datatypes = set()
        for rule in rules:
            if rule.datatypes:
                all_datatypes.update(rule.datatypes)

        if all_datatypes:
            for record in payload.records:
                self._apply_normalization(record, normalized_records,
                                          all_datatypes, payload)

        for record in payload.records:
            for rule in rules:
                # Required subkeys must be present in the record
                if not self.process_subkeys(record, payload.type, rule):
                    continue

                # Every matcher listed by the rule must accept the record
                if not rule.check_matchers(record):
                    continue

                self.rule_analysis(record, rule, payload, triggered)

        return triggered, normalized_records
Example #3
    def run(self, input_payload):
        """Run every applicable rule over the payload's records.

        Rules are looked up by the payload's log source. Each record is
        checked against each rule's required subkeys and matchers; when a
        rule declares datatypes, a normalized copy of the record is built
        before the rule itself is evaluated.

        Returns:
            A tuple(list, list).
                First return is a list of Alert instances.
                Second return is a list of payload instance with normalized records.
        """
        triggered = []
        # Normalized records are kept for later Threat Intel processing
        normalized_records = []
        payload = copy(input_payload)

        rules = Rule.rules_for_log_type(payload.log_source)
        if not rules:
            LOGGER.debug('No rules to process for %s', payload)
            return triggered, normalized_records

        for record in payload.records:
            for rule in rules:
                # Required subkeys must be present in the record
                if not self.process_subkeys(record, payload.type, rule):
                    continue

                # Every matcher listed by the rule must accept the record
                if not rule.check_matchers(record):
                    continue

                if rule.datatypes:
                    # With the rule's 'datatypes' option set, the engine
                    # normalizes the whole record and analyzes the copy
                    analyzed = self._apply_normalization(
                        record, normalized_records, rule, payload)
                else:
                    analyzed = record
                self.rule_analysis(analyzed, rule, payload, triggered)

        return triggered, normalized_records