Example 1
    def __init__(self, context):
        """Initializer

        Args:
            context (dict): An AWS context object which provides metadata on the currently
                executing lambda function.
        """
        # Load the config. Validation occurs during load, which will
        # raise exceptions on any ConfigError
        StreamAlert.config = StreamAlert.config or config.load_config(validate=True)

        # Load the environment from the context arn
        self.env = config.parse_lambda_arn(context.invoked_function_arn)

        # Instantiate the AlertForwarder to handle sending the triggered alerts to the
        # alert processor
        self.alert_forwarder = AlertForwarder()

        # Instantiate a classifier that is used for this run
        self.classifier = StreamClassifier(config=self.config)

        self._failed_record_count = 0
        self._processed_record_count = 0
        self._processed_size = 0
        self._alerts = []

        rule_import_paths = [item for location in {'rule_locations', 'matcher_locations'}
                             for item in self.config['global']['general'][location]]

        # Create an instance of the RulesEngine class that gets cached in the
        # StreamAlert class as an instance property
        self._rules_engine = RulesEngine(self.config, *rule_import_paths)

        # Firehose client attribute
        self._firehose_client = None
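
For reference, here is a minimal sketch of how an initializer like this might be wired into a Lambda entry point; the `handler` name and module layout are illustrative assumptions rather than anything shown in these examples:

def handler(event, context):
    """Hypothetical Lambda entry point (names are assumptions, not taken from the snippets above)."""
    # StreamAlert.__init__ takes the Lambda context so it can parse the function ARN,
    # and run() takes the raw event whose 'Records' get classified (see Example 7)
    return StreamAlert(context).run(event)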
Example 2
    def test_processor(self, alerts):
        """Perform integration tests for the 'alert' Lambda function. Alerts
        that are fed through this are resultant from the rule processor tests.
        In order to end up here, the log must be configured to trigger a rule
        that would result in an alert being sent.

        Args:
            alerts (list): list of alerts to be processed that have been fed in
                from the rule processor.

        Returns:
            bool: status of the alert processor dispatching
        """
        # Set the logger level to ERROR so it's not too noisy
        StreamOutput.LOGGER.setLevel(logging.ERROR)
        for alert in alerts:
            if self.context.mocked:
                self.setup_outputs(alert)

            # Convert alert to the Dynamo event format expected by the alert processor
            event = AlertForwarder.dynamo_record(alert)
            event['Outputs'] = list(event['Outputs'])

            for output, current_test_passed in StreamOutput.handler(event, self.context).items():
                self.all_tests_passed = current_test_passed and self.all_tests_passed
                service, descriptor = output.split(':')
                message = 'sending alert to \'{}\''.format(descriptor)
                report_output(current_test_passed, [
                    '',
                    'alert',
                    service,
                    message
                ])

                self._alert_fail_pass[current_test_passed] += 1
Example 3
class TestAlertForwarder(object):
    """Test class for AlertForwarder"""
    # pylint: disable=no-self-use,protected-access

    @patch.dict(os.environ, {'ALERTS_TABLE': _ALERT_TABLE})
    def setup(self):
        # pylint: disable=attribute-defined-outside-init
        self.forwarder = AlertForwarder(load_env(get_mock_context()))

    def test_alert_item(self):
        """AlertForwarder - Convert Alert to Dynamo Item"""
        item = AlertForwarder.dynamo_record(_MOCK_ALERT)
        expected = {
            'RuleName': 'test_name',
            'AlertID': 'test-uuid',
            'Created': ANY,
            'Cluster': _CLUSTER,
            'LogSource': 'test_source',
            'LogType': 'test_type',
            'RuleDescription': 'Test Description',
            'SourceEntity': 'test_entity',
            'SourceService': 'test_service',
            'Outputs': {'out1:here', 'out2:there'},  # Duplicates are ignored
            'Record': '{"key":"value"}'
        }
        assert_equal(expected, item)

    @mock_dynamodb2()
    @patch('stream_alert.rule_processor.alert_forward.LOGGER')
    def test_send_alerts(self, mock_logger):
        """AlertForwarder - Send Alerts"""
        self.forwarder.send_alerts([_MOCK_ALERT] * 2)
        mock_logger.assert_has_calls([
            call.info('Successfully sent %d alert(s) to dynamo:%s', 2, _ALERT_TABLE)
        ])

    @patch.object(AlertForwarder, '_send_to_dynamo')
    @patch('stream_alert.rule_processor.alert_forward.LOGGER')
    def test_send_alerts_dynamo_exception(self, mock_logger, mock_dynamo):
        """AlertForwarder - Send Alerts with Dynamo Exception"""
        mock_dynamo.side_effect = ClientError({}, 'batch_write')
        self.forwarder.send_alerts(None)

        mock_dynamo.assert_called_once_with(None)
        mock_logger.assert_has_calls([
            call.exception('Error saving alerts to Dynamo')
        ])
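
Note that the `@mock_dynamodb2()` decorator from moto only mocks the DynamoDB API; for `send_alerts` to actually write, the mocked table generally has to exist first. Below is a minimal sketch of such a fixture, assuming `_ALERT_TABLE` holds the table name; the key schema and capacity values are illustrative, not the project's actual schema:

import boto3

def _create_mock_alerts_table():
    # Create the alerts table inside the moto mock so batch writes succeed
    # (hash key and throughput values are illustrative assumptions)
    boto3.client('dynamodb', region_name='us-east-1').create_table(
        TableName=_ALERT_TABLE,
        KeySchema=[{'AttributeName': 'RuleName', 'KeyType': 'HASH'}],
        AttributeDefinitions=[{'AttributeName': 'RuleName', 'AttributeType': 'S'}],
        ProvisionedThroughput={'ReadCapacityUnits': 5, 'WriteCapacityUnits': 5}
    )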
Example 4
class TestAlertForwarder(object):
    """Test class for AlertForwarder"""
    # pylint: disable=attribute-defined-outside-init,protected-access

    @patch('stream_alert.rule_processor.alert_forward.AlertTable', MagicMock())
    @patch.dict(os.environ, {'ALERTS_TABLE': _ALERTS_TABLE})
    def setup(self):
        self.forwarder = AlertForwarder()

    @patch('stream_alert.rule_processor.alert_forward.LOGGER')
    def test_send_alerts(self, mock_logger):
        """AlertForwarder - Send Alerts"""
        self.forwarder.send_alerts([1, 2, 3])
        self.forwarder._table.add_alerts.assert_called_once_with(  # pylint: disable=no-member
            [1, 2, 3])
        mock_logger.info.assert_called_once()

    @patch('stream_alert.rule_processor.alert_forward.LOGGER')
    def test_send_alerts_dynamo_exception(self, mock_logger):
        """AlertForwarder - ClientError When Sending Alerts"""
        self.forwarder._table.add_alerts.side_effect = ClientError(
            {}, 'batch_write')
        self.forwarder.send_alerts([])
        mock_logger.exception.assert_called_once()
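
Taken together, these two tests imply a `send_alerts` roughly shaped like the sketch below; this is inferred from the assertions rather than taken from the project's actual implementation, and the `name` attribute on the table wrapper is an assumption:

    def send_alerts(self, alerts):
        """Forward alerts to the Dynamo-backed alert table (sketch inferred from the tests above)."""
        try:
            self._table.add_alerts(alerts)
            LOGGER.info('Successfully sent %d alert(s) to dynamo:%s',
                        len(alerts), self._table.name)  # 'name' attribute is an assumption
        except ClientError:
            LOGGER.exception('Error saving alerts to Dynamo')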
Example 5
    def test_alert_item(self):
        """AlertForwarder - Convert Alert to Dynamo Item"""
        item = AlertForwarder.dynamo_record(_MOCK_ALERT)
        expected = {
            'RuleName': 'test_name',
            'AlertID': 'test-uuid',
            'Created': ANY,
            'Cluster': _CLUSTER,
            'LogSource': 'test_source',
            'LogType': 'test_type',
            'RuleDescription': 'Test Description',
            'SourceEntity': 'test_entity',
            'SourceService': 'test_service',
            'Outputs': {'out1:here', 'out2:there'},  # Duplicates are ignored
            'Record': '{"key":"value"}'
        }
        assert_equal(expected, item)
Example 6
    def setup(self):
        self.forwarder = AlertForwarder()
Example 7
class StreamAlert(object):
    """Wrapper class for handling StreamAlert classification and processing"""
    config = {}

    def __init__(self, context):
        """Initializer

        Args:
            context (dict): An AWS context object which provides metadata on the currently
                executing lambda function.
        """
        # Load the config. Validation occurs during load, which will
        # raise exceptions on any ConfigErrors
        StreamAlert.config = StreamAlert.config or load_config()

        # Load the environment from the context arn
        self.env = load_env(context)

        # Instantiate the AlertForwarder to handle sending the triggered alerts to the
        # alert processor
        self.alert_forwarder = AlertForwarder()

        # Instantiate a classifier that is used for this run
        self.classifier = StreamClassifier(config=self.config)

        self._failed_record_count = 0
        self._processed_record_count = 0
        self._processed_size = 0
        self._alerts = []

        rule_import_paths = [
            item for location in {'rule_locations', 'matcher_locations'}
            for item in self.config['global']['general'][location]
        ]

        # Create an instance of the RulesEngine class that gets cached in the
        # StreamAlert class as an instance property
        self._rules_engine = RulesEngine(self.config, *rule_import_paths)

        # Firehose client attribute
        self._firehose_client = None

    def run(self, event):
        """StreamAlert Lambda function handler.

        Loads the configuration for the StreamAlert function which contains
        available data sources, log schemas, normalized types, and outputs.
        Classifies logs sent into a parsed type.
        Matches records against rules.

        Args:
            event (dict): An AWS event mapped to a specific source/entity
                containing data read by Lambda.

        Returns:
            bool: True if all logs being parsed match a schema
        """
        records = event.get('Records', [])
        LOGGER.debug('Number of incoming records: %d', len(records))
        if not records:
            return False

        firehose_config = self.config['global'].get('infrastructure',
                                                    {}).get('firehose', {})
        if firehose_config.get('enabled'):
            self._firehose_client = StreamAlertFirehose(
                self.env['lambda_region'], firehose_config,
                self.config['logs'])

        payload_with_normalized_records = []
        for raw_record in records:
            # Get the service and entity from the payload. If the service/entity
            # is not in our config, log an error and go on to the next record
            service, entity = self.classifier.extract_service_and_entity(
                raw_record)
            if not service:
                LOGGER.error(
                    'No valid service found in payload\'s raw record. Skipping '
                    'record: %s', raw_record)
                continue

            if not entity:
                LOGGER.error(
                    'Unable to extract entity from payload\'s raw record for service %s. '
                    'Skipping record: %s', service, raw_record)
                continue

            # Cache the log sources for this service and entity on the classifier
            if not self.classifier.load_sources(service, entity):
                continue

            # Create the StreamPayload to use for encapsulating parsed info
            payload = load_stream_payload(service, entity, raw_record)
            if not payload:
                continue

            payload_with_normalized_records.extend(
                self._process_alerts(payload))

        # Log normalized records metric
        MetricLogger.log_metric(FUNCTION_NAME, MetricLogger.NORMALIZED_RECORDS,
                                len(payload_with_normalized_records))

        # Apply Threat Intel to normalized records at the end of the Rule Processor invocation
        record_alerts = self._rules_engine.threat_intel_match(
            payload_with_normalized_records)
        self._alerts.extend(record_alerts)
        if record_alerts:
            self.alert_forwarder.send_alerts(record_alerts)

        MetricLogger.log_metric(FUNCTION_NAME, MetricLogger.TOTAL_RECORDS,
                                self._processed_record_count)

        MetricLogger.log_metric(FUNCTION_NAME,
                                MetricLogger.TOTAL_PROCESSED_SIZE,
                                self._processed_size)

        LOGGER.debug('Invalid record count: %d', self._failed_record_count)

        MetricLogger.log_metric(FUNCTION_NAME, MetricLogger.FAILED_PARSES,
                                self._failed_record_count)

        LOGGER.debug('%d alerts triggered', len(self._alerts))

        MetricLogger.log_metric(FUNCTION_NAME, MetricLogger.TRIGGERED_ALERTS,
                                len(self._alerts))

        # Check if debug logging is enabled before JSON-dumping alerts, since
        # this can be time consuming if there are a lot of alerts
        if self._alerts and LOGGER.isEnabledFor(LOG_LEVEL_DEBUG):
            LOGGER.debug(
                'Alerts:\n%s',
                json.dumps([alert.output_dict() for alert in self._alerts],
                           indent=2,
                           sort_keys=True))

        if self._firehose_client:
            self._firehose_client.send()

        # Only log rule info here if this is not running tests
        # During testing, this gets logged at the end and printing here could be confusing
        # since stress testing calls this method multiple times
        if self.env['lambda_alias'] != 'development':
            stats.print_rule_stats(True)

        return self._failed_record_count == 0

    @property
    def alerts(self):
        """Returns list of Alert instances (useful for testing)."""
        return self._alerts

    def _process_alerts(self, payload):
        """Run the record through the rules, saving any alerts and forwarding them to Dynamo.

        Args:
            payload (StreamPayload): StreamAlert payload object being processed

        Returns:
            list: normalized records extracted while running the rules, to be
                matched against threat intel once all payloads are processed
        """
        payload_with_normalized_records = []
        for record in payload.pre_parse():
            # Increment the processed size using the length of this record
            self._processed_size += len(record.pre_parsed_record)
            self.classifier.classify_record(record)
            if not record.valid:
                if self.env['lambda_alias'] != 'development':
                    LOGGER.error(
                        'Record does not match any defined schemas: %s\n%s',
                        record, record.pre_parsed_record)

                self._failed_record_count += 1
                continue

            # Increment the total processed records to get an accurate assessment of throughput
            self._processed_record_count += len(record.records)

            LOGGER.debug(
                'Classified and Parsed Payload: <Valid: %s, Log Source: %s, Entity: %s>',
                record.valid, record.log_source, record.entity)

            record_alerts, normalized_records = self._rules_engine.run(record)

            payload_with_normalized_records.extend(normalized_records)

            LOGGER.debug(
                'Processed %d valid record(s) that resulted in %d alert(s).',
                len(payload.records), len(record_alerts))

            # Add all parsed records to the categorized payload dict only if Firehose is enabled
            if self._firehose_client:
                # Only send payloads with enabled log sources
                if self._firehose_client.enabled_log_source(
                        payload.log_source):
                    self._firehose_client.categorized_payloads[
                        payload.log_source].extend(payload.records)

            if not record_alerts:
                continue

            # Extend the list of alerts with any new ones so they can be returned
            self._alerts.extend(record_alerts)

            self.alert_forwarder.send_alerts(record_alerts)

        return payload_with_normalized_records
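
For reference, `run()` is driven by a standard Lambda event; a Kinesis-sourced event passed to it looks roughly like the following (standard AWS event shape, with illustrative ARN and data values):

SAMPLE_KINESIS_EVENT = {
    'Records': [
        {
            'eventSource': 'aws:kinesis',
            'eventSourceARN': 'arn:aws:kinesis:us-east-1:123456789012:stream/example_stream',
            # Kinesis record data arrives base64-encoded; this decodes to '{"key": "value"}'
            'kinesis': {'data': 'eyJrZXkiOiAidmFsdWUifQ=='}
        }
    ]
}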
Example 8
    def setup(self):
        # pylint: disable=attribute-defined-outside-init
        self.forwarder = AlertForwarder(load_env(get_mock_context()))