class StreamAlert(object):
    """Wrapper class for handling StreamAlert classification and processing"""
    config = {}

    def __init__(self, context):
        """Initializer

        Args:
            context (dict): An AWS context object which provides metadata on the
                currently executing lambda function.
        """
        # Load the config. Validation occurs during load, which will
        # raise exceptions on any ConfigError
        StreamAlert.config = StreamAlert.config or config.load_config(validate=True)

        # Load the environment from the context arn
        self.env = config.parse_lambda_arn(context.invoked_function_arn)

        # Instantiate the alert forwarder here to handle sending the triggered
        # alerts to the alert processor
        self.alert_forwarder = AlertForwarder()

        # Instantiate a classifier that is used for this run
        self.classifier = StreamClassifier(config=self.config)

        self._failed_record_count = 0
        self._processed_record_count = 0
        self._processed_size = 0
        self._alerts = []

        rule_import_paths = [
            item for location in {'rule_locations', 'matcher_locations'}
            for item in self.config['global']['general'][location]
        ]

        # Create an instance of the RulesEngine class that gets cached in the
        # StreamAlert class as an instance property
        self._rules_engine = RulesEngine(self.config, *rule_import_paths)

        # Firehose client attribute
        self._firehose_client = None

    def run(self, event):
        """StreamAlert Lambda function handler.

        Loads the configuration for the StreamAlert function which contains
        available data sources, log schemas, normalized types, and outputs.
        Classifies logs sent into a parsed type.
        Matches records against rules.

        Args:
            event (dict): An AWS event mapped to a specific source/entity
                containing data read by Lambda.

        Returns:
            bool: True if all logs being parsed match a schema
        """
        records = event.get('Records', [])
        LOGGER.debug('Number of incoming records: %d', len(records))
        if not records:
            return False

        firehose_config = self.config['global'].get('infrastructure', {}).get('firehose', {})
        if firehose_config.get('enabled'):
            self._firehose_client = FirehoseClient(self.env['region'],
                                                   firehose_config=firehose_config,
                                                   log_sources=self.config['logs'])

        payload_with_normalized_records = []
        for raw_record in records:
            # Get the service and entity from the payload. If the service/entity
            # is not in our config, log an error and go on to the next record
            service, entity = self.classifier.extract_service_and_entity(raw_record)
            if not service:
                LOGGER.error('No valid service found in payload\'s raw record. Skipping '
                             'record: %s', raw_record)
                continue

            if not entity:
                LOGGER.error(
                    'Unable to extract entity from payload\'s raw record for service %s. '
                    'Skipping record: %s', service, raw_record)
                continue

            # Cache the log sources for this service and entity on the classifier
            if not self.classifier.load_sources(service, entity):
                continue

            # Create the StreamPayload to use for encapsulating parsed info
            payload = load_stream_payload(service, entity, raw_record)
            if not payload:
                continue

            payload_with_normalized_records.extend(self._process_alerts(payload))

        # Log normalized records metric
        MetricLogger.log_metric(FUNCTION_NAME, MetricLogger.NORMALIZED_RECORDS,
                                len(payload_with_normalized_records))

        # Apply Threat Intel to normalized records at the end of the Rule Processor invocation
        record_alerts = self._rules_engine.threat_intel_match(payload_with_normalized_records)
        self._alerts.extend(record_alerts)
        if record_alerts:
            self.alert_forwarder.send_alerts(record_alerts)

        MetricLogger.log_metric(FUNCTION_NAME, MetricLogger.TOTAL_RECORDS,
                                self._processed_record_count)

        MetricLogger.log_metric(FUNCTION_NAME, MetricLogger.TOTAL_PROCESSED_SIZE,
                                self._processed_size)

        LOGGER.debug('Invalid record count: %d', self._failed_record_count)

        MetricLogger.log_metric(FUNCTION_NAME, MetricLogger.FAILED_PARSES,
                                self._failed_record_count)

        LOGGER.debug('%s alerts triggered', len(self._alerts))

        MetricLogger.log_metric(FUNCTION_NAME, MetricLogger.TRIGGERED_ALERTS,
                                len(self._alerts))

        # Check if debug logging is enabled before JSON dumping alerts since
        # this can be time consuming if there are a lot of alerts
        if self._alerts and LOGGER_DEBUG_ENABLED:
            LOGGER.debug('Alerts:\n%s',
                         json.dumps([alert.output_dict() for alert in self._alerts],
                                    indent=2, sort_keys=True))

        if self._firehose_client:
            self._firehose_client.send()

        # Only log rule info here if this is not running tests.
        # During testing, this gets logged at the end, and printing here could be
        # confusing since stress testing calls this method multiple times
        if self.env['qualifier'] != 'development':
            stats.print_rule_stats(True)

        return self._failed_record_count == 0

    @property
    def alerts(self):
        """Returns list of Alert instances (useful for testing)."""
        return self._alerts

    def _process_alerts(self, payload):
        """Run the record through the rules, saving any alerts and forwarding them to Dynamo.

        Args:
            payload (StreamPayload): StreamAlert payload object being processed
        """
        payload_with_normalized_records = []
        for record in payload.pre_parse():
            # Increment the processed size using the length of this record
            self._processed_size += len(record.pre_parsed_record)
            self.classifier.classify_record(record)
            if not record.valid:
                if self.env['qualifier'] != 'development':
                    LOGGER.error('Record does not match any defined schemas: %s\n%s',
                                 record, record.pre_parsed_record)

                self._failed_record_count += 1
                continue

            # Increment the total processed records to get an accurate assessment of throughput
            self._processed_record_count += len(record.records)

            LOGGER.debug(
                'Classified and Parsed Payload: <Valid: %s, Log Source: %s, Entity: %s>',
                record.valid, record.log_source, record.entity)

            record_alerts, normalized_records = self._rules_engine.run(record)
            payload_with_normalized_records.extend(normalized_records)

            LOGGER.debug('Processed %d valid record(s) that resulted in %d alert(s).',
                         len(payload.records), len(record_alerts))

            # Add all parsed records to the categorized payload dict only if Firehose is enabled
            if self._firehose_client:
                self._firehose_client.add_payload_records(payload.log_source, payload.records)

            if not record_alerts:
                continue

            # Extend the list of alerts with any new ones so they can be returned
            self._alerts.extend(record_alerts)

            self.alert_forwarder.send_alerts(record_alerts)

        return payload_with_normalized_records
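
# --- Illustrative usage (not part of the original module) ---
# A minimal sketch of how a Lambda entry point could delegate to the class
# above. The `handler` name is an assumption for illustration; the deployed
# function name and event wiring come from the StreamAlert infrastructure
# configuration, not from this file.
#
#   def handler(event, context):
#       """Hypothetical Lambda entry point delegating to StreamAlert"""
#       return StreamAlert(context).run(event)
#
# Lambda would invoke `handler` with the raw event (e.g. a Kinesis or S3
# record batch) and a context object whose `invoked_function_arn` is parsed
# in __init__ to determine the region and qualifier.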
class TestFirehoseClient(object):
    """Test class for FirehoseClient"""
    # pylint: disable=protected-access,no-self-use,attribute-defined-outside-init

    def setup(self):
        """Setup before each method"""
        self.sa_firehose = FirehoseClient(region='us-east-1')

    def teardown(self):
        """Teardown after each method"""
        FirehoseClient._ENABLED_LOGS.clear()

    @staticmethod
    def _sample_categorized_payloads():
        return {
            'unit_test_simple_log': [
                {
                    'unit_key_01': 1,
                    'unit_key_02': 'test'
                },
                {
                    'unit_key_01': 2,
                    'unit_key_02': 'test'
                }
            ],
            'test_log_type_json_nested': [
                {
                    'date': 'January 01, 3005',
                    'unixtime': '32661446400',
                    'host': 'my-host.name.website.com',
                    'data': {
                        'super': 'secret'
                    }
                }
            ]
        }

    @mock_kinesis
    def _mock_delivery_streams(self, delivery_stream_names):
        """Mock Kinesis Delivery Streams for tests"""
        for delivery_stream in delivery_stream_names:
            self.sa_firehose._client.create_delivery_stream(
                DeliveryStreamName=delivery_stream,
                S3DestinationConfiguration={
                    'RoleARN': 'arn:aws:iam::123456789012:role/firehose_delivery_role',
                    'BucketARN': 'arn:aws:s3:::kinesis-test',
                    'Prefix': '{}/'.format(delivery_stream),
                    'BufferingHints': {
                        'SizeInMBs': 123,
                        'IntervalInSeconds': 124
                    },
                    'CompressionFormat': 'Snappy',
                })

    @patch('stream_alert.rule_processor.firehose.LOGGER')
    @mock_kinesis
    def test_record_delivery_failed_put_count(self, mock_logging):
        """FirehoseClient - Record Delivery - Failed Put Count"""
        # Add sample categorized payloads
        for payload_type, logs in self._sample_categorized_payloads().iteritems():
            self.sa_firehose._categorized_payloads[payload_type].extend(logs)

        # Setup mocked Delivery Streams
        self._mock_delivery_streams(
            ['streamalert_data_test_log_type_json_nested',
             'streamalert_data_unit_test_simple_log'])

        with patch.object(self.sa_firehose._client, 'put_record_batch') as firehose_mock:
            # Simulate two fully throttled batches followed by a successful retry
            firehose_mock.side_effect = [
                {
                    'FailedPutCount': 3,
                    'RequestResponses': [
                        {'ErrorCode': 'ServiceUnavailableException', 'ErrorMessage': 'Slow down.'},
                        {'ErrorCode': 'ServiceUnavailableException', 'ErrorMessage': 'Slow down.'},
                        {'ErrorCode': 'ServiceUnavailableException', 'ErrorMessage': 'Slow down.'}
                    ]
                },
                {
                    'FailedPutCount': 3,
                    'RequestResponses': [
                        {'ErrorCode': 'ServiceUnavailableException', 'ErrorMessage': 'Slow down.'},
                        {'ErrorCode': 'ServiceUnavailableException', 'ErrorMessage': 'Slow down.'},
                        {'ErrorCode': 'ServiceUnavailableException', 'ErrorMessage': 'Slow down.'}
                    ]
                },
                {
                    'FailedPutCount': 0,
                    'RequestResponses': [
                        {'RecordId': '12345678910', 'ErrorCode': 'None', 'ErrorMessage': 'None'},
                        {'RecordId': '12345678910', 'ErrorCode': 'None', 'ErrorMessage': 'None'},
                        {'RecordId': '12345678910', 'ErrorCode': 'None', 'ErrorMessage': 'None'}
                    ]
                }
            ]

            self.sa_firehose.send()

            firehose_mock.assert_called()
            assert_true(mock_logging.info.called)

    @patch('stream_alert.rule_processor.firehose.LOGGER')
    @mock_kinesis
    def test_record_delivery(self, mock_logging):
        """FirehoseClient - Record Delivery"""
        # Add sample categorized payloads
        for payload_type, logs in self._sample_categorized_payloads().iteritems():
            self.sa_firehose._categorized_payloads[payload_type].extend(logs)

        # Setup mocked Delivery Streams
        self._mock_delivery_streams(
            ['streamalert_data_test_log_type_json_nested',
             'streamalert_data_unit_test_simple_log'])

        # Send the records
        with patch.object(self.sa_firehose._client, 'put_record_batch') as firehose_mock:
            firehose_mock.return_value = {'FailedPutCount': 0}
            self.sa_firehose.send()

            firehose_mock.assert_called()
            assert_true(mock_logging.info.called)

    @patch('stream_alert.rule_processor.firehose.LOGGER')
    @mock_kinesis
    def test_record_delivery_failure(self, mock_logging):
        """FirehoseClient - Record Delivery - Failed PutRecord"""
        # Add sample categorized payloads
        for payload_type, logs in self._sample_categorized_payloads().iteritems():
            self.sa_firehose._categorized_payloads[payload_type].extend(logs)

        # Setup mocked Delivery Streams
        self._mock_delivery_streams(
            ['streamalert_data_test_log_type_json_nested',
             'streamalert_data_unit_test_simple_log'])

        # Send the records
        with patch.object(self.sa_firehose._client, 'put_record_batch') as firehose_mock:
            firehose_mock.return_value = {
                'FailedPutCount': 3,
                'RequestResponses': [
                    {
                        'RecordId': '12345',
                        'ErrorCode': '300',
                        'ErrorMessage': 'Bad message!!!'
                    },
                ]
            }
            self.sa_firehose.send()

            firehose_mock.assert_called()
            assert_true(mock_logging.error.called)

    @patch('stream_alert.rule_processor.firehose.LOGGER')
    @mock_kinesis
    def test_record_delivery_client_error(self, mock_logging):
        """FirehoseClient - Record Delivery - Client Error"""
        test_events = [
            # unit_test_simple_log
            {
                'unit_key_01': 2,
                'unit_key_02': 'testtest'
            } for _ in range(10)
        ]

        self.sa_firehose._firehose_request_helper('invalid_stream', test_events)

        # Expected message (for reference):
        # 'Client Error ... An error occurred (ResourceNotFoundException) when
        # calling the PutRecordBatch operation: Stream invalid_stream under
        # account 123456789012 not found.'
        # Note: Mock.called_with() is not an assertion (it silently returns a
        # new Mock), so check the called flag explicitly instead
        assert_true(mock_logging.error.called)

    @mock_kinesis
    def test_load_enabled_sources(self):
        """FirehoseClient - Load Enabled Sources"""
        config = load_config('tests/unit/conf')
        firehose_config = {
            'enabled_logs': [
                'json:regex_key_with_envelope',
                'test_cloudtrail',
                'cloudwatch'  # expands to 2 logs
            ]
        }

        enabled_logs = FirehoseClient.load_enabled_log_sources(firehose_config, config['logs'])
        assert_equal(len(enabled_logs), 4)

        # Make sure the substitution works properly
        assert_true(all([':' not in log for log in enabled_logs]))
        assert_false(FirehoseClient.enabled_log_source('test_inspec'))

    @patch('stream_alert.rule_processor.firehose.LOGGER.error')
    @mock_kinesis
    def test_load_enabled_sources_invalid_log(self, mock_logging):
        """FirehoseClient - Load Enabled Sources - Invalid Log"""
        config = load_config('tests/unit/conf')
        firehose_config = {'enabled_logs': ['log-that-doesnt-exist']}

        sa_firehose = FirehoseClient(region='us-east-1',
                                     firehose_config=firehose_config,
                                     log_sources=config['logs'])

        assert_equal(len(sa_firehose._ENABLED_LOGS), 0)
        mock_logging.assert_called_with(
            'Enabled Firehose log %s not declared in logs.json',
            'log-that-doesnt-exist'
        )

    def test_strip_successful_records(self):
        """FirehoseClient - Strip Successful Records"""
        batch = [{'test': 'success'}, {'test': 'data'},
                 {'other': 'failure'}, {'other': 'info'}]
        response = {
            'FailedPutCount': 1,
            'RequestResponses': [
                {'RecordId': 'rec_id_00'},
                {'RecordId': 'rec_id_01'},
                {'ErrorCode': 10, 'ErrorMessage': 'foo'},
                {'RecordId': 'rec_id_03'}
            ]
        }

        expected_batch = [{'other': 'failure'}]
        FirehoseClient._strip_successful_records(batch, response)

        assert_equal(batch, expected_batch)

    def test_segment_records_by_size(self):
        """FirehoseClient - Segment Large Records"""
        record_batch = [
            # unit_test_simple_log
            {
                'unit_key_01': 2,
                'unit_key_02': 'testtest' * 10000
            } for _ in range(100)
        ]

        sized_batches = []
        for sized_batch in FirehoseClient._segment_records_by_size(record_batch):
            sized_batches.append(sized_batch)

        assert_true(len(str(sized_batches[0])) < 4000000)
        assert_equal(len(sized_batches), 4)
        assert_true(isinstance(sized_batches[3][0], dict))

    def test_sanitize_keys(self):
        """FirehoseClient - Sanitize Keys"""
        # test_log_type_json_nested
        test_event = {
            'date': 'January 01, 3005',
            'unixtime': '32661446400',
            'host': 'my-host.name.website.com',
            'data': {
                'super-duper': 'secret',
                'sanitize_me': 1,
                'example-key': 1,
                'moar**data': 2,
                'even.more': 3
            }
        }

        expected_sanitized_event = {
            'date': 'January 01, 3005',
            'unixtime': '32661446400',
            'host': 'my-host.name.website.com',
            'data': {
                'super_duper': 'secret',
                'sanitize_me': 1,
                'example_key': 1,
                'moar__data': 2,
                'even_more': 3
            }
        }

        sanitized_event = FirehoseClient.sanitize_keys(test_event)
        assert_equal(sanitized_event, expected_sanitized_event)

    @patch('stream_alert.rule_processor.firehose.LOGGER')
    def test_limit_record_size(self, mock_logging):
        """FirehoseClient - Record Size Check"""
        test_events = [
            # unit_test_simple_log
            {
                'unit_key_01': 1,
                'unit_key_02': 'test' * 250001  # is 4 bytes higher than max
            },
            {
                'unit_key_01': 2,
                'unit_key_02': 'test'
            },
            # test_log_type_json_nested
            {
                'date': 'January 01, 3005',
                'unixtime': '32661446400',
                'host': 'my-host.name.website.com',
                'data': {
                    'super': 'secret'
                }
            },
            # add another unit_test_sample_log to verify in a different position
            {
                'unit_key_01': 1,
                'unit_key_02': 'test' * 250001  # is 4 bytes higher than max
            },
            {
                'test': 1
            }
        ]

        FirehoseClient._limit_record_size(test_events)

        # Use assert_equal here; assert_true with two arguments treats the
        # second as a failure message and always passes for a non-empty list
        assert_equal(len(test_events), 3)
        assert_true(mock_logging.error.called)
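
# --- Illustrative note (not part of the original test suite) ---
# These tests rely on nose-style assertions (assert_true/assert_equal) and
# moto's @mock_kinesis decorator, so no real AWS credentials or resources are
# required. A minimal invocation might look like the following; the test file
# path is an assumption inferred from the patch targets above:
#
#   nosetests tests/unit/stream_alert_rule_processor/test_firehose.py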