Example #1
0
def test_generate_cloudwatch_monitoring_custom_sns():
    """CLI - Terraform Generate Cloudwatch Monitoring with Existing SNS Topic"""
    # Point the monitoring config at a pre-existing, custom-named SNS topic
    monitoring_settings = CONFIG['global']['infrastructure']['monitoring']
    monitoring_settings['create_sns_topic'] = False
    monitoring_settings['sns_topic_name'] = 'unit_test_monitoring'

    cluster_dict = _common.infinitedict()
    result = monitoring.generate_monitoring('test', cluster_dict, CONFIG)

    # The generated module should reference the custom topic in its ARN
    expected = {
        'source': 'modules/tf_stream_alert_monitoring',
        'sns_topic_arn': 'arn:aws:sns:us-west-1:12345678910:unit_test_monitoring',
        'lambda_functions': [
            'unit-testing_test_streamalert_rule_processor',
            'unit-testing_test_streamalert_alert_processor'
        ],
        'kinesis_stream': 'unit-testing_test_stream_alert_kinesis'
    }

    assert_true(result)
    assert_equal(cluster_dict['module']['cloudwatch_monitoring_test'], expected)
Example #2
0
def test_generate_s3_events():
    """CLI - Terraform S3 Events with Valid Bucket"""
    cluster_dict = _common.infinitedict()
    # Register a single valid bucket for the 'advanced' cluster
    CONFIG['clusters']['advanced']['modules']['s3_events'] = {
        's3_bucket_id': 'unit-test-bucket.data'
    }
    result = s3_events.generate_s3_events('advanced', cluster_dict, CONFIG)

    # One s3_events module wired to the cluster's Lambda and role
    expected = {
        'module': {
            's3_events_advanced': {
                'source': 'modules/tf_stream_alert_s3_events',
                'lambda_function_arn': '${module.stream_alert_advanced.lambda_arn}',
                'lambda_function_name': 'unit-testing_advanced_stream_alert_processor',
                's3_bucket_id': 'unit-test-bucket.data',
                's3_bucket_arn': 'arn:aws:s3:::unit-test-bucket.data',
                'lambda_role_id': '${module.stream_alert_advanced.lambda_role_id}',
                'lambda_role_arn': '${module.stream_alert_advanced.lambda_role_arn}'
            }
        }
    }

    assert_true(result)
    assert_equal(cluster_dict, expected)
Example #3
0
def test_generate_s3_events():
    """CLI - Terraform S3 Events with Valid Buckets"""
    cluster_dict = _common.infinitedict()
    result = s3_events.generate_s3_events('advanced', cluster_dict, CONFIG)

    # One module per configured bucket; dots in bucket names map to underscores
    expected = {
        'module': {
            's3_events_unit-test-bucket_data': {
                'source': 'modules/tf_stream_alert_s3_events',
                'lambda_function_arn': '${module.stream_alert_advanced.lambda_arn}',
                'bucket_id': 'unit-test-bucket.data',
                'enable_events': True,
                'lambda_role_id': '${module.stream_alert_advanced.lambda_role_id}',
            },
            's3_events_unit-test_cloudtrail_data': {
                'source': 'modules/tf_stream_alert_s3_events',
                'lambda_function_arn': '${module.stream_alert_advanced.lambda_arn}',
                'bucket_id': 'unit-test.cloudtrail.data',
                'enable_events': False,
                'lambda_role_id': '${module.stream_alert_advanced.lambda_role_id}',
            }
        }
    }

    assert_true(result)
    assert_equal(cluster_dict, expected)
Example #4
0
def test_generate_cloudwatch_monitoring_no_kinesis():
    """CLI - Terraform Generate Cloudwatch Monitoring - Kinesis Disabled"""
    cluster_dict = _common.infinitedict()
    # Enable only the Lambda alarms; leave Kinesis alarms turned off
    module_settings = CONFIG['clusters']['test']['modules']['cloudwatch_monitoring']
    module_settings['kinesis_alarms_enabled'] = False
    module_settings['lambda_alarms_enabled'] = True
    result = monitoring.generate_monitoring('test', cluster_dict, CONFIG)

    # Uses the default SNS topic; no 'kinesis_stream' key is expected
    expected = {
        'source': 'modules/tf_stream_alert_monitoring',
        'sns_topic_arn': 'arn:aws:sns:us-west-1:12345678910:stream_alert_monitoring',
        'lambda_functions': [
            'unit-testing_test_streamalert_rule_processor',
            'unit-testing_test_streamalert_alert_processor'
        ],
        'lambda_alarms_enabled': True,
        'kinesis_alarms_enabled': False
    }

    assert_true(result)
    assert_equal(cluster_dict['module']['cloudwatch_monitoring_test'], expected)
Example #5
0
def test_kinesis_streams_with_trusted_account():
    """CLI - Terraform Generate Kinesis Streams with trusted account"""
    cluster_dict = _common.infinitedict()
    result = kinesis_streams.generate_kinesis_streams(
        'trusted', cluster_dict, CONFIG)

    # The 'trusted' cluster fixture carries a trusted_accounts entry
    expected = {
        'module': {
            'kinesis_trusted': {
                'source': 'modules/tf_stream_alert_kinesis_streams',
                'account_id': '12345678910',
                'shard_level_metrics': [],
                'region': 'us-west-1',
                'prefix': 'unit-testing',
                'cluster_name': 'trusted',
                'stream_name': 'unit-testing_trusted_stream_alert_kinesis',
                'shards': 1,
                'retention': 24,
                'create_user': True,
                'trusted_accounts': ['98765432100']
            }
        }
    }

    assert_true(result)
    assert_equal(cluster_dict, expected)
Example #6
0
def test_generate_cloudwatch_monitoring_with_settings():
    """CLI - Terraform Generate Cloudwatch Monitoring with Custom Settings"""
    cluster_dict = _common.infinitedict()
    result = monitoring.generate_monitoring('advanced', cluster_dict, CONFIG)

    # Default SNS topic plus the custom alarm settings from the fixture config
    expected = {
        'source': 'modules/tf_stream_alert_monitoring',
        'sns_topic_arn': 'arn:aws:sns:us-west-1:12345678910:stream_alert_monitoring',
        'lambda_functions': [
            'unit-testing_advanced_streamalert_rule_processor',
            'unit-testing_advanced_streamalert_alert_processor'
        ],
        'kinesis_stream': 'unit-testing_advanced_stream_alert_kinesis',
        'lambda_alarms_enabled': True,
        'kinesis_alarms_enabled': True,
        'kinesis_iterator_age_error_threshold': '3000000'
    }

    assert_true(result)
    assert_equal(cluster_dict['module']['cloudwatch_monitoring_advanced'], expected)
Example #7
0
def test_kinesis_streams():
    """CLI - Terraform Generate Kinesis Streams"""
    cluster_dict = _common.infinitedict()
    result = kinesis_streams.generate_kinesis_streams(
        'advanced', cluster_dict, CONFIG)

    expected = {
        'module': {
            'kinesis_advanced': {
                'source': 'modules/tf_stream_alert_kinesis_streams',
                'account_id': '12345678910',
                'shard_level_metrics': ['IncomingBytes'],
                'region': 'us-west-1',
                'prefix': 'unit-testing',
                'cluster_name': 'advanced',
                'stream_name': 'unit-testing_advanced_stream_alert_kinesis',
                'shards': 1,
                'retention': 24,
                'create_user': True
            }
        }
    }

    assert_true(result)
    assert_equal(cluster_dict, expected)
Example #8
0
def generate_cluster(**kwargs):
    """Generate a StreamAlert cluster file.

    Keyword Args:
        cluster_name (str): The name of the currently generating cluster
        config (dict): The loaded config from the 'conf/' directory

    Returns:
        dict: generated Terraform cluster dictionary, or None if any
            required generator reports a failure
    """
    config = kwargs.get('config')
    cluster_name = kwargs.get('cluster_name')

    modules = config['clusters'][cluster_name]['modules']
    cluster_dict = infinitedict()

    if not generate_stream_alert(cluster_name, cluster_dict, config):
        return

    generate_cloudwatch_metric_filters(cluster_name, cluster_dict, config)

    generate_cloudwatch_metric_alarms(cluster_name, cluster_dict, config)

    # Each optional module runs only when enabled in the cluster config.
    # The enabled-check is a lambda so it is evaluated lazily, right before
    # its generator runs (earlier generators may update the config).
    conditional_generators = [
        (lambda: modules.get('cloudwatch_monitoring', {}).get('enabled'),
         generate_monitoring),
        (lambda: modules.get('kinesis'), generate_kinesis_streams),
        (lambda: config['clusters'][cluster_name].get('outputs'),
         generate_outputs),
        (lambda: modules.get('kinesis_events'), generate_kinesis_events),
        (lambda: modules.get('cloudtrail'), generate_cloudtrail),
        (lambda: modules.get('flow_logs'), generate_flow_logs),
        (lambda: modules.get('s3_events'), generate_s3_events),
    ]

    for is_enabled, generator in conditional_generators:
        # Abort the whole cluster if any enabled generator fails
        if is_enabled() and not generator(cluster_name, cluster_dict, config):
            return

    generate_app_integrations(cluster_name, cluster_dict, config)

    return cluster_dict
Example #9
0
def test_generate_cloudwatch_monitoring_disabled():
    """CLI - Terraform Generate Cloudwatch Monitoring Disabled"""
    cluster = 'trusted'
    cluster_dict = _common.infinitedict()
    result = monitoring.generate_monitoring(cluster, cluster_dict, CONFIG)

    # Generation succeeds, but no monitoring module is emitted for the cluster
    assert_true(result)
    module_name = 'cloudwatch_monitoring_{}'.format(cluster)
    assert_true(module_name not in cluster_dict['module'])
Example #10
0
def test_generate_cloudwatch_monitoring_invalid_config(mock_logging):
    """CLI - Terraform Generate Cloudwatch Monitoring with Invalid Config"""
    # Wipe the infrastructure settings so no monitoring config exists
    CONFIG['global']['infrastructure'] = {}

    result = monitoring.generate_monitoring(
        'test', _common.infinitedict(), CONFIG)

    # An error should be logged and generation should report failure
    assert_true(mock_logging.error.called)
    assert_false(result)
Example #11
0
def test_generate_s3_events_invalid_bucket(mock_logging):
    """CLI - Terraform S3 Events with Missing Bucket Key"""
    cluster_dict = _common.infinitedict()
    # The module config lacks the required 's3_bucket_id' key
    CONFIG['clusters']['advanced']['modules']['s3_events'] = {
        'wrong_key': 'my-bucket!!!'
    }
    result = s3_events.generate_s3_events('advanced', cluster_dict, CONFIG)

    # An error should be logged and generation should report failure
    assert_true(mock_logging.error.called)
    assert_false(result)
Example #12
0
def test_generate_s3_events_legacy():
    """CLI - Terraform S3 Events - Legacy"""
    cluster_dict = _common.infinitedict()
    # Legacy single-bucket configuration format
    CONFIG['clusters']['test']['modules']['s3_events'] = {
        's3_bucket_id': 'unit-test-bucket.legacy.data'
    }
    result = s3_events.generate_s3_events('test', cluster_dict, CONFIG)
    # Reload the config
    CONFIG.load()

    assert_true(result)
    # The legacy entry should have been rewritten to the list-based format
    assert_equal(
        CONFIG['clusters']['test']['modules']['s3_events'],
        [{'bucket_id': 'unit-test-bucket.legacy.data'}])
def generate_threat_intel_downloader(config):
    """Generate the Threat Intel Downloader Terraform module

    Args:
        config (dict): The loaded config from the 'conf/' directory

    Returns:
        dict: Threat Intel Downloader module dict to be marshalled to JSON
    """
    # Use the monitoring topic as a dead letter queue. Guard with defaults so
    # a missing 'infrastructure' or 'monitoring' section cannot raise an
    # AttributeError; fall back to the default topic in every unset case.
    monitoring_config = config['global'].get('infrastructure',
                                             {}).get('monitoring', {})
    dlq_topic = (DEFAULT_SNS_MONITORING_TOPIC
                 if monitoring_config.get('create_sns_topic')
                 else monitoring_config.get('sns_topic_name',
                                            DEFAULT_SNS_MONITORING_TOPIC))

    # Threat Intel Downloader module
    ti_downloader_config = config['lambda']['threat_intel_downloader_config']
    ti_downloader_dict = infinitedict()
    ti_downloader_dict['module']['threat_intel_downloader'] = {
        'account_id': config['global']['account']['aws_account_id'],
        'region': config['global']['account']['region'],
        'source': 'modules/tf_threat_intel_downloader',
        'lambda_function_arn': '${module.threat_intel_downloader.lambda_arn}',
        'lambda_handler': ti_downloader_config['handler'],
        'lambda_memory': ti_downloader_config.get('memory', '128'),
        'lambda_timeout': ti_downloader_config.get('timeout', '60'),
        'lambda_s3_bucket': ti_downloader_config['source_bucket'],
        'lambda_s3_key': ti_downloader_config['source_object_key'],
        'lambda_log_level': ti_downloader_config.get('log_level', 'info'),
        'interval': ti_downloader_config.get('interval', 'rate(1 day)'),
        'current_version': ti_downloader_config['current_version'],
        'prefix': config['global']['account']['prefix'],
        'monitoring_sns_topic': dlq_topic,
        'table_rcu': ti_downloader_config.get('table_rcu', '10'),
        'table_wcu': ti_downloader_config.get('table_wcu', '10'),
        'ioc_keys': ti_downloader_config.get('ioc_keys'),
        'ioc_filters': ti_downloader_config.get('ioc_filters'),
        'ioc_types': ti_downloader_config.get('ioc_types')
    }
    return ti_downloader_dict
def test_kinesis_events():
    """CLI - Terraform Generate Kinesis Events"""
    cluster_dict = _common.infinitedict()
    result = kinesis_events.generate_kinesis_events(
        'advanced', cluster_dict, CONFIG)

    # Events module wired to the cluster's Lambda, role and Kinesis stream
    expected = {
        'module': {
            'kinesis_events_advanced': {
                'source': 'modules/tf_stream_alert_kinesis_events',
                'lambda_production_enabled': True,
                'lambda_role_id': '${module.stream_alert_advanced.lambda_role_id}',
                'lambda_function_arn': '${module.stream_alert_advanced.lambda_arn}',
                'kinesis_stream_arn': '${module.kinesis_advanced.arn}',
                'role_policy_prefix': 'advanced'
            }
        }
    }

    assert_true(result)
    assert_equal(cluster_dict, expected)
Example #15
0
def test_generate_s3_events():
    """CLI - Terraform - S3 Events with Valid Buckets"""
    cluster_dict = _common.infinitedict()
    result = s3_events.generate_s3_events('advanced', cluster_dict, CONFIG)

    # One module per bucket, keyed by prefix/cluster/index, with the
    # configured prefix/suffix filters carried through (empty when unset)
    expected = {
        'module': {
            's3_events_unit-testing_advanced_0': {
                'source': 'modules/tf_stream_alert_s3_events',
                'lambda_function_arn': '${module.stream_alert_advanced.lambda_arn}',
                'bucket_id': 'unit-test-bucket.data',
                'notification_id': 'advanced_0',
                'enable_events': True,
                'lambda_role_id': '${module.stream_alert_advanced.lambda_role_id}',
                'filter_suffix': '.log',
                'filter_prefix': 'AWSLogs/123456789/CloudTrail/us-east-1/'
            },
            's3_events_unit-testing_advanced_1': {
                'source': 'modules/tf_stream_alert_s3_events',
                'lambda_function_arn': '${module.stream_alert_advanced.lambda_arn}',
                'bucket_id': 'unit-test.cloudtrail.data',
                'enable_events': False,
                'notification_id': 'advanced_1',
                'lambda_role_id': '${module.stream_alert_advanced.lambda_role_id}',
                'filter_suffix': '',
                'filter_prefix': ''
            }
        }
    }

    assert_true(result)
    assert_equal(cluster_dict, expected)
Example #16
0
def generate_main(**kwargs):
    """Generate the main.tf.json Terraform dict

    Keyword Args:
        init (bool): If Terraform is running in the init phase or not
        config (CLIConfig): The loaded CLI config

    Returns:
        dict: main.tf.json Terraform dict
    """
    init = kwargs.get('init')
    config = kwargs['config']
    main_dict = infinitedict()

    # Configure provider along with the minimum version
    main_dict['provider']['aws'] = {'version': '~> 0.1.4'}

    # Configure Terraform version requirement
    main_dict['terraform']['required_version'] = '~> 0.10.6'

    # Setup the Backend depending on the deployment phase.
    # When first setting up StreamAlert, the Terraform statefile
    # is stored locally.  After the first dependencies are created,
    # this moves to S3.
    if init:
        main_dict['terraform']['backend']['local'] = {
            'path': 'terraform.tfstate'}
    else:
        main_dict['terraform']['backend']['s3'] = {
            'bucket': '{}.streamalert.terraform.state'.format(
                config['global']['account']['prefix']),
            'key': 'stream_alert_state/terraform.tfstate',
            'region': config['global']['account']['region'],
            'encrypt': True,
            'acl': 'private',
            'kms_key_id': 'alias/{}'.format(config['global']['account']['kms_key_alias'])}

    logging_bucket = '{}.streamalert.s3-logging'.format(
        config['global']['account']['prefix'])
    logging_bucket_lifecycle = {
        'prefix': '/',
        'enabled': True,
        'transition': {
            'days': 30,
            'storage_class': 'GLACIER'}}

    # Configure initial S3 buckets
    main_dict['resource']['aws_s3_bucket'] = {
        'lambda_source': generate_s3_bucket(
            bucket=config['lambda']['rule_processor_config']['source_bucket'],
            logging=logging_bucket
        ),
        'stream_alert_secrets': generate_s3_bucket(
            bucket='{}.streamalert.secrets'.format(config['global']['account']['prefix']),
            logging=logging_bucket
        ),
        'terraform_remote_state': generate_s3_bucket(
            bucket=config['global']['terraform']['tfstate_bucket'],
            logging=logging_bucket
        ),
        'logging_bucket': generate_s3_bucket(
            bucket=logging_bucket,
            acl='log-delivery-write',
            logging=logging_bucket,
            lifecycle_rule=logging_bucket_lifecycle
        ),
        'streamalerts': generate_s3_bucket(
            bucket='{}.streamalerts'.format(config['global']['account']['prefix']),
            logging=logging_bucket
        )
    }

    # Conditionally configure Firehose
    if config['global']['infrastructure'].get('firehose', {}).get('enabled'):
        firehose_config = config['global']['infrastructure']['firehose']
        firehose_s3_bucket_suffix = firehose_config.get('s3_bucket_suffix',
                                                        'streamalert.data')
        firehose_s3_bucket_name = '{}.{}'.format(config['global']['account']['prefix'],
                                                 firehose_s3_bucket_suffix)

        # Add the main Firehose module (buffer settings are read from the
        # already-resolved firehose_config rather than re-walking the config)
        main_dict['module']['kinesis_firehose'] = {
            'source': 'modules/tf_stream_alert_kinesis_firehose',
            'account_id': config['global']['account']['aws_account_id'],
            'region': config['global']['account']['region'],
            'prefix': config['global']['account']['prefix'],
            'logs': enabled_firehose_logs(config),
            'buffer_size': firehose_config.get('buffer_size', 64),
            'buffer_interval': firehose_config.get('buffer_interval', 300),
            'compression_format': firehose_config.get('compression_format', 'GZIP'),
            's3_logging_bucket': logging_bucket,
            's3_bucket_name': firehose_s3_bucket_name
        }

    # KMS Key and Alias creation
    main_dict['resource']['aws_kms_key']['stream_alert_secrets'] = {
        'enable_key_rotation': True,
        'description': 'StreamAlert secret management'
    }
    main_dict['resource']['aws_kms_alias']['stream_alert_secrets'] = {
        'name': 'alias/{}'.format(config['global']['account']['kms_key_alias']),
        'target_key_id': '${aws_kms_key.stream_alert_secrets.key_id}'
    }

    # Global infrastructure settings. Resolve the monitoring section once,
    # tolerating a missing 'infrastructure' key entirely.
    infrastructure_config = config['global'].get('infrastructure')
    monitoring_config = (infrastructure_config or {}).get('monitoring')
    if monitoring_config and monitoring_config.get('create_sns_topic'):
        main_dict['resource']['aws_sns_topic']['stream_alert_monitoring'] = {
            'name': DEFAULT_SNS_MONITORING_TOPIC
        }

    # Add any global cloudwatch alarms to the main.tf
    if not monitoring_config:
        return main_dict

    global_metrics = monitoring_config.get('metric_alarms')
    if not global_metrics:
        return main_dict

    # Fall back to the default topic name when a custom one is not supplied,
    # so the alarm ARN never ends up containing 'None'
    topic_name = (DEFAULT_SNS_MONITORING_TOPIC
                  if monitoring_config.get('create_sns_topic')
                  else monitoring_config.get('sns_topic_name',
                                             DEFAULT_SNS_MONITORING_TOPIC))

    sns_topic_arn = 'arn:aws:sns:{region}:{account_id}:{topic}'.format(
        region=config['global']['account']['region'],
        account_id=config['global']['account']['aws_account_id'],
        topic=topic_name
    )

    # Terraform only allows certain characters in resource names, so strip
    # anything else from the alarm name (loop-invariant, built once)
    acceptable_chars = ''.join([string.digits, string.letters, '_-'])

    formatted_alarms = {}
    # Add global metric alarms for the rule and alert processors
    for func in FUNC_PREFIXES:
        if func not in global_metrics:
            continue

        for name, settings in global_metrics[func].iteritems():
            alarm_info = settings.copy()
            alarm_info['alarm_name'] = name
            alarm_info['namespace'] = 'StreamAlert'
            alarm_info['alarm_actions'] = [sns_topic_arn]
            name = filter(acceptable_chars.__contains__, name)
            formatted_alarms['metric_alarm_{}'.format(name)] = alarm_info

    if formatted_alarms:
        main_dict['resource']['aws_cloudwatch_metric_alarm'] = formatted_alarms

    return main_dict
Example #17
0
def generate_athena(config):
    """Generate Athena Terraform.

    Args:
        config (dict): The loaded config from the 'conf/' directory

    Returns:
        dict: Athena dict to be marshalled to JSON
    """
    athena_dict = infinitedict()
    athena_config = config['lambda']['athena_partition_refresh_config']

    # Collect every bucket referenced by any refresh type (deduplicated)
    data_buckets = set()
    for refresh_type in athena_config['refresh_type']:
        data_buckets.update(athena_config['refresh_type'][refresh_type])

    athena_dict['module']['stream_alert_athena'] = {
        'source': 'modules/tf_stream_alert_athena',
        'lambda_handler': athena_config['handler'],
        'lambda_memory': athena_config.get('memory', '128'),
        'lambda_timeout': athena_config.get('timeout', '60'),
        'lambda_s3_bucket': athena_config['source_bucket'],
        'lambda_s3_key': athena_config['source_object_key'],
        'lambda_log_level': athena_config.get('log_level', 'info'),
        'athena_data_buckets': list(data_buckets),
        'refresh_interval': athena_config.get('refresh_interval',
                                              'rate(10 minutes)'),
        'current_version': athena_config['current_version'],
        'enable_metrics': athena_config.get('enable_metrics', False),
        'prefix': config['global']['account']['prefix']
    }

    # Cloudwatch monitoring setup. Fall back to the default topic name when a
    # custom one is not supplied, so the ARN never ends up containing 'None'
    monitoring_config = config['global'].get('infrastructure',
                                             {}).get('monitoring', {})
    sns_topic_name = (DEFAULT_SNS_MONITORING_TOPIC
                      if monitoring_config.get('create_sns_topic')
                      else monitoring_config.get('sns_topic_name',
                                                 DEFAULT_SNS_MONITORING_TOPIC))
    athena_dict['module']['athena_monitoring'] = {
        'source': 'modules/tf_stream_alert_monitoring',
        'sns_topic_arn': 'arn:aws:sns:{region}:{account_id}:{topic}'.format(
            region=config['global']['account']['region'],
            account_id=config['global']['account']['aws_account_id'],
            topic=sns_topic_name),
        'lambda_functions': [
            '{}_streamalert_athena_partition_refresh'.format(
                config['global']['account']['prefix'])
        ],
        'kinesis_alarms_enabled': False
    }

    # Metrics setup
    if not athena_config.get('enable_metrics', False):
        return athena_dict

    # Check to see if there are any metrics configured for the athena function
    current_metrics = metrics.MetricLogger.get_available_metrics()
    if metrics.ATHENA_PARTITION_REFRESH_NAME not in current_metrics:
        return athena_dict

    metric_prefix = 'AthenaRefresh'
    filter_pattern_idx, filter_value_idx = 0, 1

    # Add filters for the cluster and aggregate
    # Use a list of strings that represent the following comma separated values:
    #   <filter_name>,<filter_pattern>,<value>
    filters = [
        '{}-{},{},{}'.format(metric_prefix, metric,
                             settings[filter_pattern_idx],
                             settings[filter_value_idx])
        for metric, settings in
        current_metrics[metrics.ATHENA_PARTITION_REFRESH_NAME].iteritems()
    ]

    athena_dict['module']['stream_alert_athena'][
        'athena_metric_filters'] = filters

    return athena_dict
Example #18
0
 def setup(self):
     """Create a fresh CLI config and cluster dict before each test"""
     self.config = CLIConfig(config_path='tests/unit/conf')
     self.cluster_dict = _common.infinitedict()