Example #1
def generate_main(config, init=False):
    """Generate the main.tf.json Terraform dict

    Args:
        config (CLIConfig): The loaded CLI config
        init (bool): Whether Terraform is running in the init phase (optional)

    Returns:
        dict: main.tf.json Terraform dict
    """
    write_vars(config, region=config['global']['account']['region'])

    main_dict = infinitedict()

    logging_bucket, create_logging_bucket = s3_access_logging_bucket(config)

    state_lock_table_name = '{}_streamalert_terraform_state_lock'.format(
        config['global']['account']['prefix']
    )
    # Set up the Terraform backend depending on the deployment phase.
    # When StreamAlert is first set up, the Terraform state file is
    # stored locally; after the initial dependencies are created, the
    # state moves to S3.
    if init:
        main_dict['terraform']['backend']['local'] = {
            'path': 'terraform.tfstate',
        }
    else:
        terraform_bucket_name, _ = terraform_state_bucket(config)
        main_dict['terraform']['backend']['s3'] = {
            'bucket': terraform_bucket_name,
            'key': config['global'].get('terraform', {}).get(
                'state_key_name',
                'streamalert_state/terraform.tfstate'
            ),
            'region': config['global']['account']['region'],
            'encrypt': True,
            'dynamodb_table': state_lock_table_name,
            'acl': 'private',
            'kms_key_id': 'alias/{}'.format(
                config['global']['account'].get(
                    'kms_key_alias',
                    '{}_streamalert_secrets'.format(config['global']['account']['prefix'])
                )
            ),
        }

    # Configure initial S3 buckets
    main_dict['resource']['aws_s3_bucket'] = {
        'streamalerts': generate_s3_bucket(
            bucket=firehose_alerts_bucket(config),
            logging=logging_bucket
        )
    }

    # Configure remote state locking table
    main_dict['resource']['aws_dynamodb_table'] = {
        'terraform_remote_state_lock': {
            'name': state_lock_table_name,
            'billing_mode': 'PAY_PER_REQUEST',
            'hash_key': 'LockID',
            'attribute': {
                'name': 'LockID',
                'type': 'S'
            },
            'tags': {
                'Name': 'StreamAlert'
            }
        }
    }

    # Create bucket for S3 access logs (if applicable)
    if create_logging_bucket:
        main_dict['resource']['aws_s3_bucket']['logging_bucket'] = generate_s3_bucket(
            bucket=logging_bucket,
            logging=logging_bucket,
            acl='log-delivery-write',
            lifecycle_rule={
                'prefix': '/',
                'enabled': True,
                'transition': {
                    'days': 365,
                    'storage_class': 'GLACIER'
                }
            },
            sse_algorithm='AES256'  # SSE-KMS doesn't seem to work with access logs
        )

    terraform_bucket_name, create_state_bucket = terraform_state_bucket(config)
    # Create bucket for Terraform state (if applicable)
    if create_state_bucket:
        main_dict['resource']['aws_s3_bucket']['terraform_remote_state'] = generate_s3_bucket(
            bucket=terraform_bucket_name,
            logging=logging_bucket
        )

    # Set up the Firehose delivery streams
    generate_firehose(logging_bucket, main_dict, config)

    # Configure global resources like Firehose alert delivery and alerts table
    main_dict['module']['globals'] = _generate_global_module(config)

    # KMS Key and Alias creation
    main_dict['resource']['aws_kms_key']['server_side_encryption'] = {
        'enable_key_rotation': True,
        'description': 'StreamAlert S3 Server-Side Encryption',
        'policy': json.dumps({
            'Version': '2012-10-17',
            'Statement': [
                {
                    'Sid': 'Enable IAM User Permissions',
                    'Effect': 'Allow',
                    'Principal': {
                        'AWS': 'arn:aws:iam::{}:root'.format(
                            config['global']['account']['aws_account_id']
                        )
                    },
                    'Action': 'kms:*',
                    'Resource': '*'
                },
                {
                    'Sid': 'Allow principals in the account to use the key',
                    'Effect': 'Allow',
                    'Principal': '*',
                    'Action': ['kms:Decrypt', 'kms:GenerateDataKey*', 'kms:Encrypt'],
                    'Resource': '*',
                    'Condition': {
                        'StringEquals': {
                            'kms:CallerAccount': config['global']['account']['aws_account_id']
                        }
                    }
                }
            ]
        })
    }
    main_dict['resource']['aws_kms_alias']['server_side_encryption'] = {
        'name': 'alias/{}_server-side-encryption'.format(config['global']['account']['prefix']),
        'target_key_id': '${aws_kms_key.server_side_encryption.key_id}'
    }

    main_dict['resource']['aws_kms_key']['streamalert_secrets'] = {
        'enable_key_rotation': True,
        'description': 'StreamAlert secret management'
    }
    main_dict['resource']['aws_kms_alias']['streamalert_secrets'] = {
        'name': 'alias/{}'.format(
            config['global']['account'].get(
                'kms_key_alias',
                '{}_streamalert_secrets'.format(config['global']['account']['prefix'])
            )
        ),
        'target_key_id': '${aws_kms_key.streamalert_secrets.key_id}'
    }

    # Create the SNS topic used for monitoring (if applicable)
    topic_name, create_topic = monitoring_topic_name(config)
    if create_topic:
        main_dict['resource']['aws_sns_topic']['monitoring'] = {
            'name': topic_name
        }

    return main_dict
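
A minimal usage sketch for context: the returned dict is plain JSON-serializable data, so writing it out as main.tf.json is enough for Terraform to pick it up as JSON-format configuration. The helper name and output path below are assumptions for illustration, not part of the source above.

import json

def write_main_tf_json(config, output_path='terraform/main.tf.json', init=False):
    """Hypothetical helper: serialize the generated Terraform dict to disk."""
    main_dict = generate_main(config, init=init)
    with open(output_path, 'w') as tf_file:
        # Terraform reads *.tf.json files as JSON-formatted configuration
        json.dump(main_dict, tf_file, indent=2, sort_keys=True)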
Example #2
def generate_athena(config):
    """Generate Athena Terraform.

    Args:
        config (dict): The loaded config from the 'conf/' directory

    Returns:
        dict: Athena dict to be marshalled to JSON
    """
    result = infinitedict()

    prefix = config['global']['account']['prefix']
    athena_config = config['lambda']['athena_partitioner_config']

    data_buckets = athena_partition_buckets_tf(config)
    database = athena_config.get('database_name',
                                 '{}_streamalert'.format(prefix))

    results_bucket_name = athena_query_results_bucket(config)

    queue_name = athena_config.get(
        'queue_name',
        '{}_streamalert_athena_s3_notifications'.format(prefix)).strip()

    logging_bucket, _ = s3_access_logging_bucket(config)

    # Set variables for the Athena partitioner's IAM permissions
    result['module']['athena_partitioner_iam'] = {
        'source': './modules/tf_athena',
        'account_id': config['global']['account']['aws_account_id'],
        'prefix': prefix,
        's3_logging_bucket': logging_bucket,
        'database_name': database,
        'queue_name': queue_name,
        'athena_data_buckets': data_buckets,
        'results_bucket': results_bucket_name,
        'lambda_timeout': athena_config['timeout'],
        'kms_key_id': '${aws_kms_key.server_side_encryption.key_id}',
        'function_role_id': '${module.athena_partitioner_lambda.role_id}',
        'function_name': '${module.athena_partitioner_lambda.function_name}',
        'function_alias_arn': '${module.athena_partitioner_lambda.function_alias_arn}',
    }

    # Set variables for the Lambda module
    result['module']['athena_partitioner_lambda'] = generate_lambda(
        '{}_streamalert_{}'.format(prefix, ATHENA_PARTITIONER_NAME),
        'streamalert.athena_partitioner.main.handler',
        athena_config,
        config,
        tags={'Subcomponent': 'AthenaPartitioner'})

    return result
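
Both examples above lean on the infinitedict helper, which allows deeply nested keys to be assigned without creating the intermediate dicts first. A minimal sketch of how such a helper is commonly implemented (the actual StreamAlert version may differ):

from collections import defaultdict

def infinitedict():
    """Dict whose missing keys transparently become nested dicts.

    This is why assignments like result['module']['athena_partitioner_iam'] = {...}
    work without initializing result['module'] first.
    """
    return defaultdict(infinitedict)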
Example #3
def generate_athena(config):
    """Generate Athena Terraform.

    Args:
        config (dict): The loaded config from the 'conf/' directory

    Returns:
        dict: Athena dict to be marshalled to JSON
    """
    athena_dict = infinitedict()
    athena_config = config['lambda']['athena_partition_refresh_config']

    data_buckets = sorted(athena_partition_buckets(config))

    prefix = config['global']['account']['prefix']
    database = athena_config.get('database_name',
                                 '{}_streamalert'.format(prefix))

    results_bucket_name = athena_query_results_bucket(config)

    queue_name = athena_config.get(
        'queue_name',
        '{}_streamalert_athena_s3_notifications'.format(prefix)).strip()

    logging_bucket, _ = s3_access_logging_bucket(config)
    athena_dict['module']['streamalert_athena'] = {
        's3_logging_bucket': logging_bucket,
        'source': './modules/tf_athena',
        'database_name': database,
        'queue_name': queue_name,
        'results_bucket': results_bucket_name,
        'kms_key_id': '${aws_kms_key.server_side_encryption.key_id}',
        'lambda_handler': AthenaPackage.lambda_handler,
        'lambda_memory': athena_config.get('memory', '128'),
        'lambda_timeout': athena_config.get('timeout', '60'),
        'lambda_log_level': athena_config.get('log_level', 'info'),
        'athena_data_buckets': data_buckets,
        'concurrency_limit': athena_config.get('concurrency_limit', 10),
        'account_id': config['global']['account']['aws_account_id'],
        'prefix': prefix
    }

    # CloudWatch monitoring setup
    athena_dict['module']['athena_monitoring'] = {
        'source': './modules/tf_monitoring',
        'sns_topic_arn': monitoring_topic_arn(config),
        'lambda_functions': ['{}_streamalert_athena_partition_refresh'.format(prefix)],
        'kinesis_alarms_enabled': False
    }

    # Metrics setup
    if not athena_config.get('enable_custom_metrics', False):
        return athena_dict

    # Check to see if there are any metrics configured for the Athena function
    current_metrics = metrics.MetricLogger.get_available_metrics()
    if metrics.ATHENA_PARTITION_REFRESH_NAME not in current_metrics:
        return athena_dict

    metric_prefix = 'AthenaRefresh'
    filter_pattern_idx, filter_value_idx = 0, 1

    # Add filters for the cluster and aggregate
    # Use a list of strings that represent the following comma-separated values:
    #   <filter_name>,<filter_pattern>,<value>
    filters = [
        '{}-{},{},{}'.format(
            metric_prefix,
            metric,
            settings[filter_pattern_idx],
            settings[filter_value_idx],
        )
        for metric, settings in current_metrics[metrics.ATHENA_PARTITION_REFRESH_NAME].items()
    ]

    athena_dict['module']['streamalert_athena']['athena_metric_filters'] = filters

    return athena_dict
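
To make the comma-separated filter format concrete, here is a hypothetical metrics entry and the string the comprehension above would build from it (the metric name and filter pattern are invented for illustration):

# Hypothetical input: one metric configured for the Athena function
current_metrics = {
    'athena_partition_refresh': {
        'FailedPartitions': ('{ $.metric_name = "FailedPartitions" }', '$.metric_value'),
    }
}

# Yields: 'AthenaRefresh-FailedPartitions,{ $.metric_name = "FailedPartitions" },$.metric_value'
filters = [
    'AthenaRefresh-{},{},{}'.format(metric, settings[0], settings[1])
    for metric, settings in current_metrics['athena_partition_refresh'].items()
]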
Example #4
def generate_cloudtrail(cluster_name, cluster_dict, config):
    """Add the CloudTrail module to the Terraform cluster dict.

    Args:
        cluster_name (str): The name of the currently generating cluster
        cluster_dict (defaultdict): The dict containing all Terraform config for a given cluster.
        config (dict): The loaded config from the 'conf/' directory

    Returns:
        bool: True if the CloudTrail module was generated successfully (or is disabled), False otherwise
    """
    modules = config['clusters'][cluster_name]['modules']
    settings = modules['cloudtrail']
    if not settings.get('enabled', True):
        LOGGER.debug('CloudTrail module is not enabled')
        return True  # not an error

    region = config['global']['account']['region']
    prefix = config['global']['account']['prefix']
    send_to_cloudwatch = settings.get('send_to_cloudwatch', False)
    enable_s3_events = settings.get('enable_s3_events', True)

    s3_bucket_name = settings.get(
        's3_bucket_name',
        '{}-{}-streamalert-cloudtrail'.format(prefix, cluster_name))

    primary_account_id = config['global']['account']['aws_account_id']
    account_ids = set(settings.get('s3_cross_account_ids', []))
    account_ids.add(primary_account_id)
    account_ids = sorted(account_ids)

    # Just get the bucket name from the tuple
    logging_bucket, _ = s3_access_logging_bucket(config)

    module_info = {
        'source': './modules/tf_cloudtrail',
        'primary_account_id': primary_account_id,
        'region': region,
        'prefix': prefix,
        'cluster': cluster_name,
        's3_cross_account_ids': account_ids,
        's3_logging_bucket': logging_bucket,
        's3_bucket_name': s3_bucket_name,
    }

    # These have defaults in the terraform module, so only override if it's set in the config
    settings_with_defaults = {
        'enable_logging',
        'is_global_trail',
        's3_event_selector_type',
        'send_to_sns',
    }
    for value in settings_with_defaults:
        if value in settings:
            module_info[value] = settings[value]

    if send_to_cloudwatch:
        if not generate_cloudtrail_cloudwatch(
                cluster_name, cluster_dict, config, settings, prefix, region):
            return False

        module_info['cloudwatch_logs_role_arn'] = (
            '${{module.cloudtrail_cloudwatch_{}.cloudtrail_to_cloudwatch_logs_role}}'.format(
                cluster_name
            )
        )
        module_info['cloudwatch_logs_group_arn'] = (
            '${{module.cloudtrail_cloudwatch_{}.cloudwatch_logs_group_arn}}'.format(
                cluster_name
            )
        )

    cluster_dict['module']['cloudtrail_{}'.format(cluster_name)] = module_info

    if enable_s3_events:
        s3_event_account_ids = account_ids
        # Omit the primary account ID from the event notifications to avoid duplicative processing
        if send_to_cloudwatch:
            s3_event_account_ids = [
                account_id for account_id in account_ids
                if account_id != primary_account_id
            ]
        bucket_info = {
            s3_bucket_name: [{
                'filter_prefix': 'AWSLogs/{}/'.format(account_id)
            } for account_id in s3_event_account_ids]
        }
        generate_s3_events_by_bucket(
            cluster_name,
            cluster_dict,
            config,
            bucket_info,
            module_prefix='cloudtrail',
        )

    return True
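
A minimal sketch of how a per-cluster generator like this might be driven; the wrapper below is a hypothetical driver for illustration, not StreamAlert's actual CLI entry point:

def generate_all_cloudtrail(config):
    """Hypothetical driver: run the CloudTrail generator for every cluster."""
    cluster_dicts = {}
    for cluster_name in config['clusters']:
        if 'cloudtrail' not in config['clusters'][cluster_name]['modules']:
            continue  # cluster does not use the CloudTrail module
        cluster_dict = infinitedict()  # same helper sketched above
        if not generate_cloudtrail(cluster_name, cluster_dict, config):
            raise ValueError(
                'CloudTrail generation failed for cluster: {}'.format(cluster_name))
        cluster_dicts[cluster_name] = cluster_dict
    return cluster_dicts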