Example 1
def generate_aggregate_cloudwatch_metric_alarms(config):
    """Return any CloudWatch Metric Alarms for aggregate metrics

    Args:
        config (dict): The loaded config from the 'conf/' directory
    """
    result = infinitedict()

    sns_topic_arn = monitoring_topic_arn(config)

    for func, func_config in config['lambda'].items():
        metric_alarms = func_config.get('custom_metric_alarms')
        if not metric_alarms:
            continue

        func = func.replace('_config', '')

        for idx, name in enumerate(metric_alarms):
            alarm_settings = metric_alarms[name]
            alarm_settings['source'] = 'modules/tf_metric_alarms'
            alarm_settings['sns_topic_arn'] = sns_topic_arn
            alarm_settings['alarm_name'] = name
            result['module']['metric_alarm_{}_{}'.format(func, idx)] = alarm_settings

    return result
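A minimal usage sketch, assuming the config shape implied by the lookups above; the function and alarm names below are placeholders, and monitoring_topic_arn(config) additionally reads global monitoring settings that are elided here:

# Hypothetical config fragment -- keys inferred from the lookups above:
config = {
    'lambda': {
        'athena_partition_refresh_config': {
            'custom_metric_alarms': {
                'Athena Refresh Errors': {
                    'comparison_operator': 'GreaterThanThreshold',
                    'threshold': 0
                }
            }
        }
    }
}

result = generate_aggregate_cloudwatch_metric_alarms(config)
# The '_config' suffix is stripped, so the module block lands at:
# result['module']['metric_alarm_athena_partition_refresh_0']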
Example 2
def generate_cluster_cloudwatch_metric_alarms(cluster_name, cluster_dict, config):
    """Add the CloudWatch Metric Alarms information to the Terraform cluster dict.

    Args:
        cluster_name (str): The name of the currently generating cluster
        cluster_dict (defaultdict): The dict containing all Terraform config for a given cluster.
        config (dict): The loaded config from the 'conf/' directory
    """
    infrastructure_config = config['global'].get('infrastructure')

    if not (infrastructure_config and 'monitoring' in infrastructure_config):
        LOGGER.error(
            'Invalid config: Make sure you declare global infrastructure options!')
        return

    sns_topic_arn = monitoring_topic_arn(config)

    stream_alert_config = config['clusters'][cluster_name]['modules']['stream_alert']

    # Add cluster metric alarms for the clustered function(s), e.g. the classifier
    metric_alarms = [
        metric_alarm
        for func in CLUSTERED_FUNCTIONS
        for metric_alarm in stream_alert_config['{}_config'.format(func)].get(
            'custom_metric_alarms', []
        )
    ]

    for idx, metric_alarm in enumerate(metric_alarms):
        metric_alarm['source'] = 'modules/tf_metric_alarms'
        metric_alarm['sns_topic_arn'] = sns_topic_arn
        cluster_dict['module']['metric_alarm_{}_{}'.format(cluster_name, idx)] = metric_alarm
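For readers unpacking the comprehension: assuming CLUSTERED_FUNCTIONS (defined elsewhere in the codebase) contains e.g. 'classifier', it is equivalent to this loop form:

# Loop-form equivalent of the comprehension above, assuming
# CLUSTERED_FUNCTIONS = {'classifier'} (its actual value lives elsewhere):
metric_alarms = []
for func in CLUSTERED_FUNCTIONS:
    func_config = stream_alert_config['{}_config'.format(func)]
    metric_alarms.extend(func_config.get('custom_metric_alarms', []))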
Example 3
def generate_cloudwatch_metric_alarms(cluster_name, cluster_dict, config):
    """Add the CloudWatch Metric Alarms information to the Terraform cluster dict.

    Args:
        cluster_name (str): The name of the currently generating cluster
        cluster_dict (defaultdict): The dict containing all Terraform config for a given cluster.
        config (dict): The loaded config from the 'conf/' directory
    """
    infrastructure_config = config['global'].get('infrastructure')

    if not (infrastructure_config and 'monitoring' in infrastructure_config):
        LOGGER_CLI.error(
            'Invalid config: Make sure you declare global infrastructure options!'
        )
        return

    sns_topic_arn = monitoring_topic_arn(config)

    cluster_dict['module']['stream_alert_{}'.format(
        cluster_name)]['sns_topic_arn'] = sns_topic_arn

    stream_alert_config = config['clusters'][cluster_name]['modules'][
        'stream_alert']

    # Add cluster metric alarms for the rule and alert processors
    formatted_alarms = []
    for func_config in stream_alert_config.values():
        if 'metric_alarms' not in func_config:
            continue

        # TODO: update this logic to simply use a list of maps once Terraform fixes
        # their support for this, instead of the comma-separated string this creates
        metric_alarms = func_config['metric_alarms']
        for name, alarm_info in metric_alarms.items():
            formatted_alarms.append(_format_metric_alarm(name, alarm_info))

    cluster_dict['module']['stream_alert_{}'.format(
        cluster_name)]['metric_alarms'] = formatted_alarms
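The _format_metric_alarm helper is not shown on this page; per the TODO above, it flattens each alarm into the comma-separated string Terraform expects. A hypothetical sketch only, with the real field order dictated by the consuming Terraform module:

def _format_metric_alarm(name, alarm_info):
    """Hypothetical sketch only: flatten an alarm's settings into the
    comma-separated string mentioned in the TODO above. The real helper's
    field order is defined by the consuming Terraform module."""
    settings = dict(alarm_info)
    settings['alarm_name'] = name
    return ','.join('{}={}'.format(key, settings[key]) for key in sorted(settings))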
Example 4
def generate_lambda(function_name,
                    zip_file,
                    handler,
                    lambda_config,
                    config,
                    environment=None,
                    metrics_lookup=None):
    """Generate an instance of the Lambda Terraform module.

    Args:
        function_name (str): Name of the Lambda function (e.g. 'alert_processor')
        zip_file (str): Path where the .zip deployment package lives
        handler (str): Lambda function handler
        lambda_config (dict): Section of the config for this particular Lambda function
        config (dict): Parsed config from conf/
        environment (dict): Optional environment variables to specify.
            ENABLE_METRICS and LOGGER_LEVEL are included automatically.
        metrics_lookup (str): Canonical name of this function (used to look up custom metrics)

    Example Lambda config:
        {
            "concurrency_limit": 1,
            "log_level": "info",
            "log_retention_days": 14,
            "memory": 128,
            "metric_alarms": {
              "errors": {
                "enabled": true,
                "evaluation_periods": 1,
                "period_secs": 120,
                "threshold": 0
              },
              "throttles": {
                "enabled": true,
                "evaluation_periods": 1,
                "period_secs": 120,
                "threshold": 0
              }
            },
            "schedule_expression": "rate(5 minutes)",
            "timeout": 10,
            "vpc_config": {
                "security_group_ids": [
                    "sg-id"
                ],
                "subnet_ids": [
                    "subnet-id"
                ]
            }
        }

    Returns:
        dict: Terraform config for an instance of the tf_lambda module.
    """
    # Add logger level to any custom environment variables
    environment_variables = {
        # Convert True/False to "1" or "0", respectively
        'ENABLE_METRICS': str(int(lambda_config.get('enable_metrics', False))),
        'LOGGER_LEVEL': lambda_config.get('log_level', 'info')
    }

    if environment:
        environment_variables.update(environment)

    lambda_module = {
        'source': 'modules/tf_lambda',
        'function_name': function_name,
        'description': function_name.replace('_', ' ').title(),
        'handler': handler,
        'memory_size_mb': lambda_config['memory'],
        'timeout_sec': lambda_config['timeout'],
        'filename': zip_file,
        'environment_variables': environment_variables
    }

    # Include optional keys only if they are defined (otherwise use the module defaults)
    for key in ['concurrency_limit', 'log_retention_days', 'schedule_expression']:
        if key in lambda_config:
            lambda_module[key] = lambda_config[key]

    # Add metric alarms and filters to the Lambda module definition
    lambda_module.update(
        _tf_metric_alarms(lambda_config, monitoring_topic_arn(config)))
    lambda_module.update(_tf_metric_filters(lambda_config, metrics_lookup))

    # Add VPC config to the Lambda module definition
    lambda_module.update(_tf_vpc_config(lambda_config))

    return lambda_module
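A hypothetical invocation; the handler path, zip name, and config keys are placeholders consistent with the docstring, not values from a real conf/ directory:

# Hypothetical call -- zip path and handler are illustrative only:
alert_module = generate_lambda(
    function_name='alert_processor',
    zip_file='alert_processor.zip',
    handler='stream_alert.alert_processor.main.handler',
    lambda_config=config['lambda']['alert_processor_config'],
    config=config,
    metrics_lookup='alert_processor')

# The result is meant to be attached to the Terraform dict, e.g.:
# main_dict['module']['alert_processor_lambda'] = alert_module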
Example 5
def generate_firehose(logging_bucket, main_dict, config):
    """Generate the Firehose Terraform modules

    Args:
        logging_bucket (str): The name of the global logging bucket
        main_dict (infinitedict): The dict to marshal to a file
        config (CLIConfig): The loaded StreamAlert config
    """
    firehose_config = config['global']['infrastructure'].get('firehose', {})
    if not firehose_config.get('enabled'):
        return

    firehose_s3_bucket_suffix = firehose_config.get('s3_bucket_suffix', 'streamalert.data')
    firehose_s3_bucket_name = '{}.{}'.format(
        config['global']['account']['prefix'], firehose_s3_bucket_suffix)

    # Firehose Setup module
    main_dict['module']['kinesis_firehose_setup'] = {
        'source': 'modules/tf_stream_alert_kinesis_firehose_setup',
        'account_id': config['global']['account']['aws_account_id'],
        'prefix': config['global']['account']['prefix'],
        'region': config['global']['account']['region'],
        's3_logging_bucket': logging_bucket,
        's3_bucket_name': firehose_s3_bucket_name,
        'kms_key_id': '${aws_kms_key.server_side_encryption.key_id}'
    }

    enabled_logs = FirehoseClient.load_enabled_log_sources(
        firehose_config, config['logs'], force_load=True)

    log_alarms_config = firehose_config.get('enabled_logs', {})

    # Add the Delivery Streams individually
    for log_stream_name, log_type_name in enabled_logs.items():
        module_dict = {
            'source': 'modules/tf_stream_alert_kinesis_firehose_delivery_stream',
            'buffer_size': firehose_config.get('buffer_size', 64),
            'buffer_interval': firehose_config.get('buffer_interval', 300),
            'compression_format': firehose_config.get('compression_format', 'GZIP'),
            'log_name': log_stream_name,
            'role_arn': '${module.kinesis_firehose_setup.firehose_role_arn}',
            's3_bucket_name': firehose_s3_bucket_name,
            'kms_key_arn': '${aws_kms_key.server_side_encryption.arn}'
        }

        # Try to get alarm info for this specific log type
        alarm_info = log_alarms_config.get(log_type_name)
        if not alarm_info and ':' in log_type_name:
            # Fallback on looking for alarm info for the parent log type
            alarm_info = log_alarms_config.get(log_type_name.split(':')[0])

        if alarm_info and alarm_info.get('enable_alarm'):
            module_dict['enable_alarm'] = True

            # There are defaults for these defined in the Terraform module, so do
            # not set the variable values unless explicitly specified
            if alarm_info.get('log_min_count_threshold'):
                module_dict['alarm_threshold'] = alarm_info['log_min_count_threshold']

            if alarm_info.get('evaluation_periods'):
                module_dict['evaluation_periods'] = alarm_info['evaluation_periods']

            if alarm_info.get('period_seconds'):
                module_dict['period_seconds'] = alarm_info['period_seconds']

            alarm_actions = alarm_info.get('alarm_actions')
            if alarm_actions:
                if not isinstance(alarm_actions, list):
                    alarm_actions = [alarm_actions]
                module_dict['alarm_actions'] = alarm_actions
            else:
                module_dict['alarm_actions'] = [monitoring_topic_arn(config)]

        main_dict['module']['kinesis_firehose_{}'.format(log_stream_name)] = module_dict
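The settings consumed above live under config['global']['infrastructure']['firehose']; an illustrative shape, where the key names mirror the .get() calls but every value is a placeholder:

# Illustrative firehose settings -- key names mirror the .get() calls above:
firehose_settings = {
    'enabled': True,
    's3_bucket_suffix': 'streamalert.data',
    'buffer_size': 64,
    'buffer_interval': 300,
    'compression_format': 'GZIP',
    'enabled_logs': {
        # A parent type here also matches subtypes such as 'cloudwatch:events'
        # via the ':' fallback above
        'cloudwatch': {
            'enable_alarm': True,
            'log_min_count_threshold': 1000,
            'evaluation_periods': 2,
            'period_seconds': 3600
        }
    }
}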
Example 6
def generate_monitoring(cluster_name, cluster_dict, config):
    """Add the CloudWatch Monitoring module to the Terraform cluster dict.

    Example configuration:

    "cloudwatch_monitoring": {
      "enabled": true,
      "kinesis_alarms_enabled": true,
      "lambda_alarms_enabled": true,
      "settings": {
        "lambda_invocation_error_period": "600",
        "kinesis_iterator_age_error_period": "600",
        "kinesis_write_throughput_exceeded_threshold": "100"
      }
    }

    Args:
        cluster_name (str): The name of the currently generating cluster
        cluster_dict (defaultdict): The dict containing all Terraform config for a given cluster.
        config (dict): The loaded config from the 'conf/' directory

    Returns:
        bool: Result of applying the cloudwatch_monitoring module
    """
    prefix = config['global']['account']['prefix']
    infrastructure_config = config['global'].get('infrastructure')
    monitoring_config = config['clusters'][cluster_name]['modules'][
        'cloudwatch_monitoring']

    if not (infrastructure_config and 'monitoring' in infrastructure_config):
        LOGGER.error(
            'Invalid config: Make sure you declare global infrastructure options!'
        )
        return False

    if not monitoring_config.get('enabled', False):
        LOGGER.info('CloudWatch Monitoring not enabled, skipping...')
        return True

    sns_topic_arn = monitoring_topic_arn(config)

    monitoring_module_name = 'cloudwatch_monitoring_{}'.format(cluster_name)
    cluster_dict['module'][monitoring_module_name] = {
        'source': 'modules/tf_stream_alert_monitoring',
        'sns_topic_arn': sns_topic_arn,
        'kinesis_alarms_enabled': False,
        'lambda_alarms_enabled': False
    }

    if monitoring_config.get('lambda_alarms_enabled', True):
        cluster_dict['module'][monitoring_module_name].update({
            'lambda_functions': ['{}_streamalert_classifier_{}'.format(prefix, cluster_name)],
            'lambda_alarms_enabled': True
        })

    if monitoring_config.get('kinesis_alarms_enabled', True):
        cluster_dict['module'][monitoring_module_name].update({
            'kinesis_stream': '{}_{}_stream_alert_kinesis'.format(prefix, cluster_name),
            'kinesis_alarms_enabled': True
        })

    # Add support for custom settings for tweaking alarm thresholds, eval periods, and periods
    # Note: This does not strictly check for proper variable names, since there are so many.
    #       Instead, Terraform will error out if an improper name is used.
    #       Also, every value in these settings should be a string, so cast for safety.
    for setting_name, setting_value in monitoring_config.get('settings', {}).items():
        cluster_dict['module'][monitoring_module_name][setting_name] = str(setting_value)

    return True
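For a cluster named 'prod' with both alarm types left enabled, the module block above ends up shaped roughly like this; the prefix, ARN, and custom setting are illustrative only:

# Illustrative result for cluster_name='prod' (prefix/ARN are placeholders):
# cluster_dict['module']['cloudwatch_monitoring_prod'] == {
#     'source': 'modules/tf_stream_alert_monitoring',
#     'sns_topic_arn': 'arn:aws:sns:...:stream_alert_monitoring',
#     'lambda_alarms_enabled': True,
#     'lambda_functions': ['<prefix>_streamalert_classifier_prod'],
#     'kinesis_alarms_enabled': True,
#     'kinesis_stream': '<prefix>_prod_stream_alert_kinesis',
#     'lambda_invocation_error_period': '600'  # custom setting, cast to str
# }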
Example 7
def generate_main(config, init=False):
    """Generate the main.tf.json Terraform dict

    Args:
        config (CLIConfig): The loaded CLI config
        init (bool): Terraform is running in the init phase or not (optional)

    Returns:
        dict: main.tf.json Terraform dict
    """
    main_dict = infinitedict()

    # Configure provider along with the minimum version
    main_dict['provider']['aws'] = {'version': TERRAFORM_VERSIONS['provider']['aws']}

    # Configure Terraform version requirement
    main_dict['terraform']['required_version'] = TERRAFORM_VERSIONS['application']

    # Setup the Backend depending on the deployment phase.
    # When first setting up StreamAlert, the Terraform statefile
    # is stored locally.  After the first dependencies are created,
    # this moves to S3.
    if init:
        main_dict['terraform']['backend']['local'] = {
            'path': 'terraform.tfstate'}
    else:
        main_dict['terraform']['backend']['s3'] = {
            'bucket': '{}.streamalert.terraform.state'.format(
                config['global']['account']['prefix']),
            'key': 'stream_alert_state/terraform.tfstate',
            'region': config['global']['account']['region'],
            'encrypt': True,
            'acl': 'private',
            'kms_key_id': 'alias/{}'.format(config['global']['account']['kms_key_alias'])}

    logging_bucket = '{}.streamalert.s3-logging'.format(
        config['global']['account']['prefix'])
    logging_bucket_lifecycle = {
        'prefix': '/',
        'enabled': True,
        'transition': {
            'days': 30,
            'storage_class': 'GLACIER'}}

    # Configure initial S3 buckets
    main_dict['resource']['aws_s3_bucket'] = {
        'lambda_source': generate_s3_bucket(
            bucket=config['lambda']['rule_processor_config']['source_bucket'],
            logging=logging_bucket
        ),
        'stream_alert_secrets': generate_s3_bucket(
            bucket='{}.streamalert.secrets'.format(config['global']['account']['prefix']),
            logging=logging_bucket
        ),
        'terraform_remote_state': generate_s3_bucket(
            bucket=config['global']['terraform']['tfstate_bucket'],
            logging=logging_bucket
        ),
        'logging_bucket': generate_s3_bucket(
            bucket=logging_bucket,
            logging=logging_bucket,
            acl='log-delivery-write',
            lifecycle_rule=logging_bucket_lifecycle
        ),
        'streamalerts': generate_s3_bucket(
            bucket='{}.streamalerts'.format(config['global']['account']['prefix']),
            logging=logging_bucket
        )
    }

    # Setup Firehose Delivery Streams
    generate_firehose(config, main_dict, logging_bucket)

    # Configure global resources like Firehose alert delivery and alerts table
    main_dict['module']['globals'] = {
        'source': 'modules/tf_stream_alert_globals',
        'account_id': config['global']['account']['aws_account_id'],
        'region': config['global']['account']['region'],
        'prefix': config['global']['account']['prefix'],
        'alerts_table_read_capacity': (
            config['global']['infrastructure']['alerts_table']['read_capacity']),
        'alerts_table_write_capacity': (
            config['global']['infrastructure']['alerts_table']['write_capacity']),
        'rules_table_read_capacity': (
            config['global']['infrastructure']['rules_table']['read_capacity']),
        'rules_table_write_capacity': (
            config['global']['infrastructure']['rules_table']['write_capacity'])
    }

    # KMS Key and Alias creation
    main_dict['resource']['aws_kms_key']['stream_alert_secrets'] = {
        'enable_key_rotation': True,
        'description': 'StreamAlert secret management'
    }
    main_dict['resource']['aws_kms_alias']['stream_alert_secrets'] = {
        'name': 'alias/{}'.format(config['global']['account']['kms_key_alias']),
        'target_key_id': '${aws_kms_key.stream_alert_secrets.key_id}'
    }

    # Global infrastructure settings
    infrastructure_config = config['global'].get('infrastructure')
    if infrastructure_config and 'monitoring' in infrastructure_config:
        if infrastructure_config['monitoring'].get('create_sns_topic'):
            main_dict['resource']['aws_sns_topic']['stream_alert_monitoring'] = {
                'name': DEFAULT_SNS_MONITORING_TOPIC
            }

    # Add any global cloudwatch alarms to the main.tf
    monitoring_config = config['global']['infrastructure'].get('monitoring')
    if not monitoring_config:
        return main_dict

    global_metrics = monitoring_config.get('metric_alarms')
    if not global_metrics:
        return main_dict

    sns_topic_arn = monitoring_topic_arn(config)

    formatted_alarms = {}
    # Add global metric alarms for the rule and alert processors
    for func in FUNC_PREFIXES:
        if func not in global_metrics:
            continue

        for name, settings in global_metrics[func].items():
            alarm_info = settings.copy()
            alarm_info['alarm_name'] = name
            alarm_info['namespace'] = 'StreamAlert'
            alarm_info['alarm_actions'] = [sns_topic_arn]
            # Terraform only allows certain characters in resource names
            acceptable_chars = ''.join([string.digits, string.ascii_letters, '_-'])
            name = ''.join(c for c in name if c in acceptable_chars)
            formatted_alarms['metric_alarm_{}'.format(name)] = alarm_info

    if formatted_alarms:
        main_dict['resource']['aws_cloudwatch_metric_alarm'] = formatted_alarms

    return main_dict
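The sanitization at the end is worth a worked example; the alarm name below is a placeholder:

import string

# Worked example of the resource-name sanitization above:
acceptable_chars = ''.join([string.digits, string.ascii_letters, '_-'])
name = 'Aggregate Failed Parses Alarm'
sanitized = ''.join(c for c in name if c in acceptable_chars)
assert sanitized == 'AggregateFailedParsesAlarm'
# The Terraform resource key becomes 'metric_alarm_AggregateFailedParsesAlarm'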
Example 8
def generate_main(config, init=False):
    """Generate the main.tf.json Terraform dict

    Args:
        config (CLIConfig): The loaded CLI config
        init (bool): Terraform is running in the init phase or not (optional)

    Returns:
        dict: main.tf.json Terraform dict
    """
    main_dict = infinitedict()

    # Configure provider along with the minimum version
    main_dict['provider']['aws'] = {
        'version': TERRAFORM_VERSIONS['provider']['aws']
    }

    # Configure Terraform version requirement
    main_dict['terraform']['required_version'] = TERRAFORM_VERSIONS['application']

    # Setup the Backend depending on the deployment phase.
    # When first setting up StreamAlert, the Terraform statefile
    # is stored locally.  After the first dependencies are created,
    # this moves to S3.
    if init:
        main_dict['terraform']['backend']['local'] = {
            'path': 'terraform.tfstate'
        }
    else:
        main_dict['terraform']['backend']['s3'] = {
            'bucket': config['global']['terraform']['tfstate_bucket'],
            'key': config['global']['terraform']['tfstate_s3_key'],
            'region': config['global']['account']['region'],
            'encrypt': True,
            'acl': 'private',
            'kms_key_id': 'alias/{}'.format(config['global']['account']['kms_key_alias'])
        }

    logging_bucket = config['global']['s3_access_logging']['logging_bucket']

    # Configure initial S3 buckets
    main_dict['resource']['aws_s3_bucket'] = {
        'stream_alert_secrets': generate_s3_bucket(
            bucket='{}.streamalert.secrets'.format(config['global']['account']['prefix']),
            logging=logging_bucket
        ),
        'streamalerts': generate_s3_bucket(
            bucket='{}.streamalerts'.format(config['global']['account']['prefix']),
            logging=logging_bucket
        )
    }

    # Create bucket for S3 access logs (if applicable)
    if config['global']['s3_access_logging'].get('create_bucket', True):
        main_dict['resource']['aws_s3_bucket']['logging_bucket'] = generate_s3_bucket(
            bucket=logging_bucket,
            logging=logging_bucket,
            acl='log-delivery-write',
            lifecycle_rule={
                'prefix': '/',
                'enabled': True,
                'transition': {
                    'days': 365,
                    'storage_class': 'GLACIER'
                }
            },
            sse_algorithm='AES256'  # SSE-KMS doesn't seem to work with access logs
        )

    # Create bucket for Terraform state (if applicable)
    if config['global']['terraform'].get('create_bucket', True):
        main_dict['resource']['aws_s3_bucket']['terraform_remote_state'] = generate_s3_bucket(
            bucket=config['global']['terraform']['tfstate_bucket'],
            logging=logging_bucket
        )

    # Setup Firehose Delivery Streams
    generate_firehose(config, main_dict, logging_bucket)

    # Configure global resources like Firehose alert delivery and alerts table
    global_module = {
        'source': 'modules/tf_stream_alert_globals',
        'account_id': config['global']['account']['aws_account_id'],
        'region': config['global']['account']['region'],
        'prefix': config['global']['account']['prefix'],
        'kms_key_arn': '${aws_kms_key.server_side_encryption.arn}',
        'alerts_table_read_capacity': (
            config['global']['infrastructure']['alerts_table']['read_capacity']),
        'alerts_table_write_capacity': (
            config['global']['infrastructure']['alerts_table']['write_capacity'])
    }

    if config['global']['infrastructure']['rule_staging'].get('enabled'):
        rule_staging_table = config['global']['infrastructure']['rule_staging']['table']
        global_module['enable_rule_staging'] = True
        global_module['rules_table_read_capacity'] = rule_staging_table['read_capacity']
        global_module['rules_table_write_capacity'] = rule_staging_table['write_capacity']

    main_dict['module']['globals'] = global_module

    # KMS Key and Alias creation
    main_dict['resource']['aws_kms_key']['server_side_encryption'] = {
        'enable_key_rotation': True,
        'description': 'StreamAlert S3 Server-Side Encryption',
        'policy': json.dumps({
            'Version': '2012-10-17',
            'Statement': [
                {
                    'Sid': 'Enable IAM User Permissions',
                    'Effect': 'Allow',
                    'Principal': {
                        'AWS': 'arn:aws:iam::{}:root'.format(
                            config['global']['account']['aws_account_id'])
                    },
                    'Action': 'kms:*',
                    'Resource': '*'
                },
                {
                    'Sid': 'Allow principals in the account to use the key',
                    'Effect': 'Allow',
                    'Principal': '*',
                    'Action': ['kms:Decrypt', 'kms:GenerateDataKey*', 'kms:Encrypt'],
                    'Resource': '*',
                    'Condition': {
                        'StringEquals': {
                            'kms:CallerAccount': config['global']['account']['aws_account_id']
                        }
                    }
                }
            ]
        })
    }
    main_dict['resource']['aws_kms_alias']['server_side_encryption'] = {
        'name': 'alias/{}_server-side-encryption'.format(config['global']['account']['prefix']),
        'target_key_id': '${aws_kms_key.server_side_encryption.key_id}'
    }

    main_dict['resource']['aws_kms_key']['stream_alert_secrets'] = {
        'enable_key_rotation': True,
        'description': 'StreamAlert secret management'
    }
    main_dict['resource']['aws_kms_alias']['stream_alert_secrets'] = {
        'name': 'alias/{}'.format(config['global']['account']['kms_key_alias']),
        'target_key_id': '${aws_kms_key.stream_alert_secrets.key_id}'
    }

    # Global infrastructure settings
    infrastructure_config = config['global'].get('infrastructure')
    if infrastructure_config and 'monitoring' in infrastructure_config:
        if infrastructure_config['monitoring'].get('create_sns_topic'):
            main_dict['resource']['aws_sns_topic']['stream_alert_monitoring'] = {
                'name': DEFAULT_SNS_MONITORING_TOPIC
            }

    # Add any global cloudwatch alarms to the main.tf
    monitoring_config = config['global']['infrastructure'].get('monitoring')
    if not monitoring_config:
        return main_dict

    global_metrics = monitoring_config.get('metric_alarms')
    if not global_metrics:
        return main_dict

    sns_topic_arn = monitoring_topic_arn(config)

    formatted_alarms = {}
    # Add global metric alarms for the rule and alert processors
    for func in FUNC_PREFIXES:
        if func not in global_metrics:
            continue

        for name, settings in global_metrics[func].items():
            alarm_info = settings.copy()
            alarm_info['alarm_name'] = name
            alarm_info['namespace'] = 'StreamAlert'
            alarm_info['alarm_actions'] = [sns_topic_arn]
            # Terraform only allows certain characters in resource names
            acceptable_chars = ''.join([string.digits, string.ascii_letters, '_-'])
            name = ''.join(c for c in name if c in acceptable_chars)
            formatted_alarms['metric_alarm_{}'.format(name)] = alarm_info

    if formatted_alarms:
        main_dict['resource']['aws_cloudwatch_metric_alarm'] = formatted_alarms

    return main_dict
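Since the generated structure is plain nested dicts, it can be marshaled straight to main.tf.json; a minimal sketch, noting that the real CLI handles paths and file writing elsewhere:

import json

# Minimal sketch of persisting the result; sort_keys gives stable diffs
# between runs:
with open('main.tf.json', 'w') as tf_file:
    json.dump(generate_main(config), tf_file, indent=2, sort_keys=True)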
Example 9
def generate_lambda(function_name, config, environment=None):
    """Generate an instance of the Lambda Terraform module.

    Args:
        function_name (str): Name of the Lambda function (e.g. 'alert_processor')
        config (dict): Parsed config from conf/
        environment (dict): Optional environment variables to specify.
            ENABLE_METRICS and LOGGER_LEVEL are included automatically.

    Example Lambda config:
        {
            "concurrency_limit": 1,
            "current_version": "$LATEST",
            "handler": "main.handler",
            "log_level": "info",
            "log_retention_days": 14,
            "memory": 128,
            "metric_alarms": {
              "errors": {
                "enabled": true,
                "evaluation_periods": 1,
                "period_secs": 120,
                "threshold": 0
              },
              "throttles": {
                "enabled": true,
                "evaluation_periods": 1,
                "period_secs": 120,
                "threshold": 0
              }
            },
            "schedule_expression": "rate(5 minutes)",
            "source_bucket": "BUCKET",
            "source_object_key": "OBJECT_KEY",
            "timeout": 10,
            "vpc_config": {
                "security_group_ids": [
                    "sg-id"
                ],
                "subnet_ids": [
                    "subnet-id"
                ]
            }
        }

    Returns:
        dict: Terraform config for an instance of the tf_lambda module.
    """
    lambda_config = _lambda_config(function_name, config)

    # Add logger level to any custom environment variables
    environment_variables = {
        # Convert True/False to "1" or "0", respectively
        'ENABLE_METRICS': str(int(lambda_config.get('enable_metrics', False))),
        'LOGGER_LEVEL': lambda_config.get('log_level', 'info')
    }
    if environment:
        environment_variables.update(environment)

    lambda_module = {
        'source': 'modules/tf_lambda',
        'function_name': '{}_streamalert_{}'.format(config['global']['account']['prefix'],
                                                    function_name),
        'description': 'StreamAlert {}'.format(function_name.replace('_', ' ').title()),
        'handler': lambda_config['handler'],
        'memory_size_mb': lambda_config['memory'],
        'timeout_sec': lambda_config['timeout'],
        'source_bucket': lambda_config['source_bucket'],
        'source_object_key': lambda_config['source_object_key'],
        'environment_variables': environment_variables,
        'aliased_version': lambda_config['current_version'],
    }

    # Include optional keys only if they are defined (otherwise use the module defaults)
    for key in ['concurrency_limit', 'log_retention_days', 'schedule_expression']:
        if key in lambda_config:
            lambda_module[key] = lambda_config[key]

    # Add metric alarms and filters to the Lambda module definition
    lambda_module.update(_tf_metric_alarms(lambda_config, monitoring_topic_arn(config)))
    lambda_module.update(_tf_metric_filters(lambda_config, function_name))

    # Add VPC config to the Lambda module definition
    lambda_module.update(_tf_vpc_config(lambda_config))

    return lambda_module
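Unlike the variant in Example 4, this version resolves its own settings via _lambda_config and deploys from S3 (source_bucket/source_object_key) rather than a local zip. A hypothetical call, with an illustrative environment override:

# Hypothetical call -- function_name must have a matching section in conf/
# (see _lambda_config); the environment override is illustrative only:
alert_module = generate_lambda(
    'alert_processor',
    config,
    environment={'ALERTS_TABLE': 'prefix_streamalert_alerts'})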