Example #1
def _terraform_build(options, config):
    """Run Terraform with an optional set of targets and clusters

    Args:
        options (namedtuple): Parsed arguments from manage.py
        config (CLIConfig): Loaded StreamAlert CLI config
    """
    if not terraform_generate(config=config):
        return

    # Define the set of custom targets to apply
    tf_runner_targets = set()
    # If resources are not clustered, they most likely need to be
    # handled by the custom mapping below:
    custom_module_mapping = {
        'athena': 'module.stream_alert_athena',
        'threat_intel_downloader': 'module.threat_intel_downloader'
    }
    clusters = set(options.clusters or config.clusters())

    if options.target:
        tf_runner_targets.update({
            'module.{}_{}'.format(target, cluster)
            for cluster in clusters for target in options.target
        })
        for name in custom_module_mapping:
            if name in options.target:
                tf_runner_targets.add(custom_module_mapping[name])

    tf_runner(targets=tf_runner_targets)
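A note on the expansion above: the set comprehension crosses every requested target with every cluster to produce Terraform module addresses. A self-contained sketch with made-up target and cluster names:

# Hypothetical inputs, for illustration only
targets = ['kinesis', 'stream_alert']
clusters = {'corp', 'prod'}

tf_runner_targets = {
    'module.{}_{}'.format(target, cluster)
    for cluster in clusters for target in targets
}
print(sorted(tf_runner_targets))
# ['module.kinesis_corp', 'module.kinesis_prod',
#  'module.stream_alert_corp', 'module.stream_alert_prod']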
Example #2
def deploy(options, config):
    """Deploy new versions of all Lambda functions

    Args:
        options (namedtuple): Parsed arguments from the CLI
        config (CLIConfig): Loaded StreamAlert config

    Steps:
        Build AWS Lambda deployment package
        Upload to S3
        Update lambda.json with uploaded package checksum and S3 key
        Publish new version
        Update each cluster's Lambda configuration with latest published version
        Run Terraform Apply
    """
    # Terraform apply only to the module which contains our lambda functions
    deploy_targets = set()
    packages = []

    if 'all' in options.processor:
        processors = {
            'alert', 'alert_merger', 'apps', 'athena', 'rule',
            'threat_intel_downloader'
        }
    else:
        processors = options.processor

    for processor in processors:
        package, targets = _create_and_upload(processor, config,
                                              options.clusters)
        # Continue if the package isn't enabled
        if not all([package, targets]):
            continue

        packages.append(package)
        deploy_targets.update(targets)

    # Regenerate the Terraform configuration with the new S3 keys
    if not terraform_generate(config=config):
        return

    # Run Terraform: Update the Lambda source code in $LATEST
    if not helpers.tf_runner(targets=deploy_targets):
        sys.exit(1)

    # Update the rule table now if the rule processor is being deployed
    if 'rule' in options.processor:
        _update_rule_table(options, config)

    # Publish a new production Lambda version
    if not _publish_version(packages, config, options.clusters):
        return

    # Regenerate the Terraform configuration with the new Lambda versions
    if not terraform_generate(config=config):
        return

    # Apply the changes to the Lambda aliases
    helpers.tf_runner(targets=deploy_targets, refresh=False, auto_approve=True)
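Example #3 below drives this same function programmatically by building a namedtuple whose fields mirror the argparse attributes deploy() reads. A usage sketch along those lines (the processor list is illustrative; deploy and a loaded config are assumed to be in scope):

from collections import namedtuple

# Field names must match the attributes deploy() reads:
# options.processor and options.clusters
DeployOptions = namedtuple('DeployOptions', ['processor', 'clusters'])

# Deploy only the rule and alert processors; an empty cluster list
# lets the helpers fall back to the clusters in the config
deploy(DeployOptions(['rule', 'alert'], []), config)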
Example #3
def _terraform_init(config):
    """Initialize infrastructure using Terraform

    Args:
        config (CLIConfig): Loaded StreamAlert CLI config
    """
    LOGGER_CLI.info('Initializing StreamAlert')

    # generate init Terraform files
    if not terraform_generate(config=config, init=True):
        return

    LOGGER_CLI.info('Initializing Terraform')
    if not run_command(['terraform', 'init']):
        sys.exit(1)

    # build init infrastructure
    LOGGER_CLI.info('Building Initial Infrastructure')
    init_targets = [
        'aws_s3_bucket.lambda_source', 'aws_s3_bucket.logging_bucket',
        'aws_s3_bucket.stream_alert_secrets',
        'aws_s3_bucket.terraform_remote_state', 'aws_s3_bucket.streamalerts',
        'aws_kms_key.stream_alert_secrets',
        'aws_kms_alias.stream_alert_secrets'
    ]
    if not tf_runner(targets=init_targets):
        LOGGER_CLI.error('An error occurred while running StreamAlert init')
        sys.exit(1)

    # generate the main.tf with remote state enabled
    LOGGER_CLI.info('Configuring Terraform Remote State')
    if not terraform_generate(config=config):
        return

    if not run_command(['terraform', 'init']):
        return

    # Use a named tuple to match the 'processor' attribute in the argparse options
    deploy_opts = namedtuple('DeployOptions', ['processor', 'clusters'])

    LOGGER_CLI.info('Deploying Lambda Functions')

    deploy(deploy_opts(['rule', 'alert', 'alert_merger', 'athena'], []),
           config)

    # we need to manually create the streamalerts table since terraform does not support this
    # See: https://github.com/terraform-providers/terraform-provider-aws/issues/1486
    alerts_bucket = '{}.streamalerts'.format(
        config['global']['account']['prefix'])
    create_table('alerts', alerts_bucket, config)

    LOGGER_CLI.info('Building Remaining Infrastructure')
    tf_runner(refresh=False)
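run_command appears throughout these examples but is never shown. A minimal sketch of such a wrapper, assuming it simply shells out and reports success as a boolean (the project's real helper may stream output or add default arguments):

import subprocess

def run_command(args):
    """Run a command and return True on a zero exit code."""
    try:
        return subprocess.call(args) == 0
    except OSError:  # e.g. the terraform binary is not installed
        return False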
Example #4
def kinesis_handler(options, config):
    """Main handler for the Kinesis parser

    Args:
        options (argparse.Namespace): Parsed arguments
        config (CLIConfig): Loaded StreamAlert config

    Returns:
        bool: False if errors occurred, True otherwise
    """
    enable = options.action == 'enable-events'
    LOGGER.info('%s Kinesis Events', 'Enabling' if enable else 'Disabling')

    for cluster in options.clusters or config.clusters():
        if 'kinesis_events' in config['clusters'][cluster]['modules']:
            config['clusters'][cluster]['modules']['kinesis_events']['enabled'] = enable

    config.write()

    if options.skip_terraform:
        return True  # not an error

    if not terraform_generate_handler(config):
        return False

    return tf_runner(
        action='apply',
        targets=[
            'module.{}_{}'.format('kinesis_events', cluster) for cluster in config.clusters()
        ]
    )
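Because options is a plain argparse.Namespace, the handler is easy to exercise directly. A hedged usage sketch with an invented cluster name (config is assumed to be a loaded CLIConfig):

import argparse

options = argparse.Namespace(
    action='enable-events',  # anything else disables the events
    clusters=['prod'],       # empty/None falls back to config.clusters()
    skip_terraform=True,     # update the config only; skip Terraform
)
kinesis_handler(options, config)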
Example #5
def deploy(functions, config, clusters=None):
    """Deploy the functions

    Args:
        functions (set): Set of functions being deployed
        config (CLIConfig): Loaded StreamAlert config
        clusters (set): Optional set of clusters to target for this
            deploy; defaults to None

    Returns:
        bool: False if errors occurred, True otherwise
    """

    LOGGER.info('Deploying: %s', ' '.join(sorted(functions)))

    # Terraform apply only to the module which contains our lambda functions
    deploy_targets = set()
    packages = []

    for function in functions:
        package, targets = _create(function, config, clusters)
        # Continue if the package isn't enabled
        if not all([package, targets]):
            continue

        packages.append(package)
        deploy_targets.update(targets)

    # Terraform applies the new package and publishes a new version
    return helpers.tf_runner(targets=deploy_targets)
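A short usage sketch: unlike the options-driven variants above, this deploy() takes the set of function names directly, so the caller builds it (the names shown are the ones used elsewhere in these examples):

# Deploy the rule and alert functions, limited to one cluster
deploy({'rule', 'alert'}, config, clusters={'prod'})

# Deploy a single function across every configured cluster
deploy({'athena'}, config)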
Example #6
def terraform_destroy_handler(options, config):
    """Use Terraform to destroy any existing infrastructure

    Args:
        options (argparse.Namespace): Parsed arguments from manage.py
        config (CLIConfig): Loaded StreamAlert config

    Returns:
        bool: False if errors occurred, True otherwise
    """
    # Check for valid credentials
    if not check_credentials():
        return False

    # Verify terraform is installed
    if not terraform_check():
        return False

    # Ask for approval here since multiple Terraform commands may be necessary
    if not continue_prompt(message='Are you sure you want to destroy?'):
        return False

    if options.target:
        target_modules, valid = _get_valid_tf_targets(config, options.target)
        if not valid:
            return False

        return tf_runner(action='destroy',
                         auto_approve=True,
                         targets=target_modules if target_modules else None)

    # Migrate back to local state so Terraform can successfully
    # destroy the S3 bucket used by the backend.
    # Do not check for terraform or aws creds again since these were checked above
    if not terraform_generate_handler(
            config=config, init=True, check_tf=False, check_creds=False):
        return False

    if not run_command(['terraform', 'init']):
        return False

    # Destroy all of the infrastructure
    if not tf_runner(action='destroy', auto_approve=True):
        return False

    # Remove old Terraform files
    return terraform_clean_handler()
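continue_prompt guards the destructive paths here and in Examples #7 and #17, but is not defined on this page. A plausible minimal sketch, assuming it insists on an explicit yes/no answer (the real helper may phrase this differently):

def continue_prompt(message):
    """Ask the user to confirm a destructive action."""
    response = ''
    while response not in ('yes', 'no'):
        # Python 3's input(); older snippets here predate it
        response = input('{} (yes/no): '.format(message)).strip().lower()
    return response == 'yes'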
Example #7
def _terraform_destroy(options, config):
    """Use Terraform to destroy any existing infrastructure

    Args:
        options (namedtuple): Parsed arguments from manage.py
        config (CLIConfig): Loaded StreamAlert CLI config
    """
    # Ask for approval here since multiple Terraform commands may be necessary
    if not continue_prompt(message='Are you sure you want to destroy?'):
        sys.exit(1)

    if options.target:
        targets = []
        # Iterate over any targets to destroy. Global modules, like athena,
        # are prefixed with `stream_alert_`, while cluster-based modules
        # combine the target and cluster names
        for target in options.target:
            if target == 'athena':
                targets.append('module.stream_alert_{}'.format(target))
            elif target == 'threat_intel_downloader':
                targets.append('module.threat_intel_downloader')
            else:
                targets.extend([
                    'module.{}_{}'.format(target, cluster)
                    for cluster in config.clusters()
                ])

        tf_runner(action='destroy', auto_approve=True, targets=targets)
        return

    # Migrate back to local state so Terraform can successfully
    # destroy the S3 bucket used by the backend.
    if not terraform_generate(config=config, init=True):
        return

    if not run_command(['terraform', 'init']):
        return

    # Destroy all of the infrastructure
    if not tf_runner(action='destroy', auto_approve=True):
        return

    # Remove old Terraform files
    _terraform_clean(config)
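Tracing the branching above with made-up inputs shows how global and clustered targets end up with different module addresses:

clusters = ['corp', 'prod']  # stand-in for config.clusters()
targets = []

for target in ['athena', 'kinesis']:
    if target == 'athena':
        targets.append('module.stream_alert_{}'.format(target))
    elif target == 'threat_intel_downloader':
        targets.append('module.threat_intel_downloader')
    else:
        targets.extend(
            'module.{}_{}'.format(target, cluster) for cluster in clusters
        )

print(targets)
# ['module.stream_alert_athena', 'module.kinesis_corp', 'module.kinesis_prod']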
Example #8
def deploy(options, config):
    """Deploy new versions of all Lambda functions

    Args:
        options (namedtuple): Parsed arguments from the CLI
        config (CLIConfig): Loaded StreamAlert config

    Steps:
        Build AWS Lambda deployment package
        Upload to S3
        Update lambda.json with uploaded package checksum and S3 key
        Publish new version
        Update each cluster's Lambda configuration with latest published version
        Run Terraform Apply
    """
    # Terraform apply only to the module which contains our lambda functions
    deploy_targets = set()
    packages = []

    if 'all' in options.processor:
        processors = {
            'alert', 'alert_merger', 'apps', 'athena', 'rule', 'rule_promo',
            'threat_intel_downloader'
        }
    else:
        processors = options.processor

    for processor in processors:
        package, targets = _create(processor, config, options.clusters)
        # Continue if the package isn't enabled
        if not all([package, targets]):
            continue

        packages.append(package)
        deploy_targets.update(targets)

    # Update the rule table now if the rule processor is being deployed
    if 'rule' in options.processor:
        _update_rule_table(options, config)

    # Terraform applies the new package and publishes a new version
    helpers.tf_runner(targets=deploy_targets)
Example #9
def rollback(options):
    """Rollback the current production AWS Lambda version by 1

    Notes:
        Ignores functions whose production version is $LATEST
        Only rolls back if the published version is greater than 1
    """
    clusters = CONFIG.clusters()

    if 'all' in options.processor:
        lambda_functions = {
            'rule_processor', 'alert_processor', 'athena_partition_refresh'
        }
    else:
        lambda_functions = {
            '{}_processor'.format(proc)
            for proc in options.processor if proc != 'athena'
        }
        if 'athena' in options.processor:
            lambda_functions.add('athena_partition_refresh')

    for cluster in clusters:
        for lambda_function in lambda_functions:
            stream_alert_key = CONFIG['clusters'][cluster]['modules'][
                'stream_alert']
            current_vers = stream_alert_key[lambda_function]['current_version']
            if current_vers != '$LATEST':
                current_vers = int(current_vers)
                if current_vers > 1:
                    new_vers = current_vers - 1
                    CONFIG['clusters'][cluster]['modules']['stream_alert'][
                        lambda_function]['current_version'] = new_vers
                    CONFIG.write()

    targets = ['module.stream_alert_{}'.format(x) for x in CONFIG.clusters()]

    if not terraform_generate(config=CONFIG):
        return

    helpers.tf_runner(targets=targets)
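The version bookkeeping inside the nested loops reduces to a small pure rule, restated here as a self-contained function (the name is ours, not the project's):

def previous_version(current_version):
    """Return the version to roll back to, or None if no rollback applies.

    Mirrors the guards above: '$LATEST' is skipped, and only published
    versions greater than 1 are decremented.
    """
    if current_version == '$LATEST':
        return None
    version = int(current_version)
    return version - 1 if version > 1 else None

assert previous_version('$LATEST') is None
assert previous_version('1') is None
assert previous_version('5') == 4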
Example #10
def set_kinesis_events(options, config, enable=True):
    """Enable or disable Kinesis events for given clusters

    Args:
        options (namedtuple): Parsed arguments
        config (CLIConfig): Loaded StreamAlert config
        enable (bool): Enable/Disable switch
    """
    for cluster in options.clusters or config.clusters():
        if 'kinesis_events' in config['clusters'][cluster]['modules']:
            config['clusters'][cluster]['modules']['kinesis_events'][
                'enabled'] = enable

    config.write()

    if not options.skip_terraform:
        terraform_generate(config)
        tf_runner(action='apply',
                  targets=[
                      'module.{}_{}'.format('kinesis_events', cluster)
                      for cluster in config.clusters()
                  ])
Example #11
def rollback(options, config):
    """Rollback the current production AWS Lambda version by 1

    Notes:
        Ignores functions whose production version is $LATEST
        Only rolls back if the published version is greater than 1
    """
    clusters = options.clusters or config.clusters()
    rollback_all = 'all' in options.processor
    tf_targets = []

    if rollback_all or 'alert' in options.processor:
        tf_targets.extend(_rollback_alert(config) or [])

    if rollback_all or 'alert_merger' in options.processor:
        tf_targets.extend(_rollback_alert_merger(config) or [])

    if rollback_all or 'apps' in options.processor:
        tf_targets.extend(_rollback_apps(config, clusters) or [])

    if rollback_all or 'athena' in options.processor:
        tf_targets.extend(_rollback_athena(config) or [])

    if rollback_all or 'rule' in options.processor:
        tf_targets.extend(_rollback_rule(config, clusters) or [])

    if rollback_all or 'threat_intel_downloader' in options.processor:
        tf_targets.extend(_rollback_downloader(config) or [])

    if not tf_targets:  # No changes made
        return

    config.write()

    if not terraform_generate(config=config):
        return

    helpers.tf_runner(targets=sorted(tf_targets))
Example #12
def terraform_build_handler(options, config):
    """Run Terraform with an optional set of targets and clusters

    Args:
        options (argparse.Namespace): Parsed arguments from manage.py
        config (CLIConfig): Loaded StreamAlert config

    Returns:
        bool: False if errors occurred, True otherwise
    """
    if not terraform_generate_handler(config=config):
        return False

    target_modules, valid = _get_valid_tf_targets(config, options.target)
    if not valid:
        return False

    return tf_runner(targets=target_modules if target_modules else None)
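_get_valid_tf_targets is not shown in these examples; its call sites only reveal that it returns a (modules, valid) pair. A speculative sketch of that contract, with an entirely illustrative mapping (the real helper derives valid modules from the loaded config):

def _get_valid_tf_targets(config, targets):
    """Illustrative only: map requested targets to module addresses."""
    if not targets:
        return [], True  # no --target given; operate on everything

    known = {'athena': 'module.stream_alert_athena'}  # made-up mapping
    modules = []
    for target in targets:
        if target not in known:
            return [], False  # reject unknown targets
        modules.append(known[target])
    return modules, True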
Example #13
def terraform_build(options, config):
    """Run Terraform with an optional set of targets

    Args:
        options (namedtuple): Parsed arguments from manage.py
        config (CLIConfig): Loaded StreamAlert config
    """
    # Generate Terraform files
    if not terraform_generate(config=config):
        return
    # Target is for terraforming a specific streamalert module.
    # This value is passed as a list
    if options.target == ['athena']:
        tf_runner(targets=['module.stream_alert_athena'])
    elif options.target:
        targets = [
            'module.{}_{}'.format(target, cluster)
            for cluster in config.clusters() for target in options.target
        ]
        tf_runner(targets=targets)
    else:
        tf_runner()
Example #14
def deploy(options, config):
    """Deploy new versions of all Lambda functions

    Steps:
    - Build AWS Lambda deployment package
    - Upload to S3
    - Update lambda.json with uploaded package checksum and S3 key
    - Publish new version
    - Update each cluster's Lambda configuration with latest published version
    - Run Terraform Apply
    """
    processor = options.processor
    # Terraform apply only to the module which contains our lambda functions
    targets = set()
    packages = []

    def _publish_version(packages):
        """Publish Lambda versions"""
        for package in packages:
            if package.package_name in {'athena_partition_refresh', 'threat_intel_downloader'}:
                published = LambdaVersion(
                    config=config, package=package, clustered_deploy=False).publish_function()
            else:
                published = LambdaVersion(config=config, package=package).publish_function()
            if not published:
                return False

        return True

    def _deploy_rule_processor():
        """Create Rule Processor package and publish versions"""
        rule_package = RuleProcessorPackage(config=config, version=current_version)
        rule_package.create_and_upload()
        return rule_package

    def _deploy_alert_processor():
        """Create Alert Processor package and publish versions"""
        alert_package = AlertProcessorPackage(config=config, version=current_version)
        alert_package.create_and_upload()
        return alert_package

    def _deploy_athena_partition_refresh():
        """Create Athena Partition Refresh package and publish"""
        athena_package = AthenaPackage(config=config, version=current_version)
        athena_package.create_and_upload()
        return athena_package

    def _deploy_apps_function():
        """Create app integration package and publish versions"""
        app_integration_package = AppIntegrationPackage(config=config, version=apps_version)
        app_integration_package.create_and_upload()
        return app_integration_package

    def _deploy_threat_intel_downloader():
        """Create Threat Intel downloader package and publish version"""
        threat_intel_package = ThreatIntelDownloaderPackage(
            config=config,
            version=ti_downloader_version
        )
        threat_intel_package.create_and_upload()
        return threat_intel_package

    if 'all' in processor:
        targets.update({'module.stream_alert_{}'.format(x) for x in config.clusters()})

        targets.update({
            'module.app_{}_{}'.format(app_name, cluster)
            for cluster, info in config['clusters'].items()
            for app_name in info['modules'].get('stream_alert_apps', {})
        })

        packages.append(_deploy_rule_processor())
        packages.append(_deploy_alert_processor())
        packages.append(_deploy_apps_function())

        # Only include the Athena function if it exists and is enabled
        athena_config = config['lambda'].get('athena_partition_refresh_config')
        if athena_config and athena_config.get('enabled', False):
            targets.add('module.stream_alert_athena')
            packages.append(_deploy_athena_partition_refresh())

    else:

        if 'rule' in processor:
            targets.update({'module.stream_alert_{}'.format(x) for x in config.clusters()})

            packages.append(_deploy_rule_processor())

        if 'alert' in processor:
            targets.update({'module.stream_alert_{}'.format(x) for x in config.clusters()})

            packages.append(_deploy_alert_processor())

        if 'apps' in processor:

            targets.update({
                'module.app_{}_{}'.format(app_name, cluster)
                for cluster, info in config['clusters'].items()
                for app_name in info['modules'].get('stream_alert_apps', {})
            })

            packages.append(_deploy_apps_function())

        if 'athena' in processor:
            targets.add('module.stream_alert_athena')

            packages.append(_deploy_athena_partition_refresh())

        if 'threat_intel_downloader' in processor:
            targets.add('module.threat_intel_downloader')
            packages.append(_deploy_threat_intel_downloader())

    # Regenerate the Terraform configuration with the new S3 keys
    if not terraform_generate(config=config):
        return

    # Run Terraform: Update the Lambda source code in $LATEST
    if not helpers.tf_runner(targets=targets):
        sys.exit(1)

    # TODO(jack) write integration test to verify newly updated function

    # Publish a new production Lambda version
    if not _publish_version(packages):
        return

    # Regenerate the Terraform configuration with the new Lambda versions
    if not terraform_generate(config=config):
        return

    # Apply the changes to the Lambda aliases
    helpers.tf_runner(targets=targets)
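The app-target comprehension above (repeated in the 'apps' branch) walks each cluster's stream_alert_apps module settings. A self-contained trace with a made-up config fragment:

config_clusters = {
    'prod': {'modules': {'stream_alert_apps': {'duo': {}, 'box': {}}}},
    'corp': {'modules': {}},  # no apps configured for this cluster
}

app_targets = {
    'module.app_{}_{}'.format(app_name, cluster)
    for cluster, info in config_clusters.items()
    for app_name in info['modules'].get('stream_alert_apps', {})
}
print(sorted(app_targets))
# ['module.app_box_prod', 'module.app_duo_prod']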
Example #15
def terraform_init(options, config):
    """Initialize infrastructure using Terraform

    Args:
        options (argparse.Namespace): Parsed arguments from manage.py
        config (CLIConfig): Loaded StreamAlert config

    Returns:
        bool: False if errors occurred, True otherwise
    """
    # Stop here if only initializing the backend
    if options.backend:
        return _terraform_init_backend()

    LOGGER.info('Initializing StreamAlert')

    # generate init Terraform files
    if not terraform_generate_handler(config=config, init=True):
        return False

    LOGGER.info('Initializing Terraform')
    if not run_command(['terraform', 'init']):
        return False

    # build init infrastructure
    LOGGER.info('Building initial infrastructure')
    init_targets = [
        'aws_s3_bucket.lambda_source', 'aws_s3_bucket.logging_bucket',
        'aws_s3_bucket.stream_alert_secrets',
        'aws_s3_bucket.terraform_remote_state', 'aws_s3_bucket.streamalerts',
        'aws_kms_key.server_side_encryption',
        'aws_kms_alias.server_side_encryption',
        'aws_kms_key.stream_alert_secrets',
        'aws_kms_alias.stream_alert_secrets'
    ]
    if not tf_runner(targets=init_targets):
        LOGGER.error('An error occurred while running StreamAlert init')
        return False

    # generate the main.tf with remote state enabled
    LOGGER.info('Configuring Terraform Remote State')
    if not terraform_generate_handler(
            config=config, check_tf=False, check_creds=False):
        return False

    if not run_command(['terraform', 'init']):
        return False

    LOGGER.info('Deploying Lambda Functions')

    functions = ['rule', 'alert', 'alert_merger', 'athena', 'classifier']

    deploy(functions, config)

    # we need to manually create the streamalerts table since terraform does not support this
    # See: https://github.com/terraform-providers/terraform-provider-aws/issues/1486
    alerts_bucket = '{}.streamalerts'.format(
        config['global']['account']['prefix'])
    create_table('alerts', alerts_bucket, config)

    LOGGER.info('Building remaining infrastructure')
    return tf_runner(refresh=False)
Example #16
def deploy(options):
    """Deploy new versions of all Lambda functions

    Steps:
    - Build AWS Lambda deployment package
    - Upload to S3
    - Update lambda.json with uploaded package checksum and S3 key
    - Publish new version
    - Update each cluster's Lambda configuration with latest published version
    - Run Terraform Apply
    """
    processor = options.processor
    # Terraform apply only to the module which contains our lambda functions
    targets = []
    packages = []

    def _publish_version(packages):
        """Publish Lambda versions"""
        for package in packages:
            if package.package_name == 'athena_partition_refresh':
                published = LambdaVersion(
                    config=CONFIG, package=package,
                    clustered_deploy=False).publish_function()
            else:
                published = LambdaVersion(config=CONFIG,
                                          package=package).publish_function()
            if not published:
                return False

        return True

    def _deploy_rule_processor():
        """Create Rule Processor package and publish versions"""
        rule_package = RuleProcessorPackage(config=CONFIG,
                                            version=current_version)
        rule_package.create_and_upload()
        return rule_package

    def _deploy_alert_processor():
        """Create Alert Processor package and publish versions"""
        alert_package = AlertProcessorPackage(config=CONFIG,
                                              version=current_version)
        alert_package.create_and_upload()
        return alert_package

    def _deploy_athena_partition_refresh():
        """Create Athena Partition Refresh package and publish"""
        athena_package = AthenaPackage(config=CONFIG, version=current_version)
        athena_package.create_and_upload()
        return athena_package

    if 'all' in processor:
        targets.extend(
            ['module.stream_alert_{}'.format(x) for x in CONFIG.clusters()])

        packages.append(_deploy_rule_processor())
        packages.append(_deploy_alert_processor())

        # Only include the Athena function if it exists and is enabled
        athena_config = CONFIG['lambda'].get('athena_partition_refresh_config')
        if athena_config and athena_config.get('enabled', False):
            targets.append('module.stream_alert_athena')
            packages.append(_deploy_athena_partition_refresh())

    else:

        if 'rule' in processor:
            targets.extend([
                'module.stream_alert_{}'.format(x) for x in CONFIG.clusters()
            ])

            packages.append(_deploy_rule_processor())

        if 'alert' in processor:
            targets.extend([
                'module.stream_alert_{}'.format(x) for x in CONFIG.clusters()
            ])

            packages.append(_deploy_alert_processor())

        if 'athena' in processor:
            targets.append('module.stream_alert_athena')

            packages.append(_deploy_athena_partition_refresh())

    # Regenerate the Terraform configuration with the new S3 keys
    if not terraform_generate(config=CONFIG):
        return

    # Run Terraform: Update the Lambda source code in $LATEST
    if not helpers.tf_runner(targets=targets):
        sys.exit(1)

    # TODO(jack) write integration test to verify newly updated function

    # Publish a new production Lambda version
    if not _publish_version(packages):
        return

    # Regenerate the Terraform configuration with the new Lambda versions
    if not terraform_generate(config=CONFIG):
        return

    # Apply the changes to the Lambda aliases
    helpers.tf_runner(targets=targets)
Example #17
def terraform_handler(options, config):
    """Handle all Terraform CLI operations

    Args:
        options (namedtuple): Parsed arguments from manage.py
        config (CLIConfig): Loaded StreamAlert config
    """
    # Check for valid credentials
    if not check_credentials():
        return

    # Verify terraform is installed
    if not terraform_check():
        return
    # Use a named tuple to match the 'processor' attribute in the argparse options
    deploy_opts = namedtuple('DeployOptions', ['processor', 'clusters'])

    # Plan and Apply our streamalert infrastructure
    if options.subcommand == 'build':
        terraform_build(options, config)

    # generate terraform files
    elif options.subcommand == 'generate':
        if not terraform_generate(config=config):
            return

    elif options.subcommand == 'init-backend':
        run_command(['terraform', 'init'])

    # initialize streamalert infrastructure from a blank state
    elif options.subcommand == 'init':
        LOGGER_CLI.info('Initializing StreamAlert')

        # generate init Terraform files
        if not terraform_generate(config=config, init=True):
            return

        LOGGER_CLI.info('Initializing Terraform')
        if not run_command(['terraform', 'init']):
            sys.exit(1)

        # build init infrastructure
        LOGGER_CLI.info('Building Initial Infrastructure')
        init_targets = [
            'aws_s3_bucket.lambda_source', 'aws_s3_bucket.logging_bucket',
            'aws_s3_bucket.stream_alert_secrets',
            'aws_s3_bucket.terraform_remote_state',
            'aws_s3_bucket.streamalerts', 'aws_kms_key.stream_alert_secrets',
            'aws_kms_alias.stream_alert_secrets'
        ]
        if not tf_runner(targets=init_targets):
            LOGGER_CLI.error('An error occurred while running StreamAlert init')
            sys.exit(1)

        # generate the main.tf with remote state enabled
        LOGGER_CLI.info('Configuring Terraform Remote State')
        if not terraform_generate(config=config):
            return

        if not run_command(['terraform', 'init']):
            return

        LOGGER_CLI.info('Deploying Lambda Functions')
        # deploy both lambda functions
        deploy(deploy_opts(['rule', 'alert'], []), config)
        # create all remainder infrastructure

        LOGGER_CLI.info('Building Remaining Infrastructure')
        tf_runner()

    elif options.subcommand == 'clean':
        if not continue_prompt(
                message='Are you sure you want to clean all Terraform files?'):
            sys.exit(1)
        terraform_clean(config)

    elif options.subcommand == 'destroy':
        if not continue_prompt(message='Are you sure you want to destroy?'):
            sys.exit(1)

        if options.target:
            targets = []
            # Iterate over any targets to destroy. Global modules, like athena,
            # are prefixed with `stream_alert_`, while cluster-based modules
            # combine the target and cluster names
            for target in options.target:
                if target == 'athena':
                    targets.append('module.stream_alert_{}'.format(target))
                elif target == 'threat_intel_downloader':
                    targets.append('module.threat_intel_downloader')
                else:
                    targets.extend([
                        'module.{}_{}'.format(target, cluster)
                        for cluster in config.clusters()
                    ])

            tf_runner(targets=targets, action='destroy')
            return

        # Migrate back to local state so Terraform can successfully
        # destroy the S3 bucket used by the backend.
        if not terraform_generate(config=config, init=True):
            return

        if not run_command(['terraform', 'init']):
            return

        # Destroy all of the infrastructure
        if not tf_runner(action='destroy'):
            return

        # Remove old Terraform files
        terraform_clean(config)

    # get a quick status on our declared infrastructure
    elif options.subcommand == 'status':
        terraform_status(config)
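Other examples on this page (#6, #12, #15) carry these same branches as standalone handlers. As a design note, the if/elif chain above can also be written as a dispatch table; a sketch abridged to three subcommands, with lambda shims smoothing over the uneven handler signatures (the wiring is our illustration, not the project's):

handlers = {
    'build': terraform_build,  # takes (options, config)
    'status': lambda options, config: terraform_status(config),
    'clean': lambda options, config: terraform_clean(config),
}

handler = handlers.get(options.subcommand)
if handler is not None:
    handler(options, config)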