Example #1
    def add_app_integration(self, app_info):
        """Add a configuration for a new streamalert app integration function

        Args:
            app_info (dict): The necessary values needed to begin configuring
                a new app integration
        """
        exists, prompt_for_auth, overwrite = False, True, False
        app = get_app(app_info, False)

        # Check to see if there is an existing configuration for this app integration
        cluster_config = self.config['clusters'][app_info['cluster']]
        if app_info['app_name'] in cluster_config['modules'].get('stream_alert_apps', {}):
            prompt = ('An app with the name \'{}\' is already configured for cluster '
                      '\'{}\'. Would you like to update the existing app\'s configuration'
                      '?'.format(app_info['app_name'], app_info['cluster']))

            exists = True

            # Return if the user is not deliberately updating an existing config
            if not continue_prompt(message=prompt):
                return

            prompt = ('Would you also like to update the authentication information for '
                      'app integration with name \'{}\'?'.format(app_info['app_name']))

            # If this is true, we shouldn't prompt again to warn about overwriting
            prompt_for_auth = overwrite = continue_prompt(message=prompt)

        if prompt_for_auth and not save_app_auth_info(app, app_info, overwrite):
            return

        apps_config = cluster_config['modules'].get('stream_alert_apps', {})
        local_config_keys = {'interval', 'timeout', 'memory'}
        if not exists:
            # Save a default log level as info to the config
            app_info['log_level'] = 'info'
            app_info['current_version'] = '$LATEST'
            local_config_keys.update({'log_level', 'current_version', 'type'})

            apps_config[app_info['app_name']] = {key: app_info[key]
                                                 for key in local_config_keys}
        else:
            apps_config[app_info['app_name']].update({key: app_info[key]
                                                      for key in local_config_keys})

        cluster_config['modules']['stream_alert_apps'] = apps_config

        # Add this service to the sources for this app integration
        # The `stream_alert_app` is purposely singular here
        app_sources = self.config['sources'].get('stream_alert_app', {})
        app_sources[app_info['function_name']] = {'logs': [app.service()]}
        self.config['sources']['stream_alert_app'] = app_sources

        LOGGER_CLI.info('Successfully added \'%s\' app integration to \'conf/clusters/%s.json\' '
                        'for service \'%s\'.', app_info['app_name'],
                        app_info['cluster'], app_info['type'])

        self.write()
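A minimal usage sketch for the method above; the `app_info` keys are inferred from how the code reads them, and every value shown is a hypothetical placeholder.

# Hypothetical invocation; all values are illustrative assumptions
app_info = {
    'app_name': 'duo-collector',       # assumed app name
    'cluster': 'prod',                 # must exist in config['clusters']
    'type': 'duo_auth',                # service type consumed by get_app()
    'function_name': 'prefix_prod_duo_auth_duo-collector_app',
    'interval': 'rate(10 minutes)',
    'timeout': 60,
    'memory': 128,
}
cli_config.add_app_integration(app_info)  # cli_config: an instance of this config class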
Example #2
    def _add_global_metric_alarm(self, alarm_info):
        """Add a metric alarm that corresponds to a predefined metrics globally

        Args:
            alarm_info (dict): All the necessary values needed to add a CloudWatch
                metric alarm
        """
        function_name = alarm_info['function']

        func_config_name = '{}_config'.format(function_name)

        # Ensure a config section exists for this function
        if func_config_name not in self.config['lambda']:
            self.config['lambda'][func_config_name] = {}

        function_config = self.config['lambda'][func_config_name]

        if function_name in CLUSTERED_FUNCTIONS:
            if not self._clusters_with_metrics_enabled(function_name):
                prompt = (
                    'Metrics are not currently enabled for the \'{}\' function '
                    'within any cluster. Creating an alarm will have no effect '
                    'until metrics are enabled for this function in at least one '
                    'cluster. Would you still like to continue?'.format(
                        function_name))
                if not continue_prompt(message=prompt):
                    return False

        else:
            if not function_config.get('enable_custom_metrics'):
                prompt = (
                    'Metrics are not currently enabled for the \'{}\' function. '
                    'Would you like to enable metrics for this function?'
                ).format(function_name)

                if continue_prompt(message=prompt):
                    self.toggle_metrics(function_name, enabled=True)

                elif not continue_prompt(
                        message='Would you still like to add this alarm '
                        'even though metrics are disabled?'):
                    return False

        metric_alarms = function_config.get('custom_metric_alarms', {})

        # Format the metric name for the aggregate metric
        alarm_settings = alarm_info.copy()
        alarm_settings['metric_name'] = '{}-{}'.format(
            metrics.FUNC_PREFIXES[function_name],
            alarm_settings['metric_name'])

        function_config['custom_metric_alarms'] = self._add_metric_alarm_config(
            alarm_settings, metric_alarms)
        LOGGER.info(
            'Successfully added \'%s\' metric alarm to '
            '\'conf/lambda.json\'.', alarm_settings['alarm_name'])

        return True
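For reference, a hedged sketch of the `alarm_info` shape this method consumes; only 'function', 'metric_name', and 'alarm_name' are read in the body above, so the remaining CloudWatch fields are assumptions.

# Illustrative alarm_info; the threshold fields are assumed extras
alarm_info = {
    'function': 'alert_merger',            # assumed function name
    'metric_name': 'FailedAlerts',         # prefixed internally via metrics.FUNC_PREFIXES
    'alarm_name': 'alert-merger-failed-alerts',
    'comparison_operator': 'GreaterThanOrEqualToThreshold',
    'evaluation_periods': 1,
    'period': 300,
    'threshold': 1.0,
}
if cli_config._add_global_metric_alarm(alarm_info):
    cli_config.write()  # persist, mirroring the pattern in the other examples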
Example #3
    def _add_metric_alarm_per_cluster(self, alarm_info, function_name):
        """Add a metric alarm for individual clusters. This is for non-aggregate
        CloudWatch metric alarms.

        Args:
            alarm_info (dict): All the necessary values needed to add a CloudWatch
                metric alarm.
            function_name (str): The name of the lambda function this metric is
                related to.
        """
        # If no clusters have been specified by the user, we can assume this alarm
        # should be created for all available clusters, so fall back to that
        clusters = (alarm_info['clusters'] if alarm_info['clusters'] else list(
            self.config['clusters']))

        # Go over each of the clusters and see if enable_metrics == True and prompt
        # the user to toggle metrics on if this is False
        for cluster in clusters:
            function_config = (self.config['clusters'][cluster]['modules']
                               ['stream_alert'][function_name])

            if not function_config.get('enable_metrics'):
                prompt = (
                    'Metrics are not currently enabled for the \'{}\' function '
                    'within the \'{}\' cluster. Would you like to enable metrics '
                    'for this cluster?'.format(function_name, cluster))

                if continue_prompt(message=prompt):
                    self.toggle_metrics(True, [cluster], [function_name])

                elif not continue_prompt(
                        message='Would you still like to add this alarm '
                        'even though metrics are disabled?'):
                    continue

            metric_alarms = function_config.get('metric_alarms', {})

            # Format the metric name for the cluster based metric
            # Prepend a prefix for this function and append the cluster name
            alarm_settings = alarm_info.copy()
            alarm_settings['metric_name'] = '{}-{}-{}'.format(
                metrics.FUNC_PREFIXES[function_name],
                alarm_settings['metric_name'], cluster.upper())

            new_alarms = self._add_metric_alarm_config(alarm_settings,
                                                       metric_alarms)
            if new_alarms is not False:
                function_config['metric_alarms'] = new_alarms
                LOGGER_CLI.info(
                    'Successfully added \'%s\' metric alarm for the \'%s\' '
                    'function to \'conf/clusters/%s.json\'.',
                    alarm_settings['alarm_name'], function_name, cluster)
Example #4
    def _add_cluster_metric_alarm(self, alarm_info):
        """Add a metric alarm that corresponds to a predefined metrics for clusters

        Args:
            alarm_info (dict): All the necessary values needed to add a CloudWatch
                metric alarm.
        """
        function_name = alarm_info['function']

        # Go over each of the clusters and see if enable_metrics == True and prompt
        # the user to toggle metrics on if this is False
        config_name = '{}_config'.format(function_name)
        for cluster in alarm_info['clusters']:
            function_config = (self.config['clusters'][cluster]['modules']
                               ['stream_alert'][config_name])

            if not function_config.get('enable_custom_metrics'):
                prompt = (
                    'Metrics are not currently enabled for the \'{}\' function '
                    'within the \'{}\' cluster. Would you like to enable metrics '
                    'for this cluster?'.format(function_name, cluster))

                if continue_prompt(message=prompt):
                    self.toggle_metrics(function_name,
                                        enabled=True,
                                        clusters=[cluster])

                elif not continue_prompt(
                        message='Would you still like to add this alarm '
                        'even though metrics are disabled?'):
                    continue

            metric_alarms = function_config.get('custom_metric_alarms', {})

            # Format the metric name for the cluster based metric
            # Prepend a prefix for this function and append the cluster name
            alarm_settings = alarm_info.copy()
            alarm_settings['metric_name'] = '{}-{}-{}'.format(
                metrics.FUNC_PREFIXES[function_name],
                alarm_settings['metric_name'], cluster.upper())

            function_config['custom_metric_alarms'] = self._add_metric_alarm_config(
                alarm_settings, metric_alarms)
            LOGGER.info(
                'Successfully added \'%s\' metric alarm for the \'%s\' '
                'function to \'conf/clusters/%s.json\'.',
                alarm_settings['alarm_name'], function_name, cluster)

        return True
Example #5
def drop_all_tables(athena_client):
    """Drop all 'streamalert' Athena tables

    Used when cleaning up an existing deployment

    Args:
        athena_client (boto3.client): Instantiated CLI AthenaClient
    """
    if not continue_prompt(
            message='Are you sure you want to drop all Athena tables?'):
        return

    success, all_tables = athena_client.run_athena_query(
        query='SHOW TABLES', database='streamalert')
    if not success:
        LOGGER_CLI.error('There was an issue getting all tables')
        return

    unique_tables = athena_helpers.unique_values_from_query(all_tables)

    for table in unique_tables:
        success, _ = athena_client.run_athena_query(
            query='DROP TABLE {}'.format(table), database='streamalert')
        if not success:
            LOGGER_CLI.error('Unable to drop the %s table', table)
        else:
            LOGGER_CLI.info('Dropped %s', table)
Example #6
def drop_all_tables(config):
    """Drop all 'streamalert' Athena tables

    Used when cleaning up an existing deployment

    Args:
        config (CLIConfig): Loaded StreamAlert CLI config
    """
    if not continue_prompt(
            message='Are you sure you want to drop all Athena tables?'):
        return

    athena_client = StreamAlertAthenaClient(
        config, results_key_prefix='stream_alert_cli')

    success, all_tables = athena_client.run_athena_query(
        query='SHOW TABLES', database=athena_client.sa_database)
    if not success:
        LOGGER_CLI.error('There was an issue getting all tables')
        return

    unique_tables = athena_helpers.unique_values_from_query(all_tables)

    for table in unique_tables:
        success, _ = athena_client.run_athena_query(
            query='DROP TABLE {}'.format(table),
            database=athena_client.sa_database)
        if not success:
            LOGGER_CLI.error('Unable to drop the %s table', table)
        else:
            LOGGER_CLI.info('Dropped %s', table)
Example #7
def drop_all_tables(config):
    """Drop all 'streamalert' Athena tables

    Used when cleaning up an existing deployment

    Args:
        config (CLIConfig): Loaded StreamAlert config

    Returns:
        bool: False if errors occurred, True otherwise
    """
    if not continue_prompt(
            message='Are you sure you want to drop all Athena tables?'):
        return False

    athena_client = get_athena_client(config)

    if not athena_client.drop_all_tables():
        LOGGER.error('Failed to drop one or more tables from database: %s',
                     athena_client.database)
        return False

    LOGGER.info('Successfully dropped all tables from database: %s',
                athena_client.database)
    return True
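A minimal driver sketch for the function above, assuming a hypothetical import path and constructor signature for the CLI config loader.

import sys

from streamalert_cli.config import CLIConfig  # assumed import path

config = CLIConfig(config_path='conf')        # assumed constructor signature
if not drop_all_tables(config):
    sys.exit(1)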
Example #8
def terraform_destroy_handler(options, config):
    """Use Terraform to destroy any existing infrastructure

    Args:
        options (argparse.Namespace): Parsed arguments from manage.py
        config (CLIConfig): Loaded StreamAlert config

    Returns:
        bool: False if errors occurred, True otherwise
    """
    # Check for valid credentials
    if not check_credentials():
        return False

    # Verify terraform is installed
    if not terraform_check():
        return False

    # Ask for approval here since multiple Terraform commands may be necessary
    if not continue_prompt(message='Are you sure you want to destroy?'):
        return False

    if options.target:
        target_modules, valid = _get_valid_tf_targets(config, options.target)
        if not valid:
            return False

        return tf_runner(action='destroy',
                         auto_approve=True,
                         targets=target_modules if target_modules else None)

    # Migrate back to local state so Terraform can successfully
    # destroy the S3 bucket used by the backend.
    # Do not check for terraform or aws creds again since these were checked above
    if not terraform_generate_handler(
            config=config, init=True, check_tf=False, check_creds=False):
        return False

    if not run_command(['terraform', 'init']):
        return False

    # Destroy all of the infrastructure
    if not tf_runner(action='destroy', auto_approve=True):
        return False

    # Remove old Terraform files
    return terraform_clean_handler()
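A rough sketch of calling the handler with parsed options; the argparse.Namespace here is built by hand, and the target name is a placeholder assumption.

import argparse
import sys

# Hand-built stand-in for the manage.py argument parser output;
# 'config' is a loaded CLIConfig instance (see the earlier sketch)
options = argparse.Namespace(target=['cloudwatch_monitoring'])  # assumed target name
if not terraform_destroy_handler(options, config):
    sys.exit(1)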
Example #9
def save_parameter(region, name, value, description, force_overwrite=False):
    """Function to save the designated value to parameter store

    Args:
        name (str): Name of the parameter being saved
        value (str): Value to be saved to the parameter store
    """
    ssm_client = boto3.client('ssm', region_name=region)

    param_value = json.dumps(value)

    # The name of the parameter should follow the format of:
    # <function_name>_<type> where <type> is one of {'auth', 'config', 'state'}
    # and <function_name> follows the format:
    # '<prefix>_<cluster>_<service>_<app_name>_app'
    # Example: prefix_prod_duo_auth_production_collector_app_config
    def save(overwrite=False):

        ssm_client.put_parameter(Name=name,
                                 Description=description,
                                 Value=param_value,
                                 Type='SecureString',
                                 Overwrite=overwrite)

    try:
        save(overwrite=force_overwrite)
    except ClientError as err:
        if err.response['Error']['Code'] == 'ExpiredTokenException':
            # Log an error if this response was due to no credentials being found
            LOGGER_CLI.error(
                'Could not save \'%s\' to parameter store because no '
                'valid credentials were loaded.', name)

        if err.response['Error']['Code'] != 'ParameterAlreadyExists':
            raise

        prompt = (
            'A parameter already exists with name \'{}\'. Would you like '
            'to overwrite the existing value?'.format(name))

        # Ask to overwrite
        if not continue_prompt(message=prompt):
            return False

        save(overwrite=True)

    return True
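A short sketch of a call to `save_parameter`; the parameter name follows the format described in the comment above, and all values are placeholders.

# Illustrative call; the name follows the documented <function_name>_<type> format
auth_info = {'api_hostname': 'api-example.duosecurity.com', 'secret_key': '<secret>'}
saved = save_parameter(
    region='us-east-1',
    name='prefix_prod_duo_auth_production_collector_app_auth',
    value=auth_info,
    description='Auth info for the Duo collector app',
    force_overwrite=False,
)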
Example #10
def _terraform_destroy(options, config):
    """Use Terraform to destroy any existing infrastructure

    Args:
        options (namedtuple): Parsed arguments from manage.py
        config (CLIConfig): Loaded StreamAlert CLI config
    """
    # Ask for approval here since multiple Terraform commands may be necessary
    if not continue_prompt(message='Are you sure you want to destroy?'):
        sys.exit(1)

    if options.target:
        targets = []
        # Iterate over any targets to destroy. Global modules, like athena
        # are prefixed with `stream_alert_` while cluster based modules
        # are a combination of the target and cluster name
        for target in options.target:
            if target == 'athena':
                targets.append('module.stream_alert_{}'.format(target))
            elif target == 'threat_intel_downloader':
                targets.append('module.threat_intel_downloader')
            else:
                targets.extend([
                    'module.{}_{}'.format(target, cluster)
                    for cluster in config.clusters()
                ])

        tf_runner(action='destroy', auto_approve=True, targets=targets)
        return

    # Migrate back to local state so Terraform can successfully
    # destroy the S3 bucket used by the backend.
    if not terraform_generate(config=config, init=True):
        return

    if not run_command(['terraform', 'init']):
        return

    # Destroy all of the infrastructure
    if not tf_runner(action='destroy', auto_approve=True):
        return

    # Remove old Terraform files
    _terraform_clean(config)
Example #11
def terraform_handler(options, config):
    """Handle all Terraform CLI operations

    Args:
        options (namedtuple): Parsed arguments from manage.py
        config (CLIConfig): Loaded StreamAlert CLI config
    """
    # Check for valid credentials
    if not check_credentials():
        return

    # Verify terraform is installed
    if not terraform_check():
        return

    # Plan and Apply our streamalert infrastructure
    if options.subcommand == 'build':
        _terraform_build(options, config)

    # generate terraform files
    elif options.subcommand == 'generate':
        if not terraform_generate(config=config):
            return

    elif options.subcommand == 'init-backend':
        run_command(['terraform', 'init'])

    # initialize streamalert infrastructure from a blank state
    elif options.subcommand == 'init':
        _terraform_init(config)

    elif options.subcommand == 'clean':
        if not continue_prompt(
                message='Are you sure you want to clean all Terraform files?'):
            sys.exit(1)
        _terraform_clean(config)

    elif options.subcommand == 'destroy':
        _terraform_destroy(options, config)

    # get a quick status on our declared infrastructure
    elif options.subcommand == 'status':
        terraform_status(config)
Example #12
def drop_all_tables(config):
    """Drop all 'streamalert' Athena tables

    Used when cleaning up an existing deployment

    Args:
        config (CLIConfig): Loaded StreamAlert CLI config
    """
    if not continue_prompt(message='Are you sure you want to drop all Athena tables?'):
        return

    athena_client = get_athena_client(config)

    if not athena_client.drop_all_tables():
        LOGGER_CLI.error('Failed to drop one or more tables from database: %s',
                         athena_client.database)
    else:
        LOGGER_CLI.info('Successfully dropped all tables from database: %s',
                        athena_client.database)
Example #13
    def add_metric_alarm(self, alarm_info):
        """Add a metric alarm that corresponds to a predefined metrics

        Args:
            alarm_info (dict): All the necessary values needed to add a CloudWatch
                metric alarm
        """
        # Check to see if an alarm with this name already exists
        if self._alarm_exists(alarm_info['alarm_name']):
            return

        # Get the current metrics for each function
        current_metrics = metrics.MetricLogger.get_available_metrics()

        # Extract the function name this metric is associated with
        metric_function = {metric: function for function in current_metrics
                           for metric in current_metrics[function]}[alarm_info['metric_name']]

        # Do not continue if the user is trying to apply a metric alarm for an athena
        # metric to a specific cluster (since the athena function operates on all clusters)
        if (alarm_info['metric_target'] != 'aggregate' and
                metric_function == metrics.ATHENA_PARTITION_REFRESH_NAME):
            LOGGER_CLI.error('Metrics for the athena function can only be applied '
                             'to an aggregate metric target, not on a per-cluster basis.')
            return

        # If the metric is related to either the rule processor or alert processor, we should
        # check to see if any cluster has metrics enabled for that function before continuing
        if (metric_function in {metrics.ALERT_PROCESSOR_NAME, metrics.RULE_PROCESSOR_NAME} and
                not any(self.config['clusters'][cluster]['modules']['stream_alert']
                        [metric_function].get('enable_metrics') for cluster in
                        self.config['clusters'])):
            prompt = ('Metrics are not currently enabled for the \'{}\' function '
                      'within any cluster. Creating an alarm will have no effect '
                      'until metrics are enabled for this function in at least one '
                      'cluster. Would you still like to continue?'.format(metric_function))
            if not continue_prompt(message=prompt):
                return

        elif metric_function == metrics.ATHENA_PARTITION_REFRESH_NAME:
            # If the user is attempting to add a metric for athena, make sure the athena
            # function is initialized first
            if 'athena_partition_refresh_config' not in self.config['lambda']:
                LOGGER_CLI.error('No configuration found for Athena Partition Refresh. '
                                 'Please run: `$ python manage.py athena init` first.')
                return

            # If the athena function is initialized, but metrics are not enabled, ask
            # the user if they would like to enable them now
            if not self.config['lambda']['athena_partition_refresh_config'].get('enable_metrics'):
                prompt = ('Metrics are not currently enabled for the \'athena\' function. '
                          'Would you like to enable metrics for athena?')

                if continue_prompt(message=prompt):
                    self.toggle_metrics(True, None, [metric_function])

                elif not continue_prompt(message='Would you still like to add this alarm '
                                                 'even though metrics are disabled?'):
                    return

        # Add metric alarms for the aggregate metrics - these are added to the global config
        if (alarm_info['metric_target'] == 'aggregate' or
                metric_function == metrics.ATHENA_PARTITION_REFRESH_NAME):
            global_config = self.config['global']['infrastructure']['monitoring']

            # Ensure the nested config sections exist before adding the alarm
            global_config.setdefault('metric_alarms', {})
            metric_alarms = global_config['metric_alarms'].setdefault(metric_function, {})

            # Format the metric name for the aggregate metric
            alarm_settings = alarm_info.copy()
            alarm_settings['metric_name'] = '{}-{}'.format(metrics.FUNC_PREFIXES[metric_function],
                                                           alarm_info['metric_name'])

            new_alarms = self._add_metric_alarm_config(alarm_settings, metric_alarms)
            if new_alarms is not False:
                global_config['metric_alarms'][metric_function] = new_alarms
                LOGGER_CLI.info('Successfully added \'%s\' metric alarm to '
                                '\'conf/global.json\'.', alarm_settings['alarm_name'])

        else:
            # Add metric alarms on a per-cluster basis - these are added to the cluster config
            self._add_metric_alarm_per_cluster(alarm_info, metric_function)

        # Save all of the alarm updates to disk
        self.write()
Example #14
    def add_app(self, func_name, app_info):
        """Add a configuration for a new streamalert app integration function

        Args:
            func_name (str): Name of the app integration function being configured
            app_info (dict): The necessary values needed to begin configuring
                a new app integration

        Returns:
            bool: False if errors occurred, True otherwise
        """
        exists, prompt_for_auth, overwrite = False, True, False
        app = StreamAlertApp.get_app(app_info['type'])

        cluster_name = app_info['cluster']
        app_name = app_info['app_name']

        # Check to see if there is an existing configuration for this app integration
        cluster_config = self.config['clusters'][cluster_name]

        if func_name in cluster_config['modules'].get('stream_alert_apps', {}):
            prompt = (
                'An app with the name \'{}\' is already configured for cluster '
                '\'{}\'. Would you like to update the existing app\'s configuration'
                '?'.format(app_name, cluster_name))

            exists = True

            # Return if the user is not deliberately updating an existing config
            if not continue_prompt(message=prompt):
                return False

            prompt = (
                'Would you also like to update the authentication information for '
                'app integration with name \'{}\'?'.format(app_name))

            # If this is true, we shouldn't prompt again to warn about overwriting
            prompt_for_auth = overwrite = continue_prompt(message=prompt)

        if prompt_for_auth and not save_app_auth_info(app, app_info, func_name,
                                                      overwrite):
            return False

        apps_config = cluster_config['modules'].get('stream_alert_apps', {})
        if not exists:
            # Save a default app settings to the config for new apps
            new_app_config = {
                'app_name': app_info['app_name'],
                'concurrency_limit': 2,
                'log_level': 'info',
                'log_retention_days': 14,
                'memory': app_info['memory'],
                'metric_alarms': {
                    'errors': {
                        'enabled': True,
                        'evaluation_periods': 1,
                        'period_secs': 120
                    }
                },
                'schedule_expression': app_info['schedule_expression'],
                'timeout': app_info['timeout'],
                'type': app_info['type']
            }
            apps_config[func_name] = new_app_config
        else:
            # Allow for updating certain attributes for the app without overwriting
            # current parts of the configuration
            updated_app_config = {
                'memory': app_info['memory'],
                'schedule_expression': app_info['schedule_expression'],
                'timeout': app_info['timeout']
            }
            apps_config[func_name].update(updated_app_config)

        cluster_config['modules']['stream_alert_apps'] = apps_config

        # Add this service to the sources for this app integration
        # The `stream_alert_app` is purposely singular here
        app_sources = self.config['sources'].get('stream_alert_app', {})
        app_sources[func_name] = {'logs': [app.service()]}
        self.config['sources']['stream_alert_app'] = app_sources

        LOGGER.info(
            'Successfully added \'%s\' app integration to \'conf/clusters/%s.json\' '
            'for service \'%s\'.', app_info['app_name'], app_info['cluster'],
            app_info['type'])

        self.write()

        return True
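As a final sketch, the newer `add_app` variant takes the function name explicitly; the keys below mirror the ones the body reads, with hypothetical values throughout.

# Hypothetical invocation of the newer variant; values are illustrative
func_name = 'prefix_prod_duo_auth_duo-collector_app'
app_info = {
    'app_name': 'duo-collector',
    'cluster': 'prod',
    'type': 'duo_auth',
    'memory': 128,
    'timeout': 60,
    'schedule_expression': 'rate(10 minutes)',
}
if not cli_config.add_app(func_name, app_info):
    LOGGER.error('Failed to add app: %s', func_name)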
Example #15
def terraform_handler(options, config):
    """Handle all Terraform CLI operations

    Args:
        options (namedtuple): Parsed arguments from manage.py
        config (CLIConfig): Loaded StreamAlert CLI config
    """
    # Check for valid credentials
    if not check_credentials():
        return

    # Verify terraform is installed
    if not terraform_check():
        return
    # Use a named tuple to match the 'processor' attribute in the argparse options
    deploy_opts = namedtuple('DeployOptions', ['processor', 'clusters'])

    # Plan and Apply our streamalert infrastructure
    if options.subcommand == 'build':
        terraform_build(options, config)

    # generate terraform files
    elif options.subcommand == 'generate':
        if not terraform_generate(config=config):
            return

    elif options.subcommand == 'init-backend':
        run_command(['terraform', 'init'])

    # initialize streamalert infrastructure from a blank state
    elif options.subcommand == 'init':
        LOGGER_CLI.info('Initializing StreamAlert')

        # generate init Terraform files
        if not terraform_generate(config=config, init=True):
            return

        LOGGER_CLI.info('Initializing Terraform')
        if not run_command(['terraform', 'init']):
            sys.exit(1)

        # build init infrastructure
        LOGGER_CLI.info('Building Initial Infrastructure')
        init_targets = [
            'aws_s3_bucket.lambda_source', 'aws_s3_bucket.logging_bucket',
            'aws_s3_bucket.stream_alert_secrets',
            'aws_s3_bucket.terraform_remote_state',
            'aws_s3_bucket.streamalerts', 'aws_kms_key.stream_alert_secrets',
            'aws_kms_alias.stream_alert_secrets'
        ]
        if not tf_runner(targets=init_targets):
            LOGGER_CLI.error('An error occurred while running StreamAlert init')
            sys.exit(1)

        # generate the main.tf with remote state enabled
        LOGGER_CLI.info('Configuring Terraform Remote State')
        if not terraform_generate(config=config):
            return

        if not run_command(['terraform', 'init']):
            return

        LOGGER_CLI.info('Deploying Lambda Functions')
        # deploy both lambda functions
        deploy(deploy_opts(['rule', 'alert'], []), config)

        # create all remaining infrastructure
        LOGGER_CLI.info('Building Remainder Infrastructure')
        tf_runner()

    elif options.subcommand == 'clean':
        if not continue_prompt(
                message='Are you sure you want to clean all Terraform files?'):
            sys.exit(1)
        terraform_clean(config)

    elif options.subcommand == 'destroy':
        if not continue_prompt(message='Are you sure you want to destroy?'):
            sys.exit(1)

        if options.target:
            targets = []
            # Iterate over any targets to destroy. Global modules, like athena
            # are prefixed with `stream_alert_` while cluster based modules
            # are a combination of the target and cluster name
            for target in options.target:
                if target == 'athena':
                    targets.append('module.stream_alert_{}'.format(target))
                elif target == 'threat_intel_downloader':
                    targets.append('module.threat_intel_downloader')
                else:
                    targets.extend([
                        'module.{}_{}'.format(target, cluster)
                        for cluster in config.clusters()
                    ])

            tf_runner(targets=targets, action='destroy')
            return

        # Migrate back to local state so Terraform can successfully
        # destroy the S3 bucket used by the backend.
        if not terraform_generate(config=config, init=True):
            return

        if not run_command(['terraform', 'init']):
            return

        # Destroy all of the infrastructure
        if not tf_runner(action='destroy'):
            return

        # Remove old Terraform files
        terraform_clean(config)

    # get a quick status on our declared infrastructure
    elif options.subcommand == 'status':
        terraform_status(config)