def lambda_handler(options, config):
    """Handle all Lambda CLI operations.

    Args:
        options (argparse.Namespace): Parsed arguments from manage.py,
            including ``subcommand`` and the list of ``processor`` names
        config (CLIConfig): Loaded StreamAlert config
    """
    if options.subcommand in ('deploy', 'rollback'):
        # Both subcommands share the same preflight: valid AWS credentials
        # and up-to-date Terraform code (previously duplicated per branch)
        if not check_credentials():
            return
        if not terraform_generate(config=config):
            return

        if options.subcommand == 'deploy':
            LOGGER_CLI.info('Deploying: %s', ' '.join(options.processor))
            deploy(options, config)
        else:
            LOGGER_CLI.info('Rolling back: %s', ' '.join(options.processor))
            rollback(options, config)

    elif options.subcommand == 'test':
        # Tests run locally and need no credential/terraform preflight
        LOGGER_CLI.info('Testing: %s', ' '.join(options.processor))
        stream_alert_test(options, config)
def _terraform_init_backend():
    """Initialize the infrastructure backend (S3) using Terraform

    Returns:
        bool: False if errors occurred, True otherwise
    """
    # Both valid AWS credentials and an installed terraform binary are
    # required before touching the backend
    for precondition in (check_credentials, terraform_check):
        if not precondition():
            return False

    LOGGER.info('Initializing StreamAlert backend')
    return run_command(['terraform', 'init'])
def terraform_destroy_handler(options, config):
    """Use Terraform to destroy any existing infrastructure

    Args:
        options (argparse.Namespace): Parsed arguments from manage.py
        config (CLIConfig): Loaded StreamAlert config

    Returns:
        bool: False if errors occurred, True otherwise
    """
    # Preconditions: AWS credentials and an installed terraform binary
    if not check_credentials():
        return False
    if not terraform_check():
        return False

    # Confirm up front, since multiple Terraform commands may run below
    if not continue_prompt(message='Are you sure you want to destroy?'):
        return False

    # Targeted destroy: only tear down the requested (validated) modules
    if options.target:
        target_modules, valid = _get_valid_tf_targets(config, options.target)
        if not valid:
            return False

        return tf_runner(
            action='destroy',
            auto_approve=True,
            targets=target_modules if target_modules else None
        )

    # Full destroy: migrate back to local state first so Terraform can
    # remove the S3 bucket that backs the remote state. The terraform and
    # credential checks already ran above, so they are skipped here.
    generated = terraform_generate_handler(
        config=config, init=True, check_tf=False, check_creds=False)
    if not generated:
        return False

    if not run_command(['terraform', 'init']):
        return False

    # Destroy all of the infrastructure
    if not tf_runner(action='destroy', auto_approve=True):
        return False

    # Finally, clear out the stale Terraform files
    return terraform_clean_handler()
def terraform_handler(options, config):
    """Handle all Terraform CLI operations

    Args:
        options (namedtuple): Parsed arguments from manage.py
        config (CLIConfig): Loaded StreamAlert CLI
    """
    # Every Terraform subcommand requires AWS credentials and an
    # installed terraform binary
    if not check_credentials():
        return
    if not terraform_check():
        return

    def _clean():
        # Require explicit confirmation before wiping Terraform files
        if not continue_prompt(
                message='Are you sure you want to clean all Terraform files?'):
            sys.exit(1)
        _terraform_clean(config)

    # Dispatch table mapping each subcommand to a zero-argument handler
    dispatch = {
        # Plan and Apply our streamalert infrastructure
        'build': lambda: _terraform_build(options, config),
        # generate terraform files
        'generate': lambda: terraform_generate(config=config),
        'init-backend': lambda: run_command(['terraform', 'init']),
        # initialize streamalert infrastructure from a blank state
        'init': lambda: _terraform_init(config),
        'clean': _clean,
        'destroy': lambda: _terraform_destroy(options, config),
        # get a quick status on our declared infrastructure
        'status': lambda: terraform_status(config),
    }

    handler = dispatch.get(options.subcommand)
    if handler:
        handler()
def terraform_generate_handler(config, init=False, check_tf=True, check_creds=True):
    """Generate all Terraform plans for the configured clusters.

    Keyword Args:
        config (dict): The loaded config from the 'conf/' directory
        init (bool): Indicates if main.tf.json is generated for `init`
        check_tf (bool): Verify that terraform is installed before generating
        check_creds (bool): Verify AWS credentials before generating

    Returns:
        bool: Result of cluster generating

    Raises:
        InvalidClusterName: If a cluster uses a restricted name
    """
    # Check for valid credentials
    if check_creds and not check_credentials():
        return False

    # Verify terraform is installed
    if check_tf and not terraform_check():
        return False

    cleanup_old_tf_files(config)

    # Setup the main.tf.json file
    LOGGER.debug('Generating cluster file: main.tf.json')
    _write_tf_json('terraform/main.tf.json', generate_main(config, init=init))

    # Return early during the init process, clusters are not needed yet
    if init:
        return True

    # Setup cluster files
    for cluster in config.clusters():
        if cluster in RESTRICTED_CLUSTER_NAMES:
            raise InvalidClusterName(
                'Rename cluster "main" or "athena" to something else!')

        LOGGER.debug('Generating cluster file: %s.tf.json', cluster)
        cluster_dict = generate_cluster(config=config, cluster_name=cluster)
        if not cluster_dict:
            LOGGER.error(
                'An error was generated while creating the %s cluster', cluster)
            return False

        _write_tf_json('terraform/{}.tf.json'.format(cluster), cluster_dict)

    # Cluster-aggregate CloudWatch metric filters/alarms are only written
    # when the config actually produces any
    metric_filters = generate_aggregate_cloudwatch_metric_filters(config)
    if metric_filters:
        _write_tf_json('terraform/metric_filters.tf.json', metric_filters)

    metric_alarms = generate_aggregate_cloudwatch_metric_alarms(config)
    if metric_alarms:
        _write_tf_json('terraform/metric_alarms.tf.json', metric_alarms)

    # Each optional global Lambda function follows the exact same generate
    # pattern; drive them from a table instead of six copy-pasted calls.
    # Tuples are: (config_name, generate_func, tf_tmp_file, message)
    global_lambda_settings = (
        ('athena_partition_refresh_config', generate_athena,
         'terraform/athena.tf.json',
         'Removing old Athena Terraform file'),
        ('threat_intel_downloader_config', generate_threat_intel_downloader,
         'terraform/ti_downloader.tf.json',
         'Removing old Threat Intel Downloader Terraform file'),
        ('rule_promotion_config', generate_rule_promotion,
         'terraform/rule_promotion.tf.json',
         'Removing old Rule Promotion Terraform file'),
        ('rules_engine_config', generate_rules_engine,
         'terraform/rules_engine.tf.json',
         'Removing old Rules Engine Terraform file'),
        ('alert_processor_config', generate_alert_processor,
         'terraform/alert_processor.tf.json',
         'Removing old Alert Processor Terraform file'),
        ('alert_merger_config', generate_alert_merger,
         'terraform/alert_merger.tf.json',
         'Removing old Alert Merger Terraform file'),
    )
    for config_name, generate_func, tf_tmp_file, message in global_lambda_settings:
        generate_global_lambda_settings(
            config,
            config_name=config_name,
            generate_func=generate_func,
            tf_tmp_file=tf_tmp_file,
            message=message
        )

    return True


def _write_tf_json(path, data):
    """Dump ``data`` to ``path`` as indented, key-sorted JSON.

    Args:
        path (str): Destination file path for the Terraform JSON
        data (dict): JSON-serializable Terraform configuration
    """
    with open(path, 'w') as tf_file:
        json.dump(data, tf_file, indent=2, sort_keys=True)
def _check_prereqs(self):
    """Return True when prerequisites are met; only LIVE runs need credentials."""
    return check_credentials() if self._type == self.Types.LIVE else True
def terraform_handler(options, config):
    """Handle all Terraform CLI operations

    Args:
        options (namedtuple): Parsed arguments from manage.py
        config (CLIConfig): Loaded StreamAlert CLI config
    """
    # Every subcommand requires AWS credentials and a terraform binary
    if not check_credentials():
        return
    if not terraform_check():
        return

    # Use a named tuple to match the 'processor' attribute in the argparse options
    deploy_opts = namedtuple('DeployOptions', ['processor', 'clusters'])

    subcommand = options.subcommand

    if subcommand == 'build':
        # Plan and Apply our streamalert infrastructure
        terraform_build(options, config)

    elif subcommand == 'generate':
        # generate terraform files
        if not terraform_generate(config=config):
            return

    elif subcommand == 'init-backend':
        run_command(['terraform', 'init'])

    elif subcommand == 'init':
        # initialize streamalert infrastructure from a blank state
        LOGGER_CLI.info('Initializing StreamAlert')

        # generate init Terraform files
        if not terraform_generate(config=config, init=True):
            return

        LOGGER_CLI.info('Initializing Terraform')
        if not run_command(['terraform', 'init']):
            sys.exit(1)

        # build init infrastructure
        LOGGER_CLI.info('Building Initial Infrastructure')
        init_targets = [
            'aws_s3_bucket.lambda_source',
            'aws_s3_bucket.logging_bucket',
            'aws_s3_bucket.stream_alert_secrets',
            'aws_s3_bucket.terraform_remote_state',
            'aws_s3_bucket.streamalerts',
            'aws_kms_key.stream_alert_secrets',
            'aws_kms_alias.stream_alert_secrets'
        ]
        if not tf_runner(targets=init_targets):
            LOGGER_CLI.error('An error occured while running StreamAlert init')
            sys.exit(1)

        # generate the main.tf with remote state enabled
        LOGGER_CLI.info('Configuring Terraform Remote State')
        if not terraform_generate(config=config):
            return
        if not run_command(['terraform', 'init']):
            return

        # deploy both lambda functions
        LOGGER_CLI.info('Deploying Lambda Functions')
        deploy(deploy_opts(['rule', 'alert'], []), config)

        # create all remainder infrastructure
        LOGGER_CLI.info('Building Remainder Infrastructure')
        tf_runner()

    elif subcommand == 'clean':
        if not continue_prompt(
                message='Are you sure you want to clean all Terraform files?'):
            sys.exit(1)
        terraform_clean(config)

    elif subcommand == 'destroy':
        if not continue_prompt(message='Are you sure you want to destroy?'):
            sys.exit(1)

        if options.target:
            # Global modules, like athena, are prefixed with `stream_alert_`
            # while cluster based modules combine the target and cluster name
            destroy_targets = []
            for target in options.target:
                if target == 'athena':
                    destroy_targets.append('module.stream_alert_{}'.format(target))
                elif target == 'threat_intel_downloader':
                    destroy_targets.append('module.threat_intel_downloader')
                else:
                    destroy_targets.extend(
                        'module.{}_{}'.format(target, cluster)
                        for cluster in config.clusters()
                    )
            tf_runner(targets=destroy_targets, action='destroy')
            return

        # Migrate back to local state so Terraform can successfully
        # destroy the S3 bucket used by the backend.
        if not terraform_generate(config=config, init=True):
            return
        if not run_command(['terraform', 'init']):
            return

        # Destroy all of the infrastructure
        if not tf_runner(action='destroy'):
            return

        # Remove old Terraform files
        terraform_clean(config)

    elif subcommand == 'status':
        # get a quick status on our declared infrastructure
        terraform_status(config)