def lambda_handler(options, config):
    """Handle all Lambda CLI operations.

    Args:
        options (namedtuple): Parsed arguments from manage.py, including
            the 'subcommand' and 'processor' attributes
        config (CLIConfig): Loaded StreamAlert config
    """
    subcommand = options.subcommand

    # Testing needs no credentials or Terraform state
    if subcommand == 'test':
        LOGGER_CLI.info('Testing: %s', ' '.join(options.processor))
        stream_alert_test(options, config)
        return

    if subcommand not in ('deploy', 'rollback'):
        return

    # Both deploy and rollback require valid credentials and
    # up-to-date Terraform files before touching infrastructure
    if not check_credentials():
        return

    if not terraform_generate(config=config):
        return

    if subcommand == 'deploy':
        LOGGER_CLI.info('Deploying: %s', ' '.join(options.processor))
        deploy(options, config)
    else:
        LOGGER_CLI.info('Rolling back: %s', ' '.join(options.processor))
        rollback(options, config)
def _terraform_init(config):
    """Initialize infrastructure using Terraform.

    Generates the init Terraform files, builds the bootstrap S3/KMS
    resources, migrates to remote state, deploys the Lambda functions,
    creates the Athena 'alerts' table, and finally builds the remaining
    infrastructure.

    Args:
        config (CLIConfig): Loaded StreamAlert CLI config

    Raises:
        SystemExit: If a Terraform init or build step fails
    """
    LOGGER_CLI.info('Initializing StreamAlert')

    # generate init Terraform files
    if not terraform_generate(config=config, init=True):
        return

    LOGGER_CLI.info('Initializing Terraform')
    if not run_command(['terraform', 'init']):
        sys.exit(1)

    # build init infrastructure
    LOGGER_CLI.info('Building Initial Infrastructure')
    init_targets = [
        'aws_s3_bucket.lambda_source',
        'aws_s3_bucket.logging_bucket',
        'aws_s3_bucket.stream_alert_secrets',
        'aws_s3_bucket.terraform_remote_state',
        'aws_s3_bucket.streamalerts',
        'aws_kms_key.stream_alert_secrets',
        'aws_kms_alias.stream_alert_secrets'
    ]
    if not tf_runner(targets=init_targets):
        LOGGER_CLI.error('An error occurred while running StreamAlert init')
        sys.exit(1)

    # generate the main.tf with remote state enabled
    LOGGER_CLI.info('Configuring Terraform Remote State')
    if not terraform_generate(config=config):
        return

    if not run_command(['terraform', 'init']):
        return

    # Use a named tuple to match the 'processor' attribute in the argparse options
    deploy_opts = namedtuple('DeployOptions', ['processor', 'clusters'])

    LOGGER_CLI.info('Deploying Lambda Functions')

    deploy(deploy_opts(['rule', 'alert', 'alert_merger', 'athena'], []), config)

    # we need to manually create the streamalerts table since terraform does not support this
    # See: https://github.com/terraform-providers/terraform-provider-aws/issues/1486
    alerts_bucket = '{}.streamalerts'.format(
        config['global']['account']['prefix'])
    create_table('alerts', alerts_bucket, config)

    LOGGER_CLI.info('Building Remainder Infrastructure')
    # Bug fix: the result of the final build was previously discarded, so a
    # failure here went completely unreported. Handle it like the initial
    # build step above.
    if not tf_runner(refresh=False):
        LOGGER_CLI.error('An error occurred while building infrastructure')
        sys.exit(1)
def lambda_handler(options, config):
    """Handle all Lambda CLI operations.

    Args:
        options (namedtuple): Parsed arguments from manage.py
        config (CLIConfig): Loaded StreamAlert config
    """
    if options.subcommand == 'test':
        stream_alert_test(options, config)
        return

    # Both deploy and rollback require the Terraform code to be up to date
    handlers = {'deploy': deploy, 'rollback': rollback}
    handler = handlers.get(options.subcommand)
    if handler is None:
        return

    if not terraform_generate(config=config):
        return

    handler(options, config)
def terraform_handler(options, config):
    """Handle all Terraform CLI operations.

    Dispatches on options.subcommand: 'build', 'generate', 'init-backend',
    'init', 'clean', 'destroy', and 'status'.

    Args:
        options (namedtuple): Parsed arguments from manage.py
        config (CLIConfig): Loaded StreamAlert config

    Raises:
        SystemExit: If an init build step fails, or the user declines a
            clean/destroy confirmation prompt
    """
    # Check for valid credentials
    if not check_credentials():
        return

    # Verify terraform is installed
    if not terraform_check():
        return

    # Use a named tuple to match the 'processor' attribute in the argparse options
    deploy_opts = namedtuple('DeployOptions', ['processor', 'clusters'])

    # Plan and Apply our streamalert infrastructure
    if options.subcommand == 'build':
        terraform_build(options, config)

    # generate terraform files
    elif options.subcommand == 'generate':
        if not terraform_generate(config=config):
            return

    elif options.subcommand == 'init-backend':
        run_command(['terraform', 'init'])

    # initialize streamalert infrastructure from a blank state
    elif options.subcommand == 'init':
        LOGGER_CLI.info('Initializing StreamAlert')

        # generate init Terraform files
        if not terraform_generate(config=config, init=True):
            return

        LOGGER_CLI.info('Initializing Terraform')
        if not run_command(['terraform', 'init']):
            sys.exit(1)

        # build init infrastructure
        LOGGER_CLI.info('Building Initial Infrastructure')
        init_targets = [
            'aws_s3_bucket.lambda_source',
            'aws_s3_bucket.logging_bucket',
            'aws_s3_bucket.stream_alert_secrets',
            'aws_s3_bucket.terraform_remote_state',
            'aws_s3_bucket.streamalerts',
            'aws_kms_key.stream_alert_secrets',
            'aws_kms_alias.stream_alert_secrets'
        ]
        if not tf_runner(targets=init_targets):
            # Fixed typo in error message: 'occured' -> 'occurred'
            LOGGER_CLI.error('An error occurred while running StreamAlert init')
            sys.exit(1)

        # generate the main.tf with remote state enabled
        LOGGER_CLI.info('Configuring Terraform Remote State')
        if not terraform_generate(config=config):
            return

        if not run_command(['terraform', 'init']):
            return

        LOGGER_CLI.info('Deploying Lambda Functions')
        # deploy both lambda functions
        deploy(deploy_opts(['rule', 'alert'], []), config)
        # create all remainder infrastructure
        LOGGER_CLI.info('Building Remainder Infrastructure')
        tf_runner()

    elif options.subcommand == 'clean':
        if not continue_prompt(message='Are you sure you want to clean all Terraform files?'):
            sys.exit(1)
        terraform_clean(config)

    elif options.subcommand == 'destroy':
        if not continue_prompt(message='Are you sure you want to destroy?'):
            sys.exit(1)

        if options.target:
            targets = []
            # Iterate over any targets to destroy. Global modules, like athena
            # are prefixed with `stream_alert_` while cluster based modules
            # are a combination of the target and cluster name
            for target in options.target:
                if target == 'athena':
                    targets.append('module.stream_alert_{}'.format(target))
                elif target == 'threat_intel_downloader':
                    targets.append('module.threat_intel_downloader')
                else:
                    targets.extend([
                        'module.{}_{}'.format(target, cluster)
                        for cluster in config.clusters()
                    ])

            tf_runner(targets=targets, action='destroy')
            return

        # Migrate back to local state so Terraform can successfully
        # destroy the S3 bucket used by the backend.
        if not terraform_generate(config=config, init=True):
            return

        if not run_command(['terraform', 'init']):
            return

        # Destroy all of the infrastructure
        if not tf_runner(action='destroy'):
            return

        # Remove old Terraform files
        terraform_clean(config)

    # get a quick status on our declared infrastructure
    elif options.subcommand == 'status':
        terraform_status(config)
def terraform_init(options, config):
    """Initialize infrastructure using Terraform.

    Generates the init Terraform files, builds the bootstrap S3/KMS
    resources, migrates to remote state, deploys the Lambda functions,
    creates the Athena 'alerts' table, and finally builds the remaining
    infrastructure.

    Args:
        options (namedtuple): Parsed arguments from manage.py; only the
            'backend' attribute is read here
        config (CLIConfig): Loaded StreamAlert config

    Returns:
        bool: False if errors occurred, True otherwise
    """
    # Stop here if only initializing the backend
    if options.backend:
        return _terraform_init_backend()

    LOGGER.info('Initializing StreamAlert')

    # generate init Terraform files
    if not terraform_generate_handler(config=config, init=True):
        return False

    LOGGER.info('Initializing Terraform')
    if not run_command(['terraform', 'init']):
        return False

    # build init infrastructure
    LOGGER.info('Building initial infrastructure')
    init_targets = [
        'aws_s3_bucket.lambda_source',
        'aws_s3_bucket.logging_bucket',
        'aws_s3_bucket.stream_alert_secrets',
        'aws_s3_bucket.terraform_remote_state',
        'aws_s3_bucket.streamalerts',
        'aws_kms_key.server_side_encryption',
        'aws_kms_alias.server_side_encryption',
        'aws_kms_key.stream_alert_secrets',
        'aws_kms_alias.stream_alert_secrets'
    ]
    if not tf_runner(targets=init_targets):
        LOGGER.error('An error occurred while running StreamAlert init')
        return False

    # generate the main.tf with remote state enabled
    LOGGER.info('Configuring Terraform Remote State')
    if not terraform_generate_handler(
            config=config, check_tf=False, check_creds=False):
        return False

    if not run_command(['terraform', 'init']):
        return False

    LOGGER.info('Deploying Lambda Functions')

    functions = ['rule', 'alert', 'alert_merger', 'athena', 'classifier']

    deploy(functions, config)

    # we need to manually create the streamalerts table since terraform does not support this
    # See: https://github.com/terraform-providers/terraform-provider-aws/issues/1486
    alerts_bucket = '{}.streamalerts'.format(
        config['global']['account']['prefix'])
    create_table('alerts', alerts_bucket, config)

    # Fixed typo in log message: 'remainding' -> 'remaining'
    LOGGER.info('Building remaining infrastructure')

    return tf_runner(refresh=False)