def handler(cls, options, config):
    """Initialize infrastructure using Terraform

    Args:
        options (argparse.Namespace): Parsed arguments
        config (CLIConfig): Loaded StreamAlert config

    Returns:
        bool: False if errors occurred, True otherwise
    """
    # Stop here if only initializing the backend
    if options.backend:
        return cls._terraform_init_backend(config)

    LOGGER.info('Initializing StreamAlert')

    # generate init Terraform files
    if not terraform_generate_handler(config=config, init=True):
        return False

    LOGGER.info('Initializing Terraform')
    if not run_command(['terraform', 'init']):
        return False

    # build init infrastructure
    LOGGER.info('Building initial infrastructure')
    init_targets = [
        'aws_s3_bucket.lambda_source',
        'aws_s3_bucket.logging_bucket',
        'aws_s3_bucket.streamalert_secrets',
        'aws_s3_bucket.terraform_remote_state',
        'aws_s3_bucket.streamalerts',
        'aws_kms_key.server_side_encryption',
        'aws_kms_alias.server_side_encryption',
        'aws_kms_key.streamalert_secrets',
        'aws_kms_alias.streamalert_secrets',
        'aws_dynamodb_table.terraform_remote_state_lock',
    ]

    if not tf_runner(targets=init_targets):
        LOGGER.error('An error occurred while running StreamAlert init')
        return False

    # generate the main.tf with remote state enabled
    LOGGER.info('Configuring Terraform Remote State')
    if not terraform_generate_handler(config=config, check_tf=False, check_creds=False):
        return False

    if not run_command(['terraform', 'init']):
        return False

    LOGGER.info('Deploying Lambda Functions')

    functions = ['rule', 'alert', 'alert_merger', 'athena', 'classifier']
    deploy(functions, config)

    # we need to manually create the streamalerts table since terraform does not support this
    # See: https://github.com/terraform-providers/terraform-provider-aws/issues/1486
    alerts_bucket = firehose_alerts_bucket(config)
    create_table('alerts', alerts_bucket, config)

    LOGGER.info('Building remaining infrastructure')
    return tf_runner(refresh=False)

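# The tf_runner helper used above is defined elsewhere in the repo. Below is a
# minimal sketch of what it does, assuming it shells out to terraform via
# run_command and maps its keyword arguments onto the -refresh, -auto-approve,
# and -target flags (an assumption based on how it is called throughout this
# section; the real implementation likely does more, e.g. planning and prompting).
def tf_runner(action='apply', refresh=True, auto_approve=False, targets=None):
    command = ['terraform', action, '-refresh={}'.format(str(refresh).lower())]
    if auto_approve:
        command.append('-auto-approve')
    # Restrict the run to specific modules/resources when targets are given
    for target in targets or []:
        command.append('-target={}'.format(target))
    return run_command(command)
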
def handler(cls, options, config):
    """Main handler for the Kinesis parser

    Args:
        options (argparse.Namespace): Parsed arguments
        config (CLIConfig): Loaded StreamAlert config

    Returns:
        bool: False if errors occurred, True otherwise
    """
    enable = options.action == 'enable-events'
    LOGGER.info('%s Kinesis Events', 'Enabling' if enable else 'Disabling')

    for cluster in options.clusters or config.clusters():
        if 'kinesis_events' in config['clusters'][cluster]['modules']:
            config['clusters'][cluster]['modules']['kinesis_events']['enabled'] = enable

    config.write()

    if options.skip_terraform:
        return True  # not an error

    if not terraform_generate_handler(config):
        return False

    return tf_runner(
        action='apply',
        targets=[
            'module.{}_{}'.format('kinesis_events', cluster)
            for cluster in config.clusters()
        ]
    )

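# Example invocations for the handler above (subcommand and flag names are
# inferred from the options it reads; treat as illustrative, not authoritative):
#
#   python manage.py kinesis enable-events --clusters prod
#   python manage.py kinesis disable-events --skip-terraform
#
# Each targeted cluster maps to a Terraform module address of the form
# module.kinesis_events_<cluster>.
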
def handler(cls, options, config):
    """Rollback the current production Lambda version(s) by 1

    Args:
        options: Argparse parsed options
        config (dict): Parsed configuration from conf/

    Returns:
        bool: False if errors occurred, True otherwise
    """
    # Make sure the Terraform code is up to date
    if not terraform_generate_handler(config=config):
        return False

    LOGGER.info('Rolling back: %s', ' '.join(options.function))

    rollback_all = 'all' in options.function
    prefix = config['global']['account']['prefix']
    clusters = sorted(options.clusters or config.clusters())
    client = boto3.client('lambda')

    # Track the success of rolling back the functions
    success = True

    if rollback_all or 'alert' in options.function:
        success = success and _rollback_production(
            client, '{}_streamalert_alert_processor'.format(prefix))

    if rollback_all or 'alert_merger' in options.function:
        success = success and _rollback_production(
            client, '{}_streamalert_alert_merger'.format(prefix))

    if rollback_all or 'apps' in options.function:
        for cluster in clusters:
            apps_config = config['clusters'][cluster]['modules'].get('streamalert_apps', {})
            for lambda_name in sorted(apps_config):
                success = success and _rollback_production(client, lambda_name)

    if rollback_all or 'athena' in options.function:
        success = success and _rollback_production(
            client, '{}_streamalert_athena_partition_refresh'.format(prefix))

    if rollback_all or 'classifier' in options.function:
        for cluster in clusters:
            success = success and _rollback_production(
                client, '{}_{}_streamalert_classifier'.format(prefix, cluster))

    if rollback_all or 'rule' in options.function:
        success = success and _rollback_production(
            client, '{}_streamalert_rules_engine'.format(prefix))

    if rollback_all or 'threat_intel_downloader' in options.function:
        success = success and _rollback_production(
            client, '{}_streamalert_threat_intel_downloader'.format(prefix))

    return success

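# A minimal sketch of the _rollback_production helper used above, assuming it
# re-points the function's 'production' alias at the previous published
# version via the boto3 Lambda API. Illustrative only; the repo's actual
# implementation may differ.
def _rollback_production(client, function_name):
    """Roll the 'production' alias of function_name back by one version"""
    version = client.get_alias(
        FunctionName=function_name, Name='production')['FunctionVersion']

    # $LATEST and version 1 have no previous version to roll back to
    if version == '$LATEST' or int(version) <= 1:
        LOGGER.warning('%s cannot be rolled back from version %s',
                       function_name, version)
        return False

    client.update_alias(
        FunctionName=function_name,
        Name='production',
        FunctionVersion=str(int(version) - 1)
    )
    return True
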
def handler(cls, options, config):
    """Run Terraform with an optional set of targets and clusters

    Args:
        options (argparse.Namespace): Parsed arguments from manage.py
        config (CLIConfig): Loaded StreamAlert config

    Returns:
        bool: False if errors occurred, True otherwise
    """
    if not terraform_generate_handler(config=config):
        return False

    # Create log tables only when file_format is set to "json", and return
    # early if log table creation fails. This capability will be phased out
    # in a future release.
    if get_data_file_format(config) == 'json' and not create_log_tables(config=config):
        return False

    target_modules, valid = _get_valid_tf_targets(config, options.target)
    if not valid:
        return False

    return terraform_runner(config, targets=target_modules if target_modules else None)

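# A hedged sketch of the create_log_tables helper referenced above.
# Assumption: it creates an Athena table for each log enabled for Firehose
# delivery, reusing the same create_table helper the init handlers use. The
# firehose_data_bucket helper and the enabled_logs config key are assumptions
# for illustration.
def create_log_tables(config):
    firehose_config = config['global']['infrastructure'].get('firehose', {})
    if not firehose_config.get('enabled'):
        return True  # nothing to create is not an error

    data_bucket = firehose_data_bucket(config)  # hypothetical helper
    for log_name in firehose_config.get('enabled_logs', {}):
        if not create_table(log_name, data_bucket, config):
            return False

    return True
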
def handler(cls, options, config):
    """CLI handler for deploying new versions of Lambda functions

    Args:
        options (argparse.Namespace): Parsed argparse namespace from the CLI
        config (CLIConfig): Loaded StreamAlert config

    Returns:
        bool: False if errors occurred, True otherwise
    """
    # Make sure the Terraform code is up to date
    if not terraform_generate_handler(config=config):
        return False

    functions = options.function

    if 'all' in options.function:
        functions = {
            'alert',
            'alert_merger',
            'apps',
            'athena',
            'classifier',
            'rule',
            'rule_promo',
            'scheduled_queries',
            'threat_intel_downloader',
        }

    if not deploy(functions, config, options.clusters):
        return False

    # Update the rule table now if the rules engine is being deployed
    if 'rule' in functions:
        _update_rule_table(options, config)

    return True

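# A hedged sketch of _update_rule_table. Assumption: when rule staging is
# enabled, the DynamoDB rule table is refreshed after deploying the rules
# engine so newly added rules start out staged. The rule_staging config key,
# the skip_rule_staging option, and the RuleTable helper are assumptions for
# illustration.
def _update_rule_table(options, config):
    # Nothing to do unless the rule staging feature is enabled
    if not config['global']['infrastructure'].get('rule_staging', {}).get('enabled'):
        return

    table_name = '{}_streamalert_rules'.format(config['global']['account']['prefix'])
    RuleTable(table_name).update(getattr(options, 'skip_rule_staging', False))
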
def handler(cls, options, config):
    """Use Terraform to destroy any existing infrastructure

    Args:
        options (argparse.Namespace): Parsed arguments from manage.py
        config (CLIConfig): Loaded StreamAlert config

    Returns:
        bool: False if errors occurred, True otherwise
    """
    # Check for valid credentials
    if not check_credentials():
        return False

    # Verify terraform is installed
    if not terraform_check():
        return False

    # Ask for approval here since multiple Terraform commands may be necessary
    if not continue_prompt(message='Are you sure you want to destroy?'):
        return False

    if options.target:
        target_modules, valid = _get_valid_tf_targets(config, options.target)
        if not valid:
            return False

        return tf_runner(
            action='destroy',
            auto_approve=True,
            targets=target_modules if target_modules else None
        )

    # Migrate back to local state so Terraform can successfully destroy the
    # S3 bucket used by the backend. Do not check for terraform or aws creds
    # again since these were checked above
    if not terraform_generate_handler(config=config, init=True, check_tf=False,
                                      check_creds=False):
        return False

    if not run_command(['terraform', 'init']):
        return False

    # Destroy all of the infrastructure
    if not tf_runner(action='destroy', auto_approve=True):
        return False

    # Remove old Terraform files
    return TerraformCleanCommand.handler(options, config)

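# Minimal sketches of the guard helpers used above, assuming check_credentials
# validates the AWS session via STS and continue_prompt is a simple y/N
# confirmation. Both are illustrative, not the repo's exact implementations.
import boto3
from botocore.exceptions import ClientError, NoCredentialsError

def check_credentials():
    try:
        boto3.client('sts').get_caller_identity()
    except (ClientError, NoCredentialsError):
        LOGGER.error('Invalid or missing AWS credentials')
        return False
    return True

def continue_prompt(message):
    response = input('{} (y/N): '.format(message))
    return response.strip().lower() == 'y'
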
def handler(cls, options, config):
    """Rollback the current production Lambda version(s) by 1

    Args:
        options: Argparse parsed options
        config (dict): Parsed configuration from conf/

    Returns:
        bool: False if errors occurred, True otherwise
    """
    # Make sure the Terraform code is up to date
    if not terraform_generate_handler(config=config):
        return False

    functions = function_map()
    targeted_funcs = set(options.functions)
    functions = {key: value for key, value in functions.items() if key in targeted_funcs}

    LOGGER.info('Rolling back: %s', ', '.join(sorted(functions)))

    prefix = config['global']['account']['prefix']
    clusters = sorted(options.clusters or config.clusters())
    client = boto3.client('lambda')

    # Track the success of rolling back the functions
    success = True
    for func, suffix in functions.items():
        if suffix:  # A suffix implies this is a standard function naming convention
            success = success and _rollback_production(
                client, '{}_streamalert_{}'.format(prefix, suffix))
        elif func == 'apps':  # Apps need special handling due to unique naming
            for cluster in clusters:
                cluster_modules = config['clusters'][cluster]['modules']
                apps_config = cluster_modules.get('streamalert_apps', {})
                for lambda_name in sorted(apps_config):
                    success = success and _rollback_production(client, lambda_name)
        elif func == 'classifier':  # Classifiers need special handling due to clustering
            for cluster in clusters:
                success = success and _rollback_production(
                    client, '{}_{}_streamalert_{}'.format(prefix, cluster, func))

    return success

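# A hypothetical sketch of the function_map helper used above, inferred from
# the function names in the earlier per-function rollback handler. A falsy
# suffix marks functions that need special per-cluster or per-app naming; the
# real map may contain additional entries (e.g. rule_promo).
def function_map():
    return {
        'alert': 'alert_processor',
        'alert_merger': 'alert_merger',
        'apps': None,  # app function names are read from cluster config
        'athena': 'athena_partition_refresh',
        'classifier': None,  # classifier names include the cluster
        'rule': 'rules_engine',
        'threat_intel_downloader': 'threat_intel_downloader',
    }
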
def handler(cls, options, config):
    """Run Terraform with an optional set of targets and clusters

    Args:
        options (argparse.Namespace): Parsed arguments from manage.py
        config (CLIConfig): Loaded StreamAlert config

    Returns:
        bool: False if errors occurred, True otherwise
    """
    if not terraform_generate_handler(config=config):
        return False

    target_modules, valid = _get_valid_tf_targets(config, options.target)
    if not valid:
        return False

    return tf_runner(targets=target_modules if target_modules else None)

def _terraform_init_backend(config):
    """Initialize the infrastructure backend (S3) using Terraform

    Returns:
        bool: False if errors occurred, True otherwise
    """
    # Check for valid credentials
    if not check_credentials():
        return False

    # Verify terraform is installed
    if not terraform_check():
        return False

    # See generate_main() for how it uses the `init` kwarg for the local/remote backend
    if not terraform_generate_handler(config=config, init=False):
        return False

    LOGGER.info('Initializing StreamAlert backend')
    return run_command(['terraform', 'init'])

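# A minimal sketch of run_command, assuming it is a thin subprocess wrapper
# that returns True on a zero exit code; the optional cwd argument mirrors its
# use elsewhere in this section. Illustrative only.
import subprocess

def run_command(args, cwd=None):
    try:
        subprocess.check_call(args, cwd=cwd)
    except (subprocess.CalledProcessError, OSError) as err:
        LOGGER.error('Error running command "%s": %s', ' '.join(args), err)
        return False
    return True
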
def get_tf_modules(config, generate=False):
    """Collect the Terraform modules and resources defined in the generated .tf.json files

    Args:
        config (CLIConfig): Loaded StreamAlert config
        generate (bool): Whether to regenerate the Terraform files before scanning

    Returns:
        dict: Sets of discovered names, keyed by 'module' and 'resource'
    """
    if generate:
        if not terraform_generate_handler(config=config, check_tf=False, check_creds=False):
            return False

    modules = set()
    resources = set()
    for root, _, files in os.walk(config.build_directory):
        for file_name in files:
            path = os.path.join(root, file_name)
            if path.endswith('.tf.json'):
                with open(path, 'r') as tf_file:
                    tf_data = json.load(tf_file)

                # not every file defines modules, so use .get to avoid a KeyError
                modules.update(tf_data.get('module', {}))
                resources.update(
                    '{}.{}'.format(resource, value)
                    for resource, values in tf_data.get('resource', {}).items()
                    for value in values
                )

    return {'module': modules, 'resource': resources}

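# A sketch of the _get_valid_tf_targets helper used by the build and destroy
# handlers, assuming it expands user-supplied patterns against the modules and
# resources discovered by get_tf_modules above. The fnmatch-style matching is
# an assumption; the real helper may match differently.
from fnmatch import fnmatch

def _get_valid_tf_targets(config, targets):
    matches = set()
    if not targets:
        return matches, True  # no targets supplied; caller targets everything

    tf_modules = get_tf_modules(config, generate=True)
    if not tf_modules:
        return matches, False

    for target in targets:
        # modules are addressed as "module.<name>", resources as "<type>.<name>"
        found = {'module.{}'.format(name)
                 for name in tf_modules['module'] if fnmatch(name, target)}
        found.update(res for res in tf_modules['resource'] if fnmatch(res, target))
        if not found:
            LOGGER.error('Invalid Terraform target supplied: %s', target)
            return set(), False
        matches.update(found)

    return matches, True
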
def handler(cls, options, config):
    """CLI handler for deploying new versions of Lambda functions

    Args:
        options (argparse.Namespace): Parsed argparse namespace from the CLI
        config (CLIConfig): Loaded StreamAlert config

    Returns:
        bool: False if errors occurred, True otherwise
    """
    # Make sure the Terraform code is up to date
    if not terraform_generate_handler(config=config):
        return False

    if not deploy(config, options.functions, options.clusters):
        return False

    # Update the rule table now if the rules engine is being deployed
    if 'rule' in set(options.functions):
        _update_rule_table(options, config)

    return True

def handler(cls, options, config):
    """Initialize infrastructure using Terraform

    Args:
        config (CLIConfig): Loaded StreamAlert config

    Returns:
        bool: False if errors occurred, True otherwise
    """
    LOGGER.info('Initializing StreamAlert')

    # generate init Terraform files
    if not terraform_generate_handler(config=config, init=True):
        return False

    LOGGER.info('Initializing Terraform')
    if not run_command(['terraform', 'init'], cwd=config.build_directory):
        return False

    # build init infrastructure
    LOGGER.info('Building initial infrastructure')
    init_targets = [
        'aws_s3_bucket.lambda_source',
        'aws_s3_bucket.logging_bucket',
        'aws_s3_bucket.streamalert_secrets',
        'aws_s3_bucket.terraform_remote_state',
        'aws_s3_bucket.streamalerts',
        'aws_kms_key.server_side_encryption',
        'aws_kms_alias.server_side_encryption',
        'aws_kms_key.streamalert_secrets',
        'aws_kms_alias.streamalert_secrets',
        'module.streamalert_athena',  # required for the alerts table
        'aws_dynamodb_table.terraform_remote_state_lock',
    ]

    # this bucket must exist before the log tables can be created, but
    # shouldn't be created unless the firehose is enabled
    if config['global']['infrastructure'].get('firehose', {}).get('enabled'):
        init_targets.append('aws_s3_bucket.streamalert_data')

    if not terraform_runner(config, targets=init_targets):
        LOGGER.error('An error occurred while running StreamAlert init')
        return False

    # generate the main.tf with remote state enabled
    LOGGER.info('Configuring Terraform Remote State')
    if not terraform_generate_handler(config=config, check_tf=False, check_creds=False):
        return False

    if not run_command(['terraform', 'init'], cwd=config.build_directory):
        return False

    LOGGER.info('Deploying Lambda Functions')

    functions = ['rule', 'alert', 'alert_merger', 'athena', 'classifier']
    deploy(config, functions)

    # we need to manually create the streamalerts table since terraform does not support this
    # See: https://github.com/terraform-providers/terraform-provider-aws/issues/1486
    if get_data_file_format(config) == 'json':
        # Terraform v0.12 supports creating Athena tables, so the
        # aws_glue_catalog_table resource is used to create tables when the
        # data file_format is set to "parquet" in "athena_partitioner_config".
        #
        # For the "json" file_format, we continue to use Athena DDL queries to
        # create tables. This capability will be phased out in a future
        # release so that users can take advantage of parquet performance.
        alerts_bucket = firehose_alerts_bucket(config)
        create_table('alerts', alerts_bucket, config)

        # Create the glue catalog tables for the enabled logs
        if not create_log_tables(config=config):
            return False

    LOGGER.info('Building remaining infrastructure')
    return terraform_runner(config, refresh=False)

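# A sketch of get_data_file_format, assuming it simply reads the configured
# file_format ("json" or "parquet") from the athena_partitioner_config section
# referenced in the comment above. Illustrative only.
def get_data_file_format(config):
    return config.get('lambda', {}).get('athena_partitioner_config', {}).get('file_format')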