def _add_global_metric_alarm(self, alarm_info):
    """Create a CloudWatch alarm for an aggregate (non-clustered) custom metric

    Args:
        alarm_info (dict): All the necessary values needed to add a CloudWatch
            metric alarm

    Returns:
        bool: False if the user aborts at a prompt, True once the alarm has
            been written to the lambda config
    """
    function_name = alarm_info['function']
    func_config_name = f'{function_name}_config'

    # Lazily create this function's config section if it does not exist yet
    function_config = self.config['lambda'].setdefault(func_config_name, {})

    if function_name in CLUSTERED_FUNCTIONS:
        # Clustered functions toggle metrics per-cluster; an aggregate alarm
        # is pointless unless at least one cluster is emitting metrics
        if not self._clusters_with_metrics_enabled(function_name):
            prompt = (
                'Metrics are not currently enabled for the \'{}\' function '
                'within any cluster. Creating an alarm will have no effect '
                'until metrics are enabled for this function in at least one '
                'cluster. Would you still like to continue?'.format(function_name))
            if not continue_prompt(message=prompt):
                return False
    else:
        # Non-clustered functions carry their own enable flag; offer to turn
        # metrics on, or let the user knowingly add a dormant alarm
        if not function_config.get('enable_custom_metrics'):
            prompt = (
                'Metrics are not currently enabled for the \'{}\' function. '
                'Would you like to enable metrics for this function?'
            ).format(function_name)

            if continue_prompt(message=prompt):
                self.toggle_metrics(function_name, enabled=True)
            elif not continue_prompt(message='Would you still like to add this alarm '
                                             'even though metrics are disabled?'):
                return False

    existing_alarms = function_config.get('custom_metric_alarms', {})

    # Prepend the function's metric prefix so the alarm targets the
    # aggregate metric name
    alarm_settings = alarm_info.copy()
    prefix = metrics.FUNC_PREFIXES[function_name]
    alarm_settings['metric_name'] = f"{prefix}-{alarm_settings['metric_name']}"

    function_config['custom_metric_alarms'] = self._add_metric_alarm_config(
        alarm_settings, existing_alarms)

    LOGGER.info(
        'Successfully added \'%s\' metric alarm to '
        '\'conf/lambda.json\'.', alarm_settings['alarm_name'])

    return True
def _add_cluster_metric_alarm(self, alarm_info):
    """Create CloudWatch alarms for a cluster-scoped custom metric

    Args:
        alarm_info (dict): All the necessary values needed to add a CloudWatch
            metric alarm. Must include a 'clusters' iterable of cluster names.

    Returns:
        bool: True once all requested clusters have been processed
    """
    function_name = alarm_info['function']
    config_name = f'{function_name}_config'

    for cluster in alarm_info['clusters']:
        function_config = self.config['clusters'][cluster][config_name]

        # If this cluster does not emit metrics for the function, offer to
        # enable them; a declined second prompt skips this cluster entirely
        if not function_config.get('enable_custom_metrics'):
            prompt = (
                'Metrics are not currently enabled for the \'{}\' function '
                'within the \'{}\' cluster. Would you like to enable metrics '
                'for this cluster?'.format(function_name, cluster))

            if continue_prompt(message=prompt):
                self.toggle_metrics(function_name, enabled=True, clusters=[cluster])
            elif not continue_prompt(message='Would you still like to add this alarm '
                                             'even though metrics are disabled?'):
                continue

        existing_alarms = function_config.get('custom_metric_alarms', {})

        # Cluster metrics are namespaced as <prefix>-<metric>-<CLUSTER>
        alarm_settings = alarm_info.copy()
        alarm_settings['metric_name'] = '{}-{}-{}'.format(
            metrics.FUNC_PREFIXES[function_name],
            alarm_settings['metric_name'],
            cluster.upper())

        function_config['custom_metric_alarms'] = self._add_metric_alarm_config(
            alarm_settings, existing_alarms)

        LOGGER.info(
            'Successfully added \'%s\' metric alarm for the \'%s\' '
            'function to \'conf/clusters/%s.json\'.',
            alarm_settings['alarm_name'], function_name, cluster)

    return True
def drop_all_tables(config):
    """Drop all 'streamalert' Athena tables

    Used when cleaning up an existing deployment

    Args:
        config (CLIConfig): Loaded StreamAlert config

    Returns:
        bool: False if errors occurred, True otherwise
    """
    # Destructive operation — require explicit confirmation first
    if not continue_prompt(message='Are you sure you want to drop all Athena tables?'):
        return False

    athena_client = get_athena_client(config)

    if athena_client.drop_all_tables():
        LOGGER.info('Successfully dropped all tables from database: %s',
                    athena_client.database)
        return True

    LOGGER.error('Failed to drop one or more tables from database: %s',
                 athena_client.database)
    return False
def handler(cls, options, config):
    """Use Terraform to destroy any existing infrastructure

    Args:
        options (argparse.Namespace): Parsed arguments from manage.py
        config (CLIConfig): Loaded StreamAlert config

    Returns:
        bool: False if errors occurred, True otherwise
    """
    # Both valid AWS credentials and a terraform binary are prerequisites;
    # short-circuit keeps the original check order
    if not (check_credentials() and terraform_check()):
        return False

    # Ask for approval here since multiple Terraform commands may be necessary
    if not continue_prompt(message='Are you sure you want to destroy?'):
        return False

    # Targeted destroy: validate the requested modules and stop there
    if options.target:
        target_modules, valid = _get_valid_tf_targets(config, options.target)
        if not valid:
            return False

        return tf_runner(
            action='destroy',
            auto_approve=True,
            targets=target_modules or None
        )

    # Full destroy: migrate back to local state so Terraform can successfully
    # destroy the S3 bucket used by the backend.
    # Do not check for terraform or aws creds again since these were checked above
    if not terraform_generate_handler(config=config,
                                      init=True,
                                      check_tf=False,
                                      check_creds=False):
        return False

    if not run_command(['terraform', 'init']):
        return False

    # Destroy all of the infrastructure
    if not tf_runner(action='destroy', auto_approve=True):
        return False

    # Remove old Terraform files
    return TerraformCleanCommand.handler(options, config)
def add_app(self, func_name, app_info):
    """Add a configuration for a new streamalert app integration function

    Args:
        func_name (str): Name of the app integration function being configured
        app_info (dict): The necessary values needed to begin configuring
            a new app integration

    Returns:
        bool: False if errors occurred or the user aborted, True otherwise
    """
    exists, prompt_for_auth, overwrite = False, True, False
    app = StreamAlertApp.get_app(app_info['type'])

    cluster_name = app_info['cluster']
    app_name = app_info['app_name']

    # Check to see if there is an existing configuration for this app integration
    cluster_config = self.config['clusters'][cluster_name]

    if func_name in cluster_config['modules'].get('streamalert_apps', {}):
        prompt = (
            'An app with the name \'{}\' is already configured for cluster '
            '\'{}\'. Would you like to update the existing app\'s configuration'
            '?'.format(app_name, cluster_name))

        exists = True

        # Return False if the user is not deliberately updating an existing config
        # (previously a bare `return` yielded None, breaking the documented bool contract)
        if not continue_prompt(message=prompt):
            return False

        prompt = (
            'Would you also like to update the authentication information for '
            'app integration with name \'{}\'?'.format(app_name))

        # If this is true, we shouldn't prompt again to warn about overwriting
        prompt_for_auth = overwrite = continue_prompt(message=prompt)

    if prompt_for_auth and not save_app_auth_info(app, app_info, func_name, overwrite):
        return False

    apps_config = cluster_config['modules'].get('streamalert_apps', {})
    if not exists:
        # Save a default app settings to the config for new apps
        new_app_config = {
            'app_name': app_info['app_name'],
            'concurrency_limit': 2,
            'log_level': 'info',
            'log_retention_days': 14,
            'memory': app_info['memory'],
            'metric_alarms': {
                'errors': {
                    'enabled': True,
                    'evaluation_periods': 1,
                    'period_secs': 120
                }
            },
            'schedule_expression': app_info['schedule_expression'],
            'timeout': app_info['timeout'],
            'type': app_info['type']
        }
        apps_config[func_name] = new_app_config
    else:
        # Allow for updating certain attributes for the app without overwriting
        # current parts of the configuration
        updated_app_config = {
            'memory': app_info['memory'],
            'schedule_expression': app_info['schedule_expression'],
            'timeout': app_info['timeout']
        }
        apps_config[func_name].update(updated_app_config)

    cluster_config['modules']['streamalert_apps'] = apps_config

    # Add this service to the sources for this app integration
    # The `streamalert_app` is purposely singular here
    app_sources = self.config['clusters'][cluster_name]['data_sources'].get(
        'streamalert_app', {})
    app_sources[func_name] = [app.service()]
    self.config['clusters'][cluster_name]['data_sources'][
        'streamalert_app'] = app_sources

    LOGGER.info(
        'Successfully added \'%s\' app integration to \'conf/clusters/%s.json\' '
        'for service \'%s\'.', app_info['app_name'], app_info['cluster'],
        app_info['type'])

    self.write()

    return True