def test_get_output_dispatcher_logging(log_mock):
    """Get output dispatcher - requesting an unknown service logs an error"""
    invalid_service = 'bad-output'
    outputs.get_output_dispatcher(invalid_service, REGION, FUNCTION_NAME, CONFIG)
    log_mock.assert_called_with(
        'designated output service [%s] does not exist', invalid_service)
def setup_class(cls):
    """Setup the class before any methods run.

    Builds a PagerDuty dispatcher once so every test in the class can
    reuse it. (Attribute names are kept verbatim: the double-underscore
    prefix is name-mangled against the enclosing class.)
    """
    cls.__service = 'pagerduty'
    cls.__descriptor = 'unit_test_pagerduty'
    cls.__backup_method = None
    cls.__dispatcher = outputs.get_output_dispatcher(
        cls.__service, REGION, FUNCTION_NAME, CONFIG)
def setup_class(cls):
    """Setup the class before any methods run.

    Builds an aws-lambda dispatcher once so every test in the class can
    reuse it. (Attribute names are kept verbatim: the double-underscore
    prefix is name-mangled against the enclosing class.)
    """
    cls.__service = 'aws-lambda'
    cls.__descriptor = 'unit_test_lambda'
    cls.__dispatcher = outputs.get_output_dispatcher(
        cls.__service, REGION, FUNCTION_NAME, CONFIG)
def test_nonexistent_get_output_dispatcher():
    """Get output dispatcher - a made-up service yields no dispatcher"""
    # 'aws-s4' is deliberately not a configured service
    dispatcher = outputs.get_output_dispatcher(
        'aws-s4', REGION, FUNCTION_NAME, CONFIG)
    assert_is_none(dispatcher)
def configure_output(options):
    """Configure a new output for this service

    Args:
        options (argparser): Basically a namedtuple with the service setting
    """
    account = CONFIG['global']['account']
    region = account['region']
    prefix = account['prefix']
    kms_key_alias = account['kms_key_alias']
    # Drop a leading 'alias/' from the configured key alias; the prefix
    # is interpolated when the API call is made.
    if 'alias/' in kms_key_alias:
        kms_key_alias = kms_key_alias.split('/')[1]

    # Look up the dispatcher class that handles this service's alerts
    output = get_output_dispatcher(options.service, region, prefix,
                                   config_outputs.load_outputs_config())

    # A missing dispatcher was already logged by get_output_dispatcher,
    # so just bail out quietly here
    if not output:
        return

    # Prompt the user for a value for every OutputProperty this
    # service declares
    props = output.get_user_defined_properties()
    for name, prop in props.iteritems():
        # pylint: disable=protected-access
        props[name] = prop._replace(value=user_input(
            prop.description, prop.mask_input, prop.input_restrictions))

    service = output.__service__
    config = config_outputs.load_config(props, service)
    # A False config means this exact configuration already exists;
    # start over and prompt for a unique one
    if config is False:
        return configure_output(options)

    secrets_bucket = '{}.streamalert.secrets'.format(prefix)
    secrets_key = output.output_cred_name(props['descriptor'].value)

    # Encrypt the creds and push them to S3, then sync the local output
    # configuration with the new properties
    if config_outputs.encrypt_and_push_creds_to_s3(region, secrets_bucket,
                                                   secrets_key, props,
                                                   kms_key_alias):
        updated_config = output.format_output_config(config, props)
        config_outputs.update_outputs_config(config, updated_config, service)

        LOGGER_CLI.info(
            'Successfully saved \'%s\' output configuration for service \'%s\'',
            props['descriptor'].value, options.service)
    else:
        LOGGER_CLI.error(
            'An error occurred while saving \'%s\' '
            'output configuration for service \'%s\'',
            props['descriptor'].value, options.service)
def run(alert, region, function_name, config):
    """Send an Alert to its described outputs.

    Args:
        alert (dict): dictionary representating an alert with the
            following structure:
                {
                    'record': record,
                    'rule_name': rule.rule_name,
                    'rule_description': rule.rule_function.__doc__,
                    'log_source': str(payload.log_source),
                    'log_type': payload.type,
                    'outputs': rule.outputs,
                    'source_service': payload.service,
                    'source_entity': payload.entity
                }
        region (str): The AWS region of the currently executing Lambda function
        function_name (str): The name of the lambda function
        config (dict): The loaded configuration for outputs from conf/outputs.json

    Yields:
        (bool, str): Dispatch status and name of the output to the handler
    """
    if not validate_alert(alert):
        LOGGER.error('Invalid alert format:\n%s', json.dumps(alert, indent=2))
        return

    LOGGER.debug('Sending alert to outputs:\n%s', json.dumps(alert, indent=2))

    # Drop unnecessary keys and sort for deterministic output
    alert = _sort_dict(alert)

    # Dispatch the alert to every output configured for its rule
    for output in set(alert['outputs']):
        try:
            service, descriptor = output.split(':')
        except ValueError:
            LOGGER.error(
                'Improperly formatted output [%s]. Outputs for rules must '
                'be declared with both a service and a descriptor for the '
                'integration (ie: \'slack:my_channel\')', output)
            continue

        if service not in config or descriptor not in config[service]:
            LOGGER.error('The output \'%s\' does not exist!', output)
            continue

        # Look up the dispatcher class that handles this service's alerts
        output_dispatcher = get_output_dispatcher(service, region,
                                                  function_name, config)
        if not output_dispatcher:
            continue

        LOGGER.debug('Sending alert to %s:%s', service, descriptor)

        sent = False
        try:
            sent = output_dispatcher.dispatch(descriptor=descriptor,
                                              rule_name=alert['rule_name'],
                                              alert=alert)
        except Exception as err:  # pylint: disable=broad-except
            LOGGER.exception('An error occurred while sending alert '
                             'to %s:%s: %s. alert:\n%s', service, descriptor,
                             err, json.dumps(alert, indent=2))

        # Yield back the result to the handler
        yield sent, output
def run(loaded_sns_message, region, function_name, config):
    """Send an Alert to its described outputs.

    Args:
        loaded_sns_message (dict): SNS message dictionary with the
            following structure:
                {
                    'default': alert
                }
            The alert is another dict with the following structure:
                {
                    'record': record,
                    'metadata': {
                        'rule_name': rule.rule_name,
                        'rule_description': rule.rule_function.__doc__,
                        'log': str(payload.log_source),
                        'outputs': rule.outputs,
                        'type': payload.type,
                        'source': {
                            'service': payload.service,
                            'entity': payload.entity
                        }
                    }
                }
        region (str): The AWS region being used
        function_name (str): The name of the lambda function
        config (dict): The loaded configuration for outputs from conf/outputs.json

    Yields:
        (bool, str): Dispatch status and name of the output to the handler
    """
    LOGGER.debug(loaded_sns_message)
    alert = loaded_sns_message['default']
    rule_name = alert['metadata']['rule_name']

    # strip out unnecessary keys and sort
    alert = _sort_dict(alert)

    outputs = alert['metadata']['outputs']
    # Get the output configuration for this rule and send the alert to each
    for output in set(outputs):
        try:
            service, descriptor = output.split(':')
        except ValueError:
            LOGGER.error(
                'Improperly formatted output [%s]. Outputs for rules must '
                'be declared with both a service and a descriptor for the '
                'integration (ie: \'slack:my_channel\')', output)
            continue

        # Idiomatic membership test (was `not service in config`),
        # matching the companion implementation of run() in this project
        if service not in config or descriptor not in config[service]:
            LOGGER.error('The output \'%s\' does not exist!', output)
            continue

        # Retrieve the proper class to handle dispatching the alerts of this services
        output_dispatcher = get_output_dispatcher(service, region,
                                                  function_name, config)
        if not output_dispatcher:
            continue

        LOGGER.debug('Sending alert to %s:%s', service, descriptor)

        sent = False
        try:
            sent = output_dispatcher.dispatch(descriptor=descriptor,
                                              rule_name=rule_name,
                                              alert=alert)
        except Exception as err:  # pylint: disable=broad-except
            LOGGER.exception('An error occurred while sending alert '
                             'to %s:%s: %s. alert:\n%s', service, descriptor,
                             err, json.dumps(alert, indent=2))

        # Yield back the result to the handler
        yield sent, output
def test_existing_get_output_dispatcher():
    """Get output dispatcher - a configured service yields a dispatcher"""
    dispatcher = outputs.get_output_dispatcher(
        'aws-s3', REGION, FUNCTION_NAME, CONFIG)
    assert_is_not_none(dispatcher)