def _get_creds_from_s3(self, cred_location, descriptor):
    """Pull the encrypted credential blob for this service and destination from s3

    Args:
        cred_location (str): The tmp path on disk to store the encrypted blob
        descriptor (str): Service destination (ie: slack channel, pd integration)

    Returns:
        bool: True if download of creds from s3 was a success, False otherwise
    """
    try:
        # Make sure the parent directory for the temp credential file exists
        cred_dir = os.path.dirname(cred_location)
        if not os.path.exists(cred_dir):
            os.makedirs(cred_dir)

        client = boto3.client('s3', region_name=self.region)
        with open(cred_location, 'wb') as cred_output:
            client.download_fileobj(
                self.secrets_bucket,
                self.output_cred_name(descriptor),
                cred_output
            )

        return True
    except ClientError as err:
        LOGGER.exception(
            'credentials for \'%s\' could not be downloaded '
            'from S3: %s', self.output_cred_name(descriptor), err.response)
        # Explicit False keeps the documented bool contract; previously the
        # error path implicitly returned None
        return False
def run(self, event):
    """Run the alert processor!

    Args:
        event (dict): Lambda invocation event containing at least the rule name and alert ID.

    Returns:
        dict: Maps output (str) to whether it sent successfully (bool).
            An empty dict is returned if the Alert was improperly formatted.
    """
    # A lookup event carries only the alert's composite key; fetch the full
    # record from Dynamo. Otherwise the event itself is the record.
    if set(event) == {'AlertID', 'RuleName'}:
        LOGGER.info('Retrieving %s from alerts table', event)
        record = self.alerts_table.get_alert_record(event['RuleName'], event['AlertID'])
        if not record:
            LOGGER.error('%s does not exist in the alerts table', event)
            return {}
    else:
        record = event

    # Build an Alert instance from the raw record, bailing out on bad data.
    try:
        alert = Alert.create_from_dynamo_record(record)
    except AlertCreationError:
        LOGGER.exception('Invalid alert %s', event)
        return {}

    # Drop the normalization key before dispatching.
    # TODO: Consider including this in at least some outputs, e.g. default Athena firehose
    alert.record.pop(NORMALIZATION_KEY, None)

    result = self._send_to_outputs(alert)
    self._update_table(alert, result)
    return result
def _send_alert(alert_payload, output, dispatcher):
    """Send a single alert to the given output.

    Returns:
        bool: True if the alert was sent successfully.
    """
    alert_id = alert_payload['id']
    LOGGER.info('Sending alert %s to %s', alert_id, output)

    try:
        # Descriptor is everything after the service prefix, e.g. "slack:channel"
        sent = dispatcher.dispatch(
            descriptor=output.split(':')[1],
            rule_name=alert_payload['rule_name'],
            alert=alert_payload)
    except Exception:  # pylint: disable=broad-except
        LOGGER.exception(
            'Exception when sending alert %s to %s. Alert:\n%s',
            alert_id, output, json.dumps(alert_payload, indent=2))
        return False

    return sent
def _send_alert(alert, output, dispatcher):
    """Send a single alert to the given output.

    Args:
        alert (Alert): Alert to be sent
        output (str): Alert output, e.g. "aws-sns:topic-name"
        dispatcher (OutputDispatcher): Dispatcher to receive the alert

    Returns:
        bool: True if the alert was sent successfully.
    """
    LOGGER.info('Sending %s to %s', alert, output)

    try:
        result = dispatcher.dispatch(alert, output.split(':')[1])
    except Exception:  # pylint: disable=broad-except
        LOGGER.exception('Exception when sending %s to %s. Alert:\n%s',
                         alert, output, repr(alert))
        result = False

    return result
def dispatch(self, alert, output):
    """Send alerts to the given service. This wraps the protected subclass method of _dispatch
    to aid in usability

    Args:
        alert (Alert): Alert instance which triggered a rule
        output (str): Full output spec with service and descriptor,
            e.g. "slack:channel-name" (descriptor is the part after the colon)

    Returns:
        bool: True if alert was sent successfully, False otherwise
    """
    LOGGER.info('Sending %s to %s', alert, output)

    descriptor = output.split(':')[1]
    try:
        # Coerce to bool so a truthy-but-non-bool _dispatch result keeps the contract
        sent = bool(self._dispatch(alert, descriptor))
    except Exception:  # pylint: disable=broad-except
        LOGGER.exception('Exception when sending %s to %s. Alert:\n%s',
                         alert, output, repr(alert))
        sent = False

    self._log_status(sent, descriptor)
    return sent
def run(alert, region, function_name, config):
    """Send an Alert to its described outputs.

    Args:
        alert (dict): dictionary representating an alert with the following structure:
            {
                'record': record,
                'rule_name': rule.rule_name,
                'rule_description': rule.rule_function.__doc__,
                'log_source': str(payload.log_source),
                'log_type': payload.type,
                'outputs': rule.outputs,
                'source_service': payload.service,
                'source_entity': payload.entity
            }
        region (str): The AWS region of the currently executing Lambda function
        function_name (str): The name of the lambda function
        config (dict): The loaded configuration for outputs from conf/outputs.json

    Yields:
        (bool, str): Dispatch status and name of the output to the handler
    """
    if not validate_alert(alert):
        LOGGER.error('Invalid alert format:\n%s', json.dumps(alert, indent=2))
        return

    LOGGER.debug('Sending alert to outputs:\n%s', json.dumps(alert, indent=2))

    # strip out unnecessary keys and sort
    alert = _sort_dict(alert)

    # Deduplicate the configured outputs and attempt each one in turn
    for output in set(alert['outputs']):
        try:
            service, descriptor = output.split(':')
        except ValueError:
            LOGGER.error(
                'Improperly formatted output [%s]. Outputs for rules must '
                'be declared with both a service and a descriptor for the '
                'integration (ie: \'slack:my_channel\')', output)
            continue

        if service not in config or descriptor not in config[service]:
            LOGGER.error('The output \'%s\' does not exist!', output)
            continue

        # Retrieve the proper class to handle dispatching the alerts of this services
        dispatcher = get_output_dispatcher(service, region, function_name, config)
        if not dispatcher:
            continue

        LOGGER.debug('Sending alert to %s:%s', service, descriptor)

        sent = False
        try:
            sent = dispatcher.dispatch(descriptor=descriptor,
                                       rule_name=alert['rule_name'],
                                       alert=alert)
        except Exception as err:  # pylint: disable=broad-except
            LOGGER.exception(
                'An error occurred while sending alert '
                'to %s:%s: %s. alert:\n%s', service, descriptor, err,
                json.dumps(alert, indent=2))

        # Yield back the result to the handler
        yield sent, output