def run(self, event):
    """Run the alert processor!

    Args:
        event (dict): Lambda invocation event containing at least the rule name and alert ID.

    Returns:
        dict: Maps output (str) to whether it sent successfully (bool).
            An empty dict is returned if the Alert was improperly formatted.
    """
    # Grab the alert record from Dynamo (if needed).
    if set(event) == {'AlertID', 'RuleName'}:
        LOGGER.info('Retrieving %s from alerts table', event)
        alert_record = self.alerts_table.get_alert_record(event['RuleName'], event['AlertID'])
        if not alert_record:
            LOGGER.error('%s does not exist in the alerts table', event)
            return {}
    else:
        alert_record = event

    # Convert record to an Alert instance.
    try:
        alert = Alert.create_from_dynamo_record(alert_record)
    except AlertCreationError:
        LOGGER.exception('Invalid alert %s', event)
        return {}

    # Remove normalization key from the record.
    # TODO: Consider including this in at least some outputs, e.g. default Athena firehose
    if NORMALIZATION_KEY in alert.record:
        del alert.record[NORMALIZATION_KEY]

    result = self._send_to_outputs(alert)
    self._update_table(alert, result)
    return result
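# Usage sketch (illustrative only, not part of the source): the two event shapes run()
# accepts. The AlertProcessor name, handler signature, and sample values below are
# assumptions for demonstration.
#
# def handler(event, _context):
#     processor = AlertProcessor()
#     # Lean event: only the keys {'AlertID', 'RuleName'}, so the full record is
#     # fetched from the alerts table before dispatching.
#     lean_event = {'RuleName': 'example_rule', 'AlertID': 'abc123'}
#     # Alternatively, a full Dynamo alert record can be passed through unchanged.
#     # In both cases the return value maps each output (str) to its send status (bool),
#     # e.g. {'aws-firehose:sample-stream': True, 'slack:sample-channel': False}.
#     return processor.run(lean_event)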
def _dispatch(self, alert, descriptor):
    """Send alert to CloudWatch Logs via the Lambda logger

    Args:
        alert (Alert): Alert instance which triggered a rule
        descriptor (str): Output descriptor

    Returns:
        bool: True, since logging the alert always succeeds
    """
    LOGGER.info('New Alert:\n%s', json.dumps(alert.output_dict(), indent=4))

    return True
def dispatch(self, **kwargs):
    """Send alert to a Kinesis Firehose Delivery Stream

    Keyword Args:
        descriptor (str): Service descriptor (e.g. Slack channel, PagerDuty integration)
        rule_name (str): Name of the triggered rule
        alert (dict): Alert relevant to the triggered rule

    Returns:
        bool: Indicates a successful or failed dispatch of the alert
    """
    @backoff.on_exception(backoff.fibo,
                          ClientError,
                          max_tries=self.MAX_BACKOFF_ATTEMPTS,
                          jitter=backoff.full_jitter,
                          on_backoff=backoff_handler,
                          on_success=success_handler,
                          on_giveup=giveup_handler)
    def _firehose_request_wrapper(json_alert, delivery_stream):
        """Make the PutRecord request to Kinesis Firehose with backoff

        Args:
            json_alert (str): The JSON dumped alert body
            delivery_stream (str): The Firehose Delivery Stream to send to

        Returns:
            dict: Firehose response in the format below
                {'RecordId': 'string'}
        """
        return self.__aws_client__.put_record(
            DeliveryStreamName=delivery_stream,
            Record={'Data': json_alert})

    if self.__aws_client__ is None:
        self.__aws_client__ = boto3.client('firehose', region_name=self.region)

    json_alert = json.dumps(kwargs['alert'], separators=(',', ':')) + '\n'
    if len(json_alert) > self.MAX_RECORD_SIZE:
        LOGGER.error('Alert too large to send to Firehose: \n%s...', json_alert[0:1000])
        return False

    delivery_stream = self.config[self.__service__][kwargs['descriptor']]
    LOGGER.info('Sending alert [%s] to aws-firehose:%s', kwargs['rule_name'], delivery_stream)

    resp = _firehose_request_wrapper(json_alert, delivery_stream)

    if resp.get('RecordId'):
        LOGGER.info('Alert [%s] successfully sent to aws-firehose:%s with RecordId:%s',
                    kwargs['rule_name'], delivery_stream, resp['RecordId'])

    return self._log_status(resp, kwargs['descriptor'])
def _log_status(cls, success):
    """Log the status of sending the alerts

    Args:
        success (bool or dict): Indicates if the dispatching of alerts was successful

    Returns:
        bool: True if the alert was sent successfully, False otherwise
    """
    if success:
        LOGGER.info('Successfully sent alert to %s', cls.__service__)
    else:
        LOGGER.error('Failed to send alert to %s', cls.__service__)

    return bool(success)
def _log_status(cls, success, descriptor):
    """Log the status of sending the alerts

    Args:
        success (bool or dict): Indicates if the dispatching of alerts was successful
        descriptor (str): Service descriptor
    """
    if success:
        LOGGER.info('Successfully sent alert to %s:%s', cls.__service__, descriptor)
    else:
        LOGGER.error('Failed to send alert to %s:%s', cls.__service__, descriptor)
def _dispatch(self, alert, descriptor):
    """Send alert to a Kinesis Firehose Delivery Stream

    Args:
        alert (Alert): Alert instance which triggered a rule
        descriptor (str): Output descriptor

    Returns:
        bool: True if alert was sent successfully, False otherwise
    """
    @backoff.on_exception(backoff.fibo,
                          ClientError,
                          max_tries=self.MAX_BACKOFF_ATTEMPTS,
                          jitter=backoff.full_jitter,
                          on_backoff=backoff_handler(),
                          on_success=success_handler(),
                          on_giveup=giveup_handler())
    def _firehose_request_wrapper(json_alert, delivery_stream):
        """Make the PutRecord request to Kinesis Firehose with backoff

        Args:
            json_alert (str): The JSON dumped alert body
            delivery_stream (str): The Firehose Delivery Stream to send to

        Returns:
            dict: Firehose response in the format below
                {'RecordId': 'string'}
        """
        return self.__aws_client__.put_record(
            DeliveryStreamName=delivery_stream,
            Record={'Data': json_alert})

    if self.__aws_client__ is None:
        self.__aws_client__ = boto3.client('firehose', region_name=self.region)

    json_alert = json.dumps(alert.output_dict(), separators=(',', ':')) + '\n'
    if len(json_alert) > self.MAX_RECORD_SIZE:
        LOGGER.error('Alert too large to send to Firehose: \n%s...', json_alert[0:1000])
        return False

    delivery_stream = self.config[self.__service__][descriptor]
    LOGGER.info('Sending %s to aws-firehose:%s', alert, delivery_stream)

    _firehose_request_wrapper(json_alert, delivery_stream)
    LOGGER.info('%s successfully sent to aws-firehose:%s', alert, delivery_stream)

    return True
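# Standalone sketch (assumption, not from the source) of the retry pattern used by
# _firehose_request_wrapper above: backoff.on_exception retries the wrapped call on
# ClientError with a Fibonacci schedule and full jitter. The stream name, region, and
# max_tries value here are hypothetical.
#
# import json
#
# import backoff
# import boto3
# from botocore.exceptions import ClientError
#
# @backoff.on_exception(backoff.fibo, ClientError, max_tries=5, jitter=backoff.full_jitter)
# def put_alert_record(client, delivery_stream, json_alert):
#     # Returns {'RecordId': 'string', ...} on success; a ClientError triggers a retry.
#     return client.put_record(DeliveryStreamName=delivery_stream, Record={'Data': json_alert})
#
# firehose = boto3.client('firehose', region_name='us-east-1')
# put_alert_record(firehose, 'sample-delivery-stream', json.dumps({'key': 'value'}) + '\n')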
def _send_alert(alert_payload, output, dispatcher):
    """Send a single alert to the given output.

    Args:
        alert_payload (dict): Alert relevant to the triggered rule
        output (str): Alert output, e.g. "aws-sns:topic-name"
        dispatcher (OutputDispatcher): Dispatcher to receive the alert

    Returns:
        bool: True if the alert was sent successfully.
    """
    LOGGER.info('Sending alert %s to %s', alert_payload['id'], output)
    try:
        return dispatcher.dispatch(
            descriptor=output.split(':')[1],
            rule_name=alert_payload['rule_name'],
            alert=alert_payload)
    except Exception:  # pylint: disable=broad-except
        LOGGER.exception(
            'Exception when sending alert %s to %s. Alert:\n%s',
            alert_payload['id'], output, json.dumps(alert_payload, indent=2))
        return False
def _send_alert(alert, output, dispatcher):
    """Send a single alert to the given output.

    Args:
        alert (Alert): Alert to be sent
        output (str): Alert output, e.g. "aws-sns:topic-name"
        dispatcher (OutputDispatcher): Dispatcher to receive the alert

    Returns:
        bool: True if the alert was sent successfully.
    """
    LOGGER.info('Sending %s to %s', alert, output)
    try:
        return dispatcher.dispatch(alert, output.split(':')[1])
    except Exception:  # pylint: disable=broad-except
        LOGGER.exception('Exception when sending %s to %s. Alert:\n%s', alert, output, repr(alert))
        return False
def _item_verify(self, item_str, item_key, item_type, get_id=True):
    """Verify the existence of an item with the API

    Args:
        item_str (str): Service to query about in the API
        item_key (str): Endpoint/key to be extracted from search results
        item_type (str): Type of item reference to be returned
        get_id (bool): Whether to generate a dict with the result and reference

    Returns:
        dict: Object to be used in the API call, containing the item id and the
            item reference; True if the item simply exists (when get_id is False);
            or False if the item is not found
    """
    item_url = self._get_endpoint(self._base_url, item_key)
    item_id = self._check_exists(item_str, item_url, item_key, get_id)
    if not item_id:
        LOGGER.info('%s not found in %s, %s', item_str, item_key, self.__service__)
        return False

    if get_id:
        return {'id': item_id, 'type': item_type}

    return item_id
def dispatch(self, alert, output):
    """Send alerts to the given service.

    This wraps the protected _dispatch method implemented by subclasses to aid in usability.

    Args:
        alert (Alert): Alert instance which triggered a rule
        output (str): Alert output, e.g. "aws-sns:topic-name" (service:descriptor)

    Returns:
        bool: True if alert was sent successfully, False otherwise
    """
    LOGGER.info('Sending %s to %s', alert, output)

    descriptor = output.split(':')[1]

    try:
        sent = bool(self._dispatch(alert, descriptor))
    except Exception:  # pylint: disable=broad-except
        LOGGER.exception('Exception when sending %s to %s. Alert:\n%s', alert, output, repr(alert))
        sent = False

    self._log_status(sent, descriptor)
    return sent
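# Illustrative sketch (assumption): how a dispatcher subclass plugs into dispatch().
# The SampleOutput class and output string are hypothetical; only the _dispatch
# contract (return truthy on success) and the "service:descriptor" output format
# come from the code above.
#
# class SampleOutput(OutputDispatcher):
#     __service__ = 'sample-service'
#
#     def _dispatch(self, alert, descriptor):
#         # Deliver the alert to the destination named by the descriptor; a truthy
#         # return value is logged and reported as a successful send.
#         LOGGER.info('Delivering %s to %s', alert, descriptor)
#         return True
#
# # sent = SampleOutput(config).dispatch(alert, 'sample-service:sample-descriptor')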