def import_events(event, __):
    """Lambda handler: import audit events from S3 objects named in *event*.

    For every S3 record in the trigger payload, streams the file line by
    line, stores each JSON-encoded audit event in the database and, for
    qualifying session events, derives billing and fraud events.  The source
    file is deleted from S3 after all of its lines have been processed.

    :param event: S3 trigger payload; ``event['Records']`` lists the objects.
    :param __: unused Lambda context argument.
    """
    logger = logging.getLogger('event-recorder')
    logger.setLevel(logging.INFO)
    dsn = os.environ['DB_CONNECTION_STRING']
    db_connection = create_db_connection(dsn, get_database_password(dsn))
    logger.info('Created connection to DB')
    for record in event['Records']:
        bucket = record['s3']['bucket']['name']
        filename = record['s3']['object']['key']
        for line in fetch_import_file(bucket, filename):
            try:
                message_envelope = json.loads(line)
                # Fresh name: the original rebound ``event`` here, shadowing
                # the handler argument for the rest of the invocation.
                audit_event = event_from_json_object(message_envelope['document'])
                if write_audit_event_to_database(audit_event, db_connection):
                    if (audit_event.event_type == 'session_event'
                            and audit_event.details.get('session_event_type') == 'idp_authn_succeeded'):
                        write_billing_event_to_database(audit_event, db_connection)
                    if (audit_event.event_type == 'session_event'
                            and audit_event.details.get('session_event_type') == 'fraud_detected'):
                        write_fraud_event_to_database(audit_event, db_connection)
            except Exception:
                # logger.exception logs the traceback itself; the old
                # '{}'.format(exception) suffix was redundant and unspaced.
                logger.exception('Failed to store message')
        delete_import_file(bucket, filename)
def import_events(event, __):
    """Lambda handler: import audit events from S3 objects named in *event*.

    Variant that resolves the database password inline: either decrypting
    ``ENCRYPTED_DATABASE_PASSWORD`` or generating an RDS IAM auth token from
    the DSN components.  Each line of every referenced file is stored as an
    audit event, with billing/fraud events derived from qualifying session
    events; the file is deleted from S3 once processed.

    :param event: S3 trigger payload; ``event['Records']`` lists the objects.
    :param __: unused Lambda context argument.
    """
    logger = logging.getLogger('event-recorder')
    logger.setLevel(logging.INFO)
    dsn = os.environ['DB_CONNECTION_STRING']
    if 'ENCRYPTED_DATABASE_PASSWORD' in os.environ:
        # boto returns decrypted as b'bytes' so decode to convert to password string
        database_password = decrypt(
            os.environ['ENCRYPTED_DATABASE_PASSWORD']).decode()
    else:
        dsn_components = parse_dsn(dsn)
        database_password = boto3.client('rds').generate_db_auth_token(
            dsn_components['host'], 5432, dsn_components['user'])
    db_connection = create_db_connection(dsn, database_password)
    logger.info('Created connection to DB')
    for record in event['Records']:
        bucket = record['s3']['bucket']['name']
        filename = record['s3']['object']['key']
        for line in fetch_import_file(bucket, filename):
            try:
                message_envelope = json.loads(line)
                # Fresh name: the original rebound ``event`` here, shadowing
                # the handler argument for the rest of the invocation.
                audit_event = event_from_json_object(message_envelope['document'])
                if write_audit_event_to_database(audit_event, db_connection):
                    if (audit_event.event_type == 'session_event'
                            and audit_event.details.get('session_event_type') == 'idp_authn_succeeded'):
                        write_billing_event_to_database(audit_event, db_connection)
                    if (audit_event.event_type == 'session_event'
                            and audit_event.details.get('session_event_type') == 'fraud_detected'):
                        write_fraud_event_to_database(audit_event, db_connection)
            except Exception:
                # logger.exception logs the traceback itself; the old
                # '{}'.format(exception) suffix was redundant and unspaced.
                logger.exception('Failed to store message')
        delete_import_file(bucket, filename)
def store_queued_events(_, __):
    """Drain the SQS queue, writing each decrypted event to the database.

    Messages are fetched one at a time until the queue is empty; a message is
    deleted from the queue only after its event has been stored, so failures
    leave the message in place for a retry.  Per-message failures are logged
    and never abort the loop.

    :param _: unused Lambda event argument.
    :param __: unused Lambda context argument.
    """
    sqs_client = boto3.client('sqs')
    queue_url = os.environ['QUEUE_URL']
    db_connection = create_db_connection()
    decryption_key = fetch_decryption_key()
    # Hoisted: the logger was previously re-created on every failure.
    logger = logging.getLogger('event-recorder')
    while True:
        message = fetch_single_message(sqs_client, queue_url)
        if message is None:
            break
        # noinspection PyBroadException
        # catch all errors and log them - we never want a single failing
        # message to kill the process.
        try:
            decrypted_message = decrypt_message(message['Body'], decryption_key)
            event = event_from_json(decrypted_message)
            write_to_database(event, db_connection)
            delete_message(sqs_client, queue_url, message)
        except Exception:
            # No ``as exception`` binding: it was never used, and
            # logger.exception records the traceback anyway.
            logger.exception('Failed to store message')
def idp_fraud_data_events(event, __):
    """Lambda handler: validate and import IdP fraud-data files from S3.

    Each uploaded object's tags supply the IdP entity id (``idp``) and the
    uploading ``username``, plus optional ``timezone``, ``dialect`` and
    ``has_header`` overrides.  A file that processes cleanly is marked
    validated and moved to the success location; otherwise it is moved to the
    error location.

    :param event: S3 trigger payload; ``event['Records']`` lists the objects.
    :param __: unused Lambda context argument.
    """
    # Create the logger locally like the sibling handlers do; the original
    # relied on a module-level ``logger`` that is not visible here.
    logger = logging.getLogger('event-recorder')
    logger.setLevel(logging.INFO)
    dsn = os.environ['DB_CONNECTION_STRING']
    db_connection = create_db_connection(dsn, get_database_password(dsn))
    logger.info('Created connection to DB')
    for record in event['Records']:
        bucket = record['s3']['bucket']['name']
        filename = record['s3']['object']['key']
        tags = fetch_object_tags(bucket, filename)
        idp_entity_id = tags['idp']
        username = tags['username']
        # Optional per-file overrides fall back to the module defaults.
        timezone = tags.get('timezone', DEFAULT_TIMEZONE)
        dialect = tags.get('dialect', DEFAULT_DIALECT)
        if 'has_header' in tags:
            has_header = tags['has_header'].lower() in ['true', '1', 'y', 'yes']
        else:
            has_header = DEFAULT_HAS_HEADER
        upload_session = create_import_session(filename, idp_entity_id,
                                               username, db_connection)
        if process_file(bucket, filename, upload_session, db_connection,
                        has_header, dialect, timezone):
            logger.info("Processing successful")
            update_session_as_validated(upload_session, db_connection)
            move_to_success(bucket, filename)
        else:
            logger.warning("Processing Failed")
            move_to_error(bucket, filename)
def store_queued_events(_, __):
    """Drain the SQS queue, storing audit/billing/fraud events in the database.

    Configuration comes from the environment:

    * ``QUEUE_URL`` -- SQS queue to drain.
    * ``ENCRYPTION_KEY`` -- encrypted message key; fetched from S3 when unset.
    * ``DB_CONNECTION_STRING`` -- database DSN.
    * ``ENCRYPTED_DATABASE_PASSWORD`` -- KMS-encrypted password; when unset an
      RDS IAM auth token is generated from the DSN components instead.

    Each message is decrypted, written to the database (with billing/fraud
    events derived from qualifying session events), forwarded to Splunk on a
    best-effort basis, and finally deleted from the queue.  Per-message
    failures are logged and never abort the loop.

    :param _: unused Lambda event argument.
    :param __: unused Lambda context argument.
    """
    sqs_client = boto3.client('sqs')
    queue_url = os.environ['QUEUE_URL']
    logger = logging.getLogger('event-recorder')
    logger.setLevel(logging.INFO)

    if 'ENCRYPTION_KEY' in os.environ:
        encrypted_decryption_key = os.environ['ENCRYPTION_KEY']
        logger.info('Got decryption key from environment variable')
    else:
        encrypted_decryption_key = fetch_decryption_key()
        logger.info('Got decryption key from S3')
    decryption_key = decrypt(encrypted_decryption_key)
    logger.info('Decrypted key successfully')

    dsn = os.environ['DB_CONNECTION_STRING']
    if 'ENCRYPTED_DATABASE_PASSWORD' in os.environ:
        # boto returns decrypted as b'bytes' so decode to convert to password string
        database_password = decrypt(
            os.environ['ENCRYPTED_DATABASE_PASSWORD']).decode()
    else:
        dsn_components = parse_dsn(dsn)
        database_password = boto3.client('rds').generate_db_auth_token(
            dsn_components['host'], 5432, dsn_components['user'])
    db_connection = create_db_connection(dsn, database_password)
    logger.info('Created connection to DB')

    event_count = 0
    while True:
        message = fetch_single_message(sqs_client, queue_url)
        if message is None:
            logger.info('Queue is empty - finishing after {0} events'.format(
                event_count))
            break
        event_count += 1
        # noinspection PyBroadException
        # catch all errors and log them - we never want a single failing
        # message to kill the process.
        event = None
        try:
            decrypted_message = decrypt_message(message['Body'], decryption_key)
            event = event_from_json(decrypted_message)
            logger.info('Decrypted event with ID: {0}'.format(event.event_id))
            write_audit_event_to_database(event, db_connection)
            logger.info('Stored audit event: {0}'.format(event.event_id))
            if event.event_type == 'session_event' and event.details.get(
                    'session_event_type') == 'idp_authn_succeeded':
                write_billing_event_to_database(event, db_connection)
                logger.info('Stored billing event: {0}'.format(event.event_id))
            if event.event_type == 'session_event' and event.details.get(
                    'session_event_type') == 'fraud_detected':
                write_fraud_event_to_database(event, db_connection)
                logger.info('Stored fraud event: {0}'.format(event.event_id))
            # really don't want the event system to fail because of Splunk logging
            try:
                splunk_res = push_event_to_splunk(decrypted_message)
            except Exception:
                # best-effort push; the unused ``as e`` binding is gone
                splunk_res = False
            if splunk_res and splunk_res[0] == 200:
                # log successfully pushed events
                logger.info('Pushed fraud event to Splunk: {0}'.format(
                    event.event_id))
            elif 'production' in queue_url:
                # log unsuccessful push events as errors but don't raise an
                # exception - this way, if Splunk was down, the event system
                # still works as expected.  (Was re-reading os.environ here;
                # ``queue_url`` already holds the same value.)
                logger.error(
                    'Failed to push fraud event to Splunk: {0}'.format(
                        event.event_id))
            delete_message(sqs_client, queue_url, message)
            logger.info('Deleted event from queue with ID: {0}'.format(
                event.event_id))
        except Exception:
            # No ``as exception`` binding: logger.exception records the
            # traceback itself.
            if event:
                logger.exception(
                    'Failed to store event {0}, event type "{1}" from SQS message ID {2}'
                    .format(event.event_id, event.event_type,
                            message['MessageId']))
            else:
                logger.exception(
                    'Failed to decrypt message, SQS ID = {0}'.format(
                        message['MessageId']))
def store_queued_events(_, __):
    """Pull every message off the SQS queue and record it in the database.

    Runs until the queue reports empty.  Each message is decrypted, echoed to
    stdout so this function's CloudWatch log group receives the raw JSON on a
    single line (which lets Splunk parse it as JSON), persisted as an audit
    event, mirrored into billing/fraud events when the session event type
    warrants it, and finally removed from the queue.  A failing message is
    logged and skipped so one bad message never kills the run.

    :param _: unused Lambda event argument.
    :param __: unused Lambda context argument.
    """
    log = logging.getLogger('event-recorder')
    log.setLevel(logging.INFO)
    sqs = boto3.client('sqs')
    queue = os.environ['QUEUE_URL']

    # The message key ciphertext comes from the environment when available,
    # otherwise from S3; either way it must be decrypted before use.
    if 'ENCRYPTION_KEY' in os.environ:
        key_ciphertext = os.environ['ENCRYPTION_KEY']
        log.info('Got decryption key from environment variable')
    else:
        key_ciphertext = fetch_decryption_key()
        log.info('Got decryption key from S3')
    message_key = decrypt(key_ciphertext)
    log.info('Decrypted key successfully')

    connection_string = os.environ['DB_CONNECTION_STRING']
    connection = create_db_connection(connection_string,
                                      get_database_password(connection_string))
    log.info('Created connection to DB')

    stored_count = 0
    while True:
        sqs_message = fetch_single_message(sqs, queue)
        if sqs_message is None:
            log.info('Queue is empty - finishing after {0} events'.format(
                stored_count))
            break
        stored_count += 1
        # noinspection PyBroadException
        # catch all errors and log them - we never want a single failing
        # message to kill the process.
        event = None
        try:
            raw_json = decrypt_message(sqs_message['Body'], message_key)
            event = event_from_json(raw_json)
            # One-line raw JSON for CloudWatch / Splunk ingestion.
            print(raw_json)
            log.info('Decrypted event with ID: {0}'.format(event.event_id))
            write_audit_event_to_database(event, connection)
            log.info('Stored audit event: {0}'.format(event.event_id))
            if event.event_type == 'session_event':
                session_event_type = event.details.get('session_event_type')
                if session_event_type == 'idp_authn_succeeded':
                    write_billing_event_to_database(event, connection)
                    log.info('Stored billing event: {0}'.format(event.event_id))
                if session_event_type == 'fraud_detected':
                    write_fraud_event_to_database(event, connection)
                    log.info('Stored fraud event: {0}'.format(event.event_id))
            delete_message(sqs, queue, sqs_message)
            log.info('Deleted event from queue with ID: {0}'.format(
                event.event_id))
        except Exception:
            # ``event`` stays None when decryption/parsing itself failed.
            if event:
                log.exception(
                    'Failed to store event {0}, event type "{1}" from SQS message ID {2}'.format(
                        event.event_id, event.event_type,
                        sqs_message['MessageId']))
            else:
                log.exception('Failed to decrypt message, SQS ID = {0}'.format(
                    sqs_message['MessageId']))