def get_manifest_data(bucket, team, dataset, manifest_key):
    """Returns a list of items from the manifests control table"""
    dynamo_config = DynamoConfiguration()
    dynamo_interface = DynamoInterface(dynamo_config)
    s3_interface = S3Interface()
    local_path = s3_interface.download_object(bucket, manifest_key)
    ddb_keys = []
    items = []
    with open(local_path, "r") as raw_file:
        file_names = [
            file_name.strip().split("/")[-1] for file_name in raw_file
        ]
    for file in file_names:
        ddb_keys.append({
            "dataset_name": team + "-" + dataset,
            "manifest_file_name": manifest_key.split("/")[-1],
            "datafile_name": file
        })
    for ddb_key in ddb_keys:
        try:
            items.append(
                dynamo_interface.get_item_from_manifests_control_table(
                    ddb_key["dataset_name"], ddb_key["manifest_file_name"],
                    ddb_key["datafile_name"]))
        except KeyError:
            logger.error("The manifest file has not been processed in Stage A")
            raise Exception("Manifest File has not been processed in Stage A")
    return items
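# `get_ddb_keys(items)` is used by several handlers below alongside
# get_manifest_data but is not shown in this section. A minimal, hypothetical
# sketch, assuming each manifests-control-table item carries the same
# dataset_name / manifest_file_name / datafile_name attributes used above;
# the framework's actual helper may differ:
def get_ddb_keys(items):
    """Builds the key dictionaries used to update the manifests control table."""
    return [{
        "dataset_name": item["dataset_name"],
        "manifest_file_name": item["manifest_file_name"],
        "datafile_name": item["datafile_name"]
    } for item in items]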
def lambda_handler(event, context):
    """Checks if the file to be processed is manifest driven

    Arguments:
        event {dict} -- Dictionary with details on previous processing step
        context {dict} -- Dictionary with details on Lambda context

    Returns:
        {dict} -- Dictionary with Processed Bucket and Key(s)
    """
    try:
        logger.info('Fetching event data from previous step')
        bucket = event['body']['bucket']
        keys_to_process = event['body']['keysToProcess']
        team = event['body']['team']
        pipeline = event['body']['pipeline']
        stage = event['body']['pipeline_stage']
        dataset = event['body']['dataset']
        ddb_key = team + "-" + dataset

        logger.info('Initializing Octagon client')
        component = context.function_name.split('-')[-2].title()
        octagon_client = (octagon.OctagonClient().with_run_lambda(
            True).with_configuration_instance(event['body']['env']).build())
        peh_id = octagon_client.start_pipeline_execution(
            pipeline_name='{}-{}-stage-{}'.format(team, pipeline,
                                                  stage[-1].lower()),
            dataset_name='{}-{}'.format(team, dataset),
            comment=event)

        dynamo_config = DynamoConfiguration()
        dynamo_interface = DynamoInterface(dynamo_config)
        response = dynamo_interface.get_transform_table_item(ddb_key)
        logger.info("Querying DynamoDB to check for manifest details")
        event["body"]["manifest_enabled"] = response["manifest_enabled"]
        event["body"]["manifest_details"] = response["manifest_details"]

        # Call custom transform created by user and process the file
        event['body']['peh_id'] = peh_id
        remove_content_tmp()
        octagon_client.update_pipeline_execution(
            status="{} {} Processing".format(stage, component),
            component=component)
    except Exception as e:
        logger.error("Fatal error", exc_info=True)
        octagon_client.end_pipeline_execution_failed(
            component=component,
            issue_comment="{} {} Error: {}".format(stage, component, repr(e)))
        remove_content_tmp()
        raise e
    return event
def lambda_handler(event, context):
    """Checks if a dataset is driven by a manifest file

    Arguments:
        event {dict} -- Dictionary with details on previous processing step
        context {dict} -- Dictionary with details on Lambda context

    Returns:
        {dict} -- Dictionary with outcome of the process
    """
    try:
        logger.info("Fetching event data from previous step")
        team = event['body']['team']
        pipeline = event['body']['pipeline']
        stage = event['body']['pipeline_stage']
        dataset = event['body']['dataset']
        peh_id = event['body']['peh_id']
        env = event['body']['env']
        ddb_key = team + "-" + dataset

        logger.info('Initializing Octagon client')
        component = context.function_name.split('-')[-2].title()
        octagon_client = (
            octagon.OctagonClient()
            .with_run_lambda(True)
            .with_configuration_instance(event['body']['env'])
            .build()
        )
        peh.PipelineExecutionHistoryAPI(
            octagon_client).retrieve_pipeline_execution(peh_id)

        logger.info('Initializing DynamoDB config and Interface')
        dynamo_config = DynamoConfiguration()
        dynamo_interface = DynamoInterface(dynamo_config)
        response = dynamo_interface.get_transform_table_item(ddb_key)
        event["body"]["manifest_enabled"] = response["manifest_enabled"]
        event["body"]["manifest_details"] = response["manifest_details"]

        octagon_client.update_pipeline_execution(
            status="{} {} Processing".format(stage, component),
            component=component)
    except Exception as e:
        logger.error("Fatal error", exc_info=True)
        octagon_client.end_pipeline_execution_failed(
            component=component,
            issue_comment="{} {} Error: {}".format(stage, component, repr(e)))
        raise e
    return event
def lambda_handler(event, context):
    """Updates the objects metadata catalog

    Arguments:
        event {dict} -- Dictionary with details on S3 event
        context {dict} -- Dictionary with details on Lambda context

    Returns:
        {dict} -- Dictionary with Processed Bucket and Key
    """
    try:
        logger.info('Fetching event data from previous step')
        object_metadata = json.loads(event)
        stage = object_metadata['pipeline_stage']

        logger.info('Initializing Octagon client')
        component = context.function_name.split('-')[-2].title()
        octagon_client = (octagon.OctagonClient().with_run_lambda(
            True).with_configuration_instance(object_metadata['env']).build())
        object_metadata['peh_id'] = octagon_client.start_pipeline_execution(
            pipeline_name='{}-{}-stage-{}'.format(object_metadata['team'],
                                                  object_metadata['pipeline'],
                                                  stage[-1].lower()),
            dataset_name='{}-{}'.format(object_metadata['team'],
                                        object_metadata['dataset']),
            comment=event)
        # Add business metadata (e.g. object_metadata['project'] = 'xyz')

        logger.info('Initializing DynamoDB config and Interface')
        dynamo_config = DynamoConfiguration()
        dynamo_interface = DynamoInterface(dynamo_config)

        logger.info('Storing metadata to DynamoDB')
        dynamo_interface.update_object_metadata_catalog(object_metadata)

        logger.info('Passing arguments to the next function of the state machine')
        octagon_client.update_pipeline_execution(
            status="{} {} Processing".format(stage, component),
            component=component)
    except Exception as e:
        logger.error("Fatal error", exc_info=True)
        octagon_client.end_pipeline_execution_failed(
            component=component,
            issue_comment="{} {} Error: {}".format(stage, component, repr(e)))
        raise e
    return {'statusCode': 200, 'body': object_metadata}
def stage_transform(self, team, dataset, stage):
    """Returns the relevant stage Transformation

    Arguments:
        team {string} -- Team owning the transformation
        dataset {string} -- Dataset targeted by transformation
        stage {string} -- Pipeline stage whose transform is requested

    Returns:
        class -- Transform object
    """
    stage_suffix = stage[-1].lower()
    dynamo_config = DynamoConfiguration()
    dynamo_interface = DynamoInterface(dynamo_config)
    dataset_transforms = dynamo_interface.get_transform_table_item(
        '{}-{}'.format(team, dataset))['transforms'][
            'stage_{}_transform'.format(stage_suffix)]
    transform_info = "datalake_library.transforms.stage_{}_transforms.{}".format(
        stage_suffix, dataset_transforms)
    return getattr(import_module(transform_info), 'CustomTransform')
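# Usage sketch (mirrors how the handlers below call it): the returned class is
# instantiated and its transform_object / check_job_status methods are invoked.
# The variables are assumed to have been read from the handler's event, as
# elsewhere in this section.
transform_handler = TransformHandler().stage_transform(team, dataset, stage)
response = transform_handler().transform_object(bucket, keys_to_process, team, dataset)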
def lambda_handler(event, context):
    """Routes the message to the SQS queue matching the dataset's workload management priority"""
    try:
        logger.info('Fetching event data from previous step')
        processed_keys = event['body']['keysToProcess']
        team = event['body']['team']
        pipeline = event['body']['pipeline']
        stage = event['body']['pipeline_stage']
        dataset = event['body']['dataset']
        bucket = event['body']['bucket']

        logger.info('Initializing DynamoDB config and Interface')
        dynamo_config = DynamoConfiguration()
        dynamo_interface = DynamoInterface(dynamo_config)

        # Workload management changes: look up the priority assigned to this
        # team-dataset-prefix combination in the WLM control table
        wlm_ddb_table = dynamo_interface.wlm_control_table
        item = dynamo_interface.get_item(
            wlm_ddb_table, {
                "name": "{}-{}-{}".format(team, dataset,
                                          processed_keys[0].split("/")[-2])
            })
        priority = item.get('priority', None)
        print(priority)
        print(''.join([stage[:-1], chr(ord(stage[-1]))]))

        logger.info('Sending messages to right priority SQS queue')
        sqs_config = SQSConfiguration(
            team, dataset, ''.join([stage[:-1], chr(ord(stage[-1]))]),
            priority)  # Workload management changes
        sqs_interface = SQSInterface(
            sqs_config.get_stage_queue_name_wlm)  # Workload management changes
        sqs_interface.send_message_to_fifo_queue(json.dumps(event),
                                                 '{}-{}'.format(team, dataset))
        logger.info("Lambda completed")
        return {'statusCode': 200}
    except Exception as e:
        raise e
def lambda_handler(event, context):
    """Updates the S3 objects metadata catalog

    Arguments:
        event {dict} -- Dictionary with details on Bucket and Keys
        context {dict} -- Dictionary with details on Lambda context

    Returns:
        {dict} -- Dictionary with response
    """
    try:
        logger.info('Fetching event data from previous step')
        bucket = event['body']['bucket']
        processed_keys_path = event['body']['job']['processedKeysPath']
        processed_keys = S3Interface().list_objects(bucket, processed_keys_path)
        team = event['body']['team']
        pipeline = event['body']['pipeline']
        stage = event['body']['pipeline_stage']
        dataset = event['body']['dataset']
        peh_id = event['body']['job']['peh_id']
        keys_to_process = event['body']['keysToProcess']
        s3_path = "post-stage/{}/manifests/{}/{}".format(
            team, dataset, keys_to_process[0].split("/")[-1])

        logger.info('Initializing Octagon client')
        component = context.function_name.split('-')[-2].title()
        octagon_client = (octagon.OctagonClient().with_run_lambda(
            True).with_configuration_instance(event['body']['env']).build())
        peh.PipelineExecutionHistoryAPI(
            octagon_client).retrieve_pipeline_execution(peh_id)

        logger.info('Initializing DynamoDB config and Interface')
        dynamo_config = DynamoConfiguration()
        dynamo_interface = DynamoInterface(dynamo_config)

        logger.info('Storing metadata to DynamoDB')
        for key in processed_keys:
            object_metadata = {
                'bucket': bucket,
                'key': key,
                'size': S3Interface().get_size(bucket, key),
                'last_modified_date': S3Interface().get_last_modified(bucket, key),
                'org': event['body']['org'],
                'app': event['body']['app'],
                'env': event['body']['env'],
                'team': team,
                'pipeline': pipeline,
                'dataset': dataset,
                'stage': 'stage',
                'pipeline_stage': stage,
                'peh_id': peh_id
            }
            dynamo_interface.update_object_metadata_catalog(object_metadata)

        logger.info("Updating manifests control table")
        items = get_manifest_data(bucket, team, dataset, keys_to_process[0])
        ddb_keys = get_ddb_keys(items)
        for ddb_key in ddb_keys:
            dynamo_interface.update_manifests_control_table_stageb(
                ddb_key, "COMPLETED")

        logger.info("Move manifest file to post stage")
        kms_key = KMSConfiguration(team).get_kms_arn
        s3_interface = S3Interface()
        s3_interface.copy_object(bucket, keys_to_process[0], bucket, s3_path,
                                 kms_key=kms_key)

        logger.info("Removing manifest file from pre-stage")
        s3_interface.delete_objects(bucket, keys_to_process[0])

        # Only uncomment if a queue for the next stage exists
        # logger.info('Sending messages to next SQS queue if it exists')
        # sqs_config = SQSConfiguration(team, dataset, ''.join([stage[:-1], chr(ord(stage[-1]) + 1)]))
        # sqs_interface = SQSInterface(sqs_config.get_stage_queue_name)
        # sqs_interface.send_batch_messages_to_fifo_queue(processed_keys, 10, '{}-{}'.format(team, dataset))

        octagon_client.update_pipeline_execution(
            status="{} {} Processing".format(stage, component),
            component=component)
        octagon_client.end_pipeline_execution_success()
    except Exception as e:
        logger.error("Fatal error", exc_info=True)
        octagon_client.end_pipeline_execution_failed(
            component=component,
            issue_comment="{} {} Error: {}".format(stage, component, repr(e)))
        for ddb_key in ddb_keys:
            dynamo_interface.update_manifests_control_table_stageb(
                ddb_key, "FAILED", None, "Failed in Post Update")
        raise e
    return 200
def lambda_handler(event, context):
    """Updates the S3 objects metadata catalog

    Arguments:
        event {dict} -- Dictionary with details on previous processing step
        context {dict} -- Dictionary with details on Lambda context

    Returns:
        {dict} -- Dictionary with outcome of the process
    """
    try:
        logger.info('Fetching event data from previous step')
        processed_keys = event['body']['processedKeys']
        team = event['body']['team']
        pipeline = event['body']['pipeline']
        stage = event['body']['pipeline_stage']
        dataset = event['body']['dataset']
        peh_id = event['body']['peh_id']

        logger.info('Initializing Octagon client')
        component = context.function_name.split('-')[-2].title()
        octagon_client = (octagon.OctagonClient().with_run_lambda(
            True).with_configuration_instance(event['body']['env']).build())
        peh.PipelineExecutionHistoryAPI(
            octagon_client).retrieve_pipeline_execution(peh_id)

        logger.info('Initializing DynamoDB config and Interface')
        dynamo_config = DynamoConfiguration()
        dynamo_interface = DynamoInterface(dynamo_config)

        logger.info('Storing metadata to DynamoDB')
        bucket = S3Configuration().stage_bucket
        for key in processed_keys:
            object_metadata = {
                'bucket': bucket,
                'key': key,
                'size': S3Interface().get_size(bucket, key),
                'last_modified_date': S3Interface().get_last_modified(bucket, key),
                'org': event['body']['org'],
                'app': event['body']['app'],
                'env': event['body']['env'],
                'team': team,
                'pipeline': pipeline,
                'dataset': dataset,
                'stage': 'stage',
                'pipeline_stage': stage,
                'peh_id': peh_id
            }
            dynamo_interface.update_object_metadata_catalog(object_metadata)

        # Workload management changes
        # ---------------------------
        wlm_ddb_table = dynamo_interface.wlm_control_table
        item = dynamo_interface.get_item(
            wlm_ddb_table, {
                "name": "{}-{}-{}".format(team, dataset,
                                          processed_keys[0].split("/")[-2])
            })
        priority = item.get('priority', None)
        print(priority)
        # ---------------------------

        logger.info('Sending messages to next SQS queue if it exists')
        sqs_config = SQSConfiguration(
            team, dataset, ''.join([stage[:-1], chr(ord(stage[-1]) + 1)]),
            priority)  # Workload management changes
        sqs_interface = SQSInterface(
            sqs_config.get_stage_queue_name_wlm)  # Workload management changes
        sqs_interface.send_batch_messages_to_fifo_queue(
            processed_keys, 10, '{}-{}'.format(team, dataset))

        octagon_client.update_pipeline_execution(
            status="{} {} Processing".format(stage, component),
            component=component)
        octagon_client.end_pipeline_execution_success()
    except Exception as e:
        logger.error("Fatal error", exc_info=True)
        octagon_client.end_pipeline_execution_failed(
            component=component,
            issue_comment="{} {} Error: {}".format(stage, component, repr(e)))
        raise e
    return 200
def lambda_handler(event, context):
    """Updates the S3 objects metadata catalog

    Arguments:
        event {dict} -- Dictionary with details on Bucket and Keys
        context {dict} -- Dictionary with details on Lambda context

    Returns:
        {dict} -- Dictionary with response
    """
    def replace_decimals(obj):
        # DynamoDB returns numbers as decimal.Decimal; convert back to int/float
        if isinstance(obj, list):
            for i in range(len(obj)):
                obj[i] = replace_decimals(obj[i])
            return obj
        elif isinstance(obj, dict):
            for k, v in obj.items():
                obj[k] = replace_decimals(v)
            return obj
        elif isinstance(obj, set):
            return set(replace_decimals(i) for i in obj)
        elif isinstance(obj, decimal.Decimal):
            if obj % 1 == 0:
                return int(obj)
            else:
                return float(obj)
        else:
            return obj

    def get_table_partitions(db, tbl):
        glue_response = glue_client.get_table(DatabaseName=db, Name=tbl)
        logger.debug('Glue get_table response: {}'.format(glue_response))
        return glue_response['Table']['PartitionKeys']

    try:
        logger.info('Fetching event data from previous step')
        bucket = event['body']['bucket']
        processed_keys_path = event['body']['job']['processedKeysPath']
        processed_keys = S3Interface().list_objects(bucket, processed_keys_path)
        team = event['body']['team']
        pipeline = event['body']['pipeline']
        stage = event['body']['pipeline_stage']
        dataset1 = event['body']['dataset']
        peh_id = event['body']['job']['peh_id']
        env = event['body']['env']

        logger.info('Initializing Octagon client')
        component = context.function_name.split('-')[-2].title()
        octagon_client = (octagon.OctagonClient().with_run_lambda(
            True).with_configuration_instance(event['body']['env']).build())
        peh.PipelineExecutionHistoryAPI(
            octagon_client).retrieve_pipeline_execution(peh_id)

        logger.info('Initializing DynamoDB config and Interface')
        dynamo_config = DynamoConfiguration()
        dynamo_interface = DynamoInterface(dynamo_config)

        logger.info('Storing metadata to DynamoDB and tagging resulting S3 Objects')
        for key in processed_keys:
            object_metadata = {
                'bucket': bucket,
                'key': key,
                'size': S3Interface().get_size(bucket, key),
                'last_modified_date': S3Interface().get_last_modified(bucket, key),
                'org': event['body']['org'],
                'app': event['body']['app'],
                'env': event['body']['env'],
                'team': team,
                'pipeline': pipeline,
                'dataset': dataset1,
                'stage': 'stage',
                'pipeline_stage': stage,
                'peh_id': peh_id
            }
            dynamo_interface.update_object_metadata_catalog(object_metadata)
            tag_keys = ['org', 'app', 'env', 'team', 'dataset']
            tag_dict = {key: object_metadata[key] for key in tag_keys}
            S3Interface().tag_object(bucket, key, tag_dict)

        # Only uncomment if a queue for the next stage exists
        # logger.info('Sending messages to next SQS queue if it exists')
        # sqs_config = SQSConfiguration(team, dataset, ''.join([stage[:-1], chr(ord(stage[-1]) + 1)]))
        # sqs_interface = SQSInterface(sqs_config.get_stage_queue_name)
        # sqs_interface.send_batch_messages_to_fifo_queue(processed_keys, 10, '{}-{}'.format(team, dataset))

        prestage_table = event['body']['dest_table']['name']
        prestage_db = event['body']['dest_db']
        dest_part_name = event['body']['dest_table']['part_name']
        dest_part_value = event['body']['dest_table']['part_value']
        processOutput = {}
        if dest_part_name != '' and dest_part_value != '':
            partitions = []
            part_dict = {"name": dest_part_name, "value": dest_part_value}
            partitions.append(part_dict)
            processOutput['partitions'] = partitions
        processOutput['processed_keys'] = processed_keys

        ssmresponse = ssmcli.get_parameter(
            Name=f'/SDLF/DDB/{team}/{pipeline}/DependenciesByTable')
        ddb_dependencies_by_table = ssmresponse['Parameter']['Value']
        ddb_table = dynamodb.Table(ddb_dependencies_by_table)
        ssmresponse = ssmcli.get_parameter(
            Name=f'/SDLF/DDB/{team}/{pipeline}/Dependencies')
        ddb_dependencies = ssmresponse['Parameter']['Value']
        consulta = f'{prestage_db.lower()}.{prestage_table.lower()}'
        logger.info(consulta)
        response = ddb_table.get_item(Key={'table_name': consulta})
        logger.info(f'Response {response}')
        if 'Item' in response:
            list_transforms = response['Item']['list_transforms']
            num_of_transforms = len(list_transforms)
            logger.debug(f'Response {response}')
            logger.info(f'This table triggers {num_of_transforms} datasets')
            next_stage = 'B'
            stage_b_message = {}
            for dataset in list_transforms:
                ddb_steps = dynamodb.Table(ddb_dependencies)
                logger.info(dataset)
                response = ddb_steps.get_item(Key={'dataset': dataset})
                logger.info(f'Response {response}')
                num_of_transforms = len(list_transforms)
                item = response['Item']
                dest_table = item['dataset'].split('.')[1]
                dest_db = item['dataset'].split('.')[0]
                dependencies = item['dependencies']
                date_substitutions = replace_decimals(
                    item.get('date_substitutions', []))
                logger.info(f'Dependencies: {dependencies}')
                partition = item.get('partitionColumn', '')
                partition_mask = item.get('partitionPythonMask', None)
                partition_value_formatted = None
                table_check = []
                for table in dependencies:
                    table_name = table['TableName'].split('.')[1]
                    table_db = table['TableName'].split('.')[0]
                    table_partition = table.get('FieldColumn', '')
                    table_partition_format = table.get('DateExpression', None)
                    relativedelta_attributes = replace_decimals(
                        table.get('relativedelta_attributes', None))
                    table_partitions = processOutput.get('partitions', [])
                    usage = table.get('Usage', 'validate').lower()
                    if usage == 'validate':
                        if prestage_db == table_db and prestage_table == table_name:
                            logger.info(
                                f'This table does not update/overwrite {dataset} dataset')
                            break
                        else:
                            logger.debug(
                                f'Table {table_db}.{table_name} is not the trigger table')
                    else:
                        if (prestage_db.lower() == table_db.lower()
                                and prestage_table.lower() == table_name.lower()):
                            # dst_tbl_partitions = get_table_partitions(prestage_db, prestage_table)
                            partition_value_formatted = ''
                            # If dest table has partitions and source table has partitions
                            logger.debug(
                                f'Partition: {partition}, table_partitions: {table_partitions}')
                            if table_partitions and table_partition_format is not None:
                                table_partition_value = table_partitions[0]['value']
                                value = datetime.strptime(
                                    table_partition_value, table_partition_format)
                                target_value = value - relativedelta(
                                    **relativedelta_attributes)
                                partition_value_formatted = target_value.strftime(
                                    partition_mask)
                            logger.info(
                                f'This table {usage.upper()} dataset {dest_table} '
                                f' Partition {partition} = {partition_value_formatted}')
                            # validate(table_db, table_name, table_partitions)
                            stage_b_message['prev_stage_processed_keys'] = processed_keys
                            stage_b_message['team'] = team
                            stage_b_message['pipeline'] = pipeline
                            stage_b_message['pipeline_stage'] = ''.join(
                                [stage[:-1], next_stage])
                            stage_b_message['dataset'] = dataset1
                            stage_b_message['org'] = event['body']['org']
                            stage_b_message['app'] = event['body']['app']
                            stage_b_message['env'] = event['body']['env']
                            stage_b_message['behaviour'] = table['Usage'].lower()
                            stage_b_message['dest_db'] = dest_db
                            stage_b_message['dest_table'] = {}
                            stage_b_message['dest_table']['name'] = dest_table
                            stage_b_message['dest_table']['part_name'] = partition
                            stage_b_message['dest_table']['part_value'] = partition_value_formatted
                            stage_b_message['steps'] = item['steps']
                            stage_b_message['date_substitutions'] = date_substitutions

                            logger.info('Sending messages to next SQS queue if it exists')
                            # Get queue by SSM
                            logger.info(stage_b_message)
                            sqs_config = SQSConfiguration(team, pipeline, stage)
                            sqs_interface = SQSInterface(sqs_config.get_stage_queue_name)
                            sqs_interface.send_message_to_fifo_queue(
                                json.dumps(stage_b_message),
                                '{}-{}'.format(team, pipeline))
                            break
        else:
            logger.info('This table triggers 0 datasets')

        octagon_client.update_pipeline_execution(
            status=f'{stage} {component} Processing', component=component)
        octagon_client.end_pipeline_execution_success()
    except Exception as e:
        logger.error("Fatal error", exc_info=True)
        octagon_client.end_pipeline_execution_failed(
            component=component,
            issue_comment=f'{stage} {component} Error: {repr(e)}')
        raise e
    return 200
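# Illustrative shape of a Dependencies table item as read by the handler above.
# The field names come from the item.get(...) and table[...] accesses; the
# concrete values are placeholders for illustration only:
example_dependencies_item = {
    'dataset': 'analytics_db.sales_daily',   # split into dest_db / dest_table
    'partitionColumn': 'dt',
    'partitionPythonMask': '%Y%m%d',
    'steps': [],                             # passed through to the Stage B message
    'date_substitutions': [],
    'dependencies': [{
        'TableName': 'raw_db.sales',         # split into table_db / table_name
        'Usage': 'overwrite',                # anything other than 'validate' triggers Stage B
        'FieldColumn': 'dt',
        'DateExpression': '%Y%m%d',
        'relativedelta_attributes': {'days': 1}
    }]
}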
def transform_object(self, bucket, keys, team, dataset):
    #######################################################
    # We assume a Glue Job has already been created based on
    # customer needs. This function makes an API call to start it
    #######################################################
    job_name = 'sdlf-{}-{}-glue-job'.format(team, dataset)  # Name of the Glue Job

    ### Create the list of s3 keys to be processed by the glue job
    ### keys will contain a single file for manifest processing
    items = get_manifest_data(bucket, team, dataset, keys[0])
    s3_keys = get_s3_keys(items)
    files = []
    file_names = ""
    for key in s3_keys:
        files.append(key.split('/')[-1])
        if file_names:  # join subsequent keys with a "|" separator
            file_names = file_names + "|" + key
        else:
            file_names = key

    ### Update Manifests Control Table
    ddb_keys = get_ddb_keys(items)
    dynamo_config = DynamoConfiguration()
    dynamo_interface = DynamoInterface(dynamo_config)
    for ddb_key in ddb_keys:
        dynamo_interface.update_manifests_control_table_stageb(
            ddb_key, "PROCESSING")

    # S3 Path where Glue Job outputs processed keys
    # IMPORTANT: Build the output s3_path without the s3://stage-bucket/
    processed_keys_path = 'post-stage/{}/{}'.format(team, dataset)

    # Submitting a new Glue Job
    job_response = client.start_job_run(
        JobName=job_name,
        Arguments={
            # Specify any arguments needed based on bucket and keys
            # (e.g. input/output S3 locations)
            '--JOB_NAME': 'sdlf-{}-{}-glue-job'.format(team, dataset),
            '--SOURCE_LOCATION': 's3://{}/'.format(bucket),
            '--OUTPUT_LOCATION': 's3://{}/{}'.format(bucket, processed_keys_path),
            '--FILE_NAMES': file_names,
            '--job-bookmark-option': 'job-bookmark-enable'
        },
        MaxCapacity=2.0)

    # Collecting details about Glue Job after submission (e.g. jobRunId for Glue)
    json_data = json.loads(json.dumps(job_response, default=datetimeconverter))
    job_details = {
        "jobName": job_name,
        "jobRunId": json_data.get('JobRunId'),
        "jobStatus": 'STARTED',
        "files": list(set(files))
    }

    #######################################################
    # IMPORTANT
    # This function must return a dictionary object with at least a reference to:
    # 1) processedKeysPath (i.e. S3 path where the job outputs data,
    #    without the s3://stage-bucket/ prefix)
    # 2) jobDetails (i.e. a Dictionary holding information about the job,
    #    e.g. jobName and jobId for Glue or clusterId and stepId for EMR.
    #    A jobStatus key MUST be present in jobDetails as it's used to
    #    determine the status of the job)
    # Example: {'processedKeysPath': 'post-stage/engineering/legislators',
    #           'jobDetails': {'jobName': 'sdlf-engineering-legislators-glue-job',
    #                          'jobId': 'jr-2ds438nfinev34',
    #                          'jobStatus': 'STARTED'}}
    #######################################################
    response = {
        'processedKeysPath': processed_keys_path,
        'jobDetails': job_details
    }
    return response
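# The matching job-status waiter (invoked as check_job_status by the handler
# further below) is not shown in this section. A minimal sketch of how such a
# method could poll the Glue run started above, assuming the same boto3 Glue
# client (`client`) and the jobDetails dictionary returned by transform_object;
# this is an illustration, not the framework's actual implementation:
def check_job_status(self, bucket, keys, processed_keys_path, job_details):
    # Poll the Glue job run once and map its state onto the jobStatus key
    job_response = client.get_job_run(
        JobName=job_details['jobName'], RunId=job_details['jobRunId'])
    json_data = json.loads(json.dumps(job_response, default=datetimeconverter))
    # JobRunState is one of STARTING, RUNNING, SUCCEEDED, FAILED, STOPPED, ...
    job_details['jobStatus'] = json_data.get('JobRun', {}).get('JobRunState')
    return {
        'processedKeysPath': processed_keys_path,
        'jobDetails': job_details
    }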
def get_dependent_datasets(team_name, dataset_name):
    dynamo_config = DynamoConfiguration()
    dynamo_interface = DynamoInterface(dynamo_config)
    transform_info = dynamo_interface.get_transform_table_item("{}-{}".format(
        team_name, dataset_name))
    return transform_info["dependencies"]
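# For orientation, an illustrative transform (dataset) table item covering the
# attributes read by get_transform_table_item callers throughout this section;
# the values are placeholders, not actual framework defaults:
example_transform_item = {
    'transforms': {
        'stage_a_transform': 'light_transform_example',
        'stage_b_transform': 'heavy_transform_example'
    },
    'min_items_process': {'stage_a': 1, 'stage_b': 1},
    'max_items_process': {'stage_a': 100, 'stage_b': 100},
    'manifest_enabled': True,
    'manifest_details': {
        'regex_pattern': 'manifest_.*\\.txt',
        'manifest_timeout': 30,        # minutes
        'manifest_data_timeout': 30    # minutes
    },
    'dependencies': []
}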
def lambda_handler(event, context):
    """Load Datafile metadata in manifests control table
    Check if manifest file is available within the threshold

    Arguments:
        event {dict} -- Dictionary with details on previous processing step
        context {dict} -- Dictionary with details on Lambda context

    Returns:
        {dict} -- Dictionary with outcome of the process
    """
    s3_interface = S3Interface()
    stage_bucket = S3Configuration().stage_bucket
    dynamo_config = DynamoConfiguration()
    dynamo_interface = DynamoInterface(dynamo_config)
    current_time = dt.datetime.utcnow()
    current_timestamp = current_time.timestamp()
    try:
        logger.info("Fetching event data from previous step")
        team = event['body']['team']
        pipeline = event['body']['pipeline']
        stage = event['body']['pipeline_stage']
        dataset = event['body']['dataset']
        peh_id = event['body']['peh_id']
        env = event['body']['env']
        bucket = event['body']['bucket']
        input_file_key = event['body']['key']
        input_file_name = input_file_key.split("/")[-1]
        manifest_file_pattern = event['body']['manifest_details']['regex_pattern']
        manifest_timeout = int(
            event['body']['manifest_details']['manifest_timeout'])

        if 'manifest_interval' in event['body']:
            manifest_interval = event['body']['manifest_interval']
        else:
            manifest_interval = current_timestamp

        logger.info('Initializing Octagon client')
        component = context.function_name.split('-')[-2].title()
        octagon_client = (octagon.OctagonClient().with_run_lambda(
            True).with_configuration_instance(env).build())
        peh.PipelineExecutionHistoryAPI(
            octagon_client).retrieve_pipeline_execution(peh_id)
        octagon_client.update_pipeline_execution(
            status="{} {} Processing".format(stage, component),
            component=component)

        ### List S3 Objects for the manifest file in the manifest prefix
        ### For this to work the manifest should have been loaded into DynamoDB
        manifest_key = "pre-stage/{}/manifests/{}/".format(team, dataset)
        processed_manifest_keys = s3_interface.list_objects(
            stage_bucket, manifest_key)

        matched_keys = []
        items = []
        if not processed_manifest_keys:
            logger.info("Manifest File has not been loaded, sleeping for 5 mins")
            time.sleep(300)
            manifest_file_loaded = "False"
        else:
            for manifest_file_key in processed_manifest_keys:
                manifest_file_name = manifest_file_key.split("/")[-1]
                match = re.match(manifest_file_pattern, manifest_file_name)
                if match:
                    matched_keys.append(manifest_file_name)

            ### Query Manifests Control table
            for keys in matched_keys:
                dataset_name = team + "-" + dataset
                try:
                    items.append(
                        dynamo_interface.get_item_from_manifests_control_table(
                            dataset_name, keys, input_file_name))
                except KeyError:
                    logger.info(
                        "Manifest File has not been loaded, sleeping for 5 mins")
                    manifest_file_loaded = "False"

            ### Update Manifests Control table
            if not items:
                logger.info(
                    "Manifest File has not been loaded, sleeping for 5 mins")
                time.sleep(300)
                manifest_file_loaded = "False"
            else:
                ddb_key = {
                    'dataset_name': items[0]['dataset_name'],
                    'datafile_name': items[0]['datafile_name']
                }
                STATUS = "STARTED"
                dynamo_interface.update_manifests_control_table_stagea(
                    ddb_key, STATUS)
                manifest_file_loaded = "True"
                event['body']['manifest_ddb_key'] = ddb_key

        ### Check if Manifest threshold has been exceeded
        if current_timestamp == manifest_interval:
            current_timestamp = dt.datetime.utcnow().timestamp()
        if int((current_timestamp - manifest_interval) / 60) >= manifest_timeout:
            logger.error("Manifest Threshold Breached")
            raise Exception("Manifest Threshold Breached")

        event['body']['manifest_interval'] = manifest_interval
        event['body']['manifest_file_loaded'] = manifest_file_loaded
    except Exception as e:
        logger.error("Fatal error", exc_info=True)
        octagon_client.end_pipeline_execution_failed(
            component=component,
            issue_comment="{} {} Error: {}".format(stage, component, repr(e)))
        raise e
    return event
def lambda_handler(event, context):
    """Processes the manifest file and loads it into DynamoDB

    Arguments:
        event {dict} -- Dictionary with details on previous processing step
        context {dict} -- Dictionary with details on Lambda context

    Returns:
        {dict} -- Dictionary with outcome of the process
    """
    s3_interface = S3Interface()
    stage_bucket = S3Configuration().stage_bucket
    dynamo_config = DynamoConfiguration()
    dynamo_interface = DynamoInterface(dynamo_config)
    try:
        logger.info("Fetching event data from previous step")
        team = event['body']['team']
        pipeline = event['body']['pipeline']
        stage = event['body']['pipeline_stage']
        dataset = event['body']['dataset']
        peh_id = event['body']['peh_id']
        env = event['body']['env']
        bucket = event['body']['bucket']
        manifest_file_key = event['body']['key']
        manifest_file_name = manifest_file_key.split("/")[-1]

        logger.info('Initializing Octagon client')
        component = context.function_name.split('-')[-2].title()
        octagon_client = (octagon.OctagonClient().with_run_lambda(
            True).with_configuration_instance(env).build())
        peh.PipelineExecutionHistoryAPI(
            octagon_client).retrieve_pipeline_execution(peh_id)

        ### Download the manifest file to local
        local_path = s3_interface.download_object(bucket, manifest_file_key)

        ### Process the manifest file
        with open(local_path, "r") as raw_file:
            file_names = [
                file_name.strip().split("/")[-1] for file_name in raw_file
            ]

        ### Load data into manifests control table
        for file in file_names:
            item = {
                "dataset_name": team + "-" + dataset + "-" + manifest_file_name,
                "datafile_name": manifest_file_name + "-" + file
            }
            dynamo_interface.put_item_in_manifests_control_table(item)

        ### Set s3 path for Copy
        s3_path = 'pre-stage/{}/manifests/{}/{}'.format(team, dataset,
                                                        manifest_file_name)
        kms_key = KMSConfiguration(team).get_kms_arn

        ### Copy Manifest File to team/manifest/dataset location
        s3_interface.copy_object(bucket, manifest_file_key, stage_bucket,
                                 s3_path, kms_key=kms_key)

        octagon_client.update_pipeline_execution(
            status="{} {} Processing".format(stage, component),
            component=component)

        processed_keys = [s3_path]
    except Exception as e:
        logger.error("Fatal error", exc_info=True)
        octagon_client.end_pipeline_execution_failed(
            component=component,
            issue_comment="{} {} Error: {}".format(stage, component, repr(e)))
        raise e
    return processed_keys
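# Illustrative input/output for the manifest processing handler above. A
# manifest file lists one data-file path per line; only the trailing file name
# is kept. The paths and names below are made-up examples.
example_manifest_lines = [
    "raw/engineering/legislators/2021-01-01/datafile_1.csv",
    "raw/engineering/legislators/2021-01-01/datafile_2.csv",
]
# For a manifest named "manifest_20210101.txt" and dataset "engineering-legislators",
# the control-table items written above would look like:
example_control_table_item = {
    "dataset_name": "engineering-legislators-manifest_20210101.txt",
    "datafile_name": "manifest_20210101.txt-datafile_1.csv",
}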
def lambda_handler(event, context):
    """Checks if the file to be processed is manifest driven

    Arguments:
        event {dict} -- Dictionary with details on previous processing step
        context {dict} -- Dictionary with details on Lambda context

    Returns:
        {dict} -- Dictionary with Processed Bucket and Key(s)
    """
    try:
        logger.info('Fetching event data from previous step')
        bucket = event['body']['bucket']
        keys_to_process = event['body']['keysToProcess']
        team = event['body']['team']
        pipeline = event['body']['pipeline']
        stage = event['body']['pipeline_stage']
        dataset = event['body']['dataset']
        peh_id = event['body']['peh_id']
        manifest_data_timeout = int(
            event['body']['manifest_details']['manifest_data_timeout'])
        current_time = dt.datetime.utcnow()
        current_timestamp = current_time.timestamp()

        if 'manifest_interval' in event['body']:
            manifest_interval = event['body']['manifest_interval']
        else:
            manifest_interval = current_timestamp

        logger.info('Initializing Octagon client')
        component = context.function_name.split('-')[-2].title()
        octagon_client = (octagon.OctagonClient().with_run_lambda(
            True).with_configuration_instance(event['body']['env']).build())
        peh.PipelineExecutionHistoryAPI(
            octagon_client).retrieve_pipeline_execution(peh_id)

        ### Set max_items_process in datasets table so that the statemachine
        ### only processes 1 manifest file at a time
        ddb_keys = get_ddb_keys(keys_to_process, bucket, team, dataset)
        dynamo_config = DynamoConfiguration()
        dynamo_interface = DynamoInterface(dynamo_config)

        ### Query Manifest Control Table to get the status
        items = []
        logger.info(
            "Querying DynamoDB to check data in manifests control table for Stage A status")
        for ddb_key in ddb_keys:
            try:
                items.append(
                    dynamo_interface.get_item_from_manifests_control_table(
                        ddb_key["dataset_name"], ddb_key["manifest_file_name"],
                        ddb_key["datafile_name"]))
            except KeyError:
                logger.error("The manifest file has not been processed in Stage A")
                raise Exception("Manifest File has not been processed in Stage A")

        ### Check stage a status for data files
        logger.info("Checking to see if all the files have been processed in Stage A")
        status_message_list = []
        failed_status_message_list = []
        wait_message_counter = 0
        failed_message_counter = 0
        for item in items:
            if "stage_a_status" in item:
                stage_a_status = item["stage_a_status"]
            else:
                stage_a_status = "NOT STARTED"
            if stage_a_status != "COMPLETED" and stage_a_status != "FAILED":
                status_message_list.append("Waiting for Data File {}".format(
                    item["datafile_name"].split("-")[-1]))
                wait_message_counter += 1
            elif stage_a_status == "FAILED":
                failed_status_message_list.append(
                    "Data Files Failed in Stage A {}".format(
                        item["datafile_name"].split("-")[-1]))
                failed_message_counter += 1

        if failed_message_counter > 0:
            logger.error("Data File Failure in Stage A, Processing will stop")
            logger.error("The following files have failed in Stage A")
            for message in failed_status_message_list:
                logger.error(message)
            ### Update manifest control table, mark all files as failed in Stage B
            for ddb_key in ddb_keys:
                update_key = dynamo_interface.manifest_keys(
                    ddb_key["dataset_name"], ddb_key["manifest_file_name"],
                    ddb_key["datafile_name"])
                dynamo_interface.update_manifests_control_table_stageb(
                    update_key, "FAILED", None, "Datafile Failed in Stage A")
            raise Exception("Data File Failure in Stage A")

        if wait_message_counter > 0:
            logger.info("Waiting for Data Files to be processed in Stage A")
            for message in status_message_list:
                logger.info(message)
            logger.info("Will sleep for 5 mins")
            time.sleep(300)
            data_file_wait = "True"
            if manifest_interval == current_timestamp:
                current_timestamp = dt.datetime.utcnow().timestamp()
            if int((current_timestamp - manifest_interval) / 60) >= manifest_data_timeout:
                logger.error("Data File Threshold Breached")
                logger.error("Stage B Processing Will Stop Now")
                data_file_wait = "False"
                for message in status_message_list:
                    logger.error(message)
                ### Update manifest control table, mark all files as failed in Stage B
                for ddb_key in ddb_keys:
                    update_key = dynamo_interface.manifest_keys(
                        ddb_key["dataset_name"], ddb_key["manifest_file_name"],
                        ddb_key["datafile_name"])
                    dynamo_interface.update_manifests_control_table_stageb(
                        update_key, "FAILED", None, "Datafile threshold Breached")
                raise Exception("Data File Threshold Breached")
        else:
            logger.info("All files processed in Stage A")
            data_file_wait = "False"
            for ddb_key in ddb_keys:
                update_key = dynamo_interface.manifest_keys(
                    ddb_key["dataset_name"], ddb_key["manifest_file_name"],
                    ddb_key["datafile_name"])
                dynamo_interface.update_manifests_control_table_stageb(
                    update_key, "STARTED")

        event["body"]["manifest_interval"] = manifest_interval
        event["body"]["data_file_wait"] = data_file_wait
        remove_content_tmp()
        octagon_client.update_pipeline_execution(
            status="{} {} Processing".format(stage, component),
            component=component)
    except Exception as e:
        logger.error("Fatal error", exc_info=True)
        octagon_client.end_pipeline_execution_failed(
            component=component,
            issue_comment="{} {} Error: {}".format(stage, component, repr(e)))
        remove_content_tmp()
        raise e
    return event
def lambda_handler(event, context):
    """Updates the S3 objects metadata catalog

    Arguments:
        event {dict} -- Dictionary with details on Bucket and Keys
        context {dict} -- Dictionary with details on Lambda context

    Returns:
        {dict} -- Dictionary with response
    """
    try:
        logger.info('Fetching event data from previous step')
        bucket = event['body']['bucket']
        processed_keys_path = event['body']['job']['processedKeysPath']
        processed_keys = S3Interface().list_objects(bucket, processed_keys_path)
        team = event['body']['team']
        pipeline = event['body']['pipeline']
        stage = event['body']['pipeline_stage']
        dataset = event['body']['dataset']

        logger.info('Initializing Octagon client')
        component = context.function_name.split('-')[-2].title()
        octagon_client = (octagon.OctagonClient().with_run_lambda(
            True).with_configuration_instance(event['body']['env']).build())
        peh.PipelineExecutionHistoryAPI(
            octagon_client).retrieve_pipeline_execution(
                event['body']['job']['peh_id'])

        logger.info('Initializing DynamoDB config and Interface')
        dynamo_config = DynamoConfiguration()
        dynamo_interface = DynamoInterface(dynamo_config)

        logger.info('Storing metadata to DynamoDB')
        for key in processed_keys:
            object_metadata = {
                'bucket': bucket,
                'key': key,
                'team': team,
                'pipeline': pipeline,
                'dataset': dataset,
                'peh_id': event['body']['job']['peh_id'],
                'stage': 'post-stage'
            }
            dynamo_interface.update_object_metadata_catalog(object_metadata)

        # Add Tables to Result Path to Enable Deequ Job
        table_path = "compile_topics_data_csv"
        tables = [table_path]

        # Only uncomment if using Kendra and the index and data source are ALREADY created
        # Data Sync Job
        # kendra_client = boto3.client('kendra')
        # response = kendra_client.start_data_source_sync_job(
        #     Id='ENTER_DATASOURCE_ID',
        #     IndexId='ENTER_INDEX_ID'
        # )

        # Only uncomment if a queue for the next stage exists
        # logger.info('Sending messages to next SQS queue if it exists')
        # sqs_config = SQSConfiguration(team, dataset, ''.join([stage[:-1], chr(ord(stage[-1]) + 1)]))
        # sqs_interface = SQSInterface(sqs_config.get_stage_queue_name)
        # sqs_interface.send_batch_messages_to_fifo_queue(processed_keys, 10, '{}-{}'.format(team, dataset))

        octagon_client.update_pipeline_execution(
            status="{} {} Processing".format(stage, component),
            component=component)
        octagon_client.end_pipeline_execution_success()
    except Exception as e:
        logger.error("Fatal error", exc_info=True)
        octagon_client.end_pipeline_execution_failed(
            component=component,
            issue_comment="{} {} Error: {}".format(stage, component, repr(e)))
        raise e
    return tables
import os
import shutil

from datalake_library.commons import init_logger
from datalake_library.transforms.transform_handler import TransformHandler
from datalake_library import octagon
from datalake_library.octagon import Artifact, EventReasonEnum, peh
from datalake_library.configuration.resource_configs import DynamoConfiguration
from datalake_library.interfaces.dynamo_interface import DynamoInterface

logger = init_logger(__name__)
dynamo_config = DynamoConfiguration()
dynamo_interface = DynamoInterface(dynamo_config)


def remove_content_tmp():
    # Remove contents of the Lambda /tmp folder (not released by default)
    for root, dirs, files in os.walk('/tmp'):
        for f in files:
            os.unlink(os.path.join(root, f))
        for d in dirs:
            shutil.rmtree(os.path.join(root, d))


def lambda_handler(event, context):
    """Calls custom transform developed by user

    Arguments:
        event {dict} -- Dictionary with details on previous processing step
        context {dict} -- Dictionary with details on Lambda context
    """
def lambda_handler(event, context):
    """Calls custom job waiter developed by user

    Arguments:
        event {dict} -- Dictionary with details on previous processing step
        context {dict} -- Dictionary with details on Lambda context

    Returns:
        {dict} -- Dictionary with Processed Bucket, Key(s) and Job Details
    """
    try:
        logger.info('Fetching event data from previous step')
        bucket = event['body']['bucket']
        keys_to_process = event['body']['keysToProcess']
        team = event['body']['team']
        stage = event['body']['pipeline_stage']
        dataset = event['body']['dataset']
        job_details = event['body']['job']['jobDetails']
        processed_keys_path = event['body']['job']['processedKeysPath']

        logger.info('Initializing Octagon client')
        component = context.function_name.split('-')[-2].title()
        octagon_client = (octagon.OctagonClient().with_run_lambda(
            True).with_configuration_instance(event['body']['env']).build())

        logger.info('Querying manifests control table')
        items = get_manifest_data(bucket, team, dataset, keys_to_process[0])
        ddb_keys = get_ddb_keys(items)
        dynamo_config = DynamoConfiguration()
        dynamo_interface = DynamoInterface(dynamo_config)

        logger.info('Checking Job Status with user custom code')
        transform_handler = TransformHandler().stage_transform(team, dataset, stage)
        response = transform_handler().check_job_status(
            bucket, keys_to_process, processed_keys_path,
            job_details)  # custom user code called
        response['peh_id'] = event['body']['job']['peh_id']

        if event['body']['job']['jobDetails']['jobStatus'] == 'FAILED':
            peh.PipelineExecutionHistoryAPI(
                octagon_client).retrieve_pipeline_execution(response['peh_id'])
            octagon_client.end_pipeline_execution_failed(
                component=component,
                issue_comment="{} {} Error: Check Job Logs".format(stage, component))
            for ddb_key in ddb_keys:
                dynamo_interface.update_manifests_control_table_stageb(
                    ddb_key, "FAILED", None, "Glue Job Failed, Check Logs")
    except Exception as e:
        logger.error("Fatal error", exc_info=True)
        peh.PipelineExecutionHistoryAPI(
            octagon_client).retrieve_pipeline_execution(
                event['body']['job']['peh_id'])
        octagon_client.end_pipeline_execution_failed(
            component=component,
            issue_comment="{} {} Error: {}".format(stage, component, repr(e)))
        for ddb_key in ddb_keys:
            dynamo_interface.update_manifests_control_table_stageb(
                ddb_key, "FAILED", None, "Glue Job Failed, Check Logs")
        raise e
    return response
def lambda_handler(event, context):
    """Checks if any items need processing and triggers state machine

    Arguments:
        event {dict} -- Dictionary with no relevant details
        context {dict} -- Dictionary with details on Lambda context
    """
    # TODO: Implement Redrive Logic (through message_group_id)
    try:
        team = event['team']
        pipeline = event['pipeline']
        stage = event['pipeline_stage']
        dataset = event['dataset']
        org = event['org']
        app = event['app']
        env = event['env']
        stage_bucket = S3Configuration().stage_bucket

        dynamo_config = DynamoConfiguration()
        dynamo_interface = DynamoInterface(dynamo_config)
        transform_info = dynamo_interface.get_transform_table_item(
            '{}-{}'.format(team, dataset))
        MIN_ITEMS_TO_PROCESS = int(
            transform_info['min_items_process']['stage_{}'.format(
                stage[-1].lower())])
        MAX_ITEMS_TO_PROCESS = int(
            transform_info['max_items_process']['stage_{}'.format(
                stage[-1].lower())])

        sqs_config = SQSConfiguration(team, dataset, stage)
        queue_interface = SQSInterface(sqs_config.get_stage_queue_name)

        keys_to_process = []
        logger.info('Querying {}-{} objects waiting for processing'.format(
            team, dataset))
        keys_to_process = queue_interface.receive_min_max_messages(
            MIN_ITEMS_TO_PROCESS, MAX_ITEMS_TO_PROCESS)
        # If no keys to process, break
        if not keys_to_process:
            return

        logger.info('{} Objects ready for processing'.format(
            len(keys_to_process)))
        keys_to_process = list(set(keys_to_process))

        response = {
            'statusCode': 200,
            'body': {
                "bucket": stage_bucket,
                "keysToProcess": keys_to_process,
                "team": team,
                "pipeline": pipeline,
                "pipeline_stage": stage,
                "dataset": dataset,
                "org": org,
                "app": app,
                "env": env
            }
        }
        logger.info('Starting State Machine Execution')
        state_config = StateMachineConfiguration(team, pipeline, stage)
        StatesInterface().run_state_machine(
            state_config.get_stage_state_machine_arn, response)
    except Exception as e:
        # If failure, send to DLQ
        if keys_to_process:
            dlq_interface = SQSInterface(sqs_config.get_stage_dlq_name)
            dlq_interface.send_message_to_fifo_queue(json.dumps(response), 'failed')
        logger.error("Fatal error", exc_info=True)
        raise e
    return
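# Illustrative trigger event for the routing handler above. The field names
# come from the event[...] reads; the values are placeholders:
example_event = {
    "team": "engineering",
    "pipeline": "main",
    "pipeline_stage": "StageA",
    "dataset": "legislators",
    "org": "acme",
    "app": "datalake",
    "env": "dev"
}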