def get_manifest_data(bucket, team, dataset, manifest_key):
    """Returns a list of items from manifests control table"""
    dynamo_config = DynamoConfiguration()
    dynamo_interface = DynamoInterface(dynamo_config)
    s3_interface = S3Interface()

    local_path = s3_interface.download_object(bucket, manifest_key)
    ddb_keys = []
    items = []
    with open(local_path, "r") as raw_file:
        file_names = [
            file_name.strip().split("/")[-1] for file_name in raw_file
        ]
        for file in file_names:
            ddb_keys.append({
                "dataset_name": team + "-" + dataset,
                "manifest_file_name": manifest_key.split("/")[-1],
                "datafile_name": file
            })
    for ddb_key in ddb_keys:
        try:
            items.append(
                dynamo_interface.get_item_from_manifests_control_table(
                    ddb_key["dataset_name"], ddb_key["manifest_file_name"],
                    ddb_key["datafile_name"]))
        except KeyError:
            logger.error("The manifest file has not been processed in Stage A")
            raise Exception("Manifest File has not been processed in Stage A")

    return items
def get_ddb_keys(keys_to_process, bucket, team, dataset):
    """Returns a list of DynamoDB keys for querying the manifests control table"""
    ddb_keys = []
    for key in keys_to_process:
        s3_interface = S3Interface()
        local_path = s3_interface.download_object(bucket, key)
        with open(local_path, "r") as raw_file:
            file_names = [
                file_name.strip().split("/")[-1] for file_name in raw_file
            ]
            for file in file_names:
                ddb_keys.append({
                    "dataset_name": team + "-" + dataset,
                    "manifest_file_name": key.split("/")[-1],
                    "datafile_name": file
                })
    return ddb_keys
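# Illustrative only: both helpers above expect the manifest to be a plain text
# file with one S3 key per line, of which only the trailing file name is kept.
# With an invented manifest named manifest_20210101.txt containing
#
#   s3://raw-bucket/engineering/legislators/datafile_1.csv
#   s3://raw-bucket/engineering/legislators/datafile_2.csv
#
# a call such as
#   get_ddb_keys(["manifests/manifest_20210101.txt"], bucket, "engineering", "legislators")
# would yield keys of the form
#   {"dataset_name": "engineering-legislators",
#    "manifest_file_name": "manifest_20210101.txt",
#    "datafile_name": "datafile_1.csv"}
# (bucket, team and dataset names here are examples, not values from the pipeline).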
def lambda_handler(event, context):
    """Updates the S3 objects metadata catalog

    Arguments:
        event {dict} -- Dictionary with details on Bucket and Keys
        context {dict} -- Dictionary with details on Lambda context

    Returns:
        {dict} -- Dictionary with response
    """
    try:
        logger.info('Fetching event data from previous step')
        bucket = event['body']['bucket']
        processed_keys_path = event['body']['job']['processedKeysPath']
        processed_keys = S3Interface().list_objects(bucket, processed_keys_path)
        team = event['body']['team']
        pipeline = event['body']['pipeline']
        stage = event['body']['pipeline_stage']
        dataset = event['body']['dataset']
        peh_id = event['body']['job']['peh_id']
        keys_to_process = event['body']['keysToProcess']
        s3_path = "post-stage/{}/manifests/{}/{}".format(
            team, dataset, keys_to_process[0].split("/")[-1])

        logger.info('Initializing Octagon client')
        component = context.function_name.split('-')[-2].title()
        octagon_client = (octagon.OctagonClient().with_run_lambda(
            True).with_configuration_instance(event['body']['env']).build())
        peh.PipelineExecutionHistoryAPI(
            octagon_client).retrieve_pipeline_execution(peh_id)

        logger.info('Initializing DynamoDB config and Interface')
        dynamo_config = DynamoConfiguration()
        dynamo_interface = DynamoInterface(dynamo_config)

        logger.info('Storing metadata to DynamoDB')
        for key in processed_keys:
            object_metadata = {
                'bucket': bucket,
                'key': key,
                'size': S3Interface().get_size(bucket, key),
                'last_modified_date': S3Interface().get_last_modified(bucket, key),
                'org': event['body']['org'],
                'app': event['body']['app'],
                'env': event['body']['env'],
                'team': team,
                'pipeline': pipeline,
                'dataset': dataset,
                'stage': 'stage',
                'pipeline_stage': stage,
                'peh_id': peh_id
            }
            dynamo_interface.update_object_metadata_catalog(object_metadata)

        logger.info("Updating manifests control table")
        items = get_manifest_data(bucket, team, dataset, keys_to_process[0])
        # Build the control-table keys (dataset_name, datafile_name) from the
        # items returned above, matching the table's key schema
        ddb_keys = [{
            "dataset_name": item["dataset_name"],
            "datafile_name": item["datafile_name"]
        } for item in items]

        for ddb_key in ddb_keys:
            dynamo_interface.update_manifests_control_table_stageb(
                ddb_key, "COMPLETED")

        logger.info("Moving manifest file to post-stage")
        kms_key = KMSConfiguration(team).get_kms_arn
        s3_interface = S3Interface()
        s3_interface.copy_object(
            bucket, keys_to_process[0], bucket, s3_path, kms_key=kms_key)

        logger.info("Removing manifest file from pre-stage")
        s3_interface.delete_objects(bucket, keys_to_process[0])

        # Only uncomment if a queue for the next stage exists
        # logger.info('Sending messages to next SQS queue if it exists')
        # sqs_config = SQSConfiguration(team, dataset, ''.join([stage[:-1], chr(ord(stage[-1]) + 1)]))
        # sqs_interface = SQSInterface(sqs_config.get_stage_queue_name)
        # sqs_interface.send_batch_messages_to_fifo_queue(processed_keys, 10, '{}-{}'.format(team, dataset))

        octagon_client.update_pipeline_execution(
            status="{} {} Processing".format(stage, component),
            component=component)
        octagon_client.end_pipeline_execution_success()
    except Exception as e:
        logger.error("Fatal error", exc_info=True)
        octagon_client.end_pipeline_execution_failed(
            component=component,
            issue_comment="{} {} Error: {}".format(stage, component, repr(e)))
        for ddb_key in ddb_keys:
            dynamo_interface.update_manifests_control_table_stageb(
                ddb_key, "FAILED", None, "Failed in Post Update")
        raise e
    return 200
import boto3
import time
import sys
import logging
import traceback
import string
import random

#######################################################
# Use S3 Interface to interact with S3 objects
# For example to download/upload them
#######################################################
from datalake_library.commons import init_logger
from datalake_library.configuration.resource_configs import S3Configuration, KMSConfiguration
from datalake_library.interfaces.s3_interface import S3Interface

s3_interface = S3Interface()

# IMPORTANT: Stage bucket where transformed data must be uploaded
stage_bucket = S3Configuration().stage_bucket

athena_client = boto3.client('athena')
glue_client = boto3.client('glue')

logger = logging.getLogger()
logger.setLevel(logging.DEBUG)


class CustomTransform():
    def __init__(self):
        logger.info("Athena Light Transform initiated")

    def transform_object(self, bucket, body, team, dataset):
        # returns table path, or table path with partition name
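        # The body of this transform is not included above. The sketch below is
        # a minimal, illustrative implementation only: it assumes `body` carries
        # the incoming object's S3 key under 'key', copies that object into the
        # stage bucket under a pre-stage prefix using the interfaces imported
        # above, and returns the new key. The real Athena light transform may
        # instead build and run an Athena query via athena_client.
        key = body['key']
        s3_path = 'pre-stage/{}/{}/{}'.format(team, dataset, key.split('/')[-1])
        kms_key = KMSConfiguration(team).get_kms_arn
        s3_interface.copy_object(bucket, key, stage_bucket, s3_path,
                                 kms_key=kms_key)
        return s3_path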
def lambda_handler(event, context):
    """Updates the S3 objects metadata catalog

    Arguments:
        event {dict} -- Dictionary with details on previous processing step
        context {dict} -- Dictionary with details on Lambda context

    Returns:
        {dict} -- Dictionary with outcome of the process
    """
    try:
        logger.info('Fetching event data from previous step')
        processed_keys = event['body']['processedKeys']
        team = event['body']['team']
        pipeline = event['body']['pipeline']
        stage = event['body']['pipeline_stage']
        dataset = event['body']['dataset']
        peh_id = event['body']['peh_id']

        logger.info('Initializing Octagon client')
        component = context.function_name.split('-')[-2].title()
        octagon_client = (octagon.OctagonClient().with_run_lambda(
            True).with_configuration_instance(event['body']['env']).build())
        peh.PipelineExecutionHistoryAPI(
            octagon_client).retrieve_pipeline_execution(peh_id)

        logger.info('Initializing DynamoDB config and Interface')
        dynamo_config = DynamoConfiguration()
        dynamo_interface = DynamoInterface(dynamo_config)

        logger.info('Storing metadata to DynamoDB')
        bucket = S3Configuration().stage_bucket
        for key in processed_keys:
            object_metadata = {
                'bucket': bucket,
                'key': key,
                'size': S3Interface().get_size(bucket, key),
                'last_modified_date': S3Interface().get_last_modified(bucket, key),
                'org': event['body']['org'],
                'app': event['body']['app'],
                'env': event['body']['env'],
                'team': team,
                'pipeline': pipeline,
                'dataset': dataset,
                'stage': 'stage',
                'pipeline_stage': stage,
                'peh_id': peh_id
            }
            dynamo_interface.update_object_metadata_catalog(object_metadata)

        # Workload management changes
        # ---------------------------
        wlm_ddb_table = dynamo_interface.wlm_control_table
        item = dynamo_interface.get_item(
            wlm_ddb_table, {
                "name": "{}-{}-{}".format(team, dataset,
                                          processed_keys[0].split("/")[-2])
            })
        priority = item.get('priority', None)
        print(priority)
        # ---------------------------

        logger.info('Sending messages to next SQS queue if it exists')
        sqs_config = SQSConfiguration(
            team, dataset, ''.join([stage[:-1], chr(ord(stage[-1]) + 1)]),
            priority)  # Workload management changes
        sqs_interface = SQSInterface(
            sqs_config.get_stage_queue_name_wlm)  # Workload management changes
        sqs_interface.send_batch_messages_to_fifo_queue(
            processed_keys, 10, '{}-{}'.format(team, dataset))

        octagon_client.update_pipeline_execution(
            status="{} {} Processing".format(stage, component),
            component=component)
        octagon_client.end_pipeline_execution_success()
    except Exception as e:
        logger.error("Fatal error", exc_info=True)
        octagon_client.end_pipeline_execution_failed(
            component=component,
            issue_comment="{} {} Error: {}".format(stage, component, repr(e)))
        raise e
    return 200
def lambda_handler(event, context):
    """Updates the S3 objects metadata catalog

    Arguments:
        event {dict} -- Dictionary with details on Bucket and Keys
        context {dict} -- Dictionary with details on Lambda context

    Returns:
        {dict} -- Dictionary with response
    """
    def replace_decimals(obj):
        if isinstance(obj, list):
            for i in range(len(obj)):
                obj[i] = replace_decimals(obj[i])
            return obj
        elif isinstance(obj, dict):
            for k, v in obj.items():
                obj[k] = replace_decimals(v)
            return obj
        elif isinstance(obj, set):
            return set(replace_decimals(i) for i in obj)
        elif isinstance(obj, decimal.Decimal):
            if obj % 1 == 0:
                return int(obj)
            else:
                return float(obj)
        else:
            return obj

    def get_table_partitions(db, tbl):
        glue_response = glue_client.get_table(DatabaseName=db, Name=tbl)
        logger.debug('Glue get_table response: {}'.format(glue_response))
        return glue_response['Table']['PartitionKeys']

    try:
        logger.info('Fetching event data from previous step')
        bucket = event['body']['bucket']
        processed_keys_path = event['body']['job']['processedKeysPath']
        processed_keys = S3Interface().list_objects(bucket, processed_keys_path)
        team = event['body']['team']
        pipeline = event['body']['pipeline']
        stage = event['body']['pipeline_stage']
        dataset1 = event['body']['dataset']
        peh_id = event['body']['job']['peh_id']
        env = event['body']['env']

        logger.info('Initializing Octagon client')
        component = context.function_name.split('-')[-2].title()
        octagon_client = (octagon.OctagonClient().with_run_lambda(
            True).with_configuration_instance(event['body']['env']).build())
        peh.PipelineExecutionHistoryAPI(
            octagon_client).retrieve_pipeline_execution(peh_id)

        logger.info('Initializing DynamoDB config and Interface')
        dynamo_config = DynamoConfiguration()
        dynamo_interface = DynamoInterface(dynamo_config)

        logger.info('Storing metadata to DynamoDB and tagging resulting S3 Objects')
        for key in processed_keys:
            object_metadata = {
                'bucket': bucket,
                'key': key,
                'size': S3Interface().get_size(bucket, key),
                'last_modified_date': S3Interface().get_last_modified(bucket, key),
                'org': event['body']['org'],
                'app': event['body']['app'],
                'env': event['body']['env'],
                'team': team,
                'pipeline': pipeline,
                'dataset': dataset1,
                'stage': 'stage',
                'pipeline_stage': stage,
                'peh_id': peh_id
            }
            dynamo_interface.update_object_metadata_catalog(object_metadata)

            tag_keys = ['org', 'app', 'env', 'team', 'dataset']
            tag_dict = {key: object_metadata[key] for key in tag_keys}
            S3Interface().tag_object(bucket, key, tag_dict)

        # Only uncomment if a queue for the next stage exists
        # logger.info('Sending messages to next SQS queue if it exists')
        # sqs_config = SQSConfiguration(team, dataset, ''.join([stage[:-1], chr(ord(stage[-1]) + 1)]))
        # sqs_interface = SQSInterface(sqs_config.get_stage_queue_name)
        # sqs_interface.send_batch_messages_to_fifo_queue(processed_keys, 10, '{}-{}'.format(team, dataset))

        prestage_table = event['body']['dest_table']['name']
        prestage_db = event['body']['dest_db']
        dest_part_name = event['body']['dest_table']['part_name']
        dest_part_value = event['body']['dest_table']['part_value']
        processOutput = {}
        if dest_part_name != '' and dest_part_value != '':
            partitions = [{"name": dest_part_name, "value": dest_part_value}]
            processOutput['partitions'] = partitions
        processOutput['processed_keys'] = processed_keys

        ssmresponse = ssmcli.get_parameter(
            Name=f'/SDLF/DDB/{team}/{pipeline}/DependenciesByTable')
        ddb_dependencies_by_table = ssmresponse['Parameter']['Value']
        ddb_table = dynamodb.Table(ddb_dependencies_by_table)
        ssmresponse = ssmcli.get_parameter(
            Name=f'/SDLF/DDB/{team}/{pipeline}/Dependencies')
        ddb_dependencies = ssmresponse['Parameter']['Value']

        consulta = f'{prestage_db.lower()}.{prestage_table.lower()}'
        logger.info(consulta)
        response = ddb_table.get_item(Key={'table_name': consulta})
        logger.info(f'Response {response}')
        if 'Item' in response:
            list_transforms = response['Item']['list_transforms']
            num_of_transforms = len(list_transforms)
            logger.debug(f'Response {response}')
            logger.info(f'This table triggers {num_of_transforms} datasets')
            next_stage = 'B'
            stage_b_message = {}
            for dataset in list_transforms:
                ddb_steps = dynamodb.Table(ddb_dependencies)
                logger.info(dataset)
                response = ddb_steps.get_item(Key={'dataset': dataset})
                logger.info(f'Response {response}')
                item = response['Item']
                dest_table = item['dataset'].split('.')[1]
                dest_db = item['dataset'].split('.')[0]
                dependencies = item['dependencies']
                date_substitutions = replace_decimals(
                    item.get('date_substitutions', []))
                logger.info(f'Dependencies: {dependencies}')
                partition = item.get('partitionColumn', '')
                partition_mask = item.get('partitionPythonMask', None)
                partition_value_formatted = None
                for table in dependencies:
                    table_name = table['TableName'].split('.')[1]
                    table_db = table['TableName'].split('.')[0]
                    table_partition = table.get('FieldColumn', '')
                    table_partition_format = table.get('DateExpression', None)
                    relativedelta_attributes = replace_decimals(
                        table.get('relativedelta_attributes', None))
                    table_partitions = processOutput.get('partitions', [])
                    usage = table.get('Usage', 'validate').lower()
                    if usage == 'validate':
                        if prestage_db == table_db and prestage_table == table_name:
                            logger.info(
                                f'This table does not update/overwrite {dataset} dataset')
                            break
                        else:
                            logger.debug(
                                f'Table {table_db}.{table_name} is not the trigger table')
                    else:
                        if (prestage_db.lower() == table_db.lower()
                                and prestage_table.lower() == table_name.lower()):
                            # dst_tbl_partitions = get_table_partitions(prestage_db, prestage_table)
                            partition_value_formatted = ''
                            # If dest table has partitions and source table has partitions
                            logger.debug(
                                f'Partition: {partition}, table_partitions: {table_partitions}')
                            if table_partitions and table_partition_format is not None:
                                table_partition_value = table_partitions[0]['value']
                                value = datetime.strptime(table_partition_value,
                                                          table_partition_format)
                                target_value = value - relativedelta(
                                    **relativedelta_attributes)
                                partition_value_formatted = target_value.strftime(
                                    partition_mask)
                                logger.info(
                                    f'This table {usage.upper()} dataset {dest_table} '
                                    f'Partition {partition} = {partition_value_formatted}')
                            # validate(table_db, table_name, table_partitions)
                            stage_b_message['prev_stage_processed_keys'] = processed_keys
                            stage_b_message['team'] = team
                            stage_b_message['pipeline'] = pipeline
                            stage_b_message['pipeline_stage'] = ''.join(
                                [stage[:-1], next_stage])
                            stage_b_message['dataset'] = dataset1
                            stage_b_message['org'] = event['body']['org']
                            stage_b_message['app'] = event['body']['app']
                            stage_b_message['env'] = event['body']['env']
                            stage_b_message['behaviour'] = table['Usage'].lower()
                            stage_b_message['dest_db'] = dest_db
                            stage_b_message['dest_table'] = {}
                            stage_b_message['dest_table']['name'] = dest_table
                            stage_b_message['dest_table']['part_name'] = partition
                            stage_b_message['dest_table']['part_value'] = partition_value_formatted
                            stage_b_message['steps'] = item['steps']
                            stage_b_message['date_substitutions'] = date_substitutions

                            logger.info('Sending messages to next SQS queue if it exists')
                            # Get queue by SSM
                            logger.info(stage_b_message)
                            sqs_config = SQSConfiguration(team, pipeline, stage)
                            sqs_interface = SQSInterface(sqs_config.get_stage_queue_name)
                            sqs_interface.send_message_to_fifo_queue(
                                json.dumps(stage_b_message),
                                '{}-{}'.format(team, pipeline))
                            break
        else:
            logger.info('This table triggers 0 datasets')

        octagon_client.update_pipeline_execution(
            status=f'{stage} {component} Processing', component=component)
        octagon_client.end_pipeline_execution_success()
    except Exception as e:
        logger.error("Fatal error", exc_info=True)
        octagon_client.end_pipeline_execution_failed(
            component=component,
            issue_comment=f'{stage} {component} Error: {repr(e)}')
        raise e
    return 200
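# Illustrative only: the handler above assumes each Dependencies item is shaped
# roughly like the dict below. The attribute names come from the lookups in the
# code; the concrete values are invented for the example.
#
# {
#     "dataset": "analytics_db.daily_sales",
#     "steps": [...],
#     "partitionColumn": "sales_date",
#     "partitionPythonMask": "%Y-%m-%d",
#     "date_substitutions": [...],
#     "dependencies": [
#         {
#             "TableName": "raw_db.sales",
#             "Usage": "overwrite",
#             "FieldColumn": "ingest_date",
#             "DateExpression": "%Y%m%d",
#             "relativedelta_attributes": {"days": 1}
#         }
#     ]
# }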
def lambda_handler(event, context):
    """Load Datafile metadata in manifests control table
    Check if the manifest file is available within the threshold

    Arguments:
        event {dict} -- Dictionary with details on previous processing step
        context {dict} -- Dictionary with details on Lambda context

    Returns:
        {dict} -- Dictionary with outcome of the process
    """
    s3_interface = S3Interface()
    stage_bucket = S3Configuration().stage_bucket
    dynamo_config = DynamoConfiguration()
    dynamo_interface = DynamoInterface(dynamo_config)
    current_time = dt.datetime.utcnow()
    current_timestamp = current_time.timestamp()

    try:
        logger.info("Fetching event data from previous step")
        team = event['body']['team']
        pipeline = event['body']['pipeline']
        stage = event['body']['pipeline_stage']
        dataset = event['body']['dataset']
        peh_id = event['body']['peh_id']
        env = event['body']['env']
        bucket = event['body']['bucket']
        input_file_key = event['body']['key']
        input_file_name = input_file_key.split("/")[-1]
        manifest_file_pattern = event['body']['manifest_details']['regex_pattern']
        manifest_timeout = int(
            event['body']['manifest_details']['manifest_timeout'])

        if 'manifest_interval' in event['body']:
            manifest_interval = event['body']['manifest_interval']
        else:
            manifest_interval = current_timestamp

        logger.info('Initializing Octagon client')
        component = context.function_name.split('-')[-2].title()
        octagon_client = (octagon.OctagonClient().with_run_lambda(
            True).with_configuration_instance(env).build())
        peh.PipelineExecutionHistoryAPI(
            octagon_client).retrieve_pipeline_execution(peh_id)
        octagon_client.update_pipeline_execution(
            status="{} {} Processing".format(stage, component),
            component=component)

        ### List S3 objects for the manifest file in the manifest prefix
        ### For this to work the manifest should have been loaded into DynamoDB
        manifest_key = "pre-stage/{}/manifests/{}/".format(team, dataset)
        processed_manifest_keys = s3_interface.list_objects(
            stage_bucket, manifest_key)

        matched_keys = []
        items = []

        if not processed_manifest_keys:
            logger.info("Manifest File has not been loaded, sleeping for 5 mins")
            time.sleep(300)
            manifest_file_loaded = "False"
        else:
            for manifest_file_key in processed_manifest_keys:
                manifest_file_name = manifest_file_key.split("/")[-1]
                match = re.match(manifest_file_pattern, manifest_file_name)
                if match:
                    matched_keys.append(manifest_file_name)

            ### Query the manifests control table
            for keys in matched_keys:
                dataset_name = team + "-" + dataset
                try:
                    items.append(
                        dynamo_interface.get_item_from_manifests_control_table(
                            dataset_name, keys, input_file_name))
                except KeyError:
                    logger.info(
                        "Manifest File has not been loaded, sleeping for 5 mins")
                    manifest_file_loaded = "False"

            ### Update the manifests control table
            if not items:
                logger.info(
                    "Manifest File has not been loaded, sleeping for 5 mins")
                time.sleep(300)
                manifest_file_loaded = "False"
            else:
                ddb_key = {
                    'dataset_name': items[0]['dataset_name'],
                    'datafile_name': items[0]['datafile_name']
                }
                STATUS = "STARTED"
                dynamo_interface.update_manifests_control_table_stagea(
                    ddb_key, STATUS)
                manifest_file_loaded = "True"
                event['body']['manifest_ddb_key'] = ddb_key

        ### Check if the manifest threshold has been exceeded
        # manifest_interval records when polling started; on the first pass the
        # two timestamps are equal, so refresh current_timestamp before comparing
        if current_timestamp == manifest_interval:
            current_timestamp = dt.datetime.utcnow().timestamp()

        if int((current_timestamp - manifest_interval) / 60) >= manifest_timeout:
            logger.error("Manifest Threshold Breached")
            raise Exception("Manifest Threshold Breached")

        event['body']['manifest_interval'] = manifest_interval
        event['body']['manifest_file_loaded'] = manifest_file_loaded

    except Exception as e:
        logger.error("Fatal error", exc_info=True)
        octagon_client.end_pipeline_execution_failed(
            component=component,
            issue_comment="{} {} Error: {}".format(stage, component, repr(e)))
        raise e
    return event
def lambda_handler(event, context):
    """Process the manifest file and load it into DynamoDB

    Arguments:
        event {dict} -- Dictionary with details on previous processing step
        context {dict} -- Dictionary with details on Lambda context

    Returns:
        {dict} -- Dictionary with outcome of the process
    """
    s3_interface = S3Interface()
    stage_bucket = S3Configuration().stage_bucket
    dynamo_config = DynamoConfiguration()
    dynamo_interface = DynamoInterface(dynamo_config)

    try:
        logger.info("Fetching event data from previous step")
        team = event['body']['team']
        pipeline = event['body']['pipeline']
        stage = event['body']['pipeline_stage']
        dataset = event['body']['dataset']
        peh_id = event['body']['peh_id']
        env = event['body']['env']
        bucket = event['body']['bucket']
        manifest_file_key = event['body']['key']
        manifest_file_name = manifest_file_key.split("/")[-1]

        logger.info('Initializing Octagon client')
        component = context.function_name.split('-')[-2].title()
        octagon_client = (octagon.OctagonClient().with_run_lambda(
            True).with_configuration_instance(env).build())
        peh.PipelineExecutionHistoryAPI(
            octagon_client).retrieve_pipeline_execution(peh_id)

        ### Download the manifest file locally
        local_path = s3_interface.download_object(bucket, manifest_file_key)

        ### Process the manifest file
        with open(local_path, "r") as raw_file:
            file_names = [
                file_name.strip().split("/")[-1] for file_name in raw_file
            ]

        ### Load data into the manifests control table
        for file in file_names:
            item = {
                "dataset_name": team + "-" + dataset + "-" + manifest_file_name,
                "datafile_name": manifest_file_name + "-" + file
            }
            dynamo_interface.put_item_in_manifests_control_table(item)

        ### Set S3 path for the copy
        s3_path = 'pre-stage/{}/manifests/{}/{}'.format(
            team, dataset, manifest_file_name)
        kms_key = KMSConfiguration(team).get_kms_arn

        ### Copy the manifest file to the team/manifests/dataset location
        s3_interface.copy_object(
            bucket, manifest_file_key, stage_bucket, s3_path, kms_key=kms_key)

        octagon_client.update_pipeline_execution(
            status="{} {} Processing".format(stage, component),
            component=component)

        processed_keys = [s3_path]

    except Exception as e:
        logger.error("Fatal error", exc_info=True)
        octagon_client.end_pipeline_execution_failed(
            component=component,
            issue_comment="{} {} Error: {}".format(stage, component, repr(e)))
        raise e
    return processed_keys
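# Illustrative only: for a manifest named manifest_20210101.txt containing the
# line "s3://raw-bucket/engineering/legislators/datafile_1.csv" (names invented
# for the example), the handler above writes the following item to the
# manifests control table:
#
#   {
#       "dataset_name": "engineering-legislators-manifest_20210101.txt",
#       "datafile_name": "manifest_20210101.txt-datafile_1.csv"
#   }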
def lambda_handler(event, context):
    """Updates the S3 objects metadata catalog

    Arguments:
        event {dict} -- Dictionary with details on Bucket and Keys
        context {dict} -- Dictionary with details on Lambda context

    Returns:
        {dict} -- Dictionary with response
    """
    try:
        logger.info('Fetching event data from previous step')
        bucket = event['body']['bucket']
        processed_keys_path = event['body']['job']['processedKeysPath']
        processed_keys = S3Interface().list_objects(bucket, processed_keys_path)
        team = event['body']['team']
        pipeline = event['body']['pipeline']
        stage = event['body']['pipeline_stage']
        dataset = event['body']['dataset']

        logger.info('Initializing Octagon client')
        component = context.function_name.split('-')[-2].title()
        octagon_client = (octagon.OctagonClient().with_run_lambda(
            True).with_configuration_instance(event['body']['env']).build())
        peh.PipelineExecutionHistoryAPI(
            octagon_client).retrieve_pipeline_execution(
                event['body']['job']['peh_id'])

        logger.info('Initializing DynamoDB config and Interface')
        dynamo_config = DynamoConfiguration()
        dynamo_interface = DynamoInterface(dynamo_config)

        logger.info('Storing metadata to DynamoDB')
        for key in processed_keys:
            object_metadata = {
                'bucket': bucket,
                'key': key,
                'team': team,
                'pipeline': pipeline,
                'dataset': dataset,
                'peh_id': event['body']['job']['peh_id'],
                'stage': 'post-stage'
            }
            dynamo_interface.update_object_metadata_catalog(object_metadata)

        # Add tables to the result path to enable the Deequ job
        table_path = "compile_topics_data_csv"
        tables = [table_path]

        # Only uncomment if using Kendra and index and data source ALREADY created
        # Data Sync Job
        # kendra_client = boto3.client('kendra')
        # response = kendra_client.start_data_source_sync_job(
        #     Id='ENTER_DATASOURCE_ID',
        #     IndexId='ENTER_INDEX_ID'
        # )

        # Only uncomment if a queue for the next stage exists
        # logger.info('Sending messages to next SQS queue if it exists')
        # sqs_config = SQSConfiguration(team, dataset, ''.join([stage[:-1], chr(ord(stage[-1]) + 1)]))
        # sqs_interface = SQSInterface(sqs_config.get_stage_queue_name)
        # sqs_interface.send_batch_messages_to_fifo_queue(processed_keys, 10, '{}-{}'.format(team, dataset))

        octagon_client.update_pipeline_execution(
            status="{} {} Processing".format(stage, component),
            component=component)
        octagon_client.end_pipeline_execution_success()
    except Exception as e:
        logger.error("Fatal error", exc_info=True)
        octagon_client.end_pipeline_execution_failed(
            component=component,
            issue_comment="{} {} Error: {}".format(stage, component, repr(e)))
        raise e
    return tables
def lambda_handler(event, context):
    """Updates the S3 objects metadata catalog

    Arguments:
        event {dict} -- Dictionary with details on Bucket and Keys
        context {dict} -- Dictionary with details on Lambda context

    Returns:
        {dict} -- Dictionary with response
    """
    try:
        logger.info('Fetching event data from previous step')
        bucket = event['body']['bucket']
        processed_keys_path = event['body']['job']['processedKeysPath']
        processed_keys = S3Interface().list_objects(bucket, processed_keys_path)
        team = event['body']['team']
        pipeline = event['body']['pipeline']
        stage = event['body']['pipeline_stage']
        dataset = event['body']['dataset']

        # Lastly, let's run the Comprehend multi-label classification job
        # with the training data we created earlier

        # Connect to the Comprehend client
        comprehend_client = boto3.client('comprehend')

        # Set parameters for the classifier training job and get the KMS key for encryption
        input_key = "post-stage/{}/{}/multilabel_classification/training_data.csv".format(
            team, dataset)
        s3_input = "s3://{}/{}".format(bucket, input_key)
        output_key = "post-stage/{}/{}/multilabel_classification/".format(
            team, dataset)
        s3_output = "s3://{}/{}".format(bucket, output_key)
        kms_key = KMSConfiguration(team).get_kms_arn
        name = "MedicalResearchTopicClassifier"
        aws_account_id = context.invoked_function_arn.split(":")[4]
        data_access_role = 'arn:aws:iam::{}:role/sdlf-{}-{}-create-classifier-b'.format(
            aws_account_id, team, pipeline)

        # Start the multi-label classifier training job
        response = comprehend_client.create_document_classifier(
            DocumentClassifierName=name,
            DataAccessRoleArn=data_access_role,
            Tags=[
                {
                    'Key': 'Framework',
                    'Value': 'sdlf'
                },
            ],
            InputDataConfig={
                'S3Uri': s3_input,
                'LabelDelimiter': '|'
            },
            OutputDataConfig={
                'S3Uri': s3_output,
                'KmsKeyId': kms_key
            },
            LanguageCode='en',
            VolumeKmsKeyId=kms_key,
            Mode='MULTI_LABEL')
    except Exception as e:
        logger.error("Fatal error", exc_info=True)
        raise e
    return 200
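# Classifier training runs asynchronously and can take a long time. A minimal
# sketch of how a separate process could check on the classifier created above,
# assuming the ARN from the create_document_classifier response is passed to
# it; this helper is illustrative and not part of the pipeline's own code.
import time

import boto3


def wait_for_classifier(classifier_arn, poll_seconds=60):
    """Polls Comprehend until the classifier finishes training or errors out."""
    comprehend_client = boto3.client('comprehend')
    while True:
        status = comprehend_client.describe_document_classifier(
            DocumentClassifierArn=classifier_arn
        )['DocumentClassifierProperties']['Status']
        if status in ('TRAINED', 'IN_ERROR'):
            return status
        time.sleep(poll_seconds)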