def crop_station_logo_lambda_handler(event, context):
    """
    This lambda function downloads the station logo detection results for the
    given frame (looked up by S3 key) from DDB, crops the detected logo, saves
    it into S3, and updates DDB with a pointer to the cropped image.

    :param event: e.g.
    {
        "parsed": { ... },
        "config": {
            "station_logo_check_enabled": true,
            ...
        },
        "frame": {
            "Stream_ID": "test_1",
            "DateTime": "2020-02-22T22:14:53.375000Z",
            "Segment": "live/test_video_single_pipeline/test_1_00032.ts",
            "Segment_Millis": 0,
            "Segment_Frame_Num": 0,
            "S3_Bucket": "aws-rnd-broadcast-maas-video-processing-dev-crop",
            "S3_Key": "frames/test_video_single_pipeline/test_1/original/2020/02/22/22/14:53:375000.jpg",
            "Frame_Width": 1280,
            "Frame_Height": 720,
            "Resized_S3_Key": "frames/test_video_single_pipeline/test_1/resized/2020/02/22/22/14:53:375000.jpg"
        }
    }
    :return: None
    """
    frame_s3_bucket = event['frame']['S3_Bucket']
    frame_s3_key = event['frame']['S3_Key']

    frame_table_key = {
        'Stream_ID': event['frame']['Stream_ID'],
        'DateTime': event['frame']['DateTime']
    }

    # download the detection results for this frame
    item = get_item_ddb(table_name=DDB_FRAME_TABLE,
                        Key=frame_table_key,
                        AttributesToGet=['Detected_Station_Logos'])

    logo_detection_results = item.get('Detected_Station_Logos', [])

    if logo_detection_results:
        logger.info(logo_detection_results[0])

        # round-trip through JSON to convert DynamoDB Decimals to plain floats
        bb = json.loads(
            json.dumps(logo_detection_results[0]['Geometry']['BoundingBox'],
                       cls=DecimalEncoder))
        name = logo_detection_results[0]['Name']

        # crop the detected logo out of the frame image
        dst_s3_bucket, dst_s3_key = crop_image_from_s3(frame_s3_bucket,
                                                       frame_s3_key,
                                                       bb,
                                                       name,
                                                       dst_s3_bucket=None,
                                                       dst_s3_key=None)

        with DDBUpdateBuilder(key=frame_table_key,
                              table_name=DDB_FRAME_TABLE) as update_builder:
            update_builder.update_attr('Detected_Station_Logo_Crop_S3_KEY',
                                       dst_s3_key)
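
# NOTE: crop_image_from_s3 is imported from a shared helper module not shown in
# this section. Below is a minimal sketch of what it likely does, assuming the
# bounding box uses Rekognition-style relative coordinates (Left/Top/Width/
# Height as ratios of the image dimensions). The function name suffix, default
# destination key, and JPEG output are illustrative assumptions.
import io

import boto3
from PIL import Image

s3 = boto3.client('s3')


def crop_image_from_s3_sketch(src_bucket, src_key, bb, name,
                              dst_s3_bucket=None, dst_s3_key=None):
    """Download an image from S3, crop the bounding box, upload the crop."""
    obj = s3.get_object(Bucket=src_bucket, Key=src_key)
    img = Image.open(io.BytesIO(obj['Body'].read()))

    # convert relative Rekognition coordinates to absolute pixel coordinates
    left = int(bb['Left'] * img.width)
    top = int(bb['Top'] * img.height)
    right = int((bb['Left'] + bb['Width']) * img.width)
    bottom = int((bb['Top'] + bb['Height']) * img.height)
    cropped = img.crop((left, top, right, bottom))

    # assumed default destination: alongside the source frame, keyed by label
    dst_s3_bucket = dst_s3_bucket or src_bucket
    dst_s3_key = dst_s3_key or src_key.replace('original', f'crop/{name}')

    buf = io.BytesIO()
    cropped.save(buf, format='JPEG')
    buf.seek(0)
    s3.put_object(Bucket=dst_s3_bucket, Key=dst_s3_key, Body=buf)
    return dst_s3_bucket, dst_s3_key
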
def test_ddb_update_builder_context_manager(ddb_test_table_stub):
    ddb_test_table_stub.add_response('update_item', {})

    # asserts that ddb.Table.update_item is called upon exiting the context
    # manager via a call to DDBUpdateBuilder.
    with DDBUpdateBuilder(key={'Key': 'test-key'},
                          table_name='test-table') as update_builder:
        update_builder.update_attr('test', 'some-value')
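
# NOTE: DDBUpdateBuilder is defined in a shared module that is not part of this
# section. A minimal sketch of the behaviour the test above relies on, assuming
# the builder buffers attribute updates and issues a single update_item call on
# a clean context exit; the internals here are illustrative, not the project's
# actual implementation.
import boto3


class DDBUpdateBuilderSketch:
    """Collect attribute updates and flush them in one UpdateItem call."""

    def __init__(self, key, table_name, ddb_client=None):
        self.key = key
        self.table = (ddb_client or boto3.resource('dynamodb')).Table(table_name)
        self._attrs = {}

    def update_attr(self, name, value, converter=None):
        # optionally convert the value (e.g. float -> Decimal) before buffering
        self._attrs[name] = converter(value) if converter else value

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        if exc_type is None and self._attrs:
            # build "SET #a0 = :v0, ..." with placeholders so attribute names
            # cannot collide with DynamoDB reserved words
            names = {f'#a{i}': k for i, k in enumerate(self._attrs)}
            values = {f':v{i}': v for i, v in enumerate(self._attrs.values())}
            expr = 'SET ' + ', '.join(f'{n} = :v{i}' for i, n in enumerate(names))
            self.table.update_item(Key=self.key,
                                   UpdateExpression=expr,
                                   ExpressionAttributeNames=names,
                                   ExpressionAttributeValues=values)
        return False  # never swallow exceptions
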
def reuse_segment_detection(reuse_segment_start_dt, segment_start_dt,
                            stream_id, expire_ttl):
    """
    Download the segment analysis to reuse, and copy the info to the new
    segment.

    :return: status summary for each check
    """
    segment_detection_to_reuse = get_item_ddb(
        Key={
            'Stream_ID': stream_id,
            'Start_DateTime': reuse_segment_start_dt
        },
        table_name=DDB_FRAGMENT_TABLE)

    current_segment = get_item_ddb(
        Key={
            'Stream_ID': stream_id,
            'Start_DateTime': segment_start_dt
        },
        table_name=DDB_FRAGMENT_TABLE)

    with DDBUpdateBuilder(
            key={
                'Start_DateTime': segment_start_dt,
                'Stream_ID': stream_id
            },
            table_name=DDB_FRAGMENT_TABLE,
    ) as ddb_update_builder:
        # do not overwrite info that has already been written to the current
        # segment entry
        detections = copy.deepcopy(segment_detection_to_reuse)
        for attr in current_segment.keys():
            detections.pop(attr, None)

        for attr in [
                key for key in detections.keys()
                if not key.startswith('Reused') and key != 'ExpireTTL'
        ]:
            ddb_update_builder.update_attr(attr, detections[attr])

        ddb_update_builder.update_attr('Reused_Detection', True)
        ddb_update_builder.update_attr('ExpireTTL', expire_ttl)
        ddb_update_builder.update_attr(
            'Reused_From',
            detections.get('Reused_From', reuse_segment_start_dt))

    status_summary = {
        'Audio_Status': segment_detection_to_reuse.get('Audio_Status', None),
        'Station_Status': segment_detection_to_reuse.get('Station_Status', None),
        'Team_Status': segment_detection_to_reuse.get('Team_Status', None),
        'Sports_Status': segment_detection_to_reuse.get('Sports_Status', None)
    }

    logger.info(f'check status summary: {status_summary}')
    return status_summary
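
# NOTE: get_item_ddb is a thin wrapper used throughout this section but defined
# elsewhere. A minimal sketch, assuming it forwards extra keyword arguments
# (ProjectionExpression, AttributesToGet, ...) to Table.get_item and returns
# the item's attributes, or an empty dict when the item is absent; the exact
# signature is an assumption based on the call sites above and below.
import boto3


def get_item_ddb_sketch(Key, table_name, ddb_client=None, **kwargs):
    """Fetch a single item and return its attributes, or {} if absent."""
    table = (ddb_client or boto3.resource('dynamodb')).Table(table_name)
    response = table.get_item(Key=Key, **kwargs)
    return response.get('Item', {})
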
def consolidate_fragment_lambda_handler(event, context):
    """
    Processes the gathered results from the previous Map step of the frame
    processing pipeline and writes the results to dynamodb

    :param event: example
    {
        "config": {
            "audio_check_enabled": true,
            "station_logo_check_enabled": false,
            "language_detect_check_enabled": true,
            "team_detect_check_enabled": false,
            "appsync_notify_enabled": true
        },
        "parsed": {
            "isMasterManifest": false,
            "streamId": "test_1",
            "lastSegment": {
                "s3Key": "live/test_video_single_pipeline/test_1_00043.ts",
                "versionId": "_ey0Mw8QDjqVgpCqUuE_v8tYlUVqd2Mo",
                "durationSec": 5.875,
                "startDateTime": "2020-02-22T22:15:59.375000Z",
                "startTimeRelative": 254.3
            },
            "expectedProgram": {
                "Team_Info": "AVL V NOR",
                "Station_Logo": "Prime Video",
                "Stream_ID": "test_1",
                "Event_Title": "EPL AVL V NOR",
                "Event_ID": "EPL-PROG3",
                "Event_Type": "Sports",
                "End_Time": 300,
                "Start_Time": 180,
                "languageCode": "en-en",
                "Segment_Start_Time_In_Loop": 254.3
            }
        },
        "frames": [
            {
                "Stream_ID": "test_1",
                "DateTime": "2020-02-19T22:45:14.938250Z",
                ...
                "S3_Bucket": "aws-rnd-broadcast-maas-video-processing-dev",
                "S3_Key": "frames/test_video_single_pipeline/test_1/original/2020/02/19/22/45:14:938250.jpg"
            },
            {
                "Stream_ID": "test_1",
                "DateTime": "2020-02-19T22:45:17.941250Z",
                ...
                "S3_Bucket": "aws-rnd-broadcast-maas-video-processing-dev",
                "S3_Key": "frames/test_video_single_pipeline/test_1/original/2020/02/19/22/45:17:941250.jpg"
            }
        ]
    }
    :param context: lambda environment context
    :return: None. This step will write its results to DynamoDB
    """
    logger.info("DDB Frame Table: %s | DDB Fragment Table: %s",
                DDB_FRAME_TABLE, DDB_FRAGMENT_TABLE)

    config = event['config']
    stream_id = event['parsed']['streamId']
    segment_start_dt = event['parsed']['lastSegment']['startDateTime']

    # build the list of checks whose required configs are all enabled
    frame_checks = [station_logo_check, team_text_check, sports_check]
    active_configs = {k for k, v in config.items() if v}
    active_checks = [
        check for check in frame_checks
        if set(check.config_names).issubset(active_configs)
    ]

    # exit early if none of the frame configs are active
    if not active_checks:
        logger.info('No active configurations to process. '
                    'Exiting frame consolidation')
        return

    # build a list of attributes to retrieve from DDB from the active checks
    data_attributes = ', '.join(
        {attr for check in active_checks for attr in check.ddb_attrs})

    frame_data = []

    # get ddb attributes for each frame
    for frame in event['frames']:
        item = get_item_ddb(
            Key={'Stream_ID': frame['Stream_ID'], 'DateTime': frame['DateTime']},
            table_name=DDB_FRAME_TABLE,
            ProjectionExpression=data_attributes,
            ddb_client=dynamodb
        )
        frame_data.append(item)

    # update ddb row with results of each check
    with DDBUpdateBuilder(
            key={'Start_DateTime': segment_start_dt, 'Stream_ID': stream_id},
            table_name=DDB_FRAGMENT_TABLE,
            ddb_client=dynamodb
    ) as ddb_update_builder:
        # write attributes to the segment row from each check
        for result_name, result_data in check_processing_helper(active_checks,
                                                                frame_data):
            ddb_update_builder.update_attr(result_name, result_data)

    logger.info('%d frame checks completed', len(active_checks))
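
# NOTE: check_processing_helper is defined alongside the check objects and is
# not shown in this section. A minimal sketch of the contract the handler above
# relies on, assuming each check exposes a callable that reduces the per-frame
# data to (attribute_name, value) pairs; the process() method name is an
# assumption made for illustration.
def check_processing_helper_sketch(active_checks, frame_data):
    """Run each active check over the collected frame data and yield results."""
    for check in active_checks:
        # each check consolidates the per-frame attributes it declared in
        # ddb_attrs into one or more segment-level (name, value) results
        for result_name, result_data in check.process(frame_data):
            yield result_name, result_data
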
def consolidate_team_data_lambda_handler(event, context):
    """
    Processes the team data from previous steps and merges the results from
    text and logo detection

    :param event: example
    {
        "config": {
            "audio_check_enabled": true,
            "station_logo_check_enabled": false,
            "language_detect_check_enabled": true,
            "team_detect_check_enabled": false,
            "appsync_notify_enabled": true
        },
        ...
        "frames": [
            {
                "Stream_ID": "test_1",
                "DateTime": "2020-02-19T22:45:14.938250Z",
                ...
                "S3_Bucket": "aws-rnd-broadcast-maas-video-processing-dev",
                "S3_Key": "frames/test_video_single_pipeline/test_1/original/2020/02/19/22/45:14:938250.jpg"
            },
            {
                "Stream_ID": "test_1",
                "DateTime": "2020-02-19T22:45:17.941250Z",
                ...
                "S3_Bucket": "aws-rnd-broadcast-maas-video-processing-dev",
                "S3_Key": "frames/test_video_single_pipeline/test_1/original/2020/02/19/22/45:17:941250.jpg"
            }
        ]
    }
    :param context: lambda environment context
    :return: None. This step will write its results to DynamoDB
    """
    config = event['config']

    # determine which of the team checks are active
    active_configs = {k for k, v in config.items() if v}
    team_checks_active = {
        k for k in active_configs
        if k in calculate_team_confidence.config_names
    }
    logger.info('Active team checks [%s]', team_checks_active)

    # exit early if none of the team configs are active
    if not team_checks_active:
        logger.info('No team configurations active. '
                    'Exiting frame consolidation')
        return

    # build a list of attributes to retrieve from DDB from the active checks
    data_attributes = ', '.join(calculate_team_confidence.ddb_attrs)

    # get ddb attributes for each frame
    for frame in event['frames']:
        s3_key = frame['S3_Key']
        frame_key = {
            'Stream_ID': frame['Stream_ID'],
            'DateTime': frame['DateTime']
        }

        # get stored data for the frame to process
        frame_data = get_item_ddb(
            Key=frame_key,
            table_name=DDB_FRAME_TABLE,
            ProjectionExpression=data_attributes,
            ddb_client=dynamodb
        )
        converted_data = convert_from_ddb(frame_data)

        # update ddb row with results of each check
        with DDBUpdateBuilder(
                key=frame_key,
                table_name=DDB_FRAME_TABLE,
                ddb_client=dynamodb
        ) as ddb_update_builder:
            # write the consolidated attributes back to the frame row
            for result_name, result_data in consolidate_team_confidence(
                    converted_data):
                ddb_update_builder.update_attr(result_name, result_data,
                                               convert_to_ddb)

        logger.info('Team data consolidated for frame: %s', s3_key)
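
# NOTE: convert_to_ddb / convert_from_ddb come from a shared conversion module
# not shown here. A minimal sketch of the likely behaviour, assuming they exist
# to bridge the float <-> Decimal gap (boto3's DynamoDB resource rejects Python
# floats); these implementations are illustrative stand-ins.
import decimal
import json


def convert_to_ddb_sketch(value):
    """Recursively replace floats with Decimals so DynamoDB accepts the value."""
    return json.loads(json.dumps(value), parse_float=decimal.Decimal)


def convert_from_ddb_sketch(value):
    """Recursively replace Decimals with native ints/floats for processing."""

    def _default(obj):
        if isinstance(obj, decimal.Decimal):
            # preserve integral values as ints, everything else as floats
            return int(obj) if obj % 1 == 0 else float(obj)
        raise TypeError

    return json.loads(json.dumps(value, default=_default))
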
def lambda_handler(event, context):
    """
    :param event: e.g.
    {
        "parsed": { ... },
        "config": {
            "audio_check_enabled": true,
            "station_logo_check_enabled": true,
            "language_detect_check_enabled": false,
            "team_detect_check_enabled": true,
            "sports_detect_check_enabled": true
        },
        "frame": {
            ...
            "S3_Bucket": "aws-rnd-broadcast-maas-video-processing-dev",
            "S3_Key": "frames/test_video_single_pipeline/test_1/original/2020/02/10/23/42:43:646000.jpg",
            "Resized_S3_Key": "frames/test_video_single_pipeline/test_1/resized/2020/02/10/23/42:43:646000.jpg"
        }
    }
    :param context:
    :return:
    """
    frame_info = event['frame']
    bucket = frame_info['S3_Bucket']
    key = frame_info['S3_Key']

    min_confidence = int(os.getenv('SPORTS_MIN_CONFIDENCE', 60))
    model_arn = os.getenv('SPORTS_MODEL_ARN')

    logger.info('Sports Detection for image: %s', os.path.join(bucket, key))

    img_data = {'S3Object': {'Bucket': bucket, 'Name': key}}

    with DDBUpdateBuilder(key={
            'Stream_ID': frame_info['Stream_ID'],
            'DateTime': frame_info['DateTime']
    },
                          table_name=DDB_FRAME_TABLE) as update_builder:
        try:
            response = rekognition.detect_custom_labels(
                Image=img_data,
                MinConfidence=min_confidence,
                ProjectVersionArn=model_arn)
        except ClientError as e:
            logger.error('Error calling detect_custom_labels: %s', e)
            update_builder.update_attr('Sports_Detect_Error',
                                       e.response['Error']['Code'])
            raise e
        else:
            result = response.get('CustomLabels', [])

            if not result:
                logger.info('No sports detected')
            else:
                res_out = [f'{r["Name"]}: {r["Confidence"]}' for r in result]
                logger.info('Sports detected: %s',
                            json.dumps(res_out, indent=4))

            # extract expected program
            expected_program = event['parsed']['expectedProgram']

            for name, value in SportsCheck().execute(expected_program, result):
                logger.info("Writing to %s [%s]: %s", DDB_FRAME_TABLE, name,
                            value)
                update_builder.update_attr(name, value, convert_to_ddb)

    return result
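
# For reference, rekognition.detect_custom_labels responds with a shape like
# the following (abridged). This handler and the logo handler below read only
# Name and Confidence; Geometry is what the later crop step consumes:
#
# {
#     "CustomLabels": [
#         {
#             "Name": "Premier League",          # example label
#             "Confidence": 87.5,
#             "Geometry": {
#                 "BoundingBox": {
#                     "Width": 0.12, "Height": 0.08,
#                     "Left": 0.05, "Top": 0.03
#                 }
#             }
#         }
#     ]
# }
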
def lambda_handler(event, context):
    """
    Process results from preceding steps in the workflow to determine status
    for each check being performed. Persist the computed status and raw data
    into DDB.

    :param event:
    {
        "s3Bucket": "aws-rnd-broadcast-maas-video-processing-dev",
        "s3Key": "live/test_video_single_pipeline/test_1.m3u8",
        "s3VersionId": "J5c7s6IZYD9TIt.BM5Zj53ku1l6rw1M9",
        "config": {
            "audio_check_enabled": true,
            "station_logo_check_enabled": true,
            "language_detect_check_enabled": true,
            "team_detect_check_enabled": true,
            "appsync_notify_enabled": true
        },
        "parsed": {
            "isMasterManifest": false,
            "streamId": "test_1",
            "lastSegment": {
                "s3Key": "live/test_video_single_pipeline/test_1_00039.ts",
                "versionId": "ZQcYoj5uiDgaAU0lkukuHCS2zyh5NXM0",
                "startDateTime": "2020-01-23T21:36:35.290000Z",
                "durationSec": 6
            },
            "expectedProgram": { ... }
        },
        "detections": [
            {
                "volume": {
                    "mean": -29.2,
                    "max": -13.9
                },
                "silence_chunks": []
            },
            {
                "expected": {
                    "languageCode": "en-en",
                    "languageName": "English"
                },
                "detected": {
                    "languageCode": "en-en",
                    "languageName": "English",
                    "confidence": 0.8843594193458557
                }
            },
            []
        ]
    }
    :return:
    """
    logger.info('Received event: %s', json.dumps(event, indent=2))

    segment_start_dt = event['parsed']['lastSegment']['startDateTime']
    stream_id = event['parsed']['streamId']
    segment_relative_start_time = event['parsed']['lastSegment'][
        'startTimeRelative']
    segment_start_time_in_loop = event['parsed']['expectedProgram'][
        'Segment_Start_Time_In_Loop']
    segment_duration = event['parsed']['lastSegment']['durationSec']

    segment_table_key = {
        'Start_DateTime': segment_start_dt,
        'Stream_ID': stream_id
    }

    with DDBUpdateBuilder(key=segment_table_key,
                          table_name=DDB_FRAGMENT_TABLE) as ddb_update_builder:
        ddb_update_builder.update_attr(
            'Start_Time_Sec',
            convert_float_to_dec(segment_relative_start_time))
        ddb_update_builder.update_attr(
            'Start_Time_Sec_In_Loop',
            convert_float_to_dec(segment_start_time_in_loop))
        ddb_update_builder.update_attr('Finished', True)

        audio_on_status = process_audio_check(event, ddb_update_builder,
                                              segment_duration)

    station_status = get_station_logo_status(event, segment_table_key)
    team_status = get_team_status(event, segment_table_key)
    sports_status = get_sports_status(event, segment_table_key)

    status_summary = {
        'Audio_Status': audio_on_status,
        'Station_Status': station_status,
        'Team_Status': team_status,
        'Sports_Status': sports_status
    }

    frames = event['detections'][FRAME_RESULT]
    thumbnail_s3_key = frames[0]['S3_Key']

    event['thumbnailKey'] = thumbnail_s3_key
    event['statusSummary'] = status_summary

    return event
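
# NOTE: convert_float_to_dec is imported from a shared helper. A one-line
# sketch of the likely behaviour, assuming it exists because the DynamoDB
# resource API rejects Python floats; routing through str() avoids the
# binary-float artefacts a direct Decimal(254.3) would carry.
from decimal import Decimal


def convert_float_to_dec_sketch(value):
    """Convert a float to a Decimal that DynamoDB will accept."""
    return Decimal(str(value))
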
def lambda_handler(event, context, logo_check=None):
    """
    This handler invokes a rekognition custom label model to detect and
    classify logos detected in a still frame image.

    :param event: example
    {
        "parsed": { ... },
        "config": {
            "audio_check_enabled": true,
            "station_logo_check_enabled": true,
            "language_detect_check_enabled": false,
            "team_detect_check_enabled": true
        },
        "frame": {
            ...
            "Stream_ID": "test_1",
            "DateTime": "2020-01-23T21:36:35.290000Z",
            "Chunk": "test_1_00016.ts",
            "Millis_In_Chunk": 0,
            "Frame_Num": 0,
            "S3_Bucket": "aws-rnd-broadcast-maas-video-processing-dev",
            "S3_Key": "frames/test_video_single_pipeline/test_1/original/2020/01/23/21/36:35:290000.jpg"
        }
    }
    :param context: lambda context object
    """
    frame_info = event['frame']
    bucket = frame_info['S3_Bucket']
    key = frame_info['S3_Key']

    min_confidence = int(os.getenv('LOGO_MIN_CONFIDENCE', 60))
    model_arn = os.getenv('LOGO_MODEL_ARN')

    logger.info('Logo Detection for image: %s', os.path.join(bucket, key))

    img_data = {'S3Object': {'Bucket': bucket, 'Name': key}}

    with DDBUpdateBuilder(key={'Stream_ID': frame_info['Stream_ID'],
                               'DateTime': frame_info['DateTime']},
                          table_name=DDB_FRAME_TABLE,
                          ddb_client=dynamodb) as update_builder:
        try:
            response = rekognition.detect_custom_labels(
                Image=img_data,
                MinConfidence=min_confidence,
                ProjectVersionArn=model_arn
            )
        except ClientError as e:
            logger.error('Error calling detect_custom_labels: %s', e)
            update_builder.update_attr('Logo_Detect_Error',
                                       e.response['Error']['Code'])
            raise e
        else:
            result = response.get('CustomLabels', [])

            # extract expected program
            expected_program = event['parsed']['expectedProgram']

            if not result:
                logger.info('No Logos detected')
            else:
                res_out = [f'{r["Name"]}: {r["Confidence"]}' for r in result]
                logger.info('Logos detected: %s',
                            json.dumps(res_out, indent=4))

            for name, value in logo_check(expected_program, result):
                logger.info("Writing to %s [%s]: %s", DDB_FRAME_TABLE, name,
                            value)
                update_builder.update_attr(name, value, convert_to_ddb)
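
# NOTE: logo_check defaults to None yet is always invoked, so the deployed
# entry point must bind a concrete check callable (the sports handler above
# does the analogous thing inline with SportsCheck().execute). A hedged sketch
# of one way to wire it, where station_logo_check is a stand-in name for the
# project's real check object:
#
#     from functools import partial
#     handler = partial(lambda_handler, logo_check=station_logo_check.execute)
#
# Tests can then inject a stub directly:
#
#     lambda_handler(event, context, logo_check=fake_check)
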