def reuse_segment_detection(reuse_segment_start_dt, segment_start_dt,
                            stream_id, expire_ttl):
    """
    Download the segment analysis to reuse and copy the info to the new segment entry
    :return: status summary for each check
    """
    segment_detection_to_reuse = get_item_ddb(
        Key={
            'Stream_ID': stream_id,
            'Start_DateTime': reuse_segment_start_dt
        },
        table_name=DDB_FRAGMENT_TABLE)
    current_segment = get_item_ddb(
        Key={
            'Stream_ID': stream_id,
            'Start_DateTime': segment_start_dt
        },
        table_name=DDB_FRAGMENT_TABLE)

    with DDBUpdateBuilder(
            key={
                'Start_DateTime': segment_start_dt,
                'Stream_ID': stream_id
            },
            table_name=DDB_FRAGMENT_TABLE,
    ) as ddb_update_builder:
        # do not overwrite info that has already been written to the current segment entry
        detections = copy.deepcopy(segment_detection_to_reuse)
        for attr in current_segment.keys():
            detections.pop(attr, None)
        for attr in detections:
            if not attr.startswith('Reused') and attr != 'ExpireTTL':
                ddb_update_builder.update_attr(attr, detections[attr])
        ddb_update_builder.update_attr('Reused_Detection', True)
        ddb_update_builder.update_attr('ExpireTTL', expire_ttl)
        # keep pointing at the original source segment if the reused segment was itself reused
        ddb_update_builder.update_attr(
            'Reused_From', detections.get('Reused_From', reuse_segment_start_dt))

    status_summary = {
        'Audio_Status': segment_detection_to_reuse.get('Audio_Status'),
        'Station_Status': segment_detection_to_reuse.get('Station_Status'),
        'Team_Status': segment_detection_to_reuse.get('Team_Status'),
        'Sports_Status': segment_detection_to_reuse.get('Sports_Status')
    }
    logger.info(f'check status summary: {status_summary}')
    return status_summary
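# --- Illustrative sketch (not the project's actual helper) ----------------------
# reuse_segment_detection relies on DDBUpdateBuilder to batch update_attr() calls
# into a single DynamoDB UpdateItem when the with-block exits. A minimal version
# could look like the sketch below; the class name and internals are assumptions,
# and it presumes ddb_client is a boto3 DynamoDB resource.
import boto3


class DDBUpdateBuilderSketch:
    def __init__(self, key, table_name, ddb_client=None):
        self.key = key
        self.table = (ddb_client or boto3.resource('dynamodb')).Table(table_name)
        self.updates = {}

    def update_attr(self, name, value, convert=None):
        # optional converter (e.g. convert_to_ddb) applied before the value is staged
        self.updates[name] = convert(value) if convert else value

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        # only flush the staged attributes if the block completed without error
        if exc_type is None and self.updates:
            names = {f'#a{i}': attr for i, attr in enumerate(self.updates)}
            values = {f':v{i}': self.updates[attr] for i, attr in enumerate(self.updates)}
            expression = 'SET ' + ', '.join(f'#a{i} = :v{i}' for i in range(len(self.updates)))
            self.table.update_item(Key=self.key,
                                   UpdateExpression=expression,
                                   ExpressionAttributeNames=names,
                                   ExpressionAttributeValues=values)
        return False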
def crop_station_logo_lambda_handler(event, context):
    """
    This lambda function downloads the station logo detection results for the given frame
    (looked up by Stream_ID and DateTime) from DDB, crops the detected logo, saves the crop
    to S3, and updates DDB with a pointer to the cropped image.

    :param event: e.g.
    {
      "parsed": {
        ...
      },
      "config": {
        "station_logo_check_enabled": true,
        ...
      },
      "frame": {
        "Stream_ID": "test_1",
        "DateTime": "2020-02-22T22:14:53.375000Z",
        "Segment": "live/test_video_single_pipeline/test_1_00032.ts",
        "Segment_Millis": 0,
        "Segment_Frame_Num": 0,
        "S3_Bucket": "aws-rnd-broadcast-maas-video-processing-dev-crop",
        "S3_Key": "frames/test_video_single_pipeline/test_1/original/2020/02/22/22/14:53:375000.jpg",
        "Frame_Width": 1280,
        "Frame_Height": 720,
        "Resized_S3_Key": "frames/test_video_single_pipeline/test_1/resized/2020/02/22/22/14:53:375000.jpg"
      }
    }
    :return: None
    """
    frame_s3_bucket = event['frame']['S3_Bucket']
    frame_s3_key = event['frame']['S3_Key']
    frame_table_key = {
        'Stream_ID': event['frame']['Stream_ID'],
        'DateTime': event['frame']['DateTime']
    }
    # download detection
    item = get_item_ddb(table_name=DDB_FRAME_TABLE,
                        Key=frame_table_key,
                        AttributesToGet=['Detected_Station_Logos'])

    logo_detection_results = item.get('Detected_Station_Logos', [])
    if logo_detection_results:
        logger.info(logo_detection_results[0])
        # round-trip through JSON with DecimalEncoder to convert DynamoDB Decimals to plain numbers
        bb = json.loads(
            json.dumps(logo_detection_results[0]['Geometry']['BoundingBox'],
                       cls=DecimalEncoder))
        name = logo_detection_results[0]['Name']
        # crop image
        dst_s3_bucket, dst_s3_key = crop_image_from_s3(frame_s3_bucket,
                                                       frame_s3_key,
                                                       bb,
                                                       name,
                                                       dst_s3_bucket=None,
                                                       dst_s3_key=None)

        with DDBUpdateBuilder(key=frame_table_key,
                              table_name=DDB_FRAME_TABLE) as update_builder:
            update_builder.update_attr('Detected_Station_Logo_Crop_S3_KEY',
                                       dst_s3_key)
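# --- Illustrative sketch (assumption) --------------------------------------------
# crop_image_from_s3 is not shown here; a plausible implementation converts the
# Rekognition-style ratio bounding box into pixel coordinates, crops the frame with
# Pillow, and writes the crop back to S3. The key layout and defaults below are
# hypothetical.
import io

import boto3
from PIL import Image

s3 = boto3.client('s3')


def crop_image_from_s3_sketch(bucket, key, bb, name, dst_s3_bucket=None, dst_s3_key=None):
    body = s3.get_object(Bucket=bucket, Key=key)['Body'].read()
    img = Image.open(io.BytesIO(body))
    width, height = img.size
    # BoundingBox values are fractions of the frame dimensions
    left, top = int(bb['Left'] * width), int(bb['Top'] * height)
    box = (left, top, left + int(bb['Width'] * width), top + int(bb['Height'] * height))
    buffer = io.BytesIO()
    img.crop(box).save(buffer, format='JPEG')
    dst_s3_bucket = dst_s3_bucket or bucket
    dst_s3_key = dst_s3_key or key.replace('/original/', f'/cropped/{name}/')
    s3.put_object(Bucket=dst_s3_bucket, Key=dst_s3_key, Body=buffer.getvalue())
    return dst_s3_bucket, dst_s3_key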
def consolidate_fragment_lambda_handler(event, context):
    """
    Processes the gathered results from the previous Map step of the frame processing pipeline
    and writes the results to DynamoDB.

    :param event: example
    {
      "config": {
        "audio_check_enabled": true,
        "station_logo_check_enabled": false,
        "language_detect_check_enabled": true,
        "team_detect_check_enabled": false,
        "appsync_notify_enabled": true
      },
      "parsed": {
        "isMasterManifest": false,
        "streamId": "test_1",
        "lastSegment": {
          "s3Key": "live/test_video_single_pipeline/test_1_00043.ts",
          "versionId": "_ey0Mw8QDjqVgpCqUuE_v8tYlUVqd2Mo",
          "durationSec": 5.875,
          "startDateTime": "2020-02-22T22:15:59.375000Z",
          "startTimeRelative": 254.3
        },
        "expectedProgram": {
          "Team_Info": "AVL V NOR",
          "Station_Logo": "Prime Video",
          "Stream_ID": "test_1",
          "Event_Title": "EPL AVL V NOR",
          "Event_ID": "EPL-PROG3",
          "Event_Type": "Sports",
          "End_Time": 300,
          "Start_Time": 180,
          "languageCode": "en-en",
          "Segment_Start_Time_In_Loop": 254.3
        }
      },
      "frames": [
        {
          "Stream_ID": "test_1",
          "DateTime": "2020-02-19T22:45:14.938250Z",
            ...
          "S3_Bucket": "aws-rnd-broadcast-maas-video-processing-dev",
          "S3_Key": "frames/test_video_single_pipeline/test_1/original/2020/02/19/22/45:14:938250.jpg"
        },
        {
          "Stream_ID": "test_1",
          "DateTime": "2020-02-19T22:45:17.941250Z",
            ...
          "S3_Bucket": "aws-rnd-broadcast-maas-video-processing-dev",
          "S3_Key": "frames/test_video_single_pipeline/test_1/original/2020/02/19/22/45:17:941250.jpg"
        }
      ]
    }
    :param context: lambda environment context
    :return: None.  This step will write its results to DynamoDB
    """
    logger.info("DDB Frame Table: %s | DDB Fragment Table: %s", DDB_FRAME_TABLE, DDB_FRAGMENT_TABLE)
    config = event['config']
    stream_id = event['parsed']['streamId']
    segment_start_dt = event['parsed']['lastSegment']['startDateTime']

    # build the list of checks from the enabled configs

    frame_checks = [station_logo_check, team_text_check, sports_check]

    active_configs = {k for k, v in config.items() if v}
    active_checks = [check for check in frame_checks if set(check.config_names).issubset(active_configs)]

    # test if any of the frame configs are active
    if not active_checks:
        logger.info('No active configurations to process.  Exiting frame consolidation')
        return

    # build a list of attributes to retrieve from DDB from the active checks
    data_attributes = ', '.join({attr for check in active_checks for attr in check.ddb_attrs})
    frame_data = []

    # get ddb attributes for each frame
    for frame in event['frames']:
        item = get_item_ddb(
            Key={'Stream_ID': frame['Stream_ID'], 'DateTime': frame['DateTime']},
            table_name=DDB_FRAME_TABLE,
            ProjectionExpression=data_attributes,
            ddb_client=dynamodb
        )

        frame_data.append(item)

    # update ddb row with results of each check
    with DDBUpdateBuilder(
            key={'Start_DateTime': segment_start_dt, 'Stream_ID': stream_id},
            table_name=DDB_FRAGMENT_TABLE,
            ddb_client=dynamodb
    ) as ddb_update_builder:
        # write attributes to the segment row from each check
        for result_name, result_data in check_processing_helper(active_checks, frame_data):
            ddb_update_builder.update_attr(result_name, result_data)

    logger.info('%d frame checks completed', len(active_checks))
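# --- Illustrative sketch (assumption) --------------------------------------------
# The frame checks used above (station_logo_check, team_text_check, sports_check)
# expose config_names, ddb_attrs and a processing step. One plausible shape is a
# namedtuple plus a helper that fans the collected frame data out to every active
# check; the names below are hypothetical.
from collections import namedtuple

FrameCheck = namedtuple('FrameCheck', ['config_names', 'ddb_attrs', 'process'])


def check_processing_helper_sketch(active_checks, frame_data):
    # run each active check over the per-frame attributes pulled from DDB and
    # yield (attribute_name, value) pairs destined for the fragment row
    for check in active_checks:
        yield from check.process(frame_data)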
def consolidate_team_data_lambda_handler(event, context):
    """
    Processes the team data from previous steps and merges the results from text and
    logo detection

    :param event: example
    {
      "config": {
        "audio_check_enabled": true,
        "station_logo_check_enabled": false,
        "language_detect_check_enabled": true,
        "team_detect_check_enabled": false,
        "appsync_notify_enabled": true
      },
        ...
      "frames": [
        {
          "Stream_ID": "test_1",
          "DateTime": "2020-02-19T22:45:14.938250Z",
            ...
          "S3_Bucket": "aws-rnd-broadcast-maas-video-processing-dev",
          "S3_Key": "frames/test_video_single_pipeline/test_1/original/2020/02/19/22/45:14:938250.jpg"
        },
        {
          "Stream_ID": "test_1",
          "DateTime": "2020-02-19T22:45:17.941250Z",
            ...
          "S3_Bucket": "aws-rnd-broadcast-maas-video-processing-dev",
          "S3_Key": "frames/test_video_single_pipeline/test_1/original/2020/02/19/22/45:17:941250.jpg"
        }
      ]
    }
    :param context: lambda environment context
    :return: None.  This step will write its results to DynamoDB
    """
    config = event['config']

    # add the check to the active_checks list
    active_configs = {k for k, v in config.items() if v}
    team_checks_active = {k for k in active_configs if k in calculate_team_confidence.config_names}

    logger.info('Active team checks [%s]', team_checks_active)

    # test if any of the frame configs are active
    if not team_checks_active:
        logger.info('No team configurations active. Exiting frame consolidation')
        return

    # build a list of attributes to retrieve from DDB from the active checks
    data_attributes = ', '.join(calculate_team_confidence.ddb_attrs)

    # get ddb attributes for each frame
    for frame in event['frames']:
        s3_key = frame['S3_Key']
        frame_key = {'Stream_ID': frame['Stream_ID'], 'DateTime': frame['DateTime']}
        # get stored data for the frame to process
        frame_data = get_item_ddb(
            Key=frame_key,
            table_name=DDB_FRAME_TABLE,
            ProjectionExpression=data_attributes,
            ddb_client=dynamodb
        )

        converted_data = convert_from_ddb(frame_data)

        # update ddb row with results of each check
        with DDBUpdateBuilder(
            key=frame_key,
            table_name=DDB_FRAME_TABLE,
            ddb_client=dynamodb
        ) as ddb_update_builder:
            # write attributes to the segment row from each check
            for result_name, result_data in consolidate_team_confidence(converted_data):
                ddb_update_builder.update_attr(result_name, result_data, convert_to_ddb)

        logger.info('Team data consolidated for frame: %s', s3_key)
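# --- Illustrative sketch (assumption) --------------------------------------------
# convert_from_ddb / convert_to_ddb are not shown; a common approach is to round-trip
# through JSON so DynamoDB Decimal values become native floats for the confidence
# math, and floats become Decimals again before writing back. The helpers below are
# hypothetical stand-ins.
import json
from decimal import Decimal


def convert_from_ddb_sketch(item):
    # Decimal -> float for local processing
    return json.loads(json.dumps(item, default=float))


def convert_to_ddb_sketch(value):
    # float -> Decimal so boto3 accepts the value on write
    return json.loads(json.dumps(value), parse_float=Decimal)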
def get_sports_status(event, segment_table_key):
    # TODO: modify the state machine so this result gets passed from previous processing to save a call to DDB
    item = get_item_ddb(table_name=DDB_FRAGMENT_TABLE,
                        Key=segment_table_key,
                        AttributesToGet=['Sports_Status'])
    return item.get('Sports_Status', None)
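# --- Illustrative sketch (assumption) --------------------------------------------
# get_item_ddb is a thin wrapper used throughout these handlers. Based on the call
# sites above it accepts a Key, a table name, optional projection arguments and an
# optional boto3 resource; a minimal stand-in could look like this (name and
# defaults are assumptions).
import boto3


def get_item_ddb_sketch(Key, table_name, ddb_client=None, **kwargs):
    # forward optional arguments such as ProjectionExpression or AttributesToGet
    table = (ddb_client or boto3.resource('dynamodb')).Table(table_name)
    response = table.get_item(Key=Key, **kwargs)
    # return an empty dict when the item does not exist so .get() lookups are safe
    return response.get('Item', {})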