Example #1
import runner  # fusion runner used below (assumed project-local module)


def main(session_dir,
         build_name=None,
         interactive_video=None):

    print(build_name)
    if build_name is None:
        files = [session_dir + '/WHEEL.csv',
                 session_dir + '/CAMERA.csv']
    else:
        files = [session_dir + '/WHEEL.csv',
                 session_dir + '/CAMERA-' + build_name + '.csv']

    analysis_results = runner.run_fusion(
        files,
        has_camera=True,
        has_wheel=True,
        data_direc=session_dir,
        is_interact=True,
        is_move_video=False,
        interactive_video=interactive_video,
    )
    # Return the fusion results so callers can inspect them.
    return analysis_results
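
# Hypothetical usage (the session directory is a placeholder, not a real
# path from the original project):
#
#     results = main('sessions/example_drive',
#                    interactive_video='sessions/example_drive/video.mp4')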
Example #2
import ast
import copy

import numpy as np

import runner  # fusion runner (assumed project-local module)

# map_sentiment, is_a_pass and create_event_dict are project helpers used
# below; minimal sketches of two of them appear after this example.


def run_single_test(
        case_name,
        build_name,
        results_list_frames,
        results_list_events,
        # A tuple default avoids the shared mutable-default pitfall.
        event_types=(
            'left_turn',
            'right_turn',
            'left_lane_change',
            'right_lane_change',
        ),
        annotation_file='annotation_josh.txt',
        testing_dir='test_suite/new_test_cases/'):
    
    # Expand each event type with '_bad' and '_good' sentiment variants.
    event_types = [
        name
        for event in event_types
        for name in (event, event + '_bad', event + '_good')
    ]
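    # e.g. ['left_turn', 'left_turn_bad', 'left_turn_good',
    #       'right_turn', 'right_turn_bad', 'right_turn_good', ...]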

    print(case_name)
    print(annotation_file)
    # Read everything we need.
    path_to_test_video = testing_dir + case_name
    print(build_name)
    if build_name is None:
        files = [path_to_test_video + '/WHEEL.csv',
                 path_to_test_video + '/CAMERA.csv']
    else:
        files = [path_to_test_video + '/WHEEL.csv',
                 path_to_test_video + '/CAMERA-' + build_name + '.csv']

    analysis_results = runner.run_fusion(
        files,
        has_camera=True,
        has_wheel=True,
        data_direc=path_to_test_video,
        is_interact=False,
        is_move_video=False,
    )
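    # analysis_results is expected to expose 'df', 'head_events_list',
    # 'lane_events_list', 'head_events_sentiment' and
    # 'lane_events_sentiment'; all five are consumed below.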
    df = analysis_results['df']
    # Use frame index from the test results
    max_index = max(df['frameIndex'])
    # The annotation file holds a Python-literal list of event dicts, so
    # ast.literal_eval is a safer drop-in for the original eval().
    with open(testing_dir + case_name + '/' + annotation_file) as f:
        baseline = ast.literal_eval(f.read())

    # Storage template for per-frame annotations and predictions.
    zero_frames = {
        key: np.zeros(max_index, dtype=np.int8)
        for key in event_types
    }

    ####
    # Each frame array holds a 0 or 1 at every index:
    #   0 - no event is annotated or predicted at that frame
    #   1 - an event is annotated or predicted at that frame
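    # For example, a left turn annotated over frames 120-179 ends up as
    # annotation_frames['left_turn'][120:180] == 1 (slices are end-exclusive).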

    # The results from algorithm annotation
    event_frames = copy.deepcopy(zero_frames)
    # The results from human annotations 
    annotation_frames = copy.deepcopy(zero_frames)
    
    # Mark each baseline (human-annotated) event in annotation_frames.
    for annotation in baseline:
        start = annotation['start']
        end = annotation['end']
        event_type = annotation['type']
        is_good = annotation['is_good']
        annotation_frames[event_type][start:end] = 1
        # Also mark the sentiment-qualified variant of the event.
        annotation_frames[map_sentiment(event_type, is_good)][start:end] = 1

    # Use the annotation code to generate an event list
    head_events_list = analysis_results['head_events_list']
    lane_events_list = analysis_results['lane_events_list']
    head_event_sentiment_list = analysis_results['head_events_sentiment']
    lane_event_sentiment_list = analysis_results['lane_events_sentiment']

    predicted_events_list = head_events_list + lane_events_list
    sentiment_events_list = head_event_sentiment_list + lane_event_sentiment_list 
    
    # Mark the predicted events in event_frames.
    for event, sentiment in zip(predicted_events_list, sentiment_events_list):
        start = int(df.iloc[event[0]]['frameIndex'])
        end = int(df.iloc[event[1]]['frameIndex'])
        event_type = event[2]
        is_good = sentiment[0]
        event_frames[event_type][start:end] += 1
        event_frames[map_sentiment(event_type, is_good)][start:end] += 1
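    # NOTE: using += rather than = 1 means frames covered by overlapping
    # predictions hold values above 1 and are excluded by the == 1 test below.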

    # Perform a by-frame calculation
    event_summaries = dict()
    for event in event_types:
        # indices where prediction of the event was made
        predicted_frame_index = np.where(event_frames[event] == 1)[0]
        # indices where prediction of the event was not made
        not_predicted_frame_index = np.where(event_frames[event] == 0)[0]
        
        annotations = annotation_frames[event]
        predictions = event_frames[event]

        event_summaries['wrong_count_%s' % event] = \
            sum((annotations + predictions) == 1) 
        event_summaries['tp_count_%s' % event] = \
            sum(annotations[predicted_frame_index] == 1)
        event_summaries['fp_count_%s' % event] = \
            sum(annotations[predicted_frame_index] == 0)
        event_summaries['tn_count_%s' % event] = \
            sum(annotations[not_predicted_frame_index] == 0)
        event_summaries['fn_count_%s' % event] = \
            sum(annotations[not_predicted_frame_index] == 1)
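
    # These per-frame counts feed the precision/recall computed below:
    #   precision = tp / (tp + fp),  recall = tp / (tp + fn)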

    # Perform a by-event calculation 
    event_results = create_event_dict(event_types, ['marked', 'given', 'correct'])
    
    # Check each predicted event for a match.
    for event, sentiment in zip(predicted_events_list, sentiment_events_list):
        start = int(df.iloc[event[0]]['frameIndex'])
        end = int(df.iloc[event[1]]['frameIndex'])
        event_type = event[2]
        is_good = sentiment[0]

        # Does this prediction overlap any annotated event?
        (event_pass, sentiment_pass) = is_a_pass(start, end, event_type, is_good, baseline)

        event_results[map_sentiment(event_type, is_good)]['marked'] += 1
        event_results[map_sentiment(event_type, is_good)]['correct'] += int(sentiment_pass)
        event_results[event_type]['marked'] += 1
        event_results[event_type]['correct'] += int(event_pass)

    # Also tally the baseline events, used for recall.
    for annotation in baseline:
        event_type = annotation['type']
        is_good = annotation['is_good']

        event_results[map_sentiment(event_type, is_good)]['given'] += 1
        event_results[event_type]['given'] += 1

    test_results_events = dict(
        case_name=case_name,
        annotation_file=annotation_file,
        )

    test_results_frames = dict(
        case_name=case_name,
        annotation_file=annotation_file,
        )

    for event in event_types:
        
        # Frame based metrics
        wrong_count_key = 'wrong_count_%s' % event
        tp_count_key = 'tp_count_%s' % event
        fp_count_key = 'fp_count_%s' % event
        fn_count_key = 'fn_count_%s' % event
        wrong_count = event_summaries[wrong_count_key]
        tp_count = event_summaries[tp_count_key]
        fp_count = event_summaries[fp_count_key]
        fn_count = event_summaries[fn_count_key]
        # Frame-level metrics; max(..., 1) guards against division by zero
        # when an event has no positive frames at all.
        test_results_frames[event] = round(1 - float(wrong_count) / max_index, 3)
        test_results_frames['%s_precision' % event] = \
                round(tp_count / float(max(tp_count + fp_count, 1)), 3)
        test_results_frames['%s_recall' % event] = \
                round(tp_count / float(max(tp_count + fn_count, 1)), 3)
        
        # Event-level precision (None when nothing was marked).
        if event_results[event]['marked'] == 0:
            test_results_events['%s_event_precision' % event] = None
        else:
            test_results_events['%s_event_precision' % event] = \
                float(event_results[event]['correct']) / float(event_results[event]['marked'])

        # Event-level recall (None when nothing was annotated).
        if event_results[event]['given'] == 0:
            test_results_events['%s_event_recall' % event] = None
        else:
            test_results_events['%s_event_recall' % event] = \
                float(event_results[event]['correct']) / float(event_results[event]['given'])

    results_list_events.append(test_results_events)
    results_list_frames.append(test_results_frames)
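
# The two helpers below are NOT part of the original examples; they are
# minimal sketches of what map_sentiment and create_event_dict are assumed
# to do, inferred from how the code above calls them. The real project
# versions (along with is_a_pass and runner.run_fusion) live elsewhere.

def map_sentiment(event_type, is_good):
    # Assumed behavior: return the sentiment-qualified key created when
    # event_types was expanded above.
    return event_type + ('_good' if is_good else '_bad')


def create_event_dict(event_types, fields):
    # Assumed behavior: one zeroed counter dict per event type.
    return {event: {field: 0 for field in fields} for event in event_types}


# Hypothetical invocation; the case name and build name are placeholders,
# not values from the original test suite:
#
#     frames, events = [], []
#     run_single_test('sample_case', None, frames, events)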