Example #1
import os

import pandas as pd

# make_dir, HeadAnnotator, and visualize are project-local helpers assumed
# to be importable from the surrounding package.

def compute_events(data_dir, start_key, end_key, label='', output_dir=None):
    if output_dir is not None:
        make_dir(output_dir)

    df_merged = pd.DataFrame()
    for csv in os.listdir(data_dir):
        if csv.endswith('.csv'):
            fi_path = '%s/%s' % (data_dir, csv)
            df = pd.read_csv(fi_path)
            ha = HeadAnnotator()
            event_hash, event_list = ha.annotate_events(df)
            df_p = ha.df
            og_cols = df.columns.tolist()
            fe_cols = df_p.columns.tolist()
            print(event_hash)
            for c in og_cols:
                if c not in fe_cols:
                    df_p[c] = df[c]
            if output_dir is not None:
                sub_dir = '%s/events' % output_dir
                make_dir(sub_dir)
                # assume the start-key and end-key events come in matching pairs;
                # zip truncates to the shorter list if they do not
                for i, (start, end) in enumerate(zip(event_hash[start_key],
                                                     event_hash[end_key])):
                    # copy so the added columns don't mutate a view of df_p
                    df_sub = df_p.loc[start:end].copy()
                    df_sub['original_index'] = df_sub.index
                    # add a class so the training data is 'labeled'
                    df_sub['turn_sentiment'] = label
                    print(csv)
                    df_sub.to_csv('%s/%s-%s.csv' % (sub_dir, csv.split('.')[0], i), index=False)
            if output_dir is not None:
                df_p.to_csv('%s/%s' % (output_dir, csv), index=False)
                visualize.plot_diagnostics(df_p, ha.active_features, '%s/%s' % (output_dir, csv.split('.')[0]), y_col='noseX')
            df_merged = pd.concat([df_merged, df_p])
    return df_merged
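
A minimal usage sketch for compute_events. The paths, event-hash keys, and label below are hypothetical and only illustrate the expected call shape; the start/end keys must match whatever HeadAnnotator.annotate_events stores in its event hash:

df_all = compute_events(
    data_dir='data/raw',            # hypothetical directory of per-recording CSVs
    start_key='head_turn_start',    # hypothetical event-hash keys
    end_key='head_turn_end',
    label='left_turn',              # written into the 'turn_sentiment' column
    output_dir='data/annotated',    # per-file CSVs plus an 'events' subdirectory
)
print(df_all.shape)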
Example #2
import os
import shutil
import time

# fusion, annotation, HeadAnnotator, SignalHeadClassifier, LaneAnnotator,
# SignalLaneClassifier, Visualize, and move_video are project-local and
# assumed to be importable from the surrounding package.

def run_fusion(
        files,
        has_camera=True,
        has_wheel=True,
        data_direc='',
        write_results=True,
        is_move_video=True,
        is_interact=True,
        move_to_app=False,
        interactive_video='drivelog_temp.avi',
        ):
    """Callback that runs fusion on the two data CSV files."""
    df = fusion.fuse_csv(files)
    if 'timestamp_x' not in df.columns:
        df['timestamp_x'] = df['timestamp']
    if write_results:
        df.to_csv('%s/fused.csv' % data_direc)
    if has_camera:
        ### 
        # All events that are dependent on the camera
        ### 
        head_ann = HeadAnnotator()
        head_events_hash, head_events_list = head_ann.annotate_events(df)
        shc = SignalHeadClassifier(head_ann.df, head_ann.events)
        head_events_sentiment = shc.classify_signals()

        # append each head event's sentiment tuple to its event tuple
        for i in range(len(head_events_list)):
            head_events_list[i] = head_events_list[i] + head_events_sentiment[i]
        print(head_events_list)

    if has_wheel:
        ###
        # All events that are dependent on the steering wheel
        ###
        lane_events_hash, lane_events_list = LaneAnnotator(data_direc).annotate_events(df)

    if has_wheel and has_camera:
        slc = SignalLaneClassifier(df, lane_events_list, head_events_list, head_events_hash, head_events_sentiment)
        lane_events_sentiment = slc.classify_signals()

        # append each lane event's sentiment tuple to its event tuple
        for i in range(len(lane_events_list)):
            lane_events_list[i] = lane_events_list[i] + lane_events_sentiment[i]

    # annotate the video
    print("Creating video report.....")
    video_index = 'frameIndex'
    metadata_file = 'annotated_metadata.json'

    def to_frame_spans(events):
        # map DataFrame row indices to video frame indices for annotation
        return [(df.loc[s, video_index], df.loc[e, video_index], t, sent, reason)
                for (s, e, t, sent, reason) in events]

    # Create a fused video if possible
    if is_move_video and has_camera and has_wheel:
        print(head_events_list)
        print(lane_events_list)
        final_fused_video = annotation.annotate_video(
                'drivelog_temp.avi',
                interactive_video,
                to_frame_spans(head_events_list),
                to_frame_spans(lane_events_list),
                metadata_file
                )

        move_video(final_fused_video, data_direc)
        move_video(metadata_file, data_direc)

    # Otherwise, create the two separate ones
    else:
        if is_move_video and has_camera:
            print(head_events_list)

            final_head_video = annotation.annotate_video(
                    'drivelog_temp.avi',
                    interactive_video,
                    to_frame_spans(head_events_list),
                    [],
                    metadata_file
                    )

            move_video(final_head_video, data_direc)
            move_video(metadata_file, data_direc)

        elif is_move_video and has_wheel and len(lane_events_list) > 0:
            print(lane_events_list)
            final_lane_video = annotation.annotate_video(
                    'drivelog_temp.avi',
                    interactive_video,
                    [],
                    to_frame_spans(lane_events_list),
                    metadata_file
                    )

            move_video(final_lane_video, data_direc)
            move_video(metadata_file, data_direc)

        else:
            final_plain_video = annotation.annotate_video(
                    'drivelog_temp.avi',
                    interactive_video,
                    [],
                    [],
                    metadata_file
                    )

    # Also copy drivelog_temp
    if is_move_video and has_camera:
        move_video('drivelog_temp.avi', data_direc)

    video_name = os.path.join(data_direc, interactive_video)
    if move_to_app:

        # Convert the video to mp4 for the web app
        convert_command = 'ffmpeg -i %s %s/annotated_fused.mp4' % (video_name, data_direc)
        os.system(convert_command)
        time.sleep(1)
        time.sleep(1)

        # Replace most recent, and add to data dir
        shutil.rmtree('../app/static/data/recent', ignore_errors=True)
        time.sleep(1)
        shutil.copytree(data_direc, '../app/static/data/recent')
        time.sleep(1)
        dir_name = data_direc.split('/')[-1]
        shutil.copytree(data_direc, '../app/static/data/' + dir_name)

    if has_camera and has_wheel and write_results:
        print("Plotting....")
        vis = Visualize(
                df,
                {
                    "head_turns": head_events_list,
                    "lane_changes": lane_events_list,
                    "head_sentiment": head_events_sentiment,
                    "lane_sentiment": lane_events_sentiment,
                },
                video_name=video_name,
                data_direc=data_direc,
                )
        vis.visualize(is_interact=is_interact)

    if has_wheel and has_camera:
        return dict(
                head_events_hash=head_events_hash,
                head_events_list=head_events_list,
                lane_events_hash=lane_events_hash,
                lane_events_list=lane_events_list,
                head_events_sentiment=head_events_sentiment,
                lane_events_sentiment=lane_events_sentiment,
                df=df,
                )
    else:
        return None
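
A minimal invocation sketch for run_fusion. The file names and directory are hypothetical; what the two CSVs must contain is determined by fusion.fuse_csv:

results = run_fusion(
    ['data/session1/camera.csv', 'data/session1/wheel.csv'],  # hypothetical paths
    data_direc='data/session1',
    write_results=True,
    move_to_app=False,              # skip the ffmpeg conversion and web-app copy
)
if results is not None:             # returned only when both sensors are present
    print(len(results['head_events_list']), 'head events')
    print(len(results['lane_events_list']), 'lane events')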