Example #1
0
            def anay_video():
                """Pick a video for this camera, let the user select a region
                of interest on its first frame, then run the VideoAnalysis
                pipeline (mask -> Mask R-CNN -> frame selection -> clipping).
                """
                in_path = filedialog.askopenfilename(initialdir="./Cameras/" +
                                                     camera_ip + "/Videos")
                out_path = "./Cameras/" + camera_ip + "/Videos" + '/out_temp.mp4'
                json_file_path = "./Cameras/" + camera_ip + "/" + camera_ip + "_metadata.json"
                # Camera metadata (resolution, frame rate) saved next to the videos.
                with open(json_file_path, "r") as json_file:
                    json_data = json.load(json_file)

                # Grab the first frame so the user has a still image to draw
                # the region of interest on; the temp file is deleted after.
                cap = cv2.VideoCapture(in_path)
                flag, frame = cap.read()
                temp_img_path = './Cameras/' + camera_ip + '/temp.jpg'
                cv2.imwrite(temp_img_path, frame)
                ori_reso = (frame.shape[1], frame.shape[0])
                x, _ = region_select(camera_ip)
                os.remove(temp_img_path)

                analyzer = VideoAnalysis(in_path,
                                         out_path,
                                         json_data['resolution'],
                                         ori_reso,
                                         30,
                                         json_data['frame_rate'],
                                         sorted_refPt=x)
                analyzer.mask_video()
                detections = analyzer.mask_rcnn_apply()
                keep_frames = analyzer.find_interested_frames(detections, 2)
                analyzer.clip_video(keep_frames)
    def run(video_id, video):
        '''
        Main function for running all feature extraction steps.
        TODO: move this to "__main__"

        ARGS:
            video_id: unique video identifier
            video: path to video file (mp4 format)
        RETURNS:
            (img_quality_df, video_df): per-frame image-quality metrics and
            per-frame optical-flow metrics, both as pandas DataFrames
        '''

        ## Go through every frame in the video and extract features
        cap = cv2.VideoCapture(video)
        frame = None

        ## Output dataframes
        video_df = pd.DataFrame()
        img_quality_df = pd.DataFrame()

        ## NOTE(review): cv2.cv.CV_CAP_PROP_* constants exist only in
        ## OpenCV 2.x (newer releases use cv2.CAP_PROP_*) -- confirm the
        ## target OpenCV version before upgrading.
        num_frames = cap.get(cv2.cv.CV_CAP_PROP_FRAME_COUNT)

        def annotate(df, i_frame):
            ## Attach the identifying columns shared by every per-frame row
            ## (previously duplicated inline for frame_series and flow_series).
            df['video_id'] = video_id
            df['frame_number'] = i_frame
            df['time'] = cap.get(cv2.cv.CV_CAP_PROP_POS_MSEC) / 1000.
            return df

        ## Set debug nonzero to skip extraction and reload the previously
        ## pickled image-quality results instead.
        debug = 0
        if debug == 0:
            i_frame = 0
            while i_frame < num_frames:
                ## Progress logging (parenthesized print works in Py2 and Py3)
                if i_frame % 100 == 0:
                    print(i_frame)

                ## Keep the previous frame around for optical flow.
                prev_frame = frame.copy() if frame is not None else None

                ret, frame = cap.read()
                ## If there are no more frames, break out of loop
                if not ret:
                    break

                frame_series = annotate(pd.DataFrame(index=[0]), i_frame)

                ##=======================
                ## Image quality metrics
                ##=======================
                blur_df = SceneAnalysis.get_blur(frame)
                color_df = SceneAnalysis.get_hsv_hists(frame)
                img_quality_df = img_quality_df.append(
                    pd.concat([frame_series, blur_df, color_df], axis=1),
                    ignore_index=True)

                ##=======================
                ## Video/motion metrics
                ##=======================
                ## Optical flow needs two frames, so the first frame has none.
                if prev_frame is not None:
                    flow_df = VideoUtilities.optical_flow_on_frame(frame, prev_frame)
                    flow_series = annotate(flow_df.copy(), i_frame)
                    video_df = video_df.append(flow_series, ignore_index=True)

                i_frame += 1
        else:
            img_quality_df = pd.read_pickle(video_id + '.img_quality.pkl')

        ## After completing the frame-by-frame analysis, run video metrics
        ## scene changes
        scene_change_df = VideoAnalysis.detect_cut(video, img_quality_df['time'])
        img_quality_df['is_scene_transition'] = scene_change_df['is_scene_transition'].copy()

        ## Pickle model and save it to S3 or local directory
        img_quality_df.to_pickle(video_id + '.img_quality.pkl')
        video_df.to_pickle(video_id + '.flow.pkl')
        return img_quality_df, video_df
                    flow_series = flow_df.copy()
                    ##NOTE: the below code is duplicated above - bad style
                    flow_series['video_id'] = video_id
                    flow_series['frame_number'] = i_frame #cap.get(cv2.cv.CV_CAP_PROP_POS_FRAMES-1)
                    flow_series['time'] = cap.get(cv2.cv.CV_CAP_PROP_POS_MSEC) / 1000.
                    #cat = pd.concat([frame_series, cat], axis=1)
                    video_df = video_df.append(flow_series, ignore_index=True)

                i_frame += 1
        else:
            img_quality_df = pd.read_pickle(video_id + '.img_quality.pkl')

        ## After completing the frame-by-frame analysis, run video metrics
        ## scene changes
        scene_change_df = VideoAnalysis.detect_cut(video, img_quality_df['time'])
        img_quality_df['is_scene_transition'] = scene_change_df['is_scene_transition'].copy()

        ## Pickle model and save it to S3 or local directory
        img_quality_df.to_pickle(video_id + '.img_quality.pkl')
        video_df.to_pickle(video_id + '.flow.pkl')
        return img_quality_df, video_df

if __name__ == "__main__":
    ## Ad-hoc driver: re-run scene-change detection on a previously
    ## pickled per-frame analysis for one hard-coded video.
    video = "../media/CKeLfaOl0Qk.mp4"
    video_df = pd.read_pickle('CKeLfaOl0Qk.img_quality.pkl')
    video_series = video_df['time']
    scene_change_df = VideoAnalysis.detect_cut(video, video_series)
    #print scene_change_df[scene_change_df['is_scene_transition'] == 1]
    ## Flag each frame that sits on a detected scene cut, then show them.
    video_df['is_scene_transition'] = scene_change_df['is_scene_transition'].copy()
    print video_df[video_df['is_scene_transition'] == 1]
Example #4
0
    def run(video_id, video):
        '''
        Main function for running all feature extraction steps.
        TODO: move this to "__main__"

        ARGS:
            video_id: unique video identifier
            video: path to video file (mp4 format)
        RETURNS:
            (img_quality_df, video_df): per-frame image-quality metrics and
            per-frame optical-flow metrics, both as pandas DataFrames
        '''

        ## Go through every frame in the video and extract features
        cap = cv2.VideoCapture(video)
        frame = None

        ## Output dataframes
        video_df = pd.DataFrame()
        img_quality_df = pd.DataFrame()

        ## NOTE(review): cv2.cv.CV_CAP_PROP_* constants exist only in
        ## OpenCV 2.x (newer releases use cv2.CAP_PROP_*) -- confirm the
        ## target OpenCV version before upgrading.
        num_frames = cap.get(cv2.cv.CV_CAP_PROP_FRAME_COUNT)

        def annotate(df, i_frame):
            ## Attach the identifying columns shared by every per-frame row
            ## (previously duplicated inline for frame_series and flow_series).
            df['video_id'] = video_id
            df['frame_number'] = i_frame
            df['time'] = cap.get(cv2.cv.CV_CAP_PROP_POS_MSEC) / 1000.
            return df

        ## Set debug nonzero to skip extraction and reload the previously
        ## pickled image-quality results instead.
        debug = 0
        if debug == 0:
            i_frame = 0
            while i_frame < num_frames:
                ## Progress logging (parenthesized print works in Py2 and Py3)
                if i_frame % 100 == 0:
                    print(i_frame)

                ## Keep the previous frame around for optical flow.
                prev_frame = frame.copy() if frame is not None else None

                ret, frame = cap.read()
                ## If there are no more frames, break out of loop
                if not ret:
                    break

                frame_series = annotate(pd.DataFrame(index=[0]), i_frame)

                ##=======================
                ## Image quality metrics
                ##=======================
                blur_df = SceneAnalysis.get_blur(frame)
                color_df = SceneAnalysis.get_hsv_hists(frame)
                img_quality_df = img_quality_df.append(
                    pd.concat([frame_series, blur_df, color_df], axis=1),
                    ignore_index=True)

                ##=======================
                ## Video/motion metrics
                ##=======================
                ## Optical flow needs two frames, so the first frame has none.
                if prev_frame is not None:
                    flow_df = VideoUtilities.optical_flow_on_frame(
                        frame, prev_frame)
                    flow_series = annotate(flow_df.copy(), i_frame)
                    video_df = video_df.append(flow_series, ignore_index=True)

                i_frame += 1
        else:
            img_quality_df = pd.read_pickle(video_id + '.img_quality.pkl')

        ## After completing the frame-by-frame analysis, run video metrics
        ## scene changes
        scene_change_df = VideoAnalysis.detect_cut(video,
                                                   img_quality_df['time'])
        img_quality_df['is_scene_transition'] = scene_change_df[
            'is_scene_transition'].copy()

        ## Pickle model and save it to S3 or local directory
        img_quality_df.to_pickle(video_id + '.img_quality.pkl')
        video_df.to_pickle(video_id + '.flow.pkl')
        return img_quality_df, video_df
Example #5
0
                        cv2.cv.CV_CAP_PROP_POS_MSEC) / 1000.
                    #cat = pd.concat([frame_series, cat], axis=1)
                    video_df = video_df.append(flow_series, ignore_index=True)

                i_frame += 1
        else:
            img_quality_df = pd.read_pickle(video_id + '.img_quality.pkl')

        ## After completing the frame-by-frame analysis, run video metrics
        ## scene changes
        scene_change_df = VideoAnalysis.detect_cut(video,
                                                   img_quality_df['time'])
        img_quality_df['is_scene_transition'] = scene_change_df[
            'is_scene_transition'].copy()

        ## Pickle model and save it to S3 or local directory
        img_quality_df.to_pickle(video_id + '.img_quality.pkl')
        video_df.to_pickle(video_id + '.flow.pkl')
        return img_quality_df, video_df


if __name__ == "__main__":
    ## Ad-hoc driver: re-run scene-change detection on a previously
    ## pickled per-frame analysis for one hard-coded video.
    video = "../media/CKeLfaOl0Qk.mp4"
    video_df = pd.read_pickle('CKeLfaOl0Qk.img_quality.pkl')
    video_series = video_df['time']
    scene_change_df = VideoAnalysis.detect_cut(video, video_series)
    #print scene_change_df[scene_change_df['is_scene_transition'] == 1]
    ## Flag each frame that sits on a detected scene cut, then show them.
    video_df['is_scene_transition'] = scene_change_df[
        'is_scene_transition'].copy()
    print video_df[video_df['is_scene_transition'] == 1]
Example #6
0
def _render_time_ranges(ranges):
    """Render (start_ms, end_ms) time ranges as a Streamlit table in seconds."""
    range_table = [{
        'Desde': rng[0] / 1000,
        'Hasta': rng[1] / 1000
    } for rng in ranges]
    st.write('Rangos de tiempo donde aparecen las clases seleccionadas (en segundos)')
    st.table(range_table)


def compute_analysis(records):
    """Render the full analysis dashboard for a set of detection records.

    Shows a download link, the raw records, a per-second bar chart of
    detected instances per class, and two interactive time-range searches:
    by class presence and by exact instance counts. Does nothing when
    `records` is empty.
    """
    ## len() rather than truthiness so this works for lists and DataFrames.
    if len(records) == 0:
        return
    video_analysis = VideoAnalysis(records)
    st.markdown(get_table_download_link(records), unsafe_allow_html=True)
    st.header('Registros de detecciones')
    st.dataframe(records)

    ## Per-second instance counts for every class, as a bar chart with a
    ## range slider for zooming along the timeline.
    df = video_analysis.get_complete_class_counts(as_seconds=True)
    fig = px.bar(df, x='timestamp', y=df.columns,
                 title='Cantidad de instancias detectadas durante el video')
    fig.update_layout(
        yaxis=dict(title=dict(text='instancias')),
        xaxis=dict(title=dict(text='segundos'),
                   rangeslider=dict(visible=True),
                   type="linear"))
    st.plotly_chart(fig)

    st.header('Buscar rangos de tiempo donde aparecen un conjunto de clases')

    single_class_time_tolerance = st.number_input(
        'Tolerancia de separación de frames (en ms)',
        min_value=0, step=1000, value=1000)

    ## NOTE(review): st.beta_columns was renamed st.columns in newer
    ## Streamlit releases -- confirm the pinned version before changing.
    s1_col1, s1_col2 = st.beta_columns(2)

    with s1_col1:
        class_selection = st.multiselect(
            'Selecciona las clases que deseas buscar',
            options=video_analysis.get_unique_classes())

    with s1_col2:
        ranges = []
        if len(class_selection) > 0:
            ranges = video_analysis.get_timeranges_with_classes(
                class_selection, single_class_time_tolerance)
        _render_time_ranges(ranges)

    st.header('Buscar rangos de tiempo donde aparecen una cantidad especifica de instancias')

    instance_count_time_tolerance = st.number_input(
        'Tolerancia de separación de frames (en ms)',
        min_value=0, step=1000, value=1000, key='instance_tolerance')

    s2_col1, s2_col2 = st.beta_columns(2)

    instance_counts_selection = {}
    with s2_col1:
        ## One numeric input per detected class; 0 means "don't filter on it".
        for classname in video_analysis.get_unique_classes():
            instance_counts_selection[classname] = st.number_input(
                classname, min_value=0, value=0, step=1)

    with s2_col2:
        ## Keep only the classes the user actually asked for (count > 0).
        clean_selections = {
            key: int(count)
            for key, count in instance_counts_selection.items() if count > 0
        }
        ranges = []
        if len(clean_selections) > 0:
            ranges = video_analysis.get_timeranges_by_instance_counts(
                clean_selections, instance_count_time_tolerance)
        _render_time_ranges(ranges)
Example #7
0
def jpeg():
    """Single-image route: return one JPEG frame from VideoAnalysis."""
    image_bytes = VideoAnalysis(**parameters).request_image()
    return Response(image_bytes,
                    mimetype='image/jpeg',
                    direct_passthrough=True)
Example #8
0
def mjpeg():
    """Video streaming route. Put this in the src attribute of an img tag."""
    frame_stream = VideoAnalysis(**parameters).mjpeg_generator()
    return Response(frame_stream,
                    mimetype='multipart/x-mixed-replace; boundary=frame',
                    direct_passthrough=True)
Example #9
0
from region_select import region_select
from tkinter import filedialog
import json

camera_ip = '[email protected]'
root_path = '/home/ke/TrafficProject/Mask-RCNN/'
## Ask the user for an input clip from this camera's video folder.
in_path = filedialog.askopenfilename(initialdir=root_path + "/Cameras/" +
                                     camera_ip + "/Videos")
## Use root_path instead of repeating its literal (same resulting string).
out_path = root_path + "/Cameras/" + camera_ip + "/Videos" + '/out_temp.mp4'
json_file_path = root_path + "/Cameras/" + camera_ip + "/" + camera_ip + "_metadata.json"
## Camera metadata (resolution, frame rate) stored next to the videos.
with open(json_file_path, "r") as json_file:
    json_data = json.load(json_file)

## Grab the first frame so the user can draw a region of interest on it.
cap = cv2.VideoCapture(in_path)
flag, frame = cap.read()
cv2.imwrite('./Cameras/haha/haha.jpg', frame)
ori_reso = (frame.shape[1], frame.shape[0])
x, _ = region_select('haha')
## NOTE(review): the temp frame './Cameras/haha/haha.jpg' is never removed
## here, unlike the GUI variant which deletes its temp image -- confirm
## whether that is intentional.
VA = VideoAnalysis(in_path,
                   out_path,
                   json_data['resolution'],
                   ori_reso,
                   30,
                   json_data['frame_rate'],
                   sorted_refPt=x)
VA.mask_video()
arr = VA.mask_rcnn_apply()
interested_frame = VA.find_interested_frames(arr, 2)
VA.clip_video(interested_frame)