Example #1
def anay_video():
    # Let the user pick a recorded video for this camera.
    in_path = filedialog.askopenfilename(initialdir="./Cameras/" +
                                         camera_ip + "/Videos")
    out_path = "./Cameras/" + camera_ip + "/Videos" + '/out_temp.mp4'
    json_file_path = "./Cameras/" + camera_ip + "/" + camera_ip + "_metadata.json"
    # password
    with open(json_file_path, "r") as json_file:
        json_data = json.load(json_file)

    # Grab the first frame so the user can select a region of interest.
    cap = cv2.VideoCapture(in_path)
    flag, frame = cap.read()
    cv2.imwrite('./Cameras/' + camera_ip + '/temp.jpg', frame)
    ori_reso = (frame.shape[1], frame.shape[0])
    x, _ = region_select(camera_ip)
    os.remove('./Cameras/' + camera_ip + '/temp.jpg')

    # Mask the video, run Mask R-CNN, keep the interesting frames, and clip them.
    VA = VideoAnalysis(in_path,
                       out_path,
                       json_data['resolution'],
                       ori_reso,
                       30,
                       json_data['frame_rate'],
                       sorted_refPt=x)
    VA.mask_video()
    arr = VA.mask_rcnn_apply()
    interested_frame = VA.find_interested_frames(arr, 2)
    VA.clip_video(interested_frame)
Example #2
import plotly.express as px
import streamlit as st

# VideoAnalysis and get_table_download_link are provided by the project's own modules.


def compute_analysis(records):
    if len(records) == 0:
        return
    video_analysis = VideoAnalysis(records)
    st.markdown(get_table_download_link(records), unsafe_allow_html=True)
    st.header('Registros de detecciones')
    st.dataframe(records)

    df = video_analysis.get_complete_class_counts(as_seconds=True)

    fig = px.bar(df, x='timestamp', y=df.columns, title='Cantidad de instancias detectadas durante el video')

    fig.update_layout(
        yaxis=dict(
            title=dict(text='instancias'),
        ),
        xaxis=dict(
            title=dict(text='segundos'),
            rangeslider=dict(
                visible=True
            ),
            type="linear"
        )
    )

    st.plotly_chart(fig)

    st.header('Buscar rangos de tiempo donde aparecen un conjunto de clases')

    single_class_time_tolerance = st.number_input('Tolerancia de separación de frames (en ms)', min_value=0, step=1000, value=1000)

    s1_col1, s1_col2 = st.beta_columns(2)

    with s1_col1:
        class_selection = st.multiselect('Selecciona las clases que deseas buscar', options=video_analysis.get_unique_classes())

    with s1_col2:    
        ranges = []
        if len(class_selection) > 0:
            ranges = video_analysis.get_timeranges_with_classes(class_selection, single_class_time_tolerance)
        
        range_table = []
        for rng in ranges:
            range_table.append({
                'Desde': rng[0] / 1000,
                'Hasta': rng[1] / 1000
            })

        st.write('Rangos de tiempo donde aparecen las clases seleccionadas (en segundos)')
        st.table(range_table)

    instance_counts_selection = {}

    st.header('Buscar rangos de tiempo donde aparecen una cantidad especifica de instancias')

    instance_count_time_tolerance = st.number_input('Tolerancia de separación de frames (en ms)', min_value=0, step=1000, value=1000, key='instance_tolerance')

    s2_col1, s2_col2 = st.beta_columns(2)

    with s2_col1:
        for classname in video_analysis.get_unique_classes():
            instance_counts_selection[classname] = st.number_input(classname, min_value=0, value=0, step=1)
        
    with s2_col2:
        clean_selections = {}
        for key in instance_counts_selection:
            if instance_counts_selection[key] > 0:
                clean_selections[key] = int(instance_counts_selection[key])
        
        ranges = []
        if len(clean_selections.keys()) > 0:
            ranges = video_analysis.get_timeranges_by_instance_counts(clean_selections, instance_count_time_tolerance)
        
        range_table = []
        for rng in ranges:
            range_table.append({
                'Desde': rng[0] / 1000,
                'Hasta': rng[1] / 1000
            })
        st.write('Rangos de tiempo donde aparecen las clases seleccionadas (en segundos)')
        st.table(range_table)
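
Example #2's compute_analysis expects the detection records as a table-like object (it is passed to len(), st.dataframe() and VideoAnalysis). A minimal way to call it from a Streamlit entry point could be the sketch below; the CSV upload step, the pandas DataFrame type, and the column layout are assumptions, not part of the original code.

import pandas as pd
import streamlit as st

uploaded = st.file_uploader('Detection records (CSV)', type='csv')
if uploaded is not None:
    # Assumed format: one detection per row, with timestamp and class columns.
    records = pd.read_csv(uploaded)
    compute_analysis(records)
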
Example #3
def mjpeg():
    """Video streaming route. Put this in the src attribute of an img tag."""
    return Response(VideoAnalysis(**parameters).mjpeg_generator(),
                    mimetype='multipart/x-mixed-replace; boundary=frame',
                    direct_passthrough=True)
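
The 'multipart/x-mixed-replace; boundary=frame' mimetype implies that mjpeg_generator() yields one multipart chunk per JPEG-encoded frame. The actual VideoAnalysis implementation is not shown here; a sketch of a generator with that shape, assuming OpenCV frames, would be:

import cv2

def mjpeg_generator_sketch(capture):
    # Yield JPEG frames in the multipart format matching the 'frame' boundary above.
    while True:
        ok, frame = capture.read()
        if not ok:
            break
        ok, jpeg = cv2.imencode('.jpg', frame)
        if not ok:
            continue
        yield (b'--frame\r\n'
               b'Content-Type: image/jpeg\r\n\r\n' + jpeg.tobytes() + b'\r\n')
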
Example #4
def jpeg():
    return Response(VideoAnalysis(**parameters).request_image(),
                    mimetype='image/jpeg',
                    direct_passthrough=True)
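
Both mjpeg() and jpeg() rely on a module-level parameters dict holding the VideoAnalysis constructor arguments and on Flask's Response class, neither of which appears in the snippets. A minimal wiring could look like the sketch below; the app object, URL paths, and parameter keys are assumptions rather than part of the original code.

from flask import Flask, Response

app = Flask(__name__)

# Hypothetical constructor arguments; the real keys depend on VideoAnalysis.__init__.
parameters = {'in_path': 'rtsp://camera/stream'}

app.add_url_rule('/video.mjpeg', 'mjpeg', mjpeg)
app.add_url_rule('/frame.jpeg', 'jpeg', jpeg)

if __name__ == '__main__':
    app.run(host='0.0.0.0', port=5000)
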
Example #5
from region_select import region_select
from tkinter import filedialog
import json
import cv2  # used below but missing from the original imports

# VideoAnalysis is provided by the project's own module.

camera_ip = '[email protected]'
root_path = '/home/ke/TrafficProject/Mask-RCNN/'
in_path = filedialog.askopenfilename(initialdir=root_path + "/Cameras/" +
                                     camera_ip + "/Videos")
out_path = root_path + "/Cameras/" + camera_ip + "/Videos" + '/out_temp.mp4'
json_file_path = root_path + "/Cameras/" + camera_ip + "/" + camera_ip + "_metadata.json"
# password
with open(json_file_path, "r") as json_file:
    json_data = json.load(json_file)

cap = cv2.VideoCapture(in_path)
flag, frame = cap.read()
cv2.imwrite('./Cameras/haha/haha.jpg', frame)
ori_reso = (frame.shape[1], frame.shape[0])
x, _ = region_select('haha')
# A = VideoAnalysis('/home/ke/Downloads/test2.mp4', '/home/ke/Downloads/13.mp4', (640, 400), ori_reso, 30, 1)
VA = VideoAnalysis(in_path,
                   out_path,
                   json_data['resolution'],
                   ori_reso,
                   30,
                   json_data['frame_rate'],
                   sorted_refPt=x)
VA.mask_video()
arr = VA.mask_rcnn_apply()
interested_frame = VA.find_interested_frames(arr, 2)
VA.clip_video(interested_frame)