Example #1
def animate(i):
    global data_
    global time_text_
    global box_
    global tvec_, y1_, y2_
    global cap_
    global fig_ax_

    t = float(i) / fps_
    ret, img = cap_.read()
    (x0, y0), (x1, y1) = box_
    try:
        frame = img[y0:y1, x0:x1]
    except Exception as e:
        print('[WARN] Frame %s dropped' % i)
        return lines_.values(), time_text_

    gray = cv2.cvtColor(frame, cv2.COLOR_RGB2GRAY)

    if save_video_:
        fig_ax_.imshow(frame[::2, ::2], interpolation='nearest')
    else:
        cv2.imshow('image', frame)
        cv2.waitKey(1)

    inI, outI, edge, pixal = webcam.process_frame(gray)
    tvec_.append(t)
    y1_.append(edge)
    y2_.append(pixal)
    update_axis_limits(axes_['raw'], t, edge)
    update_axis_limits(axes_['raw_twin'], t, pixal)

    lines_['rawA'].set_data(tvec_, y1_)
    lines_['rawB'].set_data(tvec_, y2_)

    # Re-run blink detection once per second, after the first 5 seconds of data.
    if i % int(fps_) == 0 and i > int(fps_) * 5:
        success = True
        data_ = np.array((tvec_, y1_, y2_)).T
        if len(data_) == 0:
            success = False
        try:
            tA, bA = extract.find_blinks_using_edge(data_[:, :])
        except Exception as e:
            print("[WARN ] Failed to detect blink (edges). Error was %s" % e)
            success = False
        try:
            tB, bB = extract.find_blinks_using_pixals(data_[:, :])
        except Exception as e:
            print("[WARN ] Failed to detect blink (pixals). Error was %s" % e)
            success = False
        if success:
            update_axis_limits(axes_['blink'], t, 1)
            update_axis_limits(axes_['blink_twin'], t, 1)
            lines_['blinkA'].set_data(tA, 0.9 * np.ones(len(tA)))
            lines_['blinkB'].set_data(tB, np.ones(len(tB)))

    time_text_.set_text(time_template_ % t)
    return lines_.values(), time_text_
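animate() above follows matplotlib's FuncAnimation callback convention: it takes the frame index i and returns the artists to redraw. A minimal driver sketch follows; the fig_, fps_, and save_video_ globals are assumed to be initialised elsewhere in the module, and the output filename is an illustrative assumption.

# Hypothetical driver for animate(); assumes fig_, fps_ and save_video_
# (used by the callback) are already set up by the surrounding module.
import matplotlib.pyplot as plt
from matplotlib import animation

def run(n_frames=1000):
    anim = animation.FuncAnimation(fig_, animate, frames=n_frames,
                                   interval=1000.0 / fps_, blit=False)
    if save_video_:
        # Saving to disk needs a movie writer such as ffmpeg on the PATH.
        anim.save('blinks.mp4', fps=int(fps_))
    else:
        plt.show()
    return anim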
Example #2
def main(args):
    # Extract video first
    data = webcam.video2csv(args)
    edgyBlinks = extract.find_blinks_using_edge(data)
    outfile = "%s_blinks_using_edges.csv" % args['video_file']
    print("[INFO] Writing to outfile %s" % outfile)
    np.savetxt(outfile, np.array(edgyBlinks).T, delimiter=",",
               header="time,blinks")

    pixalBlinks = extract.find_blinks_using_pixals(data)
    outfile = "%s_blinks_using_pixals.csv" % args['video_file']
    print("[INFO] Writing to outfile %s" % outfile)
    np.savetxt(outfile, np.array(pixalBlinks).T, delimiter=",",
               header="time,blinks")
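main() only assumes that args behaves like a mapping with a 'video_file' key. A hedged sketch of building that mapping with argparse is shown below; the flag name is an assumption, not part of the example.

# Hypothetical command-line wrapper for main() above; only the 'video_file'
# key is implied by the example, the flag name is an assumption.
import argparse

if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='Detect blinks in a recorded video.')
    parser.add_argument('--video-file', dest='video_file', required=True,
                        help='Path to the recorded video to analyse.')
    main(vars(parser.parse_args()))  # vars() turns the Namespace into a dict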
Example #3
def main(args):
    # Extract video first
    data = webcam.video2csv(args)
    if len(data) == 0:
        print('[WARN] Could not load data. Quitting.')
        return None
    edgyBlinks = extract.find_blinks_using_edge(data)
    outfile = "%s_blinks_using_edges.csv" % args['video_device']
    print("[INFO] Writing to outfile %s" % outfile)
    np.savetxt(outfile, np.array(edgyBlinks).T, delimiter=",",
               header="time,blinks")

    pixalBlinks = extract.find_blinks_using_pixals(data)
    outfile = "%s_blinks_using_pixals.csv" % args['video_file']
    print("[INFO] Writing to outfile %s" % outfile)
    np.savetxt(outfile, np.array(pixalBlinks).T, delimiter=",",
               header="time,blinks")
Example #4
def main(args):
    # Extract video first
    data = webcam.video2csv(args)
    if len(data) == 0:
        print('[WARN] Could not load data. Quitting.')
        return None
    edgyBlinks = extract.find_blinks_using_edge(data)
    outfile = "%s_blinks_using_edges.csv" % args['video_device']
    print("[INFO] Writing to outfile %s" % outfile)
    np.savetxt(outfile, np.array(edgyBlinks).T, delimiter=",",
               header="time,blinks")

    pixalBlinks = extract.find_blinks_using_pixals(data)
    outfile = "%s_blinks_using_pixals.csv" % args['video_device']
    print("[INFO] Writing to outfile %s" % outfile)
    np.savetxt(outfile, np.array(pixalBlinks).T, delimiter=",",
               header="time,blinks")
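Unlike the earlier variants, this one names both output files after args['video_device'], suggesting the frames come from a live capture device rather than a recorded file. A minimal, hypothetical invocation (device index 0 as the default webcam is an assumption):

# Hypothetical call; device index 0 (the default webcam) is an assumption.
if __name__ == '__main__':
    main({'video_device': 0})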
Example #5
def animate(i):
    global data_
    global time_text_
    global box_
    global tvec_, y1_, y2_
    global cap_
    global fig_ax_

    t = float(i) / fps_
    ret, img = cap_.read()
    (x0, y0), (x1, y1) = box_
    try:
        frame = img[y0:y1, x0:x1]
    except Exception as e:
        print('[WARN] Frame %s dropped' % i)
        return lines_.values(), time_text_

    gray = cv2.cvtColor(frame, cv2.COLOR_RGB2GRAY)

    if save_video_:
        fig_ax_.imshow(frame[::2, ::2], interpolation='nearest')
    else:
        cv2.imshow('image', frame)
        cv2.waitKey(1)

    inI, outI, edge, pixal = webcam.process_frame(gray)
    tvec_.append(t)
    y1_.append(edge)
    y2_.append(pixal)
    update_axis_limits(axes_['raw'], t, edge)
    update_axis_limits(axes_['raw_twin'], t, pixal)

    lines_['rawA'].set_data(tvec_, y1_)
    lines_['rawB'].set_data(tvec_, y2_)
    
    if i % int(fps_) == 0 and i > int(fps_) * 5:
        data_ = np.array((tvec_, y1_, y2_)).T
        tA, bA = extract.find_blinks_using_edge(data_[:, :])
        tB, bB = extract.find_blinks_using_pixals(data_[:, :])
        update_axis_limits(axes_['blink'], t, 1)
        update_axis_limits(axes_['blink_twin'], t, 1)
        lines_['blinkA'].set_data(tA, 0.9 * np.ones(len(tA)))
        lines_['blinkB'].set_data(tB, np.ones(len(tB)))

    time_text_.set_text(time_template_ % t)
    return lines_.values(), time_text_
Example #6
def animate(i):
    global data_
    global time_text_
    global box_
    global tvec_, y1_, y2_
    global cap_
    global fig_ax_

    t = float(i) / fps_
    ret, img = cap_.read()
    (x0, y0), (x1, y1) = box_
    frame = img[y0:y1, x0:x1]
    gray = cv2.cvtColor(frame, cv2.COLOR_RGB2GRAY)

    if save_video_:
        fig_ax_.imshow(frame[::2, ::2], interpolation='nearest')
    else:
        cv2.imshow('image', frame)
        cv2.waitKey(1)

    inI, outI, edge, pixal = webcam.process_frame(gray)
    tvec_.append(t)
    y1_.append(edge)
    y2_.append(pixal)
    update_axis_limits(axes_['raw'], t, edge)
    update_axis_limits(axes_['raw_twin'], t, pixal)

    lines_['rawA'].set_data(tvec_, y1_)
    lines_['rawB'].set_data(tvec_, y2_)

    if i % int(fps_) == 0 and i > int(fps_) * 5:
        data_ = np.array((tvec_, y1_, y2_)).T
        tA, bA = extract.find_blinks_using_edge(data_[:, :])
        tB, bB = extract.find_blinks_using_pixals(data_[:, :])
        update_axis_limits(axes_['blink'], t, 1)
        update_axis_limits(axes_['blink_twin'], t, 1)
        lines_['blinkA'].set_data(tA, 0.9 * np.ones(len(tA)))
        lines_['blinkB'].set_data(tB, np.ones(len(tB)))

    time_text_.set_text(time_template_ % t)
    return lines_.values(), time_text_