Example 1
def get_crop_roi(vid):
    ret, frame = vid.read()
    if not ret:
        raise Exception("No Frame from first video!")
    frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    roi = cv2.selectROI(frame)
    frame = imops.crop(frame, roi)
    cv2.destroyAllWindows()
    return roi, frame
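A minimal usage sketch (the video path below is hypothetical; cv2 and the project's imops module are assumed to be imported as elsewhere on this page). cv2.selectROI is confirmed with ENTER or SPACE:

import cv2

vid = cv2.VideoCapture("example_session.avi")  # placeholder path
roi, first_frame = get_crop_roi(vid)
print("ROI (x, y, w, h):", roi)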
Example 2
def run_mp(file, params, data_dir, n_proc=7):
    vid = cv2.VideoCapture(file)
    total_frames = int(vid.get(cv2.CAP_PROP_FRAME_COUNT))

    # Create queues
    task_queue = mp.Queue()
    done_queue = mp.Queue()

    # start processes
    procs = []
    for i in range(n_proc):
        procs.append(
            mp.Process(target=workers.frame_worker2,
                       args=(task_queue, done_queue, params, i + 1)))
        procs[-1].start()

    # and the grabber
    #grabber = mp.Process(target=workers.result_grabber, args=(done_queue,))
    #grabber.start()

    for i in trange(total_frames, position=0):
        ret, frame = vid.read()
        if not ret:
            # no more readable frames
            break
        frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        frame = imops.crop(frame, params['roi'])

        n_frame = vid.get(cv2.CAP_PROP_POS_FRAMES)

        #task_queue.put((frame, n_frame), block=True, timeout=60)
        task_queue.put((frame, n_frame), block=False)
        # try:
        #     results.append(done_queue.get())
        # except:
        #     pass
        # if i%100==0:
        #     print(len(results))

    # collect one result object per worker process
    results = []
    for i in range(n_proc):
        results.append(done_queue.get())

    return results
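workers.frame_worker2 is not shown on this page. A hypothetical stand-in that is compatible with the queue protocol above (each worker drains task_queue and puts a single result object on done_queue) could look like this; it is a sketch, not the project's actual worker:

import queue

def frame_worker_sketch(task_queue, done_queue, params, worker_id):
    # Hypothetical worker: pull (frame, n_frame) tuples off task_queue,
    # process each frame, and push one per-worker result list onto done_queue.
    results = []
    while True:
        try:
            frame, n_frame = task_queue.get(timeout=10)
        except queue.Empty:
            break  # assume no more frames are coming
        # placeholder for the real per-frame processing
        results.append({'n': n_frame, 'mean': float(frame.mean())})
    done_queue.put(results)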
Example 3
def run(files, params, data_dir):
    thetas = np.linspace(0, np.pi * 2, num=200, endpoint=False)
    # loop through videos...
    pool = mp.Pool(8)

    for fn in tqdm(files, total=len(files), position=0):

        # open video, get params, make basic objects
        vid = cv2.VideoCapture(fn)
        total_frames = int(vid.get(cv2.CAP_PROP_FRAME_COUNT))
        #frame_counter = count()

        # appending to lists is actually pretty fast in python when dealing w/ uncertain quantities
        # store ellipse parameters here, rejoin into a pandas dataframe at the end
        # x_list = [] # x position of ellipse center
        # y_list = [] # y position of ellipse center
        # a_list = [] # major axis (enforced when lists are combined - fitutils.clean_lists)
        # b_list = [] # minor axis ("")
        # t_list = [] # theta, angle of a from x axis, radians, increasing counterclockwise
        # n_list = [] # frame number
        # v_list = [] # mean value of points contained within ellipse
        # c_list = [] # coverage - n_points/perimeter
        # g_list = [] # gradient magnitude of edge points

        results = []
        for i in trange(total_frames, position=1):
            ret, frame = vid.read()
            if not ret:
                # read failed, so there are no more frames: stop early
                break

            frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
            frame = imops.crop(frame, params['roi'])

            n_frame = vid.get(cv2.CAP_PROP_POS_FRAMES)

            results.append(
                pool.apply_async(runops.process_frame_all,
                                 args=(frame, params, n_frame)))

            #
            # # Chew up a frame, return a list of ellipses
            # try:
            #     ellipses, frame_preproc, edge_mag = runops.process_frame(frame, params)
            # except TypeError:
            #     # if doesn't fit any ellipses, will return a single None, which will throw a
            #     # typeerror because it tries to unpack None into the three values above...
            #     continue
            #
            # for e, n_pts in ellipses:
            #     # ellipses actually gets returned as a tuple (ellipse object, n_pts)
            #     if not e:
            #         continue
            #
            #     x_list.append(e.params[0])
            #     y_list.append(e.params[1])
            #     a_list.append(e.params[2])
            #     b_list.append(e.params[3])
            #     t_list.append(e.params[4])
            #     n_list.append(n_frame)
            #
            #     # get mean darkness within each ellipse
            #     # TODO: Validate - make sure we're getting the right shit here.
            #     ell_mask_y, ell_mask_x = draw.ellipse(e.params[0], e.params[1], e.params[2], e.params[3],
            #                                           shape=(frame_preproc.shape[1], frame_preproc.shape[0]),
            #                                           rotation=e.params[4])
            #     v_list.append(np.mean(frame_preproc[ell_mask_x, ell_mask_y]))
            #
            #
            #
            #     # coverage - number of points vs. circumference
            #     # perim: https://stackoverflow.com/a/42311034
            #     perimeter = np.pi * (3 * (e.params[2] + e.params[3]) -
            #                          np.sqrt((3 * e.params[2] + e.params[3]) *
            #                         (e.params[2] + 3 * e.params[3])))
            #
            #     c_list.append(float(n_pts)/perimeter)
            #
            #     # get the mean edge mag for predicted points on the ellipse,
            #     # off-target ellipses often go through the pupil aka through areas with low gradients...
            #     e_points = np.round(e.predict_xy(thetas)).astype(np.int)
            #     e_points[:,0] = np.clip(e_points[:,0], 0, frame_preproc.shape[0]-1)
            #     e_points[:, 1] = np.clip(e_points[:,1], 0, frame_preproc.shape[1]-1)
            #     g_list.append(np.mean(edge_mag[e_points[:,0], e_points[:,1]]))

        got_results = []
        for r in tqdm(results, total=len(results), position=2):
            got_results.append(r.get())

        flat_results = got_results[0]
        for i in got_results[1:]:
            for k, v in i.items():
                flat_results[k].extend(v)

        df = pd.DataFrame.from_dict(flat_results)
        vid_name = os.path.basename(fn).rsplit('.', 1)[0]
        save_fn = os.path.join(data_dir, "Ellall_" + vid_name + ".csv")
        df.to_csv(save_fn)
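The merge above assumes each runops.process_frame_all call returns a dict of equal-keyed lists (one entry per fitted ellipse). A small sketch of that assumption, with illustrative keys and values:

import pandas as pd

# Hypothetical per-frame results; keys and numbers are made up for illustration.
got_results = [
    {'x': [10.1], 'y': [20.3], 'a': [5.0], 'b': [4.2], 't': [0.1], 'n': [1]},
    {'x': [10.4, 11.0], 'y': [20.1, 19.8], 'a': [5.1, 4.9],
     'b': [4.3, 4.1], 't': [0.2, 0.3], 'n': [2, 2]},
]

flat = got_results[0]
for r in got_results[1:]:
    for k, v in r.items():
        flat[k].extend(v)

df = pd.DataFrame.from_dict(flat)
print(df)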
Example 4
def run_shitty():
    vid_file = '/home/lab/pupil_vids/nick3.avi'
    # run once we get good params
    # remake video object to restart from frame 0
    vid = cv2.VideoCapture(vid_file)
    total_frames = vid.get(cv2.CAP_PROP_FRAME_COUNT)
    roi = (725, 529, 523, 334)
    sig_cutoff = 0.7
    sig_gain = 10
    canny_sig = 4.07
    canny_high = 1.17
    canny_low = 0.3
    # pmod = model.Pupil_Model(ix, iy, rad)

    # cv2.namedWindow('run', flags=cv2.WINDOW_NORMAL)
    # fig, ax = plt.subplots(4,1)
    thetas = np.linspace(0, np.pi * 2, num=100, endpoint=False)
    frame_counter = count()

    # frame_params = np.ndarray(shape=(0, 7))
    x_list = []
    y_list = []
    a_list = []
    b_list = []
    t_list = []
    n_list = []
    v_list = []

    starttime = time()
    for i in range(100):
        k = cv2.waitKey(1) & 0xFF
        if k == ord('\r'):
            break

        ret, frame_orig = vid.read()
        if not ret:
            break
        n_frame = next(frame_counter)
        frame_orig = cv2.cvtColor(frame_orig, cv2.COLOR_BGR2GRAY)
        frame_orig = imops.crop(frame_orig, roi)

        # use the locally defined tuning values (there is no params dict in this function)
        frame = imops.preprocess_image(frame_orig, roi,
                                       sig_cutoff=sig_cutoff,
                                       sig_gain=sig_gain)

        edges = scharr_canny(frame, sigma=canny_sig,
                             high_threshold=canny_high, low_threshold=canny_low)

        edges_rep = repair_edges(edges, frame)

        ellipses = [imops.fit_ellipse(e) for e in edges_rep]
        # ell_pts = np.ndarray(shape=(0, 2))
        for e in ellipses:
            if not e:
                continue

            x_list.append(e.params[0])
            y_list.append(e.params[1])
            a_list.append(e.params[2])
            b_list.append(e.params[3])
            t_list.append(e.params[4])
            n_list.append(n_frame)
            # get mean darkness
            ell_mask_y, ell_mask_x = draw.ellipse(e.params[0], e.params[1], e.params[2], e.params[3],
                                                  shape=(frame.shape[1], frame.shape[0]),
                                                  rotation=e.params[4])

            v_list.append(np.mean(frame[ell_mask_x, ell_mask_y]))
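The lists filled above are never assembled within this snippet; the comments in Example 3 mention rejoining them into a pandas DataFrame (via fitutils.clean_lists, which is not shown here). A hypothetical assembly sketch:

import pandas as pd

# Assuming the lists built in the loop above are in scope
# (x_list, y_list, a_list, b_list, t_list, n_list, v_list):
df = pd.DataFrame({
    'x': x_list, 'y': y_list,
    'a': a_list, 'b': b_list,
    't': t_list, 'n': n_list,
    'v': v_list,
})
print(df.describe())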
Example 5
    #     #points_up = points+1
    #     #points_down = points-1
    #     #points = np.concatenate((points, points_up, points_down), axis=0)
    # try:
    #     e_points = np.row_stack(e_points)
    # except ValueError:
    #     continue
    p = ell_smooth.iloc[n_frame]
    e_points = emod.predict_xy(thetas, params=(p.x, p.y, p.a, p.b, p.t))
    e_points = e_points.astype(int)

    frame_orig = imops.crop(frame_orig, roi)
    frame_orig = img_as_float(frame_orig)

    draw.set_color(frame_orig, (e_points[:, 0], e_points[:, 1]), (1, 0, 0))
    cv2.imshow('run', frame_orig)

    #frame_orig = frame_orig*255
    #frame_orig = frame_orig.astype(np.uint8)

    #writer.writeFrame(frame_orig)

#writer.close()
cv2.destroyAllWindows()
Example 6
def set_params(files):
    # cycle through files..
    filecyc = cycle(files)
    # start with the first video
    vid = cv2.VideoCapture(next(filecyc))

    # crop roi (x, y, width, height)
    roi, frame = get_crop_roi(vid)

    # draw the initial pupil estimate (circular for now)
    ix, iy, rad = draw_pupil(frame)

    # initial values, empirically set.
    # have to use ints with cv2's windows, we'll convert later
    sig_cutoff = 50
    sig_gain = 5
    canny_sig = 200
    canny_high = 50
    canny_low = 10
    closing_rad = 3

    cv2.namedWindow('params', flags=cv2.WINDOW_NORMAL)
    cv2.createTrackbar('Sigmoid Cutoff', 'params', sig_cutoff, 100,
                       imops.nothing)
    cv2.createTrackbar('Sigmoid Gain', 'params', sig_gain, 20, imops.nothing)
    cv2.createTrackbar('Gaussian Blur', 'params', canny_sig, 700,
                       imops.nothing)
    cv2.createTrackbar('Canny High Threshold', 'params', canny_high, 300,
                       imops.nothing)
    cv2.createTrackbar('Canny Low Threshold', 'params', canny_low, 300,
                       imops.nothing)
    cv2.createTrackbar('Closing Radius', 'params', closing_rad, 10,
                       imops.nothing)

    while True:
        k = cv2.waitKey(1) & 0xFF
        if k == ord('\r'):
            break

        ret, frame_orig = vid.read()
        if not ret:
            # cycle to the next video (or restart) and skip this iteration of param setting
            vid = cv2.VideoCapture(next(filecyc))
            continue
        frame = frame_orig.copy()

        sig_cutoff = cv2.getTrackbarPos('Sigmoid Cutoff', 'params')
        sig_gain = cv2.getTrackbarPos('Sigmoid Gain', 'params')
        canny_sig = cv2.getTrackbarPos('Gaussian Blur', 'params')
        canny_high = cv2.getTrackbarPos('Canny High Threshold', 'params')
        canny_low = cv2.getTrackbarPos('Canny Low Threshold', 'params')
        closing_rad = cv2.getTrackbarPos('Closing Radius', 'params')

        # convert the integer trackbar values back to the float ranges used downstream
        sig_cutoff = sig_cutoff / 100.
        canny_sig = canny_sig / 100.
        canny_high = canny_high / 100.
        canny_low = canny_low / 100.

        frame = imops.preprocess_image(frame,
                                       roi,
                                       sig_cutoff=sig_cutoff,
                                       sig_gain=sig_gain,
                                       closing=closing_rad)
        edges_params = imops.scharr_canny(frame,
                                          sigma=canny_sig,
                                          high_threshold=canny_high,
                                          low_threshold=canny_low)

        # TODO: Also respect grayscale param here
        frame_orig = cv2.cvtColor(frame_orig, cv2.COLOR_BGR2GRAY)
        frame_orig = imops.crop(frame_orig, roi)
        frame_orig = img_as_float(frame_orig)

        cv2.imshow('params', np.vstack([frame_orig, frame, edges_params]))
    cv2.destroyAllWindows()

    # collect parameters
    params = {
        "sig_cutoff": sig_cutoff,
        "sig_gain": sig_gain,
        "canny_sig": canny_sig,
        "canny_high": canny_high,
        "canny_low": canny_low,
        "mask": {
            'x': ix,
            'y': iy,
            'r': rad
        },
        "files": files,
        "roi": roi,
        "shape": (roi[3], roi[2]),
    }

    return params
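Example 7 below loads these parameters back from a .json file. A minimal sketch of persisting the returned dict (the file list and output path are hypothetical; tuples such as roi are serialized as JSON lists, which is fine because the readers only index into them):

import json

files = ['/path/to/session1.avi']  # hypothetical video list
params = set_params(files)

with open('run_params.json', 'w') as param_f:
    json.dump(params, param_f, indent=2)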
Example 7
def video_from_params(param_fn, ell_fn, which_vid=0):
    thetas = np.linspace(0, np.pi * 2, num=300, endpoint=False)

    # load params from .json file, vid filenames will be in there
    with open(param_fn, 'r') as param_f:
        params = json.load(param_f)

    # for now just do one video
    vid_fn = str(params['files'][which_vid])

    vid = cv2.VideoCapture(vid_fn)
    total_frames = int(vid.get(cv2.CAP_PROP_FRAME_COUNT))

    ell_df = pd.read_csv(ell_fn)

    vid_path, vid_name = os.path.split(vid_fn)
    vid_name = "Ellone_" + vid_name.rsplit('.', 1)[0] + ".mp4"
    vid_out_fn = os.path.join(vid_path, vid_name)

    writer = io.FFmpegWriter(vid_out_fn, outputdict={'-vcodec': 'libx264'})

    emod = measure.EllipseModel()

    ell_frame = ell_df.groupby('n')

    for i in trange(total_frames):

        ret, frame = vid.read()
        if not ret:
            break
        frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
        frame = imops.crop(frame, params['roi'])
        frame = img_as_float(frame)

        try:
            ell_rows = ell_frame.get_group(i)

            for _, e in ell_rows.iterrows():
                e_points = emod.predict_xy(thetas,
                                           params=(e.x, e.y, e.a, e.b, e.t))
                e_points = e_points.astype(int)

                draw.set_color(frame, (e_points[:, 0], e_points[:, 1]),
                               (1, 0, 0))
                draw.set_color(frame, (e_points[:, 0] + 1, e_points[:, 1]),
                               (1, 0, 0))
                draw.set_color(frame, (e_points[:, 0] - 1, e_points[:, 1]),
                               (1, 0, 0))
                draw.set_color(frame, (e_points[:, 0], e_points[:, 1] + 1),
                               (1, 0, 0))
                draw.set_color(frame, (e_points[:, 0], e_points[:, 1] - 1),
                               (1, 0, 0))
                draw.set_color(frame, (e_points[:, 0] + 1, e_points[:, 1] + 1),
                               (1, 0, 0))
                draw.set_color(frame, (e_points[:, 0] + 1, e_points[:, 1] - 1),
                               (1, 0, 0))
                draw.set_color(frame, (e_points[:, 0] - 1, e_points[:, 1] + 1),
                               (1, 0, 0))
                draw.set_color(frame, (e_points[:, 0] - 1, e_points[:, 1] - 1),
                               (1, 0, 0))

        except KeyError:
            # no ellipses this frame, just write frame
            pass

        writer.writeFrame(frame * 255)

    writer.close()
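The nine draw.set_color calls above thicken the ellipse outline by also coloring the 8-neighborhood of every predicted point. A hypothetical helper (not part of the project) that does the same thing while clipping against the frame bounds, assuming draw is skimage.draw as elsewhere on this page:

import numpy as np
from skimage import draw

def draw_thick_points(frame, points, color=(1, 0, 0)):
    # Color each point and its 8 neighbors, clipped to the frame so
    # ellipses near the border don't index out of bounds.
    for dr in (-1, 0, 1):
        for dc in (-1, 0, 1):
            rr = np.clip(points[:, 0] + dr, 0, frame.shape[0] - 1)
            cc = np.clip(points[:, 1] + dc, 0, frame.shape[1] - 1)
            draw.set_color(frame, (rr, cc), color)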
Example 8
def play_fit(vid, roi, params, fps=30):
    thetas = np.linspace(0, np.pi * 2, num=200, endpoint=False)

    # start vid at first frame in params
    if "n" in params.keys():
        first_frame = params.n.min()
    else:
        first_frame = params.index.min()

    ret = vid.set(cv2.CAP_PROP_POS_FRAMES, first_frame)

    frame_counter = count()

    emod = measure.EllipseModel()

    cv2.namedWindow('play', flags=cv2.WINDOW_NORMAL)
    for i in range(len(params)):
        k = cv2.waitKey(1) & 0xFF
        if k == ord('\r'):
            break

        ret, frame_orig = vid.read()
        if not ret:
            break
        frame_orig = cv2.cvtColor(frame_orig, cv2.COLOR_BGR2RGB)

        n_frame = next(frame_counter)

        if 'n' in params.keys():
            ell_rows = params[params.n == n_frame]
            frame_orig = imops.crop(frame_orig, roi)
            frame_orig = img_as_float(frame_orig)
            for _, e in ell_rows.iterrows():
                e_points = emod.predict_xy(thetas,
                                           params=(e.x, e.y, e.a, e.b, e.t))
                e_points = e_points.astype(int)

                draw.set_color(frame_orig, (e_points[:, 0], e_points[:, 1]),
                               (1, 0, 0))
                draw.set_color(frame_orig,
                               (e_points[:, 0] + 1, e_points[:, 1]), (1, 0, 0))
                draw.set_color(frame_orig,
                               (e_points[:, 0] - 1, e_points[:, 1]), (1, 0, 0))
                draw.set_color(frame_orig,
                               (e_points[:, 0], e_points[:, 1] + 1), (1, 0, 0))
                draw.set_color(frame_orig,
                               (e_points[:, 0], e_points[:, 1] - 1), (1, 0, 0))
                draw.set_color(frame_orig,
                               (e_points[:, 0] + 1, e_points[:, 1] + 1),
                               (1, 0, 0))
                draw.set_color(frame_orig,
                               (e_points[:, 0] + 1, e_points[:, 1] - 1),
                               (1, 0, 0))
                draw.set_color(frame_orig,
                               (e_points[:, 0] - 1, e_points[:, 1] + 1),
                               (1, 0, 0))
                draw.set_color(frame_orig,
                               (e_points[:, 0] - 1, e_points[:, 1] - 1),
                               (1, 0, 0))

        else:
            p = params.iloc[n_frame]
            e_points = emod.predict_xy(thetas,
                                       params=(p.x, p.y, p.a, p.b, p.t))
            e_points = e_points.astype(int)

            frame_orig = imops.crop(frame_orig, roi)
            frame_orig = img_as_float(frame_orig)

            draw.set_color(frame_orig, (e_points[:, 0], e_points[:, 1]),
                           (1, 0, 0))
            draw.set_color(frame_orig, (e_points[:, 0] + 1, e_points[:, 1]),
                           (1, 0, 0))
            draw.set_color(frame_orig, (e_points[:, 0] - 1, e_points[:, 1]),
                           (1, 0, 0))
            draw.set_color(frame_orig, (e_points[:, 0], e_points[:, 1] + 1),
                           (1, 0, 0))
            draw.set_color(frame_orig, (e_points[:, 0], e_points[:, 1] - 1),
                           (1, 0, 0))
            draw.set_color(frame_orig,
                           (e_points[:, 0] + 1, e_points[:, 1] + 1), (1, 0, 0))
            draw.set_color(frame_orig,
                           (e_points[:, 0] + 1, e_points[:, 1] - 1), (1, 0, 0))
            draw.set_color(frame_orig,
                           (e_points[:, 0] - 1, e_points[:, 1] + 1), (1, 0, 0))
            draw.set_color(frame_orig,
                           (e_points[:, 0] - 1, e_points[:, 1] - 1), (1, 0, 0))
        cv2.imshow('play', frame_orig)
        sleep(1. / fps)

        # frame_orig = frame_orig*255
        # frame_orig = frame_orig.astype(np.uint8)

        # writer.writeFrame(frame_orig)

    cv2.destroyAllWindows()
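A minimal, hypothetical call site for play_fit, reusing the video path and roi that appear in Example 4 and the ellipse CSV written by Example 3:

import cv2
import pandas as pd

vid = cv2.VideoCapture('/home/lab/pupil_vids/nick3.avi')  # path from Example 4
ellipses = pd.read_csv('Ellall_nick3.csv')                # output of Example 3 (filename assumed)
roi = (725, 529, 523, 334)                                # (x, y, w, h), as in Example 4

play_fit(vid, roi, ellipses, fps=30)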