Example #1
0
def find_eyelids(eye_img, debug_index):

    u_eyelid = find_upper_eyelid(eye_img, debug_index)
    l_eyelid = find_lower_eyelid(eye_img, debug_index)

    if debug_index == 2:
        debug_img_1 = stack_imgs_horizontal([__debug_imgs_upper[1], __debug_imgs_lower[1]])
        debug_img_2 = stack_imgs_horizontal([__debug_imgs_upper[2], __debug_imgs_lower[2]])
        full_debug_img = stack_imgs_vertical([debug_img_1, debug_img_2])
        cv2.imshow(__winname, full_debug_img)

    return u_eyelid, l_eyelid
Example #2
0
def find_eyelids(eye_img, debug_index):

    u_eyelid = find_upper_eyelid(eye_img, debug_index)
    l_eyelid = find_lower_eyelid(eye_img, debug_index)

    if debug_index == 2:
        debug_img_1 = stack_imgs_horizontal(
            [__debug_imgs_upper[1], __debug_imgs_lower[1]])
        debug_img_2 = stack_imgs_horizontal(
            [__debug_imgs_upper[2], __debug_imgs_lower[2]])
        full_debug_img = stack_imgs_vertical([debug_img_1, debug_img_2])
        cv2.imshow(__winname, full_debug_img)

    return u_eyelid, l_eyelid
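
Both find_eyelids variants above rely on stack_imgs_horizontal / stack_imgs_vertical helpers (and module-level __debug_imgs_upper / __debug_imgs_lower caches) that are not shown here. A minimal sketch of what such stacking helpers could look like, assuming all inputs share dtype and channel count and smaller images are padded with black:

import numpy as np

def stack_imgs_horizontal(imgs):
    # Pad every image to the tallest height with black rows, then stack left-to-right
    h = max(img.shape[0] for img in imgs)
    padded = [np.pad(img, ((0, h - img.shape[0]),) + ((0, 0),) * (img.ndim - 1),
                     mode='constant') for img in imgs]
    return np.hstack(padded)

def stack_imgs_vertical(imgs):
    # Pad every image to the widest width with black columns, then stack top-to-bottom
    w = max(img.shape[1] for img in imgs)
    padded = [np.pad(img, ((0, 0), (0, w - img.shape[1])) + ((0, 0),) * (img.ndim - 2),
                     mode='constant') for img in imgs]
    return np.vstack(padded)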
Example #3
0
def find_pupil(
    eye_img_bgr, fast_width_grads=25.5, fast_width_iso=80, weight_grads=0.9, weight_iso=0.1, debug_index=False
):

    eye_img_r = cv2.split(eye_img_bgr)[2]

    fast_size_grads = (int((fast_width_grads / eye_img_bgr.shape[0]) * eye_img_bgr.shape[1]), int(fast_width_grads))
    fast_img_grads = cv2.resize(eye_img_r, fast_size_grads)

    fast_size_iso = (int(fast_width_iso), int((fast_width_iso / eye_img_r.shape[1]) * eye_img_r.shape[0]))
    fast_img_iso = cv2.resize(eye_img_r, fast_size_iso)

    c_map_grads = eye_center_locator_gradients.get_center_map(fast_img_grads)
    c_map_iso = eye_center_locator_isophote.get_center_map(fast_img_iso)

    c_map_norm_grads = cv2.normalize(c_map_grads, alpha=0, beta=255, norm_type=cv2.NORM_MINMAX)
    c_map_big_grads = cv2.resize(c_map_norm_grads, (eye_img_bgr.shape[1], eye_img_bgr.shape[0])).astype(np.uint8)

    c_map_norm_iso = cv2.normalize(c_map_iso, alpha=0, beta=255, norm_type=cv2.NORM_MINMAX)
    c_map_big_iso = cv2.resize(c_map_norm_iso, (eye_img_bgr.shape[1], eye_img_bgr.shape[0])).astype(np.uint8)

    joint_c_map = cv2.addWeighted(c_map_big_grads, weight_grads, c_map_big_iso, weight_iso, 1.0)

    max_val_index = np.argmax(joint_c_map)
    pupil_y0, pupil_x0 = max_val_index // joint_c_map.shape[1], max_val_index % joint_c_map.shape[1]

    max_val_index_2 = np.argmax(c_map_big_grads)
    pupil_y0_2, pupil_x0_2 = max_val_index_2 // joint_c_map.shape[1], max_val_index_2 % joint_c_map.shape[1]

    max_val_index_3 = np.argmax(c_map_big_iso)
    pupil_y0_3, pupil_x0_3 = max_val_index_3 // joint_c_map.shape[1], max_val_index_3 % joint_c_map.shape[1]

    if debug_index:

        debug_img = eye_img_bgr.copy()

        joint_c_map = cv2.cvtColor(joint_c_map, cv2.COLOR_GRAY2BGR)
        c_map_big_iso = cv2.cvtColor(c_map_big_iso, cv2.COLOR_GRAY2BGR)
        c_map_big_grads = cv2.cvtColor(c_map_big_grads, cv2.COLOR_GRAY2BGR)

        draw_utils.draw_cross(debug_img, (pupil_x0, pupil_y0), (0, 255, 255), 16, 2)
        draw_utils.draw_cross(joint_c_map, (pupil_x0_3, pupil_y0_3), (255, 0, 255), 8, 2)
        draw_utils.draw_cross(joint_c_map, (pupil_x0_2, pupil_y0_2), (255, 0, 255), 8, 2)
        draw_utils.draw_cross(joint_c_map, (pupil_x0, pupil_y0), (255, 0, 0), 16, 2)

        draw_utils.draw_cross(c_map_big_iso, (pupil_x0_3, pupil_y0_3), (255, 0, 0), 16, 2)
        draw_utils.draw_cross(c_map_big_grads, (pupil_x0_2, pupil_y0_2), (255, 0, 0), 16, 2)

        stacked_imgs = image_utils.stack_imgs_horizontal([debug_img, c_map_big_grads, c_map_big_iso, joint_c_map])
        __debug_imgs[debug_index] = stacked_imgs

        if debug_index == 2:
            full_debug_img = image_utils.stack_imgs_vertical([__debug_imgs[1], __debug_imgs[2]])
            cv2.imshow(__winname, full_debug_img)
        elif debug_index > 2:
            cv2.imshow(__winname, stacked_imgs)

    return pupil_x0, pupil_y0
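
The flat-index arithmetic used above (integer division and modulo by the map width after np.argmax) is equivalent to np.unravel_index; a small self-contained check of that step, with a stand-in array in place of the real centre map:

import numpy as np

c_map = np.random.rand(48, 64)                     # stand-in for a centre map
flat_idx = np.argmax(c_map)
y0, x0 = flat_idx // c_map.shape[1], flat_idx % c_map.shape[1]
assert (y0, x0) == np.unravel_index(flat_idx, c_map.shape)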
Example #4
0
def find_pupil(eye_img_bgr, debug_index=False):

    eye_img_r = cv2.cvtColor(eye_img_bgr, cv2.COLOR_BGR2GRAY)

    # Scale to small image for faster computation
    scale = __fast_width / eye_img_r.shape[1]
    small_size = (int(__fast_width),
                  int((__fast_width / eye_img_r.shape[1]) *
                      eye_img_r.shape[0]))
    eye_img_small = cv2.resize(eye_img_r, small_size)
    eye_img_small = cv2.GaussianBlur(eye_img_small, (3, 3), 0)

    center_map = get_center_map(eye_img_small)

    max_val_index = np.argmax(center_map)
    pupil_y0, pupil_x0 = max_val_index // center_map.shape[
        1], max_val_index % center_map.shape[1]

    # Scale back to original coordinates
    pupil_y0, pupil_x0 = int((pupil_y0 + 0.5) / scale), int(
        (pupil_x0 + 0.5) / scale)

    if debug_index:

        eye_img_r_debug = cv2.cvtColor(eye_img_r, cv2.COLOR_GRAY2BGR)
        debug_img = eye_img_bgr.copy()

        cmap_norm = cv2.normalize(center_map,
                                  alpha=0,
                                  beta=255,
                                  norm_type=cv2.NORM_MINMAX).astype(np.uint8)
        center_map_big = cv2.resize(
            cmap_norm,
            (eye_img_r.shape[1], eye_img_r.shape[0])).astype(np.uint8)
        center_map_big = cv2.cvtColor(center_map_big, cv2.COLOR_GRAY2BGR)

        overlay_img = cv2.addWeighted(center_map_big, 0.9, eye_img_r_debug,
                                      0.1, 1)

        draw_utils.draw_cross(debug_img, (pupil_x0, pupil_y0), (0, 255, 255),
                              6)
        draw_utils.draw_cross(overlay_img, (pupil_x0, pupil_y0), (255, 0, 0),
                              6)

        # stacked_small_size = image_utils.stack_imgs_vertical([eye_img_small, cmap_norm])
        stacked_imgs = image_utils.stack_imgs_horizontal(
            [debug_img, overlay_img])
        __debug_imgs[debug_index] = stacked_imgs

        if debug_index == 2:
            full_debug_img = image_utils.stack_imgs_vertical(
                [__debug_imgs[1], __debug_imgs[2]])
            cv2.imshow(__winname, full_debug_img)
        elif debug_index > 2:
            cv2.imshow(__winname, stacked_imgs)

    return pupil_x0, pupil_y0
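
The (coord + 0.5) / scale step above maps an index in the downscaled centre map back to full-resolution pixel coordinates via the centre of the small pixel. A quick numeric check of that arithmetic with hypothetical sizes (the widths below are assumptions, not values from the code):

fast_width = 48.0                       # assumed small-image width
full_w = 320                            # assumed eye-ROI width
scale = fast_width / full_w             # 0.15

pupil_x0_small = 23                     # argmax column in the small centre map
pupil_x0_full = int((pupil_x0_small + 0.5) / scale)
print(pupil_x0_full)                    # 156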
Example #5
0
def find_pupil(eye_img_bgr, debug_index=False):
    """ Estimates the centre of the pupil using image gradients
    """

    eye_img_r = cv2.split(eye_img_bgr)[2]  # Extract red channel only

    # Scale to small image for faster computation
    scale = __fast_width / eye_img_bgr.shape[0]
    small_size = (int(
        (__fast_width / eye_img_bgr.shape[0]) * eye_img_bgr.shape[1]),
                  int(__fast_width))
    eye_img_small = cv2.resize(eye_img_r, small_size)

    center_map = get_center_map(eye_img_small)

    max_val_index = np.argmax(center_map)
    pupil_y0, pupil_x0 = max_val_index // center_map.shape[
        1], max_val_index % center_map.shape[1]

    # Scale back to original coordinates
    pupil_y0, pupil_x0 = int((pupil_y0 + 0.5) / scale), int(
        (pupil_x0 + 0.5) / scale)

    if debug_index:

        eye_img_r_debug = cv2.cvtColor(eye_img_r, cv2.COLOR_GRAY2BGR)
        debug_img = eye_img_bgr.copy()
        cmap_norm = cv2.normalize(center_map,
                                  alpha=0,
                                  beta=255,
                                  norm_type=cv2.NORM_MINMAX)
        center_map_big = cv2.resize(
            cmap_norm, (eye_img_bgr.shape[1], eye_img_bgr.shape[0]))
        center_map_big = cv2.cvtColor(center_map_big.astype(np.uint8),
                                      cv2.COLOR_GRAY2BGR)
        draw_utils.draw_cross(debug_img, (pupil_x0, pupil_y0), (0, 255, 255),
                              6)
        draw_utils.draw_cross(center_map_big, (pupil_x0, pupil_y0),
                              (255, 0, 0), 6)

        stacked_imgs = image_utils.stack_imgs_horizontal(
            [debug_img, eye_img_r_debug, center_map_big])
        __debug_imgs[debug_index] = stacked_imgs

        if debug_index == 2:
            full_debug_img = image_utils.stack_imgs_vertical(
                [__debug_imgs[1], __debug_imgs[2]])
            cv2.imshow(__winname, full_debug_img)
        elif debug_index > 2:
            cv2.imshow(__winname, stacked_imgs)

    return pupil_x0, pupil_y0
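
get_center_map itself is not included in these examples. Below is a minimal, naive sketch of a gradients-based centre map in the spirit of Timm & Barth, assuming the small greyscale input used above; the real implementation is presumably vectorised differently, so treat this only as an illustration of the idea.

import cv2
import numpy as np

def get_center_map(grey_small):
    # Score every candidate centre by how well unit displacement vectors
    # towards it align with the local image gradients (naive O(n^2) loop)
    grey = grey_small.astype(np.float64)
    gx = cv2.Sobel(grey, cv2.CV_64F, 1, 0, ksize=3)
    gy = cv2.Sobel(grey, cv2.CV_64F, 0, 1, ksize=3)
    mag = np.hypot(gx, gy)
    mask = mag > (mag.mean() + 0.5 * mag.std())    # keep only strong gradients
    gxs, gys = gx[mask] / mag[mask], gy[mask] / mag[mask]
    pys, pxs = np.nonzero(mask)

    h, w = grey.shape
    c_map = np.zeros((h, w))
    for cy in range(h):
        for cx in range(w):
            dx, dy = pxs - cx, pys - cy
            d_mag = np.hypot(dx, dy)
            d_mag[d_mag == 0] = 1.0
            dots = (dx * gxs + dy * gys) / d_mag
            c_map[cy, cx] = np.mean(np.maximum(dots, 0) ** 2)

    # Weight by darkness, since the pupil is dark
    return c_map * (255.0 - cv2.GaussianBlur(grey, (5, 5), 0))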
Example #6
0
def find_pupil(eye_img_bgr, debug_index=False):
    
    eye_img_r = cv2.cvtColor(eye_img_bgr, cv2.COLOR_BGR2GRAY)
    
    # Scale to small image for faster computation
    scale = __fast_width / eye_img_r.shape[1]
    small_size = (int(__fast_width), int((__fast_width / eye_img_r.shape[1]) * eye_img_r.shape[0]))
    eye_img_small = cv2.resize(eye_img_r, small_size)
    eye_img_small = cv2.GaussianBlur(eye_img_small, (3, 3), 0)
    
    center_map = get_center_map(eye_img_small)
    
    max_val_index = np.argmax(center_map)
    pupil_y0, pupil_x0 = max_val_index // center_map.shape[1], max_val_index % center_map.shape[1]
    
    # Scale back to original coordinates
    pupil_y0, pupil_x0 = int((pupil_y0 + 0.5) / scale), int((pupil_x0 + 0.5) / scale)
    
    if debug_index:
        
        eye_img_r_debug = cv2.cvtColor(eye_img_r, cv2.COLOR_GRAY2BGR)
        debug_img = eye_img_bgr.copy()
        
        cmap_norm = cv2.normalize(center_map, alpha=0, beta=255, norm_type=cv2.NORM_MINMAX).astype(np.uint8)
        center_map_big = cv2.resize(cmap_norm, (eye_img_r.shape[1], eye_img_r.shape[0])).astype(np.uint8)
        center_map_big = cv2.cvtColor(center_map_big, cv2.COLOR_GRAY2BGR)
        
        overlay_img = cv2.addWeighted(center_map_big, 0.9, eye_img_r_debug, 0.1, 1)
                
        draw_utils.draw_cross(debug_img, (pupil_x0, pupil_y0), (0, 255, 255), 6)
        draw_utils.draw_cross(overlay_img, (pupil_x0, pupil_y0), (255, 0, 0), 6)
        
        # stacked_small_size = image_utils.stack_imgs_vertical([eye_img_small, cmap_norm])
        stacked_imgs = image_utils.stack_imgs_horizontal([debug_img, overlay_img])
        __debug_imgs[debug_index] = stacked_imgs
        
        if debug_index == 2:
            full_debug_img = image_utils.stack_imgs_vertical([__debug_imgs[1], __debug_imgs[2]]);
            cv2.imshow(__winname, full_debug_img)
        elif debug_index > 2:
            cv2.imshow(__winname, stacked_imgs);

    return pupil_x0, pupil_y0
Example #7
0
def find_pupil(eye_img_bgr, debug_index=False):
    
    """ Estimates the centre of the pupil using image gradients
    """

    eye_img_r = cv2.split(eye_img_bgr)[2]   # Extract red channel only
    
    # Scale to small image for faster computation
    scale = __fast_width / eye_img_bgr.shape[0]
    small_size = (int((__fast_width / eye_img_bgr.shape[0]) * eye_img_bgr.shape[1]), int(__fast_width))
    eye_img_small = cv2.resize(eye_img_r, small_size)
    
    center_map = get_center_map(eye_img_small)
    
    max_val_index = np.argmax(center_map)
    pupil_y0, pupil_x0 = max_val_index // center_map.shape[1], max_val_index % center_map.shape[1]
    
    # Scale back to original coordinates
    pupil_y0, pupil_x0 = int((pupil_y0 + 0.5) / scale), int((pupil_x0 + 0.5) / scale)
    
    if debug_index:
        
        eye_img_r_debug = cv2.cvtColor(eye_img_r, cv2.COLOR_GRAY2BGR)
        debug_img = eye_img_bgr.copy()
        cmap_norm = cv2.normalize(center_map, alpha=0, beta=255, norm_type=cv2.NORM_MINMAX)
        center_map_big = cv2.resize(cmap_norm, (eye_img_bgr.shape[1], eye_img_bgr.shape[0]))
        center_map_big = cv2.cvtColor(center_map_big.astype(np.uint8), cv2.COLOR_GRAY2BGR)
        draw_utils.draw_cross(debug_img, (pupil_x0, pupil_y0), (0, 255, 255), 6)
        draw_utils.draw_cross(center_map_big, (pupil_x0, pupil_y0), (255, 0, 0), 6)
        
        stacked_imgs = image_utils.stack_imgs_horizontal([debug_img, eye_img_r_debug, center_map_big])
        __debug_imgs[debug_index] = stacked_imgs
        
        if debug_index == 2:
            full_debug_img = image_utils.stack_imgs_vertical([__debug_imgs[1], __debug_imgs[2]]);
            cv2.imshow(__winname, full_debug_img)
        elif debug_index > 2:
            cv2.imshow(__winname, stacked_imgs);
            
    return pupil_x0, pupil_y0
Example #8
0
def get_limb_pts(eye_img, phi=20, angle_step=1, debug_index=False):

    polar_img_w = 360 / angle_step  # Polar image has one column per angle of interest
    phi_range_1 = ((90 - phi) / angle_step, (90 + phi) / angle_step
                   )  # Ranges of angles to be ignored (too close to lids)
    phi_range_2 = ((270 - phi) / angle_step, (270 + phi) / angle_step)

    eye_img_grey = cv2.cvtColor(eye_img, cv2.COLOR_BGR2GRAY)  # Convert BGR to greyscale
    eye_img_grey = cv2.medianBlur(eye_img_grey, 5)

    # Scale to fixed size image for re-using transform matrix
    scale = eye_img.shape[0] / float(__fixed_width)
    img_fixed_size = cv2.resize(eye_img_grey, (__fixed_width, __fixed_width))

    # Transform image into polar coords and blur
    img_polar = linpolar(img_fixed_size,
                         trans_w=polar_img_w,
                         trans_h=__fixed_width / 2)
    img_polar = cv2.GaussianBlur(img_polar, (5, 5), 0)

    # Take the segment between min & max radii and filter with Gabor kernel
    img_polar_seg = img_polar[__min_limb_r:__max_limb_r, :]
    filter_img = cv2.filter2D(img_polar_seg, -1, __gabor_kern)

    # Black out ignored angles
    filter_img.T[phi_range_1[0]:phi_range_1[1]] = 0
    filter_img.T[phi_range_2[0]:phi_range_2[1]] = 0

    # In polar image, x <-> theta, y <-> magnitude
    pol_ys = np.argmax(filter_img,
                       axis=0)  # Take highest filter response as limbus points
    pol_xs = np.arange(filter_img.shape[1])[pol_ys > 0]
    mags = (pol_ys + __min_limb_r)[pol_ys > 0]
    thts = np.radians(pol_xs * angle_step)

    # Translate each point back into fixed img coords
    xs, ys = cv2.polarToCart(mags.astype(float), thts)
    xs = (
        xs + __fixed_width / 2
    ) * scale  # Shift and scale cart. coords back to original eye-ROI coords
    ys = (ys + __fixed_width / 2) * scale

    # Points returned in form
    #    [[ x1   y1]
    #     [ x2   y2]
    #         ...
    #     [ xn   yn]]
    pts_cart = np.concatenate([xs, ys], axis=1)

    # --------------------- Debug Drawing ---------------------
    if debug_index != False:
        debug_img = eye_img.copy()
        debug_polar = cv2.cvtColor(img_polar, cv2.COLOR_GRAY2BGR)

        cv2.imwrite("polar.jpg", debug_polar)

        cv2.line(debug_polar, (0, __min_limb_r),
                 (img_polar.shape[1], __min_limb_r), (255, 255, 0))
        cv2.line(debug_polar, (0, __max_limb_r),
                 (img_polar.shape[1], __max_limb_r), (255, 255, 0))
        cv2.circle(debug_img, (debug_img.shape[1] / 2, debug_img.shape[0] / 2),
                   int(debug_img.shape[0] * __limb_r_ratios[0]), (255, 255, 0))
        cv2.circle(debug_img, (debug_img.shape[1] / 2, debug_img.shape[0] / 2),
                   int(debug_img.shape[0] * __limb_r_ratios[1]), (255, 255, 0))

        pts_polar = np.squeeze(np.dstack([pol_xs, mags]))
        draw_points(debug_polar, pts_polar, (0, 0, 255), width=1)
        draw_points(debug_img, pts_cart, (0, 0, 255), width=1)

        stacked_imgs_polar = stack_imgs_vertical([debug_polar, filter_img])
        stacked_imgs = stack_imgs_horizontal(
            [debug_img, eye_img_grey, stacked_imgs_polar])

        __debug_imgs[debug_index] = stacked_imgs

        if debug_index == 2:
            full_debug_img = stack_imgs_vertical(
                [__debug_imgs[1], __debug_imgs[2]])
            cv2.imshow(__winname, full_debug_img)
        elif debug_index > 2:
            cv2.imshow(__winname, stacked_imgs)
    # --------------------- Debug Drawing ---------------------

    return pts_cart
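
The linpolar helper used above is not shown. One plausible implementation, assuming the unwrap is taken about the image centre with one output column per angle sample and one output row per radius pixel (matching the "x <-> theta, y <-> magnitude" comment):

import cv2
import numpy as np

def linpolar(img, trans_w, trans_h):
    # Linear polar unwrap: output column <-> angle, output row <-> radius
    trans_w, trans_h = int(trans_w), int(trans_h)
    cx, cy = img.shape[1] / 2.0, img.shape[0] / 2.0
    thetas = np.linspace(0, 2 * np.pi, trans_w, endpoint=False)
    radii = np.arange(trans_h, dtype=np.float32)
    map_x = (cx + np.outer(radii, np.cos(thetas))).astype(np.float32)
    map_y = (cy + np.outer(radii, np.sin(thetas))).astype(np.float32)
    return cv2.remap(img, map_x, map_y, cv2.INTER_LINEAR)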
Example #9
0
def find_lower_eyelid(eye_img, debug_index):

    line_y_offset = 0  # Amount to shift eye-lid by after detection

    img_blue = cv2.split(eye_img)[2]
    img_h, img_w = eye_img.shape[:2]  # shape is (rows, cols)

    # Indexes to extract window sub-images
    w_y1, w_y2 = int(img_h * __l_win_rats_h[0]), int(img_h *
                                                     sum(__l_win_rats_h[:2]))
    wl_x1, wl_x2 = int(img_w * __l_win_rats_w_l[0]), int(
        img_w * sum(__l_win_rats_w_l[:2]))
    wr_x1, wr_x2 = int(img_w * __l_win_rats_w_r[0]), int(
        img_w * sum(__l_win_rats_w_r[:2]))

    # Split image into two halves
    window_img_l = img_blue[w_y1:w_y2, wl_x1:wl_x2]
    window_img_r = img_blue[w_y1:w_y2, wr_x1:wr_x2]
    window_img_l = cv2.GaussianBlur(window_img_l, (5, 5), 20)
    window_img_r = cv2.GaussianBlur(window_img_r, (5, 5), 20)

    filter_img_l = cv2.filter2D(window_img_l, -1, __gabor_kern_diag)
    filter_img_r = cv2.filter2D(window_img_r, -1,
                                cv2.flip(__gabor_kern_diag, 1))
    filter_img = np.concatenate([filter_img_l, filter_img_r], axis=1)

    # For each column, keep the strongest filter response
    max_vals = np.max(filter_img, axis=0)
    ys = np.argmax(filter_img,
                   axis=0)  # Take highest filter response as eyelid points
    xs = (np.arange(filter_img.shape[1]) + wl_x1)[max_vals > __min_thresh]
    ys = (ys + w_y1)[max_vals > __min_thresh]

    l_lid_pts = np.squeeze(np.dstack([xs, ys]), axis=0)

    # Only RANSAC fit eyelid if there are enough points
    if l_lid_pts.size < __min_num_pts_u * 2:
        eyelid_lower_line = None
    else:
        eyelid_lower_line = ransac_line(l_lid_pts)

    if eyelid_lower_line is not None:
        a, b = eyelid_lower_line
        b = b + line_y_offset
        eyelid_lower_line = a, b

    if debug_index:
        debug_img = eye_img.copy()

        filter_img = cv2.cvtColor(filter_img, cv2.COLOR_GRAY2BGR)

        if l_lid_pts.size > 2:
            draw_points(debug_img, l_lid_pts, (0, 0, 255), 1, 2)

        if eyelid_lower_line is not None:
            cv2.line(debug_img, (0, int(b)), (img_w, int(a * img_w + b)),
                     (0, 255, 0))

        window_img = np.concatenate([window_img_l, window_img_r], axis=1)
        stacked_windows = stack_imgs_vertical([window_img, filter_img])
        stacked_imgs = stack_imgs_horizontal([stacked_windows, debug_img])
        __debug_imgs_lower[debug_index] = stacked_imgs

        if debug_index > 2:
            cv2.imshow(__winname + repr(debug_index) + "l", stacked_imgs)

    return eyelid_lower_line
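
ransac_line is assumed above; the following is only a minimal sketch of a RANSAC fit of y = a*x + b over the (N, 2) point array, with hypothetical iteration, error and inlier-count parameters:

import numpy as np

def ransac_line(pts, iters=50, max_err=2.0, min_inliers=10):
    # Repeatedly sample 2 points, keep the hypothesis with the most inliers,
    # then refine the winner with a least-squares fit over its inliers
    xs, ys = pts[:, 0].astype(float), pts[:, 1].astype(float)
    rng = np.random.RandomState(0)
    best_line, best_count = None, 0
    for _ in range(iters):
        i, j = rng.choice(len(pts), 2, replace=False)
        if xs[i] == xs[j]:
            continue                                   # vertical sample, skip
        a = (ys[j] - ys[i]) / (xs[j] - xs[i])
        b = ys[i] - a * xs[i]
        inliers = np.abs(a * xs + b - ys) < max_err
        if inliers.sum() > best_count and inliers.sum() >= min_inliers:
            a, b = np.polyfit(xs[inliers], ys[inliers], 1)
            best_line, best_count = (a, b), int(inliers.sum())
    return best_line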
Example #10
0
def find_upper_eyelid(eye_img, debug_index):

    u_2_win_rats_w = [0.0, 1.0, 0.0]  # Margins around ROI windows
    u_2_win_rats_h = [0.0, 0.5, 0.5]

    # FIXME - using r channel?
    img_blue = cv2.split(eye_img)[2]
    img_h, img_w = eye_img.shape[:2]  # shape is (rows, cols)

    # Indexes to extract window sub-images
    w_y1, w_y2 = int(img_h * u_2_win_rats_h[0]), int(img_h *
                                                     sum(u_2_win_rats_h[:2]))
    w_x1, w_x2 = int(img_w * u_2_win_rats_w[0]), int(img_w *
                                                     sum(u_2_win_rats_w[:2]))

    # Extract the window sub-image
    window_img = img_blue[w_y1:w_y2, w_x1:w_x2]

    # Suppress eyelashes
    morph_kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (5, 5))
    window_img = cv2.morphologyEx(window_img, cv2.MORPH_CLOSE, morph_kernel)

    # Filter the window with a horizontal Gabor kernel to respond to the lid edge
    filter_img_win = cv2.filter2D(window_img, -1, __gabor_kern_horiz)

    # Copy windows back into correct places in full filter image
    filter_img = np.zeros(eye_img.shape[:2], dtype=np.uint8)
    filter_img[w_y1:w_y2, w_x1:w_x2] = filter_img_win

    # Mask with circles
    cv2.circle(filter_img,
               (3 * filter_img.shape[1] / 7, filter_img.shape[0] / 2),
               filter_img.shape[1] / 4, 0, -1)
    cv2.circle(filter_img,
               (4 * filter_img.shape[1] / 7, filter_img.shape[0] / 2),
               filter_img.shape[1] / 4, 0, -1)

    ys = np.argmax(filter_img, axis=0)
    xs = np.arange(filter_img.shape[1])[ys > 0]
    ys = (ys)[ys > 0]

    u_lid_pts = []

    for i, x in enumerate(xs):
        col = filter_img.T[x]
        start_ind, end_ind = ys[i] + 5, min(ys[i] + 100, len(col) - 2)
        col_window = col[start_ind:end_ind]
        max_col = np.max(col)
        max_win = np.max(col_window)
        if max_col - max_win < 50:
            new_y = np.argmax(col_window) + ys[i] + 5
            u_lid_pts.append((x, new_y))
        else:
            u_lid_pts.append((x, ys[i]))

    # Only RANSAC fit eyelid if there are enough points
    if len(u_lid_pts) < __min_num_pts_u * 2:
        eyelid_upper_parabola = None
        u_lid_pts = []
    else:
        u_lid_pts_l = [(x, y) for (x, y) in u_lid_pts
                       if x < filter_img.shape[1] / 2]
        u_lid_pts_r = [(x, y) for (x, y) in u_lid_pts
                       if x > filter_img.shape[1] / 2]

        # Fit eye_img coord points of sclera-segs to degree 2 polynomial
        # a(x^2) + b(x) + c
        eyelid_upper_parabola = ransac_parabola(u_lid_pts_l,
                                                u_lid_pts_r,
                                                ransac_iters_max=5,
                                                refine_iters_max=2,
                                                max_err=4)
    if eyelid_upper_parabola is not None:
        a, b, c = eyelid_upper_parabola
        c = c - __parabola_y_offset
        eyelid_upper_parabola = a, b, c

    # --------------------- Debug Drawing ---------------------
    if debug_index:
        debug_img = eye_img.copy()

        if eyelid_upper_parabola is not None:
            lid_xs = np.arange(21) * img_w / 20
            lid_ys = a * lid_xs**2 + b * lid_xs + c
            lid_pts = np.dstack([lid_xs, lid_ys]).astype(int)
            cv2.polylines(debug_img, lid_pts, False, (0, 255, 0), 1)

        draw_points(debug_img, u_lid_pts, (0, 0, 255), 1, 2)
        filter_img = cv2.cvtColor(filter_img, cv2.COLOR_GRAY2BGR)
        draw_points(filter_img, u_lid_pts, (0, 0, 255), 1, 2)

        stacked_windows = stack_imgs_vertical([window_img, filter_img])
        stacked_imgs = stack_imgs_horizontal([stacked_windows, debug_img])
        __debug_imgs_upper[debug_index] = stacked_imgs

        if debug_index > 2:
            cv2.imshow(__winname + repr(debug_index) + "u", stacked_imgs)
    # --------------------- Debug Drawing ---------------------

    return eyelid_upper_parabola
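
ransac_parabola is likewise assumed; a minimal sketch that pools the left and right point sets and RANSAC-fits y = a*x**2 + b*x + c, using the same parameter names as the call above (everything else is an illustrative assumption):

import numpy as np

def ransac_parabola(pts_l, pts_r, ransac_iters_max=5, refine_iters_max=2, max_err=4):
    # Sample 3 points, fit a parabola, count inliers, then refine the best fit
    pts = np.array(list(pts_l) + list(pts_r), dtype=float)
    if len(pts) < 3:
        return None
    xs, ys = pts[:, 0], pts[:, 1]
    rng = np.random.RandomState(0)
    best_fit, best_count = None, 0
    for _ in range(ransac_iters_max):
        sample = rng.choice(len(pts), 3, replace=False)
        if len(set(xs[sample])) < 3:
            continue                                   # degenerate sample
        coeffs = np.polyfit(xs[sample], ys[sample], 2)
        inliers = np.abs(np.polyval(coeffs, xs) - ys) < max_err
        if inliers.sum() > best_count:
            for _ in range(refine_iters_max):
                coeffs = np.polyfit(xs[inliers], ys[inliers], 2)
                inliers = np.abs(np.polyval(coeffs, xs) - ys) < max_err
            best_fit, best_count = tuple(coeffs), int(inliers.sum())
    return best_fit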
Example #11
0
    def get_gaze_from_frame(self, frame):

        frame = cv2.undistort(frame, cam_mat_n7, dist_coefs_n7)

        frame_pyr = image_utils.make_gauss_pyr(frame, 4)
        full_frame = frame_pyr[1].copy()
        half_frame = frame_pyr[2].copy()

        limbuses = [None, None]
        gaze_pts_mm = [None, None]
        gaze_pts_px = [None, None]

        try:
            sub_img_cx0, sub_img_cy0 = None, None
            eye_r_roi, eye_l_roi = eye_extractor.get_eye_rois(
                frame_pyr, 4, debug=self.debug, device=self.device)

            for i, eye_roi in enumerate([eye_r_roi, eye_l_roi]):

                try:
                    if eye_roi.img is None: break

                    # Gives unique winnames for each ROI
                    debug_index = ((i + 1) if self.debug else False)

                    eye_roi.img = self.pre_proc.erase_specular(
                        eye_roi.img, debug=debug_index)

                    pupil_x0, pupil_y0 = eye_center_locator_combined.find_pupil(
                        eye_roi.img,
                        fast_width_grads=25.0,
                        fast_width_iso=80.0,
                        weight_grads=0.8,
                        weight_iso=0.2,
                        debug_index=debug_index)
                    eye_roi.refine_pupil((pupil_x0, pupil_y0), full_frame)
                    roi_x0, roi_y0, roi_w, roi_h = eye_roi.roi_x0, eye_roi.roi_y0, eye_roi.roi_w, eye_roi.roi_h

                    u_eyelid, l_eyelid = find_eyelids(eye_roi.img, debug_index)

                    pts_found = get_limb_pts(eye_img=eye_roi.img,
                                             phi=20,
                                             angle_step=1,
                                             debug_index=debug_index)
                    pts_found = eyelid_locator.filter_limbus_pts(
                        u_eyelid, l_eyelid, pts_found)

                    ellipse = ransac_ellipse.ransac_ellipse_fit(
                        points=pts_found,
                        bgr_img=eye_roi.img,
                        roi_pos=(roi_x0, roi_y0),
                        ransac_iters_max=5,
                        refine_iters_max=3,
                        max_err=1,
                        debug=False)

                    # Shift 2D limbus ellipse and points to account for eye ROI coords
                    (ell_x0, ell_y0), (ell_w,
                                       ell_h), angle = ellipse.rotated_rect
                    new_rotated_rect = (roi_x0 + ell_x0,
                                        roi_y0 + ell_y0), (ell_w, ell_h), angle
                    ellipse = Ellipse(new_rotated_rect)
                    pts_found_to_draw = [(px + roi_x0, py + roi_y0)
                                         for (px, py) in pts_found]

                    # Correct coords when extracting eye for half-frame
                    (sub_img_cx0, sub_img_cy0) = (roi_x0 + ell_x0,
                                                  roi_y0 + ell_y0)

                    # Ignore incorrect limbus
                    limbus = gaze_geometry.ellipse_to_limbuses_persp_geom(
                        ellipse, self.device)
                    limbuses[i] = limbus

                    # Draw eye features onto debug image
                    draw_utils.draw_limbus(full_frame,
                                           limbus,
                                           color=debug_colors[i],
                                           scale=1)
                    draw_utils.draw_points(full_frame,
                                           pts_found_to_draw,
                                           color=debug_colors[i],
                                           width=1,
                                           thickness=2)
                    draw_utils.draw_normal(full_frame,
                                           limbus,
                                           self.device,
                                           color=debug_colors[i],
                                           scale=1)
                    draw_utils.draw_normal(half_frame,
                                           limbus,
                                           self.device,
                                           color=debug_colors[i],
                                           scale=0.5,
                                           arrow_len_mm=20)
                    eye_img = full_frame[eye_roi.roi_y0:(eye_roi.roi_y0 +
                                                         eye_roi.roi_h),
                                         eye_roi.roi_x0:(eye_roi.roi_x0 +
                                                         eye_roi.roi_w)]
                    draw_utils.draw_eyelids(u_eyelid, l_eyelid, eye_img)

                except ransac_ellipse.NoEllipseFound:
                    if self.debug: print 'No Ellipse Found'
                    cv2.rectangle(full_frame, (roi_x0, roi_y0),
                                  (roi_x0 + roi_w, roi_y0 + roi_h),
                                  (0, 0, 255),
                                  thickness=4)

                except ransac_ellipse.CoverageTooLow as e:
                    if self.debug:
                        print 'Ellipse Coverage Too Low : %s' % e.msg
                    cv2.rectangle(full_frame, (roi_x0, roi_y0),
                                  (roi_x0 + roi_w, roi_y0 + roi_h),
                                  (0, 0, 255),
                                  thickness=4)

                finally:

                    # Extract only eye_roi block after other drawing methods
                    if sub_img_cx0 is not None:
                        eye_img = full_frame[sub_img_cy0 - 60:sub_img_cy0 + 60,
                                             sub_img_cx0 - 60:sub_img_cx0 + 60]
                    else:
                        eye_img = full_frame[eye_roi.roi_y0:(eye_roi.roi_y0 +
                                                             eye_roi.roi_h),
                                             eye_roi.roi_x0:(eye_roi.roi_x0 +
                                                             eye_roi.roi_w)]

                    # Transfer eye_img block to section of half_frame
                    half_frame[half_frame.shape[0] -
                               eye_img.shape[0]:half_frame.shape[0],
                               (half_frame.shape[1] - eye_img.shape[1]) *
                               i:half_frame.shape[1] if i else eye_img.
                               shape[1]] = eye_img

        except eye_extractor.NoEyesFound as e:
            if self.debug: print 'No Eyes Found: %s' % e.msg

        # Remove any extreme outliers
        limbuses = limbus_outlier_removal.remove_outliers(limbuses)

        # Get gaze points
        for i, limbus in enumerate(limbuses):
            if limbus is None: continue
            gaze_pts_mm[i] = gaze_geometry.get_gaze_point_mm(limbus)
            gaze_pts_px[i] = gaze_geometry.convert_gaze_pt_mm_to_px(
                gaze_pts_mm[i], self.device)

        smoothed_gaze_pt_mm = self.smoother.smooth_gaze(gaze_pts_mm)
        smoothed_gaze_pt_px = gaze_geometry.convert_gaze_pt_mm_to_px(
            smoothed_gaze_pt_mm, self.device)

        # Visualize in 2D and 3D
        cv2.imshow('gaze system', half_frame)
        self.visualizer3d.update_vis(limbuses, smoothed_gaze_pt_mm)

        # If recording, take a screenshot of vpython and add to vid. capture
        if self.recording:
            vis_screen = self.visualizer3d.take_screenshot()
            stacked_imgs = image_utils.stack_imgs_horizontal(
                [vis_screen, half_frame])
            self.vid_writer.write(stacked_imgs)

        return smoothed_gaze_pt_px
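
A hypothetical driver loop for get_gaze_from_frame; the gaze_system object, camera index, and quit key are assumptions, only the method itself comes from the code above:

import cv2

def run_gaze_loop(gaze_system, cam_index=0):
    # gaze_system is assumed to be whatever object exposes get_gaze_from_frame()
    cap = cv2.VideoCapture(cam_index)
    try:
        while True:
            ok, frame = cap.read()
            if not ok:
                break
            gaze_px = gaze_system.get_gaze_from_frame(frame)
            print('gaze (px): %s' % repr(gaze_px))
            if cv2.waitKey(1) & 0xFF == ord('q'):
                break
    finally:
        cap.release()
        cv2.destroyAllWindows()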
Example #12
0
def find_pupil(eye_img_bgr,
               fast_width_grads=25.5,
               fast_width_iso=80,
               weight_grads=0.9,
               weight_iso=0.1,
               debug_index=False):

    eye_img_r = cv2.split(eye_img_bgr)[2]

    fast_size_grads = (int(
        (fast_width_grads / eye_img_bgr.shape[0]) * eye_img_bgr.shape[1]),
                       int(fast_width_grads))
    fast_img_grads = cv2.resize(eye_img_r, fast_size_grads)

    fast_size_iso = (int(fast_width_iso),
                     int((fast_width_iso / eye_img_r.shape[1]) *
                         eye_img_r.shape[0]))
    fast_img_iso = cv2.resize(eye_img_r, fast_size_iso)

    c_map_grads = eye_center_locator_gradients.get_center_map(fast_img_grads)
    c_map_iso = eye_center_locator_isophote.get_center_map(fast_img_iso)

    c_map_norm_grads = cv2.normalize(c_map_grads,
                                     alpha=0,
                                     beta=255,
                                     norm_type=cv2.NORM_MINMAX)
    c_map_big_grads = cv2.resize(
        c_map_norm_grads,
        (eye_img_bgr.shape[1], eye_img_bgr.shape[0])).astype(np.uint8)

    c_map_norm_iso = cv2.normalize(c_map_iso,
                                   alpha=0,
                                   beta=255,
                                   norm_type=cv2.NORM_MINMAX)
    c_map_big_iso = cv2.resize(
        c_map_norm_iso,
        (eye_img_bgr.shape[1], eye_img_bgr.shape[0])).astype(np.uint8)

    joint_c_map = cv2.addWeighted(c_map_big_grads, weight_grads, c_map_big_iso,
                                  weight_iso, 1.0)

    max_val_index = np.argmax(joint_c_map)
    pupil_y0, pupil_x0 = max_val_index // joint_c_map.shape[
        1], max_val_index % joint_c_map.shape[1]

    max_val_index_2 = np.argmax(c_map_big_grads)
    pupil_y0_2, pupil_x0_2 = max_val_index_2 // joint_c_map.shape[
        1], max_val_index_2 % joint_c_map.shape[1]

    max_val_index_3 = np.argmax(c_map_big_iso)
    pupil_y0_3, pupil_x0_3 = max_val_index_3 // joint_c_map.shape[
        1], max_val_index_3 % joint_c_map.shape[1]

    if debug_index:

        debug_img = eye_img_bgr.copy()

        joint_c_map = cv2.cvtColor(joint_c_map, cv2.COLOR_GRAY2BGR)
        c_map_big_iso = cv2.cvtColor(c_map_big_iso, cv2.COLOR_GRAY2BGR)
        c_map_big_grads = cv2.cvtColor(c_map_big_grads, cv2.COLOR_GRAY2BGR)

        draw_utils.draw_cross(debug_img, (pupil_x0, pupil_y0), (0, 255, 255),
                              16, 2)
        draw_utils.draw_cross(joint_c_map, (pupil_x0_3, pupil_y0_3),
                              (255, 0, 255), 8, 2)
        draw_utils.draw_cross(joint_c_map, (pupil_x0_2, pupil_y0_2),
                              (255, 0, 255), 8, 2)
        draw_utils.draw_cross(joint_c_map, (pupil_x0, pupil_y0), (255, 0, 0),
                              16, 2)

        draw_utils.draw_cross(c_map_big_iso, (pupil_x0_3, pupil_y0_3),
                              (255, 0, 0), 16, 2)
        draw_utils.draw_cross(c_map_big_grads, (pupil_x0_2, pupil_y0_2),
                              (255, 0, 0), 16, 2)

        stacked_imgs = image_utils.stack_imgs_horizontal(
            [debug_img, c_map_big_grads, c_map_big_iso, joint_c_map])
        __debug_imgs[debug_index] = stacked_imgs

        if debug_index == 2:
            full_debug_img = image_utils.stack_imgs_vertical(
                [__debug_imgs[1], __debug_imgs[2]])
            cv2.imshow(__winname, full_debug_img)
        elif debug_index > 2:
            cv2.imshow(__winname, stacked_imgs)

    return pupil_x0, pupil_y0
Example #13
0
def find_lower_eyelid(eye_img, debug_index):

    line_y_offset = 0  # Amount to shift eye-lid by after detection

    img_blue = cv2.split(eye_img)[2]
    img_h, img_w = eye_img.shape[:2]  # shape is (rows, cols)

    # Indexes to extract window sub-images
    w_y1, w_y2 = int(img_h * __l_win_rats_h[0]), int(img_h * sum(__l_win_rats_h[:2]))
    wl_x1, wl_x2 = int(img_w * __l_win_rats_w_l[0]), int(img_w * sum(__l_win_rats_w_l[:2]))
    wr_x1, wr_x2 = int(img_w * __l_win_rats_w_r[0]), int(img_w * sum(__l_win_rats_w_r[:2]))

    # Split image into two halves
    window_img_l = img_blue[w_y1:w_y2, wl_x1:wl_x2]
    window_img_r = img_blue[w_y1:w_y2, wr_x1:wr_x2]
    window_img_l = cv2.GaussianBlur(window_img_l, (5, 5), 20)
    window_img_r = cv2.GaussianBlur(window_img_r, (5, 5), 20)

    filter_img_l = cv2.filter2D(window_img_l, -1, __gabor_kern_diag)
    filter_img_r = cv2.filter2D(window_img_r, -1, cv2.flip(__gabor_kern_diag, 1))
    filter_img = np.concatenate([filter_img_l, filter_img_r], axis=1)

    # For each column, keep the strongest filter response
    max_vals = np.max(filter_img, axis=0)
    ys = np.argmax(filter_img, axis=0)  # Take highest filter response as eyelid points
    xs = (np.arange(filter_img.shape[1]) + wl_x1)[max_vals > __min_thresh]
    ys = (ys + w_y1)[max_vals > __min_thresh]

    l_lid_pts = np.squeeze(np.dstack([xs, ys]), axis=0)

    # Only RANSAC fit eyelid if there are enough points
    if l_lid_pts.size < __min_num_pts_u * 2:
        eyelid_lower_line = None
    else:
        eyelid_lower_line = ransac_line(l_lid_pts)

    if eyelid_lower_line is not None:
        a, b = eyelid_lower_line
        b = b + line_y_offset
        eyelid_lower_line = a, b

    if debug_index:
        debug_img = eye_img.copy()

        filter_img = cv2.cvtColor(filter_img, cv2.COLOR_GRAY2BGR)

        if l_lid_pts.size > 2:
            draw_points(debug_img, l_lid_pts, (0, 0, 255), 1, 2)

        if eyelid_lower_line is not None:
            cv2.line(debug_img, (0, int(b)), (img_w, int(a * img_w + b)), (0, 255, 0))

        window_img = np.concatenate([window_img_l, window_img_r], axis=1)
        stacked_windows = stack_imgs_vertical([window_img, filter_img])
        stacked_imgs = stack_imgs_horizontal([stacked_windows, debug_img])
        __debug_imgs_lower[debug_index] = stacked_imgs

        if debug_index > 2:
            cv2.imshow(__winname + repr(debug_index) + "l", stacked_imgs)

    return eyelid_lower_line
Example #14
0
def find_upper_eyelid(eye_img, debug_index):

    u_2_win_rats_w = [0.0, 1.0, 0.0]  # Margins around ROI windows
    u_2_win_rats_h = [0.0, 0.5, 0.5]

    # FIXME - using r channel?
    img_blue = cv2.split(eye_img)[2]
    img_h, img_w = eye_img.shape[:2]  # shape is (rows, cols)

    # Indexes to extract window sub-images
    w_y1, w_y2 = int(img_h * u_2_win_rats_h[0]), int(img_h * sum(u_2_win_rats_h[:2]))
    w_x1, w_x2 = int(img_w * u_2_win_rats_w[0]), int(img_w * sum(u_2_win_rats_w[:2]))

    # Extract the window sub-image
    window_img = img_blue[w_y1:w_y2, w_x1:w_x2]

    # Suppress eyelashes
    morph_kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (5, 5))
    window_img = cv2.morphologyEx(window_img, cv2.MORPH_CLOSE, morph_kernel)

    # Filter the window with a horizontal Gabor kernel to respond to the lid edge
    filter_img_win = cv2.filter2D(window_img, -1, __gabor_kern_horiz)

    # Copy windows back into correct places in full filter image
    filter_img = np.zeros(eye_img.shape[:2], dtype=np.uint8)
    filter_img[w_y1:w_y2, w_x1:w_x2] = filter_img_win

    # Mask with circles
    cv2.circle(filter_img, (3 * filter_img.shape[1] / 7, filter_img.shape[0] / 2), filter_img.shape[1] / 4, 0, -1)
    cv2.circle(filter_img, (4 * filter_img.shape[1] / 7, filter_img.shape[0] / 2), filter_img.shape[1] / 4, 0, -1)

    ys = np.argmax(filter_img, axis=0)
    xs = np.arange(filter_img.shape[1])[ys > 0]
    ys = (ys)[ys > 0]

    u_lid_pts = []

    for i, x in enumerate(xs):
        col = filter_img.T[x]
        start_ind, end_ind = ys[i] + 5, min(ys[i] + 100, len(col) - 2)
        col_window = col[start_ind:end_ind]
        max_col = np.max(col)
        max_win = np.max(col_window)
        if max_col - max_win < 50:
            new_y = np.argmax(col_window) + ys[i] + 5
            u_lid_pts.append((x, new_y))
        else:
            u_lid_pts.append((x, ys[i]))

    # Only RANSAC fit eyelid if there are enough points
    if len(u_lid_pts) < __min_num_pts_u * 2:
        eyelid_upper_parabola = None
        u_lid_pts = []
    else:
        u_lid_pts_l = [(x, y) for (x, y) in u_lid_pts if x < filter_img.shape[1] / 2]
        u_lid_pts_r = [(x, y) for (x, y) in u_lid_pts if x > filter_img.shape[1] / 2]

        # Fit eye_img coord points of sclera-segs to degree 2 polynomial
        # a(x^2) + b(x) + c
        eyelid_upper_parabola = ransac_parabola(
            u_lid_pts_l, u_lid_pts_r, ransac_iters_max=5, refine_iters_max=2, max_err=4
        )
    if eyelid_upper_parabola is not None:
        a, b, c = eyelid_upper_parabola
        c = c - __parabola_y_offset
        eyelid_upper_parabola = a, b, c

    # --------------------- Debug Drawing ---------------------
    if debug_index:
        debug_img = eye_img.copy()

        if eyelid_upper_parabola is not None:
            lid_xs = np.arange(21) * img_w / 20
            lid_ys = a * lid_xs ** 2 + b * lid_xs + c
            lid_pts = np.dstack([lid_xs, lid_ys]).astype(int)
            cv2.polylines(debug_img, lid_pts, False, (0, 255, 0), 1)

        draw_points(debug_img, u_lid_pts, (0, 0, 255), 1, 2)
        filter_img = cv2.cvtColor(filter_img, cv2.COLOR_GRAY2BGR)
        draw_points(filter_img, u_lid_pts, (0, 0, 255), 1, 2)

        stacked_windows = stack_imgs_vertical([window_img, filter_img])
        stacked_imgs = stack_imgs_horizontal([stacked_windows, debug_img])
        __debug_imgs_upper[debug_index] = stacked_imgs

        if debug_index > 2:
            cv2.imshow(__winname + repr(debug_index) + "u", stacked_imgs)
    # --------------------- Debug Drawing ---------------------

    return eyelid_upper_parabola
Example #15
0
def get_limb_pts(eye_img, phi=20, angle_step=1, debug_index=False):
    
    polar_img_w = 360 / angle_step                                      # Polar image has one column per angle of interest
    phi_range_1 = ((90 - phi) / angle_step, (90 + phi) / angle_step)    # Ranges of angles to be ignored (too close to lids)
    phi_range_2 = ((270 - phi) / angle_step, (270 + phi) / angle_step)
    
    eye_img_grey = cv2.cvtColor(eye_img, cv2.COLOR_BGR2GRAY)      # Convert BGR to greyscale
    eye_img_grey = cv2.medianBlur(eye_img_grey, 5)
    
    # Scale to fixed size image for re-using transform matrix
    scale = eye_img.shape[0] / float(__fixed_width)
    img_fixed_size = cv2.resize(eye_img_grey, (__fixed_width, __fixed_width))
    
    # Transform image into polar coords and blur
    img_polar = linpolar(img_fixed_size, trans_w=polar_img_w, trans_h=__fixed_width / 2)
    img_polar = cv2.GaussianBlur(img_polar, (5, 5), 0)
    
    # Take the segment between min & max radii and filter with Gabor kernel
    img_polar_seg = img_polar[__min_limb_r:__max_limb_r, :]
    filter_img = cv2.filter2D(img_polar_seg, -1, __gabor_kern)
    
    # Black out ignored angles
    filter_img.T[ phi_range_1[0] : phi_range_1[1] ] = 0
    filter_img.T[ phi_range_2[0] : phi_range_2[1] ] = 0

    # In polar image, x <-> theta, y <-> magnitude         
    pol_ys = np.argmax(filter_img, axis=0)                      # Take highest filter response as limbus points
    pol_xs = np.arange(filter_img.shape[1])[pol_ys > 0]
    mags = (pol_ys + __min_limb_r)[pol_ys > 0]
    thts = np.radians(pol_xs * angle_step)

    # Translate each point back into fixed img coords
    xs, ys = cv2.polarToCart(mags.astype(float), thts)
    xs = (xs + __fixed_width / 2) * scale                       # Shift and scale cart. coords back to original eye-ROI coords
    ys = (ys + __fixed_width / 2) * scale
    
    # Points returned in form
    #    [[ x1   y1]
    #     [ x2   y2]
    #         ...
    #     [ xn   yn]]
    pts_cart = np.concatenate([xs, ys], axis=1)
    
    # --------------------- Debug Drawing ---------------------
    if debug_index != False:
        debug_img = eye_img.copy()
        debug_polar = cv2.cvtColor(img_polar, cv2.COLOR_GRAY2BGR)
        
        cv2.imwrite("polar.jpg",debug_polar)
        
        cv2.line(debug_polar, (0, __min_limb_r), (img_polar.shape[1], __min_limb_r), (255, 255, 0))
        cv2.line(debug_polar, (0, __max_limb_r), (img_polar.shape[1], __max_limb_r), (255, 255, 0))
        cv2.circle(debug_img, (debug_img.shape[1] / 2, debug_img.shape[0] / 2), int(debug_img.shape[0] * __limb_r_ratios[0]), (255, 255, 0))
        cv2.circle(debug_img, (debug_img.shape[1] / 2, debug_img.shape[0] / 2), int(debug_img.shape[0] * __limb_r_ratios[1]), (255, 255, 0))
        
        pts_polar = np.squeeze(np.dstack([pol_xs, mags]))
        draw_points(debug_polar, pts_polar, (0, 0, 255), width=1)
        draw_points(debug_img, pts_cart, (0, 0, 255), width=1)
    
        stacked_imgs_polar = stack_imgs_vertical([debug_polar, filter_img])
        stacked_imgs = stack_imgs_horizontal([debug_img, eye_img_grey, stacked_imgs_polar])
        
        __debug_imgs[debug_index] = stacked_imgs
        
        if debug_index == 2:
            full_debug_img = stack_imgs_vertical([__debug_imgs[1], __debug_imgs[2]]);
            cv2.imshow(__winname, full_debug_img)
        elif debug_index > 2:
            cv2.imshow(__winname, stacked_imgs);
    # --------------------- Debug Drawing ---------------------

    return pts_cart
Example #16
0
 def get_gaze_from_frame(self, frame):
     
     frame = cv2.undistort(frame, cam_mat_n7, dist_coefs_n7)
     
     frame_pyr = image_utils.make_gauss_pyr(frame, 4)
     full_frame = frame_pyr[1].copy()
     half_frame = frame_pyr[2].copy()
     
     limbuses = [None, None]
     gaze_pts_mm = [None, None]
     gaze_pts_px = [None, None]
     
     try:
         sub_img_cx0, sub_img_cy0 = None, None
         eye_r_roi, eye_l_roi = eye_extractor.get_eye_rois(frame_pyr, 4, debug=self.debug, device=self.device)
         
         for i, eye_roi in enumerate([eye_r_roi, eye_l_roi]):
             
             try:
                 if eye_roi.img is None: break
                 
                 # Gives unique winnames for each ROI
                 debug_index = ((i + 1) if self.debug else False)  
         
                 eye_roi.img = self.pre_proc.erase_specular(eye_roi.img, debug=debug_index)
         
                 pupil_x0, pupil_y0 = eye_center_locator_combined.find_pupil(eye_roi.img,
                                                                             fast_width_grads=25.0,
                                                                             fast_width_iso=80.0,
                                                                             weight_grads=0.8,
                                                                             weight_iso=0.2,
                                                                             debug_index=debug_index)
                 eye_roi.refine_pupil((pupil_x0, pupil_y0), full_frame)
                 roi_x0, roi_y0, roi_w, roi_h = eye_roi.roi_x0, eye_roi.roi_y0, eye_roi.roi_w, eye_roi.roi_h
                 
                 u_eyelid, l_eyelid = find_eyelids(eye_roi.img, debug_index)
                 
                 pts_found = get_limb_pts(eye_img=eye_roi.img,
                                          phi=20,
                                          angle_step=1,
                                          debug_index=debug_index)
                 pts_found = eyelid_locator.filter_limbus_pts(u_eyelid, l_eyelid, pts_found)
                 
                 ellipse = ransac_ellipse.ransac_ellipse_fit(points=pts_found,
                                                             bgr_img=eye_roi.img,
                                                             roi_pos=(roi_x0, roi_y0),
                                                             ransac_iters_max=5,
                                                             refine_iters_max=3,
                                                             max_err=1,
                                                             debug=False)
                 
                 # Shift 2D limbus ellipse and points to account for eye ROI coords
                 (ell_x0, ell_y0), (ell_w, ell_h), angle = ellipse.rotated_rect               
                 new_rotated_rect = (roi_x0 + ell_x0, roi_y0 + ell_y0), (ell_w, ell_h), angle
                 ellipse = Ellipse(new_rotated_rect)                                                                                
                 pts_found_to_draw = [(px + roi_x0, py + roi_y0) for (px, py) in pts_found]
                 
                 # Correct coords when extracting eye for half-frame
                 (sub_img_cx0, sub_img_cy0) = (roi_x0 + ell_x0, roi_y0 + ell_y0)
                 
                 # Ignore incorrect limbus
                 limbus = gaze_geometry.ellipse_to_limbuses_persp_geom(ellipse, self.device)
                 limbuses[i] = limbus
                 
                 # Draw eye features onto debug image
                 draw_utils.draw_limbus(full_frame, limbus, color=debug_colors[i], scale=1)
                 draw_utils.draw_points(full_frame, pts_found_to_draw, color=debug_colors[i], width=1, thickness=2)
                 draw_utils.draw_normal(full_frame, limbus, self.device, color=debug_colors[i], scale=1)
                 draw_utils.draw_normal(half_frame, limbus, self.device, color=debug_colors[i], scale=0.5, arrow_len_mm=20)
                 eye_img = full_frame[eye_roi.roi_y0:(eye_roi.roi_y0 + eye_roi.roi_h),
                                      eye_roi.roi_x0:(eye_roi.roi_x0 + eye_roi.roi_w)]
                 draw_utils.draw_eyelids(u_eyelid, l_eyelid, eye_img)
                 
             except ransac_ellipse.NoEllipseFound:
                 if self.debug: print 'No Ellipse Found'
                 cv2.rectangle(full_frame, (roi_x0, roi_y0), (roi_x0 + roi_w, roi_y0 + roi_h), (0, 0, 255), thickness=4)
                 
             except ransac_ellipse.CoverageTooLow as e:
                 if self.debug: print 'Ellipse Coverage Too Low : %s' % e.msg
                 cv2.rectangle(full_frame, (roi_x0, roi_y0), (roi_x0 + roi_w, roi_y0 + roi_h), (0, 0, 255), thickness=4)
                 
             finally:
                 
                 # Extract only eye_roi block after other drawing methods
                 if sub_img_cx0 is not None: 
                     eye_img = full_frame[sub_img_cy0 - 60:sub_img_cy0 + 60,
                                          sub_img_cx0 - 60:sub_img_cx0 + 60]
                 else:
                     eye_img = full_frame[eye_roi.roi_y0:(eye_roi.roi_y0 + eye_roi.roi_h),
                                          eye_roi.roi_x0:(eye_roi.roi_x0 + eye_roi.roi_w)]
                 
                 # Transfer eye_img block to section of half_frame
                 half_frame[half_frame.shape[0] - eye_img.shape[0]:half_frame.shape[0],
                            (half_frame.shape[1] - eye_img.shape[1]) * i: half_frame.shape[1] if i else eye_img.shape[1]] = eye_img
                            
     except eye_extractor.NoEyesFound as e:
         if self.debug: print 'No Eyes Found: %s' % e.msg
         
     # Remove any extreme outliers
     limbuses = limbus_outlier_removal.remove_outliers(limbuses)
     
     # Get gaze points
     for i, limbus in enumerate(limbuses):
         if limbus is None: continue
         gaze_pts_mm[i] = gaze_geometry.get_gaze_point_mm(limbus)
         gaze_pts_px[i] = gaze_geometry.convert_gaze_pt_mm_to_px(gaze_pts_mm[i], self.device)
     
     smoothed_gaze_pt_mm = self.smoother.smooth_gaze(gaze_pts_mm)
     smoothed_gaze_pt_px = gaze_geometry.convert_gaze_pt_mm_to_px(smoothed_gaze_pt_mm, self.device)
     
     # Visualize in 2D and 3D
     cv2.imshow('gaze system', half_frame)
     self.visualizer3d.update_vis(limbuses, smoothed_gaze_pt_mm)
     
     # If recording, take a screenshot of vpython and add to vid. capture
     if self.recording:
         vis_screen = self.visualizer3d.take_screenshot()
         stacked_imgs = image_utils.stack_imgs_horizontal([vis_screen, half_frame])
         self.vid_writer.write(stacked_imgs)
         
     return smoothed_gaze_pt_px