def transform(segment, resolution, window_height, mode='', **kw):
    # determine type of transform given mode
    if mode == 'upsample':
        return iris_transform(segment, Pupil(segment), window_height, theta_resolution=resolution)
    elif mode == 'eyelid':
        print('Eyelid Currently Not Implemented')
        return None
        # pupil = Pupil(segment)
        # seg = iris_transform(segment, pupil, window_height, theta_resolution=1)
        #
        # eyelid_mask = eyelid.detect_eyelid(segment, pupil, ROI_STRIP_WIDTH=100, ROI_BUFFER=5, POLY_TRANS=-10)
        # eyelid_mask = iris_transform(eyelid_mask, pupil, window_height, theta_resolution=1)
        # eyelid_mask_inv = eyelid_mask == 0
        #
        # seg = np.multiply(seg, eyelid_mask)
        #
        # mean = np.sum(seg)/(seg.size - len(np.where(eyelid_mask_inv)[0]))
        # mean = np.sum(seg)/seg.size
        # noise_seg = np.multiply(np.ones(seg.shape), mean)
        # # noise(seg.shape, mean)
        # seg = np.add(seg, np.multiply(eyelid_mask_inv, noise_seg))
        #
        # return seg
    else:
        return iris_transform(segment, Pupil(segment), window_height, theta_resolution=resolution)
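# --- Illustrative usage sketch (not part of the original module) ---
# A minimal example of calling transform() on a single grayscale eye frame, assuming
# the module's existing imports (iris_transform, Pupil). The parameter values are
# placeholders chosen to mirror how the batch routines below use the transform.
def _example_transform_usage(frame):
    # polar transform with ~10x angular upsampling (theta_resolution = 0.1)
    upsampled = transform(frame, resolution=0.1, window_height=30, mode='upsample')
    # default mode: same transform at the requested resolution
    plain = transform(frame, resolution=1, window_height=30)
    # 'eyelid' mode is currently a stub and returns None
    masked = transform(frame, resolution=1, window_height=30, mode='eyelid')
    return upsampled, plain, masked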
def quantify_torsion(WINDOW_RADIUS, RESOLUTION, torsion_mode, transform_mode, video,
                     start_frame, reference_frame, end_frame, pupil_list, blink_list,
                     threshold, alternate, WINDOW_THETA=None, SEGMENT_THETA=None,
                     upper_iris=None, lower_iris=None, feature_coords=None):
    '''
    Utilizes the 2D cross correlation algorithm xcorr2d to measure and return torsion
    using the settings given.

    Inputs:
        WINDOW_RADIUS: Integer
            Mandatory input which sets the radial thickness of the iris transform.
        RESOLUTION: Double
            Mandatory input which sets the upsampling factor or the interpolation
            resolution, depending on torsion_mode below.
        torsion_mode: String
            Mandatory input which determines whether interpolation or upsampling is used.
            If torsion_mode = 'interp', interpolation is used and RESOLUTION is assumed
            to be the interpolation resolution. Consequently an upsampling factor of 1
            is used in the transform.
            If torsion_mode = 'upsample', upsampling is used and RESOLUTION is assumed
            to be the upsampling factor.
        transform_mode: String
            Mandatory input which determines whether a subset of the iris or the full
            iris is used during correlation.
            If transform_mode = 'subset', a subset of the iris is used.
            If transform_mode = 'full', the full iris is used.
            If transform_mode = 'alternate', the full iris is used, with a subset
            window also built and used on frames flagged as blinks.
        video: Video object
            Mandatory input.
        start_frame: Integer
            Mandatory input which is the index of the first frame to analyze.
        reference_frame: Integer
            Mandatory input which is the index of the frame used as the torsion reference.
        end_frame: Integer
            Mandatory input which is the index of the last frame to analyze.
        pupil_list: dictionary of pupil objects
            key: (int) video frame
            value: pupil object
        blink_list: dictionary of whether or not a frame captures a blink
            key: (int) video frame
            value: 1 - blink, 0 - no blink, None - not evaluated
        threshold: Integer
            Pupil detection threshold, used to locate the pupil in the reference frame
            when it differs from the start frame.
        alternate: Boolean
            If True, a subset reference window is also built and used on frames flagged
            as blinks.
        WINDOW_THETA: Integer
            Angle bounds above/below the feature that define the portion of the iris to
            be included in the reference iris window. This window should be smaller than
            the segment. Mandatory input if transform_mode = 'subset'.
        SEGMENT_THETA: Integer
            Angle bounds above/below the feature that define the portion of the iris to
            be included in each segment, within which the window is to be located.
            Mandatory input if transform_mode = 'subset'.
        upper_iris: dictionary, {'c': column index, 'r': row index}
            Holds the [row, column] coordinates of the upper boundary of the iris that
            is not occluded by eyelids or eyelashes.
        lower_iris: dictionary, {'c': column index, 'r': row index}
            Holds the [row, column] coordinates of the lower boundary of the iris that
            is not occluded by eyelids or eyelashes.
        feature_coords: dictionary, {'c': column index, 'r': row index}
            Holds the coordinates of the feature tracked during subset correlation.
            Mandatory input if transform_mode = 'subset'.

    Returns:
        torsion: Dictionary
            key = frame number
            value = rotation from reference frame
        torsion_derivative: Dictionary
            key = frame number
            value = rotation from previous frame
    '''
    upsample_factor = 1
    noise_replace = False

    if torsion_mode == 'interp':
        pass
    elif torsion_mode == 'upsample':
        upsample_factor = RESOLUTION

    if transform_mode == 'full':
        if upper_iris and lower_iris:
            noise_replace = True
        start = 0
        # use the full 360 degree angular range of the iris transform
        reference_bounds = (0, 360)
        comparison_bounds = (0, 360)
    elif transform_mode == 'subset':
        feature_r, feature_theta = iris.get_polar_coord(
            feature_coords['r'], feature_coords['c'], pupil_list[start_frame])
        reference_bounds = (feature_theta - WINDOW_THETA, feature_theta + WINDOW_THETA)
        comparison_bounds = (feature_theta - SEGMENT_THETA, feature_theta + SEGMENT_THETA)
        start = int((SEGMENT_THETA - WINDOW_THETA) / upsample_factor)
    # TODO: refactor -- this branch duplicates the 'full' and 'subset' setup above
    elif transform_mode == 'alternate':
        # set up as if doing a full iris analysis
        if upper_iris and lower_iris:
            noise_replace = True
        start = 0
        reference_bounds = (0, 360)
        comparison_bounds = (0, 360)
        # also set up as if doing the subset analysis
        feature_r, feature_theta = iris.get_polar_coord(
            feature_coords['r'], feature_coords['c'], pupil_list[start_frame])
        reference_bounds_sr = (feature_theta - WINDOW_THETA, feature_theta + WINDOW_THETA)
        comparison_bounds_sr = (feature_theta - SEGMENT_THETA, feature_theta + SEGMENT_THETA)
        start_sr = int((SEGMENT_THETA - WINDOW_THETA) / upsample_factor)

    # get the reference window from the reference frame of the video
    # this is the base for all torsion, i.e. all rotation is relative to this window
    if start_frame == reference_frame:
        if alternate:
            first_window_sr = iris.iris_transform(
                video[start_frame], pupil_list[start_frame], WINDOW_RADIUS,
                theta_resolution=upsample_factor, theta_window=reference_bounds_sr)
        first_window = iris.iris_transform(
            video[start_frame], pupil_list[start_frame], WINDOW_RADIUS,
            theta_resolution=upsample_factor, theta_window=reference_bounds)
    else:
        ref_pupil = pupil.Pupil(video[reference_frame], threshold)
        if alternate:
            first_window_sr = iris.iris_transform(
                video[reference_frame], ref_pupil, WINDOW_RADIUS,
                theta_resolution=upsample_factor, theta_window=reference_bounds_sr)
        first_window = iris.iris_transform(
            video[reference_frame], ref_pupil, WINDOW_RADIUS,
            theta_resolution=upsample_factor, theta_window=reference_bounds)

    # TODO: if noise replacement is selected, segment removal cannot also be selected
    if noise_replace:
        # transform (column, row) into (theta, r) space about the pupil centre
        # to get the boundaries of the usable iris in polar coordinates
        upper_iris_r, upper_iris_theta = iris.get_polar_coord(
            upper_iris['r'], upper_iris['c'], pupil_list[start_frame])
        lower_iris_r, lower_iris_theta = iris.get_polar_coord(
            lower_iris['r'], lower_iris['c'], pupil_list[start_frame])

        # mirror the upper angular boundary across the vertical axis
        upper_occlusion_theta = (90 - np.absolute(upper_iris_theta - 90),
                                 90 + np.absolute(upper_iris_theta - 90))

        # mirror the lower angular boundary across the vertical axis,
        # dealing with the branch cut at 270 degrees
        if lower_iris_theta < 0:
            lower_occlusion_theta = (-90 - np.absolute(lower_iris_theta + 90),
                                     -90 + np.absolute(lower_iris_theta + 90))
        else:
            lower_occlusion_theta = (-90 - np.absolute(lower_iris_theta - 270),
                                     -90 + np.absolute(lower_iris_theta - 270))

        # replace occluded sections with noise
        first_window = eyelid_removal.noise_replace(
            first_window, upper_occlusion_theta, lower_occlusion_theta)

    # TODO: consider moving this extension step next to the reference-window setup above
    if transform_mode == 'full' or transform_mode == 'alternate':
        # extend the iris window so rotations up to MAX_ANGLE stay in view
        first_window = eyelid_removal.iris_extension(
            first_window, theta_resolution=upsample_factor,
            lower_theta=-pre.MAX_ANGLE, upper_theta=pre.MAX_ANGLE)

    # TODO: Add a button to show iris segments
    torsion = {}
    torsion_derivative = {}

    # find torsion for frames start_frame:end_frame relative to the reference window
    for i, frame in tqdm(enumerate(video[start_frame:end_frame])):
        frame_loc = i + start_frame

        # check that a pupil exists and that blink detection produced a result
        if not pupil_list[frame_loc] or blink_list[frame_loc] is None:
            # if there is no pupil, torsion cannot be calculated
            torsion[frame_loc] = None
            torsion_derivative[frame_loc] = None
            print('WARNING: No pupil in frame: %d \n Torsion cannot be calculated' % (frame_loc))
        else:
            # unwrap the iris (convert into polar coordinates)
            # current_frame = iris.iris_transform(frame, pupil_list[frame_loc], WINDOW_RADIUS, theta_resolution=upsample_factor, theta_window=comparison_bounds)
            # TODO: handle the case where the eyelid is not found
            if alternate and blink_list[frame_loc] == 1:
                # TODO: replace this flag check with proper blink/eyelid detection
                current_frame = iris.iris_transform(
                    frame, pupil_list[frame_loc], WINDOW_RADIUS,
                    theta_resolution=upsample_factor, theta_window=comparison_bounds_sr)
                # degree of rotation of the current frame relative to the reference frame
                deg = xcorr2d.xcorr2d(current_frame, first_window_sr, start=start_sr,
                                      prev_deg=None, torsion_mode=torsion_mode,
                                      resolution=RESOLUTION, threshold=0,
                                      max_angle=pre.MAX_ANGLE)
                if i > 0:
                    # calculate torsion relative to the previous frame's window
                    previous_window_sr = iris.iris_transform(
                        video[frame_loc - 1], pupil_list[frame_loc - 1], WINDOW_RADIUS,
                        theta_resolution=upsample_factor, theta_window=reference_bounds_sr)
                    # degree of rotation of the current frame relative to the previous frame
                    previous_deg = xcorr2d.xcorr2d(current_frame, previous_window_sr,
                                                   start=start_sr, prev_deg=None,
                                                   torsion_mode=torsion_mode,
                                                   resolution=RESOLUTION, threshold=0,
                                                   max_angle=pre.MAX_ANGLE)
                else:
                    previous_deg = None
            else:
                current_frame = iris.iris_transform(
                    frame, pupil_list[frame_loc], WINDOW_RADIUS,
                    theta_resolution=upsample_factor, theta_window=comparison_bounds)
                # degree of rotation of the current frame relative to the reference frame
                deg = xcorr2d.xcorr2d(current_frame, first_window, start=start,
                                      prev_deg=None, torsion_mode=torsion_mode,
                                      resolution=RESOLUTION, threshold=0,
                                      max_angle=pre.MAX_ANGLE)
                if i > 0:
                    # build the previous frame's window
                    previous_window = iris.iris_transform(
                        video[frame_loc - 1], pupil_list[frame_loc - 1], WINDOW_RADIUS,
                        theta_resolution=upsample_factor, theta_window=reference_bounds)
                    previous_window = eyelid_removal.iris_extension(
                        previous_window, theta_resolution=upsample_factor,
                        lower_theta=-pre.MAX_ANGLE, upper_theta=pre.MAX_ANGLE)
                    # degree of rotation of the current frame relative to the previous frame
                    previous_deg = xcorr2d.xcorr2d(current_frame, previous_window,
                                                   start=start, prev_deg=None,
                                                   torsion_mode=torsion_mode,
                                                   resolution=RESOLUTION, threshold=0,
                                                   max_angle=pre.MAX_ANGLE)
                else:
                    previous_deg = None

            # save the torsion
            torsion[frame_loc] = deg
            torsion_derivative[frame_loc] = previous_deg
            '''
            # Get the change in angle compared to previous frame
            if frame_loc != start_frame:
                if deg is None or torsion[frame_loc-1] is None:
                    torsion_derivative[frame_loc] = None
                else:
                    torsion_derivative[frame_loc] = deg - torsion[frame_loc-1]
            else:
                torsion_derivative[frame_loc] = None
            '''

    return torsion, torsion_derivative
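# --- Illustrative call sketch (not part of the original module) ---
# Shows one plausible way to drive quantify_torsion() end to end. The video path,
# threshold, window radius, and the way pupil_list/blink_list are populated are
# assumptions for the sake of the example; it relies on the project's Video (v) and
# pupil modules being imported as in the batch routines later in this file.
def _example_quantify_torsion(video_path='example_video.avi', n_frames=100):
    vid = v.Video(video_path)
    start, ref, end = 0, 0, n_frames
    threshold = 10
    # one pupil per frame; a failed detection would be stored as None
    pupil_list = {i: pupil.Pupil(vid[i], threshold) for i in range(start, end)}
    # assume no blinks for this sketch (1 = blink, 0 = no blink, None = not evaluated)
    blink_list = {i: 0 for i in range(start, end)}
    torsion, torsion_derivative = quantify_torsion(
        WINDOW_RADIUS=40, RESOLUTION=0.1,
        torsion_mode='interp', transform_mode='full',
        video=vid, start_frame=start, reference_frame=ref, end_frame=end,
        pupil_list=pupil_list, blink_list=blink_list,
        threshold=threshold, alternate=False)
    return torsion, torsion_derivative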
def quantify_torsion(WINDOW_RADIUS, RESOLUTION, torsion_mode, transform_mode, video,
                     start_frame, reference_frame, end_frame, pupil_list, threshold,
                     WINDOW_THETA=None, SEGMENT_THETA=None, upper_iris=None,
                     lower_iris=None, feature_coords=None):
    '''
    Utilizes the 2D cross correlation algorithm xcorr2d to measure and return torsion
    using the settings given.

    Inputs:
        WINDOW_RADIUS: Integer
            Mandatory input which sets the radial thickness of the iris transform.
        RESOLUTION: Double
            Mandatory input which sets the upsampling factor or the interpolation
            resolution, depending on torsion_mode below.
        torsion_mode: String
            Mandatory input which determines whether interpolation or upsampling is used.
            If torsion_mode = 'interp', interpolation is used and RESOLUTION is assumed
            to be the interpolation resolution. Consequently an upsampling factor of 1
            is used in the transform.
            If torsion_mode = 'upsample', upsampling is used and RESOLUTION is assumed
            to be the upsampling factor.
        transform_mode: String
            Mandatory input which determines whether a subset of the iris or the full
            iris is used during correlation.
            If transform_mode = 'subset', a subset of the iris is used.
            If transform_mode = 'full', the full iris is used.
        video: Video object
            Mandatory input.
        start_frame: Integer
            Mandatory input which is the index of the first frame to analyze.
        reference_frame: Integer
            Mandatory input which is the index of the frame used as the torsion reference.
        end_frame: Integer
            Mandatory input which is the index of the last frame to analyze.
        pupil_list: dictionary of pupil objects
            key: (int) video frame
            value: pupil object
        threshold: Integer
            Pupil detection threshold, used to locate the pupil in the reference frame
            when it differs from the start frame.
        WINDOW_THETA: Integer
            Angle bounds above/below the feature that define the portion of the iris to
            be included in the reference iris window. This window should be smaller than
            the segment. Mandatory input if transform_mode = 'subset'.
        SEGMENT_THETA: Integer
            Angle bounds above/below the feature that define the portion of the iris to
            be included in each segment, within which the window is to be located.
            Mandatory input if transform_mode = 'subset'.
        upper_iris: dictionary, {'c': column index, 'r': row index}
            Holds the [row, column] coordinates of the upper boundary of the iris that
            is not occluded by eyelids or eyelashes.
        lower_iris: dictionary, {'c': column index, 'r': row index}
            Holds the [row, column] coordinates of the lower boundary of the iris that
            is not occluded by eyelids or eyelashes.
        feature_coords: dictionary, {'c': column index, 'r': row index}
            Holds the coordinates of the feature tracked during subset correlation.
            Mandatory input if transform_mode = 'subset'.

    Returns:
        torsion: Dictionary
            key = frame number
            value = rotation from reference frame
    '''
    upsample_factor = 1
    noise_replace = False

    if torsion_mode == 'interp':
        pass
    elif torsion_mode == 'upsample':
        upsample_factor = RESOLUTION

    if transform_mode == 'subset':
        feature_r, feature_theta = iris.get_polar_coord(
            feature_coords['r'], feature_coords['c'], pupil_list[start_frame])
        reference_bounds = (feature_theta - WINDOW_THETA, feature_theta + WINDOW_THETA)
        comparison_bounds = (feature_theta - SEGMENT_THETA, feature_theta + SEGMENT_THETA)
        start = int((SEGMENT_THETA - WINDOW_THETA) / upsample_factor)
    elif transform_mode == 'full':
        if upper_iris and lower_iris:
            noise_replace = True
        start = 0
        reference_bounds = (0, 360)
        comparison_bounds = (0, 360)

    # get the reference window from the reference frame of the video
    # this is the base for all torsion, i.e. all rotation is relative to this window
    if start_frame == reference_frame:
        first_window = iris.iris_transform(
            video[start_frame], pupil_list[start_frame], WINDOW_RADIUS,
            theta_resolution=upsample_factor, theta_window=reference_bounds)
    else:
        ref_pupil = pupil.Pupil(video[reference_frame], threshold)
        first_window = iris.iris_transform(
            video[reference_frame], ref_pupil, WINDOW_RADIUS,
            theta_resolution=upsample_factor, theta_window=reference_bounds)

    if noise_replace:
        # transform (column, row) into (theta, r) space about the pupil centre
        # to get the boundaries of the usable iris in polar coordinates
        upper_iris_r, upper_iris_theta = iris.get_polar_coord(
            upper_iris['r'], upper_iris['c'], pupil_list[start_frame])
        lower_iris_r, lower_iris_theta = iris.get_polar_coord(
            lower_iris['r'], lower_iris['c'], pupil_list[start_frame])

        # mirror the upper angular boundary across the vertical axis
        upper_occlusion_theta = (90 - np.absolute(upper_iris_theta - 90),
                                 90 + np.absolute(upper_iris_theta - 90))

        # mirror the lower angular boundary across the vertical axis,
        # dealing with the branch cut at 270 degrees
        if lower_iris_theta < 0:
            lower_occlusion_theta = (-90 - np.absolute(lower_iris_theta + 90),
                                     -90 + np.absolute(lower_iris_theta + 90))
        else:
            lower_occlusion_theta = (-90 - np.absolute(lower_iris_theta - 270),
                                     -90 + np.absolute(lower_iris_theta - 270))

        # replace occluded sections with noise
        first_window = eyelid_removal.noise_replace(
            first_window, upper_occlusion_theta, lower_occlusion_theta)

    if transform_mode == 'full':
        # extend the iris window so rotations up to MAX_ANGLE stay in view
        first_window = eyelid_removal.iris_extension(
            first_window, theta_resolution=upsample_factor,
            lower_theta=-pre.MAX_ANGLE, upper_theta=pre.MAX_ANGLE)

    torsion = {}

    # find torsion for frames start_frame:end_frame relative to the reference window
    for i, frame in tqdm(enumerate(video[start_frame:end_frame])):
        frame_loc = i + start_frame

        # check if a pupil exists
        if not pupil_list[frame_loc]:
            # if there is no pupil, torsion cannot be calculated
            torsion[frame_loc] = None
            print('WARNING: No pupil in frame: %d \n Torsion cannot be calculated' % (frame_loc))
        else:
            # unwrap the iris (convert into polar coordinates)
            current_frame = iris.iris_transform(
                frame, pupil_list[frame_loc], WINDOW_RADIUS,
                theta_resolution=upsample_factor, theta_window=comparison_bounds)
            # degree of rotation of the current frame relative to the reference frame
            deg = xcorr2d.xcorr2d(current_frame, first_window, start=start, prev_deg=None,
                                  torsion_mode=torsion_mode, resolution=RESOLUTION,
                                  threshold=0, max_angle=pre.MAX_ANGLE)
            # save the torsion
            torsion[frame_loc] = deg

    return torsion
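# --- Worked example of the occlusion-boundary mirroring used above ---
# Standalone sketch (the angles are made up) showing how a marked upper-iris boundary
# is mirrored about the +90 degree axis and the lower boundary about the -90 degree
# axis (with the 270 degree branch cut handled), producing the angular bands that
# eyelid_removal.noise_replace() fills with noise.
def _example_occlusion_bounds(upper_iris_theta=60.0, lower_iris_theta=-60.0):
    import numpy as np
    upper_occlusion_theta = (90 - np.absolute(upper_iris_theta - 90),
                             90 + np.absolute(upper_iris_theta - 90))
    if lower_iris_theta < 0:
        lower_occlusion_theta = (-90 - np.absolute(lower_iris_theta + 90),
                                 -90 + np.absolute(lower_iris_theta + 90))
    else:
        lower_occlusion_theta = (-90 - np.absolute(lower_iris_theta - 270),
                                 -90 + np.absolute(lower_iris_theta - 270))
    # e.g. an upper boundary at 60 deg gives an occluded band of (60, 120);
    #      a lower boundary at -60 deg gives an occluded band of (-120, -60)
    return upper_occlusion_theta, lower_occlusion_theta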
def corr2d(video_path, verborose=True, **kw):
    # save all results to a dictionary
    results = {
        'interp': {
            'full': [],
            'subset': []
        },
        'upsample': {
            'full': [],
            'subset': []
        },
        'standard': {
            'full': [],
            'subset': []
        }
    }

    # get parameters from kw args
    start_frame = kw.get('start_frame', 0)
    end_frame = kw.get('end_frame', -1)
    transform_resolution = kw.get('transform_resolution', 1)
    interp_resolution = kw.get('interp_resolution', 0.1)
    upsample_resolution = kw.get('upsample_resolution', 0.1)
    interp_threshold = kw.get('interp_threshold', 0.3)
    upsample_threshold = kw.get('upsample_threshold', 0)
    interp_start = kw.get('interp_start', 250)
    upsample_start = kw.get('upsample_start', 2500)
    window_length = kw.get('window_length', 50)
    window_height = kw.get('window_height', 30)
    max_angle = kw.get('max_angle', 25)
    pupil_threshold = kw.get('pupil_threshold', 10)
    # list of crop indices of the form [row_lower_lim, row_upper_lim, col_lower_lim, col_upper_lim]
    im_crop = kw.get('im_crop', None)

    video_name = os.path.basename(video_path)

    # save all results as data objects within a folder
    now = datetime.datetime.now().strftime("%Y_%m_%d")
    file_path = os.path.abspath(os.path.join(os.curdir, 'results', video_name, now))
    if not os.path.isdir(file_path):
        os.makedirs(file_path)

    # create video object
    video = v.Video(video_path)
    metadata = kw  # TODO add more?
    metadata['VIDEO_FPS'] = video.fps

    # create the reference windows
    first_frame = video[start_frame]

    # crop the video frame
    if im_crop is not None:
        first_frame = first_frame[im_crop[0]:im_crop[1], im_crop[2]:im_crop[3]]

    iris_segment_interp = iris_transform(first_frame, Pupil(first_frame, threshold=pupil_threshold),
                                         window_height, theta_resolution=transform_resolution)
    iris_segment_upsample = iris_transform(first_frame, Pupil(first_frame, threshold=pupil_threshold),
                                           window_height, theta_resolution=upsample_resolution)

    reference_windows = {
        'interp': {
            'subset': iris_segment_interp[:, slice(interp_start, interp_start + window_length)],
            'extend': extend(iris_segment_interp, diff=max_angle)
        },
        'upsample': {
            'subset': iris_segment_upsample[:, slice(upsample_start, upsample_start + int(window_length / upsample_resolution))],
            'extend': extend(iris_segment_upsample, diff=int(max_angle / upsample_resolution))
        }
    }

    if verborose:
        print('Starting batch 2D Cross Correlation with upsample and interpolation ...')
    start_time = time.time()

    for segment in video[start_frame + 1:end_frame]:
        # crop frame
        if im_crop is not None:
            segment = segment[im_crop[0]:im_crop[1], im_crop[2]:im_crop[3]]
            # segment = segment[0:500, 500:1100]

        interp_seg = iris_transform(segment, Pupil(segment, threshold=pupil_threshold),
                                    window_height, theta_resolution=transform_resolution)
        upsample_seg = iris_transform(segment, Pupil(segment, threshold=pupil_threshold),
                                      window_height, theta_resolution=upsample_resolution)

        # standard method is upsampling with resolution 1
        t_standard_full = xcorr2d(interp_seg, reference_windows['interp']['extend'], 0,
                                  resolution=1, threshold=0, torsion_mode='upsample')
        t_standard_subset = xcorr2d(interp_seg, reference_windows['interp']['subset'], interp_start,
                                    resolution=1, threshold=-1, torsion_mode='upsample')
        t_interp_full = xcorr2d(interp_seg, reference_windows['interp']['extend'], 0,
                                resolution=interp_resolution, threshold=interp_threshold, torsion_mode='interp')
        t_interp_subset = xcorr2d(interp_seg, reference_windows['interp']['subset'], interp_start,
                                  resolution=interp_resolution, threshold=interp_threshold, torsion_mode='interp')
        t_upsample_full = xcorr2d(upsample_seg, reference_windows['upsample']['extend'], 0,
                                  resolution=interp_resolution, threshold=interp_threshold, torsion_mode='upsample')
        t_upsample_subset = xcorr2d(upsample_seg, reference_windows['upsample']['subset'], upsample_start,
                                    resolution=upsample_resolution, threshold=upsample_threshold, torsion_mode='upsample')

        results['standard']['full'].append(t_standard_full)
        results['standard']['subset'].append(t_standard_subset)
        results['interp']['full'].append(t_interp_full)
        results['interp']['subset'].append(t_interp_subset)
        results['upsample']['full'].append(t_upsample_full)
        results['upsample']['subset'].append(t_upsample_subset)

        if verborose:
            print('Elapsed Time: {}s'.format(round(time.time() - start_time, 2)), sep=' ', end='\r', flush=True)

    if verborose:
        print('Duration:', time.time() - start_time)

    for method in results:
        for mode in results[method]:
            obj = Data('_'.join((method, mode)), file_path)
            obj.set(results[method][mode], start_frame, metadata)
            obj.save()
            if verborose:
                print('Saving {} {} results.'.format(method, mode))
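# --- Illustrative call sketch (not part of the original module) ---
# One plausible invocation of corr2d(); the path, frame range, and crop bounds are
# placeholders, and the keyword values shown mirror the defaults the function reads
# from **kw where possible. Results are written as Data objects under
# results/<video_name>/<YYYY_MM_DD>/.
def _example_corr2d():
    corr2d('example_video.avi',
           start_frame=0, end_frame=200,
           window_height=30, window_length=50,
           interp_resolution=0.1, upsample_resolution=0.1,
           interp_start=250, upsample_start=2500,
           pupil_threshold=10,
           im_crop=[0, 500, 500, 1100])  # [row_lo, row_hi, col_lo, col_hi]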
def interpolation_subset_method(video_path, verborose=True, **kw):
    # get parameters from kw args
    start_frame = kw.get('start_frame', 0)
    end_frame = kw.get('end_frame', -1)
    transform_resolution = kw.get('transform_resolution', 1)
    interp_resolution = kw.get('interp_resolution', 0.1)
    upsample_resolution = kw.get('upsample_resolution', 0.1)
    interp_threshold = kw.get('interp_threshold', 0.3)
    upsample_threshold = kw.get('upsample_threshold', 0)
    interp_start = kw.get('interp_start', 250)
    upsample_start = kw.get('upsample_start', 2500)
    window_length = kw.get('window_length', 50)
    window_height = kw.get('window_height', 30)
    max_angle = kw.get('max_angle', 25)
    pupil_threshold = kw.get('pupil_threshold', 10)
    # list of crop indices of the form [row_lower_lim, row_upper_lim, col_lower_lim, col_upper_lim]
    im_crop = kw.get('im_crop', None)

    video_name = os.path.basename(video_path)

    # create dict to temporarily hold important data
    data = {
        'pupil_list': [],
        'torsion': [],
    }

    # save all results as data objects within a folder
    now = datetime.datetime.now().strftime("%Y_%m_%d")
    file_path = os.path.abspath(os.path.join(os.curdir, 'results', video_name, now))
    if not os.path.isdir(file_path):
        os.makedirs(file_path)

    # create video object
    video = v.Video(video_path)
    metadata = kw  # TODO add more?
    metadata['VIDEO_FPS'] = video.fps

    # create the reference window
    first_frame = video[start_frame]

    # crop the video frame
    if im_crop is not None:
        first_frame = first_frame[im_crop[0]:im_crop[1], im_crop[2]:im_crop[3]]

    # find the first frame's pupil and populate the data lists
    pup = Pupil(first_frame, threshold=pupil_threshold)
    data['pupil_list'].append(pup)
    data['torsion'].append(0)

    # create the reference window from the first frame
    iris_segment_interp = iris_transform(first_frame, pup, window_height,
                                         theta_resolution=transform_resolution)
    reference_window = iris_segment_interp[:, slice(interp_start, interp_start + window_length)]

    if verborose:
        print('Starting batch 2D Cross Correlation with upsample and interpolation ...')
    start_time = time.time()

    for I in video[start_frame + 1:end_frame]:
        # crop frame
        if im_crop is not None:
            I = I[im_crop[0]:im_crop[1], im_crop[2]:im_crop[3]]

        # find pupil in frame
        pup = Pupil(I, threshold=pupil_threshold)
        data['pupil_list'].append(pup)

        # extract the iris segment
        P = iris_transform(I, pup, window_height, theta_resolution=transform_resolution)

        # find the torsion
        t = xcorr2d(P, reference_window, interp_start, resolution=interp_resolution,
                    threshold=interp_threshold, torsion_mode='interp')
        data['torsion'].append(t)

        if verborose:
            print('Elapsed Time: {}s'.format(round(time.time() - start_time, 2)), sep=' ', end='\r', flush=True)

    if verborose:
        print('Duration:', time.time() - start_time)

    # save results
    obj = Data('_subset_interpolation', file_path)
    obj.set(data['torsion'], start_frame=start_frame, pupil_list=data['pupil_list'], metadata=metadata)
    obj.save()
    if verborose:
        print('Saving subset interpolation results.')
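# --- Illustrative call sketch (not part of the original module) ---
# Minimal invocation of interpolation_subset_method(); the path and parameter values
# are placeholders matching the function's own defaults. The saved Data object holds
# the per-frame torsion trace together with the pupil list, under
# results/<video_name>/<YYYY_MM_DD>/.
def _example_interpolation_subset_method():
    interpolation_subset_method('example_video.avi',
                                start_frame=0, end_frame=200,
                                window_height=30, window_length=50,
                                interp_start=250, interp_resolution=0.1,
                                interp_threshold=0.3, pupil_threshold=10,
                                im_crop=None)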