def testObject(absorption, shape=None, phase=None, invert=False, invert_phase=False, dtype=None, backend=None, **kwargs):
    """Build a (possibly complex) test object from a named absorption image.

    The absorption image is loaded, normalized to [0, 1], optionally
    inverted, then rescaled into [min_value, max_value] (kwargs, defaults
    0.9 / 1.1). If a phase image label is supplied, it is normalized the
    same way and applied multiplicatively as exp(1j * phase).
    """
    # Load the absorption image and normalize it to the unit interval.
    obj = _loadImage(absorption, shape, dtype, backend, **kwargs)
    obj -= yp.min(obj)
    obj /= yp.max(obj)

    if invert:
        obj = 1 - obj

    # Rescale absorption into [min_value, max_value].
    absorption_hi = kwargs.get('max_value', 1.1)
    absorption_lo = kwargs.get('min_value', 0.9)
    obj *= (absorption_hi - absorption_lo)
    obj += absorption_lo

    # Optionally modulate with a phase image.
    if phase:
        # Load phase image
        phase = _loadImage(phase, shape, **kwargs)

        if invert_phase:
            phase = 1 - phase

        # Normalize phase to [0, 1].
        phase -= yp.min(phase)
        phase /= yp.max(phase)

        # NOTE(review): the defaults put max_value_phase (0) BELOW
        # min_value_phase (1), i.e. the default phase range is inverted
        # relative to the absorption defaults above — confirm intended.
        phase_hi = kwargs.get('max_value_phase', 0)
        phase_lo = kwargs.get('min_value_phase', 1)
        phase *= (phase_hi - phase_lo)
        phase += phase_lo

        # Fold the phase into the (now complex) object.
        obj = yp.astype(obj, 'complex32')
        obj *= yp.exp(1j * yp.astype(yp.real(phase), yp.getDatatype(obj)))

    # Cast to the requested dtype and backend.
    return yp.cast(obj, dtype, backend)
def pupil(shape, camera_pixel_size=6.5e-6, objective_magnification=10, system_magnification=1.0, illumination_wavelength=0.53e-6, objective_numerical_aperture=0.25, center=True, dtype=None, backend=None, **kwargs):
    """
    Creates a binary pupil function.

    :param shape: :class:`list, tuple, np.array`
        Shape of sensor plane (pixels)
    :param camera_pixel_size: :class:`float`
        Pixel size of sensor in spatial units
    :param objective_magnification: :class:`float`
        Magnification of the objective
    :param system_magnification: :class:`float`
        Additional system magnification
    :param illumination_wavelength: :class:`float`
        Detection illumination wavelength in spatial units
    :param objective_numerical_aperture: :class:`float`
        Detection numerical aperture
    :param center: :class:`bool`
        If True, return the pupil centered (fftshifted); otherwise ifftshift it.
    :return: Binary pupil array of the given shape, in ``dtype``/``backend``.
    """
    # NOTE: previous text was garbled by a search-replace of "na" ->
    # "objective_numerical_aperture"; messages restored here.
    assert len(shape) == 2, "pupil should be two dimensional!"

    # Store dtype and backend, falling back to the library defaults.
    dtype = dtype if dtype is not None else yp.config.default_dtype
    backend = backend if backend is not None else yp.config.default_backend

    # Calculate effective object-plane pixel size.
    effective_pixel_size = camera_pixel_size / system_magnification / objective_magnification

    # Generate frequency-space coordinate system.
    fylin, fxlin = yp.grid(shape, 1 / effective_pixel_size / np.asarray(shape))

    # Generate binary pupil: ones inside the NA-limited frequency circle.
    # (np.float was removed in NumPy 1.24 — use the builtin float.)
    pupil_radius = objective_numerical_aperture / illumination_wavelength
    pupil = np.asarray((fxlin**2 + fylin**2) <= pupil_radius**2).astype(float)

    # Convert to the requested dtype and backend.
    pupil = yp.cast(pupil, dtype, backend)

    return pupil if center else yp.fft.ifftshift(pupil)
def _loadImage(image_label, shape, dtype=None, backend=None, **kwargs):
    """Load a named test image, optionally flatten color and resize.

    :param image_label: key into the module-level ``_image_dict``
    :param shape: desired output shape, or None to keep the native size
    :param dtype: output datatype (defaults to ``yp.config.default_dtype``)
    :param backend: output backend (defaults to ``yp.config.default_backend``)

    kwargs: ``color_channel`` ('average', None, or a channel index),
    ``reshape_mode`` and ``anti_aliasing`` (forwarded to skimage resize).
    """
    # Determine backend and dtype, falling back to library defaults.
    backend = backend if backend is not None else yp.config.default_backend
    dtype = dtype if dtype is not None else yp.config.default_dtype

    # Load image from the test-image directory.
    image = np.asarray(
        imageio.imread(test_images_directory + '/' + _image_dict[image_label]['filename']))

    # Process color channel: average, keep as-is, or select one channel.
    if yp.ndim(image) > 2:
        color_processing_mode = kwargs.get('color_channel', 'average')
        if color_processing_mode == 'average':
            image = np.mean(image, 2)
        elif color_processing_mode is None:
            pass
        else:
            # np.int was removed in NumPy 1.24; accept Python and NumPy ints.
            assert isinstance(color_processing_mode, (int, np.integer))
            image = image[:, :, int(color_processing_mode)]

    # Resize image if requested.
    if shape is not None:
        # Warn if the measurement will be band-limited in the frequency domain.
        if any([image.shape[i] < shape[i] for i in range(len(shape))]):
            print(
                'WARNING : Raw image size (%d x %d) is smaller than requested size (%d x %d). Resolution will be lower than bandwidth of image.'
                % (image.shape[0], image.shape[1], shape[0], shape[1]))

        # Perform resize operation (np.float removed in NumPy 1.24 -> float).
        image = resize(image, shape,
                       mode=kwargs.get('reshape_mode', 'constant'),
                       preserve_range=True,
                       anti_aliasing=kwargs.get('anti_aliasing', False)).astype(float)

    return yp.cast(image, dtype, backend)
def blur_vectors(self, dtype=None, backend=None, debug=False, use_phase_ramp=False, corrections=None):
    """
    This function generates the object size, image size, and blur kernels from
    a libwallerlab dataset object.

    Args:
        dtype [np.float32]: Which datatype to use for kernel generation
        backend: Which backend to use (defaults to the dataset backend)
        debug: Unused here; kept for interface compatibility
        use_phase_ramp: Force phase-ramp-based kernel synthesis
        corrections: Optional dict of blur-vector corrections; if empty,
            loaded from ``self.metadata.calibration['blur_vector']`` when present.

    Returns:
        blur_vector_list: List of 1D blur vectors, one per frame
        blur_vector_roi_list: List of ROIs bounding each blur vector
    """
    # Assign dataset
    dataset = self

    # Avoid a mutable default argument; preserve the old default of {}.
    corrections = {} if corrections is None else corrections

    # Get corrections from metadata ("is 0" was an identity test on an int;
    # use equality).
    if len(corrections) == 0 and 'blur_vector' in self.metadata.calibration:
        corrections = dataset.metadata.calibration['blur_vector']

    # Get datatype and backends
    dtype = dtype if dtype is not None else yp.config.default_dtype
    backend = backend if backend is not None else yp.config.default_backend

    # Calculate effective pixel size if necessary
    if dataset.metadata.system.eff_pixel_size_um is None:
        dataset.metadata.system.eff_pixel_size_um = dataset.metadata.camera.pixel_size_um / \
            (dataset.metadata.objective.mag * dataset.metadata.system.mag)

    # Recover and store position and illumination list
    blur_vector_roi_list = []
    position_list, illumination_list = [], []
    frame_segment_map = []

    for frame_index in range(dataset.shape[0]):
        frame_state = dataset.frame_state_list[frame_index]

        # Store which segment this measurement uses
        frame_segment_map.append(frame_state['position']['common']['linear_segment_index'])

        # Extract list of illumination values for each time point
        if 'illumination' in frame_state:
            illumination_list_frame = []
            # A string state means "same as frame 0" — reuse its states.
            if isinstance(frame_state['illumination'], str):
                illum_state_list = self._frame_state_list[0]['illumination']['states']
            else:
                illum_state_list = frame_state['illumination']['states']
            for time_point in illum_state_list:
                illumination_list_time_point = []
                for illumination in time_point:
                    illumination_list_time_point.append(
                        {'index': illumination['index'], 'value': illumination['value']})
                illumination_list_frame.append(illumination_list_time_point)
        else:
            raise ValueError('Frame %d does not contain illumination information' % frame_index)

        # Extract list of positions for each time point, converting to pixels.
        if 'position' in frame_state:
            position_list_frame = []
            for time_point in frame_state['position']['states']:
                position_list_time_point = []
                for position in time_point:
                    if 'units' in position['value']:
                        if position['value']['units'] == 'mm':
                            ps_um = dataset.metadata.system.eff_pixel_size_um
                            position_list_time_point.append(
                                [1000 * position['value']['y'] / ps_um, 1000 * position['value']['x'] / ps_um])
                        elif position['value']['units'] == 'um':
                            position_list_time_point.append(
                                [position['value']['y'] / ps_um, position['value']['x'] / ps_um])
                        elif position['value']['units'] == 'pixels':
                            position_list_time_point.append([position['value']['y'], position['value']['x']])
                        else:
                            raise ValueError('Invalid units %s for position in frame %d' %
                                             (position['value']['units'], frame_index))
                    else:
                        # Could not find position units in metadata; assume mm.
                        ps_um = dataset.metadata.system.eff_pixel_size_um
                        position_list_time_point.append(
                            [1000 * position['value']['y'] / ps_um, 1000 * position['value']['x'] / ps_um])
                position_list_frame.append(position_list_time_point[0])  # Assuming single time point for now.

        # Define positions and position indices used.
        # BUGFIX: the original wrapped this in "for color in ..." with a
        # condition independent of the loop variable, appending each used
        # index once PER COLOR CHANNEL; the duplicates broke the later
        # zip() with illumination_list. Append once per position instead.
        positions_used, position_indices_used = [], []
        for index, pos in enumerate(position_list_frame):
            channel_values = illumination_list_frame[index][0]['value']
            if any(channel_values[color] > 0 for color in channel_values):
                position_indices_used.append(index)
                positions_used.append(pos)

        # Generate ROI for this blur vector
        from htdeblur.blurkernel import getPositionListBoundingBox
        blur_vector_roi = getPositionListBoundingBox(positions_used)

        # Append to list
        blur_vector_roi_list.append(blur_vector_roi)

        # Crop illumination list to values within the support used
        illumination_list.append([illumination_list_frame[index]
                                  for index in range(min(position_indices_used), max(position_indices_used) + 1)])

        # Store corresponding positions
        position_list.append(positions_used)

    # Apply kernel scaling or compression if necessary
    if 'scale' in corrections:
        # We need to use phase-ramp based kernel generation if we modify the positions
        use_phase_ramp = True

        # Modify position list
        for index in range(len(position_list)):
            _positions = np.asarray(position_list[index])
            # NOTE(review): this loop ignores its loop variable and re-reads
            # corrections['scale']['factor'/'axis'] each pass, so iterating a
            # dict applies the same scale once per key — confirm intended.
            for scale_correction in corrections['scale']:
                factor, axis = corrections['scale']['factor'], corrections['scale']['axis']
                _positions[:, axis] = ((_positions[:, axis] - yp.min(_positions[:, axis])) * factor
                                       + yp.min(_positions[:, axis]))
            position_list[index] = _positions.tolist()

    # Synthesize blur vectors
    blur_vector_list = []
    for frame_index in range(dataset.shape[0]):
        # Generate blur vectors
        if use_phase_ramp:
            from llops.operators import PhaseRamp
            kernel_shape = [yp.fft.next_fast_len(max(sh, 1)) for sh in blur_vector_roi_list[frame_index].shape]
            offset = yp.cast([sh // 2 + st for (sh, st) in zip(kernel_shape, blur_vector_roi_list[frame_index].start)],
                             'complex32', dataset.backend)

            # Create phase ramp and calculate offset
            R = PhaseRamp(kernel_shape, dtype='complex32', backend=dataset.backend)

            # Generate blur vector by accumulating a phase ramp per position.
            blur_vector = yp.zeros(R.M, dtype='complex32', backend=dataset.backend)
            for pos, illum in zip(position_list[frame_index], illumination_list[frame_index]):
                pos = yp.cast(pos, dtype=dataset.dtype, backend=dataset.backend)
                blur_vector += (R * (yp.cast(pos - offset, 'complex32')))

            # Take inverse Fourier Transform.
            # NOTE(review): the argument grouping yp.abs(yp.cast(...), 0.0)
            # looks garbled (single-arg cast, two-arg abs) — confirm against
            # the llops API.
            blur_vector = yp.abs(yp.cast(yp.iFt(blur_vector)), 0.0)

            # Flip if the scan direction is reversed.
            if position_list[frame_index][0][-1] > position_list[frame_index][0][0]:
                blur_vector = yp.flip(blur_vector)
        else:
            blur_vector = yp.asarray([illum[0]['value']['w'] for illum in illumination_list[frame_index]],
                                     dtype=dtype, backend=backend)

        # Normalize illumination vectors
        blur_vector /= yp.scalar(yp.sum(blur_vector))

        # Append to list
        blur_vector_list.append(blur_vector)

    # Return
    return blur_vector_list, blur_vector_roi_list
def demosaic(frame, order='grbg', bayer_coupling_matrix=None, debug=False, white_balance=False):
    # bayer_coupling_matrix = None
    # bgrg: cells very green
    # rggb: slight green tint
    """Demosaic a frame

    Converts a raw Bayer-mosaic frame of shape (H, W) into an RGB image of
    shape (H/2, W/2, 3).

    If ``bayer_coupling_matrix`` is given, the four Bayer sub-images are
    stacked into a (4, H*W/4) matrix and unmixed by least squares; otherwise
    the R/G/B channels are extracted directly from the pattern described by
    ``order`` (a 4-character permutation of 'r', 'g', 'g', 'b', read
    row-major over the 2x2 Bayer cell).
    """
    # Output RGB image at half resolution, same dtype/backend as the input.
    frame_out = yp.zeros((int(yp.shape(frame)[0] / 2), int(yp.shape(frame)[1] / 2), 3),
                         yp.getDatatype(frame), yp.getBackend(frame))

    if bayer_coupling_matrix is not None:
        # One row per Bayer sub-pattern, one column per 2x2 cell.
        frame_vec = yp.zeros((4, int(yp.shape(frame)[0] * yp.shape(frame)[1] / 4)),
                             yp.getDatatype(frame), yp.getBackend(frame))

        # Cast bayer coupling matrix to match the frame's dtype/backend.
        bayer_coupling_matrix = yp.cast(bayer_coupling_matrix,
                                        yp.getDatatype(frame), yp.getBackend(frame))

        # Define frame vector: extract the four sub-images by 2x2 offsets.
        # NOTE(review): the index -> (row, col) offset mapping below
        # (3 -> (0,0), 1 -> (0,1), 2 -> (1,0), 0 -> (1,1)) must match the
        # row ordering assumed by bayer_coupling_matrix — confirm.
        for bayer_pattern_index in range(4):
            pixel_offsets = (0, 0)
            if bayer_pattern_index == 3:
                img_sub = frame[pixel_offsets[0]::2, pixel_offsets[1]::2]
            elif bayer_pattern_index == 1:
                img_sub = frame[pixel_offsets[0]::2, pixel_offsets[1] + 1::2]
            elif bayer_pattern_index == 2:
                img_sub = frame[pixel_offsets[0] + 1::2, pixel_offsets[1]::2]
            elif bayer_pattern_index == 0:
                img_sub = frame[pixel_offsets[0] + 1::2, pixel_offsets[1] + 1::2]
            frame_vec[bayer_pattern_index, :] = yp.dcopy(yp.vec(img_sub))
            if debug:
                print("Channel %d mean is %g" %
                      (bayer_pattern_index, yp.scalar(yp.real(yp.sum(img_sub)))))

        # Perform demosaic using least squares, then normalize to [0, 1].
        result = yp.linalg.lstsq(bayer_coupling_matrix, frame_vec)

        result -= yp.amin(result)
        result /= yp.amax(result)

        # Reshape each unmixed channel back to the half-resolution image.
        for channel in range(3):
            values = result[channel]
            frame_out[:, :, channel] = yp.reshape(values,
                                                  ((yp.shape(frame_out)[0], yp.shape(frame_out)[1])))
            if white_balance:
                # Stretch each channel independently to [0, 1].
                frame_out[:, :, channel] -= yp.amin(frame_out[:, :, channel])
                frame_out[:, :, channel] /= yp.amax(frame_out[:, :, channel])
        return frame_out
    else:
        frame_out = yp.zeros((int(yp.shape(frame)[0] / 2), int(yp.shape(frame)[1] / 2), 3),
                             dtype=yp.getDatatype(frame), backend=yp.getBackend(frame))

        # Get color order from order variable
        b_index = order.find('b')
        r_index = order.find('r')
        g1_index = order.find('g')

        # Get g2 from intersection of sets (the remaining cell index).
        g2_index = set(list(range(4))).difference({b_index, r_index, g1_index}).pop()

        # Cell-index layout within one 2x2 Bayer cell:
        # +-----+-----+
        # |  0  |  1  |
        # +-----+-----|
        # |  2  |  3  |
        # +-----+-----|

        if debug:
            import matplotlib.pyplot as plt
            plt.figure()
            plt.imshow(frame[:12, :12])

        # Convert each cell index to its (row, col) offset in the 2x2 cell:
        # row = 1 for indices {2, 3}, col = 1 for indices {1, 3}.
        r_start = (int(r_index in [2, 3]), int(r_index in [1, 3]))
        g1_start = (int(g1_index in [2, 3]), int(g1_index in [1, 3]))
        g2_start = (int(g2_index in [2, 3]), int(g2_index in [1, 3]))
        b_start = (int(b_index in [2, 3]), int(b_index in [1, 3]))

        # Extract channels; the two green sites are averaged.
        frame_out[:, :, 0] = frame[r_start[0]::2, r_start[1]::2]
        frame_out[:, :, 1] = (frame[g1_start[0]::2, g1_start[1]::2] +
                              frame[g2_start[0]::2, g2_start[1]::2]) / 2.0
        frame_out[:, :, 2] = frame[b_start[0]::2, b_start[1]::2]

        # normalize to a global maximum of 1
        frame_out /= yp.max(frame_out)

        # Perform white balancing if desired
        if white_balance:
            # NOTE(review): clims is collected but never used afterwards.
            clims = []
            for channel in range(3):
                clims.append(yp.max(frame_out[:, :, channel]))
                frame_out[:, :, channel] /= yp.max(frame_out[:, :, channel])

        # Return frame
        return frame_out