def estimate_flat_field(self):
    """
    Estimates flat field correction image.
    """
    # The flat field is constant over time, so use the first time idx, and
    # only the first slice if multiple are present
    time_idx = self.frames_metadata['time_idx'].unique()[0]
    for channel_idx in self.channels_ids:
        row_idx = aux_utils.get_row_idx(
            frames_metadata=self.frames_metadata,
            time_idx=time_idx,
            channel_idx=channel_idx,
            slice_idx=self.slice_ids[0],
        )
        channel_metadata = self.frames_metadata[row_idx]
        summed_image = None
        # Average over all positions
        for idx, row in channel_metadata.iterrows():
            file_path = os.path.join(self.input_dir, row['file_name'])
            im = read_image(file_path)
            if len(im.shape) == 3:
                im = np.mean(im, axis=2)
            if summed_image is None:
                summed_image = im.astype('float64')
            else:
                summed_image += im
        # Divide by the number of images actually summed for this channel
        mean_image = summed_image / len(channel_metadata)
        # TODO (Jenny): it currently samples median values from a mean
        # image, not very statistically meaningful but easier than
        # computing the median of the image stack
        flatfield = self.get_flatfield(mean_image)
        fname = 'flat-field_channel-{}.npy'.format(channel_idx)
        cur_fname = os.path.join(self.flat_field_dir, fname)
        np.save(cur_fname, flatfield, allow_pickle=True, fix_imports=True)

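# A minimal sketch (not part of the original module): load the flat field
# saved above for one channel and apply it to a raw 2D image. The helper name
# apply_saved_flat_field is hypothetical; the call to
# image_utils.apply_flat_field_correction mirrors the usage in
# rescale_vol_and_save and resize_and_save below, and os, np and image_utils
# are assumed to be imported as in those modules.
def apply_saved_flat_field(raw_image, flat_field_dir, channel_idx):
    """Apply a previously saved flat-field estimate to a single image."""
    ff_path = os.path.join(
        flat_field_dir,
        'flat-field_channel-{}.npy'.format(channel_idx),
    )
    ff_image = np.load(ff_path)
    return image_utils.apply_flat_field_correction(
        raw_image,
        flat_field_image=ff_image,
    )
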
def test_read_image(self):
    file_path = os.path.join(
        self.temp_path,
        self.frames_meta['file_name'][0],
    )
    im = image_utils.read_image(file_path)
    np.testing.assert_array_equal(im, self.sph[..., 0])

def get_mask(self, cur_row, transpose=False):
    """Get mask, either from image or mask dir

    :param pd.Series/dict cur_row: row containing indices
    :param bool transpose: Changes image format from xyz to zxy
    :return np.array mask: Mask
    """
    mask_idx = aux_utils.get_meta_idx(
        self.mask_meta,
        time_idx=cur_row['time_idx'],
        channel_idx=self.masks_dict['mask_channel'],
        slice_idx=cur_row['slice_idx'],
        pos_idx=cur_row['pos_idx'],
    )
    mask_fname = self.mask_meta.loc[mask_idx, 'file_name']
    mask = image_utils.read_image(
        os.path.join(self.mask_dir, mask_fname),
    )
    # Need metrics mask to be cropped the same way as inference dataset
    mask = image_utils.crop2base(mask)
    if self.crop_shape is not None:
        mask = image_utils.center_crop_to_shape(
            mask,
            self.crop_shape,
            self.image_format,
        )
    # Move z from last axis to first axis
    if transpose and len(mask.shape) > 2:
        mask = np.transpose(mask, [2, 0, 1])
    return mask

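# Usage sketch (assumption, not from the source): fetching the metrics mask
# for each row of an inference metadata dataframe in zxy order. `evaluator`
# and `inference_meta` are hypothetical names for the object that owns
# get_mask and for its prediction metadata.
#
# for _, cur_row in inference_meta.iterrows():
#     mask = evaluator.get_mask(cur_row, transpose=True)
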
def test_create_save_mask_otsu(self):
    """test create_save_mask otsu"""
    self.write_mask_data()
    for sl_idx in range(8):
        input_fnames = [
            'im_c001_z00{}_t000_p001.png'.format(sl_idx),
            'im_c002_z00{}_t000_p001.png'.format(sl_idx),
        ]
        input_fnames = [
            os.path.join(self.temp_path, fname) for fname in input_fnames
        ]
        cur_meta = mp_utils.create_save_mask(
            tuple(input_fnames),
            None,
            str_elem_radius=1,
            mask_dir=self.output_dir,
            mask_channel_idx=3,
            time_idx=self.time_ids,
            pos_idx=self.pos_ids,
            slice_idx=sl_idx,
            int2str_len=3,
            mask_type='otsu',
            mask_ext='.png',
        )
        fname = aux_utils.get_im_name(
            time_idx=self.time_ids,
            channel_idx=3,
            slice_idx=sl_idx,
            pos_idx=self.pos_ids,
        )
        exp_meta = {
            'channel_idx': 3,
            'slice_idx': sl_idx,
            'time_idx': 0,
            'pos_idx': 1,
            'file_name': fname,
        }
        nose.tools.assert_dict_equal(cur_meta, exp_meta)
        op_fname = os.path.join(self.output_dir, fname)
        nose.tools.assert_equal(os.path.exists(op_fname), True)
        mask_image = image_utils.read_image(op_fname)
        if mask_image.dtype != bool:
            mask_image = mask_image > 0
        input_image = (
            self.sph_object[:, :, sl_idx],
            self.rect_object[:, :, sl_idx],
        )
        mask_stack = np.stack([
            create_otsu_mask(input_image[0], str_elem_size=1),
            create_otsu_mask(input_image[1], str_elem_size=1),
        ])
        mask_exp = np.any(mask_stack, axis=0)
        np.testing.assert_array_equal(mask_image, mask_exp)

def test_create_save_mask_border_map(self):
    """test create_save_mask border weight map"""
    self.write_mask_data()
    for sl_idx in range(1):
        input_fnames = ['im_c001_z00{}_t000_p001.png'.format(sl_idx)]
        input_fnames = [
            os.path.join(self.temp_path, fname) for fname in input_fnames
        ]
        cur_meta = mp_utils.create_save_mask(
            tuple(input_fnames),
            None,
            str_elem_radius=1,
            mask_dir=self.output_dir,
            mask_channel_idx=2,
            time_idx=self.time_ids,
            pos_idx=self.pos_ids,
            slice_idx=sl_idx,
            int2str_len=3,
            mask_type='borders_weight_loss_map',
            mask_ext='.png',
        )
        fname = aux_utils.get_im_name(
            time_idx=self.time_ids,
            channel_idx=2,
            slice_idx=sl_idx,
            pos_idx=self.pos_ids,
        )
        exp_meta = {
            'channel_idx': 2,
            'slice_idx': sl_idx,
            'time_idx': 0,
            'pos_idx': 1,
            'file_name': fname,
        }
        nose.tools.assert_dict_equal(cur_meta, exp_meta)
        op_fname = os.path.join(self.output_dir, fname)
        nose.tools.assert_equal(os.path.exists(op_fname), True)
        weight_map = image_utils.read_image(op_fname)
        max_weight_map = np.max(weight_map)
        # The weight map between (20, 16) and (44, 16) should be maximal,
        # as there is more weight where the two objects' boundaries overlap
        y_coord = self.params[0][1]
        for x_coord in range(self.params[0][0] + self.radius,
                             self.params[1][0] - self.radius):
            distance_near_intersection = weight_map[x_coord, y_coord]
            nose.tools.assert_equal(max_weight_map, distance_near_intersection)

def rescale_vol_and_save(time_idx,
                         pos_idx,
                         channel_idx,
                         sl_start_idx,
                         sl_end_idx,
                         frames_metadata,
                         output_fname,
                         scale_factor,
                         input_dir,
                         ff_path):
    """Rescale a volume and save it

    :param int time_idx: time point of input image
    :param int pos_idx: sample idx of input image
    :param int channel_idx: channel idx of input image
    :param int sl_start_idx: start slice idx for the vol to be saved
    :param int sl_end_idx: end slice idx for the vol to be saved
    :param pd.DataFrame frames_metadata: metadata for the input slices
    :param str output_fname: path where the rescaled volume is saved (via np.save)
    :param float/list scale_factor: scale factor for resizing
    :param str input_dir: input dir for 2D images
    :param str ff_path: path to flat field correction image
    """
    input_stack = []
    for sl_idx in range(sl_start_idx, sl_end_idx):
        meta_idx = aux_utils.get_meta_idx(
            frames_metadata,
            time_idx,
            channel_idx,
            sl_idx,
            pos_idx,
        )
        cur_fname = frames_metadata.loc[meta_idx, 'file_name']
        cur_img = image_utils.read_image(os.path.join(input_dir, cur_fname))
        if ff_path is not None:
            ff_image = np.load(ff_path)
            cur_img = image_utils.apply_flat_field_correction(
                cur_img,
                flat_field_image=ff_image,
            )
        input_stack.append(cur_img)
    input_stack = np.stack(input_stack, axis=2)
    resc_vol = image_utils.rescale_nd_image(input_stack, scale_factor)
    np.save(output_fname, resc_vol, allow_pickle=True, fix_imports=True)

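# Usage sketch (assumption): one way rescale_vol_and_save might be invoked for
# a single channel/time/position block. The directories, slice range, output
# file name, and anisotropic scale factor are hypothetical; frames_meta stands
# for the dataframe indexed via aux_utils.get_meta_idx above.
#
# rescale_vol_and_save(
#     time_idx=0,
#     pos_idx=1,
#     channel_idx=2,
#     sl_start_idx=0,
#     sl_end_idx=8,
#     frames_metadata=frames_meta,
#     output_fname=os.path.join(output_dir, 'rescaled_vol.npy'),
#     scale_factor=[1.0, 1.0, 2.0],
#     input_dir=input_dir,
#     ff_path=None,
# )
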
def resize_and_save(**kwargs):
    """
    Resizes an image and saves it.

    :param kwargs: Keyword arguments:
        str file_path: Path to input image
        str write_path: Path to image to be written
        float scale_factor: Scale factor for resizing
        str ff_path: Path to flat field correction image
    """
    im = image_utils.read_image(kwargs['file_path'])
    if kwargs['ff_path'] is not None:
        ff_image = np.load(kwargs['ff_path'])
        im = image_utils.apply_flat_field_correction(
            im,
            flat_field_image=ff_image,
        )
    im_resized = image_utils.rescale_image(
        im=im,
        scale_factor=kwargs['scale_factor'],
    )
    # Write image
    cv2.imwrite(kwargs['write_path'], im_resized)

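# Usage sketch (assumption): resize_and_save receives all parameters as
# keyword arguments, which makes it easy to dispatch from a pool of workers.
# The paths and scale factor below are hypothetical.
#
# resize_and_save(
#     file_path=os.path.join(input_dir, 'im_c001_z000_t000_p001.png'),
#     write_path=os.path.join(resize_dir, 'im_c001_z000_t000_p001.png'),
#     scale_factor=0.5,
#     ff_path=None,
# )
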
def test_read_image_npy(self):
    im = image_utils.read_image(self.sph_fname)
    np.testing.assert_array_equal(im, self.sph)