def setUp(self):
    """
    Set up a directory with some images to resample.

    Writes one slice for each of 4 channels at 2 positions (pos 7 and 8)
    and a frames_meta.csv indexing them. Pixel values are offset per
    channel so channels are distinguishable in downstream assertions.
    """
    self.tempdir = TempDirectory()
    self.temp_path = self.tempdir.path
    self.output_dir = os.path.join(self.temp_path, 'out_dir')
    # Start frames meta file
    self.meta_name = 'frames_meta.csv'
    self.frames_meta = aux_utils.make_dataframe()
    # Write images
    self.time_idx = 5
    self.slice_idx = 6
    self.pos_idx = 7
    self.im = 1500 * np.ones((30, 20), dtype=np.uint16)
    for c in range(4):
        for p in range(self.pos_idx, self.pos_idx + 2):
            im_name = aux_utils.get_im_name(
                channel_idx=c,
                slice_idx=self.slice_idx,
                time_idx=self.time_idx,
                pos_idx=p,
            )
            # Offset intensity by channel index so images differ per channel
            cv2.imwrite(os.path.join(self.temp_path, im_name),
                        self.im + c * 100)
            self.frames_meta = self.frames_meta.append(
                aux_utils.parse_idx_from_name(im_name),
                ignore_index=True,
            )
    # Write metadata
    self.frames_meta.to_csv(
        os.path.join(self.temp_path, self.meta_name),
        sep=',',
    )
def setUp(self):
    """
    Set up input and mask directories for mask/image matchup.

    Creates an input_dir with a frames_meta.csv covering 4 channels x
    10 positions, and a mask_dir with a matchup csv mapping mask file
    names (indexed 1..10) to the corresponding input file names.
    """
    self.tempdir = TempDirectory()
    self.temp_path = self.tempdir.path
    self.mask_dir = os.path.join(self.temp_path, 'mask_dir')
    self.tempdir.makedir('mask_dir')
    self.input_dir = os.path.join(self.temp_path, 'input_dir')
    self.tempdir.makedir('input_dir')
    self.mask_channel = 1
    self.slice_idx = 7
    self.time_idx = 8
    # Mask meta file
    self.csv_name = 'mask_image_matchup.csv'
    input_meta = aux_utils.make_dataframe()
    # Make input meta
    for c in range(4):
        for p in range(10):
            im_name = aux_utils.get_im_name(
                channel_idx=c,
                slice_idx=self.slice_idx,
                time_idx=self.time_idx,
                pos_idx=p,
            )
            input_meta = input_meta.append(
                aux_utils.parse_idx_from_name(im_name),
                ignore_index=True,
            )
    input_meta.to_csv(
        os.path.join(self.input_dir, 'frames_meta.csv'),
        sep=',',
    )
    # Make mask meta
    mask_meta = pd.DataFrame()
    for p in range(10):
        im_name = aux_utils.get_im_name(
            channel_idx=self.mask_channel,
            slice_idx=self.slice_idx,
            time_idx=self.time_idx,
            pos_idx=p,
        )
        # Indexing can be different: masks use 1-based names on purpose
        mask_name = 'mask_{}.png'.format(p + 1)
        mask_meta = mask_meta.append(
            {'mask_name': mask_name, 'file_name': im_name},
            ignore_index=True,
        )
    mask_meta.to_csv(
        os.path.join(self.mask_dir, self.csv_name),
        sep=',',
    )
def resize_frames(self):
    """
    Resize frames for given indices.

    Builds one multiprocessing kwargs dict per (slice, time, pos, channel)
    combination, resizes the frames via mp_utils.mp_resize_save, and writes
    a frames_meta.csv (sorted by file_name) to self.resize_dir.

    :raises AssertionError: If self.scale_factor is not a single scalar
        (i.e. different scale factors were provided for x and y)
    """
    assert isinstance(self.scale_factor, (float, int)), \
        'different scale factors provided for x and y'
    mp_args = []
    resized_metadata = aux_utils.make_dataframe()
    # Loop through all the indices and resize images
    for slice_idx in self.slice_ids:
        for time_idx in self.time_ids:
            for pos_idx in self.pos_ids:
                for channel_idx in self.channel_ids:
                    frame_idx = aux_utils.get_meta_idx(
                        self.frames_metadata,
                        time_idx,
                        channel_idx,
                        slice_idx,
                        pos_idx,
                    )
                    file_name = self.frames_metadata.loc[frame_idx,
                                                         "file_name"]
                    file_path = os.path.join(self.input_dir, file_name)
                    write_path = os.path.join(self.resize_dir, file_name)
                    # Optional flatfield correction image per channel
                    ff_path = None
                    if self.flat_field_dir is not None:
                        ff_path = os.path.join(
                            self.flat_field_dir,
                            'flat-field_channel-{}.npy'.format(channel_idx)
                        )
                    kwargs = {
                        'file_path': file_path,
                        'write_path': write_path,
                        'scale_factor': self.scale_factor,
                        'ff_path': ff_path
                    }
                    mp_args.append(kwargs)
                    resized_metadata = resized_metadata.append(
                        self.frames_metadata.iloc[frame_idx],
                        ignore_index=True,
                    )
    # Multiprocessing of kwargs
    mp_utils.mp_resize_save(mp_args, self.num_workers)
    resized_metadata = resized_metadata.sort_values(by=['file_name'])
    resized_metadata.to_csv(
        os.path.join(self.resize_dir, "frames_meta.csv"),
        sep=',',
    )
def setUp(self): """ Set up a dataframe for training table """ # Start frames meta file self.meta_name = 'frames_meta.csv' self.frames_meta = aux_utils.make_dataframe() self.time_ids = [3, 4, 5] self.pos_ids = [7, 8, 10, 12, 15] self.channel_ids = [0, 1, 2, 3] self.slice_ids = [0, 1, 2, 3, 4, 5] # Tiles will typically be split into image subsections # but it doesn't matter for testing for c in self.channel_ids: for p in self.pos_ids: for z in self.slice_ids: for t in self.time_ids: im_name = aux_utils.get_im_name( channel_idx=c, slice_idx=z, time_idx=t, pos_idx=p, ) self.frames_meta = self.frames_meta.append( aux_utils.parse_idx_from_name(im_name), ignore_index=True, ) self.tiles_meta = aux_utils.sort_meta_by_channel(self.frames_meta) self.input_channels = [0, 2] self.target_channels = [3] self.mask_channels = [1] self.split_ratio = { 'train': 0.6, 'val': 0.2, 'test': 0.2, } # Instantiate class self.table_inst = training_table.BaseTrainingTable( df_metadata=self.tiles_meta, input_channels=self.input_channels, target_channels=self.target_channels, split_by_column='pos_idx', split_ratio=self.split_ratio, mask_channels=[1], random_seed=42, )
def test_adjust_slice_indices(self):
    """Margins of depth // 2 slices should be trimmed from each end."""
    # Build a fresh iteration meta holding ten consecutive slices
    meta_df = aux_utils.make_dataframe()
    for z in range(10):
        name = aux_utils.get_im_name(
            time_idx=2,
            channel_idx=4,
            slice_idx=z,
            pos_idx=6,
        )
        row = aux_utils.parse_idx_from_name(name, aux_utils.DF_NAMES)
        meta_df = meta_df.append(row, ignore_index=True)
    self.data_inst.iteration_meta = meta_df
    self.data_inst.depth = 5
    # With depth 5, two slices are removed from the top and the bottom
    self.data_inst.adjust_slice_indices()
    # Slice ids 0-9 minus the margins leaves 2-7
    remaining = self.data_inst.iteration_meta.slice_idx.unique().tolist()
    self.assertListEqual(remaining, list(range(2, 8)))
def test_generate_masks_nonuni(self):
    """Test generate_masks with non-uniform structure"""
    # pos 1 gets all sphere slices; pos 2 only three rect slices,
    # so the two positions have different slice counts (non-uniform)
    rec = self.rec_object[:, :, 3:6]
    channel_ids = 0
    time_ids = 0
    pos_ids = [1, 2]
    frames_meta = aux_utils.make_dataframe()
    # Write sphere images for pos 1
    for z in range(self.sph_object.shape[2]):
        im_name = aux_utils.get_im_name(
            time_idx=time_ids,
            channel_idx=channel_ids,
            slice_idx=z,
            pos_idx=pos_ids[0],
        )
        sk_im_io.imsave(os.path.join(self.temp_path, im_name),
                        self.sph_object[:, :, z].astype('uint8'))
        frames_meta = frames_meta.append(aux_utils.parse_idx_from_name(
            im_name, aux_utils.DF_NAMES), ignore_index=True)
    # Write rect images for pos 2
    for z in range(rec.shape[2]):
        im_name = aux_utils.get_im_name(
            time_idx=time_ids,
            channel_idx=channel_ids,
            slice_idx=z,
            pos_idx=pos_ids[1],
        )
        sk_im_io.imsave(os.path.join(self.temp_path, im_name),
                        rec[:, :, z].astype('uint8'))
        frames_meta = frames_meta.append(aux_utils.parse_idx_from_name(
            im_name, aux_utils.DF_NAMES), ignore_index=True)
    # Write metadata
    frames_meta.to_csv(os.path.join(self.temp_path, self.meta_fname),
                       sep=',')
    self.output_dir = os.path.join(self.temp_path, 'mask_dir')
    mask_gen_inst = MaskProcessor(input_dir=self.temp_path,
                                  output_dir=self.output_dir,
                                  channel_ids=channel_ids,
                                  uniform_struct=False)
    # Nested id dict maps time -> channel -> pos -> available slice ids
    exp_nested_id_dict = {
        0: {
            0: {
                1: [0, 1, 2, 3, 4, 5, 6, 7],
                2: [0, 1, 2]
            }
        }
    }
    numpy.testing.assert_array_equal(mask_gen_inst.nested_id_dict[0][0][1],
                                     exp_nested_id_dict[0][0][1])
    numpy.testing.assert_array_equal(mask_gen_inst.nested_id_dict[0][0][2],
                                     exp_nested_id_dict[0][0][2])
    mask_gen_inst.generate_masks(str_elem_radius=1)
    frames_meta = pd.read_csv(
        os.path.join(mask_gen_inst.get_mask_dir(), 'frames_meta.csv'),
        index_col=0,
    )
    # pos1: 8 slices, pos2: 3 slices
    exp_len = 8 + 3
    nose.tools.assert_equal(len(frames_meta), exp_len)
    mask_fnames = frames_meta['file_name'].tolist()
    exp_mask_fnames = [
        'im_c001_z000_t000_p001.npy', 'im_c001_z000_t000_p002.npy',
        'im_c001_z001_t000_p001.npy', 'im_c001_z001_t000_p002.npy',
        'im_c001_z002_t000_p001.npy', 'im_c001_z002_t000_p002.npy',
        'im_c001_z003_t000_p001.npy', 'im_c001_z004_t000_p001.npy',
        'im_c001_z005_t000_p001.npy', 'im_c001_z006_t000_p001.npy',
        'im_c001_z007_t000_p001.npy'
    ]
    nose.tools.assert_list_equal(mask_fnames, exp_mask_fnames)
def setUp(self):
    """Set up a directory for mask generation, no flatfield.

    Creates two synthetic 3D objects: a sphere with a bimodal intensity
    histogram (foreground scaled to 128-255, background below) saved as
    channel 1, and a cross/rect shape saved as channel 2, plus the
    frames_meta.csv and a MaskProcessor instance over both channels.
    """
    self.tempdir = TempDirectory()
    self.temp_path = self.tempdir.path
    self.meta_fname = 'frames_meta.csv'
    frames_meta = aux_utils.make_dataframe()
    # create an image with bimodal hist
    x = np.linspace(-4, 4, 32)
    y = x.copy()
    z = np.linspace(-3, 3, 8)
    xx, yy, zz = np.meshgrid(x, y, z)
    sph = (xx**2 + yy**2 + zz**2)
    fg = (sph <= 8) * (8 - sph)
    # Rescale foreground into the upper half of the uint8 range
    fg[fg > 1e-8] = (fg[fg > 1e-8] / np.max(fg)) * 127 + 128
    fg = np.around(fg).astype('uint8')
    bg = np.around((sph > 8) * sph).astype('uint8')
    object1 = fg + bg
    # create an image with a rect
    rec = np.zeros(sph.shape)
    rec[3:30, 14:18, 3:6] = 120
    rec[14:18, 3:30, 3:6] = 120
    self.sph_object = object1
    self.rec_object = rec
    self.channel_ids = [1, 2]
    self.time_ids = 0
    self.pos_ids = 1
    self.int2str_len = 3
    # Save the sphere object slice by slice as channel 1
    for z in range(sph.shape[2]):
        im_name = aux_utils.get_im_name(
            time_idx=self.time_ids,
            channel_idx=1,
            slice_idx=z,
            pos_idx=self.pos_ids,
        )
        with warnings.catch_warnings():
            warnings.simplefilter("ignore")
            sk_im_io.imsave(
                os.path.join(self.temp_path, im_name),
                object1[:, :, z].astype('uint8'),
            )
        frames_meta = frames_meta.append(aux_utils.parse_idx_from_name(
            im_name, aux_utils.DF_NAMES), ignore_index=True)
    # Save the rect object slice by slice as channel 2
    for z in range(rec.shape[2]):
        im_name = aux_utils.get_im_name(
            time_idx=self.time_ids,
            channel_idx=2,
            slice_idx=z,
            pos_idx=self.pos_ids,
        )
        with warnings.catch_warnings():
            warnings.simplefilter("ignore")
            sk_im_io.imsave(
                os.path.join(self.temp_path, im_name),
                rec[:, :, z].astype('uint8'),
            )
        frames_meta = frames_meta.append(aux_utils.parse_idx_from_name(
            im_name, aux_utils.DF_NAMES), ignore_index=True)
    # Write metadata
    frames_meta.to_csv(os.path.join(self.temp_path, self.meta_fname),
                       sep=',')
    self.output_dir = os.path.join(self.temp_path, 'mask_dir')
    self.mask_gen_inst = MaskProcessor(input_dir=self.temp_path,
                                       output_dir=self.output_dir,
                                       channel_ids=self.channel_ids)
def test_resize_volumes(self): """Test resizing volumes""" # set up a volume with 5 slices, 2 channels slice_ids = [0, 1, 2, 3, 4] channel_ids = [2, 3] frames_meta = aux_utils.make_dataframe() exp_meta_dict = [] for c in channel_ids: for s in slice_ids: im_name = aux_utils.get_im_name( channel_idx=c, slice_idx=s, time_idx=self.time_idx, pos_idx=self.pos_idx, ) cv2.imwrite(os.path.join(self.temp_path, im_name), self.im + c * 100) frames_meta = frames_meta.append( aux_utils.parse_idx_from_name(im_name), ignore_index=True, ) op_fname = 'im_c00{}_z000_t005_p007_3.3-0.8-1.0.npy'.format(c) exp_meta_dict.append({'time_idx': self.time_idx, 'pos_idx': self.pos_idx, 'channel_idx': c, 'slice_idx': 0, 'file_name': op_fname}) # Write metadata frames_meta.to_csv( os.path.join(self.temp_path, self.meta_name), sep=',', ) scale_factor = [3.3, 0.8, 1.0] resize_inst = resize_images.ImageResizer( input_dir=self.temp_path, output_dir=self.output_dir, scale_factor=scale_factor, ) # save all slices in one volume resize_inst.resize_volumes() saved_meta = pd.read_csv(os.path.join(self.output_dir, 'resized_images', 'frames_meta.csv')) del saved_meta['Unnamed: 0'] exp_meta_df = pd.DataFrame.from_dict(exp_meta_dict) pd.testing.assert_frame_equal(saved_meta, exp_meta_df) # num_slices_subvolume = 3, save vol chunks exp_meta_dict = [] for c in channel_ids: for s in [0, 2]: op_fname = 'im_c00{}_z00{}_t005_p007_3.3-0.8-1.0.npy'.format(c, s) exp_meta_dict.append({'time_idx': self.time_idx, 'pos_idx': self.pos_idx, 'channel_idx': c, 'slice_idx': s, 'file_name': op_fname}) resize_inst.resize_volumes(num_slices_subvolume=3) saved_meta = pd.read_csv(os.path.join(self.output_dir, 'resized_images', 'frames_meta.csv')) del saved_meta['Unnamed: 0'] exp_meta_df = pd.DataFrame.from_dict(exp_meta_dict) pd.testing.assert_frame_equal(saved_meta, exp_meta_df)
def setUp(self):
    """Set up a dir for tiling with flatfield.

    Writes a non-uniform dataset: pos 7 has 3 time points x 5 slices,
    pos 8 has 2 time points x 3 slices, both over channels [1, 2], then
    instantiates ImageTilerNonUniform on that directory.
    """
    self.tempdir = TempDirectory()
    self.temp_path = self.tempdir.path
    # Start frames meta file
    self.meta_name = 'frames_meta.csv'
    frames_meta = aux_utils.make_dataframe()
    self.im = 127 * np.ones((15, 11), dtype=np.uint8)
    # NOTE(review): self.im2 is defined but both positions below write
    # self.im — confirm whether im2 was meant for pos2
    self.im2 = 234 * np.ones((15, 11), dtype=np.uint8)
    self.int2str_len = 3
    self.channel_idx = [1, 2]
    self.pos_idx1 = 7
    self.pos_idx2 = 8
    # write pos1 with 3 time points and 5 slices
    for z in range(5):
        for t in range(3):
            for c in self.channel_idx:
                im_name = aux_utils.get_im_name(
                    channel_idx=c,
                    slice_idx=z,
                    time_idx=t,
                    pos_idx=self.pos_idx1,
                )
                with warnings.catch_warnings():
                    warnings.simplefilter("ignore")
                    sk_im_io.imsave(
                        os.path.join(self.temp_path, im_name),
                        self.im,
                    )
                frames_meta = frames_meta.append(
                    aux_utils.parse_idx_from_name(im_name),
                    ignore_index=True,
                )
    # write pos2 with 2 time points and 3 slices
    for z in range(3):
        for t in range(2):
            for c in self.channel_idx:
                im_name = aux_utils.get_im_name(
                    channel_idx=c,
                    slice_idx=z,
                    time_idx=t,
                    pos_idx=self.pos_idx2,
                )
                with warnings.catch_warnings():
                    warnings.simplefilter("ignore")
                    sk_im_io.imsave(
                        os.path.join(self.temp_path, im_name),
                        self.im,
                    )
                frames_meta = frames_meta.append(
                    aux_utils.parse_idx_from_name(im_name),
                    ignore_index=True,
                )
    # Write metadata
    frames_meta.to_csv(os.path.join(self.temp_path, self.meta_name),
                       sep=',',)
    # Instantiate tiler class
    self.output_dir = os.path.join(self.temp_path, 'tile_dir')
    self.tile_inst = tile_images.ImageTilerNonUniform(
        input_dir=self.temp_path,
        output_dir=self.output_dir,
        tile_size=[5, 5],
        step_size=[4, 4],
        depths=3,
        channel_ids=[1, 2],
        normalize_channels=[False, True]
    )
def test_make_dataframe():
    """make_dataframe(n) yields an n-row frame with the standard columns."""
    frame = aux_utils.make_dataframe(3)
    # Three rows, one column per DF_NAMES entry (6 columns)
    nose.tools.assert_tuple_equal(frame.shape, (3, 6))
    nose.tools.assert_list_equal(list(frame), aux_utils.DF_NAMES)
def setUp(self, mock_model):
    """
    Set up a directory with images.

    Mocks the model loader, writes a 2.5D dataset (5 positions x
    3 channels x 6 slices) plus binary-ish masks on channel 50, a data
    split json, and instantiates ImagePredictor for 2.5D segmentation
    inference.

    :param mock_model: Patched model-loading mock (return value is a
        dummy model string)
    """
    mock_model.return_value = 'dummy_model'
    self.tempdir = TempDirectory()
    self.temp_path = self.tempdir.path
    self.tempdir.makedir('image_dir')
    self.tempdir.makedir('mask_dir')
    self.tempdir.makedir('model_dir')
    self.image_dir = os.path.join(self.temp_path, 'image_dir')
    self.mask_dir = os.path.join(self.temp_path, 'mask_dir')
    self.model_dir = os.path.join(self.temp_path, 'model_dir')
    # Create a temp image dir
    self.im = np.zeros((10, 16), dtype=np.uint8)
    self.frames_meta = aux_utils.make_dataframe()
    self.time_idx = 2
    for p in range(5):
        for c in range(3):
            for z in range(6):
                im_name = aux_utils.get_im_name(
                    time_idx=self.time_idx,
                    channel_idx=c,
                    slice_idx=z,
                    pos_idx=p,
                )
                cv2.imwrite(os.path.join(self.image_dir, im_name),
                            self.im + c * 10)
                self.frames_meta = self.frames_meta.append(
                    aux_utils.parse_idx_from_name(im_name,
                                                  aux_utils.DF_NAMES),
                    ignore_index=True,
                )
    # Write frames meta to image dir too
    self.frames_meta.to_csv(os.path.join(self.image_dir, 'frames_meta.csv'))
    # Save masks and mask meta
    self.mask_meta = aux_utils.make_dataframe()
    self.mask_channel = 50
    for p in range(5):
        for z in range(6):
            im_name = aux_utils.get_im_name(
                time_idx=self.time_idx,
                channel_idx=self.mask_channel,
                slice_idx=z,
                pos_idx=p,
            )
            cv2.imwrite(os.path.join(self.mask_dir, im_name), self.im + 1)
            self.mask_meta = self.mask_meta.append(
                aux_utils.parse_idx_from_name(im_name, aux_utils.DF_NAMES),
                ignore_index=True,
            )
    # Write frames meta to mask dir too
    self.mask_meta.to_csv(os.path.join(self.mask_dir, 'frames_meta.csv'))
    # Setup model dir
    split_samples = {
        "train": [0, 1],
        "val": [2],
        "test": [3, 4],
    }
    aux_utils.write_json(
        split_samples,
        os.path.join(self.model_dir, 'split_samples.json'),
    )
    # Make configs with fields necessary for 2.5D segmentation inference
    self.train_config = {
        'network': {
            'class': 'UNetStackTo2D',
            'data_format': 'channels_first',
            'depth': 5,
            'width': 10,
            'height': 10
        },
        'dataset': {
            'split_by_column': 'pos_idx',
            'input_channels': [1],
            'target_channels': [self.mask_channel],
            'model_task': 'segmentation',
        },
    }
    self.inference_config = {
        'model_dir': self.model_dir,
        'model_fname': 'dummy_weights.hdf5',
        'image_dir': self.image_dir,
        'data_split': 'test',
        'images': {
            'image_format': 'zyx',
            'image_ext': '.png',
        },
        'metrics': {
            'metrics': ['dice'],
            'metrics_orientations': ['xy'],
        },
        'masks': {
            'mask_dir': self.mask_dir,
            'mask_type': 'target',
            'mask_channel': 50,
        }
    }
    # Instantiate class
    self.infer_inst = image_inference.ImagePredictor(
        train_config=self.train_config,
        inference_config=self.inference_config,
    )
def validate_mask_meta(mask_dir,
                       input_dir,
                       csv_name=None,
                       mask_channel=None):
    """
    If user provides existing masks, the mask directory should also contain
    a csv file (not named frames_meta.csv which is reserved for output) with
    two column names: mask_name and file_name. Each row should describe the
    mask name and the corresponding file name. Each file_name should exist in
    input_dir and belong to the same channel.
    This function checks that all file names exist in input_dir and writes a
    frames_meta csv containing mask names with indices corresponding to the
    matched file_name. It also assigns a mask channel number for future
    preprocessing steps like tiling.

    :param str mask_dir: Mask directory
    :param str input_dir: Input image directory, to match masks with images
    :param str/None csv_name: Name of the matchup csv in mask_dir. If None,
        mask_dir is searched for a single csv file.
    :param int/None mask_channel: Channel idx assigned to masks
    :return int mask_channel: New channel index for masks for writing tiles
    :raises IOError: If no csv file is present in mask_dir
    :raises IOError: If more than one csv file exists in mask_dir
        and no csv_name is provided to resolve ambiguity
    :raises AssertionError: If csv doesn't consist of two columns named
        'mask_name' and 'file_name'
    :raises IndexError: If unable to match file_name in mask_dir csv with
        file_name in input_dir for any given mask row
    """
    input_meta = aux_utils.read_meta(input_dir)
    if mask_channel is None:
        # Default: assign masks the next channel index after the inputs
        mask_channel = int(
            input_meta['channel_idx'].max() + 1
        )

    # Make sure a given csv name resolves to exactly one existing file
    if csv_name is not None:
        found_csv = glob.glob(os.path.join(mask_dir, csv_name))
        csv_name = found_csv[0] if len(found_csv) == 1 else None
    # No csv name given, search for it
    if csv_name is None:
        csv_names = glob.glob(os.path.join(mask_dir, '*.csv'))
        if len(csv_names) == 0:
            raise IOError("No csv file present in mask dir")
        # See if frames_meta is already present, if so, move on
        has_meta = next(
            (s for s in csv_names if 'frames_meta.csv' in s), None)
        if isinstance(has_meta, str):
            # Return existing mask channel from frames_meta
            frames_meta = pd.read_csv(
                os.path.join(mask_dir, 'frames_meta.csv'),
            )
            unique_channels = np.unique(frames_meta['channel_idx'])
            # np.unique returns an ndarray, so check its length; the old
            # isinstance(..., list) check could never be True, leaving the
            # multi-channel case to fail later with a confusing ValueError
            assert len(unique_channels) == 1, \
                "Found more than one mask channel: {}".format(unique_channels)
            mask_channel = unique_channels[0]
            if type(mask_channel).__module__ == 'numpy':
                # Convert numpy scalar to a native python int
                mask_channel = mask_channel.item()
            return mask_channel
        elif len(csv_names) == 1:
            # Use the one existing csv name
            csv_name = csv_names[0]
        else:
            # More than one csv file in dir; single string so the error
            # message doesn't print as a tuple
            raise IOError("More than one csv file present in mask dir, "
                          "use csv_name to specify which one to use")

    # Read csv with masks and corresponding input file names
    mask_meta = aux_utils.read_meta(input_dir=mask_dir, meta_fname=csv_name)
    assert len(set(mask_meta).difference({'file_name', 'mask_name'})) == 0, \
        "mask csv should have columns mask_name and file_name " + \
        "(corresponding to the file_name in input_dir)"
    # Check that file_name for each mask_name matches files in input_dir
    file_names = input_meta['file_name']
    # Create dataframe that will store all indices for masks
    out_meta = aux_utils.make_dataframe(nbr_rows=mask_meta.shape[0])
    for i, row in mask_meta.iterrows():
        try:
            # NOTE(review): assumes input_meta has a default RangeIndex so
            # the label index can be used positionally below — confirm
            file_loc = file_names[file_names == row.file_name].index[0]
        except IndexError as e:
            msg = "Can't find image file name match for {}, error {}".format(
                row.file_name, e)
            raise IndexError(msg)
        # Fill dataframe with row indices from matched image in input dir
        out_meta.loc[i] = input_meta.iloc[file_loc]
        # Write back the mask name; .loc avoids chained assignment, which
        # may silently write to an intermediate copy
        out_meta.loc[i, 'file_name'] = row.mask_name

    assert len(out_meta.channel_idx.unique()) == 1, \
        "Masks should match one input channel only"
    assert mask_channel not in set(input_meta.channel_idx.unique()), \
        "Mask channel {} already exists in image dir".format(mask_channel)
    # Replace channel_idx with new mask channel idx
    out_meta['channel_idx'] = mask_channel
    # Write mask metadata with indices that match input images
    meta_filename = os.path.join(mask_dir, 'frames_meta.csv')
    out_meta.to_csv(meta_filename, sep=",")
    return mask_channel
def setUp(self):
    """
    Set up a directory with some images to generate frames_meta.csv for.

    Writes 3 channels x 5 slices of tif images; channel 2 also gets a
    z-scored "prediction" written to a predictions subdir. Also writes
    frames_meta/test_metadata csvs, a split_samples json, and a config.yml
    in the model dir.
    """
    self.tempdir = TempDirectory()
    self.temp_dir = self.tempdir.path
    self.model_dir = os.path.join(self.temp_dir, 'model_dir')
    self.pred_dir = os.path.join(self.model_dir, 'predictions')
    self.image_dir = os.path.join(self.temp_dir, 'image_dir')
    self.tempdir.makedir(self.model_dir)
    self.tempdir.makedir(self.pred_dir)
    self.tempdir.makedir(self.image_dir)
    # Write images
    self.time_idx = 5
    self.pos_idx = 7
    self.im = 1500 * np.ones((30, 20), dtype=np.uint16)
    im_add = np.zeros((30, 20), dtype=np.uint16)
    im_add[15:, :] = 10
    self.ext = '.tif'
    # Start frames meta file
    self.meta_name = 'frames_meta.csv'
    self.frames_meta = aux_utils.make_dataframe()
    for c in range(3):
        for z in range(5, 10):
            im_name = aux_utils.get_im_name(
                channel_idx=c,
                slice_idx=z,
                time_idx=self.time_idx,
                pos_idx=self.pos_idx,
                ext=self.ext,
            )
            cv2.imwrite(os.path.join(self.image_dir, im_name), self.im)
            if c == 2:
                # Target channel gets a normalized prediction image
                norm_im = normalize.zscore(
                    self.im + im_add).astype(np.float32)
                cv2.imwrite(
                    os.path.join(self.pred_dir, im_name),
                    norm_im,
                )
            self.frames_meta = self.frames_meta.append(
                aux_utils.parse_idx_from_name(im_name),
                ignore_index=True,
            )
    # Write metadata
    self.frames_meta.to_csv(
        os.path.join(self.image_dir, self.meta_name),
        sep=',',
    )
    # Write as test metadata in model dir too
    self.frames_meta.to_csv(
        os.path.join(self.model_dir, 'test_metadata.csv'),
        sep=',',
    )
    # Write split samples
    split_idx_fname = os.path.join(self.model_dir, 'split_samples.json')
    split_samples = {'test': [5, 6, 7, 8, 9]}
    aux_utils.write_json(split_samples, split_idx_fname)
    # Write config in model dir
    config = {
        'dataset': {
            'input_channels': [0, 1],
            'target_channels': [2],
            'split_by_column': 'slice_idx'
        },
        'network': {}
    }
    config_name = os.path.join(self.model_dir, 'config.yml')
    with open(config_name, 'w') as outfile:
        yaml.dump(config, outfile, default_flow_style=False)
def setUp(self):
    """
    Set up a directory with some images to resample.

    Writes a full 4-channel x 3-position x 6-slice dataset with matching
    frames_meta.csv, an input mask dir with a two-class mask per frame,
    and builds the preprocessing and base config dicts used by the tests.
    """
    self.tempdir = TempDirectory()
    self.temp_path = self.tempdir.path
    self.image_dir = self.temp_path
    self.output_dir = os.path.join(self.temp_path, 'out_dir')
    self.tempdir.makedir(self.output_dir)
    # Start frames meta file
    self.meta_name = 'frames_meta.csv'
    self.frames_meta = aux_utils.make_dataframe()
    # Write images
    self.time_idx = 0
    self.pos_ids = [7, 8, 10]
    self.channel_ids = [0, 1, 2, 3]
    self.slice_ids = [0, 1, 2, 3, 4, 5]
    self.im = 1500 * np.ones((30, 20), dtype=np.uint16)
    self.im[10:20, 5:15] = 3000
    for c in self.channel_ids:
        for p in self.pos_ids:
            for z in self.slice_ids:
                im_name = aux_utils.get_im_name(
                    channel_idx=c,
                    slice_idx=z,
                    time_idx=self.time_idx,
                    pos_idx=p,
                )
                cv2.imwrite(
                    os.path.join(self.image_dir, im_name),
                    self.im + c * 100,
                )
                self.frames_meta = self.frames_meta.append(
                    aux_utils.parse_idx_from_name(im_name),
                    ignore_index=True,
                )
    # Write metadata
    self.frames_meta.to_csv(
        os.path.join(self.image_dir, self.meta_name),
        sep=',',
    )
    # Make input masks
    self.input_mask_channel = 111
    self.input_mask_dir = os.path.join(self.temp_path, 'input_mask_dir')
    self.tempdir.makedir(self.input_mask_dir)
    # Must have at least two foreground classes in mask for weight map to work
    mask = np.zeros((30, 20), dtype=np.uint16)
    mask[5:10, 5:15] = 1
    mask[20:25, 5:10] = 2
    mask_meta = aux_utils.make_dataframe()
    for p in self.pos_ids:
        for z in self.slice_ids:
            im_name = aux_utils.get_im_name(
                channel_idx=self.input_mask_channel,
                slice_idx=z,
                time_idx=self.time_idx,
                pos_idx=p,
            )
            cv2.imwrite(
                os.path.join(self.input_mask_dir, im_name),
                mask,
            )
            mask_meta = mask_meta.append(
                aux_utils.parse_idx_from_name(im_name),
                ignore_index=True,
            )
    mask_meta.to_csv(
        os.path.join(self.input_mask_dir, self.meta_name),
        sep=',',
    )
    # Create preprocessing config
    self.pp_config = {
        'output_dir': self.output_dir,
        'input_dir': self.image_dir,
        'channel_ids': [0, 1, 3],
        'num_workers': 4,
        'flat_field': {'estimate': True,
                       'block_size': 2,
                       'correct': True},
        'masks': {'channels': [3],
                  'str_elem_radius': 3,
                  'normalize_im': False},
        'tile': {'tile_size': [10, 10],
                 'step_size': [10, 10],
                 'depths': [1, 1, 1],
                 'mask_depth': 1,
                 'image_format': 'zyx',
                 'normalize_channels': [True, True, True]
                 },
    }
    # Create base config, generated partly from pp_config in script
    self.base_config = {
        'input_dir': self.image_dir,
        'output_dir': self.output_dir,
        'slice_ids': -1,
        'time_ids': -1,
        'pos_ids': -1,
        'channel_ids': self.pp_config['channel_ids'],
        'uniform_struct': True,
        'int2strlen': 3,
        'num_workers': 4,
        'normalize_channels': [True, True, True]
    }
def setUp(self):
    """Set up a dir for tiling with flatfield.

    Writes 5 z-slices (z 15-19) for channel 1 at two positions, a
    flatfield image for channel 1, a mask channel (3) for pos 7, and
    instantiates ImageTilerUniform. Also records expected tile file names
    and tile indices used by the tests.
    """
    self.tempdir = TempDirectory()
    self.temp_path = self.tempdir.path
    # Start frames meta file
    self.meta_name = 'frames_meta.csv'
    frames_meta = aux_utils.make_dataframe()
    # Write images
    self.im = 127 * np.ones((15, 11), dtype=np.uint8)
    self.im2 = 234 * np.ones((15, 11), dtype=np.uint8)
    self.channel_idx = 1
    self.time_idx = 5
    self.pos_idx1 = 7
    self.pos_idx2 = 8
    self.int2str_len = 3
    # Write test images with 5 z slices (15-19) and 2 pos idx
    for z in range(15, 20):
        im_name = aux_utils.get_im_name(
            channel_idx=self.channel_idx,
            slice_idx=z,
            time_idx=self.time_idx,
            pos_idx=self.pos_idx1,
        )
        cv2.imwrite(
            os.path.join(self.temp_path, im_name),
            self.im,
        )
        frames_meta = frames_meta.append(
            aux_utils.parse_idx_from_name(im_name),
            ignore_index=True,
        )
    for z in range(15, 20):
        im_name = aux_utils.get_im_name(
            channel_idx=self.channel_idx,
            slice_idx=z,
            time_idx=self.time_idx,
            pos_idx=self.pos_idx2,
        )
        cv2.imwrite(
            os.path.join(self.temp_path, im_name),
            self.im2,
        )
        frames_meta = frames_meta.append(
            aux_utils.parse_idx_from_name(im_name),
            ignore_index=True,
        )
    # Write metadata
    frames_meta.to_csv(
        os.path.join(self.temp_path, self.meta_name),
        sep=',',
    )
    # Add flatfield
    self.flat_field_dir = os.path.join(self.temp_path, 'ff_dir')
    self.tempdir.makedir('ff_dir')
    self.ff_im = 4. * np.ones((15, 11))
    self.ff_name = os.path.join(
        self.flat_field_dir,
        'flat-field_channel-1.npy',
    )
    np.save(self.ff_name, self.ff_im, allow_pickle=True, fix_imports=True)
    # Instantiate tiler class
    self.output_dir = os.path.join(self.temp_path, 'tile_dir')
    self.tile_inst = tile_images.ImageTilerUniform(
        input_dir=self.temp_path,
        output_dir=self.output_dir,
        tile_size=[5, 5],
        step_size=[4, 4],
        depths=3,
        channel_ids=[1],
        normalize_channels=[True],
        flat_field_dir=self.flat_field_dir,
    )
    exp_fnames = [
        'im_c001_z015_t005_p007.png',
        'im_c001_z016_t005_p007.png',
        'im_c001_z017_t005_p007.png',
    ]
    self.exp_fnames = [
        os.path.join(self.temp_path, fname) for fname in exp_fnames
    ]
    # Expected [row_start, row_end, col_start, col_end] tile indices.
    # (A second, identical assignment at the end of this method was
    # removed as dead code.)
    self.exp_tile_indices = [
        [0, 5, 0, 5], [0, 5, 4, 9], [0, 5, 6, 11],
        [10, 15, 0, 5], [10, 15, 4, 9], [10, 15, 6, 11],
        [4, 9, 0, 5], [4, 9, 4, 9], [4, 9, 6, 11],
        [8, 13, 0, 5], [8, 13, 4, 9], [8, 13, 6, 11],
    ]
    # create a mask
    mask_dir = os.path.join(self.temp_path, 'mask_dir')
    os.makedirs(mask_dir, exist_ok=True)
    mask_images = np.zeros((15, 11, 5), dtype='bool')
    mask_images[4:12, 4:9, 2:4] = 1
    # write mask images and add meta to frames_meta
    self.mask_channel = 3
    mask_meta = []
    for z in range(5):
        cur_im = mask_images[:, :, z]
        im_name = aux_utils.get_im_name(
            channel_idx=self.mask_channel,
            slice_idx=z + 15,
            time_idx=self.time_idx,
            pos_idx=self.pos_idx1,
            ext='.npy',
        )
        np.save(os.path.join(mask_dir, im_name), cur_im)
        cur_meta = {
            'channel_idx': self.mask_channel,
            'slice_idx': z + 15,
            'time_idx': self.time_idx,
            'pos_idx': self.pos_idx1,
            'file_name': im_name,
        }
        mask_meta.append(cur_meta)
    mask_meta_df = pd.DataFrame.from_dict(mask_meta)
    mask_meta_df.to_csv(os.path.join(mask_dir, 'frames_meta.csv'), sep=',')
    self.mask_dir = mask_dir
def setUp(self, mock_model):
    """
    Set up a directory with 3D images.

    Mocks the model loader, saves 3D npy volumes (5 positions x 3 channels,
    single slice index), a half-foreground metrics mask on channel 50, a
    data split json, and instantiates ImagePredictor for 3D inference.

    :param mock_model: Patched model-loading mock (return value is a
        dummy model string)
    """
    mock_model.return_value = 'dummy_model'
    self.tempdir = TempDirectory()
    self.temp_path = self.tempdir.path
    self.tempdir.makedir('image_dir')
    self.tempdir.makedir('mask_dir')
    self.tempdir.makedir('model_dir')
    self.image_dir = os.path.join(self.temp_path, 'image_dir')
    self.mask_dir = os.path.join(self.temp_path, 'mask_dir')
    self.model_dir = os.path.join(self.temp_path, 'model_dir')
    # Create a temp image dir
    self.im = np.zeros((10, 10, 8), dtype=np.uint8)
    self.frames_meta = aux_utils.make_dataframe()
    self.time_idx = 2
    self.slice_idx = 0
    for p in range(5):
        for c in range(3):
            im_name = aux_utils.get_im_name(
                time_idx=self.time_idx,
                channel_idx=c,
                slice_idx=self.slice_idx,
                pos_idx=p,
                ext='.npy',
            )
            np.save(os.path.join(self.image_dir, im_name),
                    self.im + c * 10,
                    allow_pickle=True,
                    fix_imports=True)
            self.frames_meta = self.frames_meta.append(
                aux_utils.parse_idx_from_name(im_name, aux_utils.DF_NAMES),
                ignore_index=True,
            )
    # Write frames meta to image dir too
    self.frames_meta.to_csv(os.path.join(self.image_dir, 'frames_meta.csv'))
    # Save masks and mask meta
    self.mask_meta = aux_utils.make_dataframe()
    self.mask_channel = 50
    # Mask half the image
    mask = np.zeros_like(self.im)
    mask[:5, ...] = 1
    for p in range(5):
        im_name = aux_utils.get_im_name(
            time_idx=self.time_idx,
            channel_idx=self.mask_channel,
            slice_idx=self.slice_idx,
            pos_idx=p,
            ext='.npy',
        )
        np.save(os.path.join(self.mask_dir, im_name), mask)
        self.mask_meta = self.mask_meta.append(
            aux_utils.parse_idx_from_name(im_name, aux_utils.DF_NAMES),
            ignore_index=True,
        )
    # Write frames meta to mask dir too
    self.mask_meta.to_csv(os.path.join(self.mask_dir, 'frames_meta.csv'))
    # Setup model dir
    split_samples = {
        "train": [0, 1],
        "val": [2],
        "test": [3, 4],
    }
    aux_utils.write_json(
        split_samples,
        os.path.join(self.model_dir, 'split_samples.json'),
    )
    # Make configs with fields necessary for 3D regression inference
    self.train_config = {
        'network': {
            'class': 'UNet3D',
            'data_format': 'channels_first',
            'num_filters_per_block': [8, 16],
            'depth': 5,
            'width': 5,
            'height': 5
        },
        'dataset': {
            'split_by_column': 'pos_idx',
            'input_channels': [1],
            'target_channels': [2],
            'model_task': 'regression',
        },
    }
    self.inference_config = {
        'model_dir': self.model_dir,
        'model_fname': 'dummy_weights.hdf5',
        'image_dir': self.image_dir,
        'data_split': 'test',
        'images': {
            'image_format': 'zyx',
            'image_ext': '.png',
        },
        'metrics': {
            'metrics': ['mse'],
            'metrics_orientations': ['xyz'],
        },
        'masks': {
            'mask_dir': self.mask_dir,
            'mask_type': 'metrics',
            'mask_channel': 50,
        },
        'inference_3d': {
            'tile_shape': [5, 5, 5],
            'num_overlap': [1, 1, 1],
            'overlap_operation': 'mean',
        },
    }
    # Instantiate class
    self.infer_inst = image_inference.ImagePredictor(
        train_config=self.train_config,
        inference_config=self.inference_config,
    )
def setUp(self):
    """
    Set up a directory with images.

    Writes a 5-position x 4-slice x 3-channel image dir plus masks on
    channel 50, then instantiates InferenceDataSet restricted to
    positions [1, 3] for a 2.5D segmentation task.
    """
    self.tempdir = TempDirectory()
    self.temp_path = self.tempdir.path
    self.tempdir.makedir('image_dir')
    self.tempdir.makedir('model_dir')
    self.tempdir.makedir('mask_dir')
    self.image_dir = os.path.join(self.temp_path, 'image_dir')
    self.model_dir = os.path.join(self.temp_path, 'model_dir')
    self.mask_dir = os.path.join(self.temp_path, 'mask_dir')
    # Create a temp image dir
    im = np.zeros((10, 16), dtype=np.uint8)
    self.frames_meta = aux_utils.make_dataframe()
    self.time_idx = 2
    for p in range(5):
        for z in range(4):
            for c in range(3):
                im_name = aux_utils.get_im_name(
                    time_idx=self.time_idx,
                    channel_idx=c,
                    slice_idx=z,
                    pos_idx=p,
                )
                cv2.imwrite(os.path.join(self.image_dir, im_name),
                            im + c * 10)
                self.frames_meta = self.frames_meta.append(
                    aux_utils.parse_idx_from_name(im_name,
                                                  aux_utils.DF_NAMES),
                    ignore_index=True,
                )
    # Write frames meta to image dir too
    self.frames_meta.to_csv(os.path.join(self.image_dir, 'frames_meta.csv'))
    # Save masks and mask meta
    self.mask_meta = aux_utils.make_dataframe()
    self.mask_channel = 50
    for p in range(5):
        for z in range(4):
            im_name = aux_utils.get_im_name(
                time_idx=2,
                channel_idx=self.mask_channel,
                slice_idx=z,
                pos_idx=p,
            )
            cv2.imwrite(os.path.join(self.mask_dir, im_name), im + 1)
            self.mask_meta = self.mask_meta.append(
                aux_utils.parse_idx_from_name(im_name, aux_utils.DF_NAMES),
                ignore_index=True,
            )
    # Write frames meta to mask dir too
    self.mask_meta.to_csv(os.path.join(self.mask_dir, 'frames_meta.csv'))
    # Select inference split of dataset
    self.split_col_ids = ('pos_idx', [1, 3])
    # Make configs with fields necessary for inference dataset
    dataset_config = {
        'input_channels': [2],
        'target_channels': [self.mask_channel],
        'model_task': 'segmentation',
    }
    self.network_config = {
        'class': 'UNetStackTo2D',
        'depth': 3,
        'data_format': 'channels_first',
    }
    # Instantiate class
    self.data_inst = inference_dataset.InferenceDataSet(
        image_dir=self.image_dir,
        dataset_config=dataset_config,
        network_config=self.network_config,
        split_col_ids=self.split_col_ids,
        mask_dir=self.mask_dir,
    )
import glob import inspect import json import logging import nose.tools import os from testfixtures import TempDirectory import micro_dl.utils.aux_utils as aux_utils # Create test metadata table meta_df = aux_utils.make_dataframe() channel_idx = 5 time_idx = 6 for s in range(3): for p in range(4): im_temp = aux_utils.get_im_name( channel_idx=channel_idx, slice_idx=s, time_idx=time_idx, pos_idx=p, ) meta_df = meta_df.append( aux_utils.parse_idx_from_name(im_temp), ignore_index=True, ) def test_import_object(): module_name = 'networks' class_name = 'InterpUpSampling2D'