def test_bad_mask(self):
    """_transform_masks rejects unknown transforms and malformed masks."""
    # An unrecognized transform name must raise.
    with self.assertRaises(ValueError):
        bad = np.random.randint(3, size=(5, 30, 30, 1))
        image_generators._transform_masks(bad, transform='unknown')
    # Masks with a multi-valued channel axis or an invalid rank must raise.
    bad_shapes = [
        (5, 30, 30, 2),          # 2D data, bad channel axis
        (5, 10, 30, 30, 2),      # 3D data, bad channel axis
        (5, 30, 1),              # ndim < 4
        (5, 10, 30, 30, 10, 1),  # ndim > 5
    ]
    for shape in bad_shapes:
        with self.assertRaises(ValueError):
            bad = np.random.randint(3, size=shape)
            image_generators._transform_masks(bad, transform=None)
def __init__(self,
             train_dict,
             image_data_generator,
             batch_size=1,
             skip=None,
             shuffle=False,
             transform=None,
             transform_kwargs=None,
             seed=None,
             data_format='channels_last',
             save_to_dir=None,
             save_prefix='',
             save_format='png'):
    """Iterator over image/label pairs for fully-convolutional training.

    Args:
        train_dict: dict with keys 'X' (rank-4 image data) and 'y'
            (label masks with the same batch size as 'X').
        image_data_generator: generator used to transform yielded batches.
        batch_size: int, number of samples per batch.
        skip: int or None, stored for downstream use (skip connections).
        shuffle: bool, whether to shuffle sample order each epoch.
        transform: str or None, mask transform applied to 'y'.
        transform_kwargs: dict or None, extra kwargs for the transform.
        seed: int or None, random seed for shuffling.
        data_format: 'channels_last' or 'channels_first'.
        save_to_dir: str or None, directory for saving augmented images.
        save_prefix: str, filename prefix for saved images.
        save_format: str, file format for saved images.

    Raises:
        ValueError: if X and y batch sizes differ, or X is not rank 4.
    """
    # Avoid a shared mutable default argument; behavior is unchanged
    # for callers passing nothing or passing their own dict.
    if transform_kwargs is None:
        transform_kwargs = {}
    X, y = train_dict['X'], train_dict['y']
    if X.shape[0] != y.shape[0]:
        # NOTE: trailing space added so the concatenated message reads
        # 'the same length', not 'the samelength'.
        raise ValueError('Training batches and labels should have the same '
                         'length. Found X.shape: {} y.shape: {}'.format(
                             X.shape, y.shape))
    self.x = np.asarray(X, dtype=K.floatx())
    if self.x.ndim != 4:
        raise ValueError('Input data in `ImageFullyConvIterator` '
                         'should have rank 4. You passed an array '
                         'with shape', self.x.shape)
    self.y = _transform_masks(y, transform,
                              data_format=data_format,
                              **transform_kwargs)
    self.channel_axis = 3 if data_format == 'channels_last' else 1
    self.skip = skip
    self.image_data_generator = image_data_generator
    self.data_format = data_format
    self.save_to_dir = save_to_dir
    self.save_prefix = save_prefix
    self.save_format = save_format
    super(ImageFullyConvIterator, self).__init__(
        self.x.shape[0], batch_size, shuffle, seed)
def __init__(self,
             train_dict,
             image_data_generator,
             batch_size=32,
             shuffle=False,
             window_size=(30, 30),
             transform=None,
             transform_kwargs=None,
             balance_classes=False,
             max_class_samples=None,
             seed=None,
             data_format='channels_last',
             save_to_dir=None,
             save_prefix='',
             save_format='png'):
    """Iterator yielding sampled pixel windows and their class labels.

    Args:
        train_dict: dict with keys 'X' (rank-4 image data) and 'y'
            (label masks; may be None per the guard below).
        image_data_generator: generator used to transform yielded batches.
        batch_size: int, number of samples per batch.
        shuffle: bool, whether to shuffle sample order each epoch.
        window_size: 2-tuple (or int) of the sampled window size.
        transform: str or None, mask transform applied to 'y'.
        transform_kwargs: dict or None, extra kwargs for the transform.
        balance_classes: bool, whether to balance sampled class counts.
        max_class_samples: int or None, cap on samples per class.
        seed: int or None, random seed.
        data_format: 'channels_last' or 'channels_first'.
        save_to_dir: str or None, directory for saving augmented images.
        save_prefix: str, filename prefix for saved images.
        save_format: str, file format for saved images.

    Raises:
        ValueError: if X and y batch sizes differ, or X is not rank 4.
    """
    # Avoid a shared mutable default argument.
    if transform_kwargs is None:
        transform_kwargs = {}
    X, y = train_dict['X'], train_dict['y']
    if y is not None and X.shape[0] != y.shape[0]:
        # NOTE: trailing space added so the concatenated message reads
        # 'the same length', not 'the samelength'.
        raise ValueError('Training batches and labels should have the same '
                         'length. Found X.shape: {} y.shape: {}'.format(
                             X.shape, y.shape))
    self.x = np.asarray(X, dtype=K.floatx())
    if self.x.ndim != 4:
        raise ValueError(
            'Input data in `ImageSampleArrayIterator` '
            'should have rank 4. You passed an array '
            'with shape', self.x.shape)
    window_size = conv_utils.normalize_tuple(window_size, 2, 'window_size')
    y = _transform_masks(y, transform,
                         data_format=data_format,
                         **transform_kwargs)
    # Sample pixel coordinates (and their labels) from the full masks.
    pixels_x, pixels_y, batch, y = sample_label_matrix(
        y=y,
        padding='valid',
        window_size=window_size,
        max_training_examples=None,
        data_format=data_format)
    self.y = y
    self.channel_axis = 3 if data_format == 'channels_last' else 1
    self.batch = batch
    self.pixels_x = pixels_x
    self.pixels_y = pixels_y
    self.win_x = window_size[0]
    self.win_y = window_size[1]
    self.image_data_generator = image_data_generator
    self.data_format = data_format
    self.save_to_dir = save_to_dir
    self.save_prefix = save_prefix
    self.save_format = save_format
    self.class_balance(max_class_samples, balance_classes, seed=seed)
    self.y = to_categorical(self.y).astype('int32')
    super(ImageSampleArrayIterator, self).__init__(
        len(self.y), batch_size, shuffle, seed)
def test_watershed_transform(self):
    """watershed transform yields one channel per distance bin."""
    erosion_width = 1
    cases = [
        # (distance_bins, mask shape, data_format, expected shape)
        (4, (5, 30, 30, 1), 'channels_last', (5, 30, 30, 4)),
        (6, (5, 1, 30, 30), 'channels_first', (5, 6, 30, 30)),
        (5, (5, 10, 30, 30, 1), 'channels_last', (5, 10, 30, 30, 5)),
        (4, (5, 1, 10, 30, 30), 'channels_first', (5, 4, 10, 30, 30)),
    ]
    # Covers 2D and 3D masks in both data formats.
    for distance_bins, shape, data_format, expected in cases:
        mask = np.random.randint(3, size=shape)
        mask_transform = image_generators._transform_masks(
            mask,
            transform='watershed',
            distance_bins=distance_bins,
            erosion_width=erosion_width,
            data_format=data_format)
        self.assertEqual(mask_transform.shape, expected)
def test_fgbg_transform(self):
    """fgbg transform yields exactly two channels: foreground/background."""
    num_classes = 2  # always 2 for fg and bg
    cases = [
        # (mask shape, data_format, expected transformed shape)
        ((5, 30, 30, 1), 'channels_last', (5, 30, 30, num_classes)),
        ((5, 1, 30, 30), 'channels_first', (5, num_classes, 30, 30)),
        ((5, 10, 30, 30, 1), 'channels_last', (5, 10, 30, 30, num_classes)),
        ((5, 1, 10, 30, 30), 'channels_first', (5, num_classes, 10, 30, 30)),
    ]
    # Covers 2D and 3D masks in both data formats.
    for shape, data_format, expected in cases:
        mask = np.random.randint(3, size=shape)
        mask_transform = image_generators._transform_masks(
            mask, transform='fgbg', data_format=data_format)
        self.assertEqual(mask_transform.shape, expected)
def test_no_transform(self):
    """transform=None one-hot encodes masks across the channel axis."""
    # BUGFIX: the previous `np.random.randint(5, size=1)[0]` could draw 0,
    # making `np.random.randint(0, ...)` below raise ValueError (and a draw
    # of 1 degenerates the test). Bound the class count to [2, 5).
    num_classes = np.random.randint(2, 5)
    # test 2D masks
    mask = np.random.randint(num_classes, size=(5, 30, 30, 1))
    mask_transform = image_generators._transform_masks(
        mask, transform=None, data_format='channels_last')
    self.assertEqual(mask_transform.shape, (5, 30, 30, num_classes))
    mask = np.random.randint(num_classes, size=(5, 1, 30, 30))
    mask_transform = image_generators._transform_masks(
        mask, transform=None, data_format='channels_first')
    self.assertEqual(mask_transform.shape, (5, num_classes, 30, 30))
    # test 3D masks
    mask = np.random.randint(num_classes, size=(5, 10, 30, 30, 1))
    mask_transform = image_generators._transform_masks(
        mask, transform=None, data_format='channels_last')
    self.assertEqual(mask_transform.shape, (5, 10, 30, 30, num_classes))
    mask = np.random.randint(num_classes, size=(5, 1, 10, 30, 30))
    mask_transform = image_generators._transform_masks(
        mask, transform=None, data_format='channels_first')
    self.assertEqual(mask_transform.shape, (5, num_classes, 10, 30, 30))
def test_deepcell_transform(self):
    """deepcell transform yields four output channels."""
    num_classes = 4
    cases = [
        # (mask shape, data_format, expected transformed shape)
        ((5, 30, 30, 1), 'channels_last', (5, 30, 30, num_classes)),
        ((5, 1, 30, 30), 'channels_first', (5, num_classes, 30, 30)),
        ((5, 10, 30, 30, 1), 'channels_last', (5, 10, 30, 30, num_classes)),
        ((5, 1, 10, 30, 30), 'channels_first', (5, num_classes, 10, 30, 30)),
    ]
    # Covers 2D and 3D masks in both data formats.
    for shape, data_format, expected in cases:
        mask = np.random.randint(3, size=shape)
        mask_transform = image_generators._transform_masks(
            mask, transform='deepcell', data_format=data_format)
        self.assertEqual(mask_transform.shape, expected)
def __init__(self,
             train_dict,
             movie_data_generator,
             batch_size=32,
             frames_per_batch=10,
             skip=None,
             transform=None,
             transform_kwargs=None,
             shuffle=False,
             seed=None,
             data_format='channels_last',
             save_to_dir=None,
             save_prefix='',
             save_format='png'):
    """Iterator over 5D movie data and transformed label masks.

    Args:
        train_dict: dict with keys 'X' (rank-5 movie data) and 'y'
            (label masks; may be None per the guard below).
        movie_data_generator: generator used to transform yielded batches.
        batch_size: int, number of samples per batch.
        frames_per_batch: int, frames per training batch; must not exceed
            the number of frames in 'X'.
        skip: int or None, stored for downstream use (skip connections).
        transform: str or None, mask transform applied to 'y'.
        transform_kwargs: dict or None, extra kwargs for the transform.
        shuffle: bool, whether to shuffle sample order each epoch.
        seed: int or None, random seed.
        data_format: 'channels_last' or 'channels_first'.
        save_to_dir: str or None, directory for saving augmented images.
        save_prefix: str, filename prefix for saved images.
        save_format: str, file format for saved images.

    Raises:
        ValueError: if X/y sizes differ, X is not rank 5, or
            frames_per_batch exceeds the movie length.
    """
    # Avoid a shared mutable default argument.
    if transform_kwargs is None:
        transform_kwargs = {}
    X, y = train_dict['X'], train_dict['y']
    if y is not None and X.shape[0] != y.shape[0]:
        # BUGFIX: dropped the duplicated 'Found Found' in the message.
        raise ValueError('`X` (movie data) and `y` (labels) '
                         'should have the same size. Found '
                         'x.shape = {}, y.shape = {}'.format(
                             X.shape, y.shape))
    self.channel_axis = 4 if data_format == 'channels_last' else 1
    self.time_axis = 1 if data_format == 'channels_last' else 2
    self.x = np.asarray(X, dtype=K.floatx())
    self.y = _transform_masks(y, transform,
                              data_format=data_format,
                              **transform_kwargs)
    if self.x.ndim != 5:
        raise ValueError(
            'Input data in `MovieArrayIterator` '
            'should have rank 5. You passed an array '
            'with shape', self.x.shape)
    if self.x.shape[self.time_axis] - frames_per_batch < 0:
        raise ValueError(
            'The number of frames used in each training batch should '
            'be less than the number of frames in the training data!')
    self.frames_per_batch = frames_per_batch
    self.skip = skip
    self.movie_data_generator = movie_data_generator
    self.data_format = data_format
    self.save_to_dir = save_to_dir
    self.save_prefix = save_prefix
    self.save_format = save_format
    super(MovieArrayIterator, self).__init__(
        len(self.y), batch_size, shuffle, seed)
def __init__(self,
             train_dict,
             movie_data_generator,
             compute_shapes=guess_shapes,
             anchor_params=None,
             pyramid_levels=None,
             min_objects=3,
             num_classes=1,
             frames_per_batch=2,
             clear_borders=False,
             include_masks=False,
             include_final_detection_layer=False,
             panoptic=False,
             transforms=None,
             transforms_kwargs=None,
             batch_size=32,
             shuffle=False,
             seed=None,
             data_format='channels_last',
             save_to_dir=None,
             save_prefix='',
             save_format='png'):
    """Iterator over 5D movie data producing RetinaNet training targets.

    Args:
        train_dict: dict with keys 'X' (rank-5 movie data) and 'y'
            (instance masks); optional 'y_semantic*' keys are collected
            as extra semantic targets when `panoptic` is True.
        movie_data_generator: generator used to transform yielded batches.
        compute_shapes: callable mapping image shape to pyramid shapes;
            changes based on the model backbone.
        anchor_params: anchor parameters for target generation.
        pyramid_levels: list of level names; defaults to P3-P7.
        min_objects: int, images with fewer labeled objects are removed.
        num_classes: int, number of object classes.
        frames_per_batch: int, frames per training batch.
        clear_borders: bool, whether to clear border-touching objects.
        include_masks: bool, whether to include mask targets.
        include_final_detection_layer: bool, stored for downstream use.
        panoptic: bool, whether to build semantic segmentation targets.
        transforms: list of mask transform names; defaults to ['watershed'].
        transforms_kwargs: dict mapping transform name -> kwargs dict.
        batch_size, shuffle, seed, data_format, save_to_dir, save_prefix,
            save_format: standard iterator options.

    Raises:
        ValueError: if X/y sizes differ, X is not rank 5, or
            frames_per_batch exceeds the movie length.
    """
    # Replace mutable default arguments with fresh per-call values.
    if pyramid_levels is None:
        pyramid_levels = ['P3', 'P4', 'P5', 'P6', 'P7']
    if transforms is None:
        transforms = ['watershed']
    if transforms_kwargs is None:
        transforms_kwargs = {}
    X, y = train_dict['X'], train_dict['y']
    if X.shape[0] != y.shape[0]:
        # NOTE: trailing space added so the concatenated message reads
        # 'the same length', not 'the samelength'.
        raise ValueError('Training batches and labels should have the same '
                         'length. Found X.shape: {} y.shape: {}'.format(
                             X.shape, y.shape))
    if X.ndim != 5:
        # BUGFIX: the message previously named `RetinaNetIterator` and
        # claimed 'rank 4' even though rank 5 is required here.
        raise ValueError(
            'Input data in `RetinaMovieIterator` '
            'should have rank 5. You passed an array '
            'with shape', X.shape)
    self.x = np.asarray(X, dtype=K.floatx())
    self.y = np.asarray(y, dtype='int32')
    # `compute_shapes` changes based on the model backbone.
    self.compute_shapes = compute_shapes
    self.anchor_params = anchor_params
    # e.g. 'P3' -> 3
    self.pyramid_levels = [int(level[1:]) for level in pyramid_levels]
    self.min_objects = min_objects
    self.num_classes = num_classes
    self.frames_per_batch = frames_per_batch
    self.include_masks = include_masks
    self.include_final_detection_layer = include_final_detection_layer
    self.panoptic = panoptic
    self.transforms = transforms
    self.transforms_kwargs = transforms_kwargs
    self.channel_axis = 4 if data_format == 'channels_last' else 1
    self.time_axis = 1 if data_format == 'channels_last' else 2
    self.row_axis = 2 if data_format == 'channels_last' else 3
    self.col_axis = 3 if data_format == 'channels_last' else 4
    self.movie_data_generator = movie_data_generator
    self.data_format = data_format
    self.save_to_dir = save_to_dir
    self.save_prefix = save_prefix
    self.save_format = save_format
    self.y_semantic_list = []  # optional semantic segmentation targets
    if X.shape[self.time_axis] - frames_per_batch < 0:
        raise ValueError(
            'The number of frames used in each training batch should '
            'be less than the number of frames in the training data!')
    # Add semantic segmentation targets if panoptic segmentation
    # flag is True
    if panoptic:
        # Create a list of all the semantic targets. We need to be able
        # to have multiple semantic heads.
        # Add all the keys that contain y_semantic
        for key in train_dict:
            if 'y_semantic' in key:
                self.y_semantic_list.append(train_dict[key])
        # Add transformed masks
        for transform in transforms:
            transform_kwargs = transforms_kwargs.get(transform, dict())
            y_transforms = []
            # Transform each frame independently, then re-stack in time.
            for time in range(y.shape[self.time_axis]):
                if data_format == 'channels_first':
                    y_temp = y[:, :, time, ...]
                else:
                    y_temp = y[:, time, ...]
                y_temp_transform = _transform_masks(
                    y_temp, transform,
                    data_format=data_format,
                    **transform_kwargs)
                y_temp_transform = np.asarray(y_temp_transform, dtype='int32')
                y_transforms.append(y_temp_transform)
            y_transform = np.stack(y_transforms, axis=self.time_axis)
            self.y_semantic_list.append(y_transform)
    invalid_batches = []
    # Remove images with small numbers of cells
    for b in range(self.x.shape[0]):
        y_batch = np.squeeze(self.y[b], axis=self.channel_axis - 1)
        y_batch = clear_border(y_batch) if clear_borders else y_batch
        y_batch = np.expand_dims(y_batch, axis=self.channel_axis - 1)
        self.y[b] = y_batch
        # unique includes the background label, hence the -1
        if len(np.unique(self.y[b])) - 1 < self.min_objects:
            invalid_batches.append(b)
    invalid_batches = np.array(invalid_batches, dtype='int')
    if invalid_batches.size > 0:
        logging.warning('Removing %s of %s images with fewer than %s '
                        'objects.', invalid_batches.size,
                        self.x.shape[0], self.min_objects)
    # np.delete with an empty index array is a no-op.
    self.y = np.delete(self.y, invalid_batches, axis=0)
    self.x = np.delete(self.x, invalid_batches, axis=0)
    self.y_semantic_list = [
        np.delete(y, invalid_batches, axis=0)
        for y in self.y_semantic_list
    ]
    super(RetinaMovieIterator, self).__init__(
        self.x.shape[0], batch_size, shuffle, seed)
def __init__(self,
             train_dict,
             image_data_generator,
             batch_size=1,
             shuffle=False,
             transforms=None,
             transforms_kwargs=None,
             seed=None,
             min_objects=3,
             data_format='channels_last',
             save_to_dir=None,
             save_prefix='',
             save_format='png'):
    """Iterator over image data producing semantic segmentation targets.

    Args:
        train_dict: dict with keys 'X' (rank-4 image data) and 'y'
            (instance masks, required).
        image_data_generator: generator used to transform yielded batches.
        batch_size: int, number of samples per batch.
        shuffle: bool, whether to shuffle sample order each epoch.
        transforms: list of mask transform names; defaults to
            ['watershed-cont'].
        transforms_kwargs: dict mapping transform name -> kwargs dict.
        seed: int or None, random seed.
        min_objects: int, images with fewer labeled objects are removed.
        data_format: 'channels_last' or 'channels_first'.
        save_to_dir, save_prefix, save_format: options for saving
            augmented images.

    Raises:
        ValueError: if y is missing, X/y sizes differ, or X is not rank 4.
    """
    # Replace mutable default arguments with fresh per-call values.
    if transforms is None:
        transforms = ['watershed-cont']
    if transforms_kwargs is None:
        transforms_kwargs = {}
    X, y = train_dict['X'], train_dict['y']
    # BUGFIX: validate `y` before touching `y.shape` — previously a None
    # mask raised AttributeError instead of this intended ValueError.
    if y is None:
        raise ValueError('Instance masks are required for the SemanticIterator')
    if X.shape[0] != y.shape[0]:
        # NOTE: trailing space added so the concatenated message reads
        # 'the same length', not 'the samelength'.
        raise ValueError('Training batches and labels should have the same '
                         'length. Found X.shape: {} y.shape: {}'.format(
                             X.shape, y.shape))
    if X.ndim != 4:
        raise ValueError('Input data in `SemanticIterator` '
                         'should have rank 4. You passed an array '
                         'with shape', X.shape)
    self.x = np.asarray(X, dtype=K.floatx())
    self.y = np.asarray(y, dtype='int32')
    self.channel_axis = 3 if data_format == 'channels_last' else 1
    self.image_data_generator = image_data_generator
    self.data_format = data_format
    self.save_to_dir = save_to_dir
    self.save_prefix = save_prefix
    self.save_format = save_format
    self.min_objects = min_objects
    self.y_semantic_list = []  # optional semantic segmentation targets
    # Create one transformed target per requested transform; multiple
    # targets support models with multiple semantic heads.
    for transform in transforms:
        transform_kwargs = transforms_kwargs.get(transform, dict())
        y_transform = _transform_masks(y, transform,
                                       data_format=data_format,
                                       **transform_kwargs)
        # Multi-channel (categorical) targets become ints; single-channel
        # (regression-style) targets stay floats.
        if y_transform.shape[self.channel_axis] > 1:
            y_transform = np.asarray(y_transform, dtype='int32')
        elif y_transform.shape[self.channel_axis] == 1:
            y_transform = np.asarray(y_transform, dtype=K.floatx())
        self.y_semantic_list.append(y_transform)
    invalid_batches = []
    # Remove images with small numbers of cells
    for b in range(self.x.shape[0]):
        # NOTE(review): this squeeze/expand round-trip is a no-op for
        # single-channel masks; kept because np.squeeze raises on a
        # multi-valued channel axis, acting as a shape check.
        y_batch = np.squeeze(self.y[b], axis=self.channel_axis - 1)
        y_batch = np.expand_dims(y_batch, axis=self.channel_axis - 1)
        self.y[b] = y_batch
        # unique includes the background label, hence the -1
        if len(np.unique(self.y[b])) - 1 < self.min_objects:
            invalid_batches.append(b)
    invalid_batches = np.array(invalid_batches, dtype='int')
    if invalid_batches.size > 0:
        logging.warning('Removing %s of %s images with fewer than %s '
                        'objects.', invalid_batches.size,
                        self.x.shape[0], self.min_objects)
    # np.delete with an empty index array is a no-op.
    self.x = np.delete(self.x, invalid_batches, axis=0)
    self.y = np.delete(self.y, invalid_batches, axis=0)
    self.y_semantic_list = [np.delete(y, invalid_batches, axis=0)
                            for y in self.y_semantic_list]
    super(SemanticIterator, self).__init__(
        self.x.shape[0], batch_size, shuffle, seed)
def __init__(self,
             train_dict,
             movie_data_generator,
             batch_size=1,
             frames_per_batch=5,
             shuffle=False,
             transforms=None,
             transforms_kwargs=None,
             seed=None,
             min_objects=3,
             data_format='channels_last',
             save_to_dir=None,
             save_prefix='',
             save_format='png'):
    """Iterator over 5D movie data producing semantic segmentation targets.

    Args:
        train_dict: dict with keys 'X' (rank-5 movie data) and 'y'
            (instance masks, required).
        movie_data_generator: generator used to transform yielded batches.
        batch_size: int, number of samples per batch.
        frames_per_batch: int, frames per training batch.
        shuffle: bool, whether to shuffle sample order each epoch.
        transforms: list of mask transform names; defaults to
            ['watershed-cont'].
        transforms_kwargs: dict mapping transform name -> kwargs dict.
        seed: int or None, random seed.
        min_objects: int, movies with fewer labeled objects are removed.
        data_format: 'channels_last' or 'channels_first'.
        save_to_dir, save_prefix, save_format: options for saving
            augmented images.

    Raises:
        ValueError: if X or y is missing, X/y sizes differ, X is not
            rank 5, or frames_per_batch exceeds the movie length.
    """
    # Replace mutable default arguments with fresh per-call values.
    if transforms is None:
        transforms = ['watershed-cont']
    if transforms_kwargs is None:
        transforms_kwargs = {}
    # Load data
    if 'X' not in train_dict:
        raise ValueError('No training data found in train_dict')
    if 'y' not in train_dict:
        raise ValueError('Instance masks are required for the '
                         'SemanticMovieIterator')
    X, y = train_dict['X'], train_dict['y']
    if X.shape[0] != y.shape[0]:
        # NOTE: trailing space added so the concatenated message reads
        # 'the same length', not 'the samelength'.
        raise ValueError('Training batches and labels should have the same '
                         'length. Found X.shape: {} y.shape: {}'.format(
                             X.shape, y.shape))
    if X.ndim != 5:
        raise ValueError(
            'Input data in `SemanticMovieIterator` '
            'should have rank 5. You passed an array '
            'with shape', X.shape)
    self.x = np.asarray(X, dtype=K.floatx())
    self.y = np.asarray(y, dtype='int32')
    self.frames_per_batch = frames_per_batch
    self.transforms = transforms
    self.transforms_kwargs = transforms_kwargs
    self.channel_axis = 4 if data_format == 'channels_last' else 1
    self.time_axis = 1 if data_format == 'channels_last' else 2
    self.row_axis = 2 if data_format == 'channels_last' else 3
    self.col_axis = 3 if data_format == 'channels_last' else 4
    self.movie_data_generator = movie_data_generator
    self.data_format = data_format
    self.min_objects = min_objects
    self.save_to_dir = save_to_dir
    self.save_prefix = save_prefix
    self.save_format = save_format
    self.y_semantic_list = []  # optional semantic segmentation targets
    if X.shape[self.time_axis] - frames_per_batch < 0:
        raise ValueError(
            'The number of frames used in each training batch should '
            'be less than the number of frames in the training data!')
    # Create one transformed target per requested transform; multiple
    # targets support models with multiple semantic heads.
    for transform in transforms:
        transform_kwargs = transforms_kwargs.get(transform, dict())
        y_transform = _transform_masks(y, transform,
                                       data_format=data_format,
                                       **transform_kwargs)
        # Multi-channel (categorical) targets become ints; single-channel
        # (regression-style) targets stay floats.
        if y_transform.shape[self.channel_axis] > 1:
            y_transform = np.asarray(y_transform, dtype='int32')
        elif y_transform.shape[self.channel_axis] == 1:
            y_transform = np.asarray(y_transform, dtype=K.floatx())
        self.y_semantic_list.append(y_transform)
    invalid_batches = []
    # Remove images with small numbers of cells
    for b in range(self.x.shape[0]):
        # unique includes the background label, hence the -1
        if len(np.unique(self.y[b])) - 1 < self.min_objects:
            invalid_batches.append(b)
    invalid_batches = np.array(invalid_batches, dtype='int')
    if invalid_batches.size > 0:
        logging.warning('Removing %s of %s images with fewer than %s '
                        'objects.', invalid_batches.size,
                        self.x.shape[0], self.min_objects)
    # np.delete with an empty index array is a no-op.
    self.x = np.delete(self.x, invalid_batches, axis=0)
    self.y = np.delete(self.y, invalid_batches, axis=0)
    self.y_semantic_list = [
        np.delete(y, invalid_batches, axis=0)
        for y in self.y_semantic_list
    ]
    super(SemanticMovieIterator, self).__init__(
        self.x.shape[0], batch_size, shuffle, seed)
def __init__(self,
             train_dict,
             movie_data_generator,
             batch_size=32,
             shuffle=False,
             transform=None,
             transform_kwargs=None,
             balance_classes=False,
             max_class_samples=None,
             window_size=(30, 30, 5),
             seed=None,
             data_format='channels_last',
             save_to_dir=None,
             save_prefix='',
             save_format='png'):
    """Iterator yielding sampled 3D movie windows and their class labels.

    Args:
        train_dict: dict with keys 'X' (rank-5 movie data) and 'y'
            (label masks; may be None per the guard below).
        movie_data_generator: generator used to transform yielded batches.
        batch_size: int, number of samples per batch.
        shuffle: bool, whether to shuffle sample order each epoch.
        transform: str or None, mask transform applied to 'y'.
        transform_kwargs: dict or None, extra kwargs for the transform.
        balance_classes: bool, whether to balance sampled class counts.
        max_class_samples: int or None, cap on samples per class.
        window_size: 3-tuple (or int) of the sampled window size (x, y, z).
        seed: int or None, random seed.
        data_format: 'channels_last' or 'channels_first'.
        save_to_dir, save_prefix, save_format: options for saving
            augmented images.

    Raises:
        ValueError: if X/y sizes differ or X is not rank 5.
    """
    # Avoid a shared mutable default argument.
    if transform_kwargs is None:
        transform_kwargs = {}
    X, y = train_dict['X'], train_dict['y']
    if y is not None and X.shape[0] != y.shape[0]:
        # BUGFIX: dropped the duplicated 'Found Found' in the message.
        raise ValueError('`X` (movie data) and `y` (labels) '
                         'should have the same size. Found '
                         'x.shape = {}, y.shape = {}'.format(
                             X.shape, y.shape))
    self.channel_axis = 4 if data_format == 'channels_last' else 1
    self.time_axis = 1 if data_format == 'channels_last' else 2
    self.x = np.asarray(X, dtype=K.floatx())
    y = _transform_masks(y, transform,
                         data_format=data_format,
                         **transform_kwargs)
    if self.x.ndim != 5:
        raise ValueError(
            'Input data in `SampleMovieArrayIterator` '
            'should have rank 5. You passed an array '
            'with shape', self.x.shape)
    window_size = conv_utils.normalize_tuple(window_size, 3, 'window_size')
    # Sample voxel coordinates (and their labels) from the full masks.
    pixels_z, pixels_x, pixels_y, batch, y = sample_label_movie(
        y=y,
        padding='valid',
        window_size=window_size,
        max_training_examples=None,
        data_format=data_format)
    self.y = y
    self.win_x = window_size[0]
    self.win_y = window_size[1]
    self.win_z = window_size[2]
    self.pixels_x = pixels_x
    self.pixels_y = pixels_y
    self.pixels_z = pixels_z
    self.batch = batch
    self.movie_data_generator = movie_data_generator
    self.data_format = data_format
    self.save_to_dir = save_to_dir
    self.save_prefix = save_prefix
    self.save_format = save_format
    self.class_balance(max_class_samples, balance_classes, seed=seed)
    self.y = to_categorical(self.y).astype('int32')
    super(SampleMovieArrayIterator, self).__init__(
        len(self.y), batch_size, shuffle, seed)