def __getitem__(self, index):
    """Load and augment the (image, mask) pair at position `index`.

    Augmentation pipeline: random resize -> pad to output size -> random
    crop -> random horizontal flip, applied identically to image and mask.

    Args:
        index: Integer index into self._image_mask_paths.

    Returns:
        A tuple (image_preprocessed, mask) where image_preprocessed is the
        output of self._transforms and mask is a torch.LongTensor of class
        indices.
    """
    image_path, mask_path = self._image_mask_paths[index]
    image = PIL.Image.open(image_path)
    mask = PIL.Image.open(mask_path)
    # Data augmentation
    # Rotate
    # image, mask = preprocess_utils.random_rotate(image, mask)
    # Resize
    image, mask = preprocess_utils.random_resize(
        image, mask, self._min_scale_factor, self._max_scale_factor,
        self._scale_factor_step_size)
    # Pad
    # Image is padded with mid-gray (128, 128, 128); the mask is padded
    # with 0 — NOTE(review): presumably 0 is the background/ignore class,
    # confirm against the label definition.
    image = preprocess_utils.pad(image, 0, 0, self._output_height,
                                 self._output_width,
                                 pad_value=(128, 128, 128))
    mask = preprocess_utils.pad(mask, 0, 0, self._output_height,
                                self._output_width, pad_value=0)
    # Crop
    image, mask = preprocess_utils.random_crop(image, mask,
                                               self._output_height,
                                               self._output_width)
    # Flip
    image, mask = preprocess_utils.random_flip(image, mask)
    # Round-trip through ndarray to guarantee a PIL image regardless of what
    # the augmentation helpers returned — assumes self._transforms expects a
    # PIL input; TODO confirm.
    image = PIL.Image.fromarray(np.array(image))
    mask = torch.LongTensor(np.array(mask))
    image_preprocessed = self._transforms(image)
    return image_preprocessed, mask
def testReturnCorrectCropOfSingleImage(self):
    """The produced crop must be an actual sub-window of the input image."""
    np.random.seed(0)
    height, width = 10, 20
    crop_height, crop_width = 2, 4
    source = np.random.randint(0, 256, size=(height, width, 3))
    placeholder = tf.placeholder(tf.int32, shape=(None, None, 3))
    [crop_op] = preprocess_utils.random_crop(
        [placeholder], crop_height, crop_width)
    with self.test_session():
        crop_result = crop_op.eval(feed_dict={placeholder: source})
        # Ensure we can find the cropped image in the original: the crop
        # must equal some (row, col)-anchored window of the source.
        is_found = any(
            np.isclose(source[row:row + crop_height,
                              col:col + crop_width, :],
                       crop_result).all()
            for col in range(0, width - crop_width + 1)
            for row in range(0, height - crop_height + 1))
        self.assertTrue(is_found)
def preprocess_image_and_label_yjy(image, label, crop_height, crop_width,
                                   min_scale_factor=1., max_scale_factor=1.,
                                   is_training=True):
    """Preprocesses image and label: random rotate, scale, pad, then crop.

    NOTE(review): unlike the sibling preprocess functions, every augmentation
    here runs regardless of `is_training` (the flag only gates the label
    check) — confirm this is intended for evaluation.

    Args:
        image: Input image tensor.
        label: Ground-truth tensor; shaped to [6] below, so presumably a
            fixed-length target vector rather than a dense mask — TODO
            confirm against the dataset definition.
        crop_height: Height of the output crop.
        crop_width: Width of the output crop.
        min_scale_factor: Minimum random-scale factor.
        max_scale_factor: Maximum random-scale factor.
        is_training: Whether preprocessing is for training.

    Returns:
        Tuple (original_image, processed_image, original_label, label).

    Raises:
        ValueError: If training and `label` is None.
    """
    if is_training and label is None:
        raise ValueError('During training, label must be provided.')
    # Keep reference to original image and label.
    original_image = image
    original_label = label
    processed_image = tf.cast(image, tf.float32)
    # Randomly rotate by a multiple of 90 degrees in [0, 359].
    # TODO: rotate factor should be determined by command line
    rotate_factor = preprocess_utils.get_rotate_scale(min_rotate_factor=0,
                                                      max_rotate_factor=359,
                                                      step_size=90)
    processed_image, label = preprocess_utils.randomly_rotate_image_and_label(
        processed_image, label, rotate_factor)
    processed_image.set_shape([None, None, 3])
    # Randomly scale (step size 0 => continuous uniform sampling).
    scale_factor = preprocess_utils.get_random_scale(min_scale_factor,
                                                     max_scale_factor, 0)
    processed_image, label = preprocess_utils.randomly_scale_image_and_label(
        processed_image, label, scale_factor)
    processed_image.set_shape([None, None, 3])
    # Pad image to have dimensions >= [crop_height, crop_width]
    image_shape = tf.shape(processed_image)
    image_height = image_shape[0]
    image_width = image_shape[1]
    target_height = image_height + tf.maximum(crop_height - image_height, 0)
    target_width = image_width + tf.maximum(crop_width - image_width, 0)
    # Pad image with mean pixel value.
    # NOTE(review): pads with zeros, not the commented mean — confirm the
    # network's input normalization expects zero padding.
    mean_pixel = tf.reshape([0., 0., 0.], [1, 1, 3])  # [127.5, 127.5, 127.5]
    processed_image = preprocess_utils.pad_to_bounding_box(
        processed_image, 0, 0, target_height, target_width, mean_pixel)
    # Crop to [crop_height, crop_width].
    processed_image, label = preprocess_utils.random_crop(
        processed_image, label, crop_height, crop_width)
    if label is not None:
        label.set_shape([6])
    if original_label is not None:
        original_label.set_shape([6])
    return original_image, processed_image, original_label, label
def testReturnDifferentCropAreasOnTwoEvals(self):
    """Evaluating the crop op twice should select two different windows."""
    tf.set_random_seed(0)
    crop_height, crop_width = 2, 3
    source = np.random.randint(0, 256, size=(100, 200, 3))
    placeholder = tf.placeholder(tf.int32, shape=(None, None, 3))
    [crop_op] = preprocess_utils.random_crop(
        [placeholder], crop_height, crop_width)
    with self.test_session():
        feed = {placeholder: source}
        first_crop = crop_op.eval(feed_dict=feed)
        second_crop = crop_op.eval(feed_dict=feed)
    # With a 100x200 source and a tiny crop, two identical windows would be
    # vanishingly unlikely under the fixed graph seed.
    self.assertFalse(np.isclose(first_crop, second_crop).all())
def testRandomCropMaintainsNumberOfChannels(self):
    """Cropping must keep the channel dimension of the input intact."""
    np.random.seed(0)
    crop_height, crop_width = 10, 20
    source = np.random.randint(0, 256, size=(100, 200, 3))
    tf.set_random_seed(37)
    placeholder = tf.placeholder(tf.int32, shape=(None, None, 3))
    [crop_op] = preprocess_utils.random_crop(
        [placeholder], crop_height, crop_width)
    with self.test_session():
        crop_result = crop_op.eval(feed_dict={placeholder: source})
    self.assertTupleEqual(crop_result.shape, (crop_height, crop_width, 3))
def testDieOnRandomCropWhenImagesWithDifferentWidth(self):
    """random_crop must reject a list of tensors whose widths disagree."""
    crop_height, crop_width = 2, 3
    first = tf.placeholder(tf.float32, name='image1', shape=(None, None, 3))
    second = tf.placeholder(tf.float32, name='image2', shape=(None, None, 1))
    crop_ops = preprocess_utils.random_crop(
        [first, second], crop_height, crop_width)
    with self.test_session() as sess:
        with self.assertRaises(errors.InvalidArgumentError):
            # Widths differ (5 vs 6): the op's shape check should fire.
            feed = {first: np.random.rand(4, 5, 3),
                    second: np.random.rand(4, 6, 1)}
            sess.run(crop_ops, feed_dict=feed)
def testDieOnRandomCropWhenCropSizeIsGreaterThanImage(self):
    """random_crop must fail when the crop exceeds the image dimensions."""
    crop_height, crop_width = 5, 9
    first = tf.placeholder(tf.float32, name='image1', shape=(None, None, 3))
    second = tf.placeholder(tf.float32, name='image2', shape=(None, None, 1))
    crop_ops = preprocess_utils.random_crop(
        [first, second], crop_height, crop_width)
    with self.test_session() as sess:
        with self.assertRaisesWithPredicateMatch(
                errors.InvalidArgumentError,
                'Crop size greater than the image size.'):
            # 4x5 inputs cannot host a 5x9 crop.
            feed = {first: np.random.rand(4, 5, 3),
                    second: np.random.rand(4, 5, 1)}
            sess.run(crop_ops, feed_dict=feed)
def testReturnConsistenCropsOfImagesInTheList(self):
    """Image and label in the same list must get the identical crop window."""
    tf.set_random_seed(0)
    height, width = 10, 20
    crop_height, crop_width = 2, 3
    # Give every pixel a unique value so matching windows imply matching
    # crop offsets.
    labels = np.linspace(0, height * width - 1, height * width)
    labels = labels.reshape((height, width, 1))
    image = np.tile(labels, (1, 1, 3))
    image_ph = tf.placeholder(tf.int32, shape=(None, None, 3))
    label_ph = tf.placeholder(tf.int32, shape=(None, None, 1))
    [image_crop_op, label_crop_op] = preprocess_utils.random_crop(
        [image_ph, label_ph], crop_height, crop_width)
    with self.test_session() as sess:
        image_crop, label_crop = sess.run(
            [image_crop_op, label_crop_op],
            feed_dict={image_ph: image, label_ph: labels})
        for channel in range(3):
            self.assertAllEqual(image_crop[:, :, channel],
                                label_crop.squeeze())
def preprocess_image_and_label(image, label, crop_height, crop_width,
                               min_resize_value=None, max_resize_value=None,
                               resize_factor=None, min_scale_factor=1.,
                               max_scale_factor=1., scale_factor_step_size=0,
                               ignore_label=255, is_training=True,
                               model_variant=None):
    """Preprocesses the image and label.

    Args:
      image: Input image.
      label: Ground truth annotation label.
      crop_height: The height value used to crop the image and label.
      crop_width: The width value used to crop the image and label.
      min_resize_value: Desired size of the smaller image side.
      max_resize_value: Maximum allowed size of the larger image side.
      resize_factor: Resized dimensions are multiple of factor plus one.
      min_scale_factor: Minimum scale factor value.
      max_scale_factor: Maximum scale factor value.
      scale_factor_step_size: The step size from min scale factor to max scale
        factor. The input is randomly scaled based on the value of
        (min_scale_factor, max_scale_factor, scale_factor_step_size).
      ignore_label: The label value which will be ignored for training and
        evaluation.
      is_training: If the preprocessing is used for training or not.
      model_variant: Model variant (string) for choosing how to mean-subtract
        the images. See feature_extractor.network_map for supported model
        variants.

    Returns:
      original_image: Original image (could be resized).
      processed_image: Preprocessed image.
      label: Preprocessed ground truth segmentation label.

    Raises:
      ValueError: Ground truth label not provided during training.
    """
    if is_training and label is None:
        raise ValueError('During training, label must be provided.')
    if model_variant is None:
        tf.logging.warning(
            'Default mean-subtraction is performed. Please specify '
            'a model_variant. See feature_extractor.network_map for '
            'supported model variants.')
    # Keep reference to original image.
    original_image = image
    processed_image = tf.cast(image, tf.float32)
    if label is not None:
        label = tf.cast(label, tf.int32)
    # Resize image and label to the desired range.
    if min_resize_value is not None or max_resize_value is not None:
        [processed_image, label] = (
            preprocess_utils.resize_to_range(image=processed_image,
                                             label=label,
                                             min_size=min_resize_value,
                                             max_size=max_resize_value,
                                             factor=resize_factor,
                                             align_corners=True))
        # The `original_image` becomes the resized image.
        original_image = tf.identity(processed_image)
    # Data augmentation by randomly scaling the inputs (training only here,
    # unlike the sibling variant of this function in this file).
    if is_training:
        scale = preprocess_utils.get_random_scale(min_scale_factor,
                                                  max_scale_factor,
                                                  scale_factor_step_size)
        processed_image, label = (
            preprocess_utils.randomly_scale_image_and_label(
                processed_image, label, scale))
        processed_image.set_shape([None, None, 3])
    # Pad image and label to have dimensions >= [crop_height, crop_width]
    image_shape = tf.shape(processed_image)
    image_height = image_shape[0]
    image_width = image_shape[1]
    target_height = image_height + tf.maximum(crop_height - image_height, 0)
    target_width = image_width + tf.maximum(crop_width - image_width, 0)
    # Pad image with mean pixel value so padding is neutral after
    # mean-subtraction; label is padded with ignore_label so padded pixels
    # do not contribute to the loss.
    mean_pixel = tf.reshape(feature_extractor.mean_pixel(model_variant),
                            [1, 1, 3])
    processed_image = preprocess_utils.pad_to_bounding_box(
        processed_image, 0, 0, target_height, target_width, mean_pixel)
    if label is not None:
        label = preprocess_utils.pad_to_bounding_box(label, 0, 0,
                                                     target_height,
                                                     target_width,
                                                     ignore_label)
    # Randomly crop the image and label.
    if is_training and label is not None:
        processed_image, label = preprocess_utils.random_crop(
            [processed_image, label], crop_height, crop_width)
    processed_image.set_shape([crop_height, crop_width, 3])
    if label is not None:
        label.set_shape([crop_height, crop_width, 1])
    if is_training:
        # Randomly left-right flip the image and label.
        processed_image, label, _ = preprocess_utils.flip_dim(
            [processed_image, label], _PROB_OF_FLIP, dim=1)
    return original_image, processed_image, label
def preprocess_image_and_label(image, label, crop_height, crop_width,
                               min_resize_value=None, max_resize_value=None,
                               resize_factor=None, min_scale_factor=1.,
                               max_scale_factor=1., scale_factor_step_size=0,
                               ignore_label=255, is_training=True,
                               model_variant=None):
    """Preprocesses the image and label.

    Args:
      image: Input image.
      label: Ground truth annotation label.
      crop_height: The height value used to crop the image and label.
      crop_width: The width value used to crop the image and label.
      min_resize_value: Desired size of the smaller image side.
      max_resize_value: Maximum allowed size of the larger image side.
      resize_factor: Resized dimensions are multiple of factor plus one.
      min_scale_factor: Minimum scale factor value.
      max_scale_factor: Maximum scale factor value.
      scale_factor_step_size: The step size from min scale factor to max scale
        factor. The input is randomly scaled based on the value of
        (min_scale_factor, max_scale_factor, scale_factor_step_size).
      ignore_label: The label value which will be ignored for training and
        evaluation.
      is_training: If the preprocessing is used for training or not.
      model_variant: Model variant (string) for choosing how to mean-subtract
        the images. See feature_extractor.network_map for supported model
        variants.

    Returns:
      original_image: Original image (could be resized).
      processed_image: Preprocessed image.
      label: Preprocessed ground truth segmentation label.

    Raises:
      ValueError: Ground truth label not provided during training.
    """
    if is_training and label is None:
        raise ValueError('During training, label must be provided.')
    if model_variant is None:
        tf.logging.warning('Default mean-subtraction is performed. Please '
                           'specify a model_variant. See '
                           'feature_extractor.network_map for supported '
                           'model variants.')
    # Keep reference to original image.
    original_image = image
    processed_image = tf.cast(image, tf.float32)
    if label is not None:
        label = tf.cast(label, tf.int32)
    # Resize image and label to the desired range.
    if min_resize_value is not None or max_resize_value is not None:
        [processed_image, label] = (
            preprocess_utils.resize_to_range(
                image=processed_image,
                label=label,
                min_size=min_resize_value,
                max_size=max_resize_value,
                factor=resize_factor,
                align_corners=True))
        # The `original_image` becomes the resized image.
        original_image = tf.identity(processed_image)
    # Data augmentation by randomly scaling the inputs.
    # NOTE(review): scaling runs even when is_training is False (the sibling
    # variant of this function guards it) — with the default
    # min=max_scale_factor=1. this is a no-op at eval time, but confirm
    # callers never pass scale factors for evaluation.
    scale = preprocess_utils.get_random_scale(
        min_scale_factor, max_scale_factor, scale_factor_step_size)
    processed_image, label = preprocess_utils.randomly_scale_image_and_label(
        processed_image, label, scale)
    processed_image.set_shape([None, None, 3])
    # Pad image and label to have dimensions >= [crop_height, crop_width]
    image_shape = tf.shape(processed_image)
    image_height = image_shape[0]
    image_width = image_shape[1]
    target_height = image_height + tf.maximum(crop_height - image_height, 0)
    target_width = image_width + tf.maximum(crop_width - image_width, 0)
    # Pad image with mean pixel value; pad label with ignore_label so padded
    # pixels are excluded from the loss.
    mean_pixel = tf.reshape(
        feature_extractor.mean_pixel(model_variant), [1, 1, 3])
    processed_image = preprocess_utils.pad_to_bounding_box(
        processed_image, 0, 0, target_height, target_width, mean_pixel)
    if label is not None:
        label = preprocess_utils.pad_to_bounding_box(
            label, 0, 0, target_height, target_width, ignore_label)
    # Randomly crop the image and label.
    if is_training and label is not None:
        processed_image, label = preprocess_utils.random_crop(
            [processed_image, label], crop_height, crop_width)
    processed_image.set_shape([crop_height, crop_width, 3])
    if label is not None:
        label.set_shape([crop_height, crop_width, 1])
    if is_training:
        # Randomly left-right flip the image and label.
        processed_image, label, _ = preprocess_utils.flip_dim(
            [processed_image, label], _PROB_OF_FLIP, dim=1)
    return original_image, processed_image, label
def preprocess_image_and_label_seq(
    image,
    label,
    prior_segs,
    crop_height,
    crop_width,
    channel,
    seq_length,
    label_for_each_frame,
    pre_crop_height=None,
    pre_crop_width=None,
    num_class=None,
    HU_window=None,
    min_resize_value=None,
    max_resize_value=None,
    resize_factor=None,
    min_scale_factor=1.,
    max_scale_factor=1.,
    scale_factor_step_size=0,
    ignore_label=255,
    rotate_angle=None,
    is_training=True,
    model_variant=None,
):
    """Preprocesses the image and label.

    Sequence variant: image (and label) tensors carry seq_length * channel
    channels, and an optional `prior_segs` tensor is carried through the same
    geometric transforms (crop/resize/scale/pad/flip).

    Args:
      image: Input image.
      label: Ground truth annotation label.
      prior_segs: Optional prior segmentation tensor transformed alongside
        the image.
      crop_height: The height value used to crop the image and label.
      crop_width: The width value used to crop the image and label.
      channel: Channels per frame; combined with seq_length to set shapes.
      seq_length: Number of frames stacked in the channel dimension.
      label_for_each_frame: Unused here except in commented-out code —
        NOTE(review): confirm whether per-frame labels are still intended.
      pre_crop_height: Optional crop height applied before resizing.
      pre_crop_width: Optional crop width applied before resizing.
      num_class: Unused except in commented-out shape-setting code.
      HU_window: Optional Hounsfield-unit window used to convert CT values
        to pixel values — presumably (min, max); TODO confirm.
      min_resize_value: Desired size of the smaller image side.
      max_resize_value: Maximum allowed size of the larger image side.
      resize_factor: Resized dimensions are multiple of factor plus one.
      min_scale_factor: Minimum scale factor value.
      max_scale_factor: Maximum scale factor value.
      scale_factor_step_size: The step size from min scale factor to max scale
        factor. The input is randomly scaled based on the value of
        (min_scale_factor, max_scale_factor, scale_factor_step_size).
      ignore_label: The label value which will be ignored for training and
        evaluation. NOTE(review): labels are padded with 0 below, not
        ignore_label — confirm intended.
      rotate_angle: Placeholder for a random-rotate step (not implemented).
      is_training: If the preprocessing is used for training or not.
      model_variant: Model variant (string) for choosing how to mean-subtract
        the images. See feature_extractor.network_map for supported model
        variants.

    Returns:
      original_image: Original image (could be resized).
      processed_image: Preprocessed image.
      label: Preprocessed ground truth segmentation label.
      original_label: The label as passed in.
      prior_segs: The transformed prior segmentation (or None).

    Raises:
      ValueError: Ground truth label not provided during training.
    """
    if is_training and label is None:
        raise ValueError('During training, label must be provided.')
    # if (prior_num_slice is not None) != (prior_imgs is not None or prior_segs is not None):
    #     raise ValueError('prior_num_slice should exist when import prior and vice versa')
    if model_variant is None:
        tf.logging.warning(
            'Default mean-subtraction is performed. Please specify '
            'a model_variant. See feature_extractor.network_map for '
            'supported model variants.')
    # Keep reference to original image.
    original_image = image
    original_label = label
    # sample prior if exist
    # TODO: sample problem (consider z gt)
    # Data type and value conversion.
    if HU_window is not None:
        image = preprocess_utils.HU_to_pixelvalue(image, HU_window)
    processed_image = tf.cast(image, tf.float32)
    if label is not None:
        label = tf.cast(label, tf.int32)
    # Optional pre-crop before resizing (training only).
    if pre_crop_height is not None and pre_crop_width is not None:
        if is_training and label is not None:
            if prior_segs is not None:
                processed_image, label, prior_segs = (
                    preprocess_utils.random_crop(
                        [processed_image, label, prior_segs],
                        pre_crop_height, pre_crop_width))
            else:
                processed_image, label = preprocess_utils.random_crop(
                    [processed_image, label], pre_crop_height, pre_crop_width)
    # Resize image and label to the desired range.
    # TODO: interface for this func.
    if min_resize_value or max_resize_value:
        [processed_image, label] = (
            preprocess_utils.resize_to_range(image=processed_image,
                                             label=label,
                                             min_size=min_resize_value,
                                             max_size=max_resize_value,
                                             factor=resize_factor,
                                             align_corners=True))
        # The `original_image` becomes the resized image.
        original_image = tf.identity(processed_image)
        if prior_segs is not None:
            # prior_segs = tf.cast(prior_segs, tf.int32)
            prior_segs, _ = (preprocess_utils.resize_to_range(
                image=prior_segs,
                min_size=min_resize_value,
                max_size=max_resize_value,
                factor=resize_factor,
                align_corners=True))
    # Data augmentation by randomly scaling the inputs.
    if is_training:
        scale = preprocess_utils.get_random_scale(min_scale_factor,
                                                  max_scale_factor,
                                                  scale_factor_step_size)
        processed_image, label = (
            preprocess_utils.randomly_scale_image_and_label(
                processed_image, label, scale))
        processed_image.set_shape([None, None, seq_length * channel])
        if prior_segs is not None:
            prior_segs = preprocess_utils.scale_image_data(prior_segs, scale)
    # Pad image and label to have dimensions >= [crop_height, crop_width]
    image_shape = tf.shape(processed_image)
    # image_shape = processed_image.get_shape().as_list()
    image_height = image_shape[0]
    image_width = image_shape[1]
    target_height = image_height + tf.maximum(crop_height - image_height, 0)
    target_width = image_width + tf.maximum(crop_width - image_width, 0)
    # Pad image with mean pixel value.
    # TODO: check padding value
    # mean_pixel = tf.reshape(
    #     features_extractor.mean_pixel(model_variant), [1, 1, 3])
    mean_pixel = 0.0
    processed_image = preprocess_utils.pad_to_bounding_box(
        processed_image, 0, 0, target_height, target_width, mean_pixel)
    if label is not None:
        label = preprocess_utils.pad_to_bounding_box(label, 0, 0,
                                                     target_height,
                                                     target_width, 0)
    if prior_segs is not None:
        prior_segs = preprocess_utils.pad_to_bounding_box(
            prior_segs, 0, 0, target_height, target_width, 0)
    # Randomly crop the image and label.
    # TODO: Do it in the right way
    # TODO: offset_height, offset_width for input
    # processed_image, label = preprocess_utils.random_crop(
    #     [processed_image, label], crop_height, crop_width)
    if is_training and label is not None:
        if prior_segs is not None:
            processed_image, label, prior_segs = preprocess_utils.random_crop(
                [processed_image, label, prior_segs], crop_height, crop_width)
        else:
            processed_image, label = preprocess_utils.random_crop(
                [processed_image, label], crop_height, crop_width)
    processed_image.set_shape([crop_height, crop_width, seq_length * channel])
    if label is not None:
        label.set_shape([crop_height, crop_width, seq_length * channel])
        # if label_for_each_frame:
        #     label.set_shape([crop_height, crop_width, seq_length*channel])
        # else:
        #     label.set_shape([crop_height, crop_width, 1])
    if prior_segs is not None:
        # Bilinearly resize the prior to the crop size (batch dim added and
        # removed around the resize op).
        prior_segs = tf.squeeze(tf.image.resize_bilinear(
            tf.expand_dims(prior_segs, axis=0),
            [crop_height, crop_width]), axis=0)
        # prior_segs.set_shape([crop_height,crop_width,num_class])
    if is_training:
        # Randomly left-right flip the image and label.
        if prior_segs is not None:
            processed_image, label, prior_segs, _ = preprocess_utils.flip_dim(
                [processed_image, label, prior_segs], _PROB_OF_FLIP, dim=1)
        else:
            processed_image, label, _ = preprocess_utils.flip_dim(
                [processed_image, label], _PROB_OF_FLIP, dim=1)
    # TODO: complete random rotate method
    # Randomly rotate the image and label.
    if rotate_angle is not None:
        pass
        # processed_image, label, _ = preprocess_utils.random_rotate([processed_image, label], _PROB_OF_ROT, rotate_angle)
    return original_image, processed_image, label, original_label, prior_segs
def preprocess_image_and_label(image, label, crop_height, crop_width,
                               min_resize_value=None, max_resize_value=None,
                               resize_factor=None, min_scale_factor=1.,
                               max_scale_factor=1., scale_factor_step_size=0,
                               ignore_label=255, is_training=True,
                               model_variant=None, strong_weak=False,
                               output_valid=False):
    """Preprocesses the image and label.

    Args:
      image: Input image.
      label: Ground truth annotation label.
      crop_height: The height value used to crop the image and label.
      crop_width: The width value used to crop the image and label.
      min_resize_value: Desired size of the smaller image side.
      max_resize_value: Maximum allowed size of the larger image side.
      resize_factor: Resized dimensions are multiple of factor plus one.
      min_scale_factor: Minimum scale factor value.
      max_scale_factor: Maximum scale factor value.
      scale_factor_step_size: The step size from min scale factor to max scale
        factor. The input is randomly scaled based on the value of
        (min_scale_factor, max_scale_factor, scale_factor_step_size).
      ignore_label: The label value which will be ignored for training and
        evaluation.
      is_training: If the preprocessing is used for training or not.
      model_variant: Model variant (string) for choosing how to mean-subtract
        the images. See feature_extractor.network_map for supported model
        variants.
      strong_weak: Generate a pair of (strong, weak) augmented images for
        consistency. Also, need to get the valid region excluding padding.
      output_valid: Output valid region excluding padding or not.

    Returns:
      original_image: Original image (could be resized).
      processed_image: Preprocessed image.
      label: Preprocessed ground truth segmentation label.
      When training with strong_weak, additionally returns the strongly
      augmented image and the valid mask; with output_valid, additionally
      returns the valid mask.

    Raises:
      ValueError: Ground truth label not provided during training.
    """
    if is_training and label is None:
        raise ValueError('During training, label must be provided.')
    if model_variant is None:
        tf.logging.warning(
            'Default mean-subtraction is performed. Please specify '
            'a model_variant. See feature_extractor.network_map for '
            'supported model variants.')
    # Keep reference to original image.
    original_image = image
    processed_image = tf.cast(image, tf.float32)
    if label is not None:
        label = tf.cast(label, tf.int32)
    # Resize image and label to the desired range.
    if min_resize_value or max_resize_value:
        [processed_image, label] = (
            preprocess_utils.resize_to_range(image=processed_image,
                                             label=label,
                                             min_size=min_resize_value,
                                             max_size=max_resize_value,
                                             factor=resize_factor,
                                             align_corners=True))
        # The `original_image` becomes the resized image.
        original_image = tf.identity(processed_image)
    # Data augmentation by randomly scaling the inputs.
    if is_training:
        scale = preprocess_utils.get_random_scale(min_scale_factor,
                                                  max_scale_factor,
                                                  scale_factor_step_size)
        processed_image, label = (
            preprocess_utils.randomly_scale_image_and_label(
                processed_image, label, scale))
        processed_image.set_shape([None, None, 3])
    # Pad image and label to have dimensions >= [crop_height, crop_width]
    image_shape = tf.shape(processed_image)
    image_height = image_shape[0]
    image_width = image_shape[1]
    target_height = image_height + tf.maximum(crop_height - image_height, 0)
    target_width = image_width + tf.maximum(crop_width - image_width, 0)
    if strong_weak:
        # Color distortion (operates in [0, 1]): the strong branch is the
        # weak image plus color jitter; geometry stays shared with the weak
        # branch via the same pad/crop/flip below.
        strong = processed_image / 255.
        strong = preprocess_utils.random_color_jitter(strong, _PROB_OF_JITTER)
        strong = strong * 255.
    # Pad image with mean pixel value; label is padded with ignore_label so
    # padding does not contribute to the loss.
    mean_pixel = tf.reshape(feature_extractor.mean_pixel(model_variant),
                            [1, 1, 3])
    processed_image = preprocess_utils.pad_to_bounding_box(
        processed_image, 0, 0, target_height, target_width, mean_pixel)
    if strong_weak:
        strong = preprocess_utils.pad_to_bounding_box(strong, 0, 0,
                                                      target_height,
                                                      target_width,
                                                      mean_pixel)
    if label is not None:
        label = preprocess_utils.pad_to_bounding_box(label, 0, 0,
                                                     target_height,
                                                     target_width,
                                                     ignore_label)
    # Randomly crop the image and label (and the strong branch with the
    # same crop window).
    if is_training and label is not None:
        if not strong_weak:
            processed_image, label = preprocess_utils.random_crop(
                [processed_image, label], crop_height, crop_width)
        else:
            processed_image, label, strong = preprocess_utils.random_crop(
                [processed_image, label, strong], crop_height, crop_width)
    processed_image.set_shape([crop_height, crop_width, 3])
    if label is not None:
        label.set_shape([crop_height, crop_width, 1])
    if not is_training and output_valid:
        # Construct the valid mask excluding the boundary padding.
        # NOTE(review): the mask marks pixels with coordinates below the
        # pre-pad image size — assumes content is anchored at offset (0, 0);
        # confirm this matches the padding above.
        xs, ys = tf.meshgrid(tf.range(0, crop_width), tf.range(0, crop_height))
        valid_x = tf.cast(tf.less(xs, image_width), tf.int32)
        valid_y = tf.cast(tf.less(ys, image_height), tf.int32)
        valid = tf.reshape(valid_x * valid_y, [crop_height, crop_width, 1])
    if is_training:
        if strong_weak or output_valid:
            # Construct the valid mask excluding the boundary padding
            xs, ys = tf.meshgrid(tf.range(0, crop_width),
                                 tf.range(0, crop_height))
            valid_x = tf.cast(tf.less(xs, image_width), tf.int32)
            valid_y = tf.cast(tf.less(ys, image_height), tf.int32)
            valid = tf.reshape(valid_x * valid_y,
                               [crop_height, crop_width, 1])
        if strong_weak:
            # Ignore CutOut region
            strong, label, valid = preprocess_utils.cutout_with_mask(
                strong, label, pad_size=50, mean_pixel=mean_pixel,
                ignore_label=ignore_label, valid=valid)
            # Randomly left-right flip the image and label (same flip for
            # weak, strong, valid, and label).
            processed_image, strong, valid, label, _ = (
                preprocess_utils.flip_dim(
                    [processed_image, strong, valid, label],
                    _PROB_OF_FLIP, dim=1))
            return original_image, processed_image, label, strong, valid
        # Randomly left-right flip the image and label.
        if not output_valid:
            processed_image, label, _ = preprocess_utils.flip_dim(
                [processed_image, label], _PROB_OF_FLIP, dim=1)
        else:
            processed_image, valid, label, _ = preprocess_utils.flip_dim(
                [processed_image, valid, label], _PROB_OF_FLIP, dim=1)
    if not output_valid:
        return original_image, processed_image, label
    else:
        return original_image, processed_image, label, valid