def preprocess_example(self, example, mode, hparams):
  """Rescales the input to every resolution and packs them into one tensor.

  Args:
    example: dict with an "inputs" image tensor.
    mode: unused training/eval/infer mode flag.
    hparams: hyperparameters; reads `resolutions` and, optionally,
      `resize_method`.

  Returns:
    The example dict with "inputs" replaced by the packed multiscale tensor.
  """
  image = example["inputs"]
  # Resolve the resize method. Default to "BICUBIC" when hparams does not
  # define one; map the string onto tf.image.ResizeMethod when it names one
  # of TensorFlow's pre-implemented methods, otherwise pass it through
  # unchanged (e.g. the custom "DILATED" mode handled below).
  method_name = getattr(hparams, "resize_method", "BICUBIC")
  method = getattr(tf.image.ResizeMethod, method_name, method_name)

  if method == "DILATED":
    scaled_images = image_utils.make_multiscale_dilated(
        image, hparams.resolutions, num_channels=self.num_channels)
  else:
    scaled_images = image_utils.make_multiscale(
        image, hparams.resolutions, resize_method=method,
        num_channels=self.num_channels)

  # Pack the tuple of scaled images into one tensor by forcing every
  # resolution to share the same column count (the highest resolution).
  # TODO(avaswani, trandustin): We should create tuples because this will
  # not work if height*width of low res < width of high res.
  highest_res = hparams.resolutions[-1]
  packed = []
  for scaled, res in zip(scaled_images, hparams.resolutions):
    packed.append(tf.reshape(
        scaled, [res**2 // highest_res, highest_res, self.num_channels]))
  example["inputs"] = tf.concat(packed, axis=0)
  return example
def preprocess_example(self, example, mode, hparams):
  """Builds a packed multiscale tensor from the input image.

  Args:
    example: dict with an "inputs" image tensor.
    mode: unused training/eval/infer mode flag.
    hparams: hyperparameters; reads `resolutions` and, optionally,
      `resize_method`.

  Returns:
    The example dict with "inputs" replaced by the packed multiscale tensor.
  """
  image = example["inputs"]
  # Resolve the resize method. Default to "BICUBIC" when hparams does not
  # define one; map the string onto tf.image.ResizeMethod when it names one
  # of TensorFlow's pre-implemented methods, otherwise pass it through
  # unchanged (e.g. the custom "DILATED" mode handled below).
  method_name = getattr(hparams, "resize_method", "BICUBIC")
  method = getattr(tf.image.ResizeMethod, method_name, method_name)

  highest_res = hparams.resolutions[-1]
  if method == "DILATED":
    # Resize the image first so that dilated subsampling divides evenly.
    resized = image_utils.resize_by_area(image, highest_res)
    scaled_images = image_utils.make_multiscale_dilated(
        resized, hparams.resolutions, num_channels=self.num_channels)
  else:
    scaled_images = image_utils.make_multiscale(
        image, hparams.resolutions, resize_method=method,
        num_channels=self.num_channels)

  # Pack the tuple of scaled images into one tensor by forcing every
  # resolution to share the same column count (the highest resolution).
  packed = []
  for scaled, res in zip(scaled_images, hparams.resolutions):
    packed.append(tf.reshape(
        scaled, [res**2 // highest_res, highest_res, self.num_channels]))
  example["inputs"] = tf.concat(packed, axis=0)
  return example
def preprocess_example(self, example, mode, hparams):
  """Crops CelebA borders and builds a packed multiscale target tensor.

  Args:
    example: dict with an "inputs" image tensor (CelebA 218x178 layout —
      presumably; confirm against the dataset generator).
    mode: unused training/eval/infer mode flag.
    hparams: hyperparameters; reads `resolutions` and, optionally,
      `resize_method`.

  Returns:
    The example dict with "inputs" set to the cropped image and "targets"
    set to the packed multiscale tensor.
  """
  image = example["inputs"]
  # Resolve the resize method. Default to "BICUBIC" when hparams does not
  # define one; map the string onto tf.image.ResizeMethod when it names one
  # of TensorFlow's pre-implemented methods, otherwise pass it through
  # unchanged (e.g. the custom "DILATED" mode handled below).
  method_name = getattr(hparams, "resize_method", "BICUBIC")
  method = getattr(tf.image.ResizeMethod, method_name, method_name)

  # Trim CelebA borders: 40 pixels off top and bottom, 20 pixels off each
  # side, leaving a (218-80) x (178-40) crop.
  image = tf.image.crop_to_bounding_box(image, 40, 20, 218 - 80, 178 - 40)

  highest_res = hparams.resolutions[-1]
  if method == "DILATED":
    # Resize the image first so that dilated subsampling divides evenly.
    resized = image_utils.resize_by_area(image, highest_res)
    scaled_images = image_utils.make_multiscale_dilated(
        resized, hparams.resolutions, num_channels=self.num_channels)
  else:
    scaled_images = image_utils.make_multiscale(
        image, hparams.resolutions, resize_method=method,
        num_channels=self.num_channels)

  # Keep the cropped image as inputs; pack the tuple of scaled images into
  # the targets tensor by forcing every resolution to share the same column
  # count (the highest resolution).
  example["inputs"] = image
  packed = []
  for scaled, res in zip(scaled_images, hparams.resolutions):
    packed.append(tf.reshape(
        scaled, [res**2 // highest_res, highest_res, self.num_channels]))
  example["targets"] = tf.concat(packed, axis=0)
  return example
def preprocess_example(self, example, mode, hparams):
  """Produces a single tensor containing the image at every resolution.

  Args:
    example: dict with an "inputs" image tensor.
    mode: unused training/eval/infer mode flag.
    hparams: hyperparameters; reads `resolutions` and, optionally,
      `resize_method`.

  Returns:
    The example dict with "inputs" replaced by the packed multiscale tensor.
  """
  image = example["inputs"]
  # Pick the resize method, defaulting to "BICUBIC" if hparams is silent.
  # Strings naming a tf.image.ResizeMethod are converted to the enum value;
  # anything else (such as "DILATED") is kept as-is and handled below.
  chosen = getattr(tf.image.ResizeMethod,
                   getattr(hparams, "resize_method", "BICUBIC"),
                   getattr(hparams, "resize_method", "BICUBIC"))

  if chosen == "DILATED":
    scaled_images = image_utils.make_multiscale_dilated(
        image, hparams.resolutions, num_channels=self.num_channels)
  else:
    scaled_images = image_utils.make_multiscale(
        image, hparams.resolutions, resize_method=chosen,
        num_channels=self.num_channels)

  # Flatten each scale to share the highest resolution's column count, then
  # stack all scales along the row axis into one tensor.
  # TODO(avaswani, trandustin): We should create tuples because this will
  # not work if height*width of low res < width of high res.
  top = hparams.resolutions[-1]
  example["inputs"] = tf.concat(
      [tf.reshape(img, [res**2 // top, top, self.num_channels])
       for img, res in zip(scaled_images, hparams.resolutions)],
      axis=0)
  return example
def testMakeMultiscaleDilatedDivisible(self):
  """Resolutions that evenly divide the input produce exact output shapes."""
  source = tf.random_normal([256, 256, 3])
  resolutions = [8, 16, 64, 256]
  outputs = image_utils.make_multiscale_dilated(source, resolutions)
  # Each divisible resolution yields a square image at exactly that size.
  for output, res in zip(outputs, resolutions):
    self.assertEqual(output.shape, (res, res, 3))
def testMakeMultiscaleDilatedLarger(self):
  """A resolution larger than the input raises (stride would be zero)."""
  image = tf.random_normal([256, 256, 3])
  resolutions = [257]
  # assertRaisesRegex replaces the deprecated assertRaisesRegexp alias,
  # which was removed in Python 3.12; behavior is otherwise identical.
  with self.assertRaisesRegex(ValueError, "strides.* must be non-zero"):
    _ = image_utils.make_multiscale_dilated(image, resolutions)
def testMakeMultiscaleDilatedIndivisible(self):
  """An indivisible resolution falls back to the full input size."""
  source = tf.random_normal([256, 256, 3])
  # 255 does not divide 256, so dilated subsampling cannot shrink the
  # image; the output keeps the original 256x256 shape.
  outputs = image_utils.make_multiscale_dilated(source, [255])
  self.assertEqual(outputs[0].shape, (256, 256, 3))