Example #1
    def preprocess_example(self, example, mode, hparams):
        image = example["inputs"]

        if hasattr(hparams, "resize_method"):
            method = getattr(tf.image.ResizeMethod, hparams.resize_method)
        else:  # default
            method = tf.image.ResizeMethod.BICUBIC

        scaled_images = image_utils.make_multiscale(
            image,
            hparams.resolutions,
            resize_method=method,
            num_channels=self.num_channels)

        highest_res = hparams.resolutions[-1]
        # Pack tuple of scaled images into one tensor. We do this by enforcing the
        # columns to match for every resolution.
        # TODO(avaswani, trandustin): We should create tuples because this will not
        # work if height*width of low res < width of high res
        example["inputs"] = tf.concat([
            tf.reshape(scaled_image,
                       [res**2 // highest_res, highest_res, self.num_channels])
            for scaled_image, res in zip(scaled_images, hparams.resolutions)
        ],
                                      axis=0)
        return example
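The reshape-and-concat at the end of this example is easier to follow with concrete numbers. Below is a minimal sketch (the resolutions and channel count are illustrative, not taken from any real hparams): each res x res image is flattened into rows of width highest_res, so every piece has the same number of columns and can be stacked along axis 0.

import tensorflow as tf

resolutions = [8, 16, 32]          # illustrative stand-in for hparams.resolutions
highest_res = resolutions[-1]
num_channels = 3
scaled_images = [tf.zeros([res, res, num_channels]) for res in resolutions]

packed = tf.concat([
    tf.reshape(img, [res**2 // highest_res, highest_res, num_channels])
    for img, res in zip(scaled_images, resolutions)
], axis=0)
# Pieces of shape (2, 32, 3), (8, 32, 3) and (32, 32, 3) stack into (42, 32, 3).
# The TODO above bites when res**2 < highest_res (e.g. res=4, highest_res=32):
# 16 pixels cannot fill even one row of width 32, so the reshape fails.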
Example #2
    def preprocess_example(self, example, mode, hparams):
        image = example["inputs"]
        # Get resize method. Include a default if not specified, or if it's not in
        # TensorFlow's collection of pre-implemented resize methods.
        resize_method = getattr(hparams, "resize_method", "BICUBIC")
        resize_method = getattr(tf.image.ResizeMethod, resize_method,
                                resize_method)

        if resize_method == "DILATED":
            scaled_images = image_utils.make_multiscale_dilated(
                image, hparams.resolutions, num_channels=self.num_channels)
        else:
            scaled_images = image_utils.make_multiscale(
                image,
                hparams.resolutions,
                resize_method=resize_method,
                num_channels=self.num_channels)

        # Pack tuple of scaled images into one tensor. We do this by enforcing the
        # columns to match for every resolution.
        # TODO(avaswani, trandustin): We should create tuples because this will not
        # work if height*width of low res < width of high res
        highest_res = hparams.resolutions[-1]
        example["inputs"] = tf.concat([
            tf.reshape(scaled_image,
                       [res**2 // highest_res, highest_res, self.num_channels])
            for scaled_image, res in zip(scaled_images, hparams.resolutions)
        ],
                                      axis=0)
        return example
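The two chained getattr calls in this example do all of the dispatching: the first reads the hparam and falls back to "BICUBIC" when it is absent, and the second maps a known name onto the matching tf.image.ResizeMethod attribute while passing unknown names (such as "DILATED") through unchanged, which is what lets the string comparison above select the dilated path. A small sketch, using a hypothetical stand-in for the real hparams object:

import tensorflow as tf

class _HParams(object):  # hypothetical stand-in for hparams
    resize_method = "DILATED"

hparams = _HParams()
resize_method = getattr(hparams, "resize_method", "BICUBIC")
resize_method = getattr(tf.image.ResizeMethod, resize_method, resize_method)
# "DILATED" is not an attribute of tf.image.ResizeMethod, so it stays a string;
# "BICUBIC" or "BILINEAR" would resolve to the corresponding ResizeMethod constant.
print(resize_method)  # -> "DILATED"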
Example #3
    def preprocess_example(self, example, mode, hparams):
        image = example["inputs"]
        if hasattr(hparams, "resize_method"):
            method = getattr(tf.image.ResizeMethod, hparams.resize_method)
        else:  # default
            method = tf.image.ResizeMethod.BICUBIC

        # Remove boundaries in CelebA images. Remove 40 pixels each side
        # vertically and 20 pixels each side horizontally.
        image = tf.image.crop_to_bounding_box(image, 40, 20, 218 - 80,
                                              178 - 40)

        scaled_images = image_utils.make_multiscale(
            image,
            hparams.resolutions,
            resize_method=method,
            num_channels=self.num_channels)

        # Pack tuple of scaled images into one tensor. We do this by enforcing the
        # columns to match for every resolution.
        highest_res = hparams.resolutions[-1]
        example["inputs"] = image
        example["targets"] = tf.concat([
            tf.reshape(scaled_image,
                       [res**2 // highest_res, highest_res, self.num_channels])
            for scaled_image, res in zip(scaled_images, hparams.resolutions)
        ],
                                       axis=0)
        return example
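The bounding-box crop in this example assumes CelebA's raw 218 x 178 (height x width) images. With a placeholder tensor of that size, the arithmetic below removes 40 pixels from the top and bottom and 20 pixels from each side, leaving a centered 138 x 138 crop:

import tensorflow as tf

image = tf.zeros([218, 178, 3])  # CelebA raw size, purely for illustration
cropped = tf.image.crop_to_bounding_box(image, 40, 20, 218 - 80, 178 - 40)
# offset_height=40, offset_width=20, target_height=138, target_width=138
print(cropped.shape)  # -> (138, 138, 3)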
Example #4
  def preprocess_example(self, example, mode, hparams):
    image = example["inputs"]
    # Get resize method. Include a default if not specified, or if it's not in
    # TensorFlow's collection of pre-implemented resize methods.
    resize_method = getattr(hparams, "resize_method", "BICUBIC")
    resize_method = getattr(tf.image.ResizeMethod, resize_method, resize_method)

    highest_res = hparams.resolutions[-1]
    if resize_method == "DILATED":
      # Resize image so that dilated subsampling is properly divisible.
      scaled_image = image_utils.resize_by_area(image, highest_res)
      scaled_images = image_utils.make_multiscale_dilated(
          scaled_image, hparams.resolutions, num_channels=self.num_channels)
    else:
      scaled_images = image_utils.make_multiscale(
          image, hparams.resolutions,
          resize_method=resize_method, num_channels=self.num_channels)

    # Pack tuple of scaled images into one tensor. We do this by enforcing the
    # columns to match for every resolution.
    example["inputs"] = tf.concat([
        tf.reshape(scaled_image,
                   [res**2 // highest_res, highest_res, self.num_channels])
        for scaled_image, res in zip(scaled_images, hparams.resolutions)],
                                  axis=0)
    return example
Example #5
  def preprocess_example(self, example, mode, hparams):
    image = example["inputs"]
    # Get resize method. Include a default if not specified, or if it's not in
    # TensorFlow's collection of pre-implemented resize methods.
    resize_method = getattr(hparams, "resize_method", "BICUBIC")
    resize_method = getattr(tf.image.ResizeMethod, resize_method, resize_method)

    # Remove boundaries in CelebA images. Remove 40 pixels each side
    # vertically and 20 pixels each side horizontally.
    image = tf.image.crop_to_bounding_box(image, 40, 20, 218 - 80, 178 - 40)

    highest_res = hparams.resolutions[-1]
    if resize_method == "DILATED":
      # Resize image so that dilated subsampling is properly divisible.
      scaled_image = image_utils.resize_by_area(image, highest_res)
      scaled_images = image_utils.make_multiscale_dilated(
          scaled_image, hparams.resolutions, num_channels=self.num_channels)
    else:
      scaled_images = image_utils.make_multiscale(
          image, hparams.resolutions,
          resize_method=resize_method, num_channels=self.num_channels)

    # Pack tuple of scaled images into one tensor. We do this by enforcing the
    # columns to match for every resolution.
    example["inputs"] = image
    example["targets"] = tf.concat([
        tf.reshape(scaled_image,
                   [res**2 // highest_res, highest_res, self.num_channels])
        for scaled_image, res in zip(scaled_images, hparams.resolutions)],
                                   axis=0)
    return example
Example #6
  def preprocess_example(self, example, mode, hparams):
    image = example["inputs"]
    # Get resize method. Include a default if not specified, or if it's not in
    # TensorFlow's collection of pre-implemented resize methods.
    resize_method = getattr(hparams, "resize_method", "BICUBIC")
    resize_method = getattr(tf.image.ResizeMethod, resize_method, resize_method)

    if resize_method == "DILATED":
      scaled_images = image_utils.make_multiscale_dilated(
          image, hparams.resolutions, num_channels=self.num_channels)
    else:
      scaled_images = image_utils.make_multiscale(
          image, hparams.resolutions,
          resize_method=resize_method, num_channels=self.num_channels)

    # Pack tuple of scaled images into one tensor. We do this by enforcing the
    # columns to match for every resolution.
    # TODO(avaswani, trandustin): We should create tuples because this will not
    # work if height*width of low res < width of high res
    highest_res = hparams.resolutions[-1]
    example["inputs"] = tf.concat([
        tf.reshape(scaled_image,
                   [res**2 // highest_res, highest_res, self.num_channels])
        for scaled_image, res in zip(scaled_images, hparams.resolutions)],
                                  axis=0)
    return example
Example #7
  def testMakeMultiscaleDivisible(self):
    image = tf.random_normal([256, 256, 3])
    resolutions = [8, 16, 64, 256]
    scaled_images = image_utils.make_multiscale(image, resolutions)
    self.assertEqual(scaled_images[0].shape, (8, 8, 3))
    self.assertEqual(scaled_images[1].shape, (16, 16, 3))
    self.assertEqual(scaled_images[2].shape, (64, 64, 3))
    self.assertEqual(scaled_images[3].shape, (256, 256, 3))
Example #8
  def testMakeMultiscaleDivisible(self):
    image = tf.random_normal([256, 256, 3])
    resolutions = [8, 16, 64, 256]
    scaled_images = image_utils.make_multiscale(image, resolutions)
    self.assertEqual(scaled_images[0].shape, (8, 8, 3))
    self.assertEqual(scaled_images[1].shape, (16, 16, 3))
    self.assertEqual(scaled_images[2].shape, (64, 64, 3))
    self.assertEqual(scaled_images[3].shape, (256, 256, 3))
Example #9
  def preprocess_example(self, example, mode, hparams):
    image = example["inputs"]
    scaled_images = image_utils.make_multiscale(
        image, hparams.resolutions, num_channels=self.num_channels)

    # Pack tuple of scaled images into one tensor. We do this by enforcing the
    # columns to match for every resolution.
    highest_res = hparams.resolutions[-1]
    example["inputs"] = tf.concat([
        tf.reshape(scaled_image,
                   [res**2 // highest_res, highest_res, self.num_channels])
        for scaled_image, res in zip(scaled_images, hparams.resolutions)],
                                  axis=0)
    return example
Example #10
  def testMakeMultiscaleLarger(self):
    image = tf.random_normal([256, 256, 3])
    resolutions = [257]
    scaled_images = image_utils.make_multiscale(image, resolutions)
    self.assertEqual(scaled_images[0].shape, (257, 257, 3))
Example #11
  def testMakeMultiscaleLarger(self):
    image = tf.random_normal([256, 256, 3])
    resolutions = [257]
    scaled_images = image_utils.make_multiscale(image, resolutions)
    self.assertEqual(scaled_images[0].shape, (257, 257, 3))