Code Example #1
def path_to_image(path, image_size, num_channels, interpolation):
  img = io_ops.read_file(path)
  img = image_ops.decode_image(
      img, channels=num_channels, expand_animations=False)
  img = image_ops.resize_images_v2(img, image_size, method=interpolation)
  img.set_shape((image_size[0], image_size[1], num_channels))
  return img
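
The helper above relies on TensorFlow's internal `io_ops` and `image_ops` modules. For reference, here is a minimal sketch of the same load-decode-resize sequence using only the public TF 2.x API; the file name 'cat.jpg' and the 180x180 target size are made up for illustration:

import tensorflow as tf

def path_to_image_public(path, image_size=(180, 180), num_channels=3):
  # Public-API equivalents: tf.io.read_file / tf.image.decode_image / tf.image.resize.
  img = tf.io.read_file(path)
  img = tf.image.decode_image(img, channels=num_channels, expand_animations=False)
  img = tf.image.resize(img, image_size, method='bilinear')
  img.set_shape((image_size[0], image_size[1], num_channels))
  return img

# img = path_to_image_public('cat.jpg')  # hypothetical file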
Code Example #2
import numpy as np
import tensorflow as tf
from django.core.files.storage import FileSystemStorage
from django.shortcuts import render
from tensorflow.python.ops import image_ops, io_ops

# `model` is assumed to be a trained Keras model loaded elsewhere in this module.


def predict(request):

    image = request.FILES['image-file']
    fs = FileSystemStorage()
    imagepath = fs.save(image.name, image)
    imagepath = fs.url(imagepath)
    test_image = '.' + imagepath
    print(test_image)
    #img = tf.keras.preprocessing.image.load_img(test_image)
    #x = tf.keras.preprocessing.image.img_to_array(img)
    #x = tf.data.Dataset.from_tensors(x)
    img = io_ops.read_file(test_image)
    img = image_ops.decode_image(img, channels=3, expand_animations=False)
    img = image_ops.resize_images_v2(img, (256, 256), method='bilinear')
    img.set_shape((256, 256, 3))
    x = tf.data.Dataset.from_tensors(img)
    x = x.batch(1)

    #x = x.batch(1)
    #data = next(iter(x))

    #img_path = os.path.join(BASE_DIR, 'media/photo')
    #img_path = os.path.dirname(img_path)
    # test_dataset = tf.keras.preprocessing.image_dataset_from_directory(
    #    img_path, color_mode='rgb', batch_size=1)

    result = model.predict(x)
    result_ph = np.asarray(result[0][0])
    result_normal = np.asarray(result[0][1])

    context = {
        'result_ph': result_normal,
        'result_normal': result_ph,
    }
    return render(request, "app/predict.html", context=context)
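
Since this view only ever classifies a single upload, wrapping the image in a `tf.data.Dataset` is not strictly necessary. A minimal alternative sketch, assuming the same decoded `img` tensor and loaded `model` as above, is to add the batch dimension directly:

    # Sketch: batch of one without tf.data (img has shape (256, 256, 3) after resizing).
    x = tf.expand_dims(img, axis=0)   # shape (1, 256, 256, 3)
    result = model.predict(x)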
Code Example #3
    def resize_and_center_cropped_inputs():
      """Deterministically resize to shorter side and center crop."""
      input_shape = array_ops.shape(inputs)
      input_height_t = input_shape[1]
      input_width_t = input_shape[2]
      ratio_cond = (input_height_t / input_width_t > 1.)
      # pylint: disable=g-long-lambda
      resized_height = tf_utils.smart_cond(
          ratio_cond,
          lambda: math_ops.cast(self.width * input_height_t / input_width_t,
                                input_height_t.dtype), lambda: self.height)
      resized_width = tf_utils.smart_cond(
          ratio_cond, lambda: self.width,
          lambda: math_ops.cast(self.height * input_width_t / input_height_t,
                                input_width_t.dtype))
      # pylint: enable=g-long-lambda
      resized_inputs = image_ops.resize_images_v2(
          images=inputs, size=array_ops.stack([resized_height, resized_width]))

      img_hd_diff = resized_height - self.height
      img_wd_diff = resized_width - self.width
      bbox_h_start = math_ops.cast(img_hd_diff / 2, dtypes.int32)
      bbox_w_start = math_ops.cast(img_wd_diff / 2, dtypes.int32)
      bbox_begin = array_ops.stack([0, bbox_h_start, bbox_w_start, 0])
      bbox_size = array_ops.stack([-1, self.height, self.width, -1])
      outputs = array_ops.slice(resized_inputs, bbox_begin, bbox_size)
      return outputs
Code Example #4
    def forward(self, *x):
        x = enforce_singleton(x)
        low_level_feature = self.backbond1(x)
        high_level_feature = self.backbond2(low_level_feature)
        x = self.aspp(high_level_feature)
        new_shape = list(int_shape(x)[1:])
        # resize_images_v2 expects (new_height, new_width); int_shape(x)[1:] is
        # (height, width, channels) for channels-last data, so scale dims 0 and 1.
        x = image_ops.resize_images_v2(x, [new_shape[0] * 4, new_shape[1] * 4],
                                       method=image_ops.ResizeMethod.BILINEAR)
        low_level_feature = self.low_level_conv(low_level_feature)
        x = concate([x, low_level_feature], axis=-1)
        x = self.decoder(x)

        new_shape = list(int_shape(x)[1:])
        # Upsample the decoder output 4x as well (size is (new_height, new_width)).
        x = image_ops.resize_images_v2(x, [new_shape[0] * 4, new_shape[1] * 4],
                                       method=image_ops.ResizeMethod.BILINEAR)
        return x
Code Example #5
    def forward(self, x):
        s = self.pool(x)
        s = self.activation(self.squeeze(s))
        s = tf.sigmoid(self.excite(s))

        if self.is_gather_excite:
            # Resize the gate map back to the input's spatial size (assumes NHWC).
            s = image_ops.resize_images_v2(
                s, x.shape[1:3], method=image_ops.ResizeMethod.NEAREST_NEIGHBOR)
        x = s * x
        return x
Code Example #6
    def forward(self, *x):
        x = enforce_singleton(x)
        low_level_feature = self.backbond1(x)
        high_level_feature = self.backbond2(low_level_feature)
        x = self.aspp(high_level_feature)
        # resize_images_v2 takes a 2-element (new_height, new_width) size, not a
        # full output shape; upsample the two spatial dimensions 4x.
        new_shape = x.shape.as_list()
        x = image_ops.resize_images_v2(
            x, (new_shape[1] * 4, new_shape[2] * 4),
            method=image_ops.ResizeMethod.BILINEAR)
        low_level_feature = self.low_level_conv(low_level_feature)
        x = concate([x, low_level_feature], axis=1)
        x = self.decoder(x)

        new_shape = x.shape.as_list()
        x = image_ops.resize_images_v2(
            x, (new_shape[1] * 4, new_shape[2] * 4),
            method=image_ops.ResizeMethod.BILINEAR)
        return x
Code Example #7
def load_image(path):
    print("Query: ")
    img_disp = PIL.Image.open(path)
    img_disp.thumbnail((224, 224))

    image = io_ops.read_file(path)
    image = image_ops.decode_image(image, channels=3, expand_animations=False)
    image = image_ops.resize_images_v2(image, (224, 224), method='bilinear')
    image.set_shape((224, 224, 3))
    image = image.numpy()
    image = tf.keras.applications.mobilenet_v2.preprocess_input(image)
    image = np.array([image])
    return image
Code Example #8
def load_image(path, image_size, num_channels, interpolation,
               smart_resize=False):
  """Load an image from a path and resize it."""
  img = io_ops.read_file(path)
  img = image_ops.decode_image(
      img, channels=num_channels, expand_animations=False)
  if smart_resize:
    img = keras_image_ops.smart_resize(img, image_size,
                                       interpolation=interpolation)
  else:
    img = image_ops.resize_images_v2(img, image_size, method=interpolation)
  img.set_shape((image_size[0], image_size[1], num_channels))
  return img
Code Example #9
 def random_width_inputs():
   """Inputs width-adjusted with random ops."""
   inputs_shape = array_ops.shape(inputs)
   img_hd = inputs_shape[H_AXIS]
   img_wd = math_ops.cast(inputs_shape[W_AXIS], dtypes.float32)
   width_factor = self._rng.uniform(
       shape=[],
       minval=(1.0 + self.width_lower),
       maxval=(1.0 + self.width_upper))
   adjusted_width = math_ops.cast(width_factor * img_wd, dtypes.int32)
   adjusted_size = array_ops.stack([img_hd, adjusted_width])
   output = image_ops.resize_images_v2(
       images=inputs, size=adjusted_size, method=self._interpolation_method)
   original_shape = inputs.shape.as_list()
   output_shape = original_shape[0:2] + [None] + [original_shape[3]]
   output.set_shape(output_shape)
   return output
Code Example #10
 def random_height_inputs():
   """Inputs height-adjusted with random ops."""
   inputs_shape = array_ops.shape(inputs)
   img_hd = math_ops.cast(inputs_shape[H_AXIS], dtypes.float32)
   img_wd = inputs_shape[W_AXIS]
   height_factor = self._rng.uniform(
       shape=[],
       minval=(1.0 + self.height_lower),
       maxval=(1.0 + self.height_upper))
   adjusted_height = math_ops.cast(height_factor * img_hd, dtypes.int32)
   adjusted_size = array_ops.stack([adjusted_height, img_wd])
   output = image_ops.resize_images_v2(
       images=inputs, size=adjusted_size, method=self._interpolation_method)
   original_shape = inputs.shape.as_list()
   output_shape = [original_shape[0]] + [None] + original_shape[2:4]
   output.set_shape(output_shape)
   return output
Code Example #11
  def testResizeWithPartialStaticShape(self, src_shape, src_sizes, dst_size):
    channels = src_shape[-1] or 3
    images = self.make_image_batch(src_sizes, channels)
    rt_spec = ragged_tensor.RaggedTensorSpec(src_shape,
                                             ragged_rank=images.ragged_rank)
    expected_shape = [len(src_sizes)] + list(dst_size) + [channels]

    # Use @tf.function to erase static shape information.
    @def_function.function(input_signature=[rt_spec])
    def do_resize(images):
      return image_ops.resize_images_v2(images, dst_size)

    resized_images = do_resize(images)
    self.assertIsInstance(resized_images, ops.Tensor)
    self.assertTrue(resized_images.shape.is_compatible_with(expected_shape))

    # Check that the result for each image matches what we'd get with the
    # non-batch version of tf.image.resize.
    for i in range(len(src_sizes)):
      actual = resized_images[i]
      expected = image_ops.resize_images_v2(images[i].to_tensor(), dst_size)
      self.assertAllClose(actual, expected)
Code Example #12
def get_image(width, height, want_grayscale, filepath):
    """Returns an image loaded into an np.ndarray with dims [height, width, (3 or 1)].

  Args:
    width: Width to rescale the image to.
    height: Height to rescale the image to.
    want_grayscale: Whether the result should be converted to grayscale.
    filepath: Path of the image file..

  Returns:
    np.ndarray of shape (height, width, channels) where channels is 1 if
      want_grayscale is true, otherwise 3.
  """
    with ops.Graph().as_default():
        with session.Session():
            file_data = io_ops.read_file(filepath)
            channels = 1 if want_grayscale else 3
            image_tensor = image_ops.decode_image(file_data,
                                                  channels=channels).eval()
            resized_tensor = image_ops.resize_images_v2(
                image_tensor, (height, width)).eval()
    return resized_tensor
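
A usage sketch for the helper above (the file name 'photo.jpg' is hypothetical); because of the graph-and-session wrapper, the result comes back as a plain NumPy array:

# Hypothetical calls: RGB and grayscale variants, both resized to 224x224.
rgb = get_image(224, 224, want_grayscale=False, filepath='photo.jpg')   # shape (224, 224, 3)
gray = get_image(224, 224, want_grayscale=True, filepath='photo.jpg')   # shape (224, 224, 1)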
Code Example #13
 def do_resize(images):
   return image_ops.resize_images_v2(images, dst_size)
Code Example #14
 def call(self, inputs):
   outputs = image_ops.resize_images_v2(
       images=inputs,
       size=[self.target_height, self.target_width],
       method=self._interpolation_method)
   return outputs
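
This `call` method appears to be the core of Keras's `Resizing` preprocessing layer. A minimal sketch of the equivalent through the public API, assuming the TF 2.x experimental layer path (the layer later moved to `tf.keras.layers.Resizing`); the 128x128 target and the random batch are arbitrary:

import tensorflow as tf

# Public preprocessing layer that wraps the same resize call.
resize_layer = tf.keras.layers.experimental.preprocessing.Resizing(
    128, 128, interpolation='bilinear')
images = tf.random.uniform((4, 300, 200, 3))   # arbitrary NHWC batch
resized = resize_layer(images)                 # shape (4, 128, 128, 3)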
Code Example #15
 def testBadRank(self):
   rt = ragged_tensor.RaggedTensor.from_tensor(array_ops.zeros([5, 5, 3]))
   with self.assertRaisesRegex(ValueError, 'rank must be 4'):
     image_ops.resize_images_v2(rt, [10, 10])
Code Example #16
def smart_resize(x, size, interpolation='bilinear'):
  """Resize images to a target size without aspect ratio distortion.

  TensorFlow image datasets typically yield images that each have a different
  size. However, these images need to be batched before they can be
  processed by Keras layers. To be batched, images need to share the same height
  and width.

  You could simply do:

  ```python
  size = (200, 200)
  ds = ds.map(lambda img: tf.image.resize(img, size))
  ```

  However, if you do this, you distort the aspect ratio of your images, since
  in general they do not all have the same aspect ratio as `size`. This is
  fine in many cases, but not always (e.g. for GANs this can be a problem).

  Note that passing the argument `preserve_aspect_ratio=True` to `resize`
  will preserve the aspect ratio, but at the cost of no longer respecting the
  provided target size. Because `tf.image.resize` doesn't crop images,
  your output images will still have different sizes.

  This calls for:

  ```python
  size = (200, 200)
  ds = ds.map(lambda img: smart_resize(img, size))
  ```

  Your output images will actually be `(200, 200)`, and will not be distorted.
  Instead, the parts of the image that do not fit within the target size
  get cropped out.

  The resizing process is:

  1. Take the largest centered crop of the image that has the same aspect ratio
  as the target size. For instance, if `size=(200, 200)` and the input image has
  size `(340, 500)`, we take a crop of `(340, 340)` centered along the width.
  2. Resize the cropped image to the target size. In the example above,
  we resize the `(340, 340)` crop to `(200, 200)`.

  Args:
    x: Input image (as a tensor or NumPy array). Must be in format
      `(height, width, channels)`.
    size: Tuple of `(height, width)` integers. Target size.
    interpolation: String, interpolation to use for resizing.
      Defaults to `'bilinear'`. Supports `bilinear`, `nearest`, `bicubic`,
      `area`, `lanczos3`, `lanczos5`, `gaussian`, `mitchellcubic`.

  Returns:
    Array with shape `(size[0], size[1], channels)`. If the input image was a
    NumPy array, the output is a NumPy array, and if it was a TF tensor,
    the output is a TF tensor.
  """
  if len(size) != 2:
    raise ValueError('Expected `size` to be a tuple of 2 integers, '
                     'but got: %s' % (size,))
  img = ops.convert_to_tensor_v2_with_dispatch(x)
  if img.shape.rank is not None:
    if img.shape.rank != 3:
      raise ValueError(
          'Expected an image array with shape `(height, width, channels)`, but '
          'got input with incorrect rank, of shape %s' % (img.shape,))
  shape = array_ops.shape(img)
  height, width = shape[0], shape[1]
  target_height, target_width = size

  crop_height = math_ops.cast(
      math_ops.cast(width * target_height, 'float32') / target_width, 'int32')
  crop_width = math_ops.cast(
      math_ops.cast(height * target_width, 'float32') / target_height, 'int32')

  # Set back to input height / width if crop_height / crop_width is not smaller.
  crop_height = math_ops.minimum(height, crop_height)
  crop_width = math_ops.minimum(width, crop_width)

  crop_box_hstart = math_ops.cast(
      math_ops.cast(height - crop_height, 'float32') / 2, 'int32')
  crop_box_wstart = math_ops.cast(
      math_ops.cast(width - crop_width, 'float32') / 2, 'int32')

  crop_box_start = array_ops.stack([crop_box_hstart, crop_box_wstart, 0])
  crop_box_size = array_ops.stack([crop_height, crop_width, -1])

  img = array_ops.slice(img, crop_box_start, crop_box_size)
  img = image_ops.resize_images_v2(
      images=img,
      size=size,
      method=interpolation)
  if isinstance(x, np.ndarray):
    return img.numpy()
  return img
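
As the docstring suggests, the typical use of `smart_resize` is inside a dataset `map`. A minimal sketch through the public export (`tf.keras.preprocessing.image.smart_resize`, available in TF 2.4+); the input size below is arbitrary:

import tensorflow as tf

img = tf.random.uniform((340, 500, 3))                            # arbitrary non-square image
out = tf.keras.preprocessing.image.smart_resize(img, (200, 200))  # center crop, then resize
print(out.shape)                                                  # (200, 200, 3)

# In a pipeline, exactly as in the docstring:
# ds = ds.map(lambda img: tf.keras.preprocessing.image.smart_resize(img, (200, 200)))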
Code Example #17
 def do_resize(images, new_size):
   return image_ops.resize_images_v2(images, new_size)