Example #1
def forward(self, input):
    if self.keep_aspect_ratio:
        # Resize keeping the aspect ratio; the op returns (image, new_size, scale),
        # with new_size and scale packed as tensor buffers.
        res = _C.dispatch_image_resize_keep_aspect_ratio(
            self._op,
            input,
            target_size=self.target_size,
            min_size=self.min_size,
            max_size=self.max_size,
            resize_longer=self.resize_longer,
            interpolation_type=self.interpolation_type,
        )
        # Unpack the tensor buffers into regular tensors of shape (2,) per image.
        new_size = flow.tensor_buffer_to_tensor(res[1],
                                                dtype=flow.int32,
                                                instance_shape=(2, ))
        scale = flow.tensor_buffer_to_tensor(res[2],
                                             dtype=flow.float32,
                                             instance_shape=(2, ))
    else:
        # Resize to a fixed width and height; the scale is already a regular tensor.
        res = _C.dispatch_image_resize_to_fixed(
            self._op,
            input,
            target_width=self.target_w,
            target_height=self.target_h,
            channels=self.channels,
            data_type=self.dtype,
            interpolation_type=self.interpolation_type,
        )
        new_size = None
        scale = res[1]
    res_image = res[0]
    return (res_image, scale, new_size)
Example #2
def _test_tensor_buffer_convert(test_case, device):
    input = flow.tensor(np.random.rand(16, 24, 32, 36),
                        dtype=flow.float32,
                        device=flow.device(device))
    # Pack the trailing 2 dims (32, 36) into a tensor buffer, unpack them back,
    # and check that the round trip is lossless.
    tensor_buffer = flow.tensor_to_tensor_buffer(input, instance_dims=2)
    orig_tensor = flow.tensor_buffer_to_tensor(tensor_buffer,
                                               dtype=flow.float32,
                                               instance_shape=[32, 36])
    test_case.assertTrue(np.array_equal(input.numpy(), orig_tensor.numpy()))
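
As a minimal sketch of how the two calls above relate (the shapes below are made up for
illustration and do not come from the original test): `instance_dims=n` packs the trailing
n dimensions of each instance into the tensor buffer, and the `instance_shape` passed to
flow.tensor_buffer_to_tensor must match exactly those packed dimensions.

import numpy as np
import oneflow as flow

# Hypothetical shapes, chosen only to illustrate instance_dims vs. instance_shape.
x = flow.tensor(np.random.rand(8, 5, 7), dtype=flow.float32)

# Pack the trailing 2 dims (5, 7) of each instance into a tensor buffer of shape (8,).
buf = flow.tensor_to_tensor_buffer(x, instance_dims=2)

# Unpack: instance_shape must equal the dims that were packed, here (5, 7).
y = flow.tensor_buffer_to_tensor(buf, dtype=flow.float32, instance_shape=[5, 7])

assert tuple(y.shape) == (8, 5, 7)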
Example #3
def ofrecord_image_classification_reader(
    ofrecord_dir: str,
    image_feature_name: str,
    label_feature_name: str,
    batch_size: int = 1,
    data_part_num: int = 1,
    part_name_prefix: str = "part-",
    part_name_suffix_length: int = -1,
    random_shuffle: bool = False,
    shuffle_buffer_size: int = 1024,
    shuffle_after_epoch: bool = False,
    color_space: str = "BGR",
    decode_buffer_size_per_thread: int = 32,
    num_decode_threads_per_machine: Optional[int] = None,
    name: Optional[str] = None,
) -> Tuple[BlobDef, BlobDef]:
    if name is None:
        name = id_util.UniqueStr("OFRecordImageClassificationReader_")
    # Build and run the reader op; it produces decoded images and labels,
    # both packed as tensor buffers.
    (image, label) = (
        flow.user_op_builder(name)
        .Op("ofrecord_image_classification_reader")
        .Output("image")
        .Output("label")
        .Attr("data_dir", ofrecord_dir)
        .Attr("data_part_num", data_part_num)
        .Attr("batch_size", batch_size)
        .Attr("part_name_prefix", part_name_prefix)
        .Attr("random_shuffle", random_shuffle)
        .Attr("shuffle_buffer_size", shuffle_buffer_size)
        .Attr("shuffle_after_epoch", shuffle_after_epoch)
        .Attr("part_name_suffix_length", part_name_suffix_length)
        .Attr("color_space", color_space)
        .Attr("image_feature_name", image_feature_name)
        .Attr("label_feature_name", label_feature_name)
        .Attr("decode_buffer_size_per_thread", decode_buffer_size_per_thread)
        .Attr("num_decode_threads_per_machine", num_decode_threads_per_machine or 0)
        .Build()
        .InferAndTryRun()
        .RemoteBlobList()
    )
    # Unpack the label tensor buffer into an int32 tensor and drop the trailing dim.
    label = flow.tensor_buffer_to_tensor(label,
                                         dtype=flow.int32,
                                         instance_shape=[1])
    label = flow.squeeze(label, axis=[-1])
    return image, label
Example #4
def api_image_resize(
    image: BlobDef,
    target_size: Optional[Union[int, Sequence[int]]] = None,
    min_size: Optional[int] = None,
    max_size: Optional[int] = None,
    keep_aspect_ratio: bool = False,
    resize_side: str = "shorter",
    channels: int = 3,
    dtype: Optional[dtype_util.dtype] = None,
    interpolation_type: str = "auto",
    name: Optional[str] = None,
    # deprecated params, reserved for backward compatibility
    color_space: Optional[str] = None,
    interp_type: Optional[str] = None,
    resize_shorter: int = 0,
    resize_x: int = 0,
    resize_y: int = 0,
) -> Union[BlobDef, Sequence[BlobDef]]:
    r"""Resize images to target size.

    Args:
        image: A `Tensor` consisting of images to be resized.
        target_size: A list or tuple when `keep_aspect_ratio` is false, or an int when
            `keep_aspect_ratio` is true. When `keep_aspect_ratio` is false, `target_size` has
            the form `(target_width, target_height)` that the image will be resized to. When
            `keep_aspect_ratio` is true, the longer or shorter side of the image will be
            resized to `target_size`.
        min_size: An int, optional. Only works when `keep_aspect_ratio` is true and `resize_side`
            is "longer". If `min_size` is not None, the shorter side must be greater than or
            equal to `min_size`. Default is None.
        max_size: An int, optional. Only works when `keep_aspect_ratio` is true and `resize_side`
            is "shorter". If `max_size` is not None, the longer side must be less than or equal
            to `max_size`. Default is None.
        keep_aspect_ratio: A bool. If false, the image will be resized to a fixed width and
            height; otherwise the image will be resized keeping its aspect ratio.
        resize_side: A str of "longer" or "shorter". Only works when `keep_aspect_ratio` is True.
            If `resize_side` is "longer", the longer side of image will be resized to `target_size`.
            If `resize_side` is "shorter", the shorter side of image will be resized to
            `target_size`.
        channels: An int. How many channels the image has.
        dtype: `oneflow.dtype`. Indicates the data type of the resized output image.
        interpolation_type: A str of "auto", "bilinear", "nearest_neighbor", "bicubic" or "area".
            Indicates the interpolation method used to resize the image.
        name: A str, optional. Name for the operation.
        color_space: Deprecated, a str of "RGB", "BGR" or "GRAY". Please use `channels` instead.
        interp_type: Deprecated, a str of "Linear", "Cubic" or "NN". Please use `interpolation_type`
            instead.
        resize_shorter: Deprecated, an int. Indicates the target size that the shorter side of the
            image will be resized to. Please use `target_size` and `resize_side` instead.
        resize_x: Deprecated, an int. Indicates the target size that the width of the image will be
            resized to. Please use `target_size` instead.
        resize_y: Deprecated, an int. Indicates the target size that the height of the image will be
            resized to. Please use `target_size` instead.

    Returns:
        Tuple of the resized images `Blob`, the width and height scales `Blob`, and the new width
        and height `Blob` (the new width and height `Blob` will be None when `keep_aspect_ratio`
        is false). If deprecated params are used, a single resized images `Blob` is returned.
    """
    # process deprecated params
    deprecated_param_used = False
    if color_space is not None:
        print(
            "WARNING: color_space has been deprecated. Please use channels instead."
        )
        print(traceback.format_stack()[-2])
        deprecated_param_used = True
        assert isinstance(color_space, str)
        if color_space.upper() == "RGB" or color_space.upper() == "BGR":
            channels = 3
        elif color_space.upper() == "GRAY":
            channels = 1
        else:
            raise ValueError("invalid color_space")

    if interp_type is not None:
        print(
            "WARNING: interp_type has been deprecated. Please use interpolation_type instead."
        )
        print(traceback.format_stack()[-2])
        deprecated_param_used = True
        assert isinstance(interp_type, str)
        if interp_type == "Linear":
            interpolation_type = "bilinear"
        elif interp_type == "NN":
            interpolation_type = "nearest_neighbor"
        elif interp_type == "Cubic":
            interpolation_type = "bicubic"
        else:
            raise ValueError("invalid interp_type")

    if resize_x > 0 and resize_y > 0:
        print(
            "WARNING: resize_x and resize_y has been deprecated. Please use target_size instead."
        )
        print(traceback.format_stack()[-2])
        deprecated_param_used = True
        target_size = (resize_x, resize_y)
        keep_aspect_ratio = False

    if resize_shorter > 0:
        print(
            "WARNING: resize_shorter has been deprecated. Please use target_size instead."
        )
        print(traceback.format_stack()[-2])
        deprecated_param_used = True
        target_size = resize_shorter
        keep_aspect_ratio = True
        resize_side = "shorter"

    if name is None:
        name = id_util.UniqueStr("ImageResize_")

    if keep_aspect_ratio:
        if not isinstance(target_size, int):
            raise ValueError(
                "target_size must be an int when keep_aspect_ratio is True")

        if min_size is None:
            min_size = 0

        if max_size is None:
            max_size = 0

        if resize_side == "shorter":
            resize_longer = False
        elif resize_side == "longer":
            resize_longer = True
        else:
            raise ValueError('resize_side must be "shorter" or "longer"')

        op = (
            flow.user_op_builder(name)
            .Op("image_resize_keep_aspect_ratio")
            .Input("in", [image])
            .Output("out")
            .Output("size")
            .Output("scale")
            .Attr("target_size", target_size)
            .Attr("min_size", min_size)
            .Attr("max_size", max_size)
            .Attr("resize_longer", resize_longer)
            .Attr("interpolation_type", interpolation_type)
            .Build()
        )
        res_image, new_size, scale = op.InferAndTryRun().RemoteBlobList()
        scale = flow.tensor_buffer_to_tensor(scale,
                                             dtype=flow.float32,
                                             instance_shape=(2, ))
        new_size = flow.tensor_buffer_to_tensor(new_size,
                                                dtype=flow.int32,
                                                instance_shape=(2, ))

    else:
        if (
            not isinstance(target_size, (list, tuple))
            or len(target_size) != 2
            or not all(isinstance(size, int) for size in target_size)
        ):
            raise ValueError(
                "target_size must be of the form (width, height) when keep_aspect_ratio is False"
            )

        if dtype is None:
            dtype = flow.uint8

        target_w, target_h = target_size
        op = (flow.user_op_builder(name).Op("image_resize_to_fixed").Input(
            "in", [image]).Output("out").Output("scale").Attr(
                "target_width", target_w).Attr("target_height", target_h).Attr(
                    "channels",
                    channels).Attr("data_type",
                                   dtype).Attr("interpolation_type",
                                               interpolation_type).Build())
        res_image, scale = op.InferAndTryRun().RemoteBlobList()
        new_size = None

    if deprecated_param_used:
        return res_image

    return res_image, scale, new_size
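
As a hedged usage sketch of api_image_resize (not taken from the original source; the `images`
variable stands for an image Blob produced earlier in the same job, e.g. by an image decode op):

# keep_aspect_ratio=True: target_size is an int, and the returned scale/new_size
# have already been unpacked from tensor buffers by flow.tensor_buffer_to_tensor.
resized, scale, new_size = api_image_resize(
    images,                        # hypothetical image Blob
    target_size=800,               # resize the shorter side to 800 pixels
    max_size=1333,                 # cap the longer side at 1333 pixels
    keep_aspect_ratio=True,
    resize_side="shorter",
    interpolation_type="bilinear",
)

# keep_aspect_ratio=False: target_size is a (width, height) pair and new_size is None.
fixed, fixed_scale, none_size = api_image_resize(
    images,
    target_size=(224, 224),
    keep_aspect_ratio=False,
    dtype=flow.float32,
)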
Example #5
def job_fn(x: oft.Numpy.Placeholder(input_arr.shape, dtype=flow.float32)):
    # Round trip: pack the trailing 2 dims into a tensor buffer, then unpack
    # them back into a regular float32 tensor with instance shape (32, 36).
    tensor_buffer = flow.tensor_to_tensor_buffer(x, instance_dims=2)
    return flow.tensor_buffer_to_tensor(tensor_buffer,
                                        dtype=flow.float32,
                                        instance_shape=[32, 36])