def test_features_image(self, p):
    input, expected = self.input_expected_image_tensor(p)
    transform = transforms.RandomVerticalFlip(p=p)

    actual = transform(features.Image(input))

    assert_equal(features.Image(expected), actual)
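
For context, a minimal sketch of what this test checks, assuming the prototype `torchvision.prototype.features` / `transforms` namespaces and building the expected result with `torch.flip`; the `input_expected_image_tensor` and `assert_equal` helpers belong to the surrounding test suite and are not reproduced here:

import torch
from torchvision.prototype import features, transforms

# With p=1.0 the flip always fires, so the output must equal the input
# flipped along the height (second-to-last) dimension.
image = features.Image(torch.rand(3, 8, 8))
actual = transforms.RandomVerticalFlip(p=1.0)(image)
expected = torch.flip(image, dims=(-2,))
assert torch.equal(actual, expected)
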
def make_image(
    size=None, *, color_space, extra_dims=(), dtype=torch.float32, constant_alpha=True
):
    size = size or torch.randint(16, 33, (2,)).tolist()

    try:
        num_channels = {
            features.ColorSpace.GRAY: 1,
            features.ColorSpace.GRAY_ALPHA: 2,
            features.ColorSpace.RGB: 3,
            features.ColorSpace.RGB_ALPHA: 4,
        }[color_space]
    except KeyError as error:
        raise pytest.UsageError(
            f"Can't determine the number of channels for color space {color_space}"
        ) from error

    shape = (*extra_dims, num_channels, *size)
    max_value = get_max_value(dtype)
    data = make_tensor(shape, low=0, high=max_value, dtype=dtype)
    alpha_spaces = {features.ColorSpace.GRAY_ALPHA, features.ColorSpace.RGB_ALPHA}
    if color_space in alpha_spaces and constant_alpha:
        # Make the alpha channel constant at the maximum value (fully opaque).
        data[..., -1, :, :] = max_value
    return features.Image(data, color_space=color_space)
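
A hypothetical call of the helper above (it additionally needs the suite's `make_tensor` and `get_max_value` utilities in scope); the channel count follows from the color space, and with `constant_alpha=True` the alpha plane is filled with the dtype's maximum value:

img = make_image(size=(32, 32), color_space=features.ColorSpace.RGB_ALPHA, dtype=torch.uint8)
assert img.shape == (4, 32, 32)
assert bool((img[-1] == 255).all())  # alpha channel filled with the uint8 maximum
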
Example #3
    def _prepare_sample(self, data: Tuple[Any, Any]) -> Dict[str, Any]:
        image, target = data  # They're both numpy arrays at this point

        return {
            "image": features.Image(image),
            "label": Label(target.item()),
        }
def _transform(self, input: Any, params: Dict[str, Any]) -> Any:
    if isinstance(input, (features.Image, PIL.Image.Image, np.ndarray)) or is_simple_tensor(input):
        output = F.to_image_tensor(input, copy=self.copy)
        return features.Image(output)
    else:
        return input
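
A hedged usage sketch, assuming this `_transform` belongs to the prototype `ToImageTensor` transform: PIL images, numpy arrays, and plain tensors come back wrapped as `features.Image`, while unsupported inputs pass through unchanged.

import PIL.Image
from torchvision.prototype import features, transforms

# Hypothetical: ToImageTensor is assumed to be the transform that owns the
# _transform above.
to_image = transforms.ToImageTensor()
converted = to_image(PIL.Image.new("RGB", (16, 16)))
assert isinstance(converted, features.Image)
assert converted.shape == (3, 16, 16)
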
Example #5
    def _prepare_sample(self, data: Tuple[Any, Any]) -> Dict[str, Any]:
        image, target = data  # They're both numpy arrays at this point

        return {
            "image": features.Image(image.transpose(2, 0, 1)),
            "label": Label(target.item(), categories=self._categories),
        }
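
The `transpose(2, 0, 1)` above rearranges the HWC numpy array yielded by the dataset into the CHW layout that image tensors use; a quick illustrative check with made-up shapes:

import numpy as np

# HWC (height, width, channels) -> CHW, the layout features.Image expects.
hwc = np.zeros((32, 48, 3), dtype=np.uint8)
chw = hwc.transpose(2, 0, 1)
assert chw.shape == (3, 32, 48)
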
Example #6
def make_image(size=None, *, color_space, extra_dims=(), dtype=torch.float32):
    size = size or torch.randint(16, 33, (2, )).tolist()

    num_channels = {
        features.ColorSpace.GRAYSCALE: 1,
        features.ColorSpace.RGB: 3,
    }[color_space]

    shape = (*extra_dims, num_channels, *size)
    if dtype.is_floating_point:
        data = torch.rand(shape, dtype=dtype)
    else:
        data = torch.randint(0, torch.iinfo(dtype).max, shape, dtype=dtype)
    return features.Image(data, color_space=color_space)
Example #7
def _transform(self, input: Any, params: Dict[str, Any]) -> Any:
    if type(input) is features.EncodedImage:
        output = F.decode_image_with_pil(input)
        return features.Image(output)
    else:
        return input
def pil(buffer: io.IOBase) -> features.Image:
    return features.Image(pil_to_tensor(PIL.Image.open(buffer)))
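
A hypothetical round trip through the helper above: encode a PIL image into an in-memory buffer and decode it back into a `features.Image` (assumes `pil_to_tensor` from `torchvision.transforms.functional` and the prototype `features` namespace are already imported for the helper itself).

import io

import PIL.Image

buffer = io.BytesIO()
PIL.Image.new("RGB", (8, 8), color=(255, 0, 0)).save(buffer, format="PNG")
buffer.seek(0)

image = pil(buffer)
assert image.shape == (3, 8, 8)
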
def make_image(**kwargs):
    data = make_tensor((3, *torch.randint(16, 33, (2, )).tolist()))
    return features.Image(data, **kwargs)