def _transform(self, input: Any, params: Dict[str, Any]) -> Any:
    """Apply a resized crop to ``input`` using the sampled ``params``.

    Bounding boxes and segmentation masks are explicitly rejected.
    ``features.Image`` inputs are cropped/resized and re-wrapped so their
    feature metadata is preserved; plain tensors and PIL images are
    dispatched to the matching kernel; anything else passes through
    untouched.
    """
    if isinstance(input, (features.BoundingBox, features.SegmentationMask)):
        raise TypeError(
            f"{type(input).__name__}'s are not supported by {type(self).__name__}()"
        )

    # Keyword arguments shared by every kernel call below.
    common = dict(size=list(self.size), interpolation=self.interpolation)

    if isinstance(input, features.Image):
        cropped = F.resized_crop_image_tensor(input, **params, **common)
        return features.Image.new_like(input, cropped)
    if isinstance(input, torch.Tensor):
        return F.resized_crop_image_tensor(input, **params, **common)
    if isinstance(input, PIL.Image.Image):
        return F.resized_crop_image_pil(input, **params, **common)
    return input
def _transform(self, input: Any, params: Dict[str, Any]) -> Any:
    """Dispatch the resized-crop kernel appropriate for ``input``'s type.

    ``features.Image`` results are re-wrapped via ``new_like`` to keep
    their feature metadata; simple tensors and PIL images go straight to
    the corresponding kernel; unrecognised inputs are returned unchanged.
    """
    target_size = list(self.size)

    if isinstance(input, features.Image):
        result = F.resized_crop_image_tensor(
            input, **params, size=target_size, interpolation=self.interpolation
        )
        return features.Image.new_like(input, result)
    if is_simple_tensor(input):
        return F.resized_crop_image_tensor(
            input, **params, size=target_size, interpolation=self.interpolation
        )
    if isinstance(input, PIL.Image.Image):
        return F.resized_crop_image_pil(
            input, **params, size=target_size, interpolation=self.interpolation
        )
    return input
def resized_crop(
    self,
    top: int,
    left: int,
    height: int,
    width: int,
    size: List[int],
    interpolation: InterpolationMode = InterpolationMode.BILINEAR,
    antialias: bool = False,
) -> Image:
    """Crop the ``height`` x ``width`` region whose top-left corner is at
    (``top``, ``left``), resize it to ``size``, and return the result as a
    new ``Image`` carrying this image's metadata (via ``new_like``).
    """
    # NOTE(review): imported at call time, presumably to break a circular
    # dependency between the features and transforms modules — confirm.
    from torchvision.prototype.transforms import functional as _F

    resized = _F.resized_crop_image_tensor(
        self,
        top,
        left,
        height,
        width,
        size=list(size),
        interpolation=interpolation,
        antialias=antialias,
    )
    return Image.new_like(self, resized)