def apply_adjust_hue(input: torch.Tensor, params: Dict[str, torch.Tensor]) -> torch.Tensor:
    """Shift the hue channel of an image tensor.

    Torchvision-like parameter wrapper around ``adjust_hue``: the factor is
    given as a fraction of a full hue turn and converted to radians here.

    Args:
        input (torch.Tensor): Tensor to be transformed with shape
            (H, W), (C, H, W) or (B, C, H, W).
        params (Dict[str, torch.Tensor]):
            - params['hue_factor']: How much to shift the hue channel.
              Should be in [-0.5, 0.5]. 0.5 and -0.5 give a complete reversal
              of the hue channel in HSV space (complementary colors), while
              0 leaves the image unchanged.

    Returns:
        torch.Tensor: Adjusted image.
    """
    image = _transform_input(input)
    _validate_input_dtype(
        image, accepted_dtypes=[torch.float16, torch.float32, torch.float64])
    # hue_factor is a fraction of a full turn; adjust_hue takes radians.
    hue_radians = params['hue_factor'].to(image.dtype) * 2 * pi
    return adjust_hue(image, hue_radians)
def apply_transform(
    self, input: Tensor, params: Dict[str, Tensor], transform: Optional[Tensor] = None
) -> Tensor:
    """Apply brightness, contrast, saturation and hue jitter in the sampled order.

    Args:
        input (Tensor): Image tensor to be jittered.
        params (Dict[str, Tensor]): Sampled jitter factors, plus
            ``params['order']``: a permutation of indices 0..3 selecting which
            adjustment (brightness, contrast, saturation, hue) runs at each step.
        transform (Optional[Tensor]): Unused here; kept for interface compatibility.

    Returns:
        Tensor: The jittered image.
    """
    out = input
    for step in params["order"].tolist():
        if step == 0:
            # NOTE(review): the `- 1` presumably converts the Torchvision-style
            # multiplicative brightness factor to this adjust_brightness's
            # convention — confirm against the adjust_brightness definition.
            out = adjust_brightness(out, params["brightness_factor"] - 1)
        elif step == 1:
            out = adjust_contrast(out, params["contrast_factor"])
        elif step == 2:
            out = adjust_saturation(out, params["saturation_factor"])
        elif step == 3:
            # hue_factor is a fraction of a full turn; adjust_hue takes radians.
            out = adjust_hue(out, params["hue_factor"] * 2 * pi)
    return out