def motion_blur_param_generator(
        batch_size: int,
        kernel_size: Union[int, Tuple[int, int]],
        angle: UnionFloat,
        direction: UnionFloat,
        border_type: Union[int, str, BorderType] = BorderType.CONSTANT.name,
        same_on_batch: bool = True) -> Dict[str, torch.Tensor]:
    r"""Generate random motion blur parameters for a batch of images.

    Args:
        batch_size (int): the number of images.
        kernel_size (int or tuple): motion kernel size (odd). If a tuple is given,
            an odd kernel size is sampled per image from that range.
        angle (float or tuple): angle of the motion blur in degrees.
        direction (float or tuple): direction of the motion blur, bounded to (-1, 1).
        border_type (int, str or BorderType): padding mode applied before convolving.
            Default: BorderType.CONSTANT.
        same_on_batch (bool): apply the same transformation across the batch. Default: True

    Returns:
        params Dict[str, torch.Tensor]: parameters to be passed for transformation.
    """
    angle_bound: torch.Tensor = _check_and_bound(angle, 'angle', center=0.)
    direction_bound: torch.Tensor = _check_and_bound(direction, 'direction', center=0., bounds=(-1, 1))

    if isinstance(kernel_size, int):
        ksize_factor = torch.tensor([kernel_size] * batch_size)
    elif isinstance(kernel_size, tuple):
        ksize_x, ksize_y = kernel_size
        ksize_factor = _adapted_uniform(
            (batch_size, ), ksize_x // 2, ksize_y // 2, same_on_batch).int() * 2 + 1
    else:
        raise TypeError(f"Unsupported type: {type(kernel_size)}")

    angle_factor = _adapted_uniform(
        (batch_size, ), angle_bound[0], angle_bound[1], same_on_batch)

    direction_factor = _adapted_uniform(
        (batch_size, ), direction_bound[0], direction_bound[1], same_on_batch)

    return dict(ksize_factor=ksize_factor,
                angle_factor=angle_factor,
                direction_factor=direction_factor,
                border_type=torch.tensor(BorderType.get(border_type).value))
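
# Usage sketch (illustrative, not part of the original module; the helper name below is
# hypothetical): samples motion blur parameters for a small batch, assuming `torch` and
# this module's helpers are importable.
def _example_motion_blur_params() -> Dict[str, torch.Tensor]:
    """Sample motion blur parameters for 4 images with odd kernel sizes in {3, 5, 7}."""
    params = motion_blur_param_generator(
        batch_size=4, kernel_size=(3, 7), angle=45., direction=1.)
    # `params` holds per-sample tensors 'ksize_factor', 'angle_factor' and
    # 'direction_factor', plus a scalar 'border_type' code.
    return params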
def random_color_jitter_generator(
        batch_size: int,
        brightness: FloatUnionType = 0.,
        contrast: FloatUnionType = 0.,
        saturation: FloatUnionType = 0.,
        hue: FloatUnionType = 0.,
        same_on_batch: bool = False) -> Dict[str, torch.Tensor]:
    r"""Generate random color jitter parameters for a batch of images.

    Args:
        batch_size (int): the number of images.
        brightness (float or tuple): Default value is 0.
        contrast (float or tuple): Default value is 0.
        saturation (float or tuple): Default value is 0.
        hue (float or tuple): Default value is 0.
        same_on_batch (bool): apply the same transformation across the batch. Default: False

    Returns:
        params Dict[str, torch.Tensor]: parameters to be passed for transformation.
    """
    brightness_bound: torch.Tensor = _check_and_bound(
        brightness, 'brightness', center=1., bounds=(0, float('inf')))
    contrast_bound: torch.Tensor = _check_and_bound(
        contrast, 'contrast', center=1., bounds=(0, float('inf')))
    saturation_bound: torch.Tensor = _check_and_bound(
        saturation, 'saturation', center=1., bounds=(0, float('inf')))
    hue_bound: torch.Tensor = _check_and_bound(hue, 'hue', bounds=(-0.5, 0.5))

    brightness_factor = _adapted_uniform(
        (batch_size, ), brightness_bound[0], brightness_bound[1], same_on_batch)

    contrast_factor = _adapted_uniform(
        (batch_size, ), contrast_bound[0], contrast_bound[1], same_on_batch)

    hue_factor = _adapted_uniform(
        (batch_size, ), hue_bound[0], hue_bound[1], same_on_batch)

    saturation_factor = _adapted_uniform(
        (batch_size, ), saturation_bound[0], saturation_bound[1], same_on_batch)

    return dict(brightness_factor=brightness_factor,
                contrast_factor=contrast_factor,
                hue_factor=hue_factor,
                saturation_factor=saturation_factor,
                order=torch.randperm(4))
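
# Usage sketch (illustrative, not part of the original module; the helper name below is
# hypothetical): draws per-image jitter factors. Brightness, contrast and saturation are
# bounded around a center of 1.0 and hue around 0, so single floats are expanded into
# sampling ranges by `_check_and_bound`.
def _example_color_jitter_params() -> Dict[str, torch.Tensor]:
    """Sample color jitter factors for a batch of 8 images."""
    params = random_color_jitter_generator(
        batch_size=8, brightness=0.2, contrast=0.3, saturation=0.3, hue=0.1)
    # Each '*_factor' entry holds one sampled factor per image; 'order' is a random
    # permutation of [0, 1, 2, 3] intended to decide the order in which the four
    # adjustments are applied.
    return params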
def random_solarize_generator(
        batch_size: int,
        thresholds: FloatUnionType = 0.1,
        additions: FloatUnionType = 0.1,
        same_on_batch: bool = False) -> Dict[str, torch.Tensor]:
    r"""Generate random solarize parameters for a batch of images.

    For each pixel in the image below the threshold, the 'addition' amount is added to it
    and the result is clipped to the range [0, 1].

    Args:
        batch_size (int): the number of images.
        thresholds (float or tuple): Pixels below the threshold will be selected; pixels
            above it are inverted (subtracted from 1.0). Default value is 0.1.
        additions (float or tuple): The value is between -0.5 and 0.5. Default value is 0.1.
        same_on_batch (bool): apply the same transformation across the batch. Default: False

    Returns:
        params Dict[str, torch.Tensor]: parameters to be passed for transformation.
    """
    thresholds_bound: torch.Tensor = _check_and_bound(
        thresholds, 'thresholds', center=0.5, bounds=(0., 1.))
    additions_bound: torch.Tensor = _check_and_bound(additions, 'additions', bounds=(-0.5, 0.5))

    thresholds_factor = _adapted_uniform(
        (batch_size, ), thresholds_bound[0].float(), thresholds_bound[1].float(), same_on_batch)

    additions_factor = _adapted_uniform(
        (batch_size, ), additions_bound[0].float(), additions_bound[1].float(), same_on_batch)

    return dict(thresholds_factor=thresholds_factor,
                additions_factor=additions_factor)
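
# Usage sketch (illustrative, not part of the original module; the helper name below is
# hypothetical): thresholds are bounded around 0.5 and additions around 0, so with the
# defaults the per-image thresholds fall roughly in [0.4, 0.6] and the additions roughly
# in [-0.1, 0.1] (exact ranges depend on the clipping done by `_check_and_bound`).
def _example_solarize_params() -> Dict[str, torch.Tensor]:
    """Sample solarize thresholds and additions for a batch of 4 images."""
    params = random_solarize_generator(batch_size=4, thresholds=0.1, additions=0.1)
    # 'thresholds_factor' and 'additions_factor' each contain one value per image.
    return params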