def random_rotation_generator(
    batch_size: int,
    degrees: torch.Tensor,
    same_on_batch: bool = False,
    device: torch.device = torch.device('cpu'),
    dtype: torch.dtype = torch.float32,
) -> Dict[str, torch.Tensor]:
    r"""Get parameters for ``rotate`` for a random rotate transform.

    Args:
        batch_size (int): the tensor batch size.
        degrees (torch.Tensor): range of degrees with shape (2,) to select from.
        same_on_batch (bool): apply the same transformation across the batch. Default: False.
        device (torch.device): the device on which the random numbers will be generated. Default: cpu.
        dtype (torch.dtype): the data type of the generated random numbers. Default: float32.

    Returns:
        params Dict[str, torch.Tensor]: parameters to be passed for transformation.
            - degrees (torch.Tensor): element-wise rotation degrees with a shape of (B,).

    Note:
        The generated random numbers are not reproducible across different devices and dtypes.
    """
    _common_param_check(batch_size, same_on_batch)
    _joint_range_check(degrees, "degrees")

    _degrees = _adapted_uniform(
        (batch_size,),
        degrees[0].to(device=device, dtype=dtype),
        degrees[1].to(device=device, dtype=dtype),
        same_on_batch,
    )
    _degrees = _degrees.to(device=degrees.device, dtype=degrees.dtype)

    return dict(degrees=_degrees)

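# Illustrative usage sketch (not part of the module): draw one rotation angle
# per image for a batch of four, uniformly from [-30, 30] degrees.
#
#   params = random_rotation_generator(4, torch.tensor([-30.0, 30.0]))
#   params['degrees']  # shape (4,), cast back to the device/dtype of `degrees`
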
def random_posterize_generator(
    batch_size: int,
    bits: torch.Tensor = torch.tensor([3, 5]),
    same_on_batch: bool = False,
    device: torch.device = torch.device('cpu'),
    dtype: torch.dtype = torch.float32,
) -> Dict[str, torch.Tensor]:
    r"""Generate random posterize parameters for a batch of images.

    Args:
        batch_size (int): the number of images.
        bits (torch.Tensor): (min, max) range of the number of bits to keep, each within 0 ~ 8.
            Default: [3, 5].
        same_on_batch (bool): apply the same transformation across the batch. Default: False.
        device (torch.device): the device on which the random numbers will be generated. Default: cpu.
        dtype (torch.dtype): the data type of the generated random numbers. Default: float32.

    Returns:
        params Dict[str, torch.Tensor]: parameters to be passed for transformation.
            - bits_factor (torch.Tensor): element-wise bit factors with a shape of (B,).

    Note:
        The generated random numbers are not reproducible across different devices and dtypes.
    """
    _common_param_check(batch_size, same_on_batch)
    _joint_range_check(bits, 'bits', (0, 8))
    bits_factor = _adapted_uniform(
        (batch_size,),
        bits[0].to(device=device, dtype=dtype),
        bits[1].to(device=device, dtype=dtype),
        same_on_batch,
    ).int()

    return dict(bits_factor=bits_factor.to(device=bits.device, dtype=torch.int32))

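# Illustrative usage sketch (not part of the module): per-image bit factors
# drawn from [3, 5]; truncation via .int() yields integers in that range.
#
#   params = random_posterize_generator(8, bits=torch.tensor([3, 5]))
#   params['bits_factor']  # int32 tensor of shape (8,), values in [3, 5]
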
def make_samplers(self, device: torch.device, dtype: torch.dtype) -> None:
    scale = torch.as_tensor(self.scale, device=device, dtype=dtype)
    ratio = torch.as_tensor(self.ratio, device=device, dtype=dtype)
    if not (isinstance(self.value, (int, float)) and self.value >= 0 and self.value <= 1):
        raise AssertionError(f"'value' must be a number between 0 and 1. Got {self.value}.")
    _joint_range_check(scale, 'scale', bounds=(0, float('inf')))
    _joint_range_check(ratio, 'ratio', bounds=(0, float('inf')))

    self.scale_sampler = Uniform(scale[0], scale[1], validate_args=False)

    if ratio[0] < 1.0 and ratio[1] > 1.0:
        self.ratio_sampler1 = Uniform(ratio[0], 1, validate_args=False)
        self.ratio_sampler2 = Uniform(1, ratio[1], validate_args=False)
        self.index_sampler = Uniform(
            torch.tensor(0, device=device, dtype=dtype),
            torch.tensor(1, device=device, dtype=dtype),
            validate_args=False,
        )
    else:
        self.ratio_sampler = Uniform(ratio[0], ratio[1], validate_args=False)
    self.uniform_sampler = Uniform(
        torch.tensor(0, device=device, dtype=dtype),
        torch.tensor(1, device=device, dtype=dtype),
        validate_args=False,
    )

def random_sharpness_generator(
    batch_size: int,
    sharpness: torch.Tensor = torch.tensor([0, 1.0]),
    same_on_batch: bool = False,
    device: torch.device = torch.device('cpu'),
    dtype: torch.dtype = torch.float32,
) -> Dict[str, torch.Tensor]:
    r"""Generate random sharpness parameters for a batch of images.

    Args:
        batch_size (int): the number of images.
        sharpness (torch.Tensor): (min, max) range of sharpness factors, both non-negative.
            Default: [0, 1].
        same_on_batch (bool): apply the same transformation across the batch. Default: False.
        device (torch.device): the device on which the random numbers will be generated. Default: cpu.
        dtype (torch.dtype): the data type of the generated random numbers. Default: float32.

    Returns:
        params Dict[str, torch.Tensor]: parameters to be passed for transformation.
            - sharpness_factor (torch.Tensor): element-wise sharpness factors with a shape of (B,).

    Note:
        The generated random numbers are not reproducible across different devices and dtypes.
    """
    _common_param_check(batch_size, same_on_batch)
    _joint_range_check(sharpness, 'sharpness', bounds=(0, float('inf')))

    sharpness_factor = _adapted_uniform(
        (batch_size,),
        sharpness[0].to(device=device, dtype=dtype),
        sharpness[1].to(device=device, dtype=dtype),
        same_on_batch,
    )

    return dict(sharpness_factor=sharpness_factor.to(device=sharpness.device, dtype=sharpness.dtype))

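# Illustrative usage sketch (not part of the module):
#
#   params = random_sharpness_generator(4, sharpness=torch.tensor([0.0, 1.0]))
#   params['sharpness_factor']  # shape (4,), values uniform in [0, 1]
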
def make_samplers(self, device: torch.device, dtype: torch.dtype) -> None:
    idx_range = _range_bound(self.domain, 'idx_range', device=device, dtype=dtype)
    _joint_range_check(idx_range, 'idx_range', (0, self.domain[1]))
    self.pl_idx_dist = Uniform(idx_range[0], idx_range[1], validate_args=False)

def make_samplers(self, device: torch.device, dtype: torch.dtype) -> None:
    bits = torch.as_tensor(self.bits, device=device, dtype=dtype)
    if len(bits.size()) == 0:
        bits = bits.repeat(2)
        bits[1] = 8
    elif not (len(bits.size()) == 1 and bits.size(0) == 2):
        raise ValueError(f"'bits' shall be either a scalar or a length 2 tensor. Got {bits}.")
    _joint_range_check(bits, 'bits', (0, 8))
    self.bit_sampler = Uniform(bits[0], bits[1], validate_args=False)

def random_mixup_generator(
    batch_size: int,
    p: float = 0.5,
    lambda_val: Optional[torch.Tensor] = None,
    same_on_batch: bool = False,
    device: torch.device = torch.device('cpu'),
    dtype: torch.dtype = torch.float32,
) -> Dict[str, torch.Tensor]:
    r"""Generate mixup indexes and lambdas for a batch of inputs.

    Args:
        batch_size (int): the number of images. If batch_size == 1, the output will be the same as the input.
        p (float): probability of applying mixup.
        lambda_val (torch.Tensor, optional): min-max strength for mixup images, ranged from [0., 1.].
            If None, it will be set to tensor([0., 1.]), which means no restrictions.
        same_on_batch (bool): apply the same transformation across the batch. Default: False.
        device (torch.device): the device on which the random numbers will be generated. Default: cpu.
        dtype (torch.dtype): the data type of the generated random numbers. Default: float32.

    Returns:
        params Dict[str, torch.Tensor]: parameters to be passed for transformation.
            - mixup_pairs (torch.Tensor): element-wise shuffled indices for pairing, with a shape of (B,).
            - mixup_lambdas (torch.Tensor): element-wise mixup strengths with a shape of (B,).

    Note:
        The generated random numbers are not reproducible across different devices and dtypes.

    Examples:
        >>> rng = torch.manual_seed(0)
        >>> random_mixup_generator(5, 0.7)
        {'mixup_pairs': tensor([4, 0, 3, 1, 2]), 'mixup_lambdas': tensor([0.6323, 0.0000, 0.4017, 0.0223, 0.1689])}
    """
    _common_param_check(batch_size, same_on_batch)
    _device, _dtype = _extract_device_dtype([lambda_val])
    lambda_val = torch.as_tensor([0.0, 1.0] if lambda_val is None else lambda_val, device=device, dtype=dtype)
    _joint_range_check(lambda_val, 'lambda_val', bounds=(0, 1))

    batch_probs: torch.Tensor = random_prob_generator(
        batch_size, p, same_on_batch=same_on_batch, device=device, dtype=dtype)
    mixup_pairs: torch.Tensor = torch.randperm(batch_size, device=device, dtype=dtype).long()
    mixup_lambdas: torch.Tensor = _adapted_uniform(
        (batch_size,), lambda_val[0], lambda_val[1], same_on_batch=same_on_batch)
    mixup_lambdas = mixup_lambdas * batch_probs

    return dict(
        mixup_pairs=mixup_pairs.to(device=_device, dtype=torch.long),
        mixup_lambdas=mixup_lambdas.to(device=_device, dtype=_dtype),
    )

def random_solarize_generator(
    batch_size: int,
    thresholds: torch.Tensor = torch.tensor([0.4, 0.6]),
    additions: torch.Tensor = torch.tensor([-0.1, 0.1]),
    same_on_batch: bool = False,
    device: torch.device = torch.device('cpu'),
    dtype: torch.dtype = torch.float32,
) -> Dict[str, torch.Tensor]:
    r"""Generate random solarize parameters for a batch of images.

    For each pixel in the image below the threshold, we add the 'addition' amount to it and then clip
    the pixel value to be between 0 and 1.0.

    Args:
        batch_size (int): the number of images.
        thresholds (torch.Tensor): pixels below the threshold will be selected for the addition;
            pixels at or above it will be inverted (subtracted from 1.0). Takes in a range tensor
            within (0, 1). Default sampling range: [0.4, 0.6].
        additions (torch.Tensor): the amount added to selected pixels, between -0.5 and 0.5.
            Default sampling range: [-0.1, 0.1].
        same_on_batch (bool): apply the same transformation across the batch. Default: False.
        device (torch.device): the device on which the random numbers will be generated. Default: cpu.
        dtype (torch.dtype): the data type of the generated random numbers. Default: float32.

    Returns:
        params Dict[str, torch.Tensor]: parameters to be passed for transformation.
            - thresholds_factor (torch.Tensor): element-wise thresholds factors with a shape of (B,).
            - additions_factor (torch.Tensor): element-wise additions factors with a shape of (B,).

    Note:
        The generated random numbers are not reproducible across different devices and dtypes.
    """
    _common_param_check(batch_size, same_on_batch)
    _joint_range_check(thresholds, 'thresholds', (0, 1))
    _joint_range_check(additions, 'additions', (-0.5, 0.5))
    _device, _dtype = _extract_device_dtype([thresholds, additions])

    thresholds_factor = _adapted_uniform(
        (batch_size,),
        thresholds[0].to(device=device, dtype=dtype),
        thresholds[1].to(device=device, dtype=dtype),
        same_on_batch,
    )
    additions_factor = _adapted_uniform(
        (batch_size,),
        additions[0].to(device=device, dtype=dtype),
        additions[1].to(device=device, dtype=dtype),
        same_on_batch,
    )

    return dict(
        thresholds_factor=thresholds_factor.to(device=_device, dtype=_dtype),
        additions_factor=additions_factor.to(device=_device, dtype=_dtype),
    )

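# Illustrative usage sketch (not part of the module): with the defaults, each
# image receives a threshold in [0.4, 0.6] and an addition in [-0.1, 0.1].
#
#   params = random_solarize_generator(4)
#   params['thresholds_factor']  # shape (4,)
#   params['additions_factor']   # shape (4,)
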
def make_samplers(self, device: torch.device, dtype: torch.dtype) -> None:
    scale = torch.as_tensor(self.scale, device=device, dtype=dtype)
    ratio = torch.as_tensor(self.ratio, device=device, dtype=dtype)
    _joint_range_check(scale, "scale")
    _joint_range_check(ratio, "ratio")
    self.rand_sampler = Uniform(
        torch.tensor(0.0, device=device, dtype=dtype),
        torch.tensor(1.0, device=device, dtype=dtype))
    self.log_ratio_sampler = Uniform(torch.log(ratio[0]), torch.log(ratio[1]), validate_args=False)

def make_samplers(self, device: torch.device, dtype: torch.dtype) -> None:
    if self.lambda_val is None:
        lambda_val = torch.tensor([0.0, 1.0], device=device, dtype=dtype)
    else:
        lambda_val = torch.as_tensor(self.lambda_val, device=device, dtype=dtype)

    _joint_range_check(lambda_val, 'lambda_val', bounds=(0, 1))

    self.lambda_sampler = Uniform(lambda_val[0], lambda_val[1], validate_args=False)
    self.prob_sampler = Bernoulli(torch.tensor(float(self.p), device=device, dtype=dtype))

def make_samplers(self, device: torch.device, dtype: torch.dtype) -> None:
    if self.beta is None:
        self._beta = torch.tensor(1.0, device=device, dtype=dtype)
    else:
        self._beta = torch.as_tensor(self.beta, device=device, dtype=dtype)
    if self.cut_size is None:
        self._cut_size = torch.tensor([0.0, 1.0], device=device, dtype=dtype)
    else:
        self._cut_size = torch.as_tensor(self.cut_size, device=device, dtype=dtype)

    _joint_range_check(self._cut_size, 'cut_size', bounds=(0, 1))

    self.beta_sampler = Beta(self._beta, self._beta)
    self.prob_sampler = Bernoulli(torch.tensor(float(self.p), device=device, dtype=dtype))
    self.rand_sampler = Uniform(
        torch.tensor(0.0, device=device, dtype=dtype),
        torch.tensor(1.0, device=device, dtype=dtype),
        validate_args=False,
    )

def make_samplers(self, device: torch.device, dtype: torch.dtype) -> None:
    brightness = _range_bound(
        self.brightness, 'brightness', center=1.0, bounds=(0, 2), device=device, dtype=dtype)
    contrast: Tensor = _range_bound(self.contrast, 'contrast', center=1.0, device=device, dtype=dtype)
    saturation: Tensor = _range_bound(self.saturation, 'saturation', center=1.0, device=device, dtype=dtype)
    hue: Tensor = _range_bound(self.hue, 'hue', bounds=(-0.5, 0.5), device=device, dtype=dtype)

    _joint_range_check(brightness, "brightness", (0, 2))
    _joint_range_check(contrast, "contrast", (0, float('inf')))
    _joint_range_check(hue, "hue", (-0.5, 0.5))
    _joint_range_check(saturation, "saturation", (0, float('inf')))

    self.brightness_sampler = Uniform(brightness[0], brightness[1], validate_args=False)
    self.contrast_sampler = Uniform(contrast[0], contrast[1], validate_args=False)
    self.hue_sampler = Uniform(hue[0], hue[1], validate_args=False)
    self.saturation_sampler = Uniform(saturation[0], saturation[1], validate_args=False)
    self.randperm = partial(torch.randperm, device=device, dtype=dtype)

def random_affine_generator(
    batch_size: int,
    height: int,
    width: int,
    degrees: torch.Tensor,
    translate: Optional[torch.Tensor] = None,
    scale: Optional[torch.Tensor] = None,
    shear: Optional[torch.Tensor] = None,
    same_on_batch: bool = False,
    device: torch.device = torch.device('cpu'),
    dtype: torch.dtype = torch.float32,
) -> Dict[str, torch.Tensor]:
    r"""Get parameters for ``affine`` for a random affine transform.

    Args:
        batch_size (int): the tensor batch size.
        height (int): height of the image.
        width (int): width of the image.
        degrees (torch.Tensor): range of degrees to select from like (min, max).
        translate (tensor, optional): tuple of maximum absolute fraction for horizontal
            and vertical translations. For example translate=(a, b), then horizontal shift
            is randomly sampled in the range -img_width * a < dx < img_width * a and vertical shift is
            randomly sampled in the range -img_height * b < dy < img_height * b. Will not translate by default.
        scale (tensor, optional): scaling factor interval, e.g (a, b), then scale is
            randomly sampled from the range a <= scale <= b. Will keep original scale by default.
        shear (tensor, optional): range of degrees to select from.
            Shear is a 2x2 tensor, an x-axis shear in (shear[0][0], shear[0][1]) and y-axis shear in
            (shear[1][0], shear[1][1]) will be applied. Will not apply shear by default.
        same_on_batch (bool): apply the same transformation across the batch. Default: False.
        device (torch.device): the device on which the random numbers will be generated. Default: cpu.
        dtype (torch.dtype): the data type of the generated random numbers. Default: float32.

    Returns:
        params Dict[str, torch.Tensor]: parameters to be passed for transformation.
            - translations (torch.Tensor): element-wise translations with a shape of (B, 2).
            - center (torch.Tensor): element-wise center with a shape of (B, 2).
            - scale (torch.Tensor): element-wise scales with a shape of (B, 2).
            - angle (torch.Tensor): element-wise rotation angles with a shape of (B,).
            - sx (torch.Tensor): element-wise x-axis shears with a shape of (B,).
            - sy (torch.Tensor): element-wise y-axis shears with a shape of (B,).

    Note:
        The generated random numbers are not reproducible across different devices and dtypes.
    """
    _common_param_check(batch_size, same_on_batch)
    _joint_range_check(degrees, "degrees")
    if not (isinstance(width, int) and isinstance(height, int) and width > 0 and height > 0):
        raise AssertionError(f"`width` and `height` must be positive integers. Got {width}, {height}.")

    _device, _dtype = _extract_device_dtype([degrees, translate, scale, shear])
    degrees = degrees.to(device=device, dtype=dtype)
    angle = _adapted_uniform((batch_size,), degrees[0], degrees[1], same_on_batch)
    angle = angle.to(device=_device, dtype=_dtype)

    # compute tensor ranges
    if scale is not None:
        scale = scale.to(device=device, dtype=dtype)
        if not (len(scale.shape) == 1 and len(scale) in (2, 4)):
            raise AssertionError(f"`scale` shall have 2 or 4 elements. Got {scale}.")
        _joint_range_check(cast(torch.Tensor, scale[:2]), "scale")
        _scale = _adapted_uniform((batch_size,), scale[0], scale[1], same_on_batch).unsqueeze(1).repeat(1, 2)
        if len(scale) == 4:
            _joint_range_check(cast(torch.Tensor, scale[2:]), "scale_y")
            _scale[:, 1] = _adapted_uniform((batch_size,), scale[2], scale[3], same_on_batch)
        _scale = _scale.to(device=_device, dtype=_dtype)
    else:
        _scale = torch.ones((batch_size, 2), device=_device, dtype=_dtype)

    if translate is not None:
        translate = translate.to(device=device, dtype=dtype)
        if not (0.0 <= translate[0] <= 1.0 and 0.0 <= translate[1] <= 1.0 and translate.shape == torch.Size([2])):
            raise AssertionError(f"Expect translate contains two elements and ranges are in [0, 1]. Got {translate}.")
        max_dx: torch.Tensor = translate[0] * width
        max_dy: torch.Tensor = translate[1] * height
        translations = torch.stack(
            [
                _adapted_uniform((batch_size,), -max_dx, max_dx, same_on_batch),
                _adapted_uniform((batch_size,), -max_dy, max_dy, same_on_batch),
            ],
            dim=-1,
        )
        translations = translations.to(device=_device, dtype=_dtype)
    else:
        translations = torch.zeros((batch_size, 2), device=_device, dtype=_dtype)

    center: torch.Tensor = torch.tensor([width, height], device=_device, dtype=_dtype).view(1, 2) / 2.0 - 0.5
    center = center.expand(batch_size, -1)

    if shear is not None:
        shear = shear.to(device=device, dtype=dtype)
        _joint_range_check(cast(torch.Tensor, shear)[0], "shear")
        _joint_range_check(cast(torch.Tensor, shear)[1], "shear")
        sx = _adapted_uniform((batch_size,), shear[0][0], shear[0][1], same_on_batch)
        sy = _adapted_uniform((batch_size,), shear[1][0], shear[1][1], same_on_batch)
        sx = sx.to(device=_device, dtype=_dtype)
        sy = sy.to(device=_device, dtype=_dtype)
    else:
        sx = sy = torch.tensor([0] * batch_size, device=_device, dtype=_dtype)

    return dict(translations=translations, center=center, scale=_scale, angle=angle, sx=sx, sy=sy)

def random_cutmix_generator(
    batch_size: int,
    width: int,
    height: int,
    p: float = 0.5,
    num_mix: int = 1,
    beta: Optional[torch.Tensor] = None,
    cut_size: Optional[torch.Tensor] = None,
    same_on_batch: bool = False,
    device: torch.device = torch.device('cpu'),
    dtype: torch.dtype = torch.float32,
) -> Dict[str, torch.Tensor]:
    r"""Generate cutmix indexes and lambdas for a batch of inputs.

    Args:
        batch_size (int): the number of images. If batch_size == 1, the output will be the same as the input.
        width (int): image width.
        height (int): image height.
        p (float): probability of applying cutmix.
        num_mix (int): number of images to mix with. Default is 1.
        beta (torch.Tensor, optional): hyperparameter for generating cut size from beta distribution.
            If None, it will be set to 1.
        cut_size (torch.Tensor, optional): controlling the minimum and maximum cut ratio from [0, 1].
            If None, it will be set to [0, 1], which means no restriction.
        same_on_batch (bool): apply the same transformation across the batch. Default: False.
        device (torch.device): the device on which the random numbers will be generated. Default: cpu.
        dtype (torch.dtype): the data type of the generated random numbers. Default: float32.

    Returns:
        params Dict[str, torch.Tensor]: parameters to be passed for transformation.
            - mix_pairs (torch.Tensor): element-wise shuffled indices for pairing, with a shape of (num_mix, B).
            - crop_src (torch.Tensor): element-wise crop box corner coordinates, with a shape of (num_mix, B, 4, 2).

    Note:
        The generated random numbers are not reproducible across different devices and dtypes.

    Examples:
        >>> rng = torch.manual_seed(0)
        >>> random_cutmix_generator(3, 224, 224, p=0.5, num_mix=2)
        {'mix_pairs': tensor([[2, 0, 1],
                [1, 2, 0]]), 'crop_src': tensor([[[[ 35.,  25.],
                  [208.,  25.],
                  [208., 198.],
                  [ 35., 198.]],
        <BLANKLINE>
                 [[156., 137.],
                  [155., 137.],
                  [155., 136.],
                  [156., 136.]],
        <BLANKLINE>
                 [[  3.,  12.],
                  [210.,  12.],
                  [210., 219.],
                  [  3., 219.]]],
        <BLANKLINE>
        <BLANKLINE>
                [[[ 83., 125.],
                  [177., 125.],
                  [177., 219.],
                  [ 83., 219.]],
        <BLANKLINE>
                 [[ 54.,   8.],
                  [205.,   8.],
                  [205., 159.],
                  [ 54., 159.]],
        <BLANKLINE>
                 [[ 97.,  70.],
                  [ 96.,  70.],
                  [ 96.,  69.],
                  [ 97.,  69.]]]])}
    """
    _device, _dtype = _extract_device_dtype([beta, cut_size])
    beta = torch.as_tensor(1.0 if beta is None else beta, device=device, dtype=dtype)
    cut_size = torch.as_tensor([0.0, 1.0] if cut_size is None else cut_size, device=device, dtype=dtype)
    if not (isinstance(num_mix, int) and num_mix >= 1):
        raise AssertionError(f"`num_mix` must be an integer greater than or equal to 1. Got {num_mix}.")
    if not (type(height) is int and height > 0 and type(width) is int and width > 0):
        raise AssertionError(f"'height' and 'width' must be positive integers. Got {height}, {width}.")
    _joint_range_check(cut_size, 'cut_size', bounds=(0, 1))
    _common_param_check(batch_size, same_on_batch)

    if batch_size == 0:
        return dict(
            mix_pairs=torch.zeros([0, 3], device=_device, dtype=torch.long),
            crop_src=torch.zeros([0, 4, 2], device=_device, dtype=torch.long),
        )

    batch_probs: torch.Tensor = random_prob_generator(
        batch_size * num_mix, p, same_on_batch, device=device, dtype=dtype)
    mix_pairs: torch.Tensor = torch.rand(num_mix, batch_size, device=device, dtype=dtype).argsort(dim=1)
    cutmix_betas: torch.Tensor = _adapted_beta((batch_size * num_mix,), beta, beta, same_on_batch=same_on_batch)

    # Note: torch.clamp does not accept tensor, cutmix_betas.clamp(cut_size[0], cut_size[1]) throws:
    # Argument 1 to "clamp" of "_TensorBase" has incompatible type "Tensor"; expected "float"
    cutmix_betas = torch.min(torch.max(cutmix_betas, cut_size[0]), cut_size[1])
    cutmix_rate = torch.sqrt(1.0 - cutmix_betas) * batch_probs

    cut_height = (cutmix_rate * height).floor().to(device=device, dtype=_dtype)
    cut_width = (cutmix_rate * width).floor().to(device=device, dtype=_dtype)
    _gen_shape = (1,)

    if same_on_batch:
        _gen_shape = (cut_height.size(0),)
        cut_height = cut_height[0]
        cut_width = cut_width[0]

    # Reserve at least 1 pixel for cropping.
    x_start = (
        _adapted_uniform(
            _gen_shape,
            torch.zeros_like(cut_width, device=device, dtype=dtype),
            (width - cut_width - 1).to(device=device, dtype=dtype),
            same_on_batch,
        )
        .floor()
        .to(device=device, dtype=_dtype)
    )
    y_start = (
        _adapted_uniform(
            _gen_shape,
            torch.zeros_like(cut_height, device=device, dtype=dtype),
            (height - cut_height - 1).to(device=device, dtype=dtype),
            same_on_batch,
        )
        .floor()
        .to(device=device, dtype=_dtype)
    )

    crop_src = bbox_generator(x_start.squeeze(), y_start.squeeze(), cut_width, cut_height)

    # (B * num_mix, 4, 2) => (num_mix, batch_size, 4, 2)
    crop_src = crop_src.view(num_mix, batch_size, 4, 2)

    return dict(
        mix_pairs=mix_pairs.to(device=_device, dtype=torch.long),
        crop_src=crop_src.floor().to(device=_device, dtype=_dtype),
    )

def random_crop_size_generator(
    batch_size: int,
    size: Tuple[int, int],
    scale: torch.Tensor,
    ratio: torch.Tensor,
    same_on_batch: bool = False,
    device: torch.device = torch.device('cpu'),
    dtype: torch.dtype = torch.float32,
) -> Dict[str, torch.Tensor]:
    r"""Get cropping heights and widths for the ``crop`` transformation in a resized crop transform.

    Args:
        batch_size (int): the tensor batch size.
        size (Tuple[int, int]): expected output size of each edge.
        scale (torch.Tensor): range of size of the origin size cropped with (2,) shape.
        ratio (torch.Tensor): range of aspect ratio of the origin aspect ratio cropped with (2,) shape.
        same_on_batch (bool): apply the same transformation across the batch. Default: False.
        device (torch.device): the device on which the random numbers will be generated. Default: cpu.
        dtype (torch.dtype): the data type of the generated random numbers. Default: float32.

    Returns:
        params Dict[str, torch.Tensor]: parameters to be passed for transformation.
            - size (torch.Tensor): element-wise cropping sizes with a shape of (B, 2).

    Note:
        The generated random numbers are not reproducible across different devices and dtypes.

    Examples:
        >>> _ = torch.manual_seed(42)
        >>> random_crop_size_generator(3, (30, 30), scale=torch.tensor([.7, 1.3]), ratio=torch.tensor([.9, 1.]))
        {'size': tensor([[29., 29.],
                [27., 28.],
                [26., 29.]])}
    """
    _common_param_check(batch_size, same_on_batch)
    _joint_range_check(scale, "scale")
    _joint_range_check(ratio, "ratio")
    if not (len(size) == 2 and type(size[0]) is int and size[0] > 0 and type(size[1]) is int and size[1] > 0):
        raise AssertionError(f"'size' must be a tuple of two positive integers. Got {size}.")
    _device, _dtype = _extract_device_dtype([scale, ratio])

    if batch_size == 0:
        return dict(size=torch.zeros([0, 2], device=_device, dtype=_dtype))

    scale = scale.to(device=device, dtype=dtype)
    ratio = ratio.to(device=device, dtype=dtype)
    # 10 trials for each element
    area = _adapted_uniform(
        (batch_size, 10), scale[0] * size[0] * size[1], scale[1] * size[0] * size[1], same_on_batch)
    log_ratio = _adapted_uniform((batch_size, 10), torch.log(ratio[0]), torch.log(ratio[1]), same_on_batch)
    aspect_ratio = torch.exp(log_ratio)

    w = torch.sqrt(area * aspect_ratio).round().floor()
    h = torch.sqrt(area / aspect_ratio).round().floor()

    # Element-wise w, h condition
    cond = ((0 < w) * (w < size[0]) * (0 < h) * (h < size[1])).int()

    # torch.argmax is not reproducible across devices: https://github.com/pytorch/pytorch/issues/17738
    # Here, we will select the first occurrence of the duplicated elements.
    cond_bool, argmax_dim1 = ((cond.cumsum(1) == 1) & cond.bool()).max(1)
    h_out = w[torch.arange(0, batch_size, device=device, dtype=torch.long), argmax_dim1]
    w_out = h[torch.arange(0, batch_size, device=device, dtype=torch.long), argmax_dim1]

    if not cond_bool.all():
        # Fallback to center crop. Compare against the lower and upper ratio
        # bounds respectively, so that in-range aspect ratios fall through to
        # the whole-image branch.
        in_ratio = float(size[0]) / float(size[1])
        if in_ratio < ratio.min():
            h_ct = torch.tensor(size[0], device=device, dtype=dtype)
            w_ct = torch.round(h_ct / ratio.min())
        elif in_ratio > ratio.max():
            w_ct = torch.tensor(size[1], device=device, dtype=dtype)
            h_ct = torch.round(w_ct * ratio.max())
        else:  # whole image
            h_ct = torch.tensor(size[0], device=device, dtype=dtype)
            w_ct = torch.tensor(size[1], device=device, dtype=dtype)
        h_ct = h_ct.floor()
        w_ct = w_ct.floor()
        h_out = h_out.where(cond_bool, h_ct)
        w_out = w_out.where(cond_bool, w_ct)

    return dict(size=torch.stack([h_out, w_out], dim=1).to(device=_device, dtype=_dtype))

def random_motion_blur_generator(
    batch_size: int,
    kernel_size: Union[int, Tuple[int, int]],
    angle: torch.Tensor,
    direction: torch.Tensor,
    same_on_batch: bool = False,
    device: torch.device = torch.device('cpu'),
    dtype: torch.dtype = torch.float32,
) -> Dict[str, torch.Tensor]:
    r"""Get parameters for motion blur.

    Args:
        batch_size (int): the tensor batch size.
        kernel_size (int or (int, int)): motion kernel size (odd and positive) or range.
        angle (torch.Tensor): angle of the motion blur in degrees (anti-clockwise rotation).
        direction (torch.Tensor): forward/backward direction of the motion blur.
            Lower values towards -1.0 will point the motion blur towards the back (with angle provided via angle),
            while higher values towards 1.0 will point the motion blur forward. A value of 0.0 leads to a
            uniformly (but still angled) motion blur.
        same_on_batch (bool): apply the same transformation across the batch. Default: False.
        device (torch.device): the device on which the random numbers will be generated. Default: cpu.
        dtype (torch.dtype): the data type of the generated random numbers. Default: float32.

    Returns:
        params Dict[str, torch.Tensor]: parameters to be passed for transformation.
            - ksize_factor (torch.Tensor): element-wise kernel size factors with a shape of (B,).
            - angle_factor (torch.Tensor): element-wise angle factors with a shape of (B,).
            - direction_factor (torch.Tensor): element-wise direction factors with a shape of (B,).

    Note:
        The generated random numbers are not reproducible across different devices and dtypes.
    """
    _common_param_check(batch_size, same_on_batch)
    _joint_range_check(angle, 'angle')
    _joint_range_check(direction, 'direction', (-1, 1))

    _device, _dtype = _extract_device_dtype([angle, direction])

    if isinstance(kernel_size, int):
        if not (kernel_size >= 3 and kernel_size % 2 == 1):
            raise AssertionError(f"`kernel_size` must be odd and greater than or equal to 3. Got {kernel_size}.")
        ksize_factor = torch.tensor([kernel_size] * batch_size, device=device, dtype=dtype)
    elif isinstance(kernel_size, tuple):
        # kernel_size is fixed across the batch
        if len(kernel_size) != 2:
            raise AssertionError(f"`kernel_size` must be a tuple of length 2. Got {kernel_size}.")
        ksize_factor = (
            _adapted_uniform((batch_size,), kernel_size[0] // 2, kernel_size[1] // 2, same_on_batch=True).int() * 2
            + 1
        )
    else:
        raise TypeError(f"Unsupported type: {type(kernel_size)}")

    angle_factor = _adapted_uniform(
        (batch_size,),
        angle[0].to(device=device, dtype=dtype),
        angle[1].to(device=device, dtype=dtype),
        same_on_batch,
    )
    direction_factor = _adapted_uniform(
        (batch_size,),
        direction[0].to(device=device, dtype=dtype),
        direction[1].to(device=device, dtype=dtype),
        same_on_batch,
    )

    return dict(
        ksize_factor=ksize_factor.to(device=_device, dtype=torch.int32),
        angle_factor=angle_factor.to(device=_device, dtype=_dtype),
        direction_factor=direction_factor.to(device=_device, dtype=_dtype),
    )

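# Illustrative usage sketch (not part of the module): a (3, 9) kernel-size
# range is halved, sampled, then mapped back to odd sizes via k * 2 + 1.
#
#   params = random_motion_blur_generator(
#       4, kernel_size=(3, 9),
#       angle=torch.tensor([-45.0, 45.0]),
#       direction=torch.tensor([-1.0, 1.0]),
#   )
#   params['ksize_factor']  # int32 tensor of shape (4,), odd values within [3, 9]
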
def random_rectangles_params_generator(
    batch_size: int,
    height: int,
    width: int,
    scale: torch.Tensor,
    ratio: torch.Tensor,
    value: float = 0.0,
    same_on_batch: bool = False,
    device: torch.device = torch.device('cpu'),
    dtype: torch.dtype = torch.float32,
) -> Dict[str, torch.Tensor]:
    r"""Get parameters for the ``erasing`` transformation.

    Args:
        batch_size (int): the tensor batch size.
        height (int): height of the image.
        width (int): width of the image.
        scale (torch.Tensor): range of size of the origin size cropped. Shape (2,).
        ratio (torch.Tensor): range of aspect ratio of the origin aspect ratio cropped. Shape (2,).
        value (float): value to be filled in the erased area.
        same_on_batch (bool): apply the same transformation across the batch. Default: False.
        device (torch.device): the device on which the random numbers will be generated. Default: cpu.
        dtype (torch.dtype): the data type of the generated random numbers. Default: float32.

    Returns:
        params Dict[str, torch.Tensor]: parameters to be passed for transformation.
            - widths (torch.Tensor): element-wise erasing widths with a shape of (B,).
            - heights (torch.Tensor): element-wise erasing heights with a shape of (B,).
            - xs (torch.Tensor): element-wise erasing x coordinates with a shape of (B,).
            - ys (torch.Tensor): element-wise erasing y coordinates with a shape of (B,).
            - values (torch.Tensor): element-wise filling values with a shape of (B,).

    Note:
        The generated random numbers are not reproducible across different devices and dtypes.
    """
    _common_param_check(batch_size, same_on_batch)
    _device, _dtype = _extract_device_dtype([ratio, scale])
    if not (type(height) is int and height > 0 and type(width) is int and width > 0):
        raise AssertionError(f"'height' and 'width' must be positive integers. Got {height}, {width}.")
    if not (isinstance(value, (int, float)) and value >= 0 and value <= 1):
        raise AssertionError(f"'value' must be a number between 0 and 1. Got {value}.")
    _joint_range_check(scale, 'scale', bounds=(0, float('inf')))
    _joint_range_check(ratio, 'ratio', bounds=(0, float('inf')))

    images_area = height * width
    target_areas = (
        _adapted_uniform(
            (batch_size,),
            scale[0].to(device=device, dtype=dtype),
            scale[1].to(device=device, dtype=dtype),
            same_on_batch,
        )
        * images_area
    )

    if ratio[0] < 1.0 and ratio[1] > 1.0:
        aspect_ratios1 = _adapted_uniform((batch_size,), ratio[0].to(device=device, dtype=dtype), 1, same_on_batch)
        aspect_ratios2 = _adapted_uniform((batch_size,), 1, ratio[1].to(device=device, dtype=dtype), same_on_batch)
        if same_on_batch:
            rand_idxs = (
                torch.round(
                    _adapted_uniform(
                        (1,),
                        torch.tensor(0, device=device, dtype=dtype),
                        torch.tensor(1, device=device, dtype=dtype),
                        same_on_batch,
                    )
                )
                .repeat(batch_size)
                .bool()
            )
        else:
            rand_idxs = torch.round(
                _adapted_uniform(
                    (batch_size,),
                    torch.tensor(0, device=device, dtype=dtype),
                    torch.tensor(1, device=device, dtype=dtype),
                    same_on_batch,
                )
            ).bool()
        aspect_ratios = torch.where(rand_idxs, aspect_ratios1, aspect_ratios2)
    else:
        aspect_ratios = _adapted_uniform(
            (batch_size,),
            ratio[0].to(device=device, dtype=dtype),
            ratio[1].to(device=device, dtype=dtype),
            same_on_batch,
        )

    # based on target areas and aspect ratios, rectangle params are computed
    heights = torch.min(
        torch.max(
            torch.round((target_areas * aspect_ratios) ** (1 / 2)),
            torch.tensor(1.0, device=device, dtype=dtype),
        ),
        torch.tensor(height, device=device, dtype=dtype),
    )
    widths = torch.min(
        torch.max(
            torch.round((target_areas / aspect_ratios) ** (1 / 2)),
            torch.tensor(1.0, device=device, dtype=dtype),
        ),
        torch.tensor(width, device=device, dtype=dtype),
    )

    xs_ratio = _adapted_uniform(
        (batch_size,),
        torch.tensor(0, device=device, dtype=dtype),
        torch.tensor(1, device=device, dtype=dtype),
        same_on_batch,
    )
    ys_ratio = _adapted_uniform(
        (batch_size,),
        torch.tensor(0, device=device, dtype=dtype),
        torch.tensor(1, device=device, dtype=dtype),
        same_on_batch,
    )

    xs = xs_ratio * (torch.tensor(width, device=device, dtype=dtype) - widths + 1)
    ys = ys_ratio * (torch.tensor(height, device=device, dtype=dtype) - heights + 1)

    return dict(
        widths=widths.floor().to(device=_device, dtype=_dtype),
        heights=heights.floor().to(device=_device, dtype=_dtype),
        xs=xs.floor().to(device=_device, dtype=_dtype),
        ys=ys.floor().to(device=_device, dtype=_dtype),
        values=torch.tensor([value] * batch_size, device=_device, dtype=_dtype),
    )

def random_motion_blur_generator3d(
    batch_size: int,
    kernel_size: Union[int, Tuple[int, int]],
    angle: torch.Tensor,
    direction: torch.Tensor,
    same_on_batch: bool = False,
    device: torch.device = torch.device('cpu'),
    dtype: torch.dtype = torch.float32,
) -> Dict[str, torch.Tensor]:
    r"""Get parameters for motion blur.

    Args:
        batch_size (int): the tensor batch size.
        kernel_size (int or (int, int)): motion kernel size (odd and positive) or range.
        angle (torch.Tensor): yaw, pitch and roll range of the motion blur in degrees :math:`(3, 2)`.
        direction (torch.Tensor): forward/backward direction of the motion blur.
            Lower values towards -1.0 will point the motion blur towards the back (with angle provided via angle),
            while higher values towards 1.0 will point the motion blur forward. A value of 0.0 leads to a
            uniformly (but still angled) motion blur.
        same_on_batch (bool): apply the same transformation across the batch. Default: False.
        device (torch.device): the device on which the random numbers will be generated. Default: cpu.
        dtype (torch.dtype): the data type of the generated random numbers. Default: float32.

    Returns:
        params Dict[str, torch.Tensor]: parameters to be passed for transformation.
            - ksize_factor (torch.Tensor): element-wise kernel size factors with a shape of (B,).
            - angle_factor (torch.Tensor): element-wise yaw, pitch and roll factors with a shape of (B, 3).
            - direction_factor (torch.Tensor): element-wise direction factors with a shape of (B,).

    Note:
        The generated random numbers are not reproducible across different devices and dtypes.
    """
    _device, _dtype = _extract_device_dtype([angle, direction])
    _joint_range_check(direction, 'direction', (-1, 1))
    if isinstance(kernel_size, int):
        if not (kernel_size >= 3 and kernel_size % 2 == 1):
            raise AssertionError(f"`kernel_size` must be odd and greater than or equal to 3. Got {kernel_size}.")
        ksize_factor = torch.tensor([kernel_size] * batch_size, device=device, dtype=dtype).int()
    elif isinstance(kernel_size, tuple):
        if not (len(kernel_size) == 2 and kernel_size[0] >= 3 and kernel_size[0] <= kernel_size[1]):
            raise AssertionError(
                f"`kernel_size` must be a (min, max) tuple with min >= 3 and min <= max. Got {kernel_size}.")
        # kernel_size is fixed across the batch
        ksize_factor = (
            _adapted_uniform((batch_size,), kernel_size[0] // 2, kernel_size[1] // 2, same_on_batch=True).int() * 2
            + 1
        )
    else:
        raise TypeError(f"Unsupported type: {type(kernel_size)}")

    if angle.shape != torch.Size([3, 2]):
        raise AssertionError(f"'angle' must be the shape of (3, 2). Got {angle.shape}.")
    angle = angle.to(device=device, dtype=dtype)
    yaw = _adapted_uniform((batch_size,), angle[0][0], angle[0][1], same_on_batch)
    pitch = _adapted_uniform((batch_size,), angle[1][0], angle[1][1], same_on_batch)
    roll = _adapted_uniform((batch_size,), angle[2][0], angle[2][1], same_on_batch)
    angle_factor = torch.stack([yaw, pitch, roll], dim=1)

    direction = direction.to(device=device, dtype=dtype)
    direction_factor = _adapted_uniform((batch_size,), direction[0], direction[1], same_on_batch)

    return dict(
        ksize_factor=ksize_factor.to(device=_device),
        angle_factor=angle_factor.to(device=_device, dtype=_dtype),
        direction_factor=direction_factor.to(device=_device, dtype=_dtype),
    )

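# Illustrative usage sketch (not part of the module): `angle` packs the
# (min, max) ranges for yaw, pitch and roll into a (3, 2) tensor.
#
#   ranges = torch.tensor([[-30.0, 30.0], [-30.0, 30.0], [-30.0, 30.0]])
#   params = random_motion_blur_generator3d(
#       2, kernel_size=5, angle=ranges, direction=torch.tensor([-1.0, 1.0]))
#   params['angle_factor']  # shape (2, 3): one (yaw, pitch, roll) per image
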
def make_samplers(self, device: torch.device, dtype: torch.dtype) -> None:
    _degrees = _range_bound(self.degrees, 'degrees', 0, (-360, 360)).to(device=device, dtype=dtype)
    _translate = (
        self.translate
        if self.translate is None
        else _range_bound(self.translate, 'translate', bounds=(0, 1), check='singular').to(
            device=device, dtype=dtype)
    )
    _scale: Optional[torch.Tensor] = None
    if self.scale is not None:
        if len(self.scale) == 2:
            _scale = _range_bound(
                self.scale[:2], 'scale', bounds=(0, float('inf')), check='singular'
            ).to(device=device, dtype=dtype)
        elif len(self.scale) == 4:
            _scale = torch.cat([
                _range_bound(self.scale[:2], 'scale_x', bounds=(0, float('inf')), check='singular'),
                _range_bound(
                    self.scale[2:], 'scale_y', bounds=(0, float('inf')), check='singular'  # type: ignore
                ),
            ]).to(device=device, dtype=dtype)
        else:
            raise ValueError(f"'scale' expected to be either 2 or 4 elements. Got {self.scale}")
    _shear: Optional[torch.Tensor] = None
    if self.shear is not None:
        shear = torch.as_tensor(self.shear, device=device, dtype=dtype)
        if shear.shape == torch.Size([2, 2]):
            _shear = shear
        else:
            _shear = torch.stack([
                _range_bound(shear if shear.dim() == 0 else shear[:2], 'shear-x', 0, (-360, 360)),
                torch.tensor([0, 0], device=device, dtype=dtype)
                if shear.dim() == 0 or len(shear) == 2
                else _range_bound(shear[2:], 'shear-y', 0, (-360, 360)),
            ])

    translate_x_sampler: Optional[Uniform] = None
    translate_y_sampler: Optional[Uniform] = None
    scale_2_sampler: Optional[Uniform] = None
    scale_4_sampler: Optional[Uniform] = None
    shear_x_sampler: Optional[Uniform] = None
    shear_y_sampler: Optional[Uniform] = None

    if _translate is not None:
        translate_x_sampler = Uniform(-_translate[0], _translate[0], validate_args=False)
        translate_y_sampler = Uniform(-_translate[1], _translate[1], validate_args=False)
    if _scale is not None:
        if len(_scale) == 2:
            scale_2_sampler = Uniform(_scale[0], _scale[1], validate_args=False)
        elif len(_scale) == 4:
            scale_2_sampler = Uniform(_scale[0], _scale[1], validate_args=False)
            scale_4_sampler = Uniform(_scale[2], _scale[3], validate_args=False)
        else:
            raise ValueError(f"'scale' expected to be either 2 or 4 elements. Got {self.scale}")
    if _shear is not None:
        _joint_range_check(cast(torch.Tensor, _shear)[0], "shear")
        _joint_range_check(cast(torch.Tensor, _shear)[1], "shear")
        shear_x_sampler = Uniform(_shear[0][0], _shear[0][1], validate_args=False)
        shear_y_sampler = Uniform(_shear[1][0], _shear[1][1], validate_args=False)

    self.degree_sampler = Uniform(_degrees[0], _degrees[1], validate_args=False)
    self.translate_x_sampler = translate_x_sampler
    self.translate_y_sampler = translate_y_sampler
    self.scale_2_sampler = scale_2_sampler
    self.scale_4_sampler = scale_4_sampler
    self.shear_x_sampler = shear_x_sampler
    self.shear_y_sampler = shear_y_sampler