import numpy as np
import torch as ch
import torch.nn as nn
# rotate / translate / center_crop are assumed to come from kornia
from kornia.geometry.transform import rotate, translate, center_crop


def apply_transformation(x, trans):
    """Rotate then translate a batched NCHW tensor x by trans = (dx, dy, angle)."""
    dx, dy, angle = trans[0], trans[1], trans[2]
    height, width = x.shape[2], x.shape[3]

    # Pad the image to prevent two-step rotation / translation from truncating
    # corners
    max_dist_from_center = np.sqrt(height**2 + width**2) / 2
    min_edge_from_center = float(np.min([height, width])) / 2
    padding = int(np.ceil(max_dist_from_center - min_edge_from_center))
    x = nn.ConstantPad2d(padding, 0)(x)

    # Apply rotation: one angle (in degrees) per batch element
    angle = ch.from_numpy(np.ones(x.shape[0], dtype=np.float32) * angle)
    angle = angle.to(x.device)
    x = rotate(x, angle)

    # Apply translation: dx / dy are fractions of the image size, converted
    # here to pixel offsets, one (dx, dy) row per batch element
    dx_in_px = -dx * height
    dy_in_px = -dy * width
    translation = ch.from_numpy(
        np.tile(np.array([dx_in_px, dy_in_px], dtype=np.float32),
                (x.shape[0], 1)))
    translation = translation.to(x.device)
    x = translate(x, translation)

    # Pad if needed so the final crop never exceeds the image bounds
    if x.shape[2] < height or x.shape[3] < width:
        pad = nn.ConstantPad2d(
            (0, max(0, height - x.shape[2]), 0, max(0, width - x.shape[3])), 0)
        x = pad(x)
    return center_crop(x, (height, width))
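
# Usage sketch (an assumption, not from the source): applies a small
# shift-and-rotate to a random batch. `trans` is read as (dx, dy, angle),
# with dx / dy as fractions of the image size and angle in degrees, matching
# how apply_transformation consumes it above.
if __name__ == "__main__":
    batch = ch.rand(4, 3, 224, 224)  # hypothetical NCHW batch
    warped = apply_transformation(batch, trans=(0.1, -0.05, 15.0))
    print(warped.shape)  # -> torch.Size([4, 3, 224, 224])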
from typing import Dict, Tuple, Union

import torch
from kornia.geometry.transform import center_crop

# Return type used by kornia-style functional augmentations: either the output
# tensor alone, or (output, transform_matrix) when return_transform is True
UnionType = Union[torch.Tensor, Tuple[torch.Tensor, torch.Tensor]]


def apply_center_crop(input: torch.Tensor, params: Dict[str, torch.Tensor],
                      return_transform: bool = False) -> UnionType:
    if not torch.is_tensor(input):
        raise TypeError(f"Input type is not a torch.Tensor. Got {type(input)}")
    # params['size'] holds the target (height, width) as a tensor
    size1: int = int(params['size'][0].item())
    size2: int = int(params['size'][1].item())
    # assumes a kornia version whose center_crop accepts return_transform
    return center_crop(input, (size1, size2), return_transform)
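
# Usage sketch (an assumption, not from the source): params follows the
# kornia-style convention of a dict of tensors, with 'size' holding the
# (height, width) to crop to. Assumes a kornia version whose center_crop
# accepts the return_transform flag, as the function above does.
if __name__ == "__main__":
    img = torch.rand(1, 3, 64, 64)
    cropped = apply_center_crop(img, {'size': torch.tensor([32, 32])})
    print(cropped.shape)  # -> torch.Size([1, 3, 32, 32])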
import torch
import torch.nn.functional as F
import torchvision.transforms.functional as T
from torch import Tensor


def cutout_pad(x: Tensor, v: float) -> Tensor:
    """Cutout whose patch may overlap the image border.

    Padding by v*W/2 (resp. v*H/2) on each side grows the image to roughly
    W*(1+v) x H*(1+v); cutting out fraction v/(1+v) of the padded size then
    removes a patch of size v relative to the *original* image, and the final
    center crop restores the original resolution.
    """
    B, C, H, W = x.shape
    x = F.pad(x, [int(v * W / 2), int(v * W / 2),
                  int(v * H / 2), int(v * H / 2)])
    x = cutout(x, v / (1 + v))  # `cutout` is an external helper (not shown)
    x = T.center_crop(x, (H, W))
    return x
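
# Usage sketch (an assumption, not from the source). `cutout` is an external
# helper; the hypothetical stand-in below zeroes one random square patch whose
# side is the fraction v of the padded image, just so this demo is
# self-contained.
def cutout(x: Tensor, v: float) -> Tensor:  # hypothetical stand-in
    B, C, H, W = x.shape
    size = int(v * min(H, W))
    out = x.clone()
    for b in range(B):
        top = int(torch.randint(0, H - size + 1, (1,)).item())
        left = int(torch.randint(0, W - size + 1, (1,)).item())
        out[b, :, top:top + size, left:left + size] = 0.0
    return out


if __name__ == "__main__":
    batch = torch.rand(8, 3, 32, 32)
    augmented = cutout_pad(batch, 0.25)
    print(augmented.shape)  # -> torch.Size([8, 3, 32, 32])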