Example #1
import numpy as np
import torch
import torchgeometry as tgm


def align_multi(img_tensor, landmarks, crop_size=(112, 112)):
    """Align multiple faces in an image.

    Args:
        img_tensor (torch.Tensor): Input image tensor with shape [1, C, H, W].
        landmarks (np.ndarray or torch.IntTensor): Facial landmark points
            with shape [n, 5, 2].
    """
    if isinstance(landmarks, torch.Tensor):
        landmarks = landmarks.cpu().numpy()

    # Estimate one similarity transform per face.
    tr_matrices = []
    for points in landmarks:
        matrix = get_transform_matrix(points,
                                      crop_size=crop_size,
                                      align_type='similarity')
        M = torch.from_numpy(matrix).unsqueeze(0).type(torch.float32)
        tr_matrices.append(M)

    # Replicate the image once per face so all warps run in a single batch.
    images = torch.cat([img_tensor] * len(tr_matrices)).type(torch.float32)

    faces = tgm.warp_affine(images, torch.cat(tr_matrices), dsize=crop_size)

    return faces
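A minimal usage sketch for the function above (the image size, landmark values, and the availability of the project's get_transform_matrix helper are assumptions):

# Hypothetical usage: one image tensor, two faces with 5 landmarks each.
import numpy as np
import torch

img = torch.rand(1, 3, 256, 256)       # [1, C, H, W]
lmks = np.random.rand(2, 5, 2) * 256   # [n, 5, 2] landmark points
faces = align_multi(img, lmks)         # -> [2, 3, 112, 112]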
Example #2
def norm_crop_torch(faces_tensor, lmks_tensor, device, image_size=112):
    # faces_tensor: [B, C, H, W] images, lmks_tensor: [B, N, 2] landmarks.
    assert len(faces_tensor.shape) == 4
    assert len(lmks_tensor.shape) == 3
    # Estimate one normalization transform (2x3) per face from its landmarks.
    M, pose_indexes = estimate_norm_torch(lmks_tensor)
    M = M.to(device)
    faces_tensor = faces_tensor.to(device)
    warped = warp_affine(faces_tensor, M, (image_size, image_size))
    return warped, pose_indexes
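A call under assumed shapes (estimate_norm_torch and warp_affine come from the surrounding project, e.g. an insightface-style alignment module, so this only illustrates the expected interface):

# Hypothetical usage with assumed shapes.
import torch

faces = torch.rand(4, 3, 256, 256)   # [B, C, H, W]
lmks = torch.rand(4, 5, 2) * 256     # [B, 5, 2] landmarks per face
warped, poses = norm_crop_torch(faces, lmks, device='cpu')  # -> [4, 3, 112, 112]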
Example #3
def test_translation(self, batch_size):
    # Translating by one pixel should shift the content by one pixel
    # along both axes.
    offset = 1.
    channels, height, width = 1, 3, 4
    aff_ab = torch.eye(2, 3).repeat(batch_size, 1, 1)  # Bx2x3
    aff_ab[..., -1] += offset
    img_b = torch.arange(float(height * width)).view(
        1, channels, height, width).repeat(batch_size, 1, 1, 1)
    img_a = tgm.warp_affine(img_b, aff_ab, (height, width))
    assert utils.check_equal_torch(img_b[..., :2, :3], img_a[..., 1:, 1:])
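The convention this assertion pins down: warp_affine applies the matrix as a source-to-destination map, so a positive translation moves content toward larger indices, which is why the shifted img_a[..., 1:, 1:] matches the original img_b[..., :2, :3]. Building such a pure-translation matrix by hand is short:

# A pure translation: identity linear part plus offsets (tx, ty).
import torch

tx, ty = 1., 1.
M = torch.tensor([[[1., 0., tx],
                   [0., 1., ty]]])  # 1x2x3, batch size 1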
Example #4
def affine(tensor, matrix):
    """Apply an affine transformation to the image.

    Args:
        tensor (torch.Tensor): The image tensor to be warped.
        matrix (torch.Tensor): The 2x3 affine transformation matrix.

    Returns:
        Tensor: The warped image.
    """
    # warp_affine expects batched BCHW input, so temporarily add a batch dim.
    is_unbatched = tensor.ndimension() == 3
    if is_unbatched:
        tensor = tensor.unsqueeze(0)
    # warp_affine also expects a batched Bx2x3 matrix; broadcast if needed.
    if matrix.ndimension() == 2:
        matrix = matrix.unsqueeze(0)
    matrix = matrix.expand(tensor.shape[0], -1, -1)
    warped = warp_affine(tensor, matrix, tensor.size()[-2:])
    if is_unbatched:
        warped = warped.squeeze(0)
    return warped
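A quick unbatched usage sketch (relies on the 2x3-matrix handling shown above; values are illustrative):

# Hypothetical usage: shift a single CHW image 4 pixels along x.
import torch

img = torch.rand(3, 32, 32)          # unbatched CHW input
M = torch.tensor([[1., 0., 4.],
                  [0., 1., 0.]])     # 2x3 translation matrix
out = affine(img, M)                 # -> [3, 32, 32]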
Example #5
    def align_fake(self, margin=20):
        # get params
        desiredLeftEye = [
            float(self.alignment_params["desiredLeftEye"][0]),
            float(self.alignment_params["desiredLeftEye"][1])
        ]
        rotation_point = self.alignment_params["eyesCenter"]
        angle = -self.alignment_params["angle"]
        h, w = self.fake_B.shape[2:]
        # get original positions
        m1 = round(w * 0.5)
        m2 = round(desiredLeftEye[0] * w)
        # define the scale factor
        scale = 1 / self.alignment_params["scale"]
        width = int(self.alignment_params["shape"][0])
        long_edge_size = width / abs(np.cos(np.deg2rad(angle)))
        w_original = int(scale * long_edge_size)
        h_original = int(scale * long_edge_size)
        # get offset
        tX = w_original * 0.5
        tY = h_original * desiredLeftEye[1]
        # get rotation center
        center = torch.ones(1, 2)
        center[..., 0] = m1
        center[..., 1] = m2
        # compute the transformation matrix
        M = tgm.get_rotation_matrix2d(center, angle, scale).to(self.device)
        M[0, 0, 2] += (tX - m1)
        M[0, 1, 2] += (tY - m2)
        # get insertion point
        x_start = int(rotation_point[0] - (0.5 * w_original))
        y_start = int(rotation_point[1] - (desiredLeftEye[0] * h_original))
        _, _, h_tensor, w_tensor = self.real_B_unaligned_full.shape

        # Now apply the transformation to original image
        # clone fake
        fake_B_clone = self.fake_B.clone().requires_grad_(True)
        # apply warp
        fake_B_warped = tgm.warp_affine(fake_B_clone,
                                        M,
                                        dsize=(h_original, w_original))
        # clone warped
        self.fake_B_unaligned = fake_B_warped.clone().requires_grad_(True)

        # make sure warping does not exceed real_B_unaligned_full dimensions
        if y_start < 0:
            fake_B_warped = fake_B_warped[:, :, abs(y_start):h_original, :]
            h_original += y_start
            y_start = 0
        if x_start < 0:
            fake_B_warped = fake_B_warped[:, :, :, abs(x_start):w_original]
            w_original += x_start
            x_start = 0
        if y_start + h_original > h_tensor:
            h_original -= (y_start + h_original - h_tensor)
            fake_B_warped = fake_B_warped[:, :, 0:h_original, :]
        if x_start + w_original > w_tensor:
            w_original -= (x_start + w_original - w_tensor)
            fake_B_warped = fake_B_warped[:, :, :, 0:w_original]

        # create a mask that is True where fake_B_warped is 0 in all channels;
        # this is the background the warp did not cover with image content
        mask = ((fake_B_warped[0][0] == 0) & (fake_B_warped[0][1] == 0) &
                (fake_B_warped[0][2] == 0))
        # where the mask is True, fill in pixels from real_B_unaligned_full
        fake_B_filled = torch.where(
            mask,
            self.real_B_unaligned_full[:, :, y_start:y_start + h_original,
                                       x_start:x_start + w_original],
            fake_B_warped)

        # reinsert into tensor
        self.fake_B_unaligned = self.real_B_unaligned_full.clone(
        ).requires_grad_(True)
        mask = torch.zeros_like(self.fake_B_unaligned, dtype=torch.bool)
        mask[0, :, y_start:y_start + h_original,
             x_start:x_start + w_original] = True
        self.fake_B_unaligned = self.fake_B_unaligned.masked_scatter(
            mask, fake_B_filled)

        # cutout tensor
        h_size_tensor, w_size_tensor = self.real_B_unaligned_full.shape[2:]
        # shrink the margin so the final crop stays inside the tensor bounds
        margin = max(
            min(y_start - max(0, y_start - margin),
                x_start - max(0, x_start - margin),
                min(y_start + h_original + margin, h_size_tensor)
                - y_start - h_original,
                min(x_start + w_original + margin, w_size_tensor)
                - x_start - w_original),
            0)
        y0, y1 = y_start - margin, y_start + h_original + margin
        x0, x1 = x_start - margin, x_start + w_original + margin
        self.fake_B_unaligned = self.fake_B_unaligned[:, :, y0:y1, x0:x1]
        self.real_B_unaligned = self.real_B_unaligned_full[:, :, y0:y1, x0:x1]

        self.real_B_unaligned = F.interpolate(self.real_B_unaligned,
                                              size=(300, 300))
        self.fake_B_unaligned = F.interpolate(self.fake_B_unaligned,
                                              size=(300, 300))
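Stripped of the boundary bookkeeping, the warp-then-composite idea above reduces to the following standalone sketch (function name, shapes, and arguments are illustrative, not from the original class):

import torch
import torchgeometry as tgm

def paste_warped(fake, full_image, M, y0, x0, h, w):
    # Warp the generated patch into the coordinate frame of the full image.
    warped = tgm.warp_affine(fake, M, dsize=(h, w))
    # Pixels left at zero in every channel are background the warp missed.
    hole = (warped == 0).all(dim=1, keepdim=True)    # [B, 1, h, w]
    crop = full_image[:, :, y0:y0 + h, x0:x0 + w]
    # Keep the original image in the holes, the warped patch elsewhere.
    return torch.where(hole, crop, warped)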
Example #6
def test_smoke(self):
    # smoke test: warping with an identity matrix must preserve the shape
    batch_size, channels, height, width = 1, 2, 3, 4
    aff_ab = torch.eye(2, 3)[None]  # 1x2x3
    img_b = torch.rand(batch_size, channels, height, width)
    img_a = tgm.warp_affine(img_b, aff_ab, (height, width))
    assert img_b.shape == img_a.shape
Example #7
def forward(self, x, M):
    # warp the batch x with per-sample 2x3 matrices M to a fixed output size
    x = tgm.warp_affine(x,
                        M,
                        dsize=(self.output_height, self.output_width))
    return x
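A plausible enclosing module for this forward (the output_height/output_width attribute names come from the snippet; the rest is an assumed minimal wrapper):

import torch.nn as nn
import torchgeometry as tgm

class WarpAffine(nn.Module):
    # Hypothetical wrapper around tgm.warp_affine with a fixed output size.
    def __init__(self, output_height, output_width):
        super().__init__()
        self.output_height = output_height
        self.output_width = output_width

    def forward(self, x, M):
        # x: [B, C, H, W] images, M: [B, 2, 3] affine matrices
        x = tgm.warp_affine(x,
                            M,
                            dsize=(self.output_height, self.output_width))
        return x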