Example #1
def run_corresponding_points_alignment():
    points_alignment.corresponding_points_alignment(
        X,
        X_t,
        weights,
        allow_reflection=allow_reflection,
        estimate_scale=estimate_scale,
    )
    torch.cuda.synchronize()
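The closure above reads `X`, `X_t`, `weights`, `allow_reflection` and `estimate_scale` from an enclosing benchmark scope that the excerpt omits. Below is a minimal sketch of such a setup, assuming a CUDA device and PyTorch3D's documented convention that the estimated transform maps X onto X_t as s X R + T; all names are illustrative.

import torch
from pytorch3d.ops import points_alignment
from pytorch3d.transforms import random_rotations

# hypothetical inputs for the closure above (not part of the excerpt)
device = torch.device("cuda:0")
batch, n_points = 8, 10000
X = torch.randn(batch, n_points, 3, device=device)
R = random_rotations(batch, device=device)   # ground-truth rotations
T = torch.randn(batch, 3, device=device)     # ground-truth translations
X_t = torch.bmm(X, R) + T[:, None, :]        # row-vector convention: X R + T
weights = None                               # None means equal weights
allow_reflection = False
estimate_scale = False

R_est, T_est, s_est = points_alignment.corresponding_points_alignment(
    X, X_t, weights,
    allow_reflection=allow_reflection,
    estimate_scale=estimate_scale,
)
torch.cuda.synchronize()  # wait for the GPU before reading any timers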
Example #2
def _compute_norm_sign_scaling_factor(c_cam, alphas, x_world, y, weight, eps=1e-9):
    """ Given a solution, adjusts the scale and flip
    Args:
        c_cam: control points in camera coordinates
        alphas: barycentric coordinates of the points
        x_world: Batch of 3-dimensional points of shape `(minibatch, num_points, 3)`.
        y: Batch of 2-dimensional points of shape `(minibatch, num_points, 2)`.
        weights: Batch of non-negative weights of
            shape `(minibatch, num_point)`. `None` means equal weights.
        eps: epsilon to threshold negative `z` values
    """
    # position of reference points in camera coordinates
    x_cam = torch.matmul(alphas, c_cam)

    x_cam = x_cam * (1.0 - 2.0 * (oputil.wmean(x_cam[..., 2:], weight) < 0).float())
    if torch.any(x_cam[..., 2:] < -eps):
        neg_rate = oputil.wmean((x_cam[..., 2:] < 0).float(), weight, dim=(0, 1)).item()
        warnings.warn("\nEPnP: %2.2f%% points have z<0." % (neg_rate * 100.0))

    R, T, s = points_alignment.corresponding_points_alignment(
        x_world, x_cam, weight, estimate_scale=True
    )
    s = s.clamp(eps)
    x_cam = x_cam / s[:, None, None]
    T = T / s[:, None]
    x_w_rotated = torch.matmul(x_world, R) + T[:, None, :]
    err_2d = _reproj_error(x_w_rotated, y, weight)
    err_3d = _algebraic_error(x_w_rotated, x_cam, weight)

    return EpnpSolution(x_cam, R, T, err_2d, err_3d)
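The line `x_cam * (1.0 - 2.0 * (oputil.wmean(x_cam[..., 2:], weight) < 0).float())` flips the whole point cloud through the origin whenever the weighted mean depth is negative, since a valid solution must place the points in front of the camera. A standalone illustration of the trick, with a plain mean standing in for pytorch3d's `oputil.wmean`:

import torch

# two points behind the camera (negative z)
x_cam = torch.tensor([[[0.5, 0.0, -1.0],
                       [0.0, 0.5, -2.0]]])
mean_z = x_cam[..., 2:].mean(dim=-2, keepdim=True)  # stand-in for oputil.wmean
flip = 1.0 - 2.0 * (mean_z < 0).float()             # -1 if mean depth < 0, else +1
x_cam = x_cam * flip
print(x_cam[..., 2])  # tensor([[1., 2.]]): the cloud now sits in front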
Example #3
            def align_and_get_mse(weights_):
                R_n, T_n, s_n = points_alignment.corresponding_points_alignment(
                    X_noisy,
                    X_t,
                    weights_,
                    allow_reflection=allow_reflection,
                    estimate_scale=estimate_scale,
                )

                X_t_est = _apply_pcl_transformation(X_noisy, R_n, T_n, s=s_n)

                return (((X_t_est - X_t) * weights[..., None]) ** 2).sum(
                    dim=(1, 2)
                ) / weights.sum(dim=-1)
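The returned value is a per-batch weighted MSE between the re-aligned noisy cloud and the target; note that the weights multiply the residuals before squaring, so they enter the numerator quadratically. An equivalent, more explicit form (a sketch with illustrative names):

import torch

def weighted_mse(X_est, X_ref, w):
    # same quantity as the return expression above, written out:
    # sum_p w_p^2 * ||X_est[b, p] - X_ref[b, p]||^2 / sum_p w_p, per batch b
    d2 = ((X_est - X_ref) ** 2).sum(dim=-1)           # squared point distances
    return (w ** 2 * d2).sum(dim=-1) / w.sum(dim=-1)  # one scalar per batch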
Example #4
    def compute_eye_param(self, vertices, eye_lm_idx, face_model):
        nsh_vert_lm = vertices[None, eye_lm_idx]
        nsh_std_lm = self.to_tensor(
            self.transfers[face_model].tgt_std_vert)[None, eye_lm_idx]
        R, T, s = corresponding_points_alignment(nsh_vert_lm,
                                                 nsh_std_lm,
                                                 estimate_scale=True)
        R = R.cpu().numpy()[0]
        T = T.cpu().numpy()[0]
        s = s.cpu().numpy()
        angle = Rotation.from_matrix(R).as_euler('xyz')
        eye_param = np.concatenate([angle, T, s])

        return eye_param
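The alignment step can also be exercised standalone. In the sketch below the landmark tensors are dummies and the target is a scaled, shifted copy of the source, so the recovered rotation is (numerically) the identity; `eye_param` packs three Euler angles, the translation, and the scale into a 7-vector as above.

import numpy as np
import torch
from scipy.spatial.transform import Rotation
from pytorch3d.ops import corresponding_points_alignment

src_lm = torch.randn(1, 68, 3)                         # dummy landmarks
tgt_lm = 1.5 * src_lm + torch.tensor([0.1, 0.0, 0.0])  # scaled + shifted copy
R, T, s = corresponding_points_alignment(src_lm, tgt_lm, estimate_scale=True)
angle = Rotation.from_matrix(R[0].numpy()).as_euler('xyz')
eye_param = np.concatenate([angle, T[0].numpy(), s.numpy()])
print(eye_param.shape)  # (7,)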
Example #5
    def _test_single_corresponding_points_alignment(
        self,
        batch_size=10,
        n_points=100,
        dim=3,
        use_pointclouds=False,
        estimate_scale=False,
        reflect=False,
        allow_reflection=False,
        random_weights=False,
    ):
        """
        Executes a single test for `corresponding_points_alignment` for a
        specific setting of the inputs / outputs.
        """

        device = torch.device("cuda:0")

        # initialize a ground-truth point cloud
        X = TestCorrespondingPointsAlignment.init_point_cloud(
            batch_size=batch_size,
            n_points=n_points,
            dim=dim,
            device=device,
            use_pointclouds=use_pointclouds,
            random_pcl_size=True,
        )

        # generate the true transformation
        R, T, s = TestCorrespondingPointsAlignment.generate_pcl_transformation(
            batch_size=batch_size,
            scale=estimate_scale,
            reflect=reflect,
            dim=dim,
            device=device,
        )

        if reflect:
            # generate random reflection M and apply to the rotations
            M = TestCorrespondingPointsAlignment.generate_random_reflection(
                batch_size=batch_size, dim=dim, device=device
            )
            R = torch.bmm(M, R)

        weights = None
        if random_weights:
            template = X.points_padded() if use_pointclouds else X
            weights = torch.rand_like(template[:, :, 0])
            weights = weights / weights.sum(dim=1, keepdim=True)
            # zero out some weights, as zero weights are a common use case;
            # the thresholding below sets the smallest weights exactly to zero
            weights *= (weights * template.size()[1] > 0.3).to(weights)
            if use_pointclouds:  # convert to List[Tensor]
                weights = [
                    w[:npts] for w, npts in zip(weights, X.num_points_per_cloud())
                ]

        # apply the generated transformation to the generated
        # point cloud X
        X_t = _apply_pcl_transformation(X, R, T, s=s)

        # run the CorrespondingPointsAlignment algorithm
        R_est, T_est, s_est = points_alignment.corresponding_points_alignment(
            X,
            X_t,
            weights,
            allow_reflection=allow_reflection,
            estimate_scale=estimate_scale,
        )

        assert_error_message = (
            f"Corresponding_points_alignment assertion failure for "
            f"n_points={n_points}, "
            f"dim={dim}, "
            f"use_pointclouds={use_pointclouds}, "
            f"estimate_scale={estimate_scale}, "
            f"reflect={reflect}, "
            f"allow_reflection={allow_reflection},"
            f"random_weights={random_weights}."
        )

        # if we test the weighted case, check that weights help with noise
        if random_weights and not use_pointclouds and n_points >= (dim + 10):
            # add noise to 20% points with smallest weight
            X_noisy = X_t.clone()
            _, mink_idx = torch.topk(-weights, int(n_points * 0.2), dim=1)
            mink_idx = mink_idx[:, :, None].expand(-1, -1, X_t.shape[-1])
            X_noisy.scatter_add_(
                1, mink_idx, 0.3 * torch.randn_like(mink_idx, dtype=X_t.dtype)
            )

            def align_and_get_mse(weights_):
                R_n, T_n, s_n = points_alignment.corresponding_points_alignment(
                    X_noisy,
                    X_t,
                    weights_,
                    allow_reflection=allow_reflection,
                    estimate_scale=estimate_scale,
                )

                X_t_est = _apply_pcl_transformation(X_noisy, R_n, T_n, s=s_n)

                return (((X_t_est - X_t) * weights[..., None]) ** 2).sum(
                    dim=(1, 2)
                ) / weights.sum(dim=-1)

            # check that using weights leads to lower weighted_MSE(X_noisy, X_t)
            self.assertTrue(
                torch.all(align_and_get_mse(weights) <= align_and_get_mse(None))
            )

        if reflect and not allow_reflection:
            # check that all rotations have det=1
            self._assert_all_close(
                torch.det(R_est), R_est.new_ones(batch_size), assert_error_message
            )

        else:
            # mask out inputs with too few non-degenerate points for assertions
            w = (
                torch.ones_like(R_est[:, 0, 0])
                if weights is None or n_points >= dim + 10
                else (weights > 0.0).all(dim=1).to(R_est)
            )
            # check that the estimated transformation is the same
            # as the ground truth
            if n_points >= (dim + 1):
                # the checks on transforms apply only when
                # the problem setup is unambiguous
                msg = assert_error_message
                self._assert_all_close(R_est, R, msg, w[:, None, None], atol=1e-5)
                self._assert_all_close(T_est, T, msg, w[:, None])
                self._assert_all_close(s_est, s, msg, w)

                # check that the orthonormal part of the
                # transformation has a correct determinant (+1/-1)
                desired_det = R_est.new_ones(batch_size)
                if reflect:
                    desired_det *= -1.0
                self._assert_all_close(torch.det(R_est), desired_det, msg, w)

            # check that the transformed point cloud
            # X matches X_t
            X_t_est = _apply_pcl_transformation(X, R_est, T_est, s=s_est)
            self._assert_all_close(
                X_t, X_t_est, assert_error_message, w[:, None, None], atol=1e-5
            )
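The helper `_apply_pcl_transformation` is defined elsewhere in the test module; for dense tensors it reduces to the same s (X R) + T composition seen in Example #6. A minimal sketch of the dense case (the Pointclouds branch of the real helper is omitted):

import torch

def apply_pcl_transformation(X, R, T, s=None):
    # X: (minibatch, num_points, dim), R: (minibatch, dim, dim),
    # T: (minibatch, dim), s: (minibatch,) or None
    X_t = torch.bmm(X, R)             # row-vector convention: X R
    if s is not None:
        X_t = s[:, None, None] * X_t  # per-batch isotropic scale
    return X_t + T[:, None, :]        # per-batch translation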
Example #6
    def predict(self,
                image,
                out_dir,
                idx=None,
                deploy=False,
                face_model='230'):
        '''deploy for nsh'''
        if not deploy and idx is not None:
            idx = '{:>05d}'.format(idx)

        images, uvmaps, params, nsh_face_vert, nsh_neu_vert = self.preprocess(
            image, face_model)

        fnames = []

        gen_uvmaps = self.inpaint_model.forward(images[:, :3],
                                                uvmaps,
                                                nsh_face_vert[None],
                                                params,
                                                fix_uv=True,
                                                deploy=deploy,
                                                face_model=face_model)
        nsh_uv = F.interpolate(gen_uvmaps.detach(),
                               size=1024,
                               mode='bilinear',
                               align_corners=False)[0]

        fnames.append(os.path.join(out_dir, '{}_uv.png'.format(idx)))
        self.imsave(fnames[-1], nsh_uv, False, True)

        lm_idx = self.to_tensor(self.transfers[face_model].lm_icp_idx,
                                torch.int64)
        nsh_vert_lm = nsh_neu_vert[None, lm_idx]
        nsh_std_lm = self.to_tensor(
            self.transfers[face_model].tgt_std_vert)[None, lm_idx]
        R, T, s = corresponding_points_alignment(nsh_vert_lm,
                                                 nsh_std_lm,
                                                 estimate_scale=True)
        s = s * 0.97

        nsh_neu_vert_trans = (
            s[:, None, None] * torch.bmm(nsh_neu_vert[None], R) +
            T[:, None, :])[0]
        nsh_neu_vert = nsh_neu_vert_trans.cpu().numpy()
        nsh_neu_vert = self.transfers[face_model].normalize(nsh_neu_vert)
        fnames.append(os.path.join(out_dir, '{}_neu.obj'.format(idx)))
        meshio.write_obj(
            fnames[-1],
            nsh_neu_vert[self.uv_creators[face_model].nsh_face_start_idx:],
            self.nsh_face_meshes[face_model].triangles,
            texcoords=self.nsh_face_meshes[face_model].texcoords,
            mtllib=True,
            uv_name='{}_uv'.format(idx))

        fnames.append(os.path.join(out_dir, '{}_neu.mtl'.format(idx)))

        try:
            self.imsave(os.path.join(out_dir, '{}_input.jpg'.format(idx)),
                        images[0, :3], True)
        except Exception:
            pass
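The composition `s[:, None, None] * torch.bmm(..., R) + T[:, None, :]` is the same similarity transform used throughout these examples; the extra `s = s * 0.97` deliberately shrinks the aligned mesh slightly. A standalone round-trip check of the composition on synthetic tensors:

import math
import torch
from pytorch3d.ops import corresponding_points_alignment

theta = math.pi / 6  # ground-truth rotation about the z axis
R_gt = torch.tensor([[math.cos(theta), -math.sin(theta), 0.0],
                     [math.sin(theta), math.cos(theta), 0.0],
                     [0.0, 0.0, 1.0]])[None]
T_gt = torch.tensor([[0.2, -0.1, 0.3]])
s_gt = torch.tensor([1.3])
src = torch.randn(1, 100, 3)
tgt = s_gt[:, None, None] * torch.bmm(src, R_gt) + T_gt[:, None, :]

R, T, s = corresponding_points_alignment(src, tgt, estimate_scale=True)
aligned = s[:, None, None] * torch.bmm(src, R) + T[:, None, :]
print(torch.allclose(aligned, tgt, atol=1e-4))  # True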