    def regularize_tangent_vec(self, tangent_vec, base_point, metric=None):
        """
        In 3D, regularize a tangent vector so that the norm of its parallel
        transport to the identity, determined by the metric, is less than pi.
        """
        tangent_vec = gs.to_ndarray(tangent_vec, to_ndim=2)
        _, vec_dim = tangent_vec.shape
        if vec_dim == 3:
            if metric is None:
                metric = self.left_canonical_metric
            base_point = self.regularize(base_point)

            jacobian = self.jacobian_translation(
                point=base_point, left_or_right=metric.left_or_right)
            inv_jacobian = gs.linalg.inv(jacobian)
            tangent_vec_at_id = gs.dot(
                tangent_vec, gs.transpose(inv_jacobian, axes=(0, 2, 1)))
            tangent_vec_at_id = gs.squeeze(tangent_vec_at_id, axis=1)

            tangent_vec_at_id = self.regularize_tangent_vec_at_identity(
                tangent_vec_at_id, metric)

            regularized_tangent_vec = gs.dot(
                tangent_vec_at_id, gs.transpose(jacobian, axes=(0, 2, 1)))
            regularized_tangent_vec = gs.squeeze(regularized_tangent_vec,
                                                 axis=1)
        else:
            # TODO(nina): is regularization needed in nD?
            regularized_tangent_vec = tangent_vec
        return regularized_tangent_vec
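
For reference, here is a pure-NumPy sketch (not the library method itself) of the angle regularization this relies on at the identity: a rotation vector is re-expressed so that its norm, i.e. the rotation angle, lies in [0, pi].

import numpy as np

def regularize_rotation_vector(rot_vec, atol=1e-10):
    """Re-express a rotation vector with an angle of norm at most pi."""
    angle = np.linalg.norm(rot_vec)
    if angle < atol:
        return rot_vec
    wrapped = np.mod(angle, 2 * np.pi)       # same rotation, angle in [0, 2*pi)
    if wrapped > np.pi:
        wrapped -= 2 * np.pi                 # same rotation, angle in (-pi, pi]
    return rot_vec / angle * wrapped

print(regularize_rotation_vector(np.array([4.0, 0.0, 0.0])))
# approximately [-2.283, 0., 0.]: the same rotation with |angle| <= pi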
Example #2
        def wrapper(*args, **kwargs):
            vect_args = []
            initial_shapes = []
            initial_ndims = []
            for param, point_type in zip(args, point_types):
                if point_type == 'scalar':
                    param = gs.array(param)
                initial_shapes.append(param.shape)
                initial_ndims.append(gs.ndim(param))

                if point_type == 'scalar':
                    vect_param = gs.to_ndarray(param, to_ndim=1)
                    vect_param = gs.to_ndarray(vect_param, to_ndim=2, axis=1)
                else:
                    vect_param = gs.to_ndarray(
                        param, POINT_TYPES_TO_NDIMS[point_type])
                vect_args.append(vect_param)
            result = function(*vect_args, **kwargs)

            if squeeze_output_dim_1(result, initial_shapes, point_types):
                result = gs.squeeze(result, axis=1)

            if squeeze_output_dim_0(initial_ndims, point_types):
                result = gs.squeeze(result, axis=0)
            return result
    def random_unit_tangent_vec(self, base_point, n_vectors=1):
        """Generate a random unit tangent vector at a given point.

        Parameters
        ----------
        base_point : array-like, shape=[..., dim]
            Base point.
        n_vectors : int
            Number of vectors to be generated at base_point.
            For vectorization purposes, n_vectors can be greater than 1 only
            if base_point is a single point.

        Returns
        -------
        normalized_vector : array-like, shape=[..., n_vectors, dim]
            Random unit tangent vector at base_point.
        """
        shape = base_point.shape
        if len(shape) > 1 and shape[-2] > 1 and n_vectors > 1:
            raise ValueError(
                "Several tangent vectors is only applicable to a single base point."
            )
        random_vector = gs.squeeze(gs.random.rand(n_vectors, *shape))
        normalized_vector = self.normalize(random_vector, base_point)
        return gs.squeeze(normalized_vector)
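
The normalization above delegates to the manifold's metric. A minimal pure-NumPy sketch of the same idea on the 2-sphere (not the library call): project Gaussian draws onto the tangent space at the base point and normalize.

import numpy as np

rng = np.random.default_rng(0)
base_point = np.array([0.0, 0.0, 1.0])                 # north pole of S^2
random_vecs = rng.standard_normal((3, 3))              # n_vectors = 3
tangent = random_vecs - np.outer(random_vecs @ base_point, base_point)
unit_tangent = tangent / np.linalg.norm(tangent, axis=1, keepdims=True)

print(np.allclose(unit_tangent @ base_point, 0.0))             # tangency
print(np.allclose(np.linalg.norm(unit_tangent, axis=1), 1.0))  # unit norm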
Example #4
def adapt_result(result, initial_shapes, args_kwargs_types, is_scal):
    """Adapt shape of output.

    This function squeezes dim 0 or 1 of the output, depending on:
    - the type of the output: scalar vs else,
    - the initial shapes of args and kwargs provided by the user.

    Parameters
    ----------
    result : unspecified
        Output of the function.
    initial_shapes : list
        Shapes of args and kwargs provided by the user.
    args_kwargs_types : list
        Types of args and kwargs.
    is_scal : bool
        Boolean determining if the output 'result' is a scalar.

    Returns
    -------
    result : unspecified
        Output of the function, with adapted shape.
    """
    if squeeze_output_dim_1(result, initial_shapes, args_kwargs_types,
                            is_scal):
        if result.shape[1] == 1:
            result = gs.squeeze(result, axis=1)

    if squeeze_output_dim_0(result, initial_shapes, args_kwargs_types):
        if result.shape[0] == 1:
            result = gs.squeeze(result, axis=0)

    return result
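
Roughly, the adaptation amounts to the following plain-NumPy pattern (the actual squeeze_output_dim_* helpers also inspect the input types and shapes):

import numpy as np

result = np.array([[3.14]])        # vectorized scalar output, shape (1, 1)
if result.ndim > 1 and result.shape[1] == 1:
    result = np.squeeze(result, axis=1)
if result.ndim > 0 and result.shape[0] == 1:
    result = np.squeeze(result, axis=0)
print(result.shape)                # -> (), a plain scalar again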
Example #5
    def inner_product(self, tangent_vec_a, tangent_vec_b, base_point=None):
        """Inner product between two tangent vectors at a base point.

        Parameters
        ----------
        tangent_vec_a : array-like, shape=[n_samples, dimension]
            or shape=[1, dimension]
        tangent_vec_b : array-like, shape=[n_samples, dimension]
            or shape=[1, dimension]
        base_point : array-like, shape=[n_samples, dimension]
            or shape=[1, dimension]

        Returns
        -------
        inner_product : array-like, shape=[n_samples, 1]
        """
        tangent_vec_a = gs.to_ndarray(tangent_vec_a, to_ndim=2)
        tangent_vec_b = gs.to_ndarray(tangent_vec_b, to_ndim=2)
        n_tangent_vec_a = gs.shape(tangent_vec_a)[0]
        n_tangent_vec_b = gs.shape(tangent_vec_b)[0]

        inner_prod_mat = self.inner_product_matrix(base_point)
        inner_prod_mat = gs.to_ndarray(inner_prod_mat, to_ndim=3)
        n_mats = gs.shape(inner_prod_mat)[0]

        if n_tangent_vec_a != n_mats:
            if n_tangent_vec_a == 1:
                tangent_vec_a = gs.squeeze(tangent_vec_a, axis=0)
                einsum_str_a = 'j,njk->nk'
            elif n_mats == 1:
                inner_prod_mat = gs.squeeze(inner_prod_mat, axis=0)
                einsum_str_a = 'nj,jk->nk'
            else:
                raise ValueError('Shape mismatch for einsum.')
        else:
            einsum_str_a = 'nj,njk->nk'

        aux = gs.einsum(einsum_str_a, tangent_vec_a, inner_prod_mat)
        n_auxs, _ = gs.shape(aux)

        if n_tangent_vec_b != n_auxs:
            if n_auxs == 1:
                aux = gs.squeeze(aux, axis=0)
                einsum_str_b = 'k,nk->n'
            elif n_tangent_vec_b == 1:
                tangent_vec_b = gs.squeeze(tangent_vec_b, axis=0)
                einsum_str_b = 'nk,k->n'
            else:
                raise ValueError('Shape mismatch for einsum.')
        else:
            einsum_str_b = 'nk,nk->n'

        inner_prod = gs.einsum(einsum_str_b, aux, tangent_vec_b)
        inner_prod = gs.to_ndarray(inner_prod, to_ndim=2, axis=1)

        assert gs.ndim(inner_prod) == 2, inner_prod.shape
        return inner_prod
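
The einsum strings above broadcast a single tangent vector against a batch of inner-product matrices (or vice versa). A small NumPy illustration of that pattern:

import numpy as np

tangent_vec_a = np.array([1.0, 2.0])                    # single vector, dim 2
tangent_vec_b = np.array([[1.0, 0.0], [0.0, 1.0]])      # two vectors
metric_mats = np.stack([np.eye(2), 2.0 * np.eye(2)])    # two metric matrices

aux = np.einsum('j,njk->nk', tangent_vec_a, metric_mats)
inner_prod = np.einsum('nk,nk->n', aux, tangent_vec_b)
print(inner_prod)                                       # -> [1. 4.]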
Example #6
def grad(y_pred, y_true,
         metric=SO3.bi_invariant_metric,
         representation='vector'):
    """Closed-form for the gradient of pose_loss.

    Parameters
    ----------
    y_pred : array-like
        Prediction on SO(3).
    y_true : array-like
        Ground-truth on SO(3).
    metric : RiemannianMetric
        Metric used to compute the loss and gradient.
    representation : str, {'vector', 'quaternion'}
        Representation chosen for points in SO(3).

    Returns
    -------
    lie_grad : array-like
        Tangent vector at point y_pred.
    """
    y_pred = gs.expand_dims(y_pred, axis=0)
    y_true = gs.expand_dims(y_true, axis=0)

    if representation == 'vector':
        lie_grad = lie_group.grad(y_pred, y_true, SO3, metric)

    if representation == 'quaternion':
        quat_scalar = y_pred[:, :1]
        quat_vec = y_pred[:, 1:]

        quat_vec_norm = gs.linalg.norm(quat_vec, axis=1)
        quat_sq_norm = quat_vec_norm ** 2 + quat_scalar ** 2

        quat_arctan2 = gs.arctan2(quat_vec_norm, quat_scalar)
        differential_scalar = - 2 * quat_vec / (quat_sq_norm)
        differential_scalar = gs.to_ndarray(differential_scalar, to_ndim=2)
        differential_scalar = gs.transpose(differential_scalar)

        differential_vec = (2 * (quat_scalar / quat_sq_norm
                                 - 2 * quat_arctan2 / quat_vec_norm)
                            * (gs.einsum('ni,nj->nij', quat_vec, quat_vec)
                               / quat_vec_norm ** 2)
                            + 2 * quat_arctan2 / quat_vec_norm * gs.eye(3))
        differential_vec = gs.squeeze(differential_vec)

        differential = gs.concatenate(
            [differential_scalar, differential_vec],
            axis=1)

        y_pred = SO3.rotation_vector_from_quaternion(y_pred)
        y_true = SO3.rotation_vector_from_quaternion(y_true)

        lie_grad = lie_group.grad(y_pred, y_true, SO3, metric)

        lie_grad = gs.matmul(lie_grad, differential)

    lie_grad = gs.squeeze(lie_grad, axis=0)
    return lie_grad
Example #7
    def norm_factor_gradient(self, variances):
        """Compute normalization factor and its gradient.

        Compute normalization factor given current variance
        and dimensionality.

        Parameters
        ----------
        variances : array-like, shape=[n]
            Value of variance.

        Returns
        -------
        norm_factor : array-like, shape=[n]
            Normalization factor.
        norm_factor_gradient : array-like, shape=[n]
            Gradient of the normalization factor.
        """
        variances = gs.transpose(gs.to_ndarray(variances, to_ndim=2))
        dim_range = gs.arange(0, self.dim, 1.0)
        alpha = self._compute_alpha(dim_range)

        binomial_coefficient = gs.ones(self.dim)
        binomial_coefficient[1:] = (self.dim - 1 + 1 - dim_range[1:]) / dim_range[1:]
        binomial_coefficient = gs.cumprod(binomial_coefficient)

        beta = ((-gs.ones(self.dim)) ** dim_range) * binomial_coefficient

        sigma_repeated = gs.repeat(variances, self.dim, -1)
        prod_alpha_sigma = gs.einsum("ij,j->ij", sigma_repeated, alpha)
        term_2 = gs.exp((prod_alpha_sigma) ** 2) * (1 + gs.erf(prod_alpha_sigma))
        term_1 = gs.sqrt(gs.pi / 2.0) * (1.0 / (2 ** (self.dim - 1)))
        term_2 = gs.einsum("ij,j->ij", term_2, beta)
        norm_factor = term_1 * variances * gs.sum(term_2, axis=-1, keepdims=True)
        grad_term_1 = 1 / variances

        grad_term_21 = 1 / gs.sum(term_2, axis=-1, keepdims=True)

        grad_term_211 = (
            gs.exp((prod_alpha_sigma) ** 2)
            * (1 + gs.erf(prod_alpha_sigma))
            * gs.einsum("ij,j->ij", sigma_repeated, alpha**2)
            * 2
        )

        grad_term_212 = gs.repeat(
            gs.expand_dims((2 / gs.sqrt(gs.pi)) * alpha, axis=0),
            variances.shape[0],
            axis=0,
        )

        grad_term_22 = grad_term_211 + grad_term_212
        grad_term_22 = gs.einsum("ij, j->ij", grad_term_22, beta)
        grad_term_22 = gs.sum(grad_term_22, axis=-1, keepdims=True)

        norm_factor_gradient = grad_term_1 + (grad_term_21 * grad_term_22)

        return gs.squeeze(norm_factor), gs.squeeze(norm_factor_gradient)
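
A generic central-difference utility (plain NumPy, with a stand-in function) of the kind that can be used to sanity-check an analytic gradient such as the one returned here:

import numpy as np

def central_difference(func, x, eps=1e-6):
    """Elementwise numerical derivative of func at x."""
    return (func(x + eps) - func(x - eps)) / (2.0 * eps)

func = np.exp                            # stand-in whose derivative is known
x = np.array([0.5, 1.0, 2.0])
print(np.allclose(central_difference(func, x), np.exp(x), atol=1e-5))  # True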
Example #8
    def regularize_tangent_vec_at_identity(self,
                                           tangent_vec,
                                           metric=None,
                                           point_type=None):
        """
        In 3D, regularize a tangent_vector by getting its norm at the identity,
        determined by the metric, to be less than pi.
        """
        if point_type is None:
            point_type = self.default_point_type

        if point_type == 'vector':
            tangent_vec = gs.to_ndarray(tangent_vec, to_ndim=2)

            if self.n == 3:
                if metric is None:
                    metric = self.left_canonical_metric
                tangent_vec_metric_norm = metric.norm(tangent_vec)
                tangent_vec_canonical_norm = gs.linalg.norm(tangent_vec,
                                                            axis=1)
                if gs.ndim(tangent_vec_canonical_norm) == 1:
                    tangent_vec_canonical_norm = gs.expand_dims(
                        tangent_vec_canonical_norm, axis=1)

                mask_norm_0 = gs.isclose(tangent_vec_metric_norm, 0)
                mask_canonical_norm_0 = gs.isclose(tangent_vec_canonical_norm,
                                                   0)

                mask_0 = mask_norm_0 | mask_canonical_norm_0
                mask_else = ~mask_0

                mask_0 = gs.squeeze(mask_0, axis=1)
                mask_else = gs.squeeze(mask_else, axis=1)

                coef = gs.empty_like(tangent_vec_metric_norm)
                regularized_vec = tangent_vec

                regularized_vec[mask_0] = tangent_vec[mask_0]

                coef[mask_else] = (tangent_vec_metric_norm[mask_else] /
                                   tangent_vec_canonical_norm[mask_else])
                regularized_vec[mask_else] = self.regularize(
                    coef[mask_else] * tangent_vec[mask_else])
                regularized_vec[mask_else] = (regularized_vec[mask_else] /
                                              coef[mask_else])
            else:
                # TODO(nina): regularization needed in nD?
                regularized_vec = tangent_vec

        elif point_type == 'matrix':
            # TODO(nina): regularization in terms
            # of skew-symmetric matrices?
            regularized_vec = tangent_vec

        return regularized_vec
Example #9
    def test_point_to_pdf(self, n_draws, point, n_samples):
        point = gs.to_ndarray(point, 1)
        n_points = point.shape[0]
        pmf = self.space(n_draws).point_to_pmf(point)
        samples = gs.to_ndarray(self.space(n_draws).sample(point, n_samples), 1)
        result = gs.squeeze(pmf(samples))
        pmf = []
        for i in range(n_points):
            pmf.append(gs.array([binom.pmf(x, n_draws, point[i]) for x in samples]))
        expected = gs.squeeze(gs.stack(pmf, axis=0))
        self.assertAllClose(result, expected)
Example #10
    def _exponential_matrix(self, rot_vec):
        """Compute exponential of rotation matrix represented by rot_vec.

        Parameters
        ----------
        rot_vec : array-like, shape=[..., 3]
            Rotation vector.

        Returns
        -------
        exponential_mat : array-like
            Matrix exponential of rot_vec.
        """
        # TODO (nguigs): find usecase for this method
        rot_vec = self.rotations.regularize(rot_vec)
        n_rot_vecs = 1 if rot_vec.ndim == 1 else len(rot_vec)

        angle = gs.linalg.norm(rot_vec, axis=-1)
        angle = gs.to_ndarray(angle, to_ndim=2, axis=1)

        skew_rot_vec = self.rotations.skew_matrix_from_vector(rot_vec)

        coef_1 = gs.empty_like(angle)
        coef_2 = gs.empty_like(coef_1)

        mask_0 = gs.equal(angle, 0)
        mask_0 = gs.squeeze(mask_0, axis=1)
        mask_close_to_0 = gs.isclose(angle, 0)
        mask_close_to_0 = gs.squeeze(mask_close_to_0, axis=1)
        mask_else = ~mask_0 & ~mask_close_to_0

        coef_1[mask_close_to_0] = (1. / 2.
                                   - angle[mask_close_to_0] ** 2 / 24.)
        coef_2[mask_close_to_0] = (1. / 6.
                                   - angle[mask_close_to_0] ** 3 / 120.)

        # TODO (nina): Check if the discontinuity at 0 is expected.
        coef_1[mask_0] = 0
        coef_2[mask_0] = 0

        coef_1[mask_else] = (angle[mask_else] ** (-2)
                             * (1. - gs.cos(angle[mask_else])))
        coef_2[mask_else] = (angle[mask_else] ** (-2)
                             * (1. - (gs.sin(angle[mask_else])
                                      / angle[mask_else])))

        term_1 = gs.zeros((n_rot_vecs, self.n, self.n))
        term_2 = gs.zeros_like(term_1)

        for i in range(n_rot_vecs):
            term_1[i] = gs.eye(self.n) + skew_rot_vec[i] * coef_1[i]
            term_2[i] = gs.matmul(skew_rot_vec[i], skew_rot_vec[i]) * coef_2[i]

        exponential_mat = term_1 + term_2

        return exponential_mat
Example #11
    def test_point_to_pdf(self, point, n_samples):
        point = gs.to_ndarray(point, 1)
        n_points = point.shape[0]
        pdf = self.space().point_to_pdf(point)
        samples = gs.to_ndarray(self.space().sample(point, n_samples), 1)
        result = gs.squeeze(pdf(samples))
        pdf = []
        for i in range(n_points):
            pdf.append(
                gs.array([expon.pdf(x, scale=point[i]) for x in samples]))
        expected = gs.squeeze(gs.stack(pdf, axis=0))
        self.assertAllClose(result, expected)
    def exponential_matrix(self, rot_vec):
        """
        Compute the exponential of the rotation matrix
        represented by rot_vec.

        :param rot_vec: 3D rotation vector
        :returns exponential_mat: 3x3 matrix
        """

        rot_vec = self.rotations.regularize(rot_vec)
        n_rot_vecs, _ = rot_vec.shape

        angle = gs.linalg.norm(rot_vec, axis=1)
        angle = gs.to_ndarray(angle, to_ndim=2, axis=1)

        skew_rot_vec = so_group.skew_matrix_from_vector(rot_vec)

        coef_1 = gs.empty_like(angle)
        coef_2 = gs.empty_like(coef_1)

        mask_0 = gs.equal(angle, 0)
        mask_0 = gs.squeeze(mask_0, axis=1)
        mask_close_to_0 = gs.isclose(angle, 0)
        mask_close_to_0 = gs.squeeze(mask_close_to_0, axis=1)
        mask_else = ~mask_0 & ~mask_close_to_0

        coef_1[mask_close_to_0] = (1. / 2.
                                   - angle[mask_close_to_0] ** 2 / 24.)
        coef_2[mask_close_to_0] = (1. / 6.
                                   - angle[mask_close_to_0] ** 3 / 120.)

        # TODO(nina): check if the discontinuity at 0 is expected.
        coef_1[mask_0] = 0
        coef_2[mask_0] = 0

        coef_1[mask_else] = (angle[mask_else] ** (-2)
                             * (1. - gs.cos(angle[mask_else])))
        coef_2[mask_else] = (angle[mask_else] ** (-2)
                             * (1. - (gs.sin(angle[mask_else])
                                      / angle[mask_else])))

        term_1 = gs.zeros((n_rot_vecs, self.n, self.n))
        term_2 = gs.zeros_like(term_1)

        for i in range(n_rot_vecs):
            term_1[i] = gs.eye(self.n) + skew_rot_vec[i] * coef_1[i]
            term_2[i] = gs.matmul(skew_rot_vec[i], skew_rot_vec[i]) * coef_2[i]

        exponential_mat = term_1 + term_2
        assert exponential_mat.ndim == 3

        return exponential_mat
Example #13
    def matrix_from_rotation_vector(self, rot_vec):
        """Convert rotation vector to rotation matrix.

        Parameters
        ----------
        rot_vec : array-like, shape=[..., 3]
            Rotation vector.

        Returns
        -------
        rot_mat : array-like, shape=[..., 3, 3]
            Rotation matrix.
        """
        rot_vec = self.regularize(rot_vec)

        angle = gs.linalg.norm(rot_vec, axis=1)
        angle = gs.to_ndarray(angle, to_ndim=2, axis=1)

        skew_rot_vec = self.skew_matrix_from_vector(rot_vec)

        coef_1 = gs.zeros_like(angle)
        coef_2 = gs.zeros_like(angle)

        # This avoids dividing by 0.
        mask_0 = gs.isclose(angle, 0.)
        mask_0_float = gs.cast(mask_0, gs.float32) + self.epsilon

        coef_1 += mask_0_float * (1. - (angle ** 2) / 6.)
        coef_2 += mask_0_float * (1. / 2. - angle ** 2 / 24.)

        # This avoids dividing by 0.
        mask_else = ~mask_0
        mask_else_float = gs.cast(mask_else, gs.float32) + self.epsilon

        angle += mask_0_float

        coef_1 += mask_else_float * (gs.sin(angle) / angle)
        coef_2 += mask_else_float * (
            (1. - gs.cos(angle)) / (angle ** 2))

        coef_1 = gs.squeeze(coef_1, axis=1)
        coef_2 = gs.squeeze(coef_2, axis=1)
        term_1 = (gs.eye(self.dim)
                  + gs.einsum('n,njk->njk', coef_1, skew_rot_vec))

        squared_skew_rot_vec = gs.einsum(
            'nij,njk->nik', skew_rot_vec, skew_rot_vec)

        term_2 = gs.einsum('n,njk->njk', coef_2, squared_skew_rot_vec)

        return term_1 + term_2
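
As a cross-check, the same conversion can be computed with Rodrigues' formula and compared against SciPy's Rotation class:

import numpy as np
from scipy.spatial.transform import Rotation

rot_vec = np.array([0.1, -0.2, 0.3])
angle = np.linalg.norm(rot_vec)
skew = np.array([[0., -rot_vec[2], rot_vec[1]],
                 [rot_vec[2], 0., -rot_vec[0]],
                 [-rot_vec[1], rot_vec[0], 0.]])
rodrigues = (np.eye(3)
             + np.sin(angle) / angle * skew
             + (1. - np.cos(angle)) / angle ** 2 * skew @ skew)
print(np.allclose(rodrigues, Rotation.from_rotvec(rot_vec).as_matrix()))  # True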
Example #14
    def random_point(self, n_samples=1, bound=1.):
        """Sample over the hyperbolic space using uniform distribution.

        Sample over the hyperbolic space. The sampling draws from a uniform
        distribution in the intrinsic coordinate system; the samples are then
        transformed into the selected coordinate system.

        Parameters
        ----------
        n_samples : int
            Number of samples.
            Optional, default: 1.
        bound: float
            Bound defining the hypersquare in which to sample uniformly.
            Optional, default: 1.

        Returns
        -------
        samples : array-like, shape=[..., dim + 1]
            Samples in hyperbolic space.
        """
        size = (n_samples, self.dim)
        samples = bound * 2. * (gs.random.rand(*size) - 0.5)

        samples = _Hyperbolic.change_coordinates_system(
            samples, 'intrinsic', self.coords_type)

        if n_samples == 1:
            samples = gs.squeeze(samples, axis=0)
        return samples
Example #15
    def log(self, point, base_point, n_steps=N_STEPS, jacobian=False):
        """Compute the logarithm map.

        Compute logarithm map associated to the Fisher information metric by
        solving the boundary value problem associated to the geodesic ordinary
        differential equation (ODE) using the Christoffel symbols.

        Parameters
        ----------
        point : array-like, shape=[..., dim]
            Point.
        base_point : array-like, shape=[..., dim]
            Base point.
        n_steps : int
            Number of steps for integration.
            Optional, default: 100.
        jacobian : boolean.
            If True, the explicit value of the jacobian is used to solve
            the geodesic boundary value problem.
            Optional, default: False.

        Returns
        -------
        tangent_vec : array-like, shape=[..., dim]
            Initial velocity of the geodesic starting at base_point and
            reaching point at time 1.
        """
        t = gs.linspace(0.0, 1.0, n_steps)
        geodesic = self._geodesic_bvp(initial_point=base_point,
                                      end_point=point,
                                      jacobian=jacobian)
        geodesic_at_t = geodesic(t)
        log = n_steps * (geodesic_at_t[..., 1, :] - geodesic_at_t[..., 0, :])

        return gs.squeeze(gs.stack(log))
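
The log is estimated here as the initial velocity of the discretized geodesic via a forward difference; since the linspace spacing is 1 / (n_steps - 1), the factor n_steps used above is a close approximation for large n_steps. A plain-NumPy sketch on a Euclidean (straight-line) geodesic, where the log should equal point - base_point:

import numpy as np

n_steps = 100
base_point = np.array([1.0, 2.0])
point = np.array([4.0, 6.0])
t = np.linspace(0.0, 1.0, n_steps)
geodesic_at_t = base_point + t[:, None] * (point - base_point)
log_estimate = (n_steps - 1) * (geodesic_at_t[1] - geodesic_at_t[0])
print(np.allclose(log_estimate, point - base_point))   # -> True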
    def rotation_vector_from_quaternion(self, quaternion):
        """
        Convert a unit quaternion into a rotation vector.
        """
        assert self.n == 3, ('The quaternion representation does not exist'
                             ' for rotations in %d dimensions.' % self.n)
        quaternion = gs.to_ndarray(quaternion, to_ndim=2)
        n_quaternions, _ = quaternion.shape

        cos_half_angle = quaternion[:, 0]
        cos_half_angle = gs.clip(cos_half_angle, -1, 1)
        half_angle = gs.arccos(cos_half_angle)

        half_angle = gs.to_ndarray(half_angle, to_ndim=2, axis=1)
        assert half_angle.shape == (n_quaternions, 1)

        rot_vec = gs.zeros_like(quaternion[:, 1:])

        mask_0 = gs.isclose(half_angle, 0)
        mask_0 = gs.squeeze(mask_0, axis=1)
        mask_not_0 = ~mask_0
        rotation_axis = (quaternion[mask_not_0, 1:] /
                         gs.sin(half_angle[mask_not_0]))
        rot_vec[mask_not_0] = (2 * half_angle[mask_not_0] * rotation_axis)

        rot_vec = self.regularize(rot_vec)
        return rot_vec
    def quaternion_from_rotation_vector(self, rot_vec):
        """
        Convert a rotation vector into a unit quaternion.
        """
        assert self.n == 3, ('The quaternion representation does not exist'
                             ' for rotations in %d dimensions.' % self.n)
        rot_vec = self.regularize(rot_vec)
        n_rot_vecs, _ = rot_vec.shape

        angle = gs.linalg.norm(rot_vec, axis=1)
        angle = gs.to_ndarray(angle, to_ndim=2, axis=1)

        rotation_axis = gs.zeros_like(rot_vec)

        mask_0 = gs.isclose(angle, 0)
        mask_0 = gs.squeeze(mask_0, axis=1)
        mask_not_0 = ~mask_0
        rotation_axis[mask_not_0] = rot_vec[mask_not_0] / angle[mask_not_0]

        n_quaternions, _ = rot_vec.shape
        quaternion = gs.zeros((n_quaternions, 4))
        quaternion[:, :1] = gs.cos(angle / 2)
        quaternion[:, 1:] = gs.sin(angle / 2) * rotation_axis[:]

        return quaternion
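
A single-sample NumPy sketch of this conversion, cross-checked against SciPy (which stores quaternions scalar-last, whereas this code uses scalar-first):

import numpy as np
from scipy.spatial.transform import Rotation

rot_vec = np.array([0.3, -0.1, 0.4])
angle = np.linalg.norm(rot_vec)
axis = rot_vec / angle if angle > 1e-10 else np.zeros(3)
quaternion = np.concatenate([[np.cos(angle / 2)], np.sin(angle / 2) * axis])

scipy_quat = Rotation.from_rotvec(rot_vec).as_quat()    # [x, y, z, w]
print(np.allclose(quaternion, np.roll(scipy_quat, 1)))  # -> True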
Example #18
    def exp(self, tangent_vec, base_landmarks):
        """Compute Riemannian exponential of tan vector wrt base landmark set.

        Parameters
        ----------
        tangent_vec : array-like
            Tangent vector at the base landmark set.
        base_landmarks : array-like
            Landmark set, base point of the exponential.

        Returns
        -------
        exp : array-like
            Riemannian exponential of tangent_vec at base_landmarks.
        """
        tangent_vec = gs.to_ndarray(tangent_vec, to_ndim=3)
        base_landmarks = gs.to_ndarray(base_landmarks, to_ndim=3)

        n_landmark_sets, n_landmarks_per_set, n_coords = base_landmarks.shape
        n_tangent_vecs = tangent_vec.shape[0]

        new_dim = n_landmark_sets * n_landmarks_per_set
        new_base_landmarks = gs.reshape(base_landmarks, (new_dim, n_coords))
        new_tangent_vec = gs.reshape(tangent_vec, (new_dim, n_coords))

        exp = self.ambient_metric.exp(new_tangent_vec, new_base_landmarks)
        exp = gs.reshape(exp, (n_tangent_vecs, n_landmarks_per_set, n_coords))
        exp = gs.squeeze(exp)

        return exp
Example #19
    def log(self, landmarks, base_landmarks):
        """Compute Riemannian log of a set of landmarks wrt base landmark set.

        Parameters
        ----------
        landmarks : array-like
            Landmark set.
        base_landmarks : array-like
            Base landmark set.

        Returns
        -------
        log : array-like
            Riemannian logarithm of landmarks at base_landmarks.
        """
        assert landmarks.shape == base_landmarks.shape
        landmarks = gs.to_ndarray(landmarks, to_ndim=3)
        base_landmarks = gs.to_ndarray(base_landmarks, to_ndim=3)

        n_landmark_sets, n_landmarks_per_set, n_coords = landmarks.shape

        landmarks = gs.reshape(
            landmarks, (n_landmark_sets * n_landmarks_per_set, n_coords))
        base_landmarks = gs.reshape(
            base_landmarks, (n_landmark_sets * n_landmarks_per_set, n_coords))
        log = self.ambient_metric.log(landmarks, base_landmarks)
        log = gs.reshape(log, (n_landmark_sets, n_landmarks_per_set, n_coords))
        log = gs.squeeze(log)

        return log
Example #20
    def log(self, landmarks, base_landmarks):
        """Compute Riemannian log of a set of landmarks wrt base landmark set.

        Parameters
        ----------
        landmarks : array-like
            Landmark set.
        base_landmarks : array-like
            Base landmark set.

        Returns
        -------
        log : array-like
            Riemannian logarithm of landmarks at base_landmarks.
        """
        if landmarks.shape != base_landmarks.shape:
            raise NotImplementedError

        n_landmark_sets, n_landmarks_per_set, n_coords = landmarks.shape

        landmarks = gs.reshape(
            landmarks, (n_landmark_sets * n_landmarks_per_set, n_coords))
        base_landmarks = gs.reshape(
            base_landmarks, (n_landmark_sets * n_landmarks_per_set, n_coords))
        log = self.ambient_metric.log(landmarks, base_landmarks)
        log = gs.reshape(log, (n_landmark_sets, n_landmarks_per_set, n_coords))
        log = gs.squeeze(log)

        return log
Example #21
    def belongs(self, point):
        """Check whether point is of the form rotation, translation.

        Parameters
        ----------
        point : array-like, shape=[..., n, n].
            Point to be checked.

        Returns
        -------
        belongs : array-like, shape=[...,]
            Boolean denoting if point belongs to the group.
        """
        point_dim1, point_dim2 = point.shape[-2:]
        belongs = (point_dim1 == point_dim2 == self.n + 1)

        rotation = point[..., :self.n, :self.n]
        rot_belongs = self.rotations.belongs(rotation)

        belongs = gs.logical_and(belongs, rot_belongs)

        last_line_except_last_term = point[..., self.n:, :-1]
        all_but_last_zeros = ~gs.any(last_line_except_last_term, axis=(-2, -1))

        belongs = gs.logical_and(belongs, all_but_last_zeros)

        last_term = point[..., self.n:, self.n:]
        belongs = gs.logical_and(belongs, gs.all(last_term == 1,
                                                 axis=(-2, -1)))

        if point.ndim == 2:
            return gs.squeeze(belongs)
        return gs.flatten(belongs)
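
For reference, a plain-NumPy sketch of the homogeneous block structure this check expects, [[R, t], [0, 1]] with R a rotation matrix:

import numpy as np

n = 3
rotation = np.eye(n)                      # any rotation matrix
translation = np.array([1.0, 2.0, 3.0])
point = np.eye(n + 1)
point[:n, :n] = rotation
point[:n, n] = translation

print(point[n, :n])                       # last row except last term: zeros
print(point[n, n])                        # last term: 1.0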
Example #22
    def random_uniform(self, n_samples=1, tol=1e-6):
        """Sample in SE(n) from the uniform distribution.

        Parameters
        ----------
        n_samples : int
            Number of samples.
            Optional, default: 1.
        tol : unused

        Returns
        -------
        samples : array-like, shape=[..., n + 1, n + 1]
            Sample in SE(n).
        """
        random_translation = self.translations.random_uniform(n_samples)
        random_rotation = self.rotations.random_uniform(n_samples)
        random_rotation = gs.to_ndarray(random_rotation, to_ndim=3)

        random_translation = gs.to_ndarray(random_translation, to_ndim=2)
        random_translation = gs.transpose(
            gs.to_ndarray(random_translation, to_ndim=3, axis=1), (0, 2, 1))

        random_point = gs.concatenate((random_rotation, random_translation),
                                      axis=2)
        last_line = gs.zeros((n_samples, 1, self.n + 1))
        random_point = gs.concatenate((random_point, last_line), axis=1)
        random_point = gs.assignment(random_point, 1, (-1, -1), axis=0)
        if gs.shape(random_point)[0] == 1:
            random_point = gs.squeeze(random_point, axis=0)
        return random_point
Example #23
    def square_root_velocity(self, curve):
        """Compute the square root velocity representation of a curve.

        The velocity is computed using the log map. The case of several curves
        is handled through vectorization. In that case, an index selection
        procedure discards the spurious log between the end point of
        curve[k, :, :] and the starting point of curve[k + 1, :, :].

        Parameters
        ----------
        curve : array-like, shape=[..., n_sampling_points, n_coords]
            Discrete curve.

        Returns
        -------
        srv : array-like, shape=[..., n_sampling_points - 1, n_coords]
            Square root velocity representation of the curve.
        """
        curve = gs.to_ndarray(curve, to_ndim=3)
        n_curves, n_sampling_points, n_coords = curve.shape
        srv_shape = (n_curves, n_sampling_points - 1, n_coords)

        curve = gs.reshape(curve, (n_curves * n_sampling_points, n_coords))
        coef = gs.cast(gs.array(n_sampling_points - 1), gs.float32)
        velocity = coef * self.ambient_metric.log(point=curve[1:, :],
                                                  base_point=curve[:-1, :])
        velocity_norm = self.ambient_metric.norm(velocity, curve[:-1, :])
        srv = velocity / gs.sqrt(velocity_norm)

        index = gs.arange(n_curves * n_sampling_points - 1)
        mask = ~gs.equal((index + 1) % n_sampling_points, 0)
        index_select = gs.gather(index, gs.squeeze(gs.where(mask)))
        srv = gs.reshape(gs.gather(srv, index_select), srv_shape)

        return srv
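
A plain-NumPy sketch of the square root velocity transform for a single curve in a Euclidean ambient space, where log(point, base_point) reduces to point - base_point:

import numpy as np

curve = np.array([[0.0, 0.0], [1.0, 0.0], [1.0, 2.0]])  # 3 sampling points in R^2
n_sampling_points = curve.shape[0]
velocity = (n_sampling_points - 1) * (curve[1:] - curve[:-1])
velocity_norm = np.linalg.norm(velocity, axis=1, keepdims=True)
srv = velocity / np.sqrt(velocity_norm)
print(srv.shape)                                        # -> (2, 2)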
Example #24
    def metric_matrix(self, base_point=None):
        """Compute the inner-product matrix.

        Compute the inner-product matrix of the Fisher information metric
        at the tangent space at base point.

        Parameters
        ----------
        base_point : array-like, shape=[..., dim]
            Base point.

        Returns
        -------
        mat : array-like, shape=[..., dim, dim]
            Inner-product matrix.
        """
        if base_point is None:
            raise ValueError(
                "A base point must be given to compute the metric matrix.")
        base_point = gs.to_ndarray(base_point, to_ndim=2)
        n_points = base_point.shape[0]

        mat_ones = gs.ones((n_points, self.dim, self.dim))
        poly_sum = gs.polygamma(1, gs.sum(base_point, -1))
        mat_diag = from_vector_to_diagonal_matrix(gs.polygamma(1, base_point))

        mat = mat_diag - gs.einsum("i,ijk->ijk", poly_sum, mat_ones)
        return gs.squeeze(mat)
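
The matrix assembled above is F_ij = polygamma(1, x_i) * delta_ij - polygamma(1, sum(x)), which is the form of the Fisher information of a Dirichlet distribution. A SciPy cross-check for a single base point:

import numpy as np
from scipy.special import polygamma

base_point = np.array([0.5, 1.0, 2.0])
fisher = (np.diag(polygamma(1, base_point))
          - polygamma(1, base_point.sum()) * np.ones((3, 3)))
print(fisher)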
Example #25
    def process_function(return_dict, ip=ip, ep=ep):
        solution = solve_bvp(
            bvp, bc, t_int, initialize(ip, ep), fun_jac=fun_jac)
        solution_at_t = solution.sol(t)
        geodesic = solution_at_t[:self.dim, :]
        geod.append(gs.squeeze(gs.transpose(geodesic)))
        return_dict[0] = geod
Example #26
    def predict(self, X):
        """Predict the labels for each data point.

        Label each data point with the cluster having the nearest
        centroid using metric distance.

        Parameters
        ----------
        X : array-like, shape=[..., n_features]
            Input data.

        Returns
        -------
        labels : array-like, shape=[...,]
            Array of predicted cluster indices for each sample.
        """
        if self.centroids is None:
            raise RuntimeError("fit needs to be called first.")
        dists = gs.stack(
            [self.metric.dist(centroid, X) for centroid in self.centroids],
            axis=1)
        dists = gs.squeeze(dists)

        labels = gs.argmin(dists, -1)

        return labels
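
A NumPy sketch of the nearest-centroid assignment above, with Euclidean distances standing in for the Riemannian metric:

import numpy as np

X = np.array([[0.0, 0.1], [0.9, 1.0], [0.2, -0.1]])
centroids = np.array([[0.0, 0.0], [1.0, 1.0]])
dists = np.stack([np.linalg.norm(X - c, axis=1) for c in centroids], axis=1)
labels = np.argmin(dists, axis=-1)
print(labels)                             # -> [0 1 0]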
    def test_space_derivative(self):
        """Test space derivative.

        Check result on an example and vectorization.
        """
        n_points = 3
        dim = 3
        curve = gs.random.rand(n_points, dim)
        result = self.srv_metric_r3.space_derivative(curve)
        delta = 1 / n_points
        d_curve_1 = (curve[1] - curve[0]) / delta
        d_curve_2 = (curve[2] - curve[0]) / (2 * delta)
        d_curve_3 = (curve[2] - curve[1]) / delta
        expected = gs.squeeze(
            gs.vstack(
                (
                    gs.to_ndarray(d_curve_1, 2),
                    gs.to_ndarray(d_curve_2, 2),
                    gs.to_ndarray(d_curve_3, 2),
                )
            )
        )
        self.assertAllClose(result, expected)

        path_of_curves = gs.random.rand(
            self.n_discretized_curves, self.n_sampling_points, dim
        )
        result = self.srv_metric_r3.space_derivative(path_of_curves)
        expected = []
        for i in range(self.n_discretized_curves):
            expected.append(self.srv_metric_r3.space_derivative(path_of_curves[i]))
        expected = gs.stack(expected)
        self.assertAllClose(result, expected)
Example #28
    def random_uniform(self, n_samples=1, tol=1e-6):
        """Sample in GL(n) from the uniform distribution.

        Parameters
        ----------
        n_samples : int, optional
            Number of samples.
        tol: float, optional
            Threshold for the absolute value of the determinant of the
            returned matrix.

        Returns
        -------
        samples : array-like, shape=[..., n, n]
            Point sampled on GL(n).
        """
        samples = gs.random.rand(n_samples, self.n, self.n)
        while True:
            dets = gs.linalg.det(samples)
            indcs = gs.isclose(dets, 0.0, atol=tol)
            num_bad_samples = gs.sum(indcs)
            if num_bad_samples == 0:
                break
            new_samples = gs.random.rand(num_bad_samples, self.n, self.n)
            samples = self._replace_values(samples, new_samples, indcs)
        if n_samples == 1:
            samples = gs.squeeze(samples, axis=0)
        return samples
Example #29
    def random_uniform(self, n_samples=1):
        """Sample in the hypersphere from the uniform distribution.

        Parameters
        ----------
        n_samples : int
            Number of samples.
            Optional, default: 1.

        Returns
        -------
        samples : array-like, shape=[..., dim + 1]
            Points sampled on the hypersphere.
        """
        size = (n_samples, self.dim + 1)

        samples = gs.random.normal(size=size)
        while True:
            norms = gs.linalg.norm(samples, axis=1)
            indcs = gs.isclose(norms, 0.0, atol=gs.atol)
            num_bad_samples = gs.sum(indcs)
            if num_bad_samples == 0:
                break
            new_samples = gs.random.normal(
                size=(num_bad_samples, self.dim + 1))
            samples = self._replace_values(samples, new_samples, indcs)

        samples = gs.einsum('..., ...i->...i', 1 / norms, samples)
        if n_samples == 1:
            samples = gs.squeeze(samples, axis=0)
        return samples
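
A plain-NumPy sketch of the sampling scheme above: normalized Gaussian draws are uniformly distributed on the sphere.

import numpy as np

rng = np.random.default_rng(0)
samples = rng.standard_normal((5, 3))                      # 5 points on S^2 in R^3
samples /= np.linalg.norm(samples, axis=1, keepdims=True)
print(np.allclose(np.linalg.norm(samples, axis=1), 1.0))   # -> True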
    def left_log_from_identity(self, point):
        """Compute Riemannian log of a point wrt. id of left-invar. metric.

        Compute Riemannian logarithm of a point wrt the identity associated
        to the left-invariant metric.

        If the method is called by a right-invariant metric, it uses the
        left-invariant metric associated to the same inner-product matrix
        at the identity.

        Parameters
        ----------
        point : array-like
            Point in the group.

        Returns
        -------
        log : array-like
            Riemannian logarithm of point at the identity.
        """
        point = self.group.regularize(point)
        inner_prod_mat = self.inner_product_mat_at_identity
        inv_inner_prod_mat = gs.linalg.inv(inner_prod_mat)
        sqrt_inv_inner_prod_mat = gs.linalg.sqrtm(inv_inner_prod_mat)
        assert sqrt_inv_inner_prod_mat.shape == ((1, ) +
                                                 (self.group.dimension, ) * 2)
        aux = gs.squeeze(sqrt_inv_inner_prod_mat, axis=0)
        log = gs.matmul(point, aux)
        log = self.group.regularize_tangent_vec_at_identity(tangent_vec=log,
                                                            metric=self)
        assert gs.ndim(log) == 2
        return log