def test_geodesic_vectorization(self):
    """Check the shape of vectorized geodesic paths on the 2-sphere."""
    space = Hypersphere(2)
    metric = space.metric
    n_times = 10
    times = gs.linspace(0, 1, n_times)
    expected_shape = (2, n_times, 3)

    initial_point = space.random_uniform(2)
    vector = gs.random.rand(2, 3)
    initial_tangent_vec = space.to_tangent(
        vector=vector, base_point=initial_point)
    end_point = space.random_uniform(2)

    # Initial-value problem: two base points, two tangent vectors.
    path = metric.geodesic(initial_point, initial_tangent_vec)(times)
    self.assertAllClose(path.shape, expected_shape)

    # Boundary-value problem: two base points, two end points.
    path = metric.geodesic(initial_point, end_point=end_point)(times)
    self.assertAllClose(path.shape, expected_shape)

    # Boundary-value problem with a single shared end point.
    path = metric.geodesic(initial_point, end_point=end_point[0])(times)
    self.assertAllClose(path.shape, expected_shape)

    # Initial-value problem with a single shared base point.
    initial_tangent_vec = space.to_tangent(
        vector=vector, base_point=initial_point[0])
    path = metric.geodesic(initial_point[0], initial_tangent_vec)(times)
    self.assertAllClose(path.shape, expected_shape)
class TestVisualization(geomstats.tests.TestCase):
    """Smoke tests for the visualization helpers.

    Each test only checks that plotting random points on the given
    space runs without raising.
    """

    def setUp(self):
        # Shared fixtures: one space per supported visualization target.
        self.n_samples = 10
        self.SO3_GROUP = SpecialOrthogonal(n=3, point_type='vector')
        self.SE3_GROUP = SpecialEuclidean(n=3, point_type='vector')
        self.S1 = Hypersphere(dim=1)
        self.S2 = Hypersphere(dim=2)
        self.H2 = Hyperbolic(dim=2)
        self.H2_half_plane = PoincareHalfSpace(dim=2)
        # Fresh figure so tests do not draw over one another.
        plt.figure()

    @staticmethod
    def test_tutorial_matplotlib():
        visualization.tutorial_matplotlib()

    def test_plot_points_so3(self):
        points = self.SO3_GROUP.random_uniform(self.n_samples)
        visualization.plot(points, space='SO3_GROUP')

    def test_plot_points_se3(self):
        points = self.SE3_GROUP.random_uniform(self.n_samples)
        visualization.plot(points, space='SE3_GROUP')

    @geomstats.tests.np_and_pytorch_only
    def test_plot_points_s1(self):
        points = self.S1.random_uniform(self.n_samples)
        visualization.plot(points, space='S1')

    def test_plot_points_s2(self):
        points = self.S2.random_uniform(self.n_samples)
        visualization.plot(points, space='S2')

    def test_plot_points_h2_poincare_disk(self):
        points = self.H2.random_uniform(self.n_samples)
        visualization.plot(points, space='H2_poincare_disk')

    def test_plot_points_h2_poincare_half_plane_ext(self):
        # Extrinsic hyperboloid coordinates converted by the plotter.
        points = self.H2.random_uniform(self.n_samples)
        visualization.plot(points, space='H2_poincare_half_plane',
                           point_type='extrinsic')

    def test_plot_points_h2_poincare_half_plane_none(self):
        # Points already in half-space coordinates, default point_type.
        points = self.H2_half_plane.random_uniform(self.n_samples)
        visualization.plot(points, space='H2_poincare_half_plane')

    def test_plot_points_h2_poincare_half_plane_hs(self):
        points = self.H2_half_plane.random_uniform(self.n_samples)
        visualization.plot(points, space='H2_poincare_half_plane',
                           point_type='half_space')

    def test_plot_points_h2_klein_disk(self):
        points = self.H2.random_uniform(self.n_samples)
        visualization.plot(points, space='H2_klein_disk')
def test_extrinsic_to_angle_inverse(self):
    """Check angle <-> extrinsic coordinate changes are mutual inverses."""
    circle = Hypersphere(1)
    point = circle.random_uniform()
    recovered = circle.angle_to_extrinsic(circle.extrinsic_to_angle(point))
    self.assertAllClose(recovered, point)

    # Same round trip starting from an intrinsic (angle) representation.
    intrinsic_circle = Hypersphere(1, default_coords_type='intrinsic')
    angle = intrinsic_circle.random_uniform()
    recovered = intrinsic_circle.extrinsic_to_angle(
        intrinsic_circle.angle_to_extrinsic(angle))
    self.assertAllClose(recovered, angle)
def test_rotate_points(self):
    """Check that rotate_points maps the north pole onto the end point."""
    sphere = Hypersphere(2)
    north_pole = gs.array([1., 0., 0.])
    target = sphere.random_uniform()

    # Rotating the pole itself lands exactly on the target point.
    self.assertAllClose(utils.rotate_points(north_pole, target), target)

    # Rotating towards the pole is the identity rotation.
    samples = sphere.random_uniform(10)
    self.assertAllClose(utils.rotate_points(samples, north_pole), samples)

    # With the pole prepended, its image is the target point.
    samples = gs.concatenate([north_pole[None, :], samples])
    rotated = utils.rotate_points(samples, target)
    self.assertAllClose(rotated[0], target)
class GeomstatsSphere(Manifold):
    """Adapter exposing geomstats' `Hypersphere` through pymanopt's
    `Manifold` interface, proxying each solver call to the wrapped space.
    """

    def __init__(self, ambient_dimension):
        self._sphere = Hypersphere(ambient_dimension - 1)

    def norm(self, base_vector, tangent_vector):
        """Norm of a tangent vector at a base point."""
        return self._sphere.metric.norm(tangent_vector, base_point=base_vector)

    def inner(self, base_vector, tangent_vector_a, tangent_vector_b):
        """Inner product of two tangent vectors at a base point."""
        metric = self._sphere.metric
        return metric.inner_product(
            tangent_vector_a, tangent_vector_b, base_point=base_vector)

    def proj(self, base_vector, tangent_vector):
        """Orthogonal projection onto the tangent space at a base point."""
        return self._sphere.to_tangent(tangent_vector, base_point=base_vector)

    def retr(self, base_vector, tangent_vector):
        """Retraction: map a tangent vector back onto the manifold.

        `Hypersphere` provides no dedicated retraction, so the exponential
        map is used instead (see also
        https://hal.archives-ouvertes.fr/hal-00651608/document).
        """
        return self._sphere.metric.exp(tangent_vector, base_point=base_vector)

    def rand(self):
        """Draw a point uniformly at random on the sphere."""
        return self._sphere.random_uniform()
def test_parallel_transport_trajectory(self, dim, n_samples):
    """Compare ladder schemes to the closed-form parallel transport."""
    sphere = Hypersphere(dim)
    # Scheme -> (number of rungs, comparison tolerance).
    settings = {"pole": (1, 1e-6), "schild": (50, 1e-2)}
    for scheme, (n_rungs, tol) in settings.items():
        base_point = sphere.random_uniform(n_samples)
        vec_a = sphere.to_tangent(gs.random.rand(n_samples, 3), base_point)
        vec_b = sphere.to_tangent(gs.random.rand(n_samples, 3), base_point)
        expected = sphere.metric.parallel_transport(vec_a, base_point, vec_b)
        expected_point = sphere.metric.exp(vec_b, base_point)
        ladder = sphere.metric.ladder_parallel_transport(
            vec_a, base_point, vec_b,
            n_rungs=n_rungs, scheme=scheme, return_geodesics=True)
        self.assertAllClose(
            ladder["transported_tangent_vec"], expected, rtol=tol, atol=tol)
        self.assertAllClose(ladder["end_point"], expected_point)
def test_parallel_transport(self, dim, n_samples):
    """Ladder parallel transport matches the closed-form solution."""
    sphere = Hypersphere(dim)
    base_point = sphere.random_uniform(n_samples)
    vec_a = sphere.to_tangent(gs.random.rand(n_samples, 3), base_point)
    vec_b = sphere.to_tangent(gs.random.rand(n_samples, 3), base_point)
    expected = sphere.metric.parallel_transport(vec_a, base_point, vec_b)
    expected_point = sphere.metric.exp(vec_b, base_point)

    # Work in a common (wider) float dtype for the numerical schemes.
    base_point = gs.cast(base_point, gs.float64)
    base_point, vec_a, vec_b = gs.convert_to_wider_dtype(
        [base_point, vec_a, vec_b])

    for scheme, alpha in zip(["pole", "schild"], [1, 2]):
        min_rungs, tol = (1, 1e-5) if scheme == "pole" else (50, 1e-2)
        for n_rungs in (min_rungs, 11):
            ladder = sphere.metric.ladder_parallel_transport(
                vec_a, base_point, vec_b,
                n_rungs=n_rungs, scheme=scheme, alpha=alpha)
            self.assertAllClose(
                ladder["transported_tangent_vec"], expected,
                rtol=tol, atol=tol)
            self.assertAllClose(ladder["end_point"], expected_point)
class TestVisualizationMethods(geomstats.tests.TestCase):
    """Smoke tests: each plot helper runs without error on random points."""

    def setUp(self):
        # Shared fixtures: one space per supported visualization target.
        self.n_samples = 10
        self.SO3_GROUP = SpecialOrthogonal(n=3)
        self.SE3_GROUP = SpecialEuclidean(n=3)
        self.S1 = Hypersphere(dim=1)
        self.S2 = Hypersphere(dim=2)
        self.H2 = Hyperbolic(dim=2)
        # Fresh figure so tests do not draw over one another.
        plt.figure()

    @geomstats.tests.np_only
    def test_plot_points_so3(self):
        points = self.SO3_GROUP.random_uniform(self.n_samples)
        visualization.plot(points, space='SO3_GROUP')

    @geomstats.tests.np_only
    def test_plot_points_se3(self):
        points = self.SE3_GROUP.random_uniform(self.n_samples)
        visualization.plot(points, space='SE3_GROUP')

    @geomstats.tests.np_only
    def test_plot_points_s1(self):
        points = self.S1.random_uniform(self.n_samples)
        visualization.plot(points, space='S1')

    @geomstats.tests.np_only
    def test_plot_points_s2(self):
        points = self.S2.random_uniform(self.n_samples)
        visualization.plot(points, space='S2')

    @geomstats.tests.np_only
    def test_plot_points_h2_poincare_disk(self):
        points = self.H2.random_uniform(self.n_samples)
        visualization.plot(points, space='H2_poincare_disk')

    @geomstats.tests.np_only
    def test_plot_points_h2_poincare_half_plane(self):
        points = self.H2.random_uniform(self.n_samples)
        visualization.plot(points, space='H2_poincare_half_plane')

    @geomstats.tests.np_only
    def test_plot_points_h2_klein_disk(self):
        points = self.H2.random_uniform(self.n_samples)
        visualization.plot(points, space='H2_klein_disk')
def empirical_frechet_var_bubble(n_samples, theta, dim, n_expectation=1000):
    """Variance of the empirical Fréchet mean for a bubble distribution.

    Draw n_samples from a bubble distribution, computes its empirical
    Fréchet mean and the square distance to the asymptotic mean. This is
    repeated n_expectation times to compute an approximation of its
    expectation (i.e. its variance) by sampling.

    The bubble distribution is an isotropic distributions on a Riemannian
    hyper sub-sphere of radius 0 < theta < Pi around the north pole of the
    sphere of dimension dim.

    Parameters
    ----------
    n_samples : int
        Number of samples to draw.
    theta: float
        Radius of the bubble distribution.
    dim : int
        Dimension of the sphere (embedded in R^{dim+1}).
    n_expectation: int, optional (defaults to 1000)
        Number of computations for approximating the expectation.

    Returns
    -------
    tuple (variance, std-dev on the computed variance)
    """
    if dim <= 1:
        raise ValueError(
            'Dim > 1 needed to draw a uniform sample on sub-sphere.')
    var = []
    sphere = Hypersphere(dim=dim)
    bubble = Hypersphere(dim=dim - 1)

    # North pole of the sphere: the asymptotic mean of the bubble.
    north_pole = gs.zeros(dim + 1)
    north_pole[dim] = 1.0
    for _ in range(n_expectation):
        # Sample n points from the uniform distribution on a sub-sphere
        # of radius theta (i.e cos(theta) in ambient space)
        # TODO (nina): Add this code as a method of hypersphere
        data = gs.zeros((n_samples, dim + 1), dtype=gs.float64)
        directions = bubble.random_uniform(n_samples)
        directions = gs.to_ndarray(directions, to_ndim=2)

        for i in range(n_samples):
            # Scale the sub-sphere direction by sin(theta); the last
            # ambient coordinate is lifted to cos(theta).
            for j in range(dim):
                data[i, j] = gs.sin(theta) * directions[i, j]
            data[i, dim] = gs.cos(theta)

        # TODO (nina): Use FrechetMean here
        current_mean = _adaptive_gradient_descent(
            data, metric=sphere.metric, max_iter=32, init_point=north_pole)

        var.append(sphere.metric.squared_dist(north_pole, current_mean))
    return gs.mean(var), 2 * gs.std(var) / gs.sqrt(n_expectation)
def empirical_frechet_var_bubble(n_samples, theta, dim, n_expectation=1000):
    """Variance of the empirical Fréchet mean for a bubble distribution.

    Draw n_samples from a bubble distribution, computes its empirical
    Fréchet mean and the square distance to the asymptotic mean. This is
    repeated n_expectation times to compute an approximation of its
    expectation (i.e. its variance) by sampling.

    The bubble distribution is an isotropic distributions on a Riemannian
    hyper sub-sphere of radius 0 < theta < Pi around the north pole of the
    sphere of dimension dim.

    Parameters
    ----------
    n_samples : int
        Number of samples to draw.
    theta: float
        Radius of the bubble distribution.
    dim : int
        Dimension of the sphere (embedded in R^{dim+1}).
    n_expectation: int, optional (defaults to 1000)
        Number of computations for approximating the expectation.

    Returns
    -------
    tuple (variance, std-dev on the computed variance)
    """
    if dim <= 1:
        raise ValueError(
            "Dim > 1 needed to draw a uniform sample on sub-sphere.")
    var = []
    sphere = Hypersphere(dim=dim)
    bubble = Hypersphere(dim=dim - 1)

    # North pole of the sphere: the asymptotic mean of the bubble.
    north_pole = gs.zeros(dim + 1)
    north_pole[dim] = 1.0
    for _ in range(n_expectation):
        # Sample n points from the uniform distribution on a sub-sphere
        # of radius theta (i.e cos(theta) in ambient space)
        # TODO (nina): Add this code as a method of hypersphere
        # Build the data vectorized: last ambient coordinate cos(theta),
        # remaining coordinates sin(theta) times a sub-sphere direction.
        last_col = gs.cos(theta) * gs.ones(n_samples)
        last_col = last_col[:, None] if (n_samples > 1) else last_col
        directions = bubble.random_uniform(n_samples)
        rest_col = gs.sin(theta) * directions
        data = gs.concatenate([rest_col, last_col], axis=-1)

        estimator = FrechetMean(
            sphere.metric, max_iter=32, method="adaptive",
            init_point=north_pole)
        estimator.fit(data)
        current_mean = estimator.estimate_

        var.append(sphere.metric.squared_dist(north_pole, current_mean))
    return gs.mean(var), 2 * gs.std(var) / gs.sqrt(n_expectation)
def empirical_frechet_mean_random_init_s2(data, n_init=1, init_points=None):
    """Fréchet mean on S2 by gradient descent from multiple starting points.

    Parameters
    ----------
    data : empirical distribution on S2.
    n_init : int
        Number of initial points drawn uniformly at random on S2.
    init_points : list, optional
        Initial points for the first gradient descent; a random point
        is drawn when none is given. (Default changed from a mutable
        ``[]`` to ``None`` — passing an empty list still behaves the
        same.)

    Returns
    -------
    mean : the Fréchet mean with the smallest empirical variance found.
    """
    # Validate with an explicit exception: `assert` is stripped under -O.
    if n_init < 1:
        raise ValueError(
            "Gradient descent needs at least one starting point")
    dim = len(data[0]) - 1
    sphere = Hypersphere(dimension=dim)
    if not init_points:
        # For a noncompact manifold, we need to revise this to a ball
        # with a maximal radius.
        init_points = [sphere.random_uniform()]
    mean = _adaptive_gradient_descent(
        data, metric=sphere.metric, n_max_iterations=64,
        init_points=init_points)
    sigma_mean = mean_sq_dist_s2(mean, data)
    # Restart from n_init - 1 further random points, keeping the
    # candidate mean with the smallest empirical variance.
    for _ in range(n_init - 1):
        init_points = sphere.random_uniform()
        new_mean = _adaptive_gradient_descent(
            data, metric=sphere.metric, n_max_iterations=64,
            init_points=init_points)
        sigma_new_mean = mean_sq_dist_s2(new_mean, data)
        if sigma_new_mean < sigma_mean:
            mean = new_mean
            sigma_mean = sigma_new_mean
    return mean
def test_parallel_transport_vectorization(self):
    """Parallel transport is an isometry for every vectorized input shape."""
    sphere = Hypersphere(2)
    n_samples = 4

    def is_isometry(tan_a, trans_a, endpoint):
        # Transported vector must stay tangent and keep its norm.
        is_tangent = gs.isclose(
            sphere.metric.inner_product(endpoint, trans_a), 0., atol=1e-6)
        is_equinormal = gs.isclose(
            gs.linalg.norm(trans_a, axis=-1),
            gs.linalg.norm(tan_a, axis=-1))
        return gs.logical_and(is_tangent, is_equinormal)

    # n base points, n directions, n vectors to transport.
    base_point = sphere.random_uniform(n_samples)
    tan_vec_a = sphere.to_tangent(gs.random.rand(n_samples, 3), base_point)
    tan_vec_b = sphere.to_tangent(gs.random.rand(n_samples, 3), base_point)
    end_point = sphere.metric.exp(tan_vec_b, base_point)
    transported = sphere.metric.parallel_transport(
        tan_vec_a, tan_vec_b, base_point)
    self.assertTrue(gs.all(is_isometry(tan_vec_a, transported, end_point)))

    # One base point, n directions, n vectors.
    base_point = base_point[0]
    tan_vec_a = sphere.to_tangent(tan_vec_a, base_point)
    tan_vec_b = sphere.to_tangent(tan_vec_b, base_point)
    end_point = sphere.metric.exp(tan_vec_b, base_point)
    transported = sphere.metric.parallel_transport(
        tan_vec_a, tan_vec_b, base_point)
    self.assertTrue(gs.all(is_isometry(tan_vec_a, transported, end_point)))

    # One base point, n directions, one vector.
    one_tan_vec_a = tan_vec_a[0]
    transported = sphere.metric.parallel_transport(
        one_tan_vec_a, tan_vec_b, base_point)
    self.assertTrue(
        gs.all(is_isometry(one_tan_vec_a, transported, end_point)))

    # One base point, one direction, n vectors.
    one_tan_vec_b = tan_vec_b[0]
    end_point = end_point[0]
    transported = sphere.metric.parallel_transport(
        tan_vec_a, one_tan_vec_b, base_point)
    self.assertTrue(gs.all(is_isometry(tan_vec_a, transported, end_point)))

    # Fully scalar case.
    transported = sphere.metric.parallel_transport(
        one_tan_vec_a, one_tan_vec_b, base_point)
    self.assertTrue(is_isometry(one_tan_vec_a, transported, end_point))
def test_parallel_transport(self):
    """Pole ladder agrees with the sphere's closed-form transport."""
    sphere = Hypersphere(dimension=2)
    connection = LeviCivitaConnection(sphere.metric)
    n_samples = 10

    base_point = sphere.random_uniform(n_samples)
    vec_a = sphere.projection_to_tangent_space(
        gs.random.rand(n_samples, 3), base_point)
    vec_b = sphere.projection_to_tangent_space(
        gs.random.rand(n_samples, 3), base_point)

    expected = sphere.metric.parallel_transport(vec_a, vec_b, base_point)
    result = connection.pole_ladder_parallel_transport(
        vec_a, vec_b, base_point)
    self.assertAllClose(result, expected, rtol=1e-7, atol=1e-5)
def test_spherical_to_extrinsic_and_inverse(self):
    """Spherical <-> extrinsic coordinate changes are mutual inverses."""
    dim = 2
    n_samples = 5
    sphere = Hypersphere(dim)

    # Angles (theta, phi) sampled in [0, pi] x [0, 2*pi].
    spherical = gs.random.rand(n_samples, 2) * gs.pi * gs.array(
        [1., 2.])[None, :]
    roundtrip = sphere.extrinsic_to_spherical(
        sphere.spherical_to_extrinsic(spherical))
    self.assertAllClose(roundtrip, spherical)

    extrinsic = sphere.random_uniform(n_samples)
    roundtrip = sphere.spherical_to_extrinsic(
        sphere.extrinsic_to_spherical(extrinsic))
    self.assertAllClose(roundtrip, extrinsic)
def test_circle_mean(self):
    """Intrinsic circle mean agrees with the extrinsic estimator."""
    circle = Hypersphere(1)
    points = circle.random_uniform(10)

    intrinsic_estimator = FrechetMean(circle.metric)
    intrinsic_estimator.fit(points)

    # Set a wrong dimension so that the extrinsic coordinates are used.
    metric = circle.metric
    metric.dim = 2
    extrinsic_estimator = FrechetMean(metric)
    extrinsic_estimator.fit(points)

    self.assertAllClose(
        intrinsic_estimator.estimate_, extrinsic_estimator.estimate_)
def test_ladder_alpha(self, dim, n_samples):
    """An invalid alpha value is rejected by the ladder scheme."""
    sphere = Hypersphere(dim)
    base_point = sphere.random_uniform(n_samples)
    vec_a = sphere.to_tangent(gs.random.rand(n_samples, 3), base_point)
    vec_b = sphere.to_tangent(gs.random.rand(n_samples, 3), base_point)
    with pytest.raises(ValueError):
        sphere.metric.ladder_parallel_transport(
            vec_a, base_point, vec_b,
            n_rungs=1, scheme="pole", alpha=0.5, return_geodesics=False)
def empirical_frechet_var_bubble(n_samples, theta, dim, n_expectation=1000):
    """Variance of the empirical Fréchet mean for a bubble distribution.

    Draw n_samples from a bubble distribution, computes its empirical
    Fréchet mean and the square distance to the asymptotic mean. This is
    repeated n_expectation times to compute an approximation of its
    expectation (i.e. its variance) by sampling.

    The bubble distribution is an isotropic distributions on a Riemannian
    hyper sub-sphere of radius 0 < theta around the north pole of the
    hyperbolic space of dimension dim.

    Parameters
    ----------
    n_samples: number of samples to draw
    theta: radius of the bubble distribution
    dim: dimension of the hyperbolic space (embedded in R^{1,dim})
    n_expectation: number of computations for approximating the expectation

    Returns
    -------
    tuple (variance, std-dev on the computed variance)
    """
    # Validate with an explicit exception: `assert` is stripped under -O.
    if dim <= 1:
        raise ValueError(
            "Dim > 1 needed to draw a uniform sample on sub-sphere")
    var = []
    hyperbole = Hyperbolic(dimension=dim)
    bubble = Hypersphere(dimension=dim - 1)

    # Origin of the hyperboloid model: the asymptotic mean of the bubble.
    origin = gs.zeros(dim + 1)
    origin[0] = 1.0
    for _ in range(n_expectation):
        # Sample n points from the uniform distribution on a sub-sphere
        # of radius theta (i.e cosh(theta) in ambient space).
        data = gs.zeros((n_samples, dim + 1), dtype=gs.float64)
        directions = bubble.random_uniform(n_samples)

        for i in range(n_samples):
            # Scale the sub-sphere direction by sinh(theta); the first
            # ambient (time-like) coordinate is lifted to cosh(theta).
            for j in range(dim):
                data[i, j + 1] = gs.sinh(theta) * directions[i, j]
            data[i, 0] = gs.cosh(theta)

        current_mean = _adaptive_gradient_descent(
            data, metric=hyperbole.metric, n_max_iterations=64,
            init_points=[origin])

        var.append(hyperbole.metric.squared_dist(origin, current_mean))
    return np.mean(var), 2 * np.std(var) / np.sqrt(n_expectation)
def main():
    """Plot the result of a KNN classification on the sphere."""
    sphere = Hypersphere(dim=2)
    sphere_distance = sphere.metric.dist

    n_labels = 2
    n_samples_per_dataset = 10
    n_targets = 200

    # Two von Mises-Fisher clusters centered at antipodal points.
    dataset_1 = sphere.random_von_mises_fisher(
        kappa=10, n_samples=n_samples_per_dataset)
    dataset_2 = -sphere.random_von_mises_fisher(
        kappa=10, n_samples=n_samples_per_dataset)
    training_dataset = gs.concatenate((dataset_1, dataset_2), axis=0)
    labels_dataset_1 = gs.zeros([n_samples_per_dataset], dtype=gs.int64)
    labels_dataset_2 = gs.ones([n_samples_per_dataset], dtype=gs.int64)
    labels = gs.concatenate((labels_dataset_1, labels_dataset_2))
    target = sphere.random_uniform(n_samples=n_targets)

    # Classify uniformly-drawn target points with a 2-NN classifier
    # that uses the Riemannian distance on the sphere.
    neigh = KNearestNeighborsClassifier(
        n_neighbors=2, distance=sphere_distance)
    neigh.fit(training_dataset, labels)
    target_labels = neigh.predict(target)

    # Figure 0: the labelled training set.
    plt.figure(0)
    ax = plt.subplot(111, projection="3d")
    plt.title("Training set")
    sphere_plot = visualization.Sphere()
    sphere_plot.draw(ax=ax)
    for i_label in range(n_labels):
        points_label_i = training_dataset[labels == i_label, ...]
        sphere_plot.draw_points(ax=ax, points=points_label_i)

    # Figure 1: the predicted labels on the target points.
    plt.figure(1)
    ax = plt.subplot(111, projection="3d")
    plt.title("Classification")
    sphere_plot = visualization.Sphere()
    sphere_plot.draw(ax=ax)
    for i_label in range(n_labels):
        target_points_label_i = target[target_labels == i_label, ...]
        sphere_plot.draw_points(ax=ax, points=target_points_label_i)

    plt.show()
def test_fit_init_random_sphere(self):
    """Test fitting data into a GMM."""
    space = Hypersphere(2)
    gmm_learning = RiemannianEM(
        metric=space.metric,
        n_gaussians=2,
        initialisation_method=self.initialisation_method)

    # Two von Mises-Fisher clusters around random centers.
    centers = space.random_uniform(2)
    clusters = [
        space.random_von_mises_fisher(mu=center, kappa=20, n_samples=140)
        for center in centers]
    data = gs.concatenate(clusters, axis=0)

    means, variances, coefficients = gmm_learning.fit(data)

    self.assertTrue((coefficients < 1).all() and (coefficients > 0).all())
    self.assertTrue((variances < 1).all() and (variances > 0).all())
    self.assertTrue(space.belongs(means).all())
def sectional_curvature_test_data(self):
    """Generate test data: sectional curvature of the unit sphere is 1."""
    dim_list = [1]
    n_samples_list = random.sample(range(1, 4), 2)
    random_data = []
    # zip stops at the shorter list, so only the first sample count is used.
    for dim, n_samples in zip(dim_list, n_samples_list):
        sphere = Hypersphere(dim)
        base_point = sphere.random_uniform()
        shape = (n_samples, sphere.dim + 1)
        tangent_vec_a = sphere.to_tangent(gs.random.rand(*shape), base_point)
        tangent_vec_b = sphere.to_tangent(gs.random.rand(*shape), base_point)
        # try shape here
        random_data.append(dict(
            dim=dim,
            tangent_vec_a=tangent_vec_a,
            tangent_vec_b=tangent_vec_b,
            base_point=base_point,
            expected=gs.ones(n_samples),
        ))
    return self.generate_tests(random_data)
def test_sample_von_mises_fisher_arbitrary_mean(self):
    """
    Check that the maximum likelihood estimates of the mean and
    concentration parameter are close to the real values. A first
    estimation of the concentration parameter is obtained by a
    closed-form expression and improved through the Newton method.
    """
    n_points = 10000
    kappa = 1000.0
    for dim in [2, 9]:
        sphere = Hypersphere(dim)
        # Check mean value for concentrated distribution for different mean.
        mean = sphere.random_uniform()
        points = sphere.random_von_mises_fisher(
            mu=mean, kappa=kappa, n_samples=n_points)
        sum_points = gs.sum(points, axis=0)
        estimate = sum_points / gs.linalg.norm(sum_points)
        self.assertAllClose(estimate, mean, atol=MEAN_ESTIMATION_TOL)
def main():
    """Cluster points on the circle with online K-means and plot them."""
    circle = Hypersphere(dimension=1)
    data = circle.random_uniform(n_samples=1000)
    n_clusters = 5

    clustering = OnlineKMeans(
        metric=circle.metric, n_clusters=n_clusters).fit(data)

    # Figure 0: the learned cluster centers on the circle.
    plt.figure(0)
    visualization.plot(
        points=clustering.cluster_centers_, space='S1', color='red')
    plt.show()

    # Figure 1: the data points, one draw call per cluster.
    plt.figure(1)
    ax = plt.axes()
    circle_plot = visualization.Circle()
    circle_plot.draw(ax=ax)
    for label in range(n_clusters):
        circle_plot.draw_points(
            ax=ax, points=data[clustering.labels_ == label, :])
    plt.show()
class GeomstatsSphere(EuclideanEmbeddedSubmanifold):
    """Adapter exposing geomstats' `Hypersphere` through pymanopt's
    `Manifold` interface, proxying each solver call to the wrapped space.
    """

    def __init__(self, ambient_dimension):
        dim = ambient_dimension - 1
        self._sphere = Hypersphere(dim)
        super().__init__('{}-dimensional Hypersphere'.format(dim), dim)

    def norm(self, base_point, tangent_vector):
        """Norm of a tangent vector at a base point."""
        return self._sphere.metric.norm(tangent_vector, base_point=base_point)

    def inner(self, base_point, tangent_vector_a, tangent_vector_b):
        """Inner product of two tangent vectors at a base point."""
        metric = self._sphere.metric
        return metric.inner_product(
            tangent_vector_a, tangent_vector_b, base_point=base_point)

    def proj(self, base_point, ambient_vector):
        """Orthogonal projection onto the tangent space at a base point."""
        return self._sphere.to_tangent(ambient_vector, base_point=base_point)

    def retr(self, base_point, tangent_vector):
        """Retraction: map a tangent vector back onto the manifold.

        `Hypersphere` provides no dedicated retraction, so the exponential
        map is used instead (see also
        https://hal.archives-ouvertes.fr/hal-00651608/document).
        """
        return self._sphere.metric.exp(tangent_vector, base_point=base_point)

    def rand(self):
        """Draw a point uniformly at random on the sphere."""
        return self._sphere.random_uniform()

    def randvec(self, base_point):
        """Draw a random unit-norm tangent vector at a base point."""
        ambient = gs.random.normal(size=self.dim + 1)
        tangent = self.proj(base_point, ambient)
        return tangent / gs.linalg.norm(tangent)

    def zerovec(self, base_point):
        """Return the zero tangent vector at a base point."""
        return gs.zeros_like(self.rand())
def test_tangent_spherical_and_extrinsic_inverse(self):
    """Tangent-space coordinate changes are mutual inverses."""
    dim = 2
    n_samples = 5
    sphere = Hypersphere(dim)

    # Angles (theta, phi) sampled in [0, pi] x [0, 2*pi].
    base_spherical = gs.random.rand(n_samples, 2) * gs.pi * gs.array(
        [1., 2.])[None, :]
    tangent_spherical = gs.random.rand(n_samples, 2)
    tangent_extrinsic = sphere.tangent_spherical_to_extrinsic(
        tangent_spherical, base_spherical)
    roundtrip = sphere.tangent_extrinsic_to_spherical(
        tangent_extrinsic, base_point_spherical=base_spherical)
    self.assertAllClose(roundtrip, tangent_spherical)

    # Reverse round trip, starting from extrinsic tangent vectors.
    base_extrinsic = sphere.random_uniform(n_samples)
    tangent_extrinsic = sphere.to_tangent(
        gs.random.rand(n_samples, dim + 1), base_extrinsic)
    tangent_spherical = sphere.tangent_extrinsic_to_spherical(
        tangent_extrinsic, base_point=base_extrinsic)
    base_spherical = sphere.extrinsic_to_spherical(base_extrinsic)
    roundtrip = sphere.tangent_spherical_to_extrinsic(
        tangent_spherical, base_spherical)
    self.assertAllClose(roundtrip, tangent_extrinsic)
class TestVisualization(geomstats.tests.TestCase):
    """Smoke tests for the visualization helpers, including the Kendall
    pre-shape space visualizers (2d sphere and 3d disk representations).
    """

    def setUp(self):
        # Shared fixtures: one space per supported visualization target.
        self.n_samples = 10
        self.SO3_GROUP = SpecialOrthogonal(n=3, point_type='vector')
        self.SE3_GROUP = SpecialEuclidean(n=3, point_type='vector')
        self.S1 = Hypersphere(dim=1)
        self.S2 = Hypersphere(dim=2)
        self.H2 = Hyperbolic(dim=2)
        self.H2_half_plane = PoincareHalfSpace(dim=2)
        # Kendall pre-shape spaces of 3 landmarks in 2d and 3d, with
        # their dedicated visualizers.
        self.M32 = Matrices(m=3, n=2)
        self.S32 = PreShapeSpace(k_landmarks=3, m_ambient=2)
        self.KS = visualization.KendallSphere()
        self.M33 = Matrices(m=3, n=3)
        self.S33 = PreShapeSpace(k_landmarks=3, m_ambient=3)
        self.KD = visualization.KendallDisk()
        # Fresh figure so tests do not draw over one another.
        plt.figure()

    @staticmethod
    def test_tutorial_matplotlib():
        visualization.tutorial_matplotlib()

    def test_plot_points_so3(self):
        points = self.SO3_GROUP.random_uniform(self.n_samples)
        visualization.plot(points, space='SO3_GROUP')

    def test_plot_points_se3(self):
        points = self.SE3_GROUP.random_point(self.n_samples)
        visualization.plot(points, space='SE3_GROUP')

    def test_draw_pre_shape_2d(self):
        self.KS.draw()

    def test_draw_points_pre_shape_2d(self):
        points = self.S32.random_point(self.n_samples)
        visualization.plot(points, space='S32')
        points = self.M32.random_point(self.n_samples)
        visualization.plot(points, space='M32')
        self.KS.clear_points()

    def test_draw_curve_pre_shape_2d(self):
        self.KS.draw()
        base_point = self.S32.random_point()
        vec = self.S32.random_point()
        tangent_vec = self.S32.to_tangent(vec, base_point)
        # Sample the geodesic through exp of scaled tangent vectors.
        times = gs.linspace(0., 1., 1000)
        speeds = gs.array([-t * tangent_vec for t in times])
        points = self.S32.ambient_metric.exp(speeds, base_point)
        self.KS.add_points(points)
        self.KS.draw_curve()
        self.KS.clear_points()

    def test_draw_vector_pre_shape_2d(self):
        self.KS.draw()
        base_point = self.S32.random_point()
        vec = self.S32.random_point()
        tangent_vec = self.S32.to_tangent(vec, base_point)
        self.KS.draw_vector(tangent_vec, base_point)

    def test_convert_to_spherical_coordinates_pre_shape_2d(self):
        # Converted points must lie on the Kendall sphere of radius 1/2
        # (squared norm 0.25).
        points = self.S32.random_point(self.n_samples)
        coords = self.KS.convert_to_spherical_coordinates(points)
        x = coords[:, 0]
        y = coords[:, 1]
        z = coords[:, 2]
        result = x**2 + y**2 + z**2
        expected = .25 * gs.ones(self.n_samples)
        self.assertAllClose(result, expected)

    def test_rotation_pre_shape_2d(self):
        # The rotation built from random angles must be in SO(3).
        theta = gs.random.rand(1)[0]
        phi = gs.random.rand(1)[0]
        rot = self.KS.rotation(theta, phi)
        result = _SpecialOrthogonalMatrices(3).belongs(rot)
        expected = True
        self.assertAllClose(result, expected)

    def test_draw_pre_shape_3d(self):
        self.KD.draw()

    def test_draw_points_pre_shape_3d(self):
        points = self.S33.random_point(self.n_samples)
        visualization.plot(points, space='S33')
        points = self.M33.random_point(self.n_samples)
        visualization.plot(points, space='M33')
        self.KD.clear_points()

    def test_draw_curve_pre_shape_3d(self):
        self.KD.draw()
        base_point = self.S33.random_point()
        vec = self.S33.random_point()
        tangent_vec = self.S33.to_tangent(vec, base_point)
        # Normalize the tangent vector to norm 1/2 before sampling.
        tangent_vec = .5 * tangent_vec / self.S33.ambient_metric.norm(
            tangent_vec)
        times = gs.linspace(0., 1., 1000)
        speeds = gs.array([-t * tangent_vec for t in times])
        points = self.S33.ambient_metric.exp(speeds, base_point)
        self.KD.add_points(points)
        self.KD.draw_curve()
        self.KD.clear_points()

    def test_draw_vector_pre_shape_3d(self):
        # NOTE(review): despite the 3d name, this uses the 2d visualizer
        # (self.KS) and the 2d pre-shape space (self.S32) — confirm
        # whether self.KD / self.S33 were intended here.
        self.KS.draw()
        base_point = self.S32.random_point()
        vec = self.S32.random_point()
        tangent_vec = self.S32.to_tangent(vec, base_point)
        self.KS.draw_vector(tangent_vec, base_point)

    def test_convert_to_planar_coordinates_pre_shape_3d(self):
        # Converted points must fall inside the unit Kendall disk.
        points = self.S33.random_point(self.n_samples)
        coords = self.KD.convert_to_planar_coordinates(points)
        x = coords[:, 0]
        y = coords[:, 1]
        radius = x**2 + y**2
        result = [r <= 1. for r in radius]
        self.assertTrue(gs.all(result))

    @geomstats.tests.np_and_pytorch_only
    def test_plot_points_s1(self):
        points = self.S1.random_uniform(self.n_samples)
        visualization.plot(points, space='S1')

    def test_plot_points_s2(self):
        points = self.S2.random_uniform(self.n_samples)
        visualization.plot(points, space='S2')

    def test_plot_points_h2_poincare_disk(self):
        points = self.H2.random_point(self.n_samples)
        visualization.plot(points, space='H2_poincare_disk')

    def test_plot_points_h2_poincare_half_plane_ext(self):
        points = self.H2.random_point(self.n_samples)
        visualization.plot(points, space='H2_poincare_half_plane',
                           point_type='extrinsic')

    def test_plot_points_h2_poincare_half_plane_none(self):
        points = self.H2_half_plane.random_point(self.n_samples)
        visualization.plot(points, space='H2_poincare_half_plane')

    def test_plot_points_h2_poincare_half_plane_hs(self):
        points = self.H2_half_plane.random_point(self.n_samples)
        visualization.plot(points, space='H2_poincare_half_plane',
                           point_type='half_space')

    def test_plot_points_h2_klein_disk(self):
        points = self.H2.random_point(self.n_samples)
        visualization.plot(points, space='H2_klein_disk')

    @staticmethod
    def test_plot_points_se2():
        points = SpecialEuclidean(n=2, point_type='vector').random_point(4)
        visu = visualization.SpecialEuclidean2(points, point_type='vector')
        ax = visu.set_ax()
        visu.draw(ax)
class TestHypersphereMethods(geomstats.tests.TestCase):
    """Tests of the Hypersphere space and its Riemannian metric.

    Points on the n-dimensional sphere are represented as (n+1)-D
    extrinsic vectors of norm 1 (here dim=4, so 5-D vectors).
    """

    def setUp(self):
        gs.random.seed(1234)
        self.dimension = 4
        # NOTE(review): uses the legacy `dimension=` keyword of Hypersphere.
        self.space = Hypersphere(dimension=self.dimension)
        self.metric = self.space.metric
        self.n_samples = 10

    @geomstats.tests.np_and_pytorch_only
    def test_random_uniform_and_belongs(self):
        """
        Test that the random uniform method samples
        on the hypersphere space.
        """
        n_samples = self.n_samples
        point = self.space.random_uniform(n_samples)
        result = self.space.belongs(point)
        expected = gs.array([[True]] * n_samples)
        self.assertAllClose(expected, result)

    @geomstats.tests.np_and_pytorch_only
    def test_random_uniform(self):
        """A single sample is returned with shape (1, dim + 1)."""
        point = self.space.random_uniform()
        self.assertAllClose(gs.shape(point), (1, self.dimension + 1))

    def test_projection_and_belongs(self):
        """Projecting an arbitrary vector yields a point on the sphere."""
        point = gs.array([1., 2., 3., 4., 5.])
        proj = self.space.projection(point)
        result = self.space.belongs(proj)
        expected = gs.array([[True]])
        self.assertAllClose(expected, result)

    def test_intrinsic_and_extrinsic_coords(self):
        """
        Test that the composition of
        intrinsic_to_extrinsic_coords and
        extrinsic_to_intrinsic_coords
        gives the identity.
        """
        point_int = gs.array([.1, 0., 0., .1])
        point_ext = self.space.intrinsic_to_extrinsic_coords(point_int)
        result = self.space.extrinsic_to_intrinsic_coords(point_ext)
        expected = point_int
        expected = helper.to_vector(expected)
        self.assertAllClose(result, expected)

        # Same round trip starting from a unit extrinsic vector.
        point_ext = (1. / (gs.sqrt(6.)) * gs.array([1., 0., 0., 1., 2.]))
        point_int = self.space.extrinsic_to_intrinsic_coords(point_ext)
        result = self.space.intrinsic_to_extrinsic_coords(point_int)
        expected = point_ext
        expected = helper.to_vector(expected)
        self.assertAllClose(result, expected)

    def test_intrinsic_and_extrinsic_coords_vectorization(self):
        """
        Test that the composition of
        intrinsic_to_extrinsic_coords and
        extrinsic_to_intrinsic_coords
        gives the identity.
        """
        point_int = gs.array([[.1, 0., 0., .1],
                              [.1, .1, .1, .4],
                              [.1, .3, 0., .1],
                              [-0.1, .1, -.4, .1],
                              [0., 0., .1, .1],
                              [.1, .1, .1, .1]])
        point_ext = self.space.intrinsic_to_extrinsic_coords(point_int)
        result = self.space.extrinsic_to_intrinsic_coords(point_ext)
        expected = point_int
        expected = helper.to_vector(expected)
        self.assertAllClose(result, expected)

        point_int = self.space.extrinsic_to_intrinsic_coords(point_ext)
        result = self.space.intrinsic_to_extrinsic_coords(point_int)
        expected = point_ext
        expected = helper.to_vector(expected)
        self.assertAllClose(result, expected)

    @geomstats.tests.np_and_pytorch_only
    def test_log_and_exp_general_case(self):
        """
        Test that the riemannian exponential
        and the riemannian logarithm are inverse.

        Expect their composition to give the identity function.

        NB: points on the n-dimensional sphere are
        (n+1)-D vectors of norm 1.
        """
        # Riemannian Log then Riemannian Exp
        # General case
        base_point = gs.array([1., 2., 3., 4., 6.])
        base_point = base_point / gs.linalg.norm(base_point)
        point = gs.array([0., 5., 6., 2., -1.])
        point = point / gs.linalg.norm(point)

        log = self.metric.log(point=point, base_point=base_point)
        result = self.metric.exp(tangent_vec=log, base_point=base_point)
        expected = point
        expected = helper.to_vector(expected)
        self.assertAllClose(result, expected, atol=1e-6)

    @geomstats.tests.np_and_pytorch_only
    def test_log_and_exp_edge_case(self):
        """
        Test that the riemannian exponential
        and the riemannian logarithm are inverse.

        Expect their composition to give the identity function.

        NB: points on the n-dimensional sphere are
        (n+1)-D vectors of norm 1.
        """
        # Riemannian Log then Riemannian Exp
        # Edge case: two very close points, base_point_2 and point_2,
        # form an angle < epsilon
        base_point = gs.array([1., 2., 3., 4., 6.])
        base_point = base_point / gs.linalg.norm(base_point)
        point = (base_point + 1e-12 * gs.array([-1., -2., 1., 1., .1]))
        point = point / gs.linalg.norm(point)

        log = self.metric.log(point=point, base_point=base_point)
        result = self.metric.exp(tangent_vec=log, base_point=base_point)
        expected = point
        expected = helper.to_vector(expected)
        self.assertAllClose(result, expected)

    @geomstats.tests.np_and_pytorch_only
    def test_exp_vectorization(self):
        """Check exp output shapes for all single/batched input combinations."""
        n_samples = self.n_samples
        dim = self.dimension + 1

        one_vec = self.space.random_uniform()
        one_base_point = self.space.random_uniform()
        n_vecs = self.space.random_uniform(n_samples=n_samples)
        n_base_points = self.space.random_uniform(n_samples=n_samples)

        one_tangent_vec = self.space.projection_to_tangent_space(
            one_vec, base_point=one_base_point)
        result = self.metric.exp(one_tangent_vec, one_base_point)
        self.assertAllClose(gs.shape(result), (1, dim))

        n_tangent_vecs = self.space.projection_to_tangent_space(
            n_vecs, base_point=one_base_point)
        result = self.metric.exp(n_tangent_vecs, one_base_point)
        self.assertAllClose(gs.shape(result), (n_samples, dim))

        one_tangent_vec = self.space.projection_to_tangent_space(
            one_vec, base_point=n_base_points)
        result = self.metric.exp(one_tangent_vec, n_base_points)
        self.assertAllClose(gs.shape(result), (n_samples, dim))

        n_tangent_vecs = self.space.projection_to_tangent_space(
            n_vecs, base_point=n_base_points)
        result = self.metric.exp(n_tangent_vecs, n_base_points)
        self.assertAllClose(gs.shape(result), (n_samples, dim))

    @geomstats.tests.np_and_pytorch_only
    def test_log_vectorization(self):
        """Check log output shapes for all single/batched input combinations."""
        n_samples = self.n_samples
        dim = self.dimension + 1

        one_base_point = self.space.random_uniform()
        one_point = self.space.random_uniform()
        n_points = self.space.random_uniform(n_samples=n_samples)
        n_base_points = self.space.random_uniform(n_samples=n_samples)

        result = self.metric.log(one_point, one_base_point)
        self.assertAllClose(gs.shape(result), (1, dim))

        result = self.metric.log(n_points, one_base_point)
        self.assertAllClose(gs.shape(result), (n_samples, dim))

        result = self.metric.log(one_point, n_base_points)
        self.assertAllClose(gs.shape(result), (n_samples, dim))

        result = self.metric.log(n_points, n_base_points)
        self.assertAllClose(gs.shape(result), (n_samples, dim))

    @geomstats.tests.np_and_pytorch_only
    def test_exp_and_log_and_projection_to_tangent_space_general_case(self):
        """
        Test that the riemannian exponential
        and the riemannian logarithm are inverse.

        Expect their composition to give the identity function.

        NB: points on the n-dimensional sphere are
        (n+1)-D vectors of norm 1.
        """
        # TODO(nina): Fix that this test fails, also in numpy
        # Riemannian Exp then Riemannian Log
        # General case
        # NB: Riemannian log gives a regularized tangent vector,
        # so we take the norm modulo 2 * pi.
        # NOTE(review): the exp/log round trip below is disabled (known
        # failure), so this test currently computes `expected` but asserts
        # nothing.
        base_point = gs.array([0., -3., 0., 3., 4.])
        base_point = base_point / gs.linalg.norm(base_point)
        vector = gs.array([9., 5., 0., 0., -1.])
        vector = self.space.projection_to_tangent_space(vector=vector,
                                                        base_point=base_point)

        # exp = self.metric.exp(tangent_vec=vector, base_point=base_point)
        # result = self.metric.log(point=exp, base_point=base_point)

        expected = vector
        norm_expected = gs.linalg.norm(expected)
        regularized_norm_expected = gs.mod(norm_expected, 2 * gs.pi)
        expected = expected / norm_expected * regularized_norm_expected
        expected = helper.to_vector(expected)

    @geomstats.tests.np_and_pytorch_only
    def test_exp_and_log_and_projection_to_tangent_space_edge_case(self):
        """
        Test that the riemannian exponential
        and the riemannian logarithm are inverse.

        Expect their composition to give the identity function.

        NB: points on the n-dimensional sphere are
        (n+1)-D vectors of norm 1.
        """
        # Riemannian Exp then Riemannian Log
        # Edge case: tangent vector has norm < epsilon
        base_point = gs.array([10., -2., -.5, 34., 3.])
        base_point = base_point / gs.linalg.norm(base_point)
        vector = 1e-10 * gs.array([.06, -51., 6., 5., 3.])
        vector = self.space.projection_to_tangent_space(vector=vector,
                                                        base_point=base_point)

        exp = self.metric.exp(tangent_vec=vector, base_point=base_point)
        result = self.metric.log(point=exp, base_point=base_point)
        expected = self.space.projection_to_tangent_space(
            vector=vector, base_point=base_point)
        expected = helper.to_vector(expected)
        self.assertAllClose(result, expected, atol=1e-8)

    def test_squared_norm_and_squared_dist(self):
        """
        Test that the squared distance between two points is
        the squared norm of their logarithm.
        """
        point_a = (1. / gs.sqrt(129.) * gs.array([10., -2., -5., 0., 0.]))
        point_b = (1. / gs.sqrt(435.) * gs.array([1., -20., -5., 0., 3.]))
        log = self.metric.log(point=point_a, base_point=point_b)
        result = self.metric.squared_norm(vector=log)
        expected = self.metric.squared_dist(point_a, point_b)
        expected = helper.to_scalar(expected)
        self.assertAllClose(result, expected)

    @geomstats.tests.np_and_pytorch_only
    def test_squared_dist_vectorization(self):
        """Check squared_dist output shapes for single/batched inputs."""
        n_samples = self.n_samples

        one_point_a = self.space.random_uniform()
        one_point_b = self.space.random_uniform()
        n_points_a = self.space.random_uniform(n_samples=n_samples)
        n_points_b = self.space.random_uniform(n_samples=n_samples)

        result = self.metric.squared_dist(one_point_a, one_point_b)
        self.assertAllClose(gs.shape(result), (1, 1))

        result = self.metric.squared_dist(n_points_a, one_point_b)
        self.assertAllClose(gs.shape(result), (n_samples, 1))

        result = self.metric.squared_dist(one_point_a, n_points_b)
        self.assertAllClose(gs.shape(result), (n_samples, 1))

        result = self.metric.squared_dist(n_points_a, n_points_b)
        self.assertAllClose(gs.shape(result), (n_samples, 1))

    def test_norm_and_dist(self):
        """
        Test that the distance between two points is
        the norm of their logarithm.
        """
        point_a = (1. / gs.sqrt(129.) * gs.array([10., -2., -5., 0., 0.]))
        point_b = (1. / gs.sqrt(435.) * gs.array([1., -20., -5., 0., 3.]))
        log = self.metric.log(point=point_a, base_point=point_b)
        result = self.metric.norm(vector=log)
        expected = self.metric.dist(point_a, point_b)
        expected = helper.to_scalar(expected)
        self.assertAllClose(result, expected)

    def test_dist_point_and_itself(self):
        # Distance between a point and itself is 0
        point_a = (1. / gs.sqrt(129.) * gs.array([10., -2., -5., 0., 0.]))
        point_b = point_a
        result = self.metric.dist(point_a, point_b)
        expected = 0.
        expected = helper.to_scalar(expected)
        self.assertAllClose(result, expected)

    def test_dist_orthogonal_points(self):
        # Distance between two orthogonal points is pi / 2.
        point_a = gs.array([10., -2., -.5, 0., 0.])
        point_a = point_a / gs.linalg.norm(point_a)
        point_b = gs.array([2., 10, 0., 0., 0.])
        point_b = point_b / gs.linalg.norm(point_b)
        # Sanity check: the two points are indeed orthogonal.
        result = gs.dot(point_a, point_b)
        result = helper.to_scalar(result)
        expected = 0
        expected = helper.to_scalar(expected)
        self.assertAllClose(result, expected)

        result = self.metric.dist(point_a, point_b)
        expected = gs.pi / 2
        expected = helper.to_scalar(expected)
        self.assertAllClose(result, expected)

    @geomstats.tests.np_and_pytorch_only
    def test_exp_and_dist_and_projection_to_tangent_space(self):
        """dist(base_point, exp(v)) equals |v| modulo 2*pi."""
        base_point = gs.array([16., -2., -2.5, 84., 3.])
        base_point = base_point / gs.linalg.norm(base_point)
        vector = gs.array([9., 0., -1., -2., 1.])
        tangent_vec = self.space.projection_to_tangent_space(
            vector=vector, base_point=base_point)
        exp = self.metric.exp(tangent_vec=tangent_vec, base_point=base_point)
        result = self.metric.dist(base_point, exp)
        expected = gs.linalg.norm(tangent_vec) % (2 * gs.pi)
        expected = helper.to_scalar(expected)
        self.assertAllClose(result, expected)

    @geomstats.tests.np_and_pytorch_only
    def test_exp_and_dist_and_projection_to_tangent_space_vec(self):
        """Vectorized version of the exp/dist consistency check."""
        base_point = gs.array([[16., -2., -2.5, 84., 3.],
                               [16., -2., -2.5, 84., 3.]])
        base_single_point = gs.array([16., -2., -2.5, 84., 3.])
        scalar_norm = gs.linalg.norm(base_single_point)
        base_point = base_point / scalar_norm
        vector = gs.array([[9., 0., -1., -2., 1.],
                           [9., 0., -1., -2., 1]])
        tangent_vec = self.space.projection_to_tangent_space(
            vector=vector, base_point=base_point)
        exp = self.metric.exp(tangent_vec=tangent_vec, base_point=base_point)
        result = self.metric.dist(base_point, exp)
        expected = gs.linalg.norm(tangent_vec, axis=-1) % (2 * gs.pi)
        expected = helper.to_scalar(expected)
        self.assertAllClose(result, expected)

    @geomstats.tests.np_and_pytorch_only
    def test_geodesic_and_belongs(self):
        """Every point of a geodesic path stays on the sphere."""
        n_geodesic_points = 100
        initial_point = self.space.random_uniform()
        vector = gs.array([2., 0., -1., -2., 1.])
        initial_tangent_vec = self.space.projection_to_tangent_space(
            vector=vector, base_point=initial_point)
        geodesic = self.metric.geodesic(
            initial_point=initial_point,
            initial_tangent_vec=initial_tangent_vec)

        t = gs.linspace(start=0., stop=1., num=n_geodesic_points)
        points = geodesic(t)
        result = self.space.belongs(points)
        expected = gs.array(n_geodesic_points * [[True]])
        self.assertAllClose(expected, result)

    def test_inner_product(self):
        """Inner product of two orthonormal tangent vectors is 0."""
        tangent_vec_a = gs.array([1., 0., 0., 0., 0.])
        tangent_vec_b = gs.array([0., 1., 0., 0., 0.])
        base_point = gs.array([0., 0., 0., 0., 1.])
        result = self.metric.inner_product(tangent_vec_a, tangent_vec_b,
                                           base_point)
        expected = gs.array([[0.]])
        self.assertAllClose(expected, result)

    @geomstats.tests.np_and_pytorch_only
    def test_variance(self):
        """Variance of a repeated single point is 0."""
        point = gs.array([0., 0., 0., 0., 1.])
        points = gs.zeros((2, point.shape[0]))
        points[0, :] = point
        points[1, :] = point
        result = self.metric.variance(points)
        expected = helper.to_scalar(0.)
        self.assertAllClose(expected, result)

    @geomstats.tests.np_and_pytorch_only
    def test_mean(self):
        """Mean of a repeated single point is that point."""
        point = gs.array([0., 0., 0., 0., 1.])
        points = gs.zeros((2, point.shape[0]))
        points[0, :] = point
        points[1, :] = point
        result = self.metric.mean(points)
        expected = helper.to_vector(point)
        self.assertAllClose(expected, result)

    @geomstats.tests.np_only
    def test_adaptive_gradientdescent_mean(self):
        n_tests = 100
        result = gs.zeros(n_tests)
        expected = gs.zeros(n_tests)

        for i in range(n_tests):
            # take 2 random points, compute their mean, and verify that
            # log of each at the mean is opposite
            points = self.space.random_uniform(n_samples=2)
            mean = self.metric.adaptive_gradientdescent_mean(points)
            logs = self.metric.log(point=points, base_point=mean)
            result[i] = gs.linalg.norm(logs[1, :] + logs[0, :])
        self.assertAllClose(expected, result, rtol=1e-10, atol=1e-10)

    @geomstats.tests.np_and_pytorch_only
    def test_mean_and_belongs(self):
        """The Frechet mean of sphere points lies on the sphere."""
        point_a = gs.array([1., 0., 0., 0., 0.])
        point_b = gs.array([0., 1., 0., 0., 0.])
        points = gs.zeros((2, point_a.shape[0]))
        points[0, :] = point_a
        points[1, :] = point_b
        mean = self.metric.mean(points)
        result = self.space.belongs(mean)
        expected = gs.array([[True]])
        self.assertAllClose(result, expected)

    def test_diameter(self):
        """Diameter of a set containing two antipodal points is pi."""
        dim = 2
        sphere = Hypersphere(dim)
        point_a = gs.array([[0., 0., 1.]])
        point_b = gs.array([[1., 0., 0.]])
        point_c = gs.array([[0., 0., -1.]])
        result = sphere.metric.diameter(gs.vstack((point_a, point_b, point_c)))
        expected = gs.pi
        self.assertAllClose(expected, result)

    @geomstats.tests.np_and_pytorch_only
    def test_closest_neighbor_index(self):
        """
        Check that the closest neighbor is one of neighbors.
        """
        n_samples = 10
        points = self.space.random_uniform(n_samples=n_samples)
        point = points[0, :]
        neighbors = points[1:, :]
        index = self.metric.closest_neighbor_index(point, neighbors)
        closest_neighbor = points[index, :]

        test = gs.sum(gs.all(points == closest_neighbor, axis=1))
        result = test > 0
        self.assertTrue(result)

    @geomstats.tests.np_and_pytorch_only
    def test_sample_von_mises_fisher(self):
        """
        Check that the maximum likelihood estimates of the mean and
        concentration parameter are close to the real values. A first
        estimation of the concentration parameter is obtained by a
        closed-form expression and improved through the Newton method.
        """
        dim = 2
        n_points = 1000000
        sphere = Hypersphere(dim)

        # check mean value for concentrated distribution
        kappa = 10000000
        points = sphere.random_von_mises_fisher(kappa, n_points)
        sum_points = gs.sum(points, axis=0)
        mean = gs.array([0., 0., 1.])
        mean_estimate = sum_points / gs.linalg.norm(sum_points)
        expected = mean
        result = mean_estimate
        self.assertTrue(
            gs.allclose(result, expected, atol=MEAN_ESTIMATION_TOL))

        # check concentration parameter for dispersed distribution
        kappa = 1
        points = sphere.random_von_mises_fisher(kappa, n_points)
        sum_points = gs.sum(points, axis=0)
        mean_norm = gs.linalg.norm(sum_points) / n_points
        # Closed-form first estimate of kappa, then Newton refinement below.
        kappa_estimate = (mean_norm * (dim + 1. - mean_norm**2) /
                          (1. - mean_norm**2))
        kappa_estimate = gs.cast(kappa_estimate, gs.float64)
        p = dim + 1
        n_steps = 100
        for i in range(n_steps):
            bessel_func_1 = scipy.special.iv(p / 2., kappa_estimate)
            bessel_func_2 = scipy.special.iv(p / 2. - 1., kappa_estimate)
            ratio = bessel_func_1 / bessel_func_2
            denominator = 1. - ratio**2 - (p - 1.) * ratio / kappa_estimate
            mean_norm = gs.cast(mean_norm, gs.float64)
            kappa_estimate = kappa_estimate - (ratio - mean_norm) / denominator
        expected = kappa
        result = kappa_estimate
        self.assertTrue(
            gs.allclose(result, expected, atol=KAPPA_ESTIMATION_TOL))

    @geomstats.tests.np_and_pytorch_only
    def test_spherical_to_extrinsic(self):
        """
        Check vectorization of conversion from spherical
        to extrinsic coordinates on the 2-sphere.
        """
        dim = 2
        sphere = Hypersphere(dim)
        points_spherical = gs.array([[gs.pi / 2, 0],
                                     [gs.pi / 6, gs.pi / 4]])
        result = sphere.spherical_to_extrinsic(points_spherical)
        expected = gs.array([[1., 0., 0.],
                             [gs.sqrt(2) / 4, gs.sqrt(2) / 4,
                              gs.sqrt(3) / 2]])
        self.assertAllClose(result, expected)

    @geomstats.tests.np_and_pytorch_only
    def test_tangent_spherical_to_extrinsic(self):
        """
        Check vectorization of conversion from spherical
        to extrinsic coordinates for tangent vectors to the
        2-sphere.
        """
        dim = 2
        sphere = Hypersphere(dim)
        base_points_spherical = gs.array([[gs.pi / 2, 0],
                                          [gs.pi / 2, 0]])
        tangent_vecs_spherical = gs.array([[0.25, 0.5],
                                           [0.3, 0.2]])
        result = sphere.tangent_spherical_to_extrinsic(tangent_vecs_spherical,
                                                       base_points_spherical)
        expected = gs.array([[0, 0.5, -0.25],
                             [0, 0.2, -0.3]])
        self.assertAllClose(result, expected)

    def test_christoffels_vectorization(self):
        """
        Check vectorization of Christoffel symbols in
        spherical coordinates on the 2-sphere.
        """
        dim = 2
        sphere = Hypersphere(dim)
        points_spherical = gs.array([[gs.pi / 2, 0],
                                     [gs.pi / 6, gs.pi / 4]])
        christoffel = sphere.metric.christoffels(points_spherical)
        result = christoffel.shape
        expected = gs.array([2, dim, dim, dim])
        self.assertAllClose(result, expected)

class TestConnection(geomstats.tests.TestCase):
    """Tests of the abstract Connection class, checked against the
    closed-form hypersphere metric (exp/log/parallel transport)."""

    def setup_method(self):
        warnings.simplefilter("ignore", category=UserWarning)
        gs.random.seed(0)
        self.dim = 4
        self.euc_metric = EuclideanMetric(dim=self.dim)
        self.connection = Connection(dim=2)
        self.hypersphere = Hypersphere(dim=2)

    def test_metric_matrix(self):
        """The Euclidean metric matrix is the identity."""
        base_point = gs.array([0.0, 1.0, 0.0, 0.0])
        result = self.euc_metric.metric_matrix(base_point)
        expected = gs.eye(self.dim)
        self.assertAllClose(result, expected)

    def test_parallel_transport(self):
        """Ladder schemes must converge to the closed-form transport."""
        n_samples = 2
        base_point = self.hypersphere.random_uniform(n_samples)
        tan_vec_a = self.hypersphere.to_tangent(gs.random.rand(n_samples, 3),
                                                base_point)
        tan_vec_b = self.hypersphere.to_tangent(gs.random.rand(n_samples, 3),
                                                base_point)
        # Closed-form reference values.
        expected = self.hypersphere.metric.parallel_transport(
            tan_vec_a, base_point, tan_vec_b)
        expected_point = self.hypersphere.metric.exp(tan_vec_b, base_point)
        base_point = gs.cast(base_point, gs.float64)
        base_point, tan_vec_a, tan_vec_b = gs.convert_to_wider_dtype(
            [base_point, tan_vec_a, tan_vec_b])
        # Pole ladder is exact in one rung here; Schild needs many rungs
        # and is only checked to a loose tolerance.
        for step, alpha in zip(["pole", "schild"], [1, 2]):
            min_n = 1 if step == "pole" else 50
            tol = 1e-5 if step == "pole" else 1e-2
            for n_rungs in [min_n, 11]:
                ladder = self.hypersphere.metric.ladder_parallel_transport(
                    tan_vec_a,
                    base_point,
                    tan_vec_b,
                    n_rungs=n_rungs,
                    scheme=step,
                    alpha=alpha,
                )
                result = ladder["transported_tangent_vec"]
                result_point = ladder["end_point"]
                self.assertAllClose(result, expected, rtol=tol, atol=tol)
                self.assertAllClose(result_point, expected_point)

    def test_parallel_transport_trajectory(self):
        """Same check as above, requesting the intermediate geodesics."""
        n_samples = 2
        for step in ["pole", "schild"]:
            n_steps = 1 if step == "pole" else 50
            tol = 1e-6 if step == "pole" else 1e-2
            base_point = self.hypersphere.random_uniform(n_samples)
            tan_vec_a = self.hypersphere.to_tangent(
                gs.random.rand(n_samples, 3), base_point)
            tan_vec_b = self.hypersphere.to_tangent(
                gs.random.rand(n_samples, 3), base_point)
            expected = self.hypersphere.metric.parallel_transport(
                tan_vec_a, base_point, tan_vec_b)
            expected_point = self.hypersphere.metric.exp(tan_vec_b, base_point)
            ladder = self.hypersphere.metric.ladder_parallel_transport(
                tan_vec_a,
                base_point,
                tan_vec_b,
                n_rungs=n_steps,
                scheme=step,
                return_geodesics=True,
            )
            result = ladder["transported_tangent_vec"]
            result_point = ladder["end_point"]
            self.assertAllClose(result, expected, rtol=tol, atol=tol)
            self.assertAllClose(result_point, expected_point)

    def test_ladder_alpha(self):
        """A non-integer / invalid alpha must raise a ValueError."""
        n_samples = 2
        base_point = self.hypersphere.random_uniform(n_samples)
        tan_vec_a = self.hypersphere.to_tangent(gs.random.rand(n_samples, 3),
                                                base_point)
        tan_vec_b = self.hypersphere.to_tangent(gs.random.rand(n_samples, 3),
                                                base_point)
        with pytest.raises(ValueError):
            self.hypersphere.metric.ladder_parallel_transport(
                tan_vec_a,
                base_point,
                tan_vec_b,
                n_rungs=1,
                scheme="pole",
                alpha=0.5,
                return_geodesics=False,
            )

    def test_exp_connection_metric(self):
        """Numerical geodesic integration matches the closed-form exp."""
        point = gs.array([gs.pi / 2, 0])
        vector = gs.array([0.25, 0.5])
        point_ext = self.hypersphere.spherical_to_extrinsic(point)
        vector_ext = self.hypersphere.tangent_spherical_to_extrinsic(
            vector, point)
        self.connection.christoffels = self.hypersphere.metric.christoffels
        expected = self.hypersphere.metric.exp(vector_ext, point_ext)
        result_spherical = self.connection.exp(vector, point, n_steps=50,
                                               step="rk4")
        result = self.hypersphere.spherical_to_extrinsic(result_spherical)
        self.assertAllClose(result, expected)

    def test_exp_connection_metric_vectorization(self):
        """Vectorized version of the numerical-vs-closed-form exp check."""
        point = gs.array([[gs.pi / 2, 0], [gs.pi / 6, gs.pi / 4]])
        vector = gs.array([[0.25, 0.5], [0.30, 0.2]])
        point_ext = self.hypersphere.spherical_to_extrinsic(point)
        vector_ext = self.hypersphere.tangent_spherical_to_extrinsic(
            vector, point)
        self.connection.christoffels = self.hypersphere.metric.christoffels
        expected = self.hypersphere.metric.exp(vector_ext, point_ext)
        result_spherical = self.connection.exp(vector, point, n_steps=50,
                                               step="rk4")
        result = self.hypersphere.spherical_to_extrinsic(result_spherical)
        self.assertAllClose(result, expected)

    @geomstats.tests.autograd_tf_and_torch_only
    def test_log_connection_metric(self):
        """Numerical log (shooting method) matches the closed-form log."""
        base_point = gs.array([gs.pi / 3, gs.pi / 4])
        point = gs.array([1.0, gs.pi / 2])
        self.connection.christoffels = self.hypersphere.metric.christoffels
        vector = self.connection.log(point=point, base_point=base_point,
                                     n_steps=75, step="rk4", tol=1e-10)
        result = self.hypersphere.tangent_spherical_to_extrinsic(
            vector, base_point)
        p_ext = self.hypersphere.spherical_to_extrinsic(base_point)
        q_ext = self.hypersphere.spherical_to_extrinsic(point)
        expected = self.hypersphere.metric.log(base_point=p_ext, point=q_ext)
        self.assertAllClose(result, expected)

    @geomstats.tests.autograd_tf_and_torch_only
    def test_log_connection_metric_vectorization(self):
        """Vectorized version of the numerical-vs-closed-form log check."""
        base_point = gs.array([[gs.pi / 3, gs.pi / 4], [gs.pi / 2, gs.pi / 4]])
        point = gs.array([[1.0, gs.pi / 2], [gs.pi / 6, gs.pi / 3]])
        self.connection.christoffels = self.hypersphere.metric.christoffels
        vector = self.connection.log(point=point, base_point=base_point,
                                     n_steps=75, step="rk4", tol=1e-10)
        result = self.hypersphere.tangent_spherical_to_extrinsic(
            vector, base_point)
        p_ext = self.hypersphere.spherical_to_extrinsic(base_point)
        q_ext = self.hypersphere.spherical_to_extrinsic(point)
        expected = self.hypersphere.metric.log(base_point=p_ext, point=q_ext)
        self.assertAllClose(result, expected, atol=1e-6)

    def test_geodesic_and_coincides_exp_hypersphere(self):
        """The geodesic endpoint at t=1 coincides with exp of the tangent."""
        n_geodesic_points = 10
        initial_point = self.hypersphere.random_uniform(2)
        vector = gs.array([[2.0, 0.0, -1.0]] * 2)
        initial_tangent_vec = self.hypersphere.to_tangent(
            vector=vector, base_point=initial_point)
        geodesic = self.hypersphere.metric.geodesic(
            initial_point=initial_point,
            initial_tangent_vec=initial_tangent_vec)
        t = gs.linspace(start=0.0, stop=1.0, num=n_geodesic_points)
        points = geodesic(t)
        result = points[:, -1]
        expected = self.hypersphere.metric.exp(vector, initial_point)
        self.assertAllClose(expected, result)

        # Same check with single (non-batched) initial conditions.
        initial_point = initial_point[0]
        initial_tangent_vec = initial_tangent_vec[0]
        geodesic = self.hypersphere.metric.geodesic(
            initial_point=initial_point,
            initial_tangent_vec=initial_tangent_vec)
        points = geodesic(t)
        result = points[-1]
        expected = self.hypersphere.metric.exp(initial_tangent_vec,
                                               initial_point)
        self.assertAllClose(expected, result)

    def test_geodesic_and_coincides_exp_son(self):
        """Same geodesic-vs-exp check on SO(4) with its bi-invariant metric."""
        n_geodesic_points = 10
        space = SpecialOrthogonal(n=4)
        initial_point = space.random_uniform(2)
        vector = gs.random.rand(2, 4, 4)
        initial_tangent_vec = space.to_tangent(vector=vector,
                                               base_point=initial_point)
        geodesic = space.bi_invariant_metric.geodesic(
            initial_point=initial_point,
            initial_tangent_vec=initial_tangent_vec)
        t = gs.linspace(start=0.0, stop=1.0, num=n_geodesic_points)
        points = geodesic(t)
        result = points[:, -1]
        expected = space.bi_invariant_metric.exp(initial_tangent_vec,
                                                 initial_point)
        self.assertAllClose(result, expected)

        initial_point = initial_point[0]
        initial_tangent_vec = initial_tangent_vec[0]
        geodesic = space.bi_invariant_metric.geodesic(
            initial_point=initial_point,
            initial_tangent_vec=initial_tangent_vec)
        points = geodesic(t)
        result = points[-1]
        expected = space.bi_invariant_metric.exp(initial_tangent_vec,
                                                 initial_point)
        self.assertAllClose(expected, result)

    def test_geodesic_invalid_initial_conditions(self):
        """Giving both a tangent vector and an end point must raise."""
        space = SpecialOrthogonal(n=4)
        initial_point = space.random_uniform(2)
        vector = gs.random.rand(2, 4, 4)
        initial_tangent_vec = space.to_tangent(vector=vector,
                                               base_point=initial_point)
        end_point = space.random_uniform(2)
        with pytest.raises(RuntimeError):
            space.bi_invariant_metric.geodesic(
                initial_point=initial_point,
                initial_tangent_vec=initial_tangent_vec,
                end_point=end_point,
            )

    def test_geodesic_vectorization(self):
        """Geodesic paths broadcast over batched initial/end conditions."""
        space = Hypersphere(2)
        metric = space.metric
        initial_point = space.random_uniform(2)
        vector = gs.random.rand(2, 3)
        initial_tangent_vec = space.to_tangent(vector=vector,
                                               base_point=initial_point)
        end_point = space.random_uniform(2)
        time = gs.linspace(0, 1, 10)

        geo = metric.geodesic(initial_point, initial_tangent_vec)
        path = geo(time)
        result = path.shape
        expected = (2, 10, 3)
        self.assertAllClose(result, expected)

        geo = metric.geodesic(initial_point, end_point=end_point)
        path = geo(time)
        result = path.shape
        expected = (2, 10, 3)
        self.assertAllClose(result, expected)

        # Single end point broadcast against two initial points.
        geo = metric.geodesic(initial_point, end_point=end_point[0])
        path = geo(time)
        result = path.shape
        expected = (2, 10, 3)
        self.assertAllClose(result, expected)

        # Single initial point broadcast against two tangent vectors.
        initial_tangent_vec = space.to_tangent(vector=vector,
                                               base_point=initial_point[0])
        geo = metric.geodesic(initial_point[0], initial_tangent_vec)
        path = geo(time)
        result = path.shape
        expected = (2, 10, 3)
        self.assertAllClose(result, expected)

class TestToTangentSpace(geomstats.tests.TestCase):
    """Tests of the ToTangentSpace sklearn-style transformer on
    several manifolds (sphere, hyperbolic space, SO(3), SPD matrices)."""

    _multiprocess_can_split_ = True

    def setUp(self):
        gs.random.seed(123)
        self.sphere = Hypersphere(dim=4)
        self.hyperbolic = Hyperboloid(dim=3)
        self.euclidean = Euclidean(dim=2)
        self.minkowski = Minkowski(dim=2)
        self.so3 = SpecialOrthogonal(n=3, point_type='vector')
        self.so_matrix = SpecialOrthogonal(n=3, point_type='matrix')

    def test_estimate_transform_sphere(self):
        """Transforming the fitted mean itself yields zero tangent vectors."""
        north_pole = gs.array([0., 0., 0., 0., 1.])
        data = gs.array([north_pole, north_pole])
        to_tangent = ToTangentSpace(geometry=self.sphere)
        to_tangent.fit(X=data)
        result = to_tangent.transform(data)
        expected = gs.zeros_like(data)
        self.assertAllClose(expected, result)

    def test_inverse_transform_no_fit_sphere(self):
        """transform then inverse_transform at a given base point is identity."""
        samples = self.sphere.random_uniform(3)
        anchor = samples[0]
        data = samples[1:]
        to_tangent = ToTangentSpace(geometry=self.sphere)
        tangent_data = to_tangent.transform(data, base_point=anchor)
        result = to_tangent.inverse_transform(tangent_data, base_point=anchor)
        expected = data
        self.assertAllClose(expected, result)

    @geomstats.tests.np_and_tf_only
    def test_estimate_transform_so_group(self):
        """Transforming the fitted mean rotation yields zero coordinates."""
        rotation = self.so_matrix.random_uniform()
        data = gs.array([rotation, rotation])
        to_tangent = ToTangentSpace(geometry=self.so_matrix)
        to_tangent.fit(X=data)
        result = to_tangent.transform(data)
        expected = gs.zeros((2, 6))
        self.assertAllClose(expected, result)

    def test_estimate_transform_spd(self):
        """Transforming the fitted mean SPD matrix yields zero coordinates."""
        matrix = spd.SPDMatrices(3).random_uniform()
        data = gs.stack([matrix, matrix])
        to_tangent = ToTangentSpace(geometry=spd.SPDMetricAffine(3))
        to_tangent.fit(X=data)
        result = to_tangent.transform(data)
        expected = gs.zeros((2, 6))
        self.assertAllClose(expected, result, atol=1e-5)

    def test_fit_transform_hyperbolic(self):
        """fit_transform of a repeated point gives zero tangent vectors."""
        base = gs.array([2., 1., 1., 1.])
        data = gs.array([base, base])
        to_tangent = ToTangentSpace(geometry=self.hyperbolic.metric)
        result = to_tangent.fit_transform(X=data)
        expected = gs.zeros_like(data)
        self.assertAllClose(expected, result)

    def test_inverse_transform_hyperbolic(self):
        """fit_transform then inverse_transform recovers hyperbolic points."""
        data = self.hyperbolic.random_uniform(10)
        to_tangent = ToTangentSpace(geometry=self.hyperbolic.metric)
        tangent_data = to_tangent.fit_transform(X=data)
        result = to_tangent.inverse_transform(tangent_data)
        expected = data
        self.assertAllClose(expected, result)

    def test_inverse_transform_spd(self):
        """Round trip recovers SPD matrices under both SPD metrics."""
        data = spd.SPDMatrices(3).random_uniform(10)
        for metric in (spd.SPDMetricLogEuclidean(3), spd.SPDMetricAffine(3)):
            to_tangent = ToTangentSpace(geometry=metric)
            tangent_data = to_tangent.fit_transform(X=data)
            result = to_tangent.inverse_transform(tangent_data)
            expected = data
            self.assertAllClose(expected, result, atol=1e-4)

    @geomstats.tests.np_only
    def test_inverse_transform_so(self):
        # FIXME: einsum vectorization error for invariant_metric log in tf
        data = self.so_matrix.random_uniform(10)
        to_tangent = ToTangentSpace(
            geometry=self.so_matrix.bi_invariant_metric)
        identity = self.so_matrix.identity
        tangent_data = to_tangent.transform(X=data, base_point=identity)
        result = to_tangent.inverse_transform(tangent_data,
                                              base_point=identity)
        expected = data
        self.assertAllClose(expected, result)

class TestFrechetMean(geomstats.tests.TestCase):
    """Tests for the FrechetMean estimator and the variance helper.

    Covers the 'default' and 'adaptive' gradient-descent methods on the
    sphere, SO(3) (vector and matrix representations), SPD matrices,
    hyperbolic space, Euclidean, Minkowski and plain matrix spaces.
    """

    _multiprocess_can_split_ = True

    def setUp(self):
        """Seed the RNG and build the spaces shared by all tests."""
        gs.random.seed(123)
        self.sphere = Hypersphere(dim=4)
        self.hyperbolic = Hyperboloid(dim=3)
        self.euclidean = Euclidean(dim=2)
        self.minkowski = Minkowski(dim=2)
        self.so3 = SpecialOrthogonal(n=3, point_type='vector')
        self.so_matrix = SpecialOrthogonal(n=3)

    def test_logs_at_mean_default_gradient_descent_sphere(self):
        """Logs of two points at their mean are opposite (default method)."""
        n_tests = 10
        estimator = FrechetMean(
            metric=self.sphere.metric, method='default', lr=1.)

        result = []
        for _ in range(n_tests):
            # take 2 random points, compute their mean, and verify that
            # log of each at the mean is opposite
            points = self.sphere.random_uniform(n_samples=2)
            estimator.fit(points)
            mean = estimator.estimate_

            logs = self.sphere.metric.log(point=points, base_point=mean)
            result.append(gs.linalg.norm(logs[1, :] + logs[0, :]))
        result = gs.stack(result)
        expected = gs.zeros(n_tests)
        self.assertAllClose(expected, result)

    def test_logs_at_mean_adaptive_gradient_descent_sphere(self):
        """Logs of two points at their mean are opposite (adaptive method)."""
        n_tests = 10
        estimator = FrechetMean(metric=self.sphere.metric, method='adaptive')

        result = []
        for _ in range(n_tests):
            # take 2 random points, compute their mean, and verify that
            # log of each at the mean is opposite
            points = self.sphere.random_uniform(n_samples=2)
            estimator.fit(points)
            mean = estimator.estimate_

            logs = self.sphere.metric.log(point=points, base_point=mean)
            result.append(gs.linalg.norm(logs[1, :] + logs[0, :]))
        result = gs.stack(result)
        expected = gs.zeros(n_tests)
        self.assertAllClose(expected, result)

    def test_estimate_shape_default_gradient_descent_sphere(self):
        """The estimate of the mean has the ambient dimension (default)."""
        dim = 5
        point_a = gs.array([1., 0., 0., 0., 0.])
        point_b = gs.array([0., 1., 0., 0., 0.])
        points = gs.array([point_a, point_b])

        mean = FrechetMean(
            metric=self.sphere.metric, method='default', verbose=True)
        mean.fit(points)
        result = mean.estimate_

        self.assertAllClose(gs.shape(result), (dim,))

    def test_estimate_shape_adaptive_gradient_descent_sphere(self):
        """The estimate of the mean has the ambient dimension (adaptive)."""
        dim = 5
        point_a = gs.array([1., 0., 0., 0., 0.])
        point_b = gs.array([0., 1., 0., 0., 0.])
        points = gs.array([point_a, point_b])

        mean = FrechetMean(metric=self.sphere.metric, method='adaptive')
        mean.fit(points)
        result = mean.estimate_

        self.assertAllClose(gs.shape(result), (dim,))

    def test_estimate_and_belongs_default_gradient_descent_sphere(self):
        """The estimated mean belongs to the sphere (default method)."""
        point_a = gs.array([1., 0., 0., 0., 0.])
        point_b = gs.array([0., 1., 0., 0., 0.])
        points = gs.array([point_a, point_b])

        mean = FrechetMean(metric=self.sphere.metric, method='default')
        mean.fit(points)

        result = self.sphere.belongs(mean.estimate_)
        expected = True
        self.assertAllClose(result, expected)

    def test_estimate_default_gradient_descent_so3(self):
        """Logs at the mean sum to zero on SO(3), vector representation."""
        points = self.so3.random_uniform(2)

        mean_vec = FrechetMean(
            metric=self.so3.bi_invariant_metric, method='default', lr=1.)
        mean_vec.fit(points)

        logs = self.so3.bi_invariant_metric.log(points, mean_vec.estimate_)
        result = gs.sum(logs, axis=0)
        expected = gs.zeros_like(points[0])
        self.assertAllClose(result, expected)

    def test_estimate_and_belongs_default_gradient_descent_so3(self):
        """The estimated mean belongs to SO(3), vector representation."""
        point = self.so3.random_uniform(10)

        mean_vec = FrechetMean(
            metric=self.so3.bi_invariant_metric, method='default')
        mean_vec.fit(point)

        result = self.so3.belongs(mean_vec.estimate_)
        expected = True
        self.assertAllClose(result, expected)

    @geomstats.tests.np_and_tf_only
    def test_estimate_default_gradient_descent_so_matrix(self):
        """Logs at the mean sum to zero on SO(3), matrix representation."""
        points = self.so_matrix.random_uniform(2)
        mean_vec = FrechetMean(
            metric=self.so_matrix.bi_invariant_metric, method='default',
            lr=1.)
        mean_vec.fit(points)
        logs = self.so_matrix.bi_invariant_metric.log(
            points, mean_vec.estimate_)
        result = gs.sum(logs, axis=0)
        expected = gs.zeros_like(points[0])

        self.assertAllClose(result, expected, atol=1e-5)

    @geomstats.tests.np_and_tf_only
    def test_estimate_and_belongs_default_gradient_descent_so_matrix(self):
        """The estimated mean is a rotation matrix (default method)."""
        point = self.so_matrix.random_uniform(10)
        mean = FrechetMean(
            metric=self.so_matrix.bi_invariant_metric, method='default')
        mean.fit(point)

        result = self.so_matrix.belongs(mean.estimate_)
        expected = True
        self.assertAllClose(result, expected)

    @geomstats.tests.np_and_tf_only
    def test_estimate_and_belongs_adaptive_gradient_descent_so_matrix(self):
        """The estimated mean is a rotation matrix (adaptive method)."""
        point = self.so_matrix.random_uniform(10)
        mean = FrechetMean(
            metric=self.so_matrix.bi_invariant_metric, method='adaptive',
            verbose=True, lr=.5)
        mean.fit(point)

        result = self.so_matrix.belongs(mean.estimate_)
        self.assertTrue(result)

    @geomstats.tests.np_and_tf_only
    def test_estimate_and_coincide_default_so_vec_and_mat(self):
        """Means computed in matrix and vector representations coincide."""
        point = self.so_matrix.random_uniform(3)

        mean = FrechetMean(
            metric=self.so_matrix.bi_invariant_metric, method='default')
        mean.fit(point)
        expected = mean.estimate_

        mean_vec = FrechetMean(
            metric=self.so3.bi_invariant_metric, method='default')
        point_vec = self.so3.rotation_vector_from_matrix(point)
        mean_vec.fit(point_vec)
        result_vec = mean_vec.estimate_
        result = self.so3.matrix_from_rotation_vector(result_vec)

        self.assertAllClose(result, expected)

    def test_estimate_and_belongs_adaptive_gradient_descent_sphere(self):
        """The estimated mean belongs to the sphere (adaptive method)."""
        point_a = gs.array([1., 0., 0., 0., 0.])
        point_b = gs.array([0., 1., 0., 0., 0.])
        points = gs.array([point_a, point_b])

        mean = FrechetMean(metric=self.sphere.metric, method='adaptive')
        mean.fit(points)

        result = self.sphere.belongs(mean.estimate_)
        expected = True
        self.assertAllClose(result, expected)

    def test_variance_sphere(self):
        """Variance of identical points at their own base point is zero."""
        point = gs.array([0., 0., 0., 0., 1.])
        points = gs.array([point, point])

        result = variance(
            points, base_point=point, metric=self.sphere.metric)
        expected = gs.array(0.)

        self.assertAllClose(expected, result)

    def test_estimate_default_gradient_descent_sphere(self):
        """Mean of identical points is that point (default method)."""
        point = gs.array([0., 0., 0., 0., 1.])
        points = gs.array([point, point])

        mean = FrechetMean(metric=self.sphere.metric, method='default')
        mean.fit(X=points)

        result = mean.estimate_
        expected = point

        self.assertAllClose(expected, result)

    def test_estimate_adaptive_gradient_descent_sphere(self):
        """Mean of identical points is that point (adaptive method)."""
        point = gs.array([0., 0., 0., 0., 1.])
        points = gs.array([point, point])

        mean = FrechetMean(metric=self.sphere.metric, method='adaptive')
        mean.fit(X=points)

        result = mean.estimate_
        expected = point

        self.assertAllClose(expected, result)

    def test_estimate_spd(self):
        """Mean of identical SPD matrices is that matrix."""
        point = SPDMatrices(3).random_point()
        points = gs.array([point, point])
        mean = FrechetMean(metric=SPDMetricAffine(3), point_type='matrix')
        mean.fit(X=points)
        result = mean.estimate_
        expected = point
        self.assertAllClose(expected, result)

    def test_variance_hyperbolic(self):
        """Variance of identical hyperbolic points is zero."""
        point = gs.array([2., 1., 1., 1.])
        points = gs.array([point, point])
        result = variance(
            points, base_point=point, metric=self.hyperbolic.metric)
        expected = gs.array(0.)

        self.assertAllClose(result, expected)

    def test_estimate_hyperbolic(self):
        """Mean of identical hyperbolic points is that point."""
        point = gs.array([2., 1., 1., 1.])
        points = gs.array([point, point])

        mean = FrechetMean(metric=self.hyperbolic.metric)
        mean.fit(X=points)
        expected = point

        result = mean.estimate_

        self.assertAllClose(result, expected)

    def test_estimate_and_belongs_hyperbolic(self):
        """The estimated mean belongs to hyperbolic space."""
        point_a = self.hyperbolic.random_point()
        point_b = self.hyperbolic.random_point()
        point_c = self.hyperbolic.random_point()
        points = gs.stack([point_a, point_b, point_c], axis=0)

        mean = FrechetMean(metric=self.hyperbolic.metric)
        mean.fit(X=points)

        result = self.hyperbolic.belongs(mean.estimate_)
        expected = True

        self.assertAllClose(result, expected)

    def test_mean_euclidean_shape(self):
        """The Euclidean mean has the expected shape."""
        dim = 2
        point = gs.array([1., 4.])

        mean = FrechetMean(metric=self.euclidean.metric)
        points = [point, point, point]
        mean.fit(points)

        result = mean.estimate_

        self.assertAllClose(gs.shape(result), (dim,))

    def test_mean_euclidean(self):
        """Euclidean mean: identical points, then a weighted average."""
        point = gs.array([1., 4.])

        mean = FrechetMean(metric=self.euclidean.metric)
        points = [point, point, point]
        mean.fit(points)

        result = mean.estimate_
        expected = point

        self.assertAllClose(result, expected)

        points = gs.array([
            [1., 2.],
            [2., 3.],
            [3., 4.],
            [4., 5.]])
        weights = [1., 2., 1., 2.]

        mean = FrechetMean(metric=self.euclidean.metric)
        mean.fit(points, weights=weights)

        result = mean.estimate_
        expected = gs.array([16. / 6., 22. / 6.])

        self.assertAllClose(result, expected)

    def test_variance_euclidean(self):
        """Weighted Euclidean variance at the origin."""
        points = gs.array([
            [1., 2.],
            [2., 3.],
            [3., 4.],
            [4., 5.]])
        weights = gs.array([1., 2., 1., 2.])
        base_point = gs.zeros(2)
        result = variance(
            points, weights=weights, base_point=base_point,
            metric=self.euclidean.metric)
        # we expect the average of the points' sq norms.
        expected = gs.array((1 * 5. + 2 * 13. + 1 * 25. + 2 * 41.) / 6.)

        self.assertAllClose(result, expected)

    def test_mean_matrices_shape(self):
        """The mean of matrices has the matrix shape."""
        m, n = (2, 2)
        point = gs.array([
            [1., 4.],
            [2., 3.]])

        metric = MatricesMetric(m, n)
        mean = FrechetMean(metric=metric, point_type='matrix')
        points = [point, point, point]
        mean.fit(points)

        result = mean.estimate_
        self.assertAllClose(gs.shape(result), (m, n))

    def test_mean_matrices(self):
        """Mean of identical matrices is that matrix."""
        m, n = (2, 2)
        point = gs.array([
            [1., 4.],
            [2., 3.]])

        metric = MatricesMetric(m, n)
        mean = FrechetMean(metric=metric, point_type='matrix')
        points = [point, point, point]
        mean.fit(points)

        result = mean.estimate_
        expected = point

        self.assertAllClose(result, expected)

    def test_mean_minkowski_shape(self):
        """The Minkowski mean has the expected shape."""
        dim = 2
        point = gs.array([2., -math.sqrt(3)])
        points = [point, point, point]

        mean = FrechetMean(metric=self.minkowski.metric)
        mean.fit(points)
        result = mean.estimate_

        self.assertAllClose(gs.shape(result), (dim,))

    def test_mean_minkowski(self):
        """Minkowski mean: identical points, then a weighted mean belongs."""
        point = gs.array([2., -math.sqrt(3)])
        points = [point, point, point]

        mean = FrechetMean(metric=self.minkowski.metric)
        mean.fit(points)
        result = mean.estimate_

        expected = point

        self.assertAllClose(result, expected)

        points = gs.array([
            [1., 0.],
            [2., math.sqrt(3)],
            [3., math.sqrt(8)],
            [4., math.sqrt(24)]])
        weights = gs.array([1., 2., 1., 2.])

        mean = FrechetMean(metric=self.minkowski.metric)
        mean.fit(points, weights=weights)
        result = mean.estimate_
        result = self.minkowski.belongs(result)
        expected = gs.array(True)

        self.assertAllClose(result, expected)

    def test_variance_minkowski(self):
        """Weighted Minkowski variance at a base point is non-zero here."""
        points = gs.array([
            [1., 0.],
            [2., math.sqrt(3)],
            [3., math.sqrt(8)],
            [4., math.sqrt(24)]])
        weights = gs.array([1., 2., 1., 2.])
        base_point = gs.array([-1., 0.])
        var = variance(
            points, weights=weights, base_point=base_point,
            metric=self.minkowski.metric)
        result = var != 0
        # we expect the average of the points' Minkowski sq norms.
        expected = True
        self.assertAllClose(result, expected)

    def test_one_point(self):
        """Fitting a single point returns that point as the mean."""
        point = gs.array([0., 0., 0., 0., 1.])

        mean = FrechetMean(metric=self.sphere.metric, method='default')
        mean.fit(X=point)

        result = mean.estimate_
        expected = point
        self.assertAllClose(expected, result)

        mean = FrechetMean(
            metric=self.sphere.metric, method='frechet-poincare-ball')
        mean.fit(X=point)

        result = mean.estimate_
        expected = point
        self.assertAllClose(expected, result)
# NOTE(review): this class re-uses the name of the earlier ``TestFrechetMean``
# defined above in this module.  Because Python binds the name twice, this
# (older) definition shadows the newer one, so only these tests are collected
# by the test runner and the earlier class never runs.  This version also
# constructs spaces with the legacy ``dimension=`` keyword, whereas the other
# classes in this file use ``dim=`` — presumably stale code kept by accident;
# confirm against the current geomstats API and remove or rename this class.
class TestFrechetMean(geomstats.tests.TestCase):
    """Older, duplicate tests for FrechetMean and variance (see NOTE above)."""

    _multiprocess_can_split_ = True

    def setUp(self):
        """Build the spaces shared by all tests (legacy ``dimension=`` API)."""
        self.sphere = Hypersphere(dimension=4)
        self.hyperbolic = Hyperbolic(dimension=3)
        self.euclidean = Euclidean(dimension=2)
        self.minkowski = Minkowski(dimension=2)

    @geomstats.tests.np_only
    def test_adaptive_gradient_descent_sphere(self):
        """Logs of two points at their mean are opposite (adaptive descent)."""
        n_tests = 100
        result = gs.zeros(n_tests)
        expected = gs.zeros(n_tests)

        for i in range(n_tests):
            # take 2 random points, compute their mean, and verify that
            # log of each at the mean is opposite
            points = self.sphere.random_uniform(n_samples=2)
            mean = _adaptive_gradient_descent(
                points=points,
                metric=self.sphere.metric)

            logs = self.sphere.metric.log(point=points, base_point=mean)
            result[i] = gs.linalg.norm(logs[1, :] + logs[0, :])

        self.assertAllClose(expected, result, rtol=1e-10, atol=1e-10)

    @geomstats.tests.np_and_pytorch_only
    def test_estimate_and_belongs_sphere(self):
        """The estimated mean belongs to the sphere."""
        point_a = gs.array([1., 0., 0., 0., 0.])
        point_b = gs.array([0., 1., 0., 0., 0.])
        points = gs.zeros((2, point_a.shape[0]))
        points[0, :] = point_a
        points[1, :] = point_b

        mean = FrechetMean(metric=self.sphere.metric)
        mean.fit(points)

        result = self.sphere.belongs(mean.estimate_)
        expected = gs.array([[True]])

        self.assertAllClose(result, expected)

    @geomstats.tests.np_and_pytorch_only
    def test_variance_sphere(self):
        """Variance of identical points at their own base point is zero."""
        point = gs.array([0., 0., 0., 0., 1.])
        points = gs.zeros((2, point.shape[0]))
        points[0, :] = point
        points[1, :] = point

        result = variance(
            points, base_point=point, metric=self.sphere.metric)
        expected = helper.to_scalar(0.)

        self.assertAllClose(expected, result)

    @geomstats.tests.np_and_pytorch_only
    def test_estimate_sphere(self):
        """Mean of identical points is that point."""
        point = gs.array([0., 0., 0., 0., 1.])
        points = gs.zeros((2, point.shape[0]))
        points[0, :] = point
        points[1, :] = point

        mean = FrechetMean(metric=self.sphere.metric)
        mean.fit(X=points)

        result = mean.estimate_
        expected = helper.to_vector(point)

        self.assertAllClose(expected, result)

    @geomstats.tests.np_and_tf_only
    def test_variance_hyperbolic(self):
        """Variance of identical hyperbolic points is zero."""
        point = gs.array([2., 1., 1., 1.])
        points = gs.array([point, point])
        result = variance(
            points, base_point=point, metric=self.hyperbolic.metric)
        expected = helper.to_scalar(0.)

        self.assertAllClose(result, expected)

    @geomstats.tests.np_and_tf_only
    def test_estimate_hyperbolic(self):
        """Mean of identical hyperbolic points is that point."""
        point = gs.array([2., 1., 1., 1.])
        points = gs.array([point, point])

        mean = FrechetMean(metric=self.hyperbolic.metric)
        mean.fit(X=points)

        result = mean.estimate_
        expected = helper.to_vector(point)

        self.assertAllClose(result, expected)

    @geomstats.tests.np_and_tf_only
    def test_estimate_and_belongs_hyperbolic(self):
        """The estimated mean belongs to hyperbolic space."""
        point_a = self.hyperbolic.random_uniform()
        point_b = self.hyperbolic.random_uniform()
        point_c = self.hyperbolic.random_uniform()
        points = gs.concatenate([point_a, point_b, point_c], axis=0)

        mean = FrechetMean(metric=self.hyperbolic.metric)
        mean.fit(X=points)

        result = self.hyperbolic.belongs(mean.estimate_)
        expected = gs.array([[True]])

        self.assertAllClose(result, expected)

    def test_mean_euclidean(self):
        """Euclidean mean: identical points, then a weighted average."""
        point = gs.array([[1., 4.]])

        mean = FrechetMean(metric=self.euclidean.metric)
        points = [point, point, point]
        mean.fit(points)

        result = mean.estimate_
        expected = point
        expected = helper.to_vector(expected)

        self.assertAllClose(result, expected)

        points = gs.array([[1., 2.],
                           [2., 3.],
                           [3., 4.],
                           [4., 5.]])
        weights = gs.array([1., 2., 1., 2.])

        mean = FrechetMean(metric=self.euclidean.metric)
        mean.fit(points, weights=weights)

        result = mean.estimate_
        expected = gs.array([16. / 6., 22. / 6.])
        expected = helper.to_vector(expected)

        self.assertAllClose(result, expected)

    def test_variance_euclidean(self):
        """Weighted Euclidean variance at the origin."""
        points = gs.array([[1., 2.],
                           [2., 3.],
                           [3., 4.],
                           [4., 5.]])
        weights = gs.array([1., 2., 1., 2.])
        base_point = gs.zeros(2)
        result = variance(
            points, weights=weights, base_point=base_point,
            metric=self.euclidean.metric)
        # we expect the average of the points' sq norms.
        expected = (1 * 5. + 2 * 13. + 1 * 25. + 2 * 41.) / 6.
        expected = helper.to_scalar(expected)

        self.assertAllClose(result, expected)

    def test_mean_minkowski(self):
        """Minkowski mean: identical points, then a weighted mean belongs."""
        point = gs.array([[2., -math.sqrt(3)]])
        points = [point, point, point]

        mean = FrechetMean(metric=self.minkowski.metric)
        mean.fit(points)
        result = mean.estimate_

        expected = point
        expected = helper.to_vector(expected)

        self.assertAllClose(result, expected)

        points = gs.array([[1., 0.],
                           [2., math.sqrt(3)],
                           [3., math.sqrt(8)],
                           [4., math.sqrt(24)]])
        weights = gs.array([1., 2., 1., 2.])

        mean = FrechetMean(metric=self.minkowski.metric)
        mean.fit(points, weights=weights)
        result = mean.estimate_
        result = self.minkowski.belongs(result)
        expected = gs.array([[True]])

        self.assertAllClose(result, expected)

    def test_variance_minkowski(self):
        """Weighted Minkowski variance at a base point is non-zero here."""
        points = gs.array([[1., 0.],
                           [2., math.sqrt(3)],
                           [3., math.sqrt(8)],
                           [4., math.sqrt(24)]])
        weights = gs.array([1., 2., 1., 2.])
        base_point = gs.array([-1., 0.])
        var = variance(
            points, weights=weights, base_point=base_point,
            metric=self.minkowski.metric)
        result = helper.to_scalar(var != 0)
        # we expect the average of the points' Minkowski sq norms.
        expected = helper.to_scalar(gs.array([True]))
        self.assertAllClose(result, expected)