"""Unit tests for geodesic regression and the hypersphere."""

import scipy.special
from scipy.optimize import minimize

import geomstats.backend as gs
import geomstats.tests
import tests.helper as helper
from geomstats.geometry.discrete_curves import R2, DiscreteCurves
from geomstats.geometry.euclidean import Euclidean
from geomstats.geometry.hypersphere import Hypersphere
from geomstats.geometry.matrices import Matrices
from geomstats.geometry.special_euclidean import SpecialEuclidean
from geomstats.learning.geodesic_regression import GeodesicRegression

# NOTE: the imports above and the two tolerances below are reconstructed from
# usage in the tests. The module paths follow the geomstats layout; the
# tolerance values are plausible placeholders, not taken from the original
# file.
MEAN_ESTIMATION_TOL = 1e-2
KAPPA_ESTIMATION_TOL = 5e-2


class TestGeodesicRegression(geomstats.tests.TestCase):
    _multiprocess_can_split_ = True

    def setup_method(self):
        gs.random.seed(1234)
        self.n_samples = 20

        # Set up for euclidean
        self.dim_eucl = 3
        self.shape_eucl = (self.dim_eucl,)
        self.eucl = Euclidean(dim=self.dim_eucl)
        X = gs.random.rand(self.n_samples)
        self.X_eucl = X - gs.mean(X)
        self.intercept_eucl_true = self.eucl.random_point()
        self.coef_eucl_true = self.eucl.random_point()
        self.y_eucl = (
            self.intercept_eucl_true + self.X_eucl[:, None] * self.coef_eucl_true
        )
        self.param_eucl_true = gs.vstack(
            [self.intercept_eucl_true, self.coef_eucl_true]
        )
        self.param_eucl_guess = gs.vstack(
            [
                self.y_eucl[0],
                self.y_eucl[0] + gs.random.normal(size=self.shape_eucl),
            ]
        )

        # Set up for hypersphere
        self.dim_sphere = 4
        self.shape_sphere = (self.dim_sphere + 1,)
        self.sphere = Hypersphere(dim=self.dim_sphere)
        X = gs.random.rand(self.n_samples)
        self.X_sphere = X - gs.mean(X)
        self.intercept_sphere_true = self.sphere.random_point()
        self.coef_sphere_true = self.sphere.projection(
            gs.random.rand(self.dim_sphere + 1)
        )
        self.y_sphere = self.sphere.metric.exp(
            self.X_sphere[:, None] * self.coef_sphere_true,
            base_point=self.intercept_sphere_true,
        )
        self.param_sphere_true = gs.vstack(
            [self.intercept_sphere_true, self.coef_sphere_true]
        )
        self.param_sphere_guess = gs.vstack(
            [
                self.y_sphere[0],
                self.sphere.to_tangent(
                    gs.random.normal(size=self.shape_sphere), self.y_sphere[0]
                ),
            ]
        )

        # Set up for special euclidean
        self.se2 = SpecialEuclidean(n=2)
        self.metric_se2 = self.se2.left_canonical_metric
        self.metric_se2.default_point_type = "matrix"
        self.shape_se2 = (3, 3)
        X = gs.random.rand(self.n_samples)
        self.X_se2 = X - gs.mean(X)
        self.intercept_se2_true = self.se2.random_point()
        self.coef_se2_true = self.se2.to_tangent(
            5.0 * gs.random.rand(*self.shape_se2), self.intercept_se2_true
        )
        self.y_se2 = self.metric_se2.exp(
            self.X_se2[:, None, None] * self.coef_se2_true[None],
            self.intercept_se2_true,
        )
        self.param_se2_true = gs.vstack(
            [
                gs.flatten(self.intercept_se2_true),
                gs.flatten(self.coef_se2_true),
            ]
        )
        self.param_se2_guess = gs.vstack(
            [
                gs.flatten(self.y_se2[0]),
                gs.flatten(
                    self.se2.to_tangent(
                        gs.random.normal(size=self.shape_se2), self.y_se2[0]
                    )
                ),
            ]
        )

        # Set up for discrete curves
        n_sampling_points = 8
        self.curves_2d = DiscreteCurves(R2)
        self.metric_curves_2d = self.curves_2d.srv_metric
        self.metric_curves_2d.default_point_type = "matrix"
        self.shape_curves_2d = (n_sampling_points, 2)
        X = gs.random.rand(self.n_samples)
        self.X_curves_2d = X - gs.mean(X)
        self.intercept_curves_2d_true = self.curves_2d.random_point(
            n_sampling_points=n_sampling_points
        )
        self.coef_curves_2d_true = self.curves_2d.to_tangent(
            5.0 * gs.random.rand(*self.shape_curves_2d),
            self.intercept_curves_2d_true,
        )
        # Added because of GitHub issue #1575
        intercept_curves_2d_true_repeated = gs.tile(
            gs.expand_dims(self.intercept_curves_2d_true, axis=0),
            (self.n_samples, 1, 1),
        )
        self.y_curves_2d = self.metric_curves_2d.exp(
            self.X_curves_2d[:, None, None] * self.coef_curves_2d_true[None],
            intercept_curves_2d_true_repeated,
        )
        self.param_curves_2d_true = gs.vstack(
            [
                gs.flatten(self.intercept_curves_2d_true),
                gs.flatten(self.coef_curves_2d_true),
            ]
        )
        self.param_curves_2d_guess = gs.vstack(
            [
                gs.flatten(self.y_curves_2d[0]),
                gs.flatten(
                    self.curves_2d.to_tangent(
                        gs.random.normal(size=self.shape_curves_2d),
                        self.y_curves_2d[0],
                    )
                ),
            ]
        )

    def test_loss_euclidean(self):
        """Test that the loss is 0 at the true parameters."""
        gr = GeodesicRegression(
            self.eucl,
            metric=self.eucl.metric,
            center_X=False,
            method="extrinsic",
            max_iter=50,
            init_step_size=0.1,
            verbose=True,
        )
        loss = gr._loss(
            self.X_eucl, self.y_eucl, self.param_eucl_true, self.shape_eucl
        )
        self.assertAllClose(loss.shape, ())
        self.assertTrue(gs.isclose(loss, 0.0))

    def test_loss_hypersphere(self):
        """Test that the loss is 0 at the true parameters."""
        gr = GeodesicRegression(
            self.sphere,
            metric=self.sphere.metric,
            center_X=False,
            method="extrinsic",
            max_iter=50,
            init_step_size=0.1,
            verbose=True,
        )
        loss = gr._loss(
            self.X_sphere, self.y_sphere, self.param_sphere_true, self.shape_sphere
        )
        self.assertAllClose(loss.shape, ())
        self.assertTrue(gs.isclose(loss, 0.0))

    @geomstats.tests.autograd_and_tf_only
    def test_loss_se2(self):
        """Test that the loss is 0 at the true parameters."""
        gr = GeodesicRegression(
            self.se2,
            metric=self.metric_se2,
            center_X=False,
            method="extrinsic",
            max_iter=50,
            init_step_size=0.1,
            verbose=True,
        )
        loss = gr._loss(self.X_se2, self.y_se2, self.param_se2_true, self.shape_se2)
        self.assertAllClose(loss.shape, ())
        self.assertTrue(gs.isclose(loss, 0.0))

    @geomstats.tests.autograd_only
    def test_loss_curves_2d(self):
        """Test that the loss is 0 at the true parameters."""
        gr = GeodesicRegression(
            self.curves_2d,
            metric=self.metric_curves_2d,
            center_X=False,
            method="extrinsic",
            max_iter=50,
            init_step_size=0.1,
            verbose=True,
        )
        loss = gr._loss(
            self.X_curves_2d,
            self.y_curves_2d,
            self.param_curves_2d_true,
            self.shape_curves_2d,
        )
        self.assertAllClose(loss.shape, ())
        self.assertTrue(gs.isclose(loss, 0.0))

    @geomstats.tests.autograd_tf_and_torch_only
    def test_value_and_grad_loss_euclidean(self):
        gr = GeodesicRegression(
            self.eucl,
            metric=self.eucl.metric,
            center_X=False,
            method="extrinsic",
            max_iter=50,
            init_step_size=0.1,
            verbose=True,
            regularization=0,
        )

        def loss_of_param(param):
            return gr._loss(self.X_eucl, self.y_eucl, param, self.shape_eucl)

        # Without numpy conversion
        objective_with_grad = gs.autodiff.value_and_grad(loss_of_param)
        loss_value, loss_grad = objective_with_grad(self.param_eucl_guess)

        expected_grad_shape = (2, self.dim_eucl)
        self.assertAllClose(loss_value.shape, ())
        self.assertAllClose(loss_grad.shape, expected_grad_shape)

        self.assertFalse(gs.isclose(loss_value, 0.0))
        self.assertFalse(gs.isnan(loss_value))
        self.assertFalse(
            gs.all(gs.isclose(loss_grad, gs.zeros(expected_grad_shape)))
        )
        self.assertTrue(gs.all(~gs.isnan(loss_grad)))

        # With numpy conversion
        objective_with_grad = gs.autodiff.value_and_grad(
            loss_of_param, to_numpy=True
        )
        loss_value, loss_grad = objective_with_grad(self.param_eucl_guess)
        # Convert back to arrays/tensors
        loss_value = gs.array(loss_value)
        loss_grad = gs.array(loss_grad)

        expected_grad_shape = (2, self.dim_eucl)
        self.assertAllClose(loss_value.shape, ())
        self.assertAllClose(loss_grad.shape, expected_grad_shape)

        self.assertFalse(gs.isclose(loss_value, 0.0))
        self.assertFalse(gs.isnan(loss_value))
        self.assertFalse(
            gs.all(gs.isclose(loss_grad, gs.zeros(expected_grad_shape)))
        )
        self.assertTrue(gs.all(~gs.isnan(loss_grad)))

    @geomstats.tests.autograd_tf_and_torch_only
    def test_value_and_grad_loss_hypersphere(self):
        gr = GeodesicRegression(
            self.sphere,
            metric=self.sphere.metric,
            center_X=False,
            method="extrinsic",
            max_iter=50,
            init_step_size=0.1,
            verbose=True,
            regularization=0,
        )

        def loss_of_param(param):
            return gr._loss(self.X_sphere, self.y_sphere, param, self.shape_sphere)

        # Without numpy conversion
        objective_with_grad = gs.autodiff.value_and_grad(loss_of_param)
        loss_value, loss_grad = objective_with_grad(self.param_sphere_guess)

        expected_grad_shape = (2, self.dim_sphere + 1)
        self.assertAllClose(loss_value.shape, ())
        self.assertAllClose(loss_grad.shape, expected_grad_shape)

        self.assertFalse(gs.isclose(loss_value, 0.0))
        self.assertFalse(gs.isnan(loss_value))
        self.assertFalse(
            gs.all(gs.isclose(loss_grad, gs.zeros(expected_grad_shape)))
        )
        self.assertTrue(gs.all(~gs.isnan(loss_grad)))

        # With numpy conversion
        objective_with_grad = gs.autodiff.value_and_grad(
            loss_of_param, to_numpy=True
        )
        loss_value, loss_grad = objective_with_grad(self.param_sphere_guess)
        # Convert back to arrays/tensors
        loss_value = gs.array(loss_value)
        loss_grad = gs.array(loss_grad)

        expected_grad_shape = (2, self.dim_sphere + 1)
        self.assertAllClose(loss_value.shape, ())
        self.assertAllClose(loss_grad.shape, expected_grad_shape)

        self.assertFalse(gs.isclose(loss_value, 0.0))
        self.assertFalse(gs.isnan(loss_value))
        self.assertFalse(
            gs.all(gs.isclose(loss_grad, gs.zeros(expected_grad_shape)))
        )
        self.assertTrue(gs.all(~gs.isnan(loss_grad)))

    @geomstats.tests.autograd_and_tf_only
    def test_value_and_grad_loss_se2(self):
        gr = GeodesicRegression(
            self.se2,
            metric=self.metric_se2,
            center_X=False,
            method="extrinsic",
            max_iter=50,
            init_step_size=0.1,
            verbose=True,
        )

        def loss_of_param(param):
            return gr._loss(self.X_se2, self.y_se2, param, self.shape_se2)

        objective_with_grad = gs.autodiff.value_and_grad(loss_of_param)
        loss_value, loss_grad = objective_with_grad(self.param_se2_true)
        expected_grad_shape = (2, self.shape_se2[0] * self.shape_se2[1])
        self.assertTrue(gs.isclose(loss_value, 0.0))

        loss_value, loss_grad = objective_with_grad(self.param_se2_guess)
        self.assertAllClose(loss_value.shape, ())
        self.assertAllClose(loss_grad.shape, expected_grad_shape)

        self.assertFalse(gs.isclose(loss_value, 0.0))
        self.assertFalse(
            gs.all(gs.isclose(loss_grad, gs.zeros(expected_grad_shape)))
        )
        self.assertTrue(gs.all(~gs.isnan(loss_grad)))

        objective_with_grad = gs.autodiff.value_and_grad(
            loss_of_param, to_numpy=True
        )
        loss_value, loss_grad = objective_with_grad(self.param_se2_guess)
        expected_grad_shape = (2, self.shape_se2[0] * self.shape_se2[1])
        self.assertAllClose(loss_value.shape, ())
        self.assertAllClose(loss_grad.shape, expected_grad_shape)

        self.assertFalse(gs.isclose(loss_value, 0.0))
        self.assertFalse(gs.isnan(loss_value))
        self.assertFalse(
            gs.all(gs.isclose(loss_grad, gs.zeros(expected_grad_shape)))
        )
        self.assertTrue(gs.all(~gs.isnan(loss_grad)))

    @geomstats.tests.autograd_tf_and_torch_only
    def test_loss_minimization_extrinsic_euclidean(self):
        """Minimize loss from noiseless data."""
        gr = GeodesicRegression(self.eucl, regularization=0)

        def loss_of_param(param):
            return gr._loss(self.X_eucl, self.y_eucl, param, self.shape_eucl)

        objective_with_grad = gs.autodiff.value_and_grad(
            loss_of_param, to_numpy=True
        )
        initial_guess = gs.flatten(self.param_eucl_guess)
        res = minimize(
            objective_with_grad,
            initial_guess,
            method="CG",
            jac=True,
            tol=10 * gs.atol,
            options={"disp": True, "maxiter": 50},
        )
        self.assertAllClose(gs.array(res.x).shape, (self.dim_eucl * 2,))
        self.assertAllClose(res.fun, 0.0, atol=1000 * gs.atol)

        # Cast required because minimization happens in scipy in float64
        param_hat = gs.cast(gs.array(res.x), self.param_eucl_true.dtype)

        intercept_hat, coef_hat = gs.split(param_hat, 2)
        coef_hat = self.eucl.to_tangent(coef_hat, intercept_hat)
        self.assertAllClose(intercept_hat, self.intercept_eucl_true)

        tangent_vec_of_transport = self.eucl.metric.log(
            self.intercept_eucl_true, base_point=intercept_hat
        )
        transported_coef_hat = self.eucl.metric.parallel_transport(
            tangent_vec=coef_hat,
            base_point=intercept_hat,
            direction=tangent_vec_of_transport,
        )
        self.assertAllClose(
            transported_coef_hat, self.coef_eucl_true, atol=10 * gs.atol
        )

    @geomstats.tests.autograd_tf_and_torch_only
    def test_loss_minimization_extrinsic_hypersphere(self):
        """Minimize loss from noiseless data."""
        gr = GeodesicRegression(self.sphere, regularization=0)

        def loss_of_param(param):
            return gr._loss(self.X_sphere, self.y_sphere, param, self.shape_sphere)

        objective_with_grad = gs.autodiff.value_and_grad(
            loss_of_param, to_numpy=True
        )
        initial_guess = gs.flatten(self.param_sphere_guess)
        res = minimize(
            objective_with_grad,
            initial_guess,
            method="CG",
            jac=True,
            tol=10 * gs.atol,
            options={"disp": True, "maxiter": 50},
        )
        self.assertAllClose(gs.array(res.x).shape, ((self.dim_sphere + 1) * 2,))
        self.assertAllClose(res.fun, 0.0, atol=5e-3)

        # Cast required because minimization happens in scipy in float64
        param_hat = gs.cast(gs.array(res.x), self.param_sphere_true.dtype)

        intercept_hat, coef_hat = gs.split(param_hat, 2)
        intercept_hat = self.sphere.projection(intercept_hat)
        coef_hat = self.sphere.to_tangent(coef_hat, intercept_hat)
        self.assertAllClose(intercept_hat, self.intercept_sphere_true, atol=5e-2)

        tangent_vec_of_transport = self.sphere.metric.log(
            self.intercept_sphere_true, base_point=intercept_hat
        )
        transported_coef_hat = self.sphere.metric.parallel_transport(
            tangent_vec=coef_hat,
            base_point=intercept_hat,
            direction=tangent_vec_of_transport,
        )
        self.assertAllClose(transported_coef_hat, self.coef_sphere_true, atol=0.6)

    @geomstats.tests.autograd_and_tf_only
    def test_loss_minimization_extrinsic_se2(self):
        gr = GeodesicRegression(
            self.se2,
            metric=self.metric_se2,
            center_X=False,
            method="extrinsic",
            max_iter=50,
            init_step_size=0.1,
            verbose=True,
        )

        def loss_of_param(param):
            return gr._loss(self.X_se2, self.y_se2, param, self.shape_se2)

        objective_with_grad = gs.autodiff.value_and_grad(
            loss_of_param, to_numpy=True
        )
        res = minimize(
            objective_with_grad,
            gs.flatten(self.param_se2_guess),
            method="CG",
            jac=True,
            options={"disp": True, "maxiter": 50},
        )
        self.assertAllClose(gs.array(res.x).shape, (18,))
        self.assertAllClose(res.fun, 0.0, atol=1e-6)

        # Cast required because minimization happens in scipy in float64
        param_hat = gs.cast(gs.array(res.x), self.param_se2_true.dtype)

        intercept_hat, coef_hat = gs.split(param_hat, 2)
        intercept_hat = gs.reshape(intercept_hat, self.shape_se2)
        coef_hat = gs.reshape(coef_hat, self.shape_se2)
        intercept_hat = self.se2.projection(intercept_hat)
        coef_hat = self.se2.to_tangent(coef_hat, intercept_hat)
        self.assertAllClose(intercept_hat, self.intercept_se2_true, atol=1e-4)

        tangent_vec_of_transport = self.se2.metric.log(
            self.intercept_se2_true, base_point=intercept_hat
        )
        transported_coef_hat = self.se2.metric.parallel_transport(
            tangent_vec=coef_hat,
            base_point=intercept_hat,
            direction=tangent_vec_of_transport,
        )
        self.assertAllClose(transported_coef_hat, self.coef_se2_true, atol=0.6)

    @geomstats.tests.autograd_tf_and_torch_only
    def test_fit_extrinsic_euclidean(self):
        gr = GeodesicRegression(
            self.eucl,
            metric=self.eucl.metric,
            center_X=False,
            method="extrinsic",
            max_iter=50,
            init_step_size=0.1,
            verbose=True,
            initialization="random",
            regularization=0.9,
        )

        gr.fit(self.X_eucl, self.y_eucl, compute_training_score=True)

        training_score = gr.training_score_
        intercept_hat, coef_hat = gr.intercept_, gr.coef_
        self.assertAllClose(intercept_hat.shape, self.shape_eucl)
        self.assertAllClose(coef_hat.shape, self.shape_eucl)
        self.assertAllClose(training_score, 1.0, atol=500 * gs.atol)
        self.assertAllClose(intercept_hat, self.intercept_eucl_true)

        tangent_vec_of_transport = self.eucl.metric.log(
            self.intercept_eucl_true, base_point=intercept_hat
        )
        transported_coef_hat = self.eucl.metric.parallel_transport(
            tangent_vec=coef_hat,
            base_point=intercept_hat,
            direction=tangent_vec_of_transport,
        )
        self.assertAllClose(transported_coef_hat, self.coef_eucl_true)

    @geomstats.tests.autograd_tf_and_torch_only
    def test_fit_extrinsic_hypersphere(self):
        gr = GeodesicRegression(
            self.sphere,
            metric=self.sphere.metric,
            center_X=False,
            method="extrinsic",
            max_iter=50,
            init_step_size=0.1,
            verbose=True,
            initialization="random",
            regularization=0.9,
        )

        gr.fit(self.X_sphere, self.y_sphere, compute_training_score=True)

        training_score = gr.training_score_
        intercept_hat, coef_hat = gr.intercept_, gr.coef_
        self.assertAllClose(intercept_hat.shape, self.shape_sphere)
        self.assertAllClose(coef_hat.shape, self.shape_sphere)
        self.assertAllClose(training_score, 1.0, atol=500 * gs.atol)
        self.assertAllClose(intercept_hat, self.intercept_sphere_true, atol=5e-3)

        tangent_vec_of_transport = self.sphere.metric.log(
            self.intercept_sphere_true, base_point=intercept_hat
        )
        transported_coef_hat = self.sphere.metric.parallel_transport(
            tangent_vec=coef_hat,
            base_point=intercept_hat,
            direction=tangent_vec_of_transport,
        )
        self.assertAllClose(transported_coef_hat, self.coef_sphere_true, atol=0.6)

    @geomstats.tests.autograd_and_tf_only
    def test_fit_extrinsic_se2(self):
        gr = GeodesicRegression(
            self.se2,
            metric=self.metric_se2,
            center_X=False,
            method="extrinsic",
            max_iter=50,
            init_step_size=0.1,
            verbose=True,
            initialization="warm_start",
        )

        gr.fit(self.X_se2, self.y_se2, compute_training_score=True)

        intercept_hat, coef_hat = gr.intercept_, gr.coef_
        training_score = gr.training_score_
        self.assertAllClose(intercept_hat.shape, self.shape_se2)
        self.assertAllClose(coef_hat.shape, self.shape_se2)
        self.assertTrue(gs.isclose(training_score, 1.0))
        self.assertAllClose(intercept_hat, self.intercept_se2_true, atol=1e-4)

        tangent_vec_of_transport = self.se2.metric.log(
            self.intercept_se2_true, base_point=intercept_hat
        )
        transported_coef_hat = self.se2.metric.parallel_transport(
            tangent_vec=coef_hat,
            base_point=intercept_hat,
            direction=tangent_vec_of_transport,
        )
        self.assertAllClose(transported_coef_hat, self.coef_se2_true, atol=0.6)

    @geomstats.tests.autograd_tf_and_torch_only
    def test_fit_riemannian_euclidean(self):
        gr = GeodesicRegression(
            self.eucl,
            metric=self.eucl.metric,
            center_X=False,
            method="riemannian",
            max_iter=50,
            init_step_size=0.1,
            verbose=True,
        )

        gr.fit(self.X_eucl, self.y_eucl, compute_training_score=True)

        intercept_hat, coef_hat = gr.intercept_, gr.coef_
        training_score = gr.training_score_
        self.assertAllClose(intercept_hat.shape, self.shape_eucl)
        self.assertAllClose(coef_hat.shape, self.shape_eucl)
        self.assertAllClose(training_score, 1.0, atol=0.1)
        self.assertAllClose(intercept_hat, self.intercept_eucl_true)

        tangent_vec_of_transport = self.eucl.metric.log(
            self.intercept_eucl_true, base_point=intercept_hat
        )
        transported_coef_hat = self.eucl.metric.parallel_transport(
            tangent_vec=coef_hat,
            base_point=intercept_hat,
            direction=tangent_vec_of_transport,
        )
        self.assertAllClose(transported_coef_hat, self.coef_eucl_true, atol=1e-2)

    @geomstats.tests.autograd_tf_and_torch_only
    def test_fit_riemannian_hypersphere(self):
        gr = GeodesicRegression(
            self.sphere,
            metric=self.sphere.metric,
            center_X=False,
            method="riemannian",
            max_iter=50,
            init_step_size=0.1,
            verbose=True,
        )

        gr.fit(self.X_sphere, self.y_sphere, compute_training_score=True)

        intercept_hat, coef_hat = gr.intercept_, gr.coef_
        training_score = gr.training_score_
        self.assertAllClose(intercept_hat.shape, self.shape_sphere)
        self.assertAllClose(coef_hat.shape, self.shape_sphere)
        self.assertAllClose(training_score, 1.0, atol=0.1)
        self.assertAllClose(intercept_hat, self.intercept_sphere_true, atol=1e-2)

        tangent_vec_of_transport = self.sphere.metric.log(
            self.intercept_sphere_true, base_point=intercept_hat
        )
        transported_coef_hat = self.sphere.metric.parallel_transport(
            tangent_vec=coef_hat,
            base_point=intercept_hat,
            direction=tangent_vec_of_transport,
        )
        self.assertAllClose(transported_coef_hat, self.coef_sphere_true, atol=0.6)

    @geomstats.tests.autograd_and_tf_only
    def test_fit_riemannian_se2(self):
        init = (self.y_se2[0], gs.zeros_like(self.y_se2[0]))
        gr = GeodesicRegression(
            self.se2,
            metric=self.metric_se2,
            center_X=False,
            method="riemannian",
            max_iter=50,
            init_step_size=0.1,
            verbose=True,
            initialization=init,
        )

        gr.fit(self.X_se2, self.y_se2, compute_training_score=True)

        intercept_hat, coef_hat = gr.intercept_, gr.coef_
        training_score = gr.training_score_
        self.assertAllClose(intercept_hat.shape, self.shape_se2)
        self.assertAllClose(coef_hat.shape, self.shape_se2)
        self.assertAllClose(training_score, 1.0, atol=1e-4)
        self.assertAllClose(intercept_hat, self.intercept_se2_true, atol=1e-4)

        tangent_vec_of_transport = self.se2.metric.log(
            self.intercept_se2_true, base_point=intercept_hat
        )
        transported_coef_hat = self.se2.metric.parallel_transport(
            tangent_vec=coef_hat,
            base_point=intercept_hat,
            direction=tangent_vec_of_transport,
        )
        self.assertAllClose(transported_coef_hat, self.coef_se2_true, atol=0.6)
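
# ---------------------------------------------------------------------------
# Usage sketch (illustrative, not a test): how the estimator exercised above
# is typically driven end to end. The data generation mirrors
# `TestGeodesicRegression.setup_method`, and only `fit`, `intercept_` and
# `coef_` are relied on, exactly as in the tests; the function name is
# hypothetical.
# ---------------------------------------------------------------------------
def _example_geodesic_regression_on_sphere():
    """Fit a geodesic regression on S^2 from synthetic noiseless data."""
    sphere = Hypersphere(dim=2)
    X = gs.random.rand(30)
    X = X - gs.mean(X)

    intercept = sphere.random_point()
    coef = sphere.to_tangent(gs.random.rand(3), intercept)
    # Model: y = Exp_intercept(X * coef), i.e. points along one geodesic.
    y = sphere.metric.exp(X[:, None] * coef, base_point=intercept)

    gr = GeodesicRegression(sphere, metric=sphere.metric, method="extrinsic")
    gr.fit(X, y)
    return gr.intercept_, gr.coef_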
class TestHypersphereMethods(geomstats.tests.TestCase):
    def setUp(self):
        gs.random.seed(1234)

        self.dimension = 4
        self.space = Hypersphere(dimension=self.dimension)
        self.metric = self.space.metric
        self.n_samples = 10

    @geomstats.tests.np_and_pytorch_only
    def test_random_uniform_and_belongs(self):
        """
        Test that the random uniform method samples
        on the hypersphere space.
        """
        n_samples = self.n_samples
        point = self.space.random_uniform(n_samples)
        result = self.space.belongs(point)
        expected = gs.array([[True]] * n_samples)

        self.assertAllClose(expected, result)

    @geomstats.tests.np_and_pytorch_only
    def test_random_uniform(self):
        point = self.space.random_uniform()

        self.assertAllClose(gs.shape(point), (1, self.dimension + 1))

    def test_projection_and_belongs(self):
        point = gs.array([1., 2., 3., 4., 5.])
        proj = self.space.projection(point)
        result = self.space.belongs(proj)
        expected = gs.array([[True]])

        self.assertAllClose(expected, result)

    def test_intrinsic_and_extrinsic_coords(self):
        """
        Test that the composition of
        intrinsic_to_extrinsic_coords and
        extrinsic_to_intrinsic_coords
        gives the identity.
        """
        point_int = gs.array([.1, 0., 0., .1])
        point_ext = self.space.intrinsic_to_extrinsic_coords(point_int)
        result = self.space.extrinsic_to_intrinsic_coords(point_ext)
        expected = point_int
        expected = helper.to_vector(expected)

        self.assertAllClose(result, expected)

        point_ext = 1. / (gs.sqrt(6.)) * gs.array([1., 0., 0., 1., 2.])
        point_int = self.space.extrinsic_to_intrinsic_coords(point_ext)
        result = self.space.intrinsic_to_extrinsic_coords(point_int)
        expected = point_ext
        expected = helper.to_vector(expected)

        self.assertAllClose(result, expected)

    def test_intrinsic_and_extrinsic_coords_vectorization(self):
        """
        Test that the composition of
        intrinsic_to_extrinsic_coords and
        extrinsic_to_intrinsic_coords
        gives the identity.
        """
        point_int = gs.array(
            [[.1, 0., 0., .1],
             [.1, .1, .1, .4],
             [.1, .3, 0., .1],
             [-0.1, .1, -.4, .1],
             [0., 0., .1, .1],
             [.1, .1, .1, .1]])
        point_ext = self.space.intrinsic_to_extrinsic_coords(point_int)
        result = self.space.extrinsic_to_intrinsic_coords(point_ext)
        expected = point_int
        expected = helper.to_vector(expected)

        self.assertAllClose(result, expected)

        point_int = self.space.extrinsic_to_intrinsic_coords(point_ext)
        result = self.space.intrinsic_to_extrinsic_coords(point_int)
        expected = point_ext
        expected = helper.to_vector(expected)

        self.assertAllClose(result, expected)

    @geomstats.tests.np_and_pytorch_only
    def test_log_and_exp_general_case(self):
        """
        Test that the riemannian exponential
        and the riemannian logarithm are inverse.

        Expect their composition to give the identity function.

        NB: points on the n-dimensional sphere are
        (n+1)-D vectors of norm 1.
        """
        # Riemannian Log then Riemannian Exp
        # General case
        base_point = gs.array([1., 2., 3., 4., 6.])
        base_point = base_point / gs.linalg.norm(base_point)
        point = gs.array([0., 5., 6., 2., -1.])
        point = point / gs.linalg.norm(point)

        log = self.metric.log(point=point, base_point=base_point)
        result = self.metric.exp(tangent_vec=log, base_point=base_point)
        expected = point
        expected = helper.to_vector(expected)

        self.assertAllClose(result, expected, atol=1e-6)

    @geomstats.tests.np_and_pytorch_only
    def test_log_and_exp_edge_case(self):
        """
        Test that the riemannian exponential
        and the riemannian logarithm are inverse.

        Expect their composition to give the identity function.

        NB: points on the n-dimensional sphere are
        (n+1)-D vectors of norm 1.
        """
        # Riemannian Log then Riemannian Exp
        # Edge case: two very close points, base_point_2 and point_2,
        # form an angle < epsilon
        base_point = gs.array([1., 2., 3., 4., 6.])
        base_point = base_point / gs.linalg.norm(base_point)
        point = base_point + 1e-12 * gs.array([-1., -2., 1., 1., .1])
        point = point / gs.linalg.norm(point)

        log = self.metric.log(point=point, base_point=base_point)
        result = self.metric.exp(tangent_vec=log, base_point=base_point)
        expected = point
        expected = helper.to_vector(expected)

        self.assertAllClose(result, expected)

    @geomstats.tests.np_and_pytorch_only
    def test_exp_vectorization(self):
        n_samples = self.n_samples
        dim = self.dimension + 1

        one_vec = self.space.random_uniform()
        one_base_point = self.space.random_uniform()
        n_vecs = self.space.random_uniform(n_samples=n_samples)
        n_base_points = self.space.random_uniform(n_samples=n_samples)

        one_tangent_vec = self.space.projection_to_tangent_space(
            one_vec, base_point=one_base_point)
        result = self.metric.exp(one_tangent_vec, one_base_point)
        self.assertAllClose(gs.shape(result), (1, dim))

        n_tangent_vecs = self.space.projection_to_tangent_space(
            n_vecs, base_point=one_base_point)
        result = self.metric.exp(n_tangent_vecs, one_base_point)
        self.assertAllClose(gs.shape(result), (n_samples, dim))

        one_tangent_vec = self.space.projection_to_tangent_space(
            one_vec, base_point=n_base_points)
        result = self.metric.exp(one_tangent_vec, n_base_points)
        self.assertAllClose(gs.shape(result), (n_samples, dim))

        n_tangent_vecs = self.space.projection_to_tangent_space(
            n_vecs, base_point=n_base_points)
        result = self.metric.exp(n_tangent_vecs, n_base_points)
        self.assertAllClose(gs.shape(result), (n_samples, dim))

    @geomstats.tests.np_and_pytorch_only
    def test_log_vectorization(self):
        n_samples = self.n_samples
        dim = self.dimension + 1

        one_base_point = self.space.random_uniform()
        one_point = self.space.random_uniform()
        n_points = self.space.random_uniform(n_samples=n_samples)
        n_base_points = self.space.random_uniform(n_samples=n_samples)

        result = self.metric.log(one_point, one_base_point)
        self.assertAllClose(gs.shape(result), (1, dim))

        result = self.metric.log(n_points, one_base_point)
        self.assertAllClose(gs.shape(result), (n_samples, dim))

        result = self.metric.log(one_point, n_base_points)
        self.assertAllClose(gs.shape(result), (n_samples, dim))

        result = self.metric.log(n_points, n_base_points)
        self.assertAllClose(gs.shape(result), (n_samples, dim))

    @geomstats.tests.np_and_pytorch_only
    def test_exp_and_log_and_projection_to_tangent_space_general_case(self):
        """
        Test that the riemannian exponential
        and the riemannian logarithm are inverse.

        Expect their composition to give the identity function.

        NB: points on the n-dimensional sphere are
        (n+1)-D vectors of norm 1.
        """
        # TODO(nina): Fix that this test fails, also in numpy
        # Riemannian Exp then Riemannian Log
        # General case
        # NB: Riemannian log gives a regularized tangent vector,
        # so we take the norm modulo 2 * pi.
        base_point = gs.array([0., -3., 0., 3., 4.])
        base_point = base_point / gs.linalg.norm(base_point)
        vector = gs.array([9., 5., 0., 0., -1.])
        vector = self.space.projection_to_tangent_space(
            vector=vector, base_point=base_point)

        # exp = self.metric.exp(tangent_vec=vector, base_point=base_point)
        # result = self.metric.log(point=exp, base_point=base_point)

        expected = vector
        norm_expected = gs.linalg.norm(expected)
        regularized_norm_expected = gs.mod(norm_expected, 2 * gs.pi)
        expected = expected / norm_expected * regularized_norm_expected
        expected = helper.to_vector(expected)

    @geomstats.tests.np_and_pytorch_only
    def test_exp_and_log_and_projection_to_tangent_space_edge_case(self):
        """
        Test that the riemannian exponential
        and the riemannian logarithm are inverse.

        Expect their composition to give the identity function.

        NB: points on the n-dimensional sphere are
        (n+1)-D vectors of norm 1.
        """
        # Riemannian Exp then Riemannian Log
        # Edge case: tangent vector has norm < epsilon
        base_point = gs.array([10., -2., -.5, 34., 3.])
        base_point = base_point / gs.linalg.norm(base_point)
        vector = 1e-10 * gs.array([.06, -51., 6., 5., 3.])
        vector = self.space.projection_to_tangent_space(
            vector=vector, base_point=base_point)

        exp = self.metric.exp(tangent_vec=vector, base_point=base_point)
        result = self.metric.log(point=exp, base_point=base_point)
        expected = self.space.projection_to_tangent_space(
            vector=vector, base_point=base_point)
        expected = helper.to_vector(expected)

        self.assertAllClose(result, expected, atol=1e-8)

    def test_squared_norm_and_squared_dist(self):
        """
        Test that the squared distance between two points is
        the squared norm of their logarithm.
        """
        point_a = 1. / gs.sqrt(129.) * gs.array([10., -2., -5., 0., 0.])
        point_b = 1. / gs.sqrt(435.) * gs.array([1., -20., -5., 0., 3.])
        log = self.metric.log(point=point_a, base_point=point_b)
        result = self.metric.squared_norm(vector=log)
        expected = self.metric.squared_dist(point_a, point_b)
        expected = helper.to_scalar(expected)

        self.assertAllClose(result, expected)

    @geomstats.tests.np_and_pytorch_only
    def test_squared_dist_vectorization(self):
        n_samples = self.n_samples

        one_point_a = self.space.random_uniform()
        one_point_b = self.space.random_uniform()
        n_points_a = self.space.random_uniform(n_samples=n_samples)
        n_points_b = self.space.random_uniform(n_samples=n_samples)

        result = self.metric.squared_dist(one_point_a, one_point_b)
        self.assertAllClose(gs.shape(result), (1, 1))

        result = self.metric.squared_dist(n_points_a, one_point_b)
        self.assertAllClose(gs.shape(result), (n_samples, 1))

        result = self.metric.squared_dist(one_point_a, n_points_b)
        self.assertAllClose(gs.shape(result), (n_samples, 1))

        result = self.metric.squared_dist(n_points_a, n_points_b)
        self.assertAllClose(gs.shape(result), (n_samples, 1))

    def test_norm_and_dist(self):
        """
        Test that the distance between two points is
        the norm of their logarithm.
        """
        point_a = 1. / gs.sqrt(129.) * gs.array([10., -2., -5., 0., 0.])
        point_b = 1. / gs.sqrt(435.) * gs.array([1., -20., -5., 0., 3.])
        log = self.metric.log(point=point_a, base_point=point_b)
        result = self.metric.norm(vector=log)
        expected = self.metric.dist(point_a, point_b)
        expected = helper.to_scalar(expected)

        self.assertAllClose(result, expected)

    def test_dist_point_and_itself(self):
        # Distance between a point and itself is 0
        point_a = 1. / gs.sqrt(129.) * gs.array([10., -2., -5., 0., 0.])
        point_b = point_a
        result = self.metric.dist(point_a, point_b)
        expected = 0.
        expected = helper.to_scalar(expected)

        self.assertAllClose(result, expected)

    def test_dist_orthogonal_points(self):
        # Distance between two orthogonal points is pi / 2.
        point_a = gs.array([10., -2., -.5, 0., 0.])
        point_a = point_a / gs.linalg.norm(point_a)
        point_b = gs.array([2., 10, 0., 0., 0.])
        point_b = point_b / gs.linalg.norm(point_b)
        result = gs.dot(point_a, point_b)
        result = helper.to_scalar(result)
        expected = 0
        expected = helper.to_scalar(expected)
        self.assertAllClose(result, expected)

        result = self.metric.dist(point_a, point_b)
        expected = gs.pi / 2
        expected = helper.to_scalar(expected)

        self.assertAllClose(result, expected)

    @geomstats.tests.np_and_pytorch_only
    def test_exp_and_dist_and_projection_to_tangent_space(self):
        base_point = gs.array([16., -2., -2.5, 84., 3.])
        base_point = base_point / gs.linalg.norm(base_point)
        vector = gs.array([9., 0., -1., -2., 1.])
        tangent_vec = self.space.projection_to_tangent_space(
            vector=vector, base_point=base_point)
        exp = self.metric.exp(tangent_vec=tangent_vec, base_point=base_point)

        result = self.metric.dist(base_point, exp)
        expected = gs.linalg.norm(tangent_vec) % (2 * gs.pi)
        expected = helper.to_scalar(expected)

        self.assertAllClose(result, expected)

    @geomstats.tests.np_and_pytorch_only
    def test_exp_and_dist_and_projection_to_tangent_space_vec(self):
        base_point = gs.array([[16., -2., -2.5, 84., 3.],
                               [16., -2., -2.5, 84., 3.]])
        base_single_point = gs.array([16., -2., -2.5, 84., 3.])
        scalar_norm = gs.linalg.norm(base_single_point)
        base_point = base_point / scalar_norm
        vector = gs.array([[9., 0., -1., -2., 1.],
                           [9., 0., -1., -2., 1]])
        tangent_vec = self.space.projection_to_tangent_space(
            vector=vector, base_point=base_point)
        exp = self.metric.exp(tangent_vec=tangent_vec, base_point=base_point)

        result = self.metric.dist(base_point, exp)
        expected = gs.linalg.norm(tangent_vec, axis=-1) % (2 * gs.pi)
        expected = helper.to_scalar(expected)

        self.assertAllClose(result, expected)

    @geomstats.tests.np_and_pytorch_only
    def test_geodesic_and_belongs(self):
        n_geodesic_points = 100
        initial_point = self.space.random_uniform()
        vector = gs.array([2., 0., -1., -2., 1.])
        initial_tangent_vec = self.space.projection_to_tangent_space(
            vector=vector, base_point=initial_point)
        geodesic = self.metric.geodesic(
            initial_point=initial_point,
            initial_tangent_vec=initial_tangent_vec)

        t = gs.linspace(start=0., stop=1., num=n_geodesic_points)
        points = geodesic(t)

        result = self.space.belongs(points)
        expected = gs.array(n_geodesic_points * [[True]])

        self.assertAllClose(expected, result)

    def test_inner_product(self):
        tangent_vec_a = gs.array([1., 0., 0., 0., 0.])
        tangent_vec_b = gs.array([0., 1., 0., 0., 0.])
        base_point = gs.array([0., 0., 0., 0., 1.])
        result = self.metric.inner_product(
            tangent_vec_a, tangent_vec_b, base_point)
        expected = gs.array([[0.]])

        self.assertAllClose(expected, result)

    @geomstats.tests.np_and_pytorch_only
    def test_variance(self):
        point = gs.array([0., 0., 0., 0., 1.])
        points = gs.zeros((2, point.shape[0]))
        points[0, :] = point
        points[1, :] = point
        result = self.metric.variance(points)
        expected = helper.to_scalar(0.)

        self.assertAllClose(expected, result)

    @geomstats.tests.np_and_pytorch_only
    def test_mean(self):
        point = gs.array([0., 0., 0., 0., 1.])
        points = gs.zeros((2, point.shape[0]))
        points[0, :] = point
        points[1, :] = point
        result = self.metric.mean(points)
        expected = helper.to_vector(point)

        self.assertAllClose(expected, result)

    @geomstats.tests.np_only
    def test_adaptive_gradientdescent_mean(self):
        n_tests = 100
        result = gs.zeros(n_tests)
        expected = gs.zeros(n_tests)

        for i in range(n_tests):
            # take 2 random points, compute their mean, and verify that
            # log of each at the mean is opposite
            points = self.space.random_uniform(n_samples=2)
            mean = self.metric.adaptive_gradientdescent_mean(points)
            logs = self.metric.log(point=points, base_point=mean)
            result[i] = gs.linalg.norm(logs[1, :] + logs[0, :])

        self.assertAllClose(expected, result, rtol=1e-10, atol=1e-10)

    @geomstats.tests.np_and_pytorch_only
    def test_mean_and_belongs(self):
        point_a = gs.array([1., 0., 0., 0., 0.])
        point_b = gs.array([0., 1., 0., 0., 0.])
        points = gs.zeros((2, point_a.shape[0]))
        points[0, :] = point_a
        points[1, :] = point_b

        mean = self.metric.mean(points)
        result = self.space.belongs(mean)
        expected = gs.array([[True]])

        self.assertAllClose(result, expected)

    def test_diameter(self):
        dim = 2
        sphere = Hypersphere(dim)
        point_a = gs.array([[0., 0., 1.]])
        point_b = gs.array([[1., 0., 0.]])
        point_c = gs.array([[0., 0., -1.]])
        result = sphere.metric.diameter(
            gs.vstack((point_a, point_b, point_c)))
        expected = gs.pi

        self.assertAllClose(expected, result)

    @geomstats.tests.np_and_pytorch_only
    def test_closest_neighbor_index(self):
        """
        Check that the closest neighbor is one of neighbors.
        """
        n_samples = 10
        points = self.space.random_uniform(n_samples=n_samples)
        point = points[0, :]
        neighbors = points[1:, :]
        index = self.metric.closest_neighbor_index(point, neighbors)
        closest_neighbor = points[index, :]

        test = gs.sum(gs.all(points == closest_neighbor, axis=1))
        result = test > 0

        self.assertTrue(result)

    @geomstats.tests.np_and_pytorch_only
    def test_sample_von_mises_fisher(self):
        """
        Check that the maximum likelihood estimates of the mean and
        concentration parameter are close to the real values. A first
        estimation of the concentration parameter is obtained by a
        closed-form expression and improved through the Newton method.
        """
        dim = 2
        n_points = 1000000
        sphere = Hypersphere(dim)

        # check mean value for concentrated distribution
        kappa = 10000000
        points = sphere.random_von_mises_fisher(kappa, n_points)
        sum_points = gs.sum(points, axis=0)
        mean = gs.array([0., 0., 1.])
        mean_estimate = sum_points / gs.linalg.norm(sum_points)
        expected = mean
        result = mean_estimate
        self.assertTrue(
            gs.allclose(result, expected, atol=MEAN_ESTIMATION_TOL))

        # check concentration parameter for dispersed distribution
        kappa = 1
        points = sphere.random_von_mises_fisher(kappa, n_points)
        sum_points = gs.sum(points, axis=0)
        mean_norm = gs.linalg.norm(sum_points) / n_points
        kappa_estimate = (mean_norm * (dim + 1. - mean_norm**2)
                          / (1. - mean_norm**2))
        kappa_estimate = gs.cast(kappa_estimate, gs.float64)
        p = dim + 1
        n_steps = 100
        for i in range(n_steps):
            bessel_func_1 = scipy.special.iv(p / 2., kappa_estimate)
            bessel_func_2 = scipy.special.iv(p / 2. - 1., kappa_estimate)
            ratio = bessel_func_1 / bessel_func_2
            denominator = 1. - ratio**2 - (p - 1.) * ratio / kappa_estimate
            mean_norm = gs.cast(mean_norm, gs.float64)
            kappa_estimate = kappa_estimate - (ratio - mean_norm) / denominator
        expected = kappa
        result = kappa_estimate
        self.assertTrue(
            gs.allclose(result, expected, atol=KAPPA_ESTIMATION_TOL))

    @geomstats.tests.np_and_pytorch_only
    def test_spherical_to_extrinsic(self):
        """
        Check vectorization of conversion from spherical
        to extrinsic coordinates on the 2-sphere.
        """
        dim = 2
        sphere = Hypersphere(dim)

        points_spherical = gs.array([[gs.pi / 2, 0],
                                     [gs.pi / 6, gs.pi / 4]])
        result = sphere.spherical_to_extrinsic(points_spherical)
        expected = gs.array(
            [[1., 0., 0.],
             [gs.sqrt(2) / 4, gs.sqrt(2) / 4, gs.sqrt(3) / 2]])

        self.assertAllClose(result, expected)

    @geomstats.tests.np_and_pytorch_only
    def test_tangent_spherical_to_extrinsic(self):
        """
        Check vectorization of conversion from spherical
        to extrinsic coordinates for tangent vectors to the
        2-sphere.
        """
        dim = 2
        sphere = Hypersphere(dim)

        base_points_spherical = gs.array([[gs.pi / 2, 0],
                                          [gs.pi / 2, 0]])
        tangent_vecs_spherical = gs.array([[0.25, 0.5],
                                           [0.3, 0.2]])
        result = sphere.tangent_spherical_to_extrinsic(
            tangent_vecs_spherical, base_points_spherical)
        expected = gs.array([[0, 0.5, -0.25],
                             [0, 0.2, -0.3]])

        self.assertAllClose(result, expected)

    def test_christoffels_vectorization(self):
        """
        Check vectorization of Christoffel symbols in
        spherical coordinates on the 2-sphere.
        """
        dim = 2
        sphere = Hypersphere(dim)

        points_spherical = gs.array([[gs.pi / 2, 0],
                                     [gs.pi / 6, gs.pi / 4]])
        christoffel = sphere.metric.christoffels(points_spherical)
        result = christoffel.shape
        expected = gs.array([2, dim, dim, dim])

        self.assertAllClose(result, expected)
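
# ---------------------------------------------------------------------------
# Side note (illustrative): the Newton refinement that the von Mises-Fisher
# tests above and below inline, factored out as a standalone helper. It
# implements the standard update kappa <- kappa - (A_p(kappa) - R) / A_p'(kappa)
# with A_p(kappa) = I_{p/2}(kappa) / I_{p/2-1}(kappa), exactly as written in
# the tests; `mean_norm` is the norm of the sample mean (R-bar). The function
# name is hypothetical.
# ---------------------------------------------------------------------------
def _newton_kappa_estimate(mean_norm, dim, n_steps=100):
    """Refine the closed-form vMF concentration estimate by Newton's method."""
    p = dim + 1
    # Closed-form initial estimate, as in the tests.
    kappa = mean_norm * (dim + 1. - mean_norm**2) / (1. - mean_norm**2)
    for _ in range(n_steps):
        ratio = (scipy.special.iv(p / 2., kappa)
                 / scipy.special.iv(p / 2. - 1., kappa))
        # Derivative of A_p: A_p'(kappa) = 1 - A_p^2 - (p - 1) A_p / kappa.
        denominator = 1. - ratio**2 - (p - 1.) * ratio / kappa
        kappa = kappa - (ratio - mean_norm) / denominator
    return kappa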
class TestHypersphere(geomstats.tests.TestCase):
    def setUp(self):
        gs.random.seed(1234)

        self.dimension = 4
        self.space = Hypersphere(dim=self.dimension)
        self.metric = self.space.metric
        self.n_samples = 10

    def test_random_uniform_and_belongs(self):
        """Test random uniform and belongs.

        Test that the random uniform method samples
        on the hypersphere space.
        """
        n_samples = self.n_samples
        point = self.space.random_uniform(n_samples)
        result = self.space.belongs(point)
        expected = gs.array([True] * n_samples)

        self.assertAllClose(expected, result)

    def test_random_uniform(self):
        point = self.space.random_uniform()

        self.assertAllClose(gs.shape(point), (self.dimension + 1,))

    def test_replace_values(self):
        points = gs.ones((3, 5))
        new_points = gs.zeros((2, 5))
        indcs = [True, False, True]
        update = self.space._replace_values(points, new_points, indcs)
        self.assertAllClose(
            update, gs.stack([gs.zeros(5), gs.ones(5), gs.zeros(5)]))

    def test_projection_and_belongs(self):
        point = gs.array([1., 2., 3., 4., 5.])
        proj = self.space.projection(point)
        result = self.space.belongs(proj)
        expected = True

        self.assertAllClose(expected, result)

    def test_intrinsic_and_extrinsic_coords(self):
        """
        Test that the composition of
        intrinsic_to_extrinsic_coords and
        extrinsic_to_intrinsic_coords
        gives the identity.
        """
        point_int = gs.array([.1, 0., 0., .1])
        point_ext = self.space.intrinsic_to_extrinsic_coords(point_int)
        result = self.space.extrinsic_to_intrinsic_coords(point_ext)
        expected = point_int

        self.assertAllClose(result, expected)

        point_ext = 1. / (gs.sqrt(6.)) * gs.array([1., 0., 0., 1., 2.])
        point_int = self.space.extrinsic_to_intrinsic_coords(point_ext)
        result = self.space.intrinsic_to_extrinsic_coords(point_int)
        expected = point_ext

        self.assertAllClose(result, expected)

    def test_intrinsic_and_extrinsic_coords_vectorization(self):
        """Test change of coordinates.

        Test that the composition of
        intrinsic_to_extrinsic_coords and
        extrinsic_to_intrinsic_coords
        gives the identity.
        """
        point_int = gs.array(
            [[.1, 0., 0., .1],
             [.1, .1, .1, .4],
             [.1, .3, 0., .1],
             [-0.1, .1, -.4, .1],
             [0., 0., .1, .1],
             [.1, .1, .1, .1]])
        point_ext = self.space.intrinsic_to_extrinsic_coords(point_int)
        result = self.space.extrinsic_to_intrinsic_coords(point_ext)
        expected = point_int

        self.assertAllClose(result, expected)

        point_int = self.space.extrinsic_to_intrinsic_coords(point_ext)
        result = self.space.intrinsic_to_extrinsic_coords(point_int)
        expected = point_ext

        self.assertAllClose(result, expected)

    def test_log_and_exp_general_case(self):
        """Test Log and Exp.

        Test that the Riemannian exponential
        and the Riemannian logarithm are inverse.

        Expect their composition to give the identity function.

        NB: points on the n-dimensional sphere are
        (n+1)-D vectors of norm 1.
        """
        # Riemannian Log then Riemannian Exp
        # General case
        base_point = gs.array([1., 2., 3., 4., 6.])
        base_point = base_point / gs.linalg.norm(base_point)
        point = gs.array([0., 5., 6., 2., -1.])
        point = point / gs.linalg.norm(point)

        log = self.metric.log(point=point, base_point=base_point)
        result = self.metric.exp(tangent_vec=log, base_point=base_point)
        expected = point

        self.assertAllClose(result, expected, atol=1e-6)

    def test_log_and_exp_edge_case(self):
        """Test Log and Exp.

        Test that the Riemannian exponential
        and the Riemannian logarithm are inverse.

        Expect their composition to give the identity function.

        NB: points on the n-dimensional sphere are
        (n+1)-D vectors of norm 1.
        """
        # Riemannian Log then Riemannian Exp
        # Edge case: two very close points, base_point_2 and point_2,
        # form an angle < epsilon
        base_point = gs.array([1., 2., 3., 4., 6.])
        base_point = base_point / gs.linalg.norm(base_point)
        point = base_point + 1e-4 * gs.array([-1., -2., 1., 1., .1])
        point = point / gs.linalg.norm(point)

        log = self.metric.log(point=point, base_point=base_point)
        result = self.metric.exp(tangent_vec=log, base_point=base_point)
        expected = point

        self.assertAllClose(result, expected)

    def test_exp_vectorization_single_samples(self):
        dim = self.dimension + 1

        one_vec = self.space.random_uniform()
        one_base_point = self.space.random_uniform()
        one_tangent_vec = self.space.to_tangent(
            one_vec, base_point=one_base_point)
        result = self.metric.exp(one_tangent_vec, one_base_point)
        self.assertAllClose(gs.shape(result), (dim,))

        one_base_point = gs.to_ndarray(one_base_point, to_ndim=2)
        result = self.metric.exp(one_tangent_vec, one_base_point)
        self.assertAllClose(gs.shape(result), (1, dim))

        one_tangent_vec = gs.to_ndarray(one_tangent_vec, to_ndim=2)
        result = self.metric.exp(one_tangent_vec, one_base_point)
        self.assertAllClose(gs.shape(result), (1, dim))

        one_base_point = self.space.random_uniform()
        result = self.metric.exp(one_tangent_vec, one_base_point)
        self.assertAllClose(gs.shape(result), (1, dim))

    def test_exp_vectorization_n_samples(self):
        n_samples = self.n_samples
        dim = self.dimension + 1

        one_vec = self.space.random_uniform()
        one_base_point = self.space.random_uniform()
        n_vecs = self.space.random_uniform(n_samples=n_samples)
        n_base_points = self.space.random_uniform(n_samples=n_samples)

        n_tangent_vecs = self.space.to_tangent(
            n_vecs, base_point=one_base_point)
        result = self.metric.exp(n_tangent_vecs, one_base_point)
        self.assertAllClose(gs.shape(result), (n_samples, dim))

        one_tangent_vec = self.space.to_tangent(
            one_vec, base_point=n_base_points)
        result = self.metric.exp(one_tangent_vec, n_base_points)
        self.assertAllClose(gs.shape(result), (n_samples, dim))

        n_tangent_vecs = self.space.to_tangent(
            n_vecs, base_point=n_base_points)
        result = self.metric.exp(n_tangent_vecs, n_base_points)
        self.assertAllClose(gs.shape(result), (n_samples, dim))

    def test_log_vectorization_single_samples(self):
        dim = self.dimension + 1

        one_base_point = self.space.random_uniform()
        one_point = self.space.random_uniform()

        result = self.metric.log(one_point, one_base_point)
        self.assertAllClose(gs.shape(result), (dim,))

        one_base_point = gs.to_ndarray(one_base_point, to_ndim=2)
        result = self.metric.log(one_point, one_base_point)
        self.assertAllClose(gs.shape(result), (1, dim))

        one_point = gs.to_ndarray(one_point, to_ndim=2)
        result = self.metric.log(one_point, one_base_point)
        self.assertAllClose(gs.shape(result), (1, dim))

        one_base_point = self.space.random_uniform()
        result = self.metric.log(one_point, one_base_point)
        self.assertAllClose(gs.shape(result), (1, dim))

    def test_log_vectorization_n_samples(self):
        n_samples = self.n_samples
        dim = self.dimension + 1

        one_base_point = self.space.random_uniform()
        one_point = self.space.random_uniform()
        n_points = self.space.random_uniform(n_samples=n_samples)
        n_base_points = self.space.random_uniform(n_samples=n_samples)

        result = self.metric.log(one_point, one_base_point)
        self.assertAllClose(gs.shape(result), (dim,))

        result = self.metric.log(n_points, one_base_point)
        self.assertAllClose(gs.shape(result), (n_samples, dim))

        result = self.metric.log(one_point, n_base_points)
        self.assertAllClose(gs.shape(result), (n_samples, dim))

        result = self.metric.log(n_points, n_base_points)
        self.assertAllClose(gs.shape(result), (n_samples, dim))

    def test_exp_log_are_inverse(self):
        initial_point = self.space.random_uniform(2)
        end_point = self.space.random_uniform(2)
        vec = self.space.metric.log(point=end_point, base_point=initial_point)
        result = self.space.metric.exp(vec, initial_point)
        self.assertAllClose(end_point, result)

    def test_log_extreme_case(self):
        initial_point = self.space.random_uniform(2)
        vec = 1e-4 * gs.random.rand(*initial_point.shape)
        vec = self.space.to_tangent(vec, initial_point)
        point = self.space.metric.exp(vec, base_point=initial_point)
        result = self.space.metric.log(point, initial_point)
        self.assertAllClose(vec, result)

    def test_exp_and_log_and_projection_to_tangent_space_general_case(self):
        """Test Log and Exp.

        Test that the Riemannian exponential
        and the Riemannian logarithm are inverse.

        Expect their composition to give the identity function.

        NB: points on the n-dimensional sphere are
        (n+1)-D vectors of norm 1.
        """
        # Riemannian Exp then Riemannian Log
        # General case
        # NB: Riemannian log gives a regularized tangent vector,
        # so we take the norm modulo 2 * pi.
        base_point = gs.array([0., -3., 0., 3., 4.])
        base_point = base_point / gs.linalg.norm(base_point)

        vector = gs.array([3., 2., 0., 0., -1.])
        vector = self.space.to_tangent(vector=vector, base_point=base_point)

        exp = self.metric.exp(tangent_vec=vector, base_point=base_point)
        result = self.metric.log(point=exp, base_point=base_point)

        expected = vector
        norm_expected = gs.linalg.norm(expected)
        regularized_norm_expected = gs.mod(norm_expected, 2 * gs.pi)
        expected = expected / norm_expected * regularized_norm_expected

        # The Log can be the opposite vector on the tangent space,
        # whose Exp gives the base_point
        are_close = gs.allclose(result, expected)
        norm_2pi = gs.isclose(gs.linalg.norm(result - expected), 2 * gs.pi)
        self.assertTrue(are_close or norm_2pi)

    def test_exp_and_log_and_projection_to_tangent_space_edge_case(self):
        """Test Log and Exp.

        Test that the Riemannian exponential
        and the Riemannian logarithm are inverse.

        Expect their composition to give the identity function.

        NB: points on the n-dimensional sphere are
        (n+1)-D vectors of norm 1.
        """
        # Riemannian Exp then Riemannian Log
        # Edge case: tangent vector has norm < epsilon
        base_point = gs.array([10., -2., -.5, 34., 3.])
        base_point = base_point / gs.linalg.norm(base_point)
        vector = 1e-4 * gs.array([.06, -51., 6., 5., 3.])
        vector = self.space.to_tangent(vector=vector, base_point=base_point)

        exp = self.metric.exp(tangent_vec=vector, base_point=base_point)
        result = self.metric.log(point=exp, base_point=base_point)
        self.assertAllClose(result, vector, atol=1e-7)

    def test_squared_norm_and_squared_dist(self):
        """
        Test that the squared distance between two points is
        the squared norm of their logarithm.
        """
        point_a = 1. / gs.sqrt(129.) * gs.array([10., -2., -5., 0., 0.])
        point_b = 1. / gs.sqrt(435.) * gs.array([1., -20., -5., 0., 3.])
        log = self.metric.log(point=point_a, base_point=point_b)
        result = self.metric.squared_norm(vector=log)
        expected = self.metric.squared_dist(point_a, point_b)

        self.assertAllClose(result, expected)

    def test_squared_dist_vectorization_single_sample(self):
        one_point_a = self.space.random_uniform()
        one_point_b = self.space.random_uniform()

        result = self.metric.squared_dist(one_point_a, one_point_b)
        self.assertAllClose(gs.shape(result), ())

        one_point_a = gs.to_ndarray(one_point_a, to_ndim=2)
        result = self.metric.squared_dist(one_point_a, one_point_b)
        self.assertAllClose(gs.shape(result), (1,))

        one_point_b = gs.to_ndarray(one_point_b, to_ndim=2)
        result = self.metric.squared_dist(one_point_a, one_point_b)
        self.assertAllClose(gs.shape(result), (1,))

        one_point_a = self.space.random_uniform()
        result = self.metric.squared_dist(one_point_a, one_point_b)
        self.assertAllClose(gs.shape(result), (1,))

    def test_squared_dist_vectorization_n_samples(self):
        n_samples = self.n_samples

        one_point_a = self.space.random_uniform()
        one_point_b = self.space.random_uniform()
        n_points_a = self.space.random_uniform(n_samples=n_samples)
        n_points_b = self.space.random_uniform(n_samples=n_samples)

        result = self.metric.squared_dist(one_point_a, one_point_b)
        self.assertAllClose(gs.shape(result), ())

        result = self.metric.squared_dist(n_points_a, one_point_b)
        self.assertAllClose(gs.shape(result), (n_samples,))

        result = self.metric.squared_dist(one_point_a, n_points_b)
        self.assertAllClose(gs.shape(result), (n_samples,))

        result = self.metric.squared_dist(n_points_a, n_points_b)
        self.assertAllClose(gs.shape(result), (n_samples,))

        one_point_a = gs.to_ndarray(one_point_a, to_ndim=2)
        one_point_b = gs.to_ndarray(one_point_b, to_ndim=2)

        result = self.metric.squared_dist(n_points_a, one_point_b)
        self.assertAllClose(gs.shape(result), (n_samples,))

        result = self.metric.squared_dist(one_point_a, n_points_b)
        self.assertAllClose(gs.shape(result), (n_samples,))

        result = self.metric.squared_dist(n_points_a, n_points_b)
        self.assertAllClose(gs.shape(result), (n_samples,))

    def test_norm_and_dist(self):
        """
        Test that the distance between two points is
        the norm of their logarithm.
        """
        point_a = 1. / gs.sqrt(129.) * gs.array([10., -2., -5., 0., 0.])
        point_b = 1. / gs.sqrt(435.) * gs.array([1., -20., -5., 0., 3.])
        log = self.metric.log(point=point_a, base_point=point_b)

        self.assertAllClose(gs.shape(log), (5,))

        result = self.metric.norm(vector=log)
        self.assertAllClose(gs.shape(result), ())

        expected = self.metric.dist(point_a, point_b)
        self.assertAllClose(gs.shape(expected), ())

        self.assertAllClose(result, expected)

    def test_dist_point_and_itself(self):
        # Distance between a point and itself is 0
        point_a = 1. / gs.sqrt(129.) * gs.array([10., -2., -5., 0., 0.])
        point_b = point_a
        result = self.metric.dist(point_a, point_b)
        expected = 0.

        self.assertAllClose(result, expected)

    def test_dist_pairwise(self):
        point_a = 1. / gs.sqrt(129.) * gs.array([10., -2., -5., 0., 0.])
        point_b = 1. / gs.sqrt(435.) * gs.array([1., -20., -5., 0., 3.])

        point = gs.array([point_a, point_b])
        result = self.metric.dist_pairwise(point)

        expected = gs.array([[0., 1.24864502],
                             [1.24864502, 0.]])

        self.assertAllClose(result, expected, rtol=1e-3)

    def test_dist_pairwise_parallel(self):
        n_samples = 15
        points = self.space.random_uniform(n_samples)
        result = self.metric.dist_pairwise(points, n_jobs=2, prefer='threads')
        is_sym = Matrices.is_symmetric(result)
        belongs = Matrices(n_samples, n_samples).belongs(result)
        self.assertTrue(is_sym)
        self.assertTrue(belongs)

    def test_dist_orthogonal_points(self):
        # Distance between two orthogonal points is pi / 2.
        point_a = gs.array([10., -2., -.5, 0., 0.])
        point_a = point_a / gs.linalg.norm(point_a)
        point_b = gs.array([2., 10, 0., 0., 0.])
        point_b = point_b / gs.linalg.norm(point_b)
        result = gs.dot(point_a, point_b)
        expected = 0
        self.assertAllClose(result, expected)

        result = self.metric.dist(point_a, point_b)
        expected = gs.pi / 2

        self.assertAllClose(result, expected)

    def test_exp_and_dist_and_projection_to_tangent_space(self):
        base_point = gs.array([16., -2., -2.5, 84., 3.])
        base_point = base_point / gs.linalg.norm(base_point)
        vector = gs.array([9., 0., -1., -2., 1.])
        tangent_vec = self.space.to_tangent(
            vector=vector, base_point=base_point)

        exp = self.metric.exp(tangent_vec=tangent_vec, base_point=base_point)
        result = self.metric.dist(base_point, exp)
        expected = gs.linalg.norm(tangent_vec) % (2 * gs.pi)
        self.assertAllClose(result, expected)

    def test_exp_and_dist_and_projection_to_tangent_space_vec(self):
        base_point = gs.array([[16., -2., -2.5, 84., 3.],
                               [16., -2., -2.5, 84., 3.]])

        base_single_point = gs.array([16., -2., -2.5, 84., 3.])
        scalar_norm = gs.linalg.norm(base_single_point)

        base_point = base_point / scalar_norm
        vector = gs.array([[9., 0., -1., -2., 1.],
                           [9., 0., -1., -2., 1]])

        tangent_vec = self.space.to_tangent(
            vector=vector, base_point=base_point)

        exp = self.metric.exp(tangent_vec=tangent_vec, base_point=base_point)

        result = self.metric.dist(base_point, exp)
        expected = gs.linalg.norm(tangent_vec, axis=-1) % (2 * gs.pi)

        self.assertAllClose(result, expected)

    def test_geodesic_and_belongs(self):
        n_geodesic_points = 10
        initial_point = self.space.random_uniform(2)
        vector = gs.array([[2., 0., -1., -2., 1.]] * 2)
        initial_tangent_vec = self.space.to_tangent(
            vector=vector, base_point=initial_point)
        geodesic = self.metric.geodesic(
            initial_point=initial_point,
            initial_tangent_vec=initial_tangent_vec)

        t = gs.linspace(start=0., stop=1., num=n_geodesic_points)
        points = geodesic(t)
        result = gs.stack([self.space.belongs(pt) for pt in points])
        self.assertTrue(gs.all(result))

        initial_point = initial_point[0]
        initial_tangent_vec = initial_tangent_vec[0]
        geodesic = self.metric.geodesic(
            initial_point=initial_point,
            initial_tangent_vec=initial_tangent_vec)
        points = geodesic(t)
        result = self.space.belongs(points)
        expected = gs.array(n_geodesic_points * [True])
        self.assertAllClose(expected, result)

    def test_geodesic_end_point(self):
        n_geodesic_points = 10
        initial_point = self.space.random_uniform(4)
        geodesic = self.metric.geodesic(
            initial_point=initial_point[:2],
            end_point=initial_point[2:])
        t = gs.linspace(start=0., stop=1., num=n_geodesic_points)
        points = geodesic(t)
        result = points[-1]
        expected = initial_point[2:]
        self.assertAllClose(expected, result)

    def test_inner_product(self):
        tangent_vec_a = gs.array([1., 0., 0., 0., 0.])
        tangent_vec_b = gs.array([0., 1., 0., 0., 0.])
        base_point = gs.array([0., 0., 0., 0., 1.])
        result = self.metric.inner_product(
            tangent_vec_a, tangent_vec_b, base_point)
        expected = 0.

        self.assertAllClose(expected, result)

    def test_inner_product_vectorization_single_samples(self):
        tangent_vec_a = gs.array([1., 0., 0., 0., 0.])
        tangent_vec_b = gs.array([0., 1., 0., 0., 0.])
        base_point = gs.array([0., 0., 0., 0., 1.])

        result = self.metric.inner_product(
            tangent_vec_a, tangent_vec_b, base_point)
        expected = 0.
        self.assertAllClose(expected, result)

        tangent_vec_a = gs.array([[1., 0., 0., 0., 0.]])
        tangent_vec_b = gs.array([0., 1., 0., 0., 0.])
        base_point = gs.array([0., 0., 0., 0., 1.])

        result = self.metric.inner_product(
            tangent_vec_a, tangent_vec_b, base_point)
        expected = gs.array([0.])
        self.assertAllClose(expected, result)

        tangent_vec_a = gs.array([1., 0., 0., 0., 0.])
        tangent_vec_b = gs.array([[0., 1., 0., 0., 0.]])
        base_point = gs.array([0., 0., 0., 0., 1.])

        result = self.metric.inner_product(
            tangent_vec_a, tangent_vec_b, base_point)
        expected = gs.array([0.])
        self.assertAllClose(expected, result)

        tangent_vec_a = gs.array([[1., 0., 0., 0., 0.]])
        tangent_vec_b = gs.array([[0., 1., 0., 0., 0.]])
        base_point = gs.array([0., 0., 0., 0., 1.])

        result = self.metric.inner_product(
            tangent_vec_a, tangent_vec_b, base_point)
        expected = gs.array([0.])
        self.assertAllClose(expected, result)

        tangent_vec_a = gs.array([[1., 0., 0., 0., 0.]])
        tangent_vec_b = gs.array([[0., 1., 0., 0., 0.]])
        base_point = gs.array([[0., 0., 0., 0., 1.]])

        result = self.metric.inner_product(
            tangent_vec_a, tangent_vec_b, base_point)
        expected = gs.array([0.])
        self.assertAllClose(expected, result)

    def test_diameter(self):
        dim = 2
        sphere = Hypersphere(dim)
        point_a = gs.array([[0., 0., 1.]])
        point_b = gs.array([[1., 0., 0.]])
        point_c = gs.array([[0., 0., -1.]])
        result = sphere.metric.diameter(
            gs.vstack((point_a, point_b, point_c)))
        expected = gs.pi

        self.assertAllClose(expected, result)

    def test_closest_neighbor_index(self):
        """Check that the closest neighbor is one of neighbors."""
        n_samples = 10
        points = self.space.random_uniform(n_samples=n_samples)
        point = points[0, :]
        neighbors = points[1:, :]
        index = self.metric.closest_neighbor_index(point, neighbors)
        closest_neighbor = points[index, :]

        test = gs.sum(gs.all(points == closest_neighbor, axis=1))
        result = test > 0

        self.assertTrue(result)

    def test_sample_von_mises_fisher_arbitrary_mean(self):
        """
        Check that the maximum likelihood estimates of the mean and
        concentration parameter are close to the real values. A first
        estimation of the concentration parameter is obtained by a
        closed-form expression and improved through the Newton method.
        """
        for dim in [2, 9]:
            n_points = 10000
            sphere = Hypersphere(dim)

            # check mean value for concentrated distribution for different mean
            kappa = 1000.
            mean = sphere.random_uniform()
            points = sphere.random_von_mises_fisher(
                mu=mean, kappa=kappa, n_samples=n_points)
            sum_points = gs.sum(points, axis=0)
            result = sum_points / gs.linalg.norm(sum_points)
            expected = mean
            self.assertAllClose(result, expected, atol=MEAN_ESTIMATION_TOL)

    def test_random_von_mises_kappa(self):
        # check concentration parameter for dispersed distribution
        kappa = 1.
        n_points = 100000
        for dim in [2, 9]:
            sphere = Hypersphere(dim)
            points = sphere.random_von_mises_fisher(
                kappa=kappa, n_samples=n_points)
            sum_points = gs.sum(points, axis=0)
            mean_norm = gs.linalg.norm(sum_points) / n_points
            kappa_estimate = (mean_norm * (dim + 1. - mean_norm**2)
                              / (1. - mean_norm**2))
            kappa_estimate = gs.cast(kappa_estimate, gs.float64)
            p = dim + 1
            n_steps = 100
            for _ in range(n_steps):
                bessel_func_1 = scipy.special.iv(p / 2., kappa_estimate)
                bessel_func_2 = scipy.special.iv(p / 2. - 1., kappa_estimate)
                ratio = bessel_func_1 / bessel_func_2
                denominator = (
                    1. - ratio**2 - (p - 1.) * ratio / kappa_estimate)
                mean_norm = gs.cast(mean_norm, gs.float64)
                kappa_estimate = (
                    kappa_estimate - (ratio - mean_norm) / denominator)
            result = kappa_estimate
            expected = kappa
            self.assertAllClose(result, expected, atol=KAPPA_ESTIMATION_TOL)

    def test_random_von_mises_general_dim_mean(self):
        for dim in [2, 9]:
            sphere = Hypersphere(dim)
            n_points = 100000

            # check mean value for concentrated distribution
            kappa = 10
            points = sphere.random_von_mises_fisher(
                kappa=kappa, n_samples=n_points)
            sum_points = gs.sum(points, axis=0)
            expected = gs.array([0.] * dim + [1.])
            result = sum_points / gs.linalg.norm(sum_points)
            self.assertAllClose(result, expected, atol=KAPPA_ESTIMATION_TOL)

    def test_random_von_mises_one_sample_belongs(self):
        for dim in [2, 9]:
            sphere = Hypersphere(dim)
            point = sphere.random_von_mises_fisher()
            self.assertAllClose(point.shape, (dim + 1,))
            result = sphere.belongs(point)
            self.assertTrue(result)

    def test_spherical_to_extrinsic(self):
        """
        Check vectorization of conversion from spherical
        to extrinsic coordinates on the 2-sphere.
        """
        dim = 2
        sphere = Hypersphere(dim)
        points_spherical = gs.array([gs.pi / 2, 0])
        result = sphere.spherical_to_extrinsic(points_spherical)
        expected = gs.array([1., 0., 0.])
        self.assertAllClose(result, expected)

    def test_spherical_to_extrinsic_vectorization(self):
        dim = 2
        sphere = Hypersphere(dim)
        points_spherical = gs.array([[gs.pi / 2, 0],
                                     [gs.pi / 6, gs.pi / 4]])
        result = sphere.spherical_to_extrinsic(points_spherical)
        expected = gs.array(
            [[1., 0., 0.],
             [gs.sqrt(2.) / 4., gs.sqrt(2.) / 4., gs.sqrt(3.) / 2.]])
        self.assertAllClose(result, expected)

    def test_tangent_spherical_to_extrinsic(self):
        """
        Check vectorization of conversion from spherical
        to extrinsic coordinates for tangent vectors to the
        2-sphere.
        """
        dim = 2
        sphere = Hypersphere(dim)
        base_points_spherical = gs.array([[gs.pi / 2, 0],
                                          [gs.pi / 2, 0]])
        tangent_vecs_spherical = gs.array([[0.25, 0.5],
                                           [0.3, 0.2]])
        result = sphere.tangent_spherical_to_extrinsic(
            tangent_vecs_spherical, base_points_spherical)
        expected = gs.array([[0, 0.5, -0.25],
                             [0, 0.2, -0.3]])
        self.assertAllClose(result, expected)

    def test_christoffels_vectorization(self):
        """
        Check vectorization of Christoffel symbols in
        spherical coordinates on the 2-sphere.
        """
        dim = 2
        sphere = Hypersphere(dim)
        points_spherical = gs.array([[gs.pi / 2, 0],
                                     [gs.pi / 6, gs.pi / 4]])
        christoffel = sphere.metric.christoffels(points_spherical)
        result = christoffel.shape
        expected = gs.array([2, dim, dim, dim])
        self.assertAllClose(result, expected)

    @geomstats.tests.np_and_tf_only
    def test_parallel_transport_vectorization(self):
        sphere = Hypersphere(2)
        metric = sphere.metric
        shape = (4, 3)

        results = helper.test_parallel_transport(sphere, metric, shape)
        for res in results:
            self.assertTrue(res)

    def test_is_tangent(self):
        space = self.space
        vec = space.random_uniform()
        result = space.is_tangent(vec, vec)
        self.assertFalse(result)

        base_point = space.random_uniform()
        tangent_vec = space.to_tangent(vec, base_point)
        result = space.is_tangent(tangent_vec, base_point)
        self.assertTrue(result)

        base_point = space.random_uniform(2)
        vec = space.random_uniform(2)
        tangent_vec = space.to_tangent(vec, base_point)
        result = space.is_tangent(tangent_vec, base_point)
        self.assertAllClose(gs.shape(result), (2,))
        self.assertTrue(gs.all(result))

    def test_sectional_curvature(self):
        n_samples = 4
        sphere = self.space
        base_point = sphere.random_uniform(n_samples)
        tan_vec_a = sphere.to_tangent(
            gs.random.rand(n_samples, sphere.dim + 1), base_point)
        tan_vec_b = sphere.to_tangent(
            gs.random.rand(n_samples, sphere.dim + 1), base_point)
        result = sphere.metric.sectional_curvature(
            tan_vec_a, tan_vec_b, base_point)
        expected = gs.ones(result.shape)
        self.assertAllClose(result, expected)