class TestGeodesicRegression(geomstats.tests.TestCase):
    """Test geodesic regression on several spaces.

    Four spaces are exercised: Euclidean space, the hypersphere, the
    special Euclidean group SE(2) in matrix form, and discrete 2D curves.
    The tests cover the internal loss, its autodiff value-and-gradient,
    scipy-based extrinsic minimization, and the public ``fit`` API for
    both the "extrinsic" and "riemannian" estimation methods.
    """

    # Flag read by nose's multiprocess plugin: the tests in this class
    # are independent and may be dispatched to separate worker processes.
    _multiprocess_can_split_ = True

    def setup_method(self):
        """Generate noiseless geodesic data on each test space.

        The recipe is the same on every space: draw centered scalar
        inputs X, pick a true intercept (a point on the space) and a
        true coefficient (a tangent vector at that intercept), then
        generate targets y = exp(X * coef, base_point=intercept).
        True parameters are stacked as a 2-row array [intercept, coef];
        a randomly perturbed ``*_guess`` stack serves as the starting
        point for the optimizers.

        NOTE: the RNG is seeded exactly once here, so the order of the
        random calls below must not be changed.
        """
        gs.random.seed(1234)
        self.n_samples = 20

        # Set up for euclidean
        self.dim_eucl = 3
        self.shape_eucl = (self.dim_eucl, )
        self.eucl = Euclidean(dim=self.dim_eucl)
        X = gs.random.rand(self.n_samples)
        self.X_eucl = X - gs.mean(X)
        self.intercept_eucl_true = self.eucl.random_point()
        self.coef_eucl_true = self.eucl.random_point()
        # In Euclidean space the exponential map is vector addition.
        self.y_eucl = (self.intercept_eucl_true +
                       self.X_eucl[:, None] * self.coef_eucl_true)
        self.param_eucl_true = gs.vstack(
            [self.intercept_eucl_true, self.coef_eucl_true])
        self.param_eucl_guess = gs.vstack([
            self.y_eucl[0],
            self.y_eucl[0] + gs.random.normal(size=self.shape_eucl)
        ])

        # Set up for hypersphere
        self.dim_sphere = 4
        self.shape_sphere = (self.dim_sphere + 1, )
        self.sphere = Hypersphere(dim=self.dim_sphere)
        X = gs.random.rand(self.n_samples)
        self.X_sphere = X - gs.mean(X)
        self.intercept_sphere_true = self.sphere.random_point()
        self.coef_sphere_true = self.sphere.projection(
            gs.random.rand(self.dim_sphere + 1))
        self.y_sphere = self.sphere.metric.exp(
            self.X_sphere[:, None] * self.coef_sphere_true,
            base_point=self.intercept_sphere_true,
        )
        self.param_sphere_true = gs.vstack(
            [self.intercept_sphere_true, self.coef_sphere_true])
        self.param_sphere_guess = gs.vstack([
            self.y_sphere[0],
            self.sphere.to_tangent(gs.random.normal(size=self.shape_sphere),
                                   self.y_sphere[0]),
        ])

        # Set up for special euclidean
        self.se2 = SpecialEuclidean(n=2)
        self.metric_se2 = self.se2.left_canonical_metric
        self.metric_se2.default_point_type = "matrix"
        self.shape_se2 = (3, 3)
        X = gs.random.rand(self.n_samples)
        self.X_se2 = X - gs.mean(X)
        self.intercept_se2_true = self.se2.random_point()
        self.coef_se2_true = self.se2.to_tangent(
            5.0 * gs.random.rand(*self.shape_se2), self.intercept_se2_true)
        self.y_se2 = self.metric_se2.exp(
            self.X_se2[:, None, None] * self.coef_se2_true[None],
            self.intercept_se2_true,
        )
        # Matrix-valued parameters are flattened so that intercept and
        # coefficient can be stacked into a single 2-row parameter array.
        self.param_se2_true = gs.vstack([
            gs.flatten(self.intercept_se2_true),
            gs.flatten(self.coef_se2_true),
        ])
        self.param_se2_guess = gs.vstack([
            gs.flatten(self.y_se2[0]),
            gs.flatten(
                self.se2.to_tangent(gs.random.normal(size=self.shape_se2),
                                    self.y_se2[0])),
        ])

        # Set up for discrete curves
        n_sampling_points = 8
        self.curves_2d = DiscreteCurves(R2)
        self.metric_curves_2d = self.curves_2d.srv_metric
        self.metric_curves_2d.default_point_type = "matrix"
        self.shape_curves_2d = (n_sampling_points, 2)
        X = gs.random.rand(self.n_samples)
        self.X_curves_2d = X - gs.mean(X)
        self.intercept_curves_2d_true = self.curves_2d.random_point(
            n_sampling_points=n_sampling_points)
        self.coef_curves_2d_true = self.curves_2d.to_tangent(
            5.0 * gs.random.rand(*self.shape_curves_2d),
            self.intercept_curves_2d_true)
        # Added because of GitHub issue #1575
        intercept_curves_2d_true_repeated = gs.tile(
            gs.expand_dims(self.intercept_curves_2d_true, axis=0),
            (self.n_samples, 1, 1),
        )
        self.y_curves_2d = self.metric_curves_2d.exp(
            self.X_curves_2d[:, None, None] * self.coef_curves_2d_true[None],
            intercept_curves_2d_true_repeated,
        )
        self.param_curves_2d_true = gs.vstack([
            gs.flatten(self.intercept_curves_2d_true),
            gs.flatten(self.coef_curves_2d_true),
        ])
        self.param_curves_2d_guess = gs.vstack([
            gs.flatten(self.y_curves_2d[0]),
            gs.flatten(
                self.curves_2d.to_tangent(
                    gs.random.normal(size=self.shape_curves_2d),
                    self.y_curves_2d[0])),
        ])

    def test_loss_euclidean(self):
        """Test that the loss is 0 at the true parameters."""
        gr = GeodesicRegression(
            self.eucl,
            metric=self.eucl.metric,
            center_X=False,
            method="extrinsic",
            max_iter=50,
            init_step_size=0.1,
            verbose=True,
        )
        # These tests deliberately exercise the private _loss helper.
        loss = gr._loss(
            self.X_eucl,
            self.y_eucl,
            self.param_eucl_true,
            self.shape_eucl,
        )
        self.assertAllClose(loss.shape, ())
        self.assertTrue(gs.isclose(loss, 0.0))

    def test_loss_hypersphere(self):
        """Test that the loss is 0 at the true parameters."""
        gr = GeodesicRegression(
            self.sphere,
            metric=self.sphere.metric,
            center_X=False,
            method="extrinsic",
            max_iter=50,
            init_step_size=0.1,
            verbose=True,
        )
        loss = gr._loss(
            self.X_sphere,
            self.y_sphere,
            self.param_sphere_true,
            self.shape_sphere,
        )
        self.assertAllClose(loss.shape, ())
        self.assertTrue(gs.isclose(loss, 0.0))

    @geomstats.tests.autograd_and_tf_only
    def test_loss_se2(self):
        """Test that the loss is 0 at the true parameters."""
        gr = GeodesicRegression(
            self.se2,
            metric=self.metric_se2,
            center_X=False,
            method="extrinsic",
            max_iter=50,
            init_step_size=0.1,
            verbose=True,
        )
        loss = gr._loss(self.X_se2, self.y_se2, self.param_se2_true,
                        self.shape_se2)
        self.assertAllClose(loss.shape, ())
        self.assertTrue(gs.isclose(loss, 0.0))

    @geomstats.tests.autograd_only
    def test_loss_curves_2d(self):
        """Test that the loss is 0 at the true parameters."""
        gr = GeodesicRegression(
            self.curves_2d,
            metric=self.metric_curves_2d,
            center_X=False,
            method="extrinsic",
            max_iter=50,
            init_step_size=0.1,
            verbose=True,
        )
        loss = gr._loss(
            self.X_curves_2d,
            self.y_curves_2d,
            self.param_curves_2d_true,
            self.shape_curves_2d,
        )
        self.assertAllClose(loss.shape, ())
        self.assertTrue(gs.isclose(loss, 0.0))

    @geomstats.tests.autograd_tf_and_torch_only
    def test_value_and_grad_loss_euclidean(self):
        """Test value_and_grad of the loss at a perturbed guess.

        Both with and without conversion to numpy, the loss and its
        gradient must be nonzero, finite and correctly shaped.
        """
        gr = GeodesicRegression(
            self.eucl,
            metric=self.eucl.metric,
            center_X=False,
            method="extrinsic",
            max_iter=50,
            init_step_size=0.1,
            verbose=True,
            regularization=0,
        )

        def loss_of_param(param):
            return gr._loss(self.X_eucl, self.y_eucl, param, self.shape_eucl)

        # Without numpy conversion
        objective_with_grad = gs.autodiff.value_and_grad(loss_of_param)
        loss_value, loss_grad = objective_with_grad(self.param_eucl_guess)

        expected_grad_shape = (2, self.dim_eucl)
        self.assertAllClose(loss_value.shape, ())
        self.assertAllClose(loss_grad.shape, expected_grad_shape)

        self.assertFalse(gs.isclose(loss_value, 0.0))
        self.assertFalse(gs.isnan(loss_value))
        self.assertFalse(
            gs.all(gs.isclose(loss_grad, gs.zeros(expected_grad_shape))))
        self.assertTrue(gs.all(~gs.isnan(loss_grad)))

        # With numpy conversion
        objective_with_grad = gs.autodiff.value_and_grad(loss_of_param,
                                                         to_numpy=True)
        loss_value, loss_grad = objective_with_grad(self.param_eucl_guess)
        # Convert back to arrays/tensors
        loss_value = gs.array(loss_value)
        loss_grad = gs.array(loss_grad)

        expected_grad_shape = (2, self.dim_eucl)
        self.assertAllClose(loss_value.shape, ())
        self.assertAllClose(loss_grad.shape, expected_grad_shape)

        self.assertFalse(gs.isclose(loss_value, 0.0))
        self.assertFalse(gs.isnan(loss_value))
        self.assertFalse(
            gs.all(gs.isclose(loss_grad, gs.zeros(expected_grad_shape))))
        self.assertTrue(gs.all(~gs.isnan(loss_grad)))

    @geomstats.tests.autograd_tf_and_torch_only
    def test_value_and_grad_loss_hypersphere(self):
        """Test value_and_grad of the loss at a perturbed guess.

        Both with and without conversion to numpy, the loss and its
        gradient must be nonzero, finite and correctly shaped.
        """
        gr = GeodesicRegression(
            self.sphere,
            metric=self.sphere.metric,
            center_X=False,
            method="extrinsic",
            max_iter=50,
            init_step_size=0.1,
            verbose=True,
            regularization=0,
        )

        def loss_of_param(param):
            return gr._loss(self.X_sphere, self.y_sphere, param,
                            self.shape_sphere)

        # Without numpy conversion
        objective_with_grad = gs.autodiff.value_and_grad(loss_of_param)
        loss_value, loss_grad = objective_with_grad(self.param_sphere_guess)

        expected_grad_shape = (2, self.dim_sphere + 1)
        self.assertAllClose(loss_value.shape, ())
        self.assertAllClose(loss_grad.shape, expected_grad_shape)

        self.assertFalse(gs.isclose(loss_value, 0.0))
        self.assertFalse(gs.isnan(loss_value))
        self.assertFalse(
            gs.all(gs.isclose(loss_grad, gs.zeros(expected_grad_shape))))
        self.assertTrue(gs.all(~gs.isnan(loss_grad)))

        # With numpy conversion
        objective_with_grad = gs.autodiff.value_and_grad(loss_of_param,
                                                         to_numpy=True)
        loss_value, loss_grad = objective_with_grad(self.param_sphere_guess)
        # Convert back to arrays/tensors
        loss_value = gs.array(loss_value)
        loss_grad = gs.array(loss_grad)

        expected_grad_shape = (2, self.dim_sphere + 1)
        self.assertAllClose(loss_value.shape, ())
        self.assertAllClose(loss_grad.shape, expected_grad_shape)

        self.assertFalse(gs.isclose(loss_value, 0.0))
        self.assertFalse(gs.isnan(loss_value))
        self.assertFalse(
            gs.all(gs.isclose(loss_grad, gs.zeros(expected_grad_shape))))
        self.assertTrue(gs.all(~gs.isnan(loss_grad)))

    @geomstats.tests.autograd_and_tf_only
    def test_value_and_grad_loss_se2(self):
        """Test value_and_grad of the loss on SE(2).

        The loss must vanish at the true parameters and be nonzero,
        finite and correctly shaped at a perturbed guess.
        """
        gr = GeodesicRegression(
            self.se2,
            metric=self.metric_se2,
            center_X=False,
            method="extrinsic",
            max_iter=50,
            init_step_size=0.1,
            verbose=True,
        )

        def loss_of_param(param):
            return gr._loss(self.X_se2, self.y_se2, param, self.shape_se2)

        objective_with_grad = gs.autodiff.value_and_grad(loss_of_param)
        loss_value, loss_grad = objective_with_grad(self.param_se2_true)
        # Matrix parameters are flattened: one row per parameter.
        expected_grad_shape = (
            2,
            self.shape_se2[0] * self.shape_se2[1],
        )
        self.assertTrue(gs.isclose(loss_value, 0.0))

        loss_value, loss_grad = objective_with_grad(self.param_se2_guess)
        self.assertAllClose(loss_value.shape, ())
        self.assertAllClose(loss_grad.shape, expected_grad_shape)
        self.assertFalse(gs.isclose(loss_value, 0.0))
        self.assertFalse(
            gs.all(gs.isclose(loss_grad, gs.zeros(expected_grad_shape))))
        self.assertTrue(gs.all(~gs.isnan(loss_grad)))

        objective_with_grad = gs.autodiff.value_and_grad(loss_of_param,
                                                         to_numpy=True)
        loss_value, loss_grad = objective_with_grad(self.param_se2_guess)
        expected_grad_shape = (
            2,
            self.shape_se2[0] * self.shape_se2[1],
        )
        self.assertAllClose(loss_value.shape, ())
        self.assertAllClose(loss_grad.shape, expected_grad_shape)
        self.assertFalse(gs.isclose(loss_value, 0.0))
        self.assertFalse(gs.isnan(loss_value))
        self.assertFalse(
            gs.all(gs.isclose(loss_grad, gs.zeros(expected_grad_shape))))
        self.assertTrue(gs.all(~gs.isnan(loss_grad)))

    @geomstats.tests.autograd_tf_and_torch_only
    def test_loss_minimization_extrinsic_euclidean(self):
        """Minimize loss from noiseless data."""
        gr = GeodesicRegression(self.eucl, regularization=0)

        def loss_of_param(param):
            return gr._loss(self.X_eucl, self.y_eucl, param, self.shape_eucl)

        objective_with_grad = gs.autodiff.value_and_grad(loss_of_param,
                                                         to_numpy=True)
        initial_guess = gs.flatten(self.param_eucl_guess)
        res = minimize(
            objective_with_grad,
            initial_guess,
            method="CG",
            jac=True,
            tol=10 * gs.atol,
            options={
                "disp": True,
                "maxiter": 50
            },
        )
        self.assertAllClose(gs.array(res.x).shape, (self.dim_eucl * 2, ))
        self.assertAllClose(res.fun, 0.0, atol=1000 * gs.atol)

        # Cast required because minimization happens in scipy in float64
        param_hat = gs.cast(gs.array(res.x), self.param_eucl_true.dtype)
        intercept_hat, coef_hat = gs.split(param_hat, 2)
        coef_hat = self.eucl.to_tangent(coef_hat, intercept_hat)
        self.assertAllClose(intercept_hat, self.intercept_eucl_true)

        # Compare coefficients in the same tangent space: transport the
        # estimated coefficient to the true intercept before comparing.
        tangent_vec_of_transport = self.eucl.metric.log(
            self.intercept_eucl_true, base_point=intercept_hat)

        transported_coef_hat = self.eucl.metric.parallel_transport(
            tangent_vec=coef_hat,
            base_point=intercept_hat,
            direction=tangent_vec_of_transport,
        )

        self.assertAllClose(transported_coef_hat,
                            self.coef_eucl_true,
                            atol=10 * gs.atol)

    @geomstats.tests.autograd_tf_and_torch_only
    def test_loss_minimization_extrinsic_hypersphere(self):
        """Minimize loss from noiseless data."""
        gr = GeodesicRegression(self.sphere, regularization=0)

        def loss_of_param(param):
            return gr._loss(self.X_sphere, self.y_sphere, param,
                            self.shape_sphere)

        objective_with_grad = gs.autodiff.value_and_grad(loss_of_param,
                                                         to_numpy=True)
        initial_guess = gs.flatten(self.param_sphere_guess)
        res = minimize(
            objective_with_grad,
            initial_guess,
            method="CG",
            jac=True,
            tol=10 * gs.atol,
            options={
                "disp": True,
                "maxiter": 50
            },
        )
        self.assertAllClose(
            gs.array(res.x).shape, ((self.dim_sphere + 1) * 2, ))
        self.assertAllClose(res.fun, 0.0, atol=5e-3)

        # Cast required because minimization happens in scipy in float64
        param_hat = gs.cast(gs.array(res.x), self.param_sphere_true.dtype)
        intercept_hat, coef_hat = gs.split(param_hat, 2)
        # Scipy optimizes in the ambient space: project back onto the
        # sphere and its tangent space.
        intercept_hat = self.sphere.projection(intercept_hat)
        coef_hat = self.sphere.to_tangent(coef_hat, intercept_hat)
        self.assertAllClose(intercept_hat,
                            self.intercept_sphere_true,
                            atol=5e-2)

        tangent_vec_of_transport = self.sphere.metric.log(
            self.intercept_sphere_true, base_point=intercept_hat)

        transported_coef_hat = self.sphere.metric.parallel_transport(
            tangent_vec=coef_hat,
            base_point=intercept_hat,
            direction=tangent_vec_of_transport,
        )

        self.assertAllClose(transported_coef_hat,
                            self.coef_sphere_true,
                            atol=0.6)

    @geomstats.tests.autograd_and_tf_only
    def test_loss_minimization_extrinsic_se2(self):
        """Minimize loss from noiseless data on SE(2)."""
        gr = GeodesicRegression(
            self.se2,
            metric=self.metric_se2,
            center_X=False,
            method="extrinsic",
            max_iter=50,
            init_step_size=0.1,
            verbose=True,
        )

        def loss_of_param(param):
            return gr._loss(self.X_se2, self.y_se2, param, self.shape_se2)

        objective_with_grad = gs.autodiff.value_and_grad(loss_of_param,
                                                         to_numpy=True)
        res = minimize(
            objective_with_grad,
            gs.flatten(self.param_se2_guess),
            method="CG",
            jac=True,
            options={
                "disp": True,
                "maxiter": 50
            },
        )
        # 18 = 2 flattened 3x3 matrix parameters.
        self.assertAllClose(gs.array(res.x).shape, (18, ))
        self.assertAllClose(res.fun, 0.0, atol=1e-6)

        # Cast required because minimization happens in scipy in float64
        param_hat = gs.cast(gs.array(res.x), self.param_se2_true.dtype)
        intercept_hat, coef_hat = gs.split(param_hat, 2)
        intercept_hat = gs.reshape(intercept_hat, self.shape_se2)
        coef_hat = gs.reshape(coef_hat, self.shape_se2)

        # Project the unconstrained scipy solution back onto the group
        # and its tangent space.
        intercept_hat = self.se2.projection(intercept_hat)
        coef_hat = self.se2.to_tangent(coef_hat, intercept_hat)
        self.assertAllClose(intercept_hat, self.intercept_se2_true, atol=1e-4)

        tangent_vec_of_transport = self.se2.metric.log(
            self.intercept_se2_true, base_point=intercept_hat)

        transported_coef_hat = self.se2.metric.parallel_transport(
            tangent_vec=coef_hat,
            base_point=intercept_hat,
            direction=tangent_vec_of_transport,
        )

        self.assertAllClose(transported_coef_hat, self.coef_se2_true,
                            atol=0.6)

    @geomstats.tests.autograd_tf_and_torch_only
    def test_fit_extrinsic_euclidean(self):
        """Test fit (extrinsic) recovers the true parameters."""
        gr = GeodesicRegression(
            self.eucl,
            metric=self.eucl.metric,
            center_X=False,
            method="extrinsic",
            max_iter=50,
            init_step_size=0.1,
            verbose=True,
            initialization="random",
            regularization=0.9,
        )

        gr.fit(self.X_eucl, self.y_eucl, compute_training_score=True)

        training_score = gr.training_score_
        intercept_hat, coef_hat = gr.intercept_, gr.coef_
        self.assertAllClose(intercept_hat.shape, self.shape_eucl)
        self.assertAllClose(coef_hat.shape, self.shape_eucl)
        # R^2 score of 1 on noiseless data.
        self.assertAllClose(training_score, 1.0, atol=500 * gs.atol)
        self.assertAllClose(intercept_hat, self.intercept_eucl_true)

        tangent_vec_of_transport = self.eucl.metric.log(
            self.intercept_eucl_true, base_point=intercept_hat)

        transported_coef_hat = self.eucl.metric.parallel_transport(
            tangent_vec=coef_hat,
            base_point=intercept_hat,
            direction=tangent_vec_of_transport,
        )

        self.assertAllClose(transported_coef_hat, self.coef_eucl_true)

    @geomstats.tests.autograd_tf_and_torch_only
    def test_fit_extrinsic_hypersphere(self):
        """Test fit (extrinsic) recovers the true parameters."""
        gr = GeodesicRegression(
            self.sphere,
            metric=self.sphere.metric,
            center_X=False,
            method="extrinsic",
            max_iter=50,
            init_step_size=0.1,
            verbose=True,
            initialization="random",
            regularization=0.9,
        )

        gr.fit(self.X_sphere, self.y_sphere, compute_training_score=True)

        training_score = gr.training_score_
        intercept_hat, coef_hat = gr.intercept_, gr.coef_
        self.assertAllClose(intercept_hat.shape, self.shape_sphere)
        self.assertAllClose(coef_hat.shape, self.shape_sphere)
        self.assertAllClose(training_score, 1.0, atol=500 * gs.atol)
        self.assertAllClose(intercept_hat,
                            self.intercept_sphere_true,
                            atol=5e-3)

        tangent_vec_of_transport = self.sphere.metric.log(
            self.intercept_sphere_true, base_point=intercept_hat)

        transported_coef_hat = self.sphere.metric.parallel_transport(
            tangent_vec=coef_hat,
            base_point=intercept_hat,
            direction=tangent_vec_of_transport,
        )

        self.assertAllClose(transported_coef_hat,
                            self.coef_sphere_true,
                            atol=0.6)

    @geomstats.tests.autograd_and_tf_only
    def test_fit_extrinsic_se2(self):
        """Test fit (extrinsic) recovers the true parameters on SE(2)."""
        gr = GeodesicRegression(
            self.se2,
            metric=self.metric_se2,
            center_X=False,
            method="extrinsic",
            max_iter=50,
            init_step_size=0.1,
            verbose=True,
            initialization="warm_start",
        )

        gr.fit(self.X_se2, self.y_se2, compute_training_score=True)
        intercept_hat, coef_hat = gr.intercept_, gr.coef_
        training_score = gr.training_score_

        self.assertAllClose(intercept_hat.shape, self.shape_se2)
        self.assertAllClose(coef_hat.shape, self.shape_se2)
        self.assertTrue(gs.isclose(training_score, 1.0))
        self.assertAllClose(intercept_hat, self.intercept_se2_true, atol=1e-4)

        tangent_vec_of_transport = self.se2.metric.log(
            self.intercept_se2_true, base_point=intercept_hat)

        transported_coef_hat = self.se2.metric.parallel_transport(
            tangent_vec=coef_hat,
            base_point=intercept_hat,
            direction=tangent_vec_of_transport,
        )

        self.assertAllClose(transported_coef_hat, self.coef_se2_true,
                            atol=0.6)

    @geomstats.tests.autograd_tf_and_torch_only
    def test_fit_riemannian_euclidean(self):
        """Test fit (riemannian gradient descent) on Euclidean data."""
        gr = GeodesicRegression(
            self.eucl,
            metric=self.eucl.metric,
            center_X=False,
            method="riemannian",
            max_iter=50,
            init_step_size=0.1,
            verbose=True,
        )

        gr.fit(self.X_eucl, self.y_eucl, compute_training_score=True)
        intercept_hat, coef_hat = gr.intercept_, gr.coef_
        training_score = gr.training_score_

        self.assertAllClose(intercept_hat.shape, self.shape_eucl)
        self.assertAllClose(coef_hat.shape, self.shape_eucl)
        self.assertAllClose(training_score, 1.0, atol=0.1)
        self.assertAllClose(intercept_hat, self.intercept_eucl_true)

        tangent_vec_of_transport = self.eucl.metric.log(
            self.intercept_eucl_true, base_point=intercept_hat)

        transported_coef_hat = self.eucl.metric.parallel_transport(
            tangent_vec=coef_hat,
            base_point=intercept_hat,
            direction=tangent_vec_of_transport,
        )

        self.assertAllClose(transported_coef_hat,
                            self.coef_eucl_true,
                            atol=1e-2)

    @geomstats.tests.autograd_tf_and_torch_only
    def test_fit_riemannian_hypersphere(self):
        """Test fit (riemannian gradient descent) on the hypersphere."""
        gr = GeodesicRegression(
            self.sphere,
            metric=self.sphere.metric,
            center_X=False,
            method="riemannian",
            max_iter=50,
            init_step_size=0.1,
            verbose=True,
        )

        gr.fit(self.X_sphere, self.y_sphere, compute_training_score=True)
        intercept_hat, coef_hat = gr.intercept_, gr.coef_
        training_score = gr.training_score_

        self.assertAllClose(intercept_hat.shape, self.shape_sphere)
        self.assertAllClose(coef_hat.shape, self.shape_sphere)
        self.assertAllClose(training_score, 1.0, atol=0.1)
        self.assertAllClose(intercept_hat,
                            self.intercept_sphere_true,
                            atol=1e-2)

        tangent_vec_of_transport = self.sphere.metric.log(
            self.intercept_sphere_true, base_point=intercept_hat)

        transported_coef_hat = self.sphere.metric.parallel_transport(
            tangent_vec=coef_hat,
            base_point=intercept_hat,
            direction=tangent_vec_of_transport,
        )

        self.assertAllClose(transported_coef_hat,
                            self.coef_sphere_true,
                            atol=0.6)

    @geomstats.tests.autograd_and_tf_only
    def test_fit_riemannian_se2(self):
        """Test fit (riemannian gradient descent) on SE(2)."""
        # Explicit (intercept, coef) initialization at the first target.
        init = (self.y_se2[0], gs.zeros_like(self.y_se2[0]))
        gr = GeodesicRegression(
            self.se2,
            metric=self.metric_se2,
            center_X=False,
            method="riemannian",
            max_iter=50,
            init_step_size=0.1,
            verbose=True,
            initialization=init,
        )

        gr.fit(self.X_se2, self.y_se2, compute_training_score=True)
        intercept_hat, coef_hat = gr.intercept_, gr.coef_
        training_score = gr.training_score_

        self.assertAllClose(intercept_hat.shape, self.shape_se2)
        self.assertAllClose(coef_hat.shape, self.shape_se2)
        self.assertAllClose(training_score, 1.0, atol=1e-4)
        self.assertAllClose(intercept_hat, self.intercept_se2_true, atol=1e-4)

        tangent_vec_of_transport = self.se2.metric.log(
            self.intercept_se2_true, base_point=intercept_hat)

        transported_coef_hat = self.se2.metric.parallel_transport(
            tangent_vec=coef_hat,
            base_point=intercept_hat,
            direction=tangent_vec_of_transport,
        )

        self.assertAllClose(transported_coef_hat, self.coef_se2_true,
                            atol=0.6)
class TestSpecialEuclidean(geomstats.tests.TestCase):
    """Test the special Euclidean group SE(2) in matrix representation.

    Covers group-level operations (belongs, compose, inverse), tangent
    spaces and the Lie algebra, the canonical left/right metrics
    (exp, log, parallel transport), and vectorization over batches.
    """

    def setUp(self):
        """Seed the RNG and build a batch of points and tangent vectors.

        NOTE: the seed is fixed, so the order of the random calls here
        must not change.
        """
        gs.random.seed(12)
        self.n = 2
        self.group = SpecialEuclidean(n=self.n)
        self.n_samples = 3
        self.point = self.group.random_point(self.n_samples)
        self.tangent_vec = self.group.to_tangent(
            gs.random.rand(self.n_samples, self.group.n + 1,
                           self.group.n + 1), self.point)

    def test_belongs(self):
        """Test belongs on valid/invalid matrices, batches and bad shapes."""
        theta = gs.pi / 3
        point_1 = gs.array([[gs.cos(theta), -gs.sin(theta), 2.],
                            [gs.sin(theta), gs.cos(theta), 3.],
                            [0., 0., 1.]])
        result = self.group.belongs(point_1)
        self.assertTrue(result)

        # Same rotation/translation but invalid homogeneous last row.
        point_2 = gs.array([[gs.cos(theta), -gs.sin(theta), 2.],
                            [gs.sin(theta), gs.cos(theta), 3.],
                            [0., 0., 0.]])
        result = self.group.belongs(point_2)
        self.assertFalse(result)

        point = gs.array([point_1, point_2])
        expected = gs.array([True, False])
        result = self.group.belongs(point)
        self.assertAllClose(result, expected)

        # A single row (1D array) is not a group element.
        point = point_1[0]
        result = self.group.belongs(point)
        self.assertFalse(result)

        # Wrongly shaped matrices and batches thereof.
        point = gs.zeros((2, 3))
        result = self.group.belongs(point)
        self.assertFalse(result)

        point = gs.zeros((2, 2, 3))
        result = self.group.belongs(point)
        self.assertFalse(gs.all(result))
        self.assertAllClose(result.shape, (2, ))

    def test_random_point_and_belongs(self):
        """Test that random_point samples lie in the group."""
        point = self.group.random_point()
        result = self.group.belongs(point)
        self.assertTrue(result)

        point = self.group.random_point(self.n_samples)
        result = self.group.belongs(point)
        expected = gs.array([True] * self.n_samples)
        self.assertAllClose(result, expected)

    def test_identity(self):
        """Test that the identity is the (n+1)x(n+1) identity matrix."""
        result = self.group.identity
        expected = gs.eye(self.n + 1)
        self.assertAllClose(result, expected)

    def test_is_tangent(self):
        """Test is_tangent at a random point and at the identity."""
        theta = gs.pi / 3
        # vec_1 is in the Lie algebra (skew rotation block, zero last row);
        # composing with `point` translates it to the tangent space there.
        vec_1 = gs.array([[0., -theta, 2.],
                          [theta, 0., 3.],
                          [0., 0., 0.]])
        point = self.group.random_point()
        tangent_vec = self.group.compose(point, vec_1)
        result = self.group.is_tangent(tangent_vec, point)
        self.assertTrue(result)

        # Nonzero last row: not in the Lie algebra, hence not tangent.
        vec_2 = gs.array([[0., -theta, 2.],
                          [theta, 0., 3.],
                          [0., 0., 1.]])
        tangent_vec = self.group.compose(point, vec_2)
        result = self.group.is_tangent(tangent_vec, point)
        self.assertFalse(result)

        # Without a base point: checked at the identity.
        vec = gs.array([vec_1, vec_2])
        expected = gs.array([True, False])
        result = self.group.is_tangent(vec)
        self.assertAllClose(result, expected)

    def test_to_tangent_vec_vectorization(self):
        """Test that to_tangent is vectorized over a batch of points."""
        n = self.group.n
        tangent_vecs = gs.arange(self.n_samples * (n + 1)**2)
        tangent_vecs = gs.cast(tangent_vecs, gs.float32)
        tangent_vecs = gs.reshape(tangent_vecs,
                                  (self.n_samples, ) + (n + 1, ) * 2)
        point = self.group.random_point(self.n_samples)
        tangent_vecs = Matrices.mul(point, tangent_vecs)
        regularized = self.group.to_tangent(tangent_vecs, point)
        # For v tangent at p, the rotation block of p^T v + v^T p
        # must vanish (skew-symmetry of the rotation part).
        result = Matrices.mul(
            Matrices.transpose(point), regularized) + \
            Matrices.mul(Matrices.transpose(regularized), point)
        result = result[:, :n, :n]
        expected = gs.zeros_like(result)
        self.assertAllClose(result, expected)

    def test_compose_and_inverse_matrix_form(self):
        """Test that composing a point with its inverse gives identity."""
        point = self.group.random_point()
        inv_point = self.group.inverse(point)
        result = self.group.compose(point, inv_point)
        expected = self.group.identity
        self.assertAllClose(result, expected)

        if not geomstats.tests.tf_backend():
            result = self.group.compose(inv_point, point)
            expected = self.group.identity
            self.assertAllClose(result, expected)

    def test_compose_vectorization(self):
        """Test compose broadcasting between single points and batches."""
        n_samples = self.n_samples
        n_points_a = self.group.random_point(n_samples=n_samples)
        n_points_b = self.group.random_point(n_samples=n_samples)
        one_point = self.group.random_point(n_samples=1)

        result = self.group.compose(one_point, n_points_a)
        self.assertAllClose(gs.shape(result),
                            (n_samples, ) + (self.group.n + 1, ) * 2)

        result = self.group.compose(n_points_a, one_point)

        if not geomstats.tests.tf_backend():
            self.assertAllClose(gs.shape(result),
                                (n_samples, ) + (self.group.n + 1, ) * 2)

            result = self.group.compose(n_points_a, n_points_b)
            self.assertAllClose(gs.shape(result),
                                (n_samples, ) + (self.group.n + 1, ) * 2)

    def test_inverse_vectorization(self):
        """Test that inverse preserves the batch shape."""
        n_samples = self.n_samples
        points = self.group.random_point(n_samples=n_samples)
        result = self.group.inverse(points)
        self.assertAllClose(gs.shape(result),
                            (n_samples, ) + (self.group.n + 1, ) * 2)

    def test_compose_matrix_form(self):
        """Test compose with identity and with pure translations."""
        point = self.group.random_point()
        result = self.group.compose(point, self.group.identity)
        expected = point
        self.assertAllClose(result, expected)

        if not geomstats.tests.tf_backend():
            # Composition by identity, on the left
            # Expect the original transformation
            result = self.group.compose(self.group.identity, point)
            expected = point
            self.assertAllClose(result, expected)

            # Composition of translations (no rotational part)
            # Expect the sum of the translations
            point_a = gs.array([[1., 0., 1.],
                                [0., 1., 1.5],
                                [0., 0., 1.]])
            point_b = gs.array([[1., 0., 2.],
                                [0., 1., 2.5],
                                [0., 0., 1.]])

            result = self.group.compose(point_a, point_b)
            # Mask selecting only the translation column of point_b.
            last_line_0 = gs.array_from_sparse([(0, 2), (1, 2)], [1., 1.],
                                               (3, 3))
            expected = point_a + point_b * last_line_0
            self.assertAllClose(result, expected)

    def test_left_exp_coincides(self):
        """Left-invariant exp in matrix form matches the vector form."""
        vector_group = SpecialEuclidean(n=2, point_type='vector')
        theta = gs.pi / 3
        initial_vec = gs.array([theta, 2., 2.])
        initial_matrix_vec = self.group.lie_algebra.matrix_representation(
            initial_vec)
        vector_exp = vector_group.left_canonical_metric.exp(initial_vec)
        result = self.group.left_canonical_metric.exp(initial_matrix_vec)
        expected = vector_group.matrix_from_vector(vector_exp)
        self.assertAllClose(result, expected)

    def test_right_exp_coincides(self):
        """Right-invariant exp in matrix form matches the vector form."""
        vector_group = SpecialEuclidean(n=2, point_type='vector')
        theta = gs.pi / 2
        initial_vec = gs.array([theta, 1., 1.])
        initial_matrix_vec = self.group.lie_algebra.matrix_representation(
            initial_vec)
        vector_exp = vector_group.right_canonical_metric.exp(initial_vec)
        # n_steps presumably controls the numerical-integration
        # resolution of the right-invariant exp — TODO confirm.
        result = self.group.right_canonical_metric.exp(initial_matrix_vec,
                                                       n_steps=25)
        expected = vector_group.matrix_from_vector(vector_exp)
        self.assertAllClose(result, expected, atol=1e-6)

    def test_basis_belongs(self):
        """Test that the Lie algebra basis belongs to the Lie algebra."""
        lie_algebra = self.group.lie_algebra
        result = lie_algebra.belongs(lie_algebra.basis)
        self.assertTrue(gs.all(result))

    def test_basis_has_the_right_dimension(self):
        """Test dim of se(n) is n(n+1)/2 for several n."""
        for n in range(2, 5):
            algebra = SpecialEuclideanMatrixLieAlgebra(n)
            self.assertEqual(int(n * (n + 1) / 2), algebra.dim)

    def test_belongs_lie_algebra(self):
        """Test Lie algebra membership on valid/invalid matrices."""
        theta = gs.pi / 3
        vec_1 = gs.array([[0., -theta, 2.],
                          [theta, 0., 3.],
                          [0., 0., 0.]])
        result = self.group.lie_algebra.belongs(vec_1)
        expected = True
        self.assertAllClose(result, expected)

        # Nonzero last row: not in the Lie algebra.
        vec_2 = gs.array([[0., -theta, 2.],
                          [theta, 0., 3.],
                          [0., 0., 1.]])
        result = self.group.lie_algebra.belongs(vec_2)
        expected = False
        self.assertAllClose(result, expected)

        vec = gs.array([vec_1, vec_2])
        expected = gs.array([True, False])
        result = self.group.lie_algebra.belongs(vec)
        self.assertAllClose(result, expected)

    def test_basis_representation_is_correctly_vectorized(self):
        """Test basis_representation on the whole basis for several n."""
        for n in range(2, 5):
            algebra = SpecialEuclideanMatrixLieAlgebra(n)
            shape = gs.shape(algebra.basis_representation(algebra.basis))
            dim = int(n * (n + 1) / 2)
            self.assertAllClose(shape, (dim, dim))

    def test_left_metric_wrong_group(self):
        """Test that the left metric rejects non-SE(n) matrix groups."""
        group = self.group.rotations
        self.assertRaises(
            ValueError,
            lambda: SpecialEuclideanMatrixCannonicalLeftMetric(group))

        # Vector point type is also rejected.
        group = SpecialEuclidean(3, point_type='vector')
        self.assertRaises(
            ValueError,
            lambda: SpecialEuclideanMatrixCannonicalLeftMetric(group))

    def test_exp_and_belongs(self):
        """Test that exp maps into the group, batched and single."""
        exp = self.group.left_canonical_metric.exp(self.tangent_vec,
                                                   self.point)
        result = self.group.belongs(exp)
        self.assertTrue(gs.all(result))

        exp = self.group.left_canonical_metric.exp(self.tangent_vec[0],
                                                   self.point[0])
        result = self.group.belongs(exp)
        self.assertTrue(result)

    @geomstats.tests.np_and_tf_only
    def test_log_and_is_tan(self):
        """Test that log maps into the tangent space, batched and single."""
        exp = self.group.left_canonical_metric.exp(self.tangent_vec,
                                                   self.point)
        log = self.group.left_canonical_metric.log(exp, self.point)
        result = self.group.is_tangent(log, self.point)
        self.assertTrue(gs.all(result))

        # Single point against a batch of base points (broadcast).
        exp = self.group.left_canonical_metric.exp(self.tangent_vec[0],
                                                   self.point[0])
        log = self.group.left_canonical_metric.log(exp, self.point)
        result = self.group.is_tangent(log, self.point)
        self.assertTrue(gs.all(result))

        log = self.group.left_canonical_metric.log(exp, self.point[0])
        result = self.group.is_tangent(log, self.point[0])
        self.assertTrue(result)

    @geomstats.tests.np_and_tf_only
    def test_exp_log(self):
        """Test that log inverts exp, batched and single."""
        exp = self.group.left_canonical_metric.exp(self.tangent_vec,
                                                   self.point)
        result = self.group.left_canonical_metric.log(exp, self.point)
        self.assertAllClose(result, self.tangent_vec)

        exp = self.group.left_canonical_metric.exp(self.tangent_vec[0],
                                                   self.point[0])
        result = self.group.left_canonical_metric.log(exp, self.point[0])
        self.assertAllClose(result, self.tangent_vec[0])

    def test_parallel_transport(self):
        """Test parallel transport via the shared test helper."""
        metric = self.group.left_canonical_metric
        shape = (self.n_samples, self.group.n + 1, self.group.n + 1)
        results = helper.test_parallel_transport(self.group, metric, shape)
        for res in results:
            self.assertTrue(res)

    def test_lie_algebra_basis_belongs(self):
        """Test that the Lie algebra basis belongs to the Lie algebra."""
        basis = self.group.lie_algebra.basis
        result = self.group.lie_algebra.belongs(basis)
        self.assertTrue(gs.all(result))

    def test_lie_algebra_projection_and_belongs(self):
        """Test that projected random matrices lie in the Lie algebra."""
        vec = gs.random.rand(self.n_samples, self.group.n + 1,
                             self.group.n + 1)
        tangent_vec = self.group.lie_algebra.projection(vec)
        result = self.group.lie_algebra.belongs(tangent_vec)
        self.assertTrue(gs.all(result))

    def test_basis_representation(self):
        """Test basis_representation inverts matrix_representation."""
        vec = gs.random.rand(self.n_samples, self.group.dim)
        tangent_vec = self.group.lie_algebra.matrix_representation(vec)
        result = self.group.lie_algebra.basis_representation(tangent_vec)
        self.assertAllClose(result, vec)

        result = self.group.lie_algebra.basis_representation(tangent_vec[0])
        self.assertAllClose(result, vec[0])

    def test_metrics_expected_point_type(self):
        """Test that all canonical metrics use matrix point type."""
        left = self.group.left_canonical_metric
        right = self.group.right_canonical_metric
        metric = self.group.metric
        for m in [left, right, metric]:
            self.assertTrue(m.default_point_type == 'matrix')

    def test_metric_left_invariant(self):
        """Test that left translation preserves the norm."""
        group = self.group
        point = group.random_point()
        expected = group.left_canonical_metric.norm(self.tangent_vec)

        translated = group.tangent_translation_map(point)(self.tangent_vec)
        result = group.left_canonical_metric.norm(translated)

        self.assertAllClose(result, expected)

    def test_projection_and_belongs(self):
        """Test projection via the shared test helper."""
        shape = (self.n_samples, self.n + 1, self.n + 1)
        result = helper.test_projection_and_belongs(self.group, shape)
        for res in result:
            self.assertTrue(res)
class TestSpecialEuclideanLegacy(geomstats.tests.TestCase):
    """Legacy matrix SE(2) tests based on the older ``random_uniform`` API.

    Renamed from ``TestSpecialEuclidean``: a class with that exact name
    is defined earlier in this file, and redefining it would silently
    shadow the first class so that none of its tests would ever be
    collected by the test runner.
    """

    def setUp(self):
        """Create the SE(2) group under test.

        A fixed seed makes the randomized tests reproducible, in line
        with the other test classes in this file.
        """
        gs.random.seed(12)
        self.n = 2
        self.group = SpecialEuclidean(n=self.n)
        self.n_samples = 4

    def test_belongs(self):
        """Test belongs on a valid point, an invalid point and a batch."""
        theta = gs.pi / 3
        point_1 = gs.array([[gs.cos(theta), -gs.sin(theta), 2.],
                            [gs.sin(theta), gs.cos(theta), 3.],
                            [0., 0., 1.]])
        result = self.group.belongs(point_1)
        expected = True
        self.assertAllClose(result, expected)

        # Same rotation/translation but invalid homogeneous last row.
        point_2 = gs.array([[gs.cos(theta), -gs.sin(theta), 2.],
                            [gs.sin(theta), gs.cos(theta), 3.],
                            [0., 0., 0.]])
        result = self.group.belongs(point_2)
        expected = False
        self.assertAllClose(result, expected)

        point = gs.array([point_1, point_2])
        expected = gs.array([True, False])
        result = self.group.belongs(point)
        self.assertAllClose(result, expected)

    def test_random_uniform_and_belongs(self):
        """Test that random_uniform samples lie in the group."""
        point = self.group.random_uniform()
        result = self.group.belongs(point)
        expected = True
        self.assertAllClose(result, expected)

        point = self.group.random_uniform(self.n_samples)
        result = self.group.belongs(point)
        expected = gs.array([True] * self.n_samples)
        self.assertAllClose(result, expected)

    def test_identity(self):
        """Test that the identity is the (n+1)x(n+1) identity matrix."""
        result = self.group.identity
        expected = gs.eye(self.n + 1)
        self.assertAllClose(result, expected)

    def test_is_in_lie_algebra(self):
        """Test is_tangent without base point (Lie algebra membership)."""
        theta = gs.pi / 3
        vec_1 = gs.array([[0., -theta, 2.],
                          [theta, 0., 3.],
                          [0., 0., 0.]])
        result = self.group.is_tangent(vec_1)
        expected = True
        self.assertAllClose(result, expected)

        # Nonzero last row: not an element of the Lie algebra.
        vec_2 = gs.array([[0., -theta, 2.],
                          [theta, 0., 3.],
                          [0., 0., 1.]])
        result = self.group.is_tangent(vec_2)
        expected = False
        self.assertAllClose(result, expected)

        vec = gs.array([vec_1, vec_2])
        expected = gs.array([True, False])
        result = self.group.is_tangent(vec)
        self.assertAllClose(result, expected)

    def test_to_tangent_vec_vectorization(self):
        """Test that to_tangent is vectorized over a batch of points."""
        n = self.group.n
        tangent_vecs = gs.arange(self.n_samples * (n + 1)**2)
        tangent_vecs = gs.cast(tangent_vecs, gs.float32)
        tangent_vecs = gs.reshape(tangent_vecs,
                                  (self.n_samples, ) + (n + 1, ) * 2)
        point = self.group.random_uniform(self.n_samples)
        tangent_vecs = self.group.compose(point, tangent_vecs)
        regularized = self.group.to_tangent(tangent_vecs, point)
        # For v tangent at p, the rotation block of p^T v + v^T p
        # must vanish (skew-symmetry of the rotation part).
        result = self.group.compose(
            self.group.transpose(point), regularized) + \
            self.group.compose(self.group.transpose(regularized), point)
        result = result[:, :n, :n]
        expected = gs.zeros_like(result)
        self.assertAllClose(result, expected)

    def test_compose_and_inverse_matrix_form(self):
        """Test that composing a point with its inverse gives identity."""
        point = self.group.random_uniform()
        inv_point = self.group.inverse(point)
        result = self.group.compose(point, inv_point)
        expected = self.group.identity
        self.assertAllClose(result, expected)

        if not geomstats.tests.tf_backend():
            result = self.group.compose(inv_point, point)
            expected = self.group.identity
            self.assertAllClose(result, expected)

    def test_compose_vectorization(self):
        """Test compose broadcasting between single points and batches."""
        n_samples = self.n_samples
        n_points_a = self.group.random_uniform(n_samples=n_samples)
        n_points_b = self.group.random_uniform(n_samples=n_samples)
        one_point = self.group.random_uniform(n_samples=1)

        result = self.group.compose(one_point, n_points_a)
        self.assertAllClose(gs.shape(result),
                            (n_samples, ) + (self.group.n + 1, ) * 2)

        result = self.group.compose(n_points_a, one_point)

        if not geomstats.tests.tf_backend():
            self.assertAllClose(gs.shape(result),
                                (n_samples, ) + (self.group.n + 1, ) * 2)

            result = self.group.compose(n_points_a, n_points_b)
            self.assertAllClose(gs.shape(result),
                                (n_samples, ) + (self.group.n + 1, ) * 2)

    def test_inverse_vectorization(self):
        """Test that inverse preserves the batch shape."""
        n_samples = self.n_samples
        points = self.group.random_uniform(n_samples=n_samples)
        result = self.group.inverse(points)
        self.assertAllClose(gs.shape(result),
                            (n_samples, ) + (self.group.n + 1, ) * 2)

    def test_compose_matrix_form(self):
        """Test compose with identity and with pure translations."""
        point = self.group.random_uniform()
        result = self.group.compose(point, self.group.identity)
        expected = point
        self.assertAllClose(result, expected)

        if not geomstats.tests.tf_backend():
            # Composition by identity, on the left
            # Expect the original transformation
            result = self.group.compose(self.group.identity, point)
            expected = point
            self.assertAllClose(result, expected)

            # Composition of translations (no rotational part)
            # Expect the sum of the translations
            point_a = gs.array([[1., 0., 1.],
                                [0., 1., 1.5],
                                [0., 0., 1.]])
            point_b = gs.array([[1., 0., 2.],
                                [0., 1., 2.5],
                                [0., 0., 1.]])

            result = self.group.compose(point_a, point_b)
            # Mask selecting only the translation column of point_b.
            last_line_0 = gs.array_from_sparse([(0, 2), (1, 2)], [1., 1.],
                                               (3, 3))
            expected = point_a + point_b * last_line_0
            self.assertAllClose(result, expected)