def draw_points(self, ax, points=None, **scatter_kwargs):
    """Draw the points on the 3d axis."""
    if points is None:
        points = self.points
    points_x = gs.vstack([point[0] for point in points])
    points_y = gs.vstack([point[1] for point in points])
    points_z = gs.vstack([point[2] for point in points])
    ax.scatter(points_x, points_y, points_z, **scatter_kwargs)
def mean(self, points, weights=None):
    """Compute the Frechet mean of (weighted) points.

    The Frechet mean of (weighted) points is the weighted average of
    the points in the Minkowski space.

    Parameters
    ----------
    points : array-like, shape=[n_samples, dimension]
    weights : array-like, shape=[n_samples, 1], optional

    Returns
    -------
    mean : array-like, shape=[1, dimension]
    """
    if isinstance(points, list):
        points = gs.vstack(points)
    points = gs.to_ndarray(points, to_ndim=2)
    n_points = gs.shape(points)[0]

    if isinstance(weights, list):
        weights = gs.vstack(weights)
    elif weights is None:
        weights = gs.ones((n_points,))

    weighted_points = gs.einsum('n,nj->nj', weights, points)
    mean = gs.sum(weighted_points, axis=0) / gs.sum(weights)
    mean = gs.to_ndarray(mean, to_ndim=2)
    return mean
def linear_mean(points, weights=None):
    """Compute the weighted linear mean.

    The linear mean is the Frechet mean when points:

    - lie in a Euclidean space with Euclidean metric,
    - lie in a Minkowski space with Minkowski metric.

    Parameters
    ----------
    points : array-like, shape=[n_samples, dimension]
        Points to be averaged.
    weights : array-like, shape=[n_samples, 1], optional
        Weights associated to the points.

    Returns
    -------
    mean : array-like, shape=[1, dimension]
        Weighted linear mean of the points.
    """
    if isinstance(points, list):
        points = gs.vstack(points)
    points = gs.to_ndarray(points, to_ndim=2)
    n_points = gs.shape(points)[0]

    if isinstance(weights, list):
        weights = gs.vstack(weights)
    elif weights is None:
        weights = gs.ones((n_points,))

    weighted_points = gs.einsum('...,...j->...j', weights, points)
    mean = gs.sum(weighted_points, axis=0) / gs.sum(weights)
    mean = gs.to_ndarray(mean, to_ndim=2)
    return mean
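# Minimal usage sketch for linear_mean, assuming the NumPy backend of
# geomstats is active; the points and weights below are made up for
# illustration only.
import geomstats.backend as gs

points = gs.array([[0., 0.], [2., 0.], [0., 4.]])
weights = gs.array([1., 1., 2.])

# Weighted average: (1*[0, 0] + 1*[2, 0] + 2*[0, 4]) / 4 = [0.5, 2.]
mean = linear_mean(points, weights)
print(mean)  # expected: [[0.5, 2.]]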
def main(): """Plot the geodesics.""" initial_point = gs.array([np.sqrt(2), 1., 0.]) stack_initial_point = gs.vstack( [initial_point for i_disk in range(N_DISKS)]) initial_point = gs.to_ndarray(stack_initial_point, to_ndim=3) end_point_intrinsic = gs.array([1.5, 1.5]) end_point_intrinsic = end_point_intrinsic.reshape(1, 1, 2) end_point = POINCARE_POLYDISK.intrinsic_to_extrinsic_coords( end_point_intrinsic) end_point = gs.concatenate( [end_point for i_disk in range(N_DISKS)], axis=1) vector = gs.array([3.5, 0.6, 0.8]) stack_vector = gs.vstack([vector for i_disk in range(N_DISKS)]) vector = gs.to_ndarray(stack_vector, to_ndim=3) initial_tangent_vec = POINCARE_POLYDISK.projection_to_tangent_space( vector=vector, base_point=initial_point) fig = plt.figure() plot_geodesic_between_two_points(initial_point=initial_point, end_point=end_point, ax=fig) plot_geodesic_with_initial_tangent_vector( initial_point=initial_point, initial_tangent_vec=initial_tangent_vec, ax=fig) plt.show()
def draw(self, ax, **kwargs):
    """Draw the unit circle and the stored points."""
    circle = plt.Circle((0, 0), radius=1., color='black', fill=False)
    ax.add_artist(circle)
    points_x = gs.vstack([point[0] for point in self.points])
    points_y = gs.vstack([point[1] for point in self.points])
    ax.scatter(points_x, points_y, **kwargs)
def variance(self, points, weights=None, base_point=None):
    """Compute the variance of (weighted) points w.r.t. a base point."""
    if isinstance(points, list):
        points = gs.vstack(points)
    n_points = gs.shape(points)[0]

    if isinstance(weights, list):
        weights = gs.vstack(weights)
    if weights is None:
        weights = gs.ones((n_points, 1))

    weights = gs.array(weights)
    weights = gs.to_ndarray(weights, to_ndim=2, axis=1)
    sum_weights = gs.sum(weights)

    if base_point is None:
        base_point = self.mean(points, weights)

    variance = 0.
    sq_dists = self.squared_dist(base_point, points)
    variance += gs.einsum('nk,nj->j', weights, sq_dists)
    variance /= sum_weights

    variance = gs.to_ndarray(variance, to_ndim=2, axis=1)
    return variance
def mean(self, points, weights=None,
         n_max_iterations=32, epsilon=EPSILON):
    """Compute the Frechet mean of (weighted) points."""
    # TODO(nina): profile this code to study performance,
    # i.e. what to do with sq_dists_between_iterates.
    if isinstance(points, list):
        points = gs.vstack(points)
    n_points = gs.shape(points)[0]

    if isinstance(weights, list):
        weights = gs.vstack(weights)
    if weights is None:
        weights = gs.ones((n_points, 1))

    weights = gs.array(weights)
    weights = gs.to_ndarray(weights, to_ndim=2, axis=1)
    sum_weights = gs.sum(weights)

    mean = points[0]
    if n_points == 1:
        return mean

    sq_dists_between_iterates = []
    iteration = 0
    while iteration < n_max_iterations:
        a_tangent_vector = self.log(mean, mean)
        tangent_mean = gs.zeros_like(a_tangent_vector)

        logs = self.log(point=points, base_point=mean)
        tangent_mean += gs.einsum('nk,nj->j', weights, logs)
        tangent_mean /= sum_weights

        mean_next = self.exp(tangent_vec=tangent_mean, base_point=mean)

        sq_dist = self.squared_dist(mean_next, mean)
        sq_dists_between_iterates.append(sq_dist)

        variance = self.variance(points=points, weights=weights,
                                 base_point=mean_next)
        if gs.isclose(variance, 0.)[0, 0]:
            break
        if (sq_dist <= epsilon * variance)[0, 0]:
            break

        mean = mean_next
        iteration += 1

    if iteration == n_max_iterations:
        print('Maximum number of iterations {} reached. '
              'The mean may be inaccurate.'.format(n_max_iterations))

    mean = gs.to_ndarray(mean, to_ndim=2)
    return mean
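# Hedged usage sketch: this iterative mean is assumed to be a method of a
# Riemannian metric class, reached through a manifold's metric as in the
# other snippets; names and import path follow the (older) geomstats API
# used here and may differ in newer releases.
from geomstats.geometry.hypersphere import Hypersphere

sphere = Hypersphere(2)
points = sphere.random_uniform(n_samples=4)
frechet_mean = sphere.metric.mean(points)  # shape (1, 3)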
def setup_method(self):
    gs.random.seed(1234)
    self.n_samples = 20

    # Set up for hypersphere
    self.dim_sphere = 4
    self.shape_sphere = (self.dim_sphere + 1,)
    self.sphere = Hypersphere(dim=self.dim_sphere)
    X = gs.random.rand(self.n_samples)
    self.X_sphere = X - gs.mean(X)
    self.intercept_sphere_true = self.sphere.random_point()
    self.coef_sphere_true = self.sphere.projection(
        gs.random.rand(self.dim_sphere + 1))

    self.y_sphere = self.sphere.metric.exp(
        self.X_sphere[:, None] * self.coef_sphere_true,
        base_point=self.intercept_sphere_true,
    )

    self.param_sphere_true = gs.vstack(
        [self.intercept_sphere_true, self.coef_sphere_true])
    self.param_sphere_guess = gs.vstack([
        self.y_sphere[0],
        self.sphere.to_tangent(
            gs.random.normal(size=self.shape_sphere), self.y_sphere[0]),
    ])

    # Set up for special euclidean
    self.se2 = SpecialEuclidean(n=2)
    self.metric_se2 = self.se2.left_canonical_metric
    self.metric_se2.default_point_type = "matrix"

    self.shape_se2 = (3, 3)
    X = gs.random.rand(self.n_samples)
    self.X_se2 = X - gs.mean(X)

    self.intercept_se2_true = self.se2.random_point()
    self.coef_se2_true = self.se2.to_tangent(
        5.0 * gs.random.rand(*self.shape_se2), self.intercept_se2_true)

    self.y_se2 = self.metric_se2.exp(
        self.X_se2[:, None, None] * self.coef_se2_true[None],
        self.intercept_se2_true,
    )

    self.param_se2_true = gs.vstack([
        gs.flatten(self.intercept_se2_true),
        gs.flatten(self.coef_se2_true),
    ])
    self.param_se2_guess = gs.vstack([
        gs.flatten(self.y_se2[0]),
        gs.flatten(
            self.se2.to_tangent(
                gs.random.normal(size=self.shape_se2), self.y_se2[0])),
    ])
def adjoint_map(state):
    r"""Construct the matrix associated to the adjoint representation.

    The inner automorphism is given by :math:`Ad_X : g \mapsto X g X^{-1}`.
    For a state :math:`X = (\theta, x, y)`, the matrix associated to its
    tangent map, the adjoint representation, is

    .. math::

        \begin{bmatrix} 1 & \\ -J [x, y] & R(\theta) \end{bmatrix},

    where :math:`R(\theta)` is the rotation matrix of angle :math:`\theta`,
    and :math:`J = \begin{bmatrix} 0 & -1 \\ 1 & 0 \end{bmatrix}`.

    Parameters
    ----------
    state : array-like, shape=[dim]
        Vector representing a state.

    Returns
    -------
    adjoint : array-like, shape=[dim, dim]
        Adjoint representation of the state.
    """
    theta, _, _ = state
    tangent_base = gs.array([[0.0, -1.0], [1.0, 0.0]])
    orientation_part = gs.eye(Localization.dim_rot, Localization.dim)
    pos_column = gs.reshape(state[1:], (Localization.group.n, 1))
    position_wrt_orientation = Matrices.mul(-tangent_base, pos_column)
    position_wrt_position = Localization.rotation_matrix(theta)
    last_lines = gs.hstack(
        (position_wrt_orientation, position_wrt_position))
    ad = gs.vstack((orientation_part, last_lines))

    return ad
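# Hedged usage sketch, assuming adjoint_map is a static method of the
# Localization model exercised in the tests below; the state is arbitrary.
state = gs.array([0.5, 1.0, 2.0])  # (theta, x, y)
ad = Localization.adjoint_map(state)
# ad is a 3x3 block matrix: a leading 1, the column -J @ [x, y] = [2., -1.],
# and the rotation R(0.5) in the lower-right block.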
def regularize(self, point, point_type=None):
    """Regularize the point into the manifold's canonical representation.

    Parameters
    ----------
    point
    point_type : str, {'vector', 'matrix'}

    Returns
    -------
    regularized_points
    """
    # TODO(nina): Vectorize.
    if point_type is None:
        point_type = self.default_point_type
    assert point_type in ['vector', 'matrix']

    regularized_points = [
        self.manifold[i].regularize(point[i])
        for i in range(self.n_manifolds)]

    # TODO(nina): Put this in a decorator
    if point_type == 'vector':
        regularized_points = gs.hstack(regularized_points)
    elif point_type == 'matrix':
        regularized_points = gs.vstack(regularized_points)
    return regularized_points
def _fit_extrinsic(self, X, y, weights=None, compute_training_score=False):
    """Estimate the parameters using the extrinsic gradient descent.

    Estimate the intercept and the coefficient defining the geodesic
    regression model, using the extrinsic gradient.

    Parameters
    ----------
    X : {array-like, sparse matrix}, shape=[...]
        Training input samples.
    y : array-like, shape=[..., {dim, [n,n]}]
        Training target values.
    weights : array-like, shape=[...]
        Weights associated to the points.
        Optional, default: None.
    compute_training_score : bool
        Whether to compute R^2.
        Optional, default: False.

    Returns
    -------
    self : object
        Returns self.
    """
    shape = (
        y.shape[-1:] if self.space.default_point_type == "vector"
        else y.shape[-2:])

    intercept_init, coef_init = self.initialize_parameters(y)
    intercept_hat = self.space.projection(intercept_init)
    coef_hat = self.space.to_tangent(coef_init, intercept_hat)
    initial_guess = gs.vstack(
        [gs.flatten(intercept_hat), gs.flatten(coef_hat)])

    objective_with_grad = gs.autodiff.value_and_grad(
        lambda param: self._loss(X, y, param, shape, weights),
        to_numpy=True)

    res = minimize(
        objective_with_grad,
        initial_guess,
        method="CG",
        jac=True,
        options={"disp": self.verbose, "maxiter": self.max_iter},
        tol=self.tol,
    )

    intercept_hat, coef_hat = gs.split(gs.array(res.x), 2)
    intercept_hat = gs.reshape(intercept_hat, shape)
    intercept_hat = gs.cast(intercept_hat, dtype=y.dtype)
    coef_hat = gs.reshape(coef_hat, shape)
    coef_hat = gs.cast(coef_hat, dtype=y.dtype)

    self.intercept_ = self.space.projection(intercept_hat)
    self.coef_ = self.space.to_tangent(coef_hat, self.intercept_)

    if compute_training_score:
        variance = gs.sum(self.metric.squared_dist(y, self.intercept_))
        self.training_score_ = 1 - 2 * res.fun / variance

    return self
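# Minimal sketch of the parameter packing used above: the intercept and the
# coefficient are flattened and stacked into one optimization variable, then
# split and reshaped back after `minimize` returns. Shapes are made up.
import geomstats.backend as gs

shape = (3, 3)
intercept = gs.ones(shape)
coef = 2.0 * gs.ones(shape)
packed = gs.vstack([gs.flatten(intercept), gs.flatten(coef)])

intercept_back, coef_back = gs.split(gs.array(packed), 2)
intercept_back = gs.reshape(intercept_back, shape)
coef_back = gs.reshape(coef_back, shape)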
def main(): """Plot a square on H2 with Poincare half-plane visualization.""" top = SQUARE_SIZE / 2.0 bot = -SQUARE_SIZE / 2.0 left = -SQUARE_SIZE / 2.0 right = SQUARE_SIZE / 2.0 corners_int = gs.array([[bot, left], [bot, right], [top, right], [top, left]]) corners_ext = H2.from_coordinates(corners_int, "intrinsic") n_steps = 20 ax = plt.gca() edge_points = [] for i, src in enumerate(corners_ext): dst_id = (i + 1) % len(corners_ext) dst = corners_ext[dst_id] geodesic = METRIC.geodesic(initial_point=src, end_point=dst) t = gs.linspace(0.0, 1.0, n_steps) edge_points.append(geodesic(t)) edge_points = gs.vstack(edge_points) visualization.plot( edge_points, ax=ax, space="H2_poincare_half_plane", point_type="extrinsic", marker=".", color="black", ) plt.show()
def test_space_derivative(self):
    """Test space derivative.

    Check result on an example and vectorization.
    """
    n_points = 3
    dim = 3
    curve = gs.random.rand(n_points, dim)
    result = self.srv_metric_r3.space_derivative(curve)
    delta = 1 / n_points
    d_curve_1 = (curve[1] - curve[0]) / delta
    d_curve_2 = (curve[2] - curve[0]) / (2 * delta)
    d_curve_3 = (curve[2] - curve[1]) / delta
    expected = gs.squeeze(
        gs.vstack(
            (
                gs.to_ndarray(d_curve_1, 2),
                gs.to_ndarray(d_curve_2, 2),
                gs.to_ndarray(d_curve_3, 2),
            )
        )
    )
    self.assertAllClose(result, expected)

    path_of_curves = gs.random.rand(
        self.n_discretized_curves, self.n_sampling_points, dim)
    result = self.srv_metric_r3.space_derivative(path_of_curves)
    expected = []
    for i in range(self.n_discretized_curves):
        expected.append(
            self.srv_metric_r3.space_derivative(path_of_curves[i]))
    expected = gs.stack(expected)
    self.assertAllClose(result, expected)
def regularize(self, point, point_type=None):
    """Regularize the point into the manifold's canonical representation.

    Parameters
    ----------
    point : array-like,
        shape=[n_samples, dim] or shape=[n_samples, dim_2, dim_2]
        Point to be regularized.
    point_type : str, {'vector', 'matrix'}
        Representation of point.

    Returns
    -------
    regularized_point : array-like,
        shape=[n_samples, dim] or shape=[n_samples, dim_2, dim_2]
        Point in the manifold's canonical representation.
    """
    # TODO(nina): Vectorize.
    if point_type is None:
        point_type = self.default_point_type
    assert point_type in ['vector', 'matrix']

    regularized_point = [
        manifold_i.regularize(point_i)
        for manifold_i, point_i in zip(self.manifolds, point)]

    # TODO(nina): Put this in a decorator
    if point_type == 'vector':
        regularized_point = gs.hstack(regularized_point)
    elif point_type == 'matrix':
        regularized_point = gs.vstack(regularized_point)
    return regularized_point
def test_compute_gain(self):
    self.kalman.initialize_covariances(
        self.prior_cov, self.process_cov, self.obs_cov)
    innovation_cov = 3 * gs.eye(1)
    expected = gs.vstack(
        (1.0 / innovation_cov, gs.zeros_like(innovation_cov)))
    result = self.kalman.compute_gain(None)
    self.assertAllClose(expected, result)
def random_von_mises_fisher(self, kappa=10, n_samples=1):
    """Sample on the 2-sphere from the von Mises-Fisher distribution
    centered at the north pole.
    """
    if self.dimension != 2:
        raise NotImplementedError(
            'Sampling from the von Mises-Fisher distribution '
            'is only implemented in dimension 2.')
    angle = 2 * gs.pi * gs.random.rand(n_samples)
    unit_vector = gs.vstack((gs.cos(angle), gs.sin(angle)))
    scalar = gs.random.rand(n_samples)
    coord_z = 1 + 1 / kappa * gs.log(
        scalar + (1 - scalar) * gs.exp(-2 * kappa))
    coord_xy = gs.sqrt(1 - coord_z ** 2) * unit_vector
    point = gs.vstack((coord_xy, coord_z))
    return point.T
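# Hedged usage sketch, assuming this method lives on the Hypersphere class
# with the `dimension` attribute used above (the older geomstats API).
sphere = Hypersphere(2)
samples = sphere.random_von_mises_fisher(kappa=100, n_samples=10)
# samples has shape (10, 3); a larger kappa concentrates the samples
# around the north pole (0., 0., 1.).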
def test_vstack(self):
    import tensorflow as tf

    tensor_1 = tf.convert_to_tensor([1.0, 2.0, 3.0])
    tensor_2 = tf.convert_to_tensor([7.0, 8.0, 9.0])

    result = gs.vstack([tensor_1, tensor_2])
    expected = tf.convert_to_tensor([[1.0, 2.0, 3.0], [7.0, 8.0, 9.0]])
    self.assertAllClose(result, expected)

    tensor_1 = tf.convert_to_tensor([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])
    tensor_2 = tf.convert_to_tensor([7.0, 8.0, 9.0])

    result = gs.vstack([tensor_1, tensor_2])
    expected = tf.convert_to_tensor(
        [[1.0, 2.0, 3.0], [4.0, 5.0, 6.0], [7.0, 8.0, 9.0]])
    self.assertAllClose(result, expected)
def cost_jacobian(param):
    """Compute the jacobian of the cost function at a polynomial curve.

    Parameters
    ----------
    param : array-like, shape=(degree - 1, dim)
        Parameters of the curve coordinates' polynomial functions of time.

    Returns
    -------
    jac : array-like, shape=(dim * (degree - 1),)
        Jacobian of the cost function at the polynomial curve.
    """
    last_coef = end_point - initial_point - gs.sum(param, axis=0)
    coef = gs.vstack((initial_point, param, last_coef))

    t = gs.linspace(0.0, 1.0, n_times)
    t_position = [t ** i for i in range(degree + 1)]
    t_position = gs.stack(t_position)
    position = gs.einsum("ij,ik->kj", coef, t_position)

    t_velocity = [i * t ** (i - 1) for i in range(1, degree + 1)]
    t_velocity = gs.stack(t_velocity)
    velocity = gs.einsum("ij,ik->kj", coef[1:], t_velocity)

    kappa, gamma = position[:, 0], position[:, 1]
    kappa_dot, gamma_dot = velocity[:, 0], velocity[:, 1]

    jac_kappa_0 = (
        (gs.polygamma(2, kappa) + 1 / kappa ** 2) * kappa_dot
        + gamma_dot ** 2 / gamma) * t_position[1:-1]
    jac_kappa_1 = (
        2 * gs.polygamma(1, kappa) * kappa_dot) * t_velocity[:-1]
    jac_kappa = jac_kappa_0 + jac_kappa_1

    jac_gamma_0 = (-kappa * gamma_dot ** 2 / gamma ** 2) * t_position[1:-1]
    jac_gamma_1 = (2 * kappa * gamma_dot / gamma) * t_velocity[:-1]
    jac_gamma = jac_gamma_0 + jac_gamma_1

    jac = gs.vstack([jac_kappa, jac_gamma])

    cost_jac = gs.sum(jac, axis=1)
    return cost_jac
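# Illustrative sketch of the einsum convention used above: a degree-2
# polynomial curve in dimension 2, evaluated at n_times instants. The
# coefficient values are made up; only the shapes matter.
import geomstats.backend as gs

degree, n_times = 2, 5
coef = gs.array([[1.0, 0.0], [0.0, 1.0], [0.5, 0.5]])  # (degree + 1, dim)
t = gs.linspace(0.0, 1.0, n_times)
t_position = gs.stack([t ** i for i in range(degree + 1)])  # (degree + 1, n_times)
position = gs.einsum("ij,ik->kj", coef, t_position)  # (n_times, dim)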
def test_vstack(self):
    import tensorflow as tf

    tensor_1 = tf.convert_to_tensor([[1., 2., 3.], [4., 5., 6.]])
    tensor_2 = tf.convert_to_tensor([[7., 8., 9.]])

    result = gs.vstack([tensor_1, tensor_2])
    expected = tf.convert_to_tensor(
        [[1., 2., 3.], [4., 5., 6.], [7., 8., 9.]])
    self.assertAllClose(result, expected)
def grad(y_pred, y_true,
         metric=SE3.left_canonical_metric,
         representation='vector'):
    """Closed-form expression for the gradient of pose_loss.

    :return: tangent vector at point y_pred.
    """
    if gs.ndim(y_pred) == 1:
        y_pred = gs.expand_dims(y_pred, axis=0)
    if gs.ndim(y_true) == 1:
        y_true = gs.expand_dims(y_true, axis=0)

    if representation == 'vector':
        grad = lie_group.grad(y_pred, y_true, SE3, metric)

    if representation == 'quaternion':
        y_pred_rot_vec = SO3.rotation_vector_from_quaternion(y_pred[:, :4])
        y_pred_pose = gs.hstack([y_pred_rot_vec, y_pred[:, 4:]])
        y_true_rot_vec = SO3.rotation_vector_from_quaternion(y_true[:, :4])
        y_true_pose = gs.hstack([y_true_rot_vec, y_true[:, 4:]])
        grad = lie_group.grad(y_pred_pose, y_true_pose, SE3, metric)

        quat_scalar = y_pred[:, :1]
        quat_vec = y_pred[:, 1:4]

        quat_vec_norm = gs.linalg.norm(quat_vec, axis=1)
        quat_sq_norm = quat_vec_norm ** 2 + quat_scalar ** 2

        quat_arctan2 = gs.arctan2(quat_vec_norm, quat_scalar)
        differential_scalar = -2 * quat_vec / quat_sq_norm
        differential_vec = (
            2 * (quat_scalar / quat_sq_norm
                 - 2 * quat_arctan2 / quat_vec_norm)
            * (gs.einsum('ni,nj->nij', quat_vec, quat_vec)
               / (quat_vec_norm * quat_vec_norm))
            + 2 * quat_arctan2 / quat_vec_norm * gs.eye(3))

        differential_scalar_t = gs.transpose(
            differential_scalar, axes=(1, 0))

        upper_left_block = gs.hstack(
            (differential_scalar_t, differential_vec[0]))
        upper_right_block = gs.zeros((3, 3))
        lower_right_block = gs.eye(3)
        lower_left_block = gs.zeros((3, 4))

        top = gs.hstack((upper_left_block, upper_right_block))
        bottom = gs.hstack((lower_left_block, lower_right_block))
        differential = gs.vstack((top, bottom))
        differential = gs.expand_dims(differential, axis=0)

        grad = gs.einsum('ni,nij->ni', grad, differential)
        grad = gs.squeeze(grad, axis=0)
    return grad
def test_diameter(self):
    dim = 2
    sphere = Hypersphere(dim)
    point_a = gs.array([[0., 0., 1.]])
    point_b = gs.array([[1., 0., 0.]])
    point_c = gs.array([[0., 0., -1.]])
    result = sphere.metric.diameter(
        gs.vstack((point_a, point_b, point_c)))
    expected = gs.pi
    self.assertAllClose(expected, result)
def test_vstack(self):
    with self.test_session():
        tensor_1 = tf.convert_to_tensor([[1., 2., 3.], [4., 5., 6.]])
        tensor_2 = tf.convert_to_tensor([[7., 8., 9.]])

        result = gs.vstack([tensor_1, tensor_2])
        expected = tf.convert_to_tensor(
            [[1., 2., 3.], [4., 5., 6.], [7., 8., 9.]])
        self.assertAllClose(result, expected)
def diameter_test_data(self):
    point_a = gs.array([[0.0, 0.0, 1.0]])
    point_b = gs.array([[1.0, 0.0, 0.0]])
    point_c = gs.array([[0.0, 0.0, -1.0]])
    smoke_data = [
        dict(
            dim=2,
            points=gs.vstack((point_a, point_b, point_c)),
            expected=gs.pi,
        )
    ]
    return self.generate_tests(smoke_data)
def test_diameter(self):
    dim = 2
    sphere = Hypersphere(dim)
    point_a = [0., 0., 1.]
    point_b = [1., 0., 0.]
    point_c = [0., 0., -1.]
    result = sphere.metric.diameter(
        gs.vstack((point_a, point_b, point_c)))
    expected = gs.pi

    gs.testing.assert_allclose(result, expected)
    gs.testing.assert_allclose(result.size, 1)
def mean(self, points, weights=None):
    """Compute the Frechet mean of (weighted) points.

    The Frechet mean of (weighted) points is the weighted average of
    the points in the Minkowski space.
    """
    if isinstance(points, list):
        points = gs.vstack(points)
    points = gs.to_ndarray(points, to_ndim=2)
    n_points = gs.shape(points)[0]

    if isinstance(weights, list):
        weights = gs.vstack(weights)
    elif weights is None:
        weights = gs.ones((n_points,))

    weighted_points = gs.einsum('n,nj->nj', weights, points)
    mean = gs.sum(weighted_points, axis=0) / gs.sum(weights)
    mean = gs.to_ndarray(mean, to_ndim=2)
    return mean
def test_Localization_adjoint_map(self):
    initial_state = gs.array([0.5, 1.0, 2.0])

    angle = initial_state[0]
    rotation = gs.array([
        [gs.cos(angle), -gs.sin(angle)],
        [gs.sin(angle), gs.cos(angle)],
    ])
    first_line = gs.eye(1, 3)
    last_lines = gs.hstack((gs.array([[2.0], [-1.0]]), rotation))
    expected = gs.vstack((first_line, last_lines))

    result = self.nonlinear_model.adjoint_map(initial_state)
    self.assertAllClose(expected, result)
def test_product_distance_extrinsic_representation(self):
    """Test the distance using the extrinsic representation."""
    coords_type = 'extrinsic'
    point_a_intrinsic = gs.array([[0.01, 0.0]])
    point_b_intrinsic = gs.array([[0.0, 0.0]])

    hyperbolic_space = Hyperbolic(dimension=2, coords_type=coords_type)
    point_a = hyperbolic_space.from_coordinates(
        point_a_intrinsic, "intrinsic")
    point_b = hyperbolic_space.from_coordinates(
        point_b_intrinsic, "intrinsic")

    duplicate_point_a = gs.vstack([point_a, point_a])
    duplicate_point_b = gs.vstack([point_b, point_b])

    single_disk = PoincarePolydisk(n_disks=1, coords_type=coords_type)
    two_disks = PoincarePolydisk(n_disks=2, coords_type=coords_type)

    distance_single_disk = single_disk.metric.dist(point_a, point_b)
    distance_two_disks = two_disks.metric.dist(
        duplicate_point_a, duplicate_point_b)

    result = distance_two_disks
    expected = 3 ** 0.5 * distance_single_disk
    self.assertAllClose(result, expected)
def test_Localization_propagation_jacobian(self):
    time_step = gs.array([0.5])
    linear_vel = gs.array([1.0, 0.5])
    angular_vel = gs.array([0.0])
    increment = gs.concatenate(
        (time_step, linear_vel, angular_vel), axis=0)

    first_line = gs.eye(1, 3)
    last_lines = gs.hstack((gs.array([[-0.25], [0.5]]), gs.eye(2)))
    expected = gs.vstack((first_line, last_lines))

    result = self.nonlinear_model.propagation_jacobian(None, increment)
    self.assertAllClose(expected, result)
def bvp(time, state):
    """Reformat the boundary value problem geodesic ODE.

    Parameters
    ----------
    time : array-like
        Time.
    state : array-like
        Vector of the state variables: y = [a, b, u, v].
    """
    position, velocity = state[:2].T, state[2:].T
    eq = self.geodesic_equation(
        velocity=velocity, position=position)
    return gs.vstack((velocity.T, eq.T))
def bvp(_, state):
    """Reformat the boundary value problem geodesic ODE.

    Parameters
    ----------
    _ : unused
        Any (time).
    state : array-like, shape=[4,]
        Vector of the state variables: y = [a, b, u, v].
    """
    position, velocity = state[:2].T, state[2:].T
    eq = self.geodesic_equation(velocity=velocity, position=position)
    return gs.vstack((velocity.T, eq.T))
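# Hedged sketch of how a callback with this signature is typically consumed:
# scipy.integrate.solve_bvp expects fun(x, y) with y of shape (n, m). Here a
# toy geodesic equation (flat space, zero acceleration) stands in for
# self.geodesic_equation; everything below is illustrative only.
import numpy as np
from scipy.integrate import solve_bvp

def toy_bvp(_, state):
    velocity = state[2:]
    acceleration = np.zeros_like(velocity)  # flat-space geodesic equation
    return np.vstack((velocity, acceleration))

def boundary_condition(state_0, state_1):
    # Join (0, 0) to (1, 1): constrain the positions at both endpoints.
    return np.array([state_0[0], state_0[1],
                     state_1[0] - 1.0, state_1[1] - 1.0])

x = np.linspace(0.0, 1.0, 10)
y_init = np.zeros((4, x.size))
solution = solve_bvp(toy_bvp, boundary_condition, x, y_init)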