def test_fit_transform():
    """Test fit_transform method for class ConnectivityMeasure."""
    n_subjects = 10
    n_features = 49
    n_samples = 200

    # Generate signals and compute empirical covariances
    covs = []
    signals = []
    random_state = check_random_state(0)
    for k in range(n_subjects):
        signal = random_state.randn(n_samples, n_features)
        signals.append(signal)
        signal -= signal.mean(axis=0)
        covs.append((signal.T).dot(signal) / n_samples)

    input_covs = copy.copy(covs)
    kinds = ["correlation", "tangent", "precision", "partial correlation"]
    for kind in kinds:
        conn_measure = ConnectivityMeasure(
            kind=kind, cov_estimator=EmpiricalCovariance())
        connectivities = conn_measure.fit_transform(signals)

        # Generic: one output matrix per subject, inputs left untouched.
        # (Plain asserts replace the removed nose helpers assert_true /
        # assert_equal, for consistency with the pytest-style tests.)
        assert isinstance(connectivities, np.ndarray)
        assert len(connectivities) == len(covs)

        for k, cov_new in enumerate(connectivities):
            assert_array_equal(input_covs[k], covs[k])
            assert is_spd(covs[k], decimal=7)

            # Positive definiteness if expected and output value checks
            if kind == "tangent":
                # Tangent image is symmetric and maps back to the original
                # covariance through the geometric mean's square root.
                assert_array_almost_equal(cov_new, cov_new.T)
                gmean_sqrt = _map_eigenvalues(np.sqrt, conn_measure.mean_)
                assert is_spd(gmean_sqrt, decimal=7)
                assert is_spd(conn_measure.whitening_, decimal=7)
                assert_array_almost_equal(
                    conn_measure.whitening_.dot(gmean_sqrt),
                    np.eye(n_features))
                assert_array_almost_equal(
                    gmean_sqrt.dot(
                        _map_eigenvalues(np.exp, cov_new)).dot(gmean_sqrt),
                    covs[k])
            elif kind == "precision":
                # Precision must be the matrix inverse of the covariance.
                assert is_spd(cov_new, decimal=7)
                assert_array_almost_equal(cov_new.dot(covs[k]),
                                          np.eye(n_features))
            elif kind == "correlation":
                # Rescaling by standard deviations recovers the covariance.
                assert is_spd(cov_new, decimal=7)
                d = np.sqrt(np.diag(np.diag(covs[k])))
                assert_array_almost_equal(d.dot(cov_new).dot(d), covs[k])
            elif kind == "partial correlation":
                # Partial correlations are the negated rescaled precision,
                # with the unit diagonal restored.
                prec = linalg.inv(covs[k])
                d = np.sqrt(np.diag(np.diag(prec)))
                assert_array_almost_equal(
                    d.dot(cov_new).dot(d),
                    -prec + 2 * np.diag(np.diag(prec)))
def fit(self, X, y=None):
    """Fit PoSCE to the given time series for each subject.

    Parameters
    ----------
    X : list of n_subjects numpy.ndarray, shapes (n_samples, n_features)
        The input subjects time series. The number of samples may differ
        from one subject to another.

    y : None
        Ignored. Present for scikit-learn API consistency.

    Returns
    -------
    self : PopulationShrunkCovariance instance
        The object itself. Useful for chaining operations.

    Raises
    ------
    ValueError
        If ``self.prior_mean_type`` is neither ``"geometric"`` nor
        ``"empirical"``.
    """
    # compute covariances from timeseries
    self.cov_estimator_ = clone(self.cov_estimator)
    covariances = [self.cov_estimator_.fit(x).covariance_ for x in X]

    # compute prior mean
    if self.prior_mean_type == "geometric":
        self.prior_mean_ = _geometric_mean(covariances, max_iter=30,
                                           tol=1e-7)
    elif self.prior_mean_type == "empirical":
        self.prior_mean_ = np.mean(covariances, axis=0)
    else:
        # BUG FIX: the message previously advertised "euclidean" although
        # the value actually accepted by the branch above is "empirical".
        raise ValueError('Allowed mean types are '
                         '"geometric", "empirical"'
                         ', got type "{}"'.format(self.prior_mean_type))

    # Whitening operator (inverse square root of the prior mean) and its
    # inverse, both obtained by mapping the eigenvalues.
    self.prior_whitening_ = _map_eigenvalues(lambda x: 1.0 / np.sqrt(x),
                                             self.prior_mean_)
    self.prior_whitening_inv_ = _map_eigenvalues(lambda x: np.sqrt(x),
                                                 self.prior_mean_)

    # compute the population prior dispersion: project every covariance to
    # the tangent space at the prior mean, vectorize, and average the
    # outer products of the vectors.
    connectivities = [
        _map_eigenvalues(
            np.log,
            self.prior_whitening_.dot(cov).dot(self.prior_whitening_))
        for cov in covariances
    ]
    connectivities = np.array(connectivities)
    connectivities = sym_matrix_to_vec(connectivities)
    self.prior_cov_ = np.mean(
        [
            np.expand_dims(c, 1).dot(np.expand_dims(c, 0))
            for c in connectivities
        ],
        axis=0,
    )

    # approximate the population prior dispersion by a low-rank
    # regularized eigenvalue decomposition
    self.prior_cov_approx_ = regularized_eigenvalue_decomposition(
        self.prior_cov_, explained_variance_threshold=0.7)
    return self
def test_map_eigenvalues():
    """Check _map_eigenvalues against closed-form exp, sqrt and log maps."""
    # Exponential map: expm of the all-ones 2x2 matrix has a known
    # closed form in terms of cosh and sinh.
    ones_mat = np.ones((2, 2))
    expected_exp = exp(1.) * np.array([[cosh(1.), sinh(1.)],
                                       [sinh(1.), cosh(1.)]])
    assert_array_almost_equal(_map_eigenvalues(np.exp, ones_mat),
                              expected_exp)

    # Square-root map: it must invert the squaring of an SPD matrix.
    root = np.array([[2., -1., 0.],
                     [-1., 2., -1.],
                     [0., -1., 2.]])
    assert_array_almost_equal(_map_eigenvalues(np.sqrt, root.dot(root)),
                              root)

    # Logarithm map on a simple SPD matrix with eigenvalues 2 and 1/2.
    mat = np.array([[1.25, 0.75], [0.75, 1.25]])
    expected_log = np.array([[0., log(2.)], [log(2.), 0.]])
    assert_array_almost_equal(_map_eigenvalues(np.log, mat), expected_log)
def test_geometric_mean_geodesic():
    """Geometric mean of points on a geodesic equals the mid-time point."""
    n_matrices = 10
    n_features = 6
    # Rank-one symmetric direction matrix with unit-norm generator.
    direction = np.arange(n_features) / np.linalg.norm(
        np.arange(n_features))
    direction = direction * direction[:, np.newaxis]
    times = np.arange(n_matrices)
    # Invertible, non-orthogonal congruence transform.
    non_singular = np.eye(n_features)
    non_singular[1:3, 1:3] = np.array([[-1, -.5], [-.5, -1]])
    # Points at integer times along the geodesic, congruence-transformed.
    spds = [
        non_singular.dot(
            _map_eigenvalues(np.exp, t * direction)).dot(non_singular.T)
        for t in times
    ]
    # Expected mean: the geodesic point at the average time.
    expected = non_singular.dot(
        _map_eigenvalues(np.exp, times.mean() * direction)).dot(
        non_singular.T)
    assert_array_almost_equal(_geometric_mean(spds), expected)
def test_map_eigenvalues():
    """Check _map_eigenvalues on the exp, sqrt and log scalar maps."""
    # Test on exp map: expm of the all-ones 2x2 matrix has this closed
    # form in terms of cosh/sinh.
    sym = np.ones((2, 2))
    sym_exp = exp(1.) * np.array([[cosh(1.), sinh(1.)],
                                  [sinh(1.), cosh(1.)]])
    assert_array_almost_equal(_map_eigenvalues(np.exp, sym), sym_exp)

    # Test on sqrt map: sqrt of spd_sqrt squared must recover spd_sqrt.
    spd_sqrt = np.array([[2., -1., 0.], [-1., 2., -1.], [0., -1., 2.]])
    spd = spd_sqrt.dot(spd_sqrt)
    assert_array_almost_equal(_map_eigenvalues(np.sqrt, spd), spd_sqrt)

    # Test on log map: spd has eigenvalues 2 and 1/2, so its logm has
    # zero diagonal and log(2) off-diagonal.
    spd = np.array([[1.25, 0.75], [0.75, 1.25]])
    spd_log = np.array([[0., log(2.)], [log(2.), 0.]])
    assert_array_almost_equal(_map_eigenvalues(np.log, spd), spd_log)
def test_geometric_mean_geodesic():
    """Geometric mean of evenly spaced geodesic points is the point at
    the mean time."""
    n_matrices = 10
    n_features = 6
    # Unit-norm rank-one symmetric direction matrix.
    sym = np.arange(n_features) / np.linalg.norm(np.arange(n_features))
    sym = sym * sym[:, np.newaxis]
    times = np.arange(n_matrices)
    # Invertible, non-orthogonal congruence transform.
    non_singular = np.eye(n_features)
    non_singular[1:3, 1:3] = np.array([[-1, -.5], [-.5, -1]])
    # SPD matrices at integer times along the geodesic.
    spds = []
    for time in times:
        spds.append(
            non_singular.dot(_map_eigenvalues(np.exp, time * sym)).dot(
                non_singular.T))
    # Expected mean: the geodesic point at the average of the times.
    gmean = non_singular.dot(
        _map_eigenvalues(np.exp, times.mean() * sym)).dot(non_singular.T)
    assert_array_almost_equal(_geometric_mean(spds), gmean)
def test_geometric_mean_couple():
    """Geometric mean of two SPD matrices matches its closed-form value."""
    n_features = 7
    # First SPD matrix: squared all-ones matrix plus a diagonal shift.
    spd1 = np.ones((n_features, n_features))
    spd1 = spd1.dot(spd1) + n_features * np.eye(n_features)
    # Second SPD matrix built from a lower-triangular factor.
    spd2 = np.tril(np.ones((n_features, n_features)))
    spd2 = spd2.dot(spd2.T)
    # Square root and inverse square root of spd2 via eigendecomposition.
    vals_spd2, vecs_spd2 = np.linalg.eigh(spd2)
    spd2_sqrt = _form_symmetric(np.sqrt, vals_spd2, vecs_spd2)
    spd2_inv_sqrt = _form_symmetric(np.sqrt, 1. / vals_spd2, vecs_spd2)
    # Closed form for two matrices: B^1/2 (B^-1/2 A B^-1/2)^1/2 B^1/2.
    whitened = spd2_inv_sqrt.dot(spd1).dot(spd2_inv_sqrt)
    geo = spd2_sqrt.dot(_map_eigenvalues(np.sqrt, whitened)).dot(spd2_sqrt)
    assert_array_almost_equal(_geometric_mean([spd1, spd2]), geo)
def test_geometric_mean_couple():
    """Geometric mean of two SPD matrices matches the closed-form
    expression B^1/2 (B^-1/2 A B^-1/2)^1/2 B^1/2."""
    n_features = 7
    # First SPD matrix: squared all-ones matrix plus a diagonal shift.
    spd1 = np.ones((n_features, n_features))
    spd1 = spd1.dot(spd1) + n_features * np.eye(n_features)
    # Second SPD matrix built from a lower-triangular factor.
    spd2 = np.tril(np.ones((n_features, n_features)))
    spd2 = spd2.dot(spd2.T)
    # Square root and inverse square root of spd2 via eigendecomposition.
    vals_spd2, vecs_spd2 = np.linalg.eigh(spd2)
    spd2_sqrt = _form_symmetric(np.sqrt, vals_spd2, vecs_spd2)
    spd2_inv_sqrt = _form_symmetric(np.sqrt, 1. / vals_spd2, vecs_spd2)
    geo = spd2_sqrt.dot(
        _map_eigenvalues(np.sqrt,
                         spd2_inv_sqrt.dot(spd1).dot(spd2_inv_sqrt))).dot(
        spd2_sqrt)
    assert_array_almost_equal(_geometric_mean([spd1, spd2]), geo)
def grad_geometric_mean(mats, init=None, max_iter=10, tol=1e-7):
    """Return the norm of the covariant derivative at each iteration step of
    geometric_mean. See its docstring for details.

    Norm is intrinsic norm on the tangent space of the manifold of symmetric
    positive definite matrices.

    Parameters
    ----------
    mats : list of array-like, each of shape (n_features, n_features)
        Matrices to average; assumed symmetric positive definite so that
        eigh / log / sqrt below are valid — TODO confirm with callers.

    init : array-like of shape (n_features, n_features), optional
        Starting point of the iteration; defaults to the arithmetic mean.

    max_iter : int, optional
        Maximal number of gradient iterations.

    tol : float or None, optional
        Early-stopping threshold on the normalized gradient norm; None
        disables early stopping.

    Returns
    -------
    grad_norm : list of float
        Norm of the covariant derivative in the tangent space at each step.
    """
    mats = np.array(mats)

    # Initialization
    if init is None:
        gmean = np.mean(mats, axis=0)
    else:
        gmean = init

    norm_old = np.inf
    step = 1.
    grad_norm = []
    for n in range(max_iter):
        # Computation of the gradient: whiten every matrix by the current
        # mean, take matrix logarithms, and average them.
        vals_gmean, vecs_gmean = linalg.eigh(gmean)
        gmean_inv_sqrt = _form_symmetric(np.sqrt, 1. / vals_gmean,
                                         vecs_gmean)
        whitened_mats = [
            gmean_inv_sqrt.dot(mat).dot(gmean_inv_sqrt) for mat in mats
        ]
        logs = [_map_eigenvalues(np.log, w_mat) for w_mat in whitened_mats]
        logs_mean = np.mean(logs, axis=0)  # Covariant derivative is
        # - gmean.dot(logms_mean)
        norm = np.linalg.norm(logs_mean)  # Norm of the covariant derivative on
        # the tangent space at point gmean

        # Update of the minimizer: move along the manifold in the gradient
        # direction, scaled by the current step size.
        vals_log, vecs_log = linalg.eigh(logs_mean)
        gmean_sqrt = _form_symmetric(np.sqrt, vals_gmean, vecs_gmean)
        gmean = gmean_sqrt.dot(
            _form_symmetric(np.exp, vals_log * step,
                            vecs_log)).dot(gmean_sqrt)

        # Update the norm and the step size: record improvements, and when
        # the gradient norm strictly increased, halve the step and report
        # the previous (smaller) norm instead.
        if norm < norm_old:
            norm_old = norm
        if norm > norm_old:
            step = step / 2.
            norm = norm_old

        grad_norm.append(norm / gmean.size)
        if tol is not None and norm / gmean.size < tol:
            break

    return grad_norm
def grad_geometric_mean(mats, init=None, max_iter=10, tol=1e-7):
    """Return the norm of the covariant derivative at each iteration step of
    geometric_mean. See its docstring for details.

    Norm is intrinsic norm on the tangent space of the manifold of symmetric
    positive definite matrices.

    Parameters
    ----------
    mats : list of array-like, each of shape (n_features, n_features)
        Matrices to average; assumed symmetric positive definite so that
        eigh / log / sqrt below are valid — TODO confirm with callers.

    init : array-like of shape (n_features, n_features), optional
        Starting point of the iteration; defaults to the arithmetic mean.

    max_iter : int, optional
        Maximal number of gradient iterations.

    tol : float or None, optional
        Early-stopping threshold on the normalized gradient norm; None
        disables early stopping.

    Returns
    -------
    grad_norm : list of float
        Norm of the covariant derivative in the tangent space at each step.
    """
    mats = np.array(mats)

    # Initialization
    if init is None:
        gmean = np.mean(mats, axis=0)
    else:
        gmean = init

    norm_old = np.inf
    step = 1.
    grad_norm = []
    for n in range(max_iter):
        # Computation of the gradient: whiten every matrix by the current
        # mean, take matrix logarithms, and average them.
        vals_gmean, vecs_gmean = linalg.eigh(gmean)
        gmean_inv_sqrt = _form_symmetric(np.sqrt, 1. / vals_gmean,
                                         vecs_gmean)
        whitened_mats = [gmean_inv_sqrt.dot(mat).dot(gmean_inv_sqrt)
                         for mat in mats]
        logs = [_map_eigenvalues(np.log, w_mat) for w_mat in whitened_mats]
        logs_mean = np.mean(logs, axis=0)  # Covariant derivative is
        # - gmean.dot(logms_mean)
        norm = np.linalg.norm(logs_mean)  # Norm of the covariant derivative on
        # the tangent space at point gmean

        # Update of the minimizer: move along the manifold in the gradient
        # direction, scaled by the current step size.
        vals_log, vecs_log = linalg.eigh(logs_mean)
        gmean_sqrt = _form_symmetric(np.sqrt, vals_gmean, vecs_gmean)
        gmean = gmean_sqrt.dot(
            _form_symmetric(np.exp, vals_log * step,
                            vecs_log)).dot(gmean_sqrt)

        # Update the norm and the step size: record improvements, and when
        # the gradient norm strictly increased, halve the step and report
        # the previous (smaller) norm instead.
        if norm < norm_old:
            norm_old = norm
        if norm > norm_old:
            step = step / 2.
            norm = norm_old

        grad_norm.append(norm / gmean.size)
        if tol is not None and norm / gmean.size < tol:
            break

    return grad_norm
def map_tangent(data, diag=False):
    """Transform to tangent space.

    Projects each matrix to the tangent space at the geometric mean of
    ``data`` and returns the vectorized projections.

    Parameters
    ----------
    data : list of numpy.ndarray of shape (n_features, n_features)
        List of semi-positive definite matrices.

    diag : bool
        Whether to discard the diagonal elements before vectorizing.
        Default is False.

    Returns
    -------
    tangent : numpy.ndarray of shape (n_subjects, n_vector_entries)
        Vectorized tangent-space images, one row per input matrix.
        NOTE(review): the original docstring claimed
        n_features * (n_features - 1) / 2 entries, which matches the
        ``diag=True`` case only; with the diagonal kept the vectors are
        longer (n_features * (n_features + 1) / 2 for a standard
        symmetric vectorization) — confirm against sym_matrix_to_vec.
    """
    # Geometric (Karcher) mean of the input matrices.
    mean_ = _geometric_mean(data, max_iter=30, tol=1e-7)
    # Whitening operator: inverse square root of the mean.
    whitening_ = _map_eigenvalues(lambda x: 1. / np.sqrt(x), mean_)
    # Matrix logarithm of each whitened matrix = tangent-space projection.
    tangent = [_map_eigenvalues(np.log, whitening_.dot(c).dot(whitening_))
               for c in data]
    tangent = np.array(tangent)
    return sym_matrix_to_vec(tangent, discard_diagonal=diag)
def transform(self, X):
    """Transform subjects timeseries to shrunk covariances in the tangent
    space using the population prior.

    Parameters
    ----------
    X : list of n_subjects numpy.ndarray, shapes (n_samples, n_features)
        The input subjects time series. The number of samples may differ
        from one subject to another.

    Returns
    -------
    shrunk_connectivities : numpy.ndarray, shape (n_subjects, \
            n_features * (n_features + 1) / 2)
        Shrunk individual connectivities as vectors.
    """
    # compute covariances from timeseries
    covariances = [self.cov_estimator_.fit(x).covariance_ for x in X]

    # transform in the tangent space at the prior mean, whitening each
    # covariance before taking the matrix logarithm
    connectivities = [
        _map_eigenvalues(
            np.log,
            self.prior_whitening_.dot(cov).dot(self.prior_whitening_))
        for cov in covariances
    ]
    connectivities = np.array(connectivities)
    connectivities = sym_matrix_to_vec(connectivities)

    # shrink each vectorized connectivity towards the population prior
    shrunk_connectivities = [
        shrunk_covariance_embedding(
            cov_embedding=c,
            prior_cov_approx=self.prior_cov_approx_,
            shrinkage=self.shrinkage,
        )
        for c in connectivities
    ]
    # BUG FIX: return an ndarray as the docstring promises, instead of a
    # plain Python list (backward compatible: indexing and iteration are
    # preserved).
    return np.array(shrunk_connectivities)
def test_connectivity_measure_outputs():
    """End-to-end checks of ConnectivityMeasure outputs for every kind,
    including the mean_, vectorization and inverse transformation."""
    n_subjects = 10
    n_features = 49

    # Generate signals and compute covariances
    emp_covs = []
    ledoit_covs = []
    signals = []
    ledoit_estimator = LedoitWolf()
    for k in range(n_subjects):
        n_samples = 200 + k
        signal, _, _ = generate_signals(n_features=n_features,
                                        n_confounds=5, length=n_samples,
                                        same_variance=False)
        signals.append(signal)
        signal -= signal.mean(axis=0)
        emp_covs.append((signal.T).dot(signal) / n_samples)
        ledoit_covs.append(ledoit_estimator.fit(signal).covariance_)

    kinds = ["covariance", "correlation", "tangent", "precision",
             "partial correlation"]

    # Check outputs properties
    for cov_estimator, covs in zip([EmpiricalCovariance(), LedoitWolf()],
                                   [emp_covs, ledoit_covs]):
        input_covs = copy.copy(covs)
        for kind in kinds:
            conn_measure = ConnectivityMeasure(kind=kind,
                                               cov_estimator=cov_estimator)
            connectivities = conn_measure.fit_transform(signals)

            # Generic
            assert_true(isinstance(connectivities, np.ndarray))
            assert_equal(len(connectivities), len(covs))

            for k, cov_new in enumerate(connectivities):
                assert_array_equal(input_covs[k], covs[k])
                assert(is_spd(covs[k], decimal=7))

                # Positive definiteness if expected and output value checks
                if kind == "tangent":
                    assert_array_almost_equal(cov_new, cov_new.T)
                    gmean_sqrt = _map_eigenvalues(np.sqrt,
                                                  conn_measure.mean_)
                    assert(is_spd(gmean_sqrt, decimal=7))
                    assert(is_spd(conn_measure.whitening_, decimal=7))
                    assert_array_almost_equal(conn_measure.whitening_.dot(
                        gmean_sqrt), np.eye(n_features))
                    assert_array_almost_equal(gmean_sqrt.dot(
                        _map_eigenvalues(np.exp, cov_new)).dot(gmean_sqrt),
                        covs[k])
                elif kind == "precision":
                    assert(is_spd(cov_new, decimal=7))
                    assert_array_almost_equal(cov_new.dot(covs[k]),
                                              np.eye(n_features))
                elif kind == "correlation":
                    assert(is_spd(cov_new, decimal=7))
                    d = np.sqrt(np.diag(np.diag(covs[k])))
                    # NOTE(review): `==` between two estimator instances
                    # may be identity-based, in which case this comparison
                    # with a fresh EmpiricalCovariance() is never True and
                    # the assertion below is skipped — confirm.
                    if cov_estimator == EmpiricalCovariance():
                        assert_array_almost_equal(d.dot(cov_new).dot(d),
                                                  covs[k])
                    assert_array_almost_equal(np.diag(cov_new),
                                              np.ones((n_features)))
                elif kind == "partial correlation":
                    prec = linalg.inv(covs[k])
                    d = np.sqrt(np.diag(np.diag(prec)))
                    assert_array_almost_equal(
                        d.dot(cov_new).dot(d),
                        -prec + 2 * np.diag(np.diag(prec)))

    # Check the mean_
    for kind in kinds:
        conn_measure = ConnectivityMeasure(kind=kind)
        conn_measure.fit_transform(signals)
        assert_equal((conn_measure.mean_).shape, (n_features, n_features))
        if kind != 'tangent':
            assert_array_almost_equal(
                conn_measure.mean_,
                np.mean(conn_measure.transform(signals), axis=0))

    # Check that the mean isn't modified in transform
    conn_measure = ConnectivityMeasure(kind='covariance')
    conn_measure.fit(signals[:1])
    mean = conn_measure.mean_
    conn_measure.transform(signals[1:])
    assert_array_equal(mean, conn_measure.mean_)

    # Check vectorization option
    for kind in kinds:
        conn_measure = ConnectivityMeasure(kind=kind)
        connectivities = conn_measure.fit_transform(signals)
        conn_measure = ConnectivityMeasure(vectorize=True, kind=kind)
        vectorized_connectivities = conn_measure.fit_transform(signals)
        assert_array_almost_equal(vectorized_connectivities,
                                  sym_matrix_to_vec(connectivities))

    # Check not fitted error
    assert_raises_regex(
        ValueError, 'has not been fitted. ',
        ConnectivityMeasure().inverse_transform,
        vectorized_connectivities)

    # Check inverse transformation
    kinds.remove('tangent')
    for kind in kinds:
        # without vectorization: input matrices are returned with no change
        conn_measure = ConnectivityMeasure(kind=kind)
        connectivities = conn_measure.fit_transform(signals)
        assert_array_almost_equal(
            conn_measure.inverse_transform(connectivities),
            connectivities)

        # with vectorization: input vectors are reshaped into matrices
        # if diagonal has not been discarded
        conn_measure = ConnectivityMeasure(kind=kind, vectorize=True)
        vectorized_connectivities = conn_measure.fit_transform(signals)
        assert_array_almost_equal(
            conn_measure.inverse_transform(vectorized_connectivities),
            connectivities)

    # with vectorization if diagonal has been discarded
    for kind in ['correlation', 'partial correlation']:
        connectivities = ConnectivityMeasure(
            kind=kind).fit_transform(signals)
        conn_measure = ConnectivityMeasure(kind=kind, vectorize=True,
                                           discard_diagonal=True)
        vectorized_connectivities = conn_measure.fit_transform(signals)
        # correlation-like kinds have a known unit diagonal, so it can be
        # reconstructed without being supplied
        assert_array_almost_equal(
            conn_measure.inverse_transform(vectorized_connectivities),
            connectivities)

    for kind in ['covariance', 'precision']:
        connectivities = ConnectivityMeasure(
            kind=kind).fit_transform(signals)
        conn_measure = ConnectivityMeasure(kind=kind, vectorize=True,
                                           discard_diagonal=True)
        vectorized_connectivities = conn_measure.fit_transform(signals)
        # these kinds need the diagonal passed back explicitly
        diagonal = np.array([np.diagonal(conn) / sqrt(2) for conn in
                             connectivities])
        inverse_transformed = conn_measure.inverse_transform(
            vectorized_connectivities, diagonal=diagonal)
        assert_array_almost_equal(inverse_transformed, connectivities)
        assert_raises_regex(ValueError,
                            'can not reconstruct connectivity matrices',
                            conn_measure.inverse_transform,
                            vectorized_connectivities)

    # for 'tangent' kind, covariance matrices are reconstructed
    # without vectorization
    tangent_measure = ConnectivityMeasure(kind='tangent')
    displacements = tangent_measure.fit_transform(signals)
    covariances = ConnectivityMeasure(kind='covariance').fit_transform(
        signals)
    assert_array_almost_equal(
        tangent_measure.inverse_transform(displacements), covariances)

    # with vectorization
    # when diagonal has not been discarded
    tangent_measure = ConnectivityMeasure(kind='tangent', vectorize=True)
    vectorized_displacements = tangent_measure.fit_transform(signals)
    assert_array_almost_equal(
        tangent_measure.inverse_transform(vectorized_displacements),
        covariances)

    # when diagonal has been discarded
    tangent_measure = ConnectivityMeasure(kind='tangent', vectorize=True,
                                          discard_diagonal=True)
    vectorized_displacements = tangent_measure.fit_transform(signals)
    diagonal = np.array([np.diagonal(matrix) / sqrt(2) for matrix in
                         displacements])
    inverse_transformed = tangent_measure.inverse_transform(
        vectorized_displacements, diagonal=diagonal)
    assert_array_almost_equal(inverse_transformed, covariances)
    assert_raises_regex(ValueError,
                        'can not reconstruct connectivity matrices',
                        tangent_measure.inverse_transform,
                        vectorized_displacements)
def test_connectivity_measure_outputs():
    """End-to-end checks of ConnectivityMeasure outputs for every kind,
    including the mean_, vectorization and inverse transformation
    (pytest-style assertions)."""
    n_subjects = 10
    n_features = 49

    # Generate signals and compute covariances
    emp_covs = []
    ledoit_covs = []
    signals = []
    ledoit_estimator = LedoitWolf()
    for k in range(n_subjects):
        n_samples = 200 + k
        signal, _, _ = generate_signals(n_features=n_features,
                                        n_confounds=5, length=n_samples,
                                        same_variance=False)
        signals.append(signal)
        signal -= signal.mean(axis=0)
        emp_covs.append((signal.T).dot(signal) / n_samples)
        ledoit_covs.append(ledoit_estimator.fit(signal).covariance_)

    kinds = ["covariance", "correlation", "tangent", "precision",
             "partial correlation"]

    # Check outputs properties
    for cov_estimator, covs in zip([EmpiricalCovariance(), LedoitWolf()],
                                   [emp_covs, ledoit_covs]):
        input_covs = copy.copy(covs)
        for kind in kinds:
            conn_measure = ConnectivityMeasure(kind=kind,
                                               cov_estimator=cov_estimator)
            connectivities = conn_measure.fit_transform(signals)

            # Generic
            assert isinstance(connectivities, np.ndarray)
            assert len(connectivities) == len(covs)

            for k, cov_new in enumerate(connectivities):
                assert_array_equal(input_covs[k], covs[k])
                assert(is_spd(covs[k], decimal=7))

                # Positive definiteness if expected and output value checks
                if kind == "tangent":
                    assert_array_almost_equal(cov_new, cov_new.T)
                    gmean_sqrt = _map_eigenvalues(np.sqrt,
                                                  conn_measure.mean_)
                    assert(is_spd(gmean_sqrt, decimal=7))
                    assert(is_spd(conn_measure.whitening_, decimal=7))
                    assert_array_almost_equal(conn_measure.whitening_.dot(
                        gmean_sqrt), np.eye(n_features))
                    assert_array_almost_equal(gmean_sqrt.dot(
                        _map_eigenvalues(np.exp, cov_new)).dot(gmean_sqrt),
                        covs[k])
                elif kind == "precision":
                    assert(is_spd(cov_new, decimal=7))
                    assert_array_almost_equal(cov_new.dot(covs[k]),
                                              np.eye(n_features))
                elif kind == "correlation":
                    assert(is_spd(cov_new, decimal=7))
                    d = np.sqrt(np.diag(np.diag(covs[k])))
                    # NOTE(review): `==` between two estimator instances
                    # may be identity-based, in which case this comparison
                    # with a fresh EmpiricalCovariance() is never True and
                    # the assertion below is skipped — confirm.
                    if cov_estimator == EmpiricalCovariance():
                        assert_array_almost_equal(d.dot(cov_new).dot(d),
                                                  covs[k])
                    assert_array_almost_equal(np.diag(cov_new),
                                              np.ones((n_features)))
                elif kind == "partial correlation":
                    prec = linalg.inv(covs[k])
                    d = np.sqrt(np.diag(np.diag(prec)))
                    assert_array_almost_equal(
                        d.dot(cov_new).dot(d),
                        -prec + 2 * np.diag(np.diag(prec)))

    # Check the mean_
    for kind in kinds:
        conn_measure = ConnectivityMeasure(kind=kind)
        conn_measure.fit_transform(signals)
        assert (conn_measure.mean_).shape == (n_features, n_features)
        if kind != 'tangent':
            assert_array_almost_equal(
                conn_measure.mean_,
                np.mean(conn_measure.transform(signals), axis=0))

    # Check that the mean isn't modified in transform
    conn_measure = ConnectivityMeasure(kind='covariance')
    conn_measure.fit(signals[:1])
    mean = conn_measure.mean_
    conn_measure.transform(signals[1:])
    assert_array_equal(mean, conn_measure.mean_)

    # Check vectorization option
    for kind in kinds:
        conn_measure = ConnectivityMeasure(kind=kind)
        connectivities = conn_measure.fit_transform(signals)
        conn_measure = ConnectivityMeasure(vectorize=True, kind=kind)
        vectorized_connectivities = conn_measure.fit_transform(signals)
        assert_array_almost_equal(vectorized_connectivities,
                                  sym_matrix_to_vec(connectivities))

    # Check not fitted error
    with pytest.raises(ValueError, match='has not been fitted. '):
        ConnectivityMeasure().inverse_transform(vectorized_connectivities)

    # Check inverse transformation
    kinds.remove('tangent')
    for kind in kinds:
        # without vectorization: input matrices are returned with no change
        conn_measure = ConnectivityMeasure(kind=kind)
        connectivities = conn_measure.fit_transform(signals)
        assert_array_almost_equal(
            conn_measure.inverse_transform(connectivities),
            connectivities)

        # with vectorization: input vectors are reshaped into matrices
        # if diagonal has not been discarded
        conn_measure = ConnectivityMeasure(kind=kind, vectorize=True)
        vectorized_connectivities = conn_measure.fit_transform(signals)
        assert_array_almost_equal(
            conn_measure.inverse_transform(vectorized_connectivities),
            connectivities)

    # with vectorization if diagonal has been discarded
    for kind in ['correlation', 'partial correlation']:
        connectivities = ConnectivityMeasure(
            kind=kind).fit_transform(signals)
        conn_measure = ConnectivityMeasure(kind=kind, vectorize=True,
                                           discard_diagonal=True)
        vectorized_connectivities = conn_measure.fit_transform(signals)
        # correlation-like kinds have a known unit diagonal, so it can be
        # reconstructed without being supplied
        assert_array_almost_equal(
            conn_measure.inverse_transform(vectorized_connectivities),
            connectivities)

    for kind in ['covariance', 'precision']:
        connectivities = ConnectivityMeasure(
            kind=kind).fit_transform(signals)
        conn_measure = ConnectivityMeasure(kind=kind, vectorize=True,
                                           discard_diagonal=True)
        vectorized_connectivities = conn_measure.fit_transform(signals)
        # these kinds need the diagonal passed back explicitly
        diagonal = np.array([np.diagonal(conn) / sqrt(2) for conn in
                             connectivities])
        inverse_transformed = conn_measure.inverse_transform(
            vectorized_connectivities, diagonal=diagonal)
        assert_array_almost_equal(inverse_transformed, connectivities)
        with pytest.raises(ValueError,
                           match='can not reconstruct connectivity '
                                 'matrices'):
            conn_measure.inverse_transform(vectorized_connectivities)

    # for 'tangent' kind, covariance matrices are reconstructed
    # without vectorization
    tangent_measure = ConnectivityMeasure(kind='tangent')
    displacements = tangent_measure.fit_transform(signals)
    covariances = ConnectivityMeasure(kind='covariance').fit_transform(
        signals)
    assert_array_almost_equal(
        tangent_measure.inverse_transform(displacements), covariances)

    # with vectorization
    # when diagonal has not been discarded
    tangent_measure = ConnectivityMeasure(kind='tangent', vectorize=True)
    vectorized_displacements = tangent_measure.fit_transform(signals)
    assert_array_almost_equal(
        tangent_measure.inverse_transform(vectorized_displacements),
        covariances)

    # when diagonal has been discarded
    tangent_measure = ConnectivityMeasure(kind='tangent', vectorize=True,
                                          discard_diagonal=True)
    vectorized_displacements = tangent_measure.fit_transform(signals)
    diagonal = np.array([np.diagonal(matrix) / sqrt(2) for matrix in
                         displacements])
    inverse_transformed = tangent_measure.inverse_transform(
        vectorized_displacements, diagonal=diagonal)
    assert_array_almost_equal(inverse_transformed, covariances)
    with pytest.raises(ValueError,
                       match='can not reconstruct connectivity matrices'):
        tangent_measure.inverse_transform(vectorized_displacements)