def test_integration_adaptive_graph_lasso(self, params_in):
    '''Just tests inputs/outputs (not validity of result).'''
    n_features = 20
    n_samples = 25
    cov, prec, adj = ClusterGraph(
        n_blocks=1,
        chain_blocks=False,
        seed=1,
    ).create(n_features, 0.8)
    prng = np.random.RandomState(2)
    X = prng.multivariate_normal(np.zeros(n_features), cov, size=n_samples)

    model = AdaptiveGraphLasso(**params_in)
    model.fit(X)
    assert model.estimator_ is not None
    assert model.lam_ is not None
    assert np.sum(model.lam_[np.diag_indices(n_features)]) == 0

    if params_in['method'] == 'binary':
        uvals = set(model.lam_.flat)
        assert len(uvals) == 2
        assert 0 in uvals
        assert 1 in uvals
    elif params_in['method'] == 'inverse' or \
            params_in['method'] == 'inverse_squared':
        uvals = set(model.lam_.flat[model.lam_.flat != 0])
        assert len(uvals) > 0
def adaptive_graph_lasso(X, model_selector, method):
    '''Run QuicGraphLassoCV or QuicGraphLassoEBIC as a two-step adaptive fit
    with the method of choice (currently: 'binary', 'inverse',
    'inverse_squared').

    Compare the support and values to the model-selection estimator.
    '''
    metric = 'log_likelihood'
    print('Adaptive {} with:'.format(model_selector))
    print('   adaptive-method: {}'.format(method))
    if model_selector == 'QuicGraphLassoCV':
        print('   metric: {}'.format(metric))
        model = AdaptiveGraphLasso(
            estimator=QuicGraphLassoCV(
                cv=2,  # can't deal with more folds at this small sample size
                n_refinements=6,
                init_method='cov',
                score_metric=metric),
            method=method,
        )
    elif model_selector == 'QuicGraphLassoEBIC':
        model = AdaptiveGraphLasso(
            estimator=QuicGraphLassoEBIC(),
            method=method,
        )
    model.fit(X)
    lam_norm_ = np.linalg.norm(model.estimator_.lam_)
    print('   ||lam_||_2: {}'.format(lam_norm_))
    return model.estimator_.covariance_, model.estimator_.precision_, lam_norm_
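# Hedged usage sketch for adaptive_graph_lasso above: it only shows the calling
# convention on small synthetic data. X_demo and the loops are illustrative and
# assume numpy plus the skggm estimators imported by the surrounding example.
import numpy as np

prng = np.random.RandomState(1)
X_demo = prng.multivariate_normal(np.zeros(10), np.eye(10), size=50)
for selector in ('QuicGraphLassoCV', 'QuicGraphLassoEBIC'):
    for method in ('binary', 'inverse', 'inverse_squared'):
        cov_, prec_, lam_norm_ = adaptive_graph_lasso(X_demo, selector, method)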
def adaptive_model_average(X, penalization, method):
    '''Run ModelAverage in default mode (QuicGraphLassoCV) to obtain the
    proportion matrix.

    NOTE: Only method='binary' really makes sense in this case.
    '''
    n_trials = 100
    metric = 'log_likelihood'
    print('Adaptive ModelAverage with:')
    print('   estimator: QuicGraphLasso (default)')
    print('   n_trials: {}'.format(n_trials))
    print('   penalization: {}'.format(penalization))
    print('   adaptive-method: {}'.format(method))

    # If penalization is random, first find a decent scalar lam_ to build the
    # random perturbation matrix around. lam doesn't matter for fully-random.
    lam = 0.5
    if penalization == 'random':
        cv_model = QuicGraphLassoCV(
            cv=2,
            n_refinements=6,
            sc=spark.sparkContext,  # NOQA
            init_method='cov',
            score_metric=metric)
        cv_model.fit(X)
        lam = cv_model.lam_
        print('   lam: {}'.format(lam))

    model = AdaptiveGraphLasso(
        estimator=ModelAverage(
            n_trials=n_trials,
            penalization=penalization,
            lam=lam,
            sc=spark.sparkContext),  # NOQA
        method=method,
    )
    model.fit(X)
    lam_norm_ = np.linalg.norm(model.estimator_.lam_)
    print('   ||lam_||_2: {}'.format(lam_norm_))
    return model.estimator_.covariance_, model.estimator_.precision_, lam_norm_
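# Hedged usage sketch for adaptive_model_average above. As the docstring notes,
# only method='binary' is really meaningful with a ModelAverage proportion
# matrix. A live `spark` session is assumed because the function references
# spark.sparkContext; X_demo is the illustrative array from the sketch above.
cov_, prec_, lam_norm_ = adaptive_model_average(X_demo, penalization='random', method='binary')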
def test_integration_adaptive_graph_lasso(self, params_in):
    '''Just tests inputs/outputs (not validity of result).'''
    X = datasets.load_diabetes().data
    n_examples, n_features = X.shape

    model = AdaptiveGraphLasso(**params_in)
    model.fit(X)
    assert model.estimator_ is not None
    assert model.lam_ is not None
    assert np.sum(model.lam_[np.diag_indices(n_features)]) == 0

    if params_in['method'] == 'binary':
        uvals = set(model.lam_.flat)
        assert len(uvals) == 2
        assert 0 in uvals
        assert 1 in uvals
    elif params_in['method'] == 'inverse' or \
            params_in['method'] == 'inverse_squared':
        uvals = set(model.lam_.flat[model.lam_.flat != 0])
        assert len(uvals) > 0
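# Hedged sketch of how params_in for the integration tests above could be
# parametrized with pytest; the exact estimator arguments are assumptions, but
# the 'method' values mirror the branches asserted in the test bodies.
import pytest

@pytest.mark.parametrize("params_in", [
    {"estimator": QuicGraphLassoCV(cv=2), "method": "binary"},
    {"estimator": QuicGraphLassoCV(cv=2), "method": "inverse"},
    {"estimator": QuicGraphLassoEBIC(), "method": "inverse_squared"},
])
class TestAdaptiveGraphLassoIntegration(object):
    # test methods such as test_integration_adaptive_graph_lasso(self, params_in)
    # would live here
    pass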
def get_conn_matrix(time_series, conn_model):
    import warnings
    warnings.simplefilter("ignore")
    from nilearn.connectome import ConnectivityMeasure
    from sklearn.covariance import GraphLassoCV
    try:
        from brainiak.fcma.util import compute_correlation
    except ImportError:
        pass
    if conn_model == 'corr':
        # credit: nilearn
        print('\nComputing correlation matrix...\n')
        conn_measure = ConnectivityMeasure(kind='correlation')
        conn_matrix = conn_measure.fit_transform([time_series])[0]
    elif conn_model == 'corr_fast':
        # credit: brainiak
        try:
            print('\nComputing accelerated fcma correlation matrix...\n')
            conn_matrix = compute_correlation(time_series, time_series)
        except RuntimeError:
            print('Cannot run accelerated correlation computation due to a missing dependency. You need brainiak installed!')
    elif conn_model == 'partcorr':
        # credit: nilearn
        print('\nComputing partial correlation matrix...\n')
        conn_measure = ConnectivityMeasure(kind='partial correlation')
        conn_matrix = conn_measure.fit_transform([time_series])[0]
    elif conn_model == 'tangent':
        # credit: nilearn
        print('\nComputing tangent matrix...\n')
        conn_measure = ConnectivityMeasure(kind='tangent')
        conn_matrix = conn_measure.fit_transform([time_series])[0]
    elif conn_model == 'cov' or conn_model == 'sps':
        # Fit estimator to matrix to get sparse matrix
        estimator_shrunk = None
        estimator = GraphLassoCV()
        try:
            print('\nComputing covariance...\n')
            estimator.fit(time_series)
        except:
            try:
                print('Unstable Lasso estimation--Attempting to re-run by first applying shrinkage...')
                from sklearn.covariance import GraphLasso, empirical_covariance, shrunk_covariance
                emp_cov = empirical_covariance(time_series)
                for i in np.arange(0.8, 0.99, 0.01):
                    shrunk_cov = shrunk_covariance(emp_cov, shrinkage=i)
                    alphaRange = 10.0 ** np.arange(-8, 0)
                    for alpha in alphaRange:
                        try:
                            estimator_shrunk = GraphLasso(alpha)
                            estimator_shrunk.fit(shrunk_cov)
                            print("Calculated graph-lasso covariance matrix for alpha=%s" % alpha)
                            break
                        except FloatingPointError:
                            print("Failed at alpha=%s" % alpha)
                    if estimator_shrunk is None:
                        pass
                    else:
                        break
            except:
                raise ValueError('Unstable Lasso estimation! Shrinkage failed.')
        if conn_model == 'sps':
            try:
                print('\nFetching precision matrix from covariance estimator...\n')
                conn_matrix = -estimator.precision_
            except:
                print('\nFetching shrunk precision matrix from covariance estimator...\n')
                conn_matrix = -estimator_shrunk.precision_
        elif conn_model == 'cov':
            try:
                print('\nFetching covariance matrix from covariance estimator...\n')
                conn_matrix = estimator.covariance_
            except:
                conn_matrix = estimator_shrunk.covariance_
    elif conn_model == 'QuicGraphLasso':
        from inverse_covariance import QuicGraphLasso
        # Compute the sparse inverse covariance via QuicGraphLasso
        # credit: skggm
        model = QuicGraphLasso(
            init_method='cov',
            lam=0.5,
            mode='default',
            verbose=1)
        print('\nCalculating QuicGraphLasso precision matrix using skggm...\n')
        model.fit(time_series)
        conn_matrix = -model.precision_
    elif conn_model == 'QuicGraphLassoCV':
        from inverse_covariance import QuicGraphLassoCV
        # Compute the sparse inverse covariance via QuicGraphLassoCV
        # credit: skggm
        model = QuicGraphLassoCV(init_method='cov', verbose=1)
        print('\nCalculating QuicGraphLassoCV precision matrix using skggm...\n')
        model.fit(time_series)
        conn_matrix = -model.precision_
    elif conn_model == 'QuicGraphLassoEBIC':
        from inverse_covariance import QuicGraphLassoEBIC
        # Compute the sparse inverse covariance via QuicGraphLassoEBIC
        # credit: skggm
        model = QuicGraphLassoEBIC(init_method='cov', verbose=1)
        print('\nCalculating QuicGraphLassoEBIC precision matrix using skggm...\n')
        model.fit(time_series)
        conn_matrix = -model.precision_
    elif conn_model == 'AdaptiveQuicGraphLasso':
        from inverse_covariance import AdaptiveGraphLasso, QuicGraphLassoEBIC
        # Compute the sparse inverse covariance via
        # AdaptiveGraphLasso + QuicGraphLassoEBIC + method='binary'
        # credit: skggm
        model = AdaptiveGraphLasso(
            estimator=QuicGraphLassoEBIC(init_method='cov'),
            method='binary',
        )
        print('\nCalculating AdaptiveQuicGraphLasso precision matrix using skggm...\n')
        model.fit(time_series)
        conn_matrix = -model.estimator_.precision_
    return conn_matrix
def get_conn_matrix(time_series, conn_model):
    from nilearn.connectome import ConnectivityMeasure
    from sklearn.covariance import GraphLassoCV
    conn_matrix = None
    if conn_model == 'corr':
        # credit: nilearn
        print('\nComputing correlation matrix...\n')
        conn_measure = ConnectivityMeasure(kind='correlation')
        conn_matrix = conn_measure.fit_transform([time_series])[0]
    elif conn_model == 'partcorr':
        # credit: nilearn
        print('\nComputing partial correlation matrix...\n')
        conn_measure = ConnectivityMeasure(kind='partial correlation')
        conn_matrix = conn_measure.fit_transform([time_series])[0]
    elif conn_model == 'cov' or conn_model == 'sps':
        # Fit estimator to matrix to get sparse matrix
        estimator_shrunk = None
        estimator = GraphLassoCV()
        try:
            print('\nComputing covariance...\n')
            estimator.fit(time_series)
        except RuntimeWarning:
            print('Unstable Lasso estimation--Attempting to re-run by first applying shrinkage...')
            try:
                from sklearn.covariance import GraphLasso, empirical_covariance, shrunk_covariance
                emp_cov = empirical_covariance(time_series)
                for i in np.arange(0.8, 0.99, 0.01):
                    shrunk_cov = shrunk_covariance(emp_cov, shrinkage=i)
                    alphaRange = 10.0 ** np.arange(-8, 0)
                    for alpha in alphaRange:
                        try:
                            estimator_shrunk = GraphLasso(alpha)
                            estimator_shrunk.fit(shrunk_cov)
                            print("Retrying covariance matrix estimate with alpha=%s" % alpha)
                            if estimator_shrunk is None:
                                pass
                            else:
                                break
                        except RuntimeWarning:
                            print("Covariance estimation failed with shrinkage at alpha=%s" % alpha)
                            continue
            except ValueError:
                print('Unstable Lasso estimation! Shrinkage failed. A different connectivity model may be needed.')
        if estimator is None and estimator_shrunk is None:
            raise RuntimeError('ERROR: Covariance estimation failed.')
        if conn_model == 'sps':
            if estimator_shrunk is None:
                print('\nFetching precision matrix from covariance estimator...\n')
                conn_matrix = -estimator.precision_
            else:
                print('\nFetching shrunk precision matrix from covariance estimator...\n')
                conn_matrix = -estimator_shrunk.precision_
        elif conn_model == 'cov':
            if estimator_shrunk is None:
                print('\nFetching covariance matrix from covariance estimator...\n')
                conn_matrix = estimator.covariance_
            else:
                conn_matrix = estimator_shrunk.covariance_
    elif conn_model == 'QuicGraphLasso':
        from inverse_covariance import QuicGraphLasso
        # Compute the sparse inverse covariance via QuicGraphLasso
        # credit: skggm
        model = QuicGraphLasso(
            init_method='cov',
            lam=0.5,
            mode='default',
            verbose=1)
        print('\nCalculating QuicGraphLasso precision matrix using skggm...\n')
        model.fit(time_series)
        conn_matrix = -model.precision_
    elif conn_model == 'QuicGraphLassoCV':
        from inverse_covariance import QuicGraphLassoCV
        # Compute the sparse inverse covariance via QuicGraphLassoCV
        # credit: skggm
        model = QuicGraphLassoCV(
            init_method='cov',
            verbose=1)
        print('\nCalculating QuicGraphLassoCV precision matrix using skggm...\n')
        model.fit(time_series)
        conn_matrix = -model.precision_
    elif conn_model == 'QuicGraphLassoEBIC':
        from inverse_covariance import QuicGraphLassoEBIC
        # Compute the sparse inverse covariance via QuicGraphLassoEBIC
        # credit: skggm
        model = QuicGraphLassoEBIC(
            init_method='cov',
            verbose=1)
        print('\nCalculating QuicGraphLassoEBIC precision matrix using skggm...\n')
        model.fit(time_series)
        conn_matrix = -model.precision_
    elif conn_model == 'AdaptiveQuicGraphLasso':
        from inverse_covariance import AdaptiveGraphLasso, QuicGraphLassoEBIC
        # Compute the sparse inverse covariance via
        # AdaptiveGraphLasso + QuicGraphLassoEBIC + method='binary'
        # credit: skggm
        model = AdaptiveGraphLasso(
            estimator=QuicGraphLassoEBIC(
                init_method='cov',
            ),
            method='binary',
        )
        print('\nCalculating AdaptiveQuicGraphLasso precision matrix using skggm...\n')
        model.fit(time_series)
        conn_matrix = -model.estimator_.precision_
    return conn_matrix
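# Hedged usage sketch for get_conn_matrix above: time_series is assumed to be an
# (n_timepoints, n_regions) array, e.g. extracted with a nilearn masker; the
# stand-in random signals and the list of models are illustrative only.
import numpy as np

time_series = np.random.RandomState(0).randn(120, 10)  # stand-in BOLD signals
for conn_model in ('corr', 'partcorr', 'sps', 'QuicGraphLassoEBIC'):
    conn_matrix = get_conn_matrix(time_series, conn_model)
    print(conn_model, conn_matrix.shape)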
def fit(self, X=None, y=None):
    n_alpha_grid_points = 4
    self.results_ = np.zeros((n_alpha_grid_points, self.n_grid_points))
    self.grid_ = np.logspace(0, np.log10(200), self.n_grid_points)
    if self.adj_type == 'erdos-renyi':
        self.alphas_ = np.logspace(-2.3, np.log10(.025), n_alpha_grid_points)[::1]
    else:
        self.alphas_ = np.logspace(np.log(.1), np.log10(.3), n_alpha_grid_points)[::1]

    self.ks_ = []
    for aidx, alpha in enumerate(self.alphas_):
        if self.verbose:
            print('at alpha {} ({}/{})'.format(
                alpha,
                aidx,
                n_alpha_grid_points,
            ))

        # draw a new fixed graph for alpha
        cov, prec, adj = new_graph(self.n_features, alpha,
                                   adj_type=self.adj_type,
                                   random_sign=False, seed=1)
        n_nonzero_prec = np.count_nonzero(np.triu(adj, 1).flat)
        self.ks_.append(n_nonzero_prec)
        mcmc_prng = np.random.RandomState(2)
        if self.verbose:
            print('   Graph has {} nonzero entries'.format(n_nonzero_prec))

        for sidx, sample_grid in enumerate(self.grid_):
            n_samples = int(sample_grid * self.n_features)
            # Debugging
            # print(alpha, n_samples)

            # model selection (once)
            X = mvn(n_samples, self.n_features, cov, random_state=mcmc_prng)
            ms_estimator = clone(self.model_selection_estimator)
            ms_estimator.fit(X)
            lam = getattr(ms_estimator, self.penalty_)

            if self.verbose:
                display_lam = lam
                if isinstance(lam, np.ndarray):
                    display_lam = np.linalg.norm(lam)
                print('   ({}/{}), n_samples = {}, selected lambda = {}'.format(
                    sidx, self.n_grid_points, n_samples, display_lam))

            # setup default trial estimator
            if self.trial_estimator is None:
                trial_estimator = QuicGraphLasso(lam=lam,
                                                 mode='default',
                                                 init_method='corrcoef')
            elif self.trial_estimator == 'Adaptive':
                trial_estimator = AdaptiveGraphLasso(
                    estimator=QuicGraphLasso(lam=lam,
                                             mode='default',
                                             init_method='corrcoef'),
                    method='inverse_squared')
            else:
                trial_estimator = self.trial_estimator

            # patch trial estimator with this lambda
            if self.trial_estimator == 'Adaptive':
                trial_estimator.estimator.set_params(**{
                    self.penalty: lam,
                })
            else:
                trial_estimator.set_params(**{
                    self.penalty: lam,
                })

            # estimate statistical power
            exact_support_counts = Parallel(
                n_jobs=self.n_jobs,
                verbose=False,
                backend='threading',
                # max_nbytes=None,
                # batch_size=1,
            )(
                delayed(sp_trial)(
                    trial_estimator, n_samples, self.n_features, cov, adj, mcmc_prng
                )
                for nn in range(self.n_trials))

            self.results_[aidx, sidx] = 1. * np.sum(exact_support_counts) / self.n_trials

        if self.verbose:
            print('Results at this row: {}'.format(self.results_[aidx, :]))

    self.is_fitted = True
    return self
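# Hedged sketch of the sp_trial helper called in fit() above (the real
# implementation may differ): draw a fresh sample from the fixed graph, fit a
# clone of the trial estimator, and report 1 only when the estimated
# off-diagonal support exactly matches the true adjacency. It assumes the trial
# estimator exposes precision_ after fit (for AdaptiveGraphLasso the support
# would instead live on estimator_.precision_).
import numpy as np
from sklearn.base import clone


def sp_trial(trial_estimator, n_samples, n_features, cov, adj, prng):
    X = prng.multivariate_normal(np.zeros(n_features), cov, size=n_samples)
    estimator = clone(trial_estimator)
    estimator.fit(X)
    est_support = np.not_equal(np.triu(estimator.precision_, 1), 0)
    true_support = np.not_equal(np.triu(adj, 1), 0)
    return int(np.array_equal(est_support, true_support))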
def compare_init_adaptive(X, n_samples, n_features, covariance, precision, adjacency, figures):
    f, (ax1, ax2, ax3) = figures

    # Initial Estimator
    initial_estimator = QuicGraphLassoCV(init_method='corrcoef')
    initial_estimator.fit(X)
    prec_hat = initial_estimator.precision_

    # Generate a custom diverging colormap
    cmap = sns.diverging_palette(220, 10, as_cmap=True)
    prec_vmax = np.max(np.triu(prec_hat, 1))
    sns.heatmap(initial_estimator.precision_, cmap=cmap, vmax=prec_vmax,
                square=True, xticklabels=5, yticklabels=5,
                linewidths=.5, cbar_kws={"shrink": .5}, ax=ax1)
    ax1.set_title('Precision Matrix, Initial Estimator')

    # Check Average Power
    err_frob, err_supp, err_fp, err_fn, err_inf = ae_trial(
        trial_estimator=initial_estimator,
        n_samples=n_samples,
        n_features=n_features,
        cov=covariance,
        adj=adjacency,
        random_state=np.random.RandomState(2),
        X=X)
    print('Difference in sparsity: {},{}'.format(
        np.sum(np.not_equal(precision, 0)),
        np.sum(np.not_equal(initial_estimator.precision_, 0))
    ))
    print('Frob Norm: {} ({}), Support Error: {}, False Pos: {}, False Neg: {}'.format(
        err_frob, err_inf, err_supp, err_fp, err_fn
    ))

    # Adaptive Estimator
    twostage = AdaptiveGraphLasso(estimator=initial_estimator, method='inverse')
    twostage.fit(X)
    weighted_estimator = twostage.estimator_
    prec_hat = weighted_estimator.precision_
    prec_vmax = np.max(np.triu(prec_hat, 1))
    sns.heatmap(weighted_estimator.precision_, cmap=cmap, vmax=prec_vmax,
                square=True, xticklabels=5, yticklabels=5,
                linewidths=.5, cbar_kws={"shrink": .5}, ax=ax2)
    ax2.set_title('Precision Matrix, Adaptive Estimator')
    print('Difference in sparsity: {},{}'.format(
        np.sum(np.not_equal(precision, 0)),
        np.sum(np.not_equal(weighted_estimator.precision_, 0))
    ))

    # Check Average Power
    err_frob, err_supp, err_fp, err_fn, err_inf = ae_trial(
        trial_estimator=weighted_estimator,
        n_samples=n_samples,
        n_features=n_features,
        cov=covariance,
        adj=adjacency,
        random_state=np.random.RandomState(2),
        X=X)
    print('Frob Norm: {} ({}), Support Error: {}, False Pos: {}, False Neg: {}'.format(
        err_frob, err_inf, err_supp, err_fp, err_fn
    ))
    print()

    prec_vmax = np.max(np.triu(precision, 1))
    sns.heatmap(adjacency, cmap=cmap, vmax=prec_vmax,
                square=True, xticklabels=5, yticklabels=5,
                linewidths=.5, cbar_kws={"shrink": .5}, ax=ax3)
    ax3.set_title('True Precision')
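# Hedged usage sketch for compare_init_adaptive above: new_graph is assumed to
# be the same helper used in the fit() method earlier (returning cov, prec,
# adj), ae_trial must be importable in scope, and the alpha, sizes, and figure
# layout here are illustrative only.
import matplotlib.pyplot as plt

n_samples, n_features = 75, 20
covariance, precision, adjacency = new_graph(n_features, 0.02,
                                             adj_type='erdos-renyi', seed=1)
X = np.random.RandomState(2).multivariate_normal(
    np.zeros(n_features), covariance, size=n_samples)
figures = plt.subplots(1, 3, figsize=(15, 5))
compare_init_adaptive(X, n_samples, n_features, covariance, precision, adjacency, figures)
plt.show()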
elif estimator_type == 'QuicGraphLassoCV':
    # Compute the sparse inverse covariance via QuicGraphLassoCV
    estimator = QuicGraphLassoCV(init_method='cov', verbose=1)
    estimator.fit(timeseries)

elif estimator_type == 'QuicGraphLassoEBIC':
    # Compute the sparse inverse covariance via QuicGraphLassoEBIC
    estimator = QuicGraphLassoEBIC(init_method='cov', verbose=1)
    estimator.fit(timeseries)

elif estimator_type == 'AdaptiveQuicGraphLasso':
    # Compute the sparse inverse covariance via
    # AdaptiveGraphLasso + QuicGraphLassoEBIC + method='binary'
    model = AdaptiveGraphLasso(
        estimator=QuicGraphLassoEBIC(init_method='cov'),
        method='binary',
    )
    model.fit(timeseries)
    estimator = model.estimator_

# Display the sparse inverse covariance
plt.figure(figsize=(7.5, 7.5))
plt.imshow(np.triu(-estimator.precision_, 1),
           interpolation="nearest",
           cmap=plt.cm.RdBu_r)
plt.title('Precision (Sparse Inverse Covariance) matrix')
plt.colorbar()

# And now display the corresponding graph
plotting.plot_connectome(
    -estimator.precision_,