Example #1
import numpy as np

from inverse_covariance import QuicGraphLasso


def show_quic_coefficient_trace(X):
    path = np.logspace(np.log10(0.01), np.log10(1.0), num=50, endpoint=True)[::-1]
    estimator = QuicGraphLasso(
            lam=1.0,
            path=path,
            mode='path')
    estimator.fit(X)
    trace_plot(estimator.precision_, estimator.path_)
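
A minimal usage sketch for the helper above, assuming trace_plot is the plotting utility that ships alongside this skggm example and that the imports shown are in place; the scikit-learn diabetes data stands in for any (n_samples, n_features) matrix:

# Hypothetical usage: fit the full regularization path on example data and
# plot how each precision-matrix coefficient changes along the lambda path.
from sklearn import datasets

X = datasets.load_diabetes().data
show_quic_coefficient_trace(X)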
Example #2
    def test_integration_quic_graph_lasso(self, params_in, expected):
        '''
        Just tests inputs/outputs (not validity of result).
        '''
        X = datasets.load_diabetes().data
        ic = QuicGraphLasso(**params_in)
        ic.fit(X)

        result_vec = [
            np.linalg.norm(ic.covariance_),
            np.linalg.norm(ic.precision_),
            np.linalg.norm(ic.opt_),
            np.linalg.norm(ic.duality_gap_),
        ]
        print(result_vec)
        assert_allclose(expected, result_vec, rtol=1e-1)
Example #3
    def test_invalid_method(self):
        '''
        Test behavior of invalid inputs.
        '''
        X = datasets.load_diabetes().data
        ic = QuicGraphLasso(method='unknownmethod')
        assert_raises(NotImplementedError, ic.fit, X)
Example #4
import numpy as np

from sklearn.model_selection import GridSearchCV

from inverse_covariance import QuicGraphLasso


def quic_graph_lasso(X, num_folds, metric):
    '''Run QuicGraphLasso with mode='default' and use the standard scikit-learn
    GridSearchCV to find the best lambda.

    Primarily demonstrates compatibility with existing scikit-learn tooling.
    '''
    print('QuicGraphLasso + GridSearchCV with:')
    print('   metric: {}'.format(metric))
    search_grid = {
        'lam': np.logspace(np.log10(0.01),
                           np.log10(1.0),
                           num=100,
                           endpoint=True),
        'init_method': ['cov'],
        'score_metric': [metric],
    }
    model = GridSearchCV(QuicGraphLasso(),
                         search_grid,
                         cv=num_folds,
                         refit=True)
    model.fit(X)
    bmodel = model.best_estimator_
    print('   len(cv_lams): {}'.format(len(search_grid['lam'])))
    print('   cv-lam: {}'.format(model.best_params_['lam']))
    print('   lam_scale_: {}'.format(bmodel.lam_scale_))
    print('   lam_: {}'.format(bmodel.lam_))
    return bmodel.covariance_, bmodel.precision_, bmodel.lam_
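
A hedged usage sketch for quic_graph_lasso above; 'log_likelihood' is assumed here to be a valid score_metric value, and the fold count is arbitrary:

# Sketch: run the grid search on example data and inspect the selected lambda.
from sklearn import datasets

X = datasets.load_diabetes().data
covariance, precision, best_lam = quic_graph_lasso(X, num_folds=3, metric='log_likelihood')
print('selected lam: {}'.format(best_lam))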
Example #5
import numpy as np

from inverse_covariance import QuicGraphLasso


def quic_graph_lasso_ebic_manual(X, gamma=0):
    '''Run QuicGraphLasso with mode='path' and gamma; use the EBIC criterion for
    model selection.

    The EBIC criterion is built into the InverseCovarianceEstimator base class,
    so we demonstrate those utilities here.
    '''
    print('QuicGraphLasso (manual EBIC) with:')
    print('   mode: path')
    print('   gamma: {}'.format(gamma))
    model = QuicGraphLasso(lam=1.0,
                           mode='path',
                           init_method='cov',
                           path=np.logspace(np.log10(0.01),
                                            np.log10(1.0),
                                            num=100,
                                            endpoint=True))
    model.fit(X)
    ebic_index = model.ebic_select(gamma=gamma)
    covariance_ = model.covariance_[ebic_index]
    precision_ = model.precision_[ebic_index]
    lam_ = model.lam_at_index(ebic_index)
    print('   len(path lams): {}'.format(len(model.path_)))
    print('   lam_scale_: {}'.format(model.lam_scale_))
    print('   lam_: {}'.format(lam_))
    print('   ebic_index: {}'.format(ebic_index))
    return covariance_, precision_, lam_
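
The gamma argument controls how strongly EBIC penalizes model complexity; a small sketch (reusing the imports above) that compares selections for two gamma values:

# Sketch: a larger gamma generally selects a larger lambda and hence a sparser
# precision matrix; counting off-diagonal nonzeros makes the effect visible.
from sklearn import datasets

X = datasets.load_diabetes().data
for gamma in (0, 0.1):
    _, precision, lam = quic_graph_lasso_ebic_manual(X, gamma=gamma)
    off_diag_nonzeros = np.count_nonzero(precision) - precision.shape[0]
    print('gamma={}: lam={}, off-diagonal nonzeros={}'.format(gamma, lam, off_diag_nonzeros))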
Example #6
def get_conn_matrix(time_series, conn_model):
    import numpy as np
    from nilearn.connectome import ConnectivityMeasure
    from sklearn.covariance import GraphLassoCV

    conn_matrix = None
    if conn_model == 'corr':
        # credit: nilearn
        print('\nComputing correlation matrix...\n')
        conn_measure = ConnectivityMeasure(kind='correlation')
        conn_matrix = conn_measure.fit_transform([time_series])[0]
    elif conn_model == 'partcorr':
        # credit: nilearn
        print('\nComputing partial correlation matrix...\n')
        conn_measure = ConnectivityMeasure(kind='partial correlation')
        conn_matrix = conn_measure.fit_transform([time_series])[0]
    elif conn_model == 'cov' or conn_model == 'sps':
        # Fit estimator to matrix to get sparse matrix
        estimator_shrunk = None
        estimator = GraphLassoCV()
        try:
            print('\nComputing covariance...\n')
            estimator.fit(time_series)
        except RuntimeWarning:
            print('Unstable Lasso estimation--Attempting to re-run by first applying shrinkage...')
            try:
                from sklearn.covariance import GraphLasso, empirical_covariance, shrunk_covariance
                emp_cov = empirical_covariance(time_series)
                for i in np.arange(0.8, 0.99, 0.01):
                    shrunk_cov = shrunk_covariance(emp_cov, shrinkage=i)
                    alphaRange = 10.0 ** np.arange(-8, 0)
                    for alpha in alphaRange:
                        try:
                            estimator_shrunk = GraphLasso(alpha)
                            estimator_shrunk.fit(shrunk_cov)
                            print("Retrying covariance matrix estimate with alpha=%s" % alpha)
                            if estimator_shrunk is None:
                                pass
                            else:
                                break
                        except RuntimeWarning:
                            print("Covariance estimation failed with shrinkage at alpha=%s" % alpha)
                            continue
            except ValueError:
                print('Unstable Lasso estimation! Shrinkage failed. A different connectivity model may be needed.')
        if estimator is None and estimator_shrunk is None:
            raise RuntimeError('ERROR: Covariance estimation failed.')
        if conn_model == 'sps':
            if estimator_shrunk is None:
                print('\nFetching precision matrix from covariance estimator...\n')
                conn_matrix = -estimator.precision_
            else:
                print('\nFetching shrunk precision matrix from covariance estimator...\n')
                conn_matrix = -estimator_shrunk.precision_
        elif conn_model == 'cov':
            if estimator_shrunk is None:
                print('\nFetching covariance matrix from covariance estimator...\n')
                conn_matrix = estimator.covariance_
            else:
                conn_matrix = estimator_shrunk.covariance_
    elif conn_model == 'QuicGraphLasso':
        from inverse_covariance import QuicGraphLasso
        # Compute the sparse inverse covariance via QuicGraphLasso
        # credit: skggm
        model = QuicGraphLasso(
            init_method='cov',
            lam=0.5,
            mode='default',
            verbose=1)
        print('\nCalculating QuicGraphLasso precision matrix using skggm...\n')
        model.fit(time_series)
        conn_matrix = -model.precision_
    elif conn_model == 'QuicGraphLassoCV':
        from inverse_covariance import QuicGraphLassoCV
        # Compute the sparse inverse covariance via QuicGraphLassoCV
        # credit: skggm
        model = QuicGraphLassoCV(
            init_method='cov',
            verbose=1)
        print('\nCalculating QuicGraphLassoCV precision matrix using skggm...\n')
        model.fit(time_series)
        conn_matrix = -model.precision_
    elif conn_model == 'QuicGraphLassoEBIC':
        from inverse_covariance import QuicGraphLassoEBIC
        # Compute the sparse inverse covariance via QuicGraphLassoEBIC
        # credit: skggm
        model = QuicGraphLassoEBIC(
            init_method='cov',
            verbose=1)
        print('\nCalculating QuicGraphLassoEBIC precision matrix using skggm...\n')
        model.fit(time_series)
        conn_matrix = -model.precision_
    elif conn_model == 'AdaptiveQuicGraphLasso':
        from inverse_covariance import AdaptiveGraphLasso, QuicGraphLassoEBIC
        # Compute the sparse inverse covariance via
        # AdaptiveGraphLasso + QuicGraphLassoEBIC + method='binary'
        # credit: skggm
        model = AdaptiveGraphLasso(
                estimator=QuicGraphLassoEBIC(
                    init_method='cov',
                ),
                method='binary',
            )
        print('\nCalculating AdaptiveQuicGraphLasso precision matrix using skggm...\n')
        model.fit(time_series)
        conn_matrix = -model.estimator_.precision_

    return conn_matrix
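
For reference, time_series is expected to be a 2-D array of shape (n_timepoints, n_regions), which is what both nilearn's ConnectivityMeasure and the skggm estimators consume; a minimal sketch on synthetic data:

# Sketch: exercise a couple of the branches with random data.
import numpy as np

rng = np.random.RandomState(0)
time_series = rng.randn(120, 10)  # 120 timepoints, 10 regions

corr_matrix = get_conn_matrix(time_series, conn_model='corr')
sparse_matrix = get_conn_matrix(time_series, conn_model='QuicGraphLassoCV')
print(corr_matrix.shape, sparse_matrix.shape)  # both (10, 10)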
Example #7
def get_conn_matrix(time_series, conn_model):
    import warnings
    warnings.simplefilter("ignore")
    import numpy as np
    from nilearn.connectome import ConnectivityMeasure
    from sklearn.covariance import GraphLassoCV
    try:
        from brainiak.fcma.util import compute_correlation
    except ImportError:
        pass

    if conn_model == 'corr':
        # credit: nilearn
        print('\nComputing correlation matrix...\n')
        conn_measure = ConnectivityMeasure(kind='correlation')
        conn_matrix = conn_measure.fit_transform([time_series])[0]
    elif conn_model == 'corr_fast':
        # credit: brainiak
        try:
            print('\nComputing accelerated fcma correlation matrix...\n')
            conn_matrix = compute_correlation(time_series, time_series)
        except RuntimeError:
            print(
                'Cannot run accelerated correlation computation due to a missing dependency. You need brainiak installed!'
            )
    elif conn_model == 'partcorr':
        # credit: nilearn
        print('\nComputing partial correlation matrix...\n')
        conn_measure = ConnectivityMeasure(kind='partial correlation')
        conn_matrix = conn_measure.fit_transform([time_series])[0]
    elif conn_model == 'tangent':
        # credit: nilearn
        print('\nComputing tangent matrix...\n')
        conn_measure = ConnectivityMeasure(kind='tangent')
        conn_matrix = conn_measure.fit_transform([time_series])[0]
    elif conn_model == 'cov' or conn_model == 'sps':
        # Fit estimator to matrix to get sparse matrix
        estimator = GraphLassoCV()
        try:
            print('\nComputing covariance...\n')
            estimator.fit(time_series)
        except:
            try:
                print(
                    'Unstable Lasso estimation--Attempting to re-run by first applying shrinkage...'
                )
                from sklearn.covariance import GraphLasso, empirical_covariance, shrunk_covariance
                emp_cov = empirical_covariance(time_series)
                for i in np.arange(0.8, 0.99, 0.01):
                    shrunk_cov = shrunk_covariance(emp_cov, shrinkage=i)
                    alphaRange = 10.0**np.arange(-8, 0)
                    for alpha in alphaRange:
                        try:
                            estimator_shrunk = GraphLasso(alpha)
                            estimator_shrunk.fit(shrunk_cov)
                            print(
                                "Calculated graph-lasso covariance matrix for alpha=%s"
                                % alpha)
                            break
                        except FloatingPointError:
                            print("Failed at alpha=%s" % alpha)
                    if estimator_shrunk is None:
                        pass
                    else:
                        break
            except:
                raise ValueError(
                    'Unstable Lasso estimation! Shrinkage failed.')

        if conn_model == 'sps':
            try:
                print(
                    '\nFetching precision matrix from covariance estimator...\n'
                )
                conn_matrix = -estimator.precision_
            except:
                print(
                    '\nFetching shrunk precision matrix from covariance estimator...\n'
                )
                conn_matrix = -estimator_shrunk.precision_
        elif conn_model == 'cov':
            try:
                print(
                    '\nFetching covariance matrix from covariance estimator...\n'
                )
                conn_matrix = estimator.covariance_
            except:
                conn_matrix = estimator_shrunk.covariance_
    elif conn_model == 'QuicGraphLasso':
        from inverse_covariance import QuicGraphLasso
        # Compute the sparse inverse covariance via QuicGraphLasso
        # credit: skggm
        model = QuicGraphLasso(init_method='cov',
                               lam=0.5,
                               mode='default',
                               verbose=1)
        print('\nCalculating QuicGraphLasso precision matrix using skggm...\n')
        model.fit(time_series)
        conn_matrix = -model.precision_

    elif conn_model == 'QuicGraphLassoCV':
        from inverse_covariance import QuicGraphLassoCV
        # Compute the sparse inverse covariance via QuicGraphLassoCV
        # credit: skggm
        model = QuicGraphLassoCV(init_method='cov', verbose=1)
        print(
            '\nCalculating QuicGraphLassoCV precision matrix using skggm...\n')
        model.fit(time_series)
        conn_matrix = -model.precision_

    elif conn_model == 'QuicGraphLassoEBIC':
        from inverse_covariance import QuicGraphLassoEBIC
        # Compute the sparse inverse covariance via QuicGraphLassoEBIC
        # credit: skggm
        model = QuicGraphLassoEBIC(init_method='cov', verbose=1)
        print(
            '\nCalculating QuicGraphLassoEBIC precision matrix using skggm...\n'
        )
        model.fit(time_series)
        conn_matrix = -model.precision_

    elif conn_model == 'AdaptiveQuicGraphLasso':
        from inverse_covariance import AdaptiveGraphLasso, QuicGraphLassoEBIC
        # Compute the sparse inverse covariance via
        # AdaptiveGraphLasso + QuicGraphLassoEBIC + method='binary'
        # credit: skggm
        model = AdaptiveGraphLasso(
            estimator=QuicGraphLassoEBIC(init_method='cov', ),
            method='binary',
        )
        print(
            '\nCalculating AdaptiveQuicGraphLasso precision matrix using skggm...\n'
        )
        model.fit(time_series)
        conn_matrix = -model.estimator_.precision_

    return conn_matrix
Example #8
class TestModelAverage(object):
    @pytest.mark.parametrize("params_in", [
        ({
            'estimator': QuicGraphLasso(),
            'n_trials': 10,
            'normalize': True,
            'subsample': 0.3,
            'penalization': 'random',
        }),
        ({
            'estimator': QuicGraphLasso(lam=0.5, mode='trace'),
            'n_trials': 10,
            'normalize': False,
            'subsample': 0.6,
            'penalization': 'fully-random',
        }),
        ({
            'estimator': QuicGraphLassoCV(),
            'n_trials': 10,
            'normalize': True,
            'subsample': 0.8,
            'lam': 0.1,
            'lam_perturb': 0.1,
            'penalization': 'random',
        }),
        ({
            'estimator': GraphLassoCV(),
            'n_trials': 10,
            'normalize': True,
            'subsample': 0.8,
            'penalization': 'subsampling',
            'penalty_name': 'alpha',
        }),
        ({
            'estimator': QuicGraphLasso(),
            'n_trials': 10,
            'normalize': True,
            'subsample': 0.3,
            'lam': 0.1,
            'lam_perturb': 0.1,
            'penalization': 'random',
            'n_jobs': 2,
        }),
    ])
    def test_integration_quic_graph_lasso_cv(self, params_in):
        '''
        Just tests inputs/outputs (not validity of result).
        '''
        n_features = 10
        n_samples = 10
        cov, prec, adj = ClusterGraph(
            n_blocks=1,
            chain_blocks=False,
            seed=1,
        ).create(n_features, 0.8)
        prng = np.random.RandomState(2)
        X = prng.multivariate_normal(np.zeros(n_features), cov, size=n_samples)

        ma = ModelAverage(**params_in)
        ma.fit(X)

        n_examples, n_features = X.shape

        assert ma.proportion_.shape == (n_features, n_features)
        assert len(ma.estimators_) == ma.n_trials
        assert len(ma.subsets_) == ma.n_trials
        assert len(ma.lams_) == ma.n_trials
        if ma.penalization == 'subsampling':
            assert ma.lams_[0] is None

        for eidx, e in enumerate(ma.estimators_):
            assert isinstance(e, params_in['estimator'].__class__)
            
            # sklearn doesn't have this but ours does
            if hasattr(e, 'is_fitted'):
                assert e.is_fitted

            # check that all lambdas used were different
            if not ma.penalization == 'subsampling' and eidx > 0:
                if hasattr(e, 'lam'):
                    prev_e = ma.estimators_[eidx - 1]
                    assert np.linalg.norm((prev_e.lam - e.lam).flat) > 0

        if ma.normalize:
            assert np.max(ma.proportion_) <= 1.0
        else:
            assert np.max(ma.proportion_) <= ma.n_trials

        assert np.min(ma.proportion_) >= 0.0
        assert np.max(ma.proportion_) > 0.0
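
Outside the test harness, the same ModelAverage pattern yields a per-entry selection frequency that can be thresholded into a stable support estimate; a sketch assuming ModelAverage and QuicGraphLasso are importable from the inverse_covariance package:

# Sketch: proportion_ is an (n_features, n_features) matrix of how often each
# entry was selected across randomized trials; threshold it for a stable support.
from inverse_covariance import ModelAverage, QuicGraphLasso
from sklearn import datasets

X = datasets.load_diabetes().data
ma = ModelAverage(estimator=QuicGraphLasso(), n_trials=50,
                  penalization='random', subsample=0.5)
ma.fit(X)
stable_support = ma.proportion_ > 0.8  # entries selected in more than 80% of trials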
Example #9
    def fit(self, X=None, y=None):
        n_alpha_grid_points = 4

        self.error_fro_ = np.zeros((n_alpha_grid_points, self.n_grid_points))
        self.error_supp_ = np.zeros((n_alpha_grid_points, self.n_grid_points))
        self.error_fp_ = np.zeros((n_alpha_grid_points, self.n_grid_points))
        self.error_fn_ = np.zeros((n_alpha_grid_points, self.n_grid_points))

        self.grid_ = np.linspace(5, 200, self.n_grid_points)
        #self.grid_ = np.logspace(np.log10(2), np.log10(200), self.n_grid_points)
        if self.adj_type == 'erdos-renyi':
            self.alphas_ = np.logspace(-2.3, np.log10(.025), n_alpha_grid_points)[::1]
            # self.alphas_ = np.linspace(0.95, 0.99, n_alpha_grid_points)[::-1]
        else:
            self.alphas_ = np.logspace(np.log10(.15), np.log10(.4), n_alpha_grid_points)[::1]
        self.ks_ = []

        for aidx, alpha in enumerate(self.alphas_):
            if self.verbose:
                print ('at alpha {} ({}/{})'.format(
                    alpha,
                    aidx,
                    n_alpha_grid_points,
                ))

            # draw a new fixed graph for alpha
            cov, prec, adj = new_graph(self.n_features, alpha, adj_type=self.adj_type,
                                       random_sign=False, seed=1)
            n_nonzero_prec = np.count_nonzero(np.triu(adj, 1).flat)
            self.ks_.append(n_nonzero_prec)
            mcmc_prng = np.random.RandomState(2)
            # cov, prec = _new_graph(self.n_features, alpha)
            # n_nonzero_prec = np.count_nonzero(prec.flat)
            # self.ks_.append(n_nonzero_prec)
            
            if self.verbose:
                print ('   Graph has {} nonzero entries'.format(n_nonzero_prec))

            for sidx, sample_grid in enumerate(self.grid_):
                n_samples = int(sample_grid * self.n_features)
                # Debugging
                # print alpha, n_samples
                
                # model selection (once)
                X = mvn(n_samples, self.n_features, cov, random_state=mcmc_prng)
                ms_estimator = clone(self.model_selection_estimator)
                ms_estimator.fit(X)
                lam = getattr(ms_estimator, self.penalty_)
                
                if self.verbose:
                    display_lam = lam
                    if isinstance(lam, np.ndarray):
                        display_lam = np.linalg.norm(lam)
                    print ('   ({}/{}), n_samples = {}, selected lambda = {}'.format(
                            sidx,
                            self.n_grid_points,
                            n_samples,
                            display_lam))

                # setup default trial estimator
                trial_estimator = QuicGraphLasso(lam=lam,
                                                 mode='default',
                                                 init_method='corrcoef')

                # estimate statistical power
                errors = Parallel(
                    n_jobs=self.n_jobs,
                    verbose=False,
                    backend='threading',
                    #max_nbytes=None,
                    #batch_size=1,
                )(
                    delayed(ae_trial)(
                        trial_estimator, n_samples, self.n_features, cov, adj, random_state=mcmc_prng
                    )
                    for nn in range(self.n_trials))

                error_fro, error_supp, error_fp, error_fn, _ = zip(*errors)
                self.error_fro_[aidx, sidx] = np.mean(error_fro)
                self.error_supp_[aidx, sidx] = np.mean(error_supp)
                self.error_fp_[aidx, sidx] = np.mean(error_fp)
                self.error_fn_[aidx, sidx] = np.mean(error_fn)

            if self.verbose:
                print ('Results at this row:')
                print ('   fro = {}'.format(self.error_fro_[aidx, :]))
                print ('   supp = {}'.format(self.error_supp_[aidx, :]))
                print ('   fp = {}'.format(self.error_fp_[aidx, :]))
                print ('   fn = {}'.format(self.error_fn_[aidx, :]))

        self.is_fitted = True
        return self
Example #10
    def fit(self, X=None, y=None):
        n_alpha_grid_points = 4

        self.results_ = np.zeros((n_alpha_grid_points, self.n_grid_points))
        self.grid_ = np.logspace(0, np.log10(200), self.n_grid_points)
        if self.adj_type == 'erdos-renyi':
            self.alphas_ = np.logspace(-2.3, np.log10(.025), n_alpha_grid_points)[::1]
        else:
            self.alphas_ = np.logspace(np.log10(.1), np.log10(.3), n_alpha_grid_points)[::1]

        self.ks_ = []

        for aidx, alpha in enumerate(self.alphas_):
            if self.verbose:
                print ('at alpha {} ({}/{})'.format(
                    alpha,
                    aidx,
                    n_alpha_grid_points,
                ))
            
            # draw a new fixed graph for alpha
            cov, prec, adj = new_graph(self.n_features, alpha, adj_type=self.adj_type,
                                       random_sign=False, seed=1)
            n_nonzero_prec = np.count_nonzero(np.triu(adj, 1).flat)
            self.ks_.append(n_nonzero_prec)
            mcmc_prng = np.random.RandomState(2)
            if self.verbose:
                print ('   Graph has {} nonzero entries'.format(n_nonzero_prec))

            for sidx, sample_grid in enumerate(self.grid_):
                n_samples = int(sample_grid * self.n_features)
                # Debugging
                # print alpha, n_samples
                
                # model selection (once)
                X = mvn(n_samples, self.n_features, cov, random_state=mcmc_prng)
                ms_estimator = clone(self.model_selection_estimator)
                ms_estimator.fit(X)
                lam = getattr(ms_estimator, self.penalty_)
                
                if self.verbose:
                    display_lam = lam
                    if isinstance(lam, np.ndarray):
                        display_lam = np.linalg.norm(lam)
                    print ('   ({}/{}), n_samples = {}, selected lambda = {}'.format(
                            sidx,
                            self.n_grid_points,
                            n_samples,
                            display_lam))

                # setup default trial estimator
                if self.trial_estimator is None:
                    trial_estimator = QuicGraphLasso(lam=lam,
                                                     mode='default',
                                                     init_method='corrcoef')
                elif self.trial_estimator == 'Adaptive':
                    trial_estimator = AdaptiveGraphLasso(
                        estimator=QuicGraphLasso(lam=lam, mode='default', init_method='corrcoef'),
                        method='inverse_squared')
                else:
                    trial_estimator = self.trial_estimator

                # patch trial estimator with this lambda
                if self.trial_estimator == 'Adaptive':
                    trial_estimator.estimator_.set_params(**{
                        self.penalty: lam, 
                    })
                else:
                    trial_estimator.set_params(**{
                        self.penalty: lam, 
                    })
                    

                # estimate statistical power
                exact_support_counts = Parallel(
                    n_jobs=self.n_jobs,
                    verbose=False,
                    backend='threading',
                    #max_nbytes=None,
                    #batch_size=1,
                )(
                    delayed(sp_trial)(
                        trial_estimator, n_samples, self.n_features, cov, adj, mcmc_prng
                    )
                    for nn in range(self.n_trials))

                self.results_[aidx, sidx] = 1. * np.sum(exact_support_counts) / self.n_trials

            if self.verbose:
                print ('Results at this row: {}'.format(self.results_[aidx, :]))

        self.is_fitted = True
        return self
Example #11
class TestModelAverage(object):
    @pytest.mark.parametrize("params_in", [
        ({
            'estimator': QuicGraphLasso(),
            'n_trials': 10,
            'normalize': True,
            'subsample': 0.3,
            'penalization': 'random',
        }),
        ({
            'estimator': QuicGraphLasso(lam=0.5, mode='trace'),
            'n_trials': 15,
            'normalize': False,
            'subsample': 0.6,
            'penalization': 'fully-random',
        }),
        ({
            'estimator': QuicGraphLassoCV(),
            'n_trials': 10,
            'normalize': True,
            'subsample': 0.3,
            'lam': 0.1,
            'lam_perturb': 0.1,
            'penalization': 'random',
            'use_cache': True,
        }),
        ({
            'estimator': GraphLassoCV(),
            'n_trials': 10,
            'normalize': True,
            'subsample': 0.3,
            'penalization': 'subsampling',
            'use_cache': True,
            'penalty_name': 'alpha',
        }),
    ])
    def test_integration_quic_graph_lasso_cv(self, params_in):
        '''
        Just tests inputs/outputs (not validity of result).
        '''
        X = datasets.load_diabetes().data
        ma = ModelAverage(**params_in)
        ma.fit(X)

        n_examples, n_features = X.shape

        assert ma.proportion_.shape == (n_features, n_features)
        if ma.use_cache:
            assert len(ma.estimators_) == ma.n_trials
            assert len(ma.subsets_) == ma.n_trials
            if not ma.penalization == 'subsampling':
                assert len(ma.lams_) == ma.n_trials
            else:
                assert len(ma.lams_) == 0
        else:
            assert len(ma.estimators_) == 0
            assert len(ma.lams_) == 0
            assert len(ma.subsets_) == 0

        for eidx, e in enumerate(ma.estimators_):
            assert isinstance(e, params_in['estimator'].__class__)

            # sklearn doesn't have this but ours does
            if hasattr(e, 'is_fitted'):
                assert e.is_fitted

            # check that all lambdas used were different
            if not ma.penalization == 'subsampling' and eidx > 0:
                if hasattr(e, 'lam'):
                    prev_e = ma.estimators_[eidx - 1]
                    assert np.linalg.norm((prev_e.lam - e.lam).flat) > 0

        if ma.normalize:
            assert np.max(ma.proportion_) <= 1.0
        else:
            assert np.max(ma.proportion_) <= ma.n_trials

        assert np.min(ma.proportion_) >= 0.0
        assert np.max(ma.proportion_) > 0.0
Example #12
                                       detrend=True,
                                       low_pass=0.1,
                                       high_pass=0.01,
                                       t_r=2.5)

timeseries = masker.fit_transform(abide.func[0])

###############################################################################
# Extract and plot sparse inverse covariance

estimator_type = 'QuicGraphLasso'

if estimator_type == 'QuicGraphLasso':
    # Compute the sparse inverse covariance via QuicGraphLasso
    estimator = QuicGraphLasso(init_method='cov',
                               lam=0.5,
                               mode='default',
                               verbose=1)

elif estimator_type == 'QuicGraphLassoCV':
    # Compute the sparse inverse covariance via QuicGraphLassoCV
    estimator = QuicGraphLassoCV(init_method='cov', verbose=1)
    estimator.fit(timeseries)

elif estimator_type == 'QuicGraphLassoEBIC':
    # Compute the sparse inverse covariance via QuicGraphLassoEBIC
    estimator = QuicGraphLassoEBIC(init_method='cov', verbose=1)
    estimator.fit(timeseries)

elif estimator_type == 'AdaptiveQuicGraphLasso':
    # Compute the sparse inverse covariance via
    # AdaptiveGraphLasso + QuicGraphLassoEBIC + method='binary'