Example #1
def analysis(conf, name):
    #print(conf._default_options)
    kwargs = conf._get_kwargs()
    #print(kwargs)
    a = AnalysisPipeline(conf, name=name).fit(**kwargs)
    a.save(path="/media/guidotr1/Seagate_Pt1/data/simulations/")
    return
def analysis(conf, name):
    #print(conf._default_options)
    kwargs = conf._get_kwargs()
    #print(kwargs)
    a = AnalysisPipeline(conf, name=name).fit(**kwargs)
    a.save(path="/media/robbis/DATA/fmri/c2b/")
    return
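
The two definitions above differ only in the save path, so if they live in the same module the second shadows the first. A minimal sketch that keeps a single function (the output_path parameter is our addition):

def analysis(conf, name, output_path):
    # Build the fit kwargs from the configurator and run the pipeline.
    kwargs = conf._get_kwargs()
    a = AnalysisPipeline(conf, name=name).fit(**kwargs)
    # Write results under the caller-supplied directory.
    a.save(path=output_path)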
Example #3
    'estimator__clf__kernel': 'linear',
    'cv': StratifiedKFold,
    'cv__n_splits': 5,
    'scores': ['accuracy'],
    'analysis': SearchLight,
    'analysis__n_jobs': 15,
    'cv_attr': 'subject'
}

iterator = AnalysisIterator(_default_options,
                            AnalysisConfigurator(**_default_config))
#conf = iterator.next()

for conf in iterator:
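    # Each conf is one fully-specified configuration drawn from _default_options.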
    kwargs = conf._get_kwargs()
    a = AnalysisPipeline(conf, name="permut1_balancer_7").fit(ds, **kwargs)
    a.save()

#############################################################
# Haxby dataset #

conf_file = "/media/robbis/DATA/fmri/ds105/ds105.conf"

loader = DataLoader(
    configuration_file=conf_file,
    task='objectviewing',
    loader='bids',
    bids_derivatives='fmriprep',
)

ds = loader.fetch(prepro=StandardPreprocessingPipeline())
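
The fetched dataset feeds the same iterate/fit/save loop used throughout these examples; a minimal sketch, assuming _default_options and _default_config dicts like those above (the pipeline name is hypothetical):

iterator = AnalysisIterator(_default_options,
                            AnalysisConfigurator(**_default_config))
for conf in iterator:
    kwargs = conf._get_kwargs()
    a = AnalysisPipeline(conf, name="haxby+decoding").fit(ds, **kwargs)
    a.save()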
Example #4
    'scores': ['accuracy'],
    'analysis': TemporalDecoding,
    'analysis__n_jobs': 8,
    'analysis__permutation': 0,
    'analysis__verbose': 0,
    'kwargs__roi': ['matrix_values'],
    #'kwargs__cv_attr':'subjects',
}

estimators = []
iterator = AnalysisIterator(_default_options,
                            AnalysisConfigurator,
                            config_kwargs=_default_config)
for conf in iterator:
    kwargs = conf._get_kwargs()
    a = AnalysisPipeline(conf, name="prova").fit(ds, **kwargs)
    a.save(save_estimator=True)
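    # scores appears to be keyed by ROI mask label; grab the fitted estimator of the first fold.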
    est = a._estimator.scores['mask-matrix_values_value-1.0'][0]['estimator']
    estimators.append(est)
    del a

#########################
# Prediction timecourse #

# Load second session
ds_session = SampleSlicer(targets=['LH', 'RH', 'LF', 'RF']).transform(ds)

X = ds_session.samples
y = ds_session.targets

colormap = {
Example #5
def run(n_jobs):
    path = "/media/robbis/Seagate_Pt1/data/working_memory/"

    conf_file = "%s/data/working_memory.conf" % (path)

    ### Load datasets ###

    iterator_kwargs = {
        "loader__img_pattern": [
            #'power_parcel.mat',
            'power_normalized.mat',
            #'connectivity_matrix.mat'
            'mpsi_normalized.mat'
        ],
        "fetch__prepro": [['none'], ['none']],
        "loader__task": ["POWER", "CONN"]
    }

    config_kwargs = {
        'loader': DataLoader,
        'loader__configuration_file': conf_file,
        'loader__loader': 'mat',
        'loader__task': 'POWER',
        #'fetch__n_subjects': 57,
        "loader__data_path": "%s/data/" % (path),
        "loader__subjects": "%s/data/participants.csv" % (path),
    }

    iterator = AnalysisIterator(iterator_kwargs,
                                AnalysisConfigurator,
                                config_kwargs=config_kwargs,
                                kind='list')

    ds_list = [generate(configurator) for configurator in iterator]
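    # With kind='list', the option lists seem to be paired positionally: one dataset for POWER, one for CONN.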

    for i, ds in enumerate(ds_list):
        ds_ = ds.copy()
        if i == 0:
            k = np.arange(1, 88, 10)
            ds_ = DatasetFxNormalizer(ds_fx=np.mean).transform(ds_)
        else:
            k = np.arange(1, 400, 50)
            #ds_ = DatasetFxNormalizer(ds_fx=np.mean).transform(ds_)

        _default_options = {
            #'sample_slicer__targets' : [['0back', '2back'], ['0back', 'rest'], ['rest', '2back']],
            #'kwargs__ds': ds_list,
            'sample_slicer__targets': [['0back'], ['2back']],
            'target_transformer__attr': [
                'accuracy_0back_both', 'accuracy_2back_both', 'rt_0back_both',
                'rt_2back_both'
            ],
            'sample_slicer__band': [['alpha'], ['beta'], ['theta'], ['gamma']],
            'estimator__fsel__k': k,
            'clf__C': [1, 10, 100],
            'clf__kernel': ['linear', 'rbf']
        }

        _default_config = {
            'prepro': ['sample_slicer', 'target_transformer'],
            'sample_slicer__band': ['gamma'],
            'sample_slicer__targets': ['0back', '2back'],
            'estimator': [('fsel', SelectKBest(score_func=f_regression, k=5)),
                          ('clf', SVR(C=10, kernel='linear'))],
            'estimator__clf__C': 1,
            'estimator__clf__kernel': 'linear',
            'cv': GroupShuffleSplit,
            'cv__n_splits': 75,
            'cv__test_size': 0.25,
            'analysis_scoring': ['r2', 'neg_mean_squared_error'],
            'analysis': RoiRegression,
            'analysis__n_jobs': n_jobs,
            'analysis__permutation': 0,
            'analysis__verbose': 0,
            'kwargs__roi': ['matrix_values'],
            'kwargs__cv_attr': 'subjects',
        }

        iterator = AnalysisIterator(_default_options,
                                    AnalysisConfigurator,
                                    config_kwargs=_default_config)

        for conf in iterator:
            kwargs = conf._get_kwargs()
            a = AnalysisPipeline(conf,
                                 name="triton+behavioural").fit(ds_, **kwargs)
            a.save()
            del a
Example #6
    'cv_attr': 'subject',
    'roi': ['lateral_ips'],
    'kwargs__prepro': ['featurenorm', 'samplenorm'],
    'analysis__n_jobs': 2
}

_default_options = {
    'sampleslicer__evidence': [[1], [2], [3]],
    'cv__n_splits': [3, 5],
}

iterator = AnalysisIterator(_default_options,
                            AnalysisConfigurator(**_default_config))
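# _default_options is presumably expanded combinatorially: 3 evidence levels x 2 split counts = 6 configurations.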
for conf in iterator:
    kwargs = conf._get_kwargs()
    a = AnalysisPipeline(conf, name="permut1_wm").fit(ds, **kwargs)
    a.save()

##################################################
# Review
from pyitab.preprocessing.pipelines import PreprocessingPipeline
from pyitab.analysis.iterator import AnalysisIterator
from pyitab.analysis.configurator import AnalysisConfigurator

loader = DataLoader(
    configuration_file="/home/carlos/fmri/carlo_ofp/ofp_new.conf",
    task='OFP_NORES')

prepro = PreprocessingPipeline(nodes=[
    #Transformer(),
    Detrender(),
Example #7
ds = ds[keep_idx]

    
_default_options = {
    'estimator': [
        [('clf1', cluster.KMeans())],
        [('clf1', cluster.AgglomerativeClustering())],
    ],
    'sample_slicer__runs': [[run] for run in np.unique(ds.sa.runs)],
    'estimator__clf1__n_clusters': range(2, 10),
}

_default_config = {
    'prepro': ['sample_slicer'],
    'analysis': Clustering,
}


errors = []
iterator = AnalysisIterator(_default_options, AnalysisConfigurator, config_kwargs=_default_config)
for conf in iterator:
    kwargs = conf._get_kwargs()
    a = AnalysisPipeline(conf, name="states+alessio").fit(ds, **kwargs)
    a.save(path="/media/robbis/DATA/meg/hcp/")


###############################

    'cv__test_size': 0.25,
    'scores': ['accuracy'],
    'analysis': RoiDecoding,
    'analysis__n_jobs': -1,
    'analysis__permutation': 0,
    'analysis__verbose': 0,
    'kwargs__roi': ['matrix_values'],
    'kwargs__cv_attr': 'subjects',
}

iterator = AnalysisIterator(_default_options,
                            AnalysisConfigurator,
                            config_kwargs=_default_config)
for conf in iterator:
    kwargs = conf._get_kwargs()
    a = AnalysisPipeline(conf, name=task + "+review+singleband+plain").fit(
        ds, **kwargs)
    a.save()
    del a

################################################################
_default_options = {
    'sample_slicer__targets': [['0back', '2back']],
    'estimator__fsel__k': np.arange(1, 1200, 20)
}

_default_config = {
    'prepro': ['feature_stacker', 'sample_slicer'],
    'feature_stacker__stack_attr': ['band'],
    'feature_stacker__keep_attr': ['targets', 'subjects'],
    'sample_slicer__targets': ['0back', '2back'],
    'estimator': [('fsel', SelectKBest(k=5)), ('clf', SVC(C=1,
Example #9
    #'kwargs__prepro': ['feature_normalizer', 'sample_normalizer'],
    'kwargs__cv_attr': 'subject'
}

errs = []
import gc

filtered_options = _default_options[1:]

iterator = AnalysisIterator(filtered_options,
                            AnalysisConfigurator(**_default_config),
                            kind='configuration')
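# kind='configuration' seems to consume an explicit list of option sets rather than expanding a grid.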
for conf in iterator:
    kwargs = conf._get_kwargs()
    try:
        a = AnalysisPipeline(conf, name="temporal_decoding").fit(ds, **kwargs)
        a.save()
        gc.collect()
    except Exception as err:
        errs.append([conf._default_options, err])
        capture_exception(err)

########################## Results #######################################
from pyitab.results.bids import get_results_bids

path = '/media/robbis/DATA/fmri/carlo_mdm/derivatives/'
path = '/home/robbis/mount/permut1/fmri/carlo_mdm/derivatives/'

dataframe = get_results_bids(
    path=path,
    pipeline="temporal+decoding",
Example #10
options = dict(
    kwargs__cv_attr=['maintask', 'mainband'],
    estimator__clf=[KNeighborsClassifier(n_neighbors=1, metric=correlation)])

from pyitab.analysis.configurator import AnalysisConfigurator
from pyitab.analysis.iterator import AnalysisIterator
from pyitab.analysis.pipeline import AnalysisPipeline

from sentry_sdk import capture_exception
import sentry_sdk
sentry_sdk.init(
    "https://[email protected]/1439199",
    traces_sample_rate=1.0,
)

iterator = AnalysisIterator(options,
                            AnalysisConfigurator,
                            config_kwargs=base_config)

errs = []

for conf in iterator:
    kwargs = conf._get_kwargs()
    try:
        a = AnalysisPipeline(conf, name='fingerprint+task+mainband').fit(
            ds, **kwargs)
        a.save()
    except Exception as err:
        errs.append([conf._default_options, err])
        capture_exception(err)
Example #11
        'estimator__clf__C': 1,
        'estimator__clf__kernel': 'linear',
        'cv': StratifiedShuffleSplit,
        'cv__n_splits': 100,
        'cv__test_size': 0.25,
        'scores': ['accuracy'],
        'analysis': Decoding,
        'analysis__n_jobs': 10,
        'analysis__permutation': 0,
        'analysis__verbose': 0,
        'kwargs__roi': ['matrix_values'],
        'kwargs__cv_attr': 'name',
    }

    iterator = AnalysisIterator(_default_options,
                                ScriptConfigurator(**_default_config))
    for i, conf in enumerate(iterator):
        kwargs = conf._get_kwargs()
        a = AnalysisPipeline(conf, name=m[:15] + "_EXPvsNOV").fit(ds, **kwargs)
        a.save()
        del a
Example #12
                       'scores': ['accuracy'],
                       'analysis': TemporalDecoding,
                       'analysis__n_jobs': 8,
                       'analysis__permutation': 0,
                       'analysis__verbose': 0,
                       'kwargs__roi': ['matrix_values'],
                       #'kwargs__cv_attr': 'subjects',
                    }

estimators = []
iterator = AnalysisIterator(_default_options, AnalysisConfigurator, config_kwargs=_default_config)
for conf in iterator:
    kwargs = conf._get_kwargs()
    a = AnalysisPipeline(conf, name="cross+session").fit(ds=None, **kwargs)
    a.save(save_estimator=True)
    est = a._estimator.scores['mask-matrix_values_value-1.0'][0]['estimator']
    estimators.append(est)
    del a


#########################
# Prediction timecourse #

# Plot results

# See results.py
colormap = {'LH': 'navy', 'RH': 'firebrick', 'LF': 'cornflowerblue', 'RF': 'salmon'}
colors = [colormap[t] for t in y]
Example #13
                    'analysis': SearchLight,
                    'analysis__n_jobs': 5,
                    'analysis__permutation': 0,
                    'analysis__radius': 9,
                    'analysis__verbose': 0,
                    #'kwargs__cv_attr': ['group', 'subject'],
                    }

iterator = AnalysisIterator(_default_options, AnalysisConfigurator, config_kwargs=_default_config)
for conf in iterator:
    kwargs = conf._get_kwargs()
    a = AnalysisPipeline(conf, name="egg").fit(**kwargs)
    a.save()

##################################
# Analysis 26.11.2020
# 

subjects, _ = load_subject_file("/media/robbis/DATA/fmri/EGG/participants.tsv", delimiter="\t")

_default_options = {
    'loader__task': ['plain'],  # commented alternatives: 'smoothAROMAnonaggr', 'filtered'
    'fetch__subject_names': [[s] for s in subjects]
}
    'analysis__permutation': 0,
    'analysis__verbose': 0,
    'kwargs__roi': ['matrix_values'],
    #'kwargs__cv_attr': 'mep-right',
}

errors = []
iterator = AnalysisIterator(_default_options,
                            AnalysisConfigurator,
                            config_kwargs=_default_config)
for conf in iterator:
    kwargs = conf._get_kwargs()
    #try:
    a = AnalysisPipeline(conf, name="reftep+connectivity+lv").fit(ds, **kwargs)
    a.save()
    #except Exception as err:
    #    capture_exception(err)
    #    errors.append([conf, err])

####################
from pyitab.analysis.results import get_results_bids, filter_dataframe, apply_function
import seaborn as sns
import h5py

pipeline = 'reftep+connectivity+lv'

path = "/media/robbis/DATA/meg/reftep/derivatives/pipeline-" + pipeline
dataframe = get_results_bids(
    path,
Example #15
prepro = PreprocessingPipeline(nodes=[
    #Transformer(),
    Detrender(),
    SampleZNormalizer(),
    FeatureZNormalizer()
])
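# Nodes run in order: detrending first, then sample-wise, then feature-wise z-normalization.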
#prepro = PreprocessingPipeline()


ds = loader.fetch(prepro=prepro)
    
_default_options = {
    'kwargs__use_partialcorr': [True, False],
    'sample_slicer__subject': [[s] for s in np.unique(ds.sa.subject)],
}


_default_config = {
    'prepro': ['sample_slicer'],
    'analysis': TrajectoryConnectivity,
    'kwargs__roi': ['conjunction'],
}

iterator = AnalysisIterator(_default_options, ScriptConfigurator(**_default_config))
for conf in iterator:
    kwargs = conf._get_kwargs()
    a = AnalysisPipeline(conf, name="trajectory_connectivity").fit(ds, **kwargs)
    a.save()
Example #16
def analysis(ds, conf):
    kwargs = conf._get_kwargs()
    a = AnalysisPipeline(conf, name="c2b_01").fit(ds, **kwargs)
    a.save()
    del a
Example #17
    #'cv__n_splits': 50,
    #'cv__test_size': 0.25,
    'scores': ['accuracy'],
    'analysis': SearchLight,
    'analysis__n_jobs': 5,
    'analysis__permutation': 0,
    'analysis__radius': 9,
    'analysis__verbose': 1,
    'kwargs__cv_attr': ['group', 'subject'],
}

iterator = AnalysisIterator(_default_options,
                            AnalysisConfigurator(**_default_config))
for conf in iterator:
    kwargs = conf._get_kwargs()
    a = AnalysisPipeline(conf, name="accuracy").fit(ds, **kwargs)
    a.save()

####################### Roi Analysis ##########################
conf_file = "/media/robbis/DATA/fmri/carlo_mdm/memory.conf"
loader = DataLoader(
    configuration_file=conf_file,
    #loader=load_mat_ds,
    task='BETA_MVPA')

prepro = PreprocessingPipeline(nodes=[
    #Transformer(),
    Detrender(),
    SampleZNormalizer(),
    FeatureZNormalizer()
])
Example #18
    'analysis__verbose': 0,

    #'kwargs__roi': labels,
    #'kwargs__roi_values': [('image+type', [2])],
    #'kwargs__prepro': ['feature_normalizer', 'sample_normalizer'],
    'kwargs__cv_attr': 'subject'
}

import gc
iterator = AnalysisIterator(_default_options,
                            AnalysisConfigurator(**_default_config),
                            kind='configuration')
for conf in iterator:
    kwargs = conf._get_kwargs()

    a = AnalysisPipeline(conf,
                         name="roi_decoding_across_full").fit(ds, **kwargs)
    a.save()
    gc.collect()

################################
path = '/home/robbis/mount/permut1/fmri/carlo_mdm/derivatives/'


def imshow_plot(**kwargs):
    data = kwargs['data']
    print(data)
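    # Stub callback (e.g. for a seaborn FacetGrid.map_dataframe); for now it just prints each data group.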


dataframe = get_results_bids(
    path=path,
    pipeline="roi+decoding+across+full",
Example #19
                       'scores': ['accuracy'],
                       'analysis': TemporalDecoding,
                       'analysis__n_jobs': -1,
                       'analysis__permutation': 0,
                       'analysis__verbose': 0,
                       'kwargs__roi': ['matrix_values'],
                       'kwargs__cv_attr': 'subjects',
                    }

import gc
iterator = AnalysisIterator(_default_options, AnalysisConfigurator, config_kwargs=_default_config)
for conf in iterator:
    kwargs = conf._get_kwargs()
    a = AnalysisPipeline(conf, name="movie+revenge+nofsel").fit(ds, **kwargs)
    a.save()
    del a
    gc.collect()

################################# Results
from pyitab.results.bids import get_results_bids
from pyitab.results.dataframe import apply_function
from pyitab.results.base import filter_dataframe

path = '/media/robbis/Seagate_Pt1/data/Viviana2018/meg/derivatives/'

dataframe = get_results_bids(path=path,  
                             pipeline="movie+revenge",
                             field_list=['sample_slicer'],
                             result_keys=['features'] 
Example #20
    #'cv__test_size': 0.25,
    'scores': ['accuracy'],
    'analysis': SearchLight,
    'analysis__n_jobs': 15,
    'analysis__permutation': 0,
    'analysis__radius': 9,
    'analysis__verbose': 0,
    'kwargs__cv_attr': 'subject',
}

iterator = AnalysisIterator(_default_options,
                            ScriptConfigurator(**_default_config))

for conf in iterator:
    kwargs = conf._get_kwargs()
    a = AnalysisPipeline(conf, name="across_memory").fit(ds, **kwargs)
    a.save()

################# Across Decision ########################
_default_options = {
    'target_trans__target': ["decision"],
}

_default_config = {
    'prepro': ['sample_slicer', 'target_trans', 'balancer'],
    'sample_slicer__decision': ['L', 'F'],
    'sample_slicer__evidence': [1],
    'target_trans__target': "decision",
    "balancer__attr": 'subject',
    'estimator': [('clf', SVC(C=1, kernel='linear'))],
    'estimator__clf__C': 1,
Example #21
                       'scores': ['accuracy'],
                       'analysis': RoiDecoding,
                       'analysis__n_jobs': 5,
                       'analysis__permutation': 0,
                       'analysis__verbose': 0,
                       'kwargs__roi': ['matrix_values'],
                       'kwargs__cv_attr': 'subjects',
                    }

iterator = AnalysisIterator(_default_options, AnalysisConfigurator(**_default_config))
for conf in iterator:
    kwargs = conf._get_kwargs()
    a = AnalysisPipeline(conf, name="wm_mpsi_norm_sign").fit(ds, **kwargs)
    a.save()
    del a


###############################################################
# 2019 #
conf_file =  "/media/robbis/DATA/fmri/working_memory/working_memory.conf"


loader = DataLoader(configuration_file=conf_file, 
                    loader=load_mat_ds,
                    task='MPSI_NORM')

prepro = PreprocessingPipeline(nodes=[
                                      Transformer(), 
Example #22
from sentry_sdk import capture_exception
import sentry_sdk

sentry_sdk.init(
    "https://[email protected]/1439199",
    traces_sample_rate=1.0,
)

errors = []
iterator = AnalysisIterator(_default_options,
                            AnalysisConfigurator,
                            config_kwargs=_default_config)
for conf in iterator:
    kwargs = conf._get_kwargs()
    try:
        a = AnalysisPipeline(conf, name="reftep+nine+subjects").fit(**kwargs)
        a.save(path="/media/robbis/DATA/meg/reftep/derivatives/results/")
    except Exception as err:
        capture_exception(err)
        errors.append([conf, err])

######## Permutations ################

conf_file = "/media/robbis/DATA/meg/reftep/bids.conf"

_default_options = {
    'loader__bids_band': [
        ['alphalow'],
    ],
    'loader__bids_pipeline':
    ['aal+connectivity', 'seed+connectivity', 'sensor+connectivity'],
Example #23
                    'kwargs__cv_attr': 'subject',
                    }

errs = []

filtered_options = [_default_options[2]]

iterator = AnalysisIterator(filtered_options, 
                            AnalysisConfigurator(**_default_config), 
                            kind='configuration')
for conf in iterator:
    kwargs = conf._get_kwargs()
    try:
        a = AnalysisPipeline(conf, name="fingerprint").fit(ds, **kwargs)
        a.save(path="/home/carlos/fmri/carlo_mdm/0_results/")
    except Exception as err:
        errs.append([conf._default_options, err])
        capture_exception(err)


##### Results #####
from pyitab.results.base import filter_dataframe
from pyitab.results.bids import get_searchlight_results_bids
from scipy.stats import zscore


dataframe = get_searchlight_results_bids('/media/robbis/DATA/fmri/carlo_mdm/0_results/derivatives/')
df = filter_dataframe(dataframe, id=['uv5oyc6s'], filetype=['full'])
Example #24
    'analysis__n_jobs': 1,
    'analysis__permutation': 0,
    'analysis__verbose': 0,
    'kwargs__roi': ['matrix_values'],
    #'kwargs__cv_attr': 'mep-right',
}

from sentry_sdk import capture_exception
import sentry_sdk

sentry_sdk.init(
    "https://[email protected]/1439199",
    traces_sample_rate=1.0,
)

errors = []
iterator = AnalysisIterator(_default_options,
                            AnalysisConfigurator,
                            config_kwargs=_default_config)
for conf in iterator:
    kwargs = conf._get_kwargs()
    try:
        a = AnalysisPipeline(conf,
                             name="reftep+iplv+singleregression").fit(**kwargs)
        a.save()
    except Exception as err:
        capture_exception(err)
        errors.append([conf, err])
                        'analysis__permutation': 0,
                        'analysis__verbose': 0,
                        'kwargs__roi': ['matrix_values'],
                        'kwargs__cv_attr': 'subjects',
                        }
    
    
    iterator = AnalysisIterator(_default_options, 
                                AnalysisConfigurator,
                                config_kwargs=_default_config)
    
    for conf in iterator:
        kwargs = conf._get_kwargs()
        a = AnalysisPipeline(conf, name="feature+stacked+600").fit(ds_, **kwargs)
        a.save()
        del a



############################
for i, ds in enumerate(ds_list):
    ds_ = ds.copy()
    if i == 0:
        continue
        k = np.arange(10, 300, 1)
        #ds_ = DatasetFxNormalizer(ds_fx=np.mean).transform(ds_)
    else:
        k = np.arange(5, 600, 7)
        #ds_ = DatasetFxNormalizer(ds_fx=np.mean).transform(ds_)
                   },
                   {
                       'task': ['task5']
                   },
               ])

iterator = AnalysisIterator(options,
                            AnalysisConfigurator,
                            config_kwargs=base_config)

errs = []

for conf in iterator:
    kwargs = conf._get_kwargs()
    try:
        a = AnalysisPipeline(conf, name='fingerprint+tavor').fit(ds, **kwargs)
        a.save()
    except Exception as err:
        errs.append([conf._default_options, err])

#######################################################################################

base_config = dict(prepro=['sample_slicer', 'feature_slicer'],
                   estimator=[('clf', LinearRegression())],
                   analysis=TaskPredictionTavor,
                   analysis__n_jobs=-1,
                   analysis__permutation=0,
                   analysis__verbose=0,
                   kwargs__x_attr={'task': ['rest']},
                   kwargs__prepro=[SampleZNormalizer()])
    feature_slicer__nodes_2=networks,
    estimator__clf=[
        SVC(C=1),
        #SVC(C=1, kernel='linear')
    ])

iterator = AnalysisIterator(options,
                            AnalysisConfigurator,
                            config_kwargs=base_config)

errs = []

for conf in iterator:
    kwargs = conf._get_kwargs()

    a = AnalysisPipeline(conf, name='dexterity+overhyping').fit(ds, **kwargs)
    a.save()

################## Permutations ##################################
base_config = dict(
    prepro=['zfisher', 'sample_slicer', 'balancer', 'feature_slicer'],
    estimator=[  #('fsel', SelectKBest(k=50, score_func=f_oneway)), 
        ('clf', SVC(C=1))
    ],
    balancer__attr='all',
    cv=StratifiedShuffleSplit,
    cv__n_splits=100,
    cv__test_size=.2,
    analysis=RoiDecoding,
    analysis__n_jobs=-1,
    analysis__permutation=1000,