Code example #1
0
File: review.py  Project: robbisg/mvpa_itab_wu
def loading_data(img_pattern, configuration_file=None):
    """Fetch a preprocessed 'fcmri' dataset for a single .mat file.

    Parameters
    ----------
    img_pattern : str
        Path of the .mat data file; the matching event file is assumed
        to live next to it with the same stem and a ``.txt`` extension
        (``[:-4]`` strips the 4-character ``.mat`` suffix).
    configuration_file : str, optional
        Configuration file handed to ``DataLoader``.  Defaults to the
        module-level ``conf_file`` global, which the original
        implementation read implicitly — kept for backward compatibility.

    Returns
    -------
    The dataset returned by ``DataLoader.fetch``.
    """
    if configuration_file is None:
        # Preserve original behaviour: fall back to the module global.
        configuration_file = conf_file

    loader = DataLoader(configuration_file=configuration_file,
                        loader='mat',
                        task='fcmri',
                        event_file=img_pattern[:-4] + ".txt",
                        img_pattern=img_pattern,
                        atlas='findlab')

    # Identity-only pipeline; the commented nodes record the other
    # preprocessing steps that were tried at some point.
    prepro = PreprocessingPipeline(nodes=[
        Transformer(),
        #Detrender(),
        #SampleZNormalizer(),
        #FeatureZNormalizer()
    ])

    ds = loader.fetch(prepro=prepro)

    return ds
コード例 #2
0
# Dataset location for the viviana-hcp MEG project (BIDS layout).
data_path = '/media/robbis/DATA/meg/viviana-hcp/'

conf_file = "/media/robbis/DATA/meg/viviana-hcp/bids.conf"

# Load the 'blp' task with the HCP band-limited-power load function.
# NOTE(review): bids_derivatives is the *string* 'True', not a boolean —
# presumably the loader compares strings; confirm before changing it.
loader = DataLoader(
    configuration_file=conf_file,
    data_path=data_path,
    subjects="/media/robbis/DATA/meg/viviana-hcp/participants.tsv",
    loader='bids-meg',
    task='blp',
    bids_atlas="complete",
    bids_correction="corr",
    bids_derivatives='True',
    load_fx='hcp-blp')

ds = loader.fetch()
nodes = ds.fa.nodes_1
# Template array with the *unmasked* per-sample feature shape.
matrix = np.zeros_like(ds.samples[0])
# Keep only features that are never NaN across samples:
# isnan(...).sum(0) counts NaNs per feature; logical_not maps 0 -> True.
nanmask = np.logical_not(np.isnan(ds.samples).sum(0))
ds = ds[:, nanmask]

networks = ['AN', 'CON', 'DAN', 'DMN', 'FPN', 'LN', 'MN', 'VAN', 'VFN', 'VPN']
# Wrap each label in a list: the slicers below expect list-valued selectors.
networks = [[n] for n in networks]

# One within-network selection (nodes_1 == nodes_2 == network) per network.
kwargs_list = [{'nodes_1': v, 'nodes_2': v} for v in networks]

############################################################################
# NOTE(review): this dict literal is truncated in this excerpt.
base_config = dict(prepro=['sample_slicer', 'feature_slicer'],
                   estimator=[('clf', LinearRegression())],
                   analysis=TaskPredictionTavor,
                   analysis__n_jobs=-1,
コード例 #3
0
from pyitab.preprocessing.pipelines import PreprocessingPipeline
from pyitab.analysis.iterator import AnalysisIterator
from pyitab.analysis.configurator import AnalysisConfigurator

from pyitab.analysis.pipeline import AnalysisPipeline
# NOTE(review): sklearn.feature_selection.univariate_selection and
# sklearn.svm.classes are private module paths that were removed in
# modern scikit-learn; prefer sklearn.feature_selection / sklearn.svm.
from sklearn.feature_selection.univariate_selection import SelectKBest
from sklearn.model_selection import *
from sklearn.svm.classes import SVC

import _pickle as pickle

loader = DataLoader(
    configuration_file="/home/carlos/fmri/carlo_ofp/ofp_new.conf",
    task='OFP_NORES')
ds = loader.fetch()

decoding = RoiDecoding(n_jobs=20, scoring=['accuracy'])

# One result list per subject, filled per evidence level (1..3).
results = dict()
for subject in np.unique(ds.sa.subject):
    results[subject] = []
    for evidence in [1, 2, 3]:

        # Restrict to this subject/evidence pair, predict 'decision',
        # and undersample to balance the classes.
        # NOTE(review): this pipeline call is truncated in this excerpt.
        pipeline = PreprocessingPipeline(nodes=[
            TargetTransformer('decision'),
            SampleSlicer(**{
                'subject': [subject],
                'evidence': [evidence]
            }),
            Balancer(balancer=RandomUnderSampler(return_indices=True),
コード例 #4
0
                        loader='mat',
                        task='fcmri',
                        atlas='findlab',
                        event_file=m[:-4]+".txt",
                        img_pattern=m)

    # Only sample-wise z-normalization is active; the commented nodes
    # record the alternatives that were tried.
    prepro = PreprocessingPipeline(nodes=[
                                        #Transformer(), 
                                        #Detrender(), 
                                        SampleZNormalizer(),
                                        #FeatureZNormalizer()
                                        ])
    #prepro = PreprocessingPipeline()


    ds = loader.fetch(prepro=prepro)

    # Analysis variants: predict 'expertise' or 'age' on 'Samatha'
    # targets, with or without residualization.
    # NOTE(review): the list literal is truncated in this excerpt.
    _default_options = [
                        {
                            'prepro':['sample_slicer', 'target_transformer'],
                            'target_transformer__attr': 'expertise',
                            'sample_slicer__targets': ['Samatha']
                        },
                        {
                            'prepro':['sample_slicer', 'target_transformer'],
                            'target_transformer__attr': 'age',
                            'sample_slicer__targets': ['Samatha']
                        },
                        {
                            'prepro':['sample_slicer', 'target_transformer', 'sample_residual'],
                            'target_transformer__attr': 'expertise',
Code example #5
0
File: pipeline.py  Project: robbisg/mvpa_itab_wu
from pyitab.preprocessing.math import AbsoluteValueTransformer, SignTransformer
from pyitab.preprocessing.base import Transformer

from pyitab.analysis.states.base import Clustering
from sklearn import cluster, mixture
from joblib import Parallel, delayed

# NOTE(review): the second assignment overwrites the first — the
# working_memory config line is dead code kept for quick switching.
conf_file = "/media/robbis/DATA/fmri/working_memory/working_memory.conf"
conf_file = '/m/home/home9/97/guidotr1/unix/data/simulations/meg/simulations.conf'

loader = DataLoader(configuration_file=conf_file,
                    loader='simulations',
                    task='simulations')

# Identity preprocessing: fetch the simulated data as-is.
ds = loader.fetch(prepro=Transformer())

# Grid of clustering estimators; the step names ('clf1'/'clf5') must
# match the estimator__<name>__<param> keys at the bottom.
_default_options = {
    'estimator': [
        [[('clf1', cluster.MiniBatchKMeans())]],
        [[('clf1', cluster.KMeans())]],
        [[('clf1', cluster.SpectralClustering())]],
        [[('clf1', cluster.AgglomerativeClustering())]],
        [[('clf5', mixture.GaussianMixture())]],
    ],
    # One configuration per subject.
    'sample_slicer__subject': [[trial] for trial in np.unique(ds.sa.subject)],
    'estimator__clf1__n_clusters':
    range(2, 10),
    'estimator__clf5__n_components':
    range(2, 10),
}
コード例 #6
0
from imblearn.over_sampling import SMOTE
import numpy as np

from imblearn.under_sampling import *
from imblearn.over_sampling import *

# NOTE(review): the second assignment overwrites the first; the
# alternative configs are kept for switching between machines.
conf_file = "/home/carlos/mount/megmri03/fmri/carlo_ofp/ofp.conf"
conf_file = "/media/robbis/DATA/fmri/carlo_ofp/ofp.conf"
#conf_file = "/home/carlos/fmri/carlo_ofp/ofp_new.conf"

# conf_file[1] == 'h' is True only for the '/home/...' paths —
# presumably a crude "am I on the remote host?" check; verify.
if conf_file[1] == 'h':
    from mvpa_itab.utils import enable_logging
    root = enable_logging()

loader = DataLoader(configuration_file=conf_file, task='OFP')
ds = loader.fetch()

return_ = True
ratio = 'auto'

# Per-subject configurations comparing under-sampling strategies.
# NOTE(review): return_indices/ratio were deprecated and later removed
# in imbalanced-learn — this code pins an old imblearn version.
# NOTE(review): the dict literal is truncated in this excerpt.
_default_options = {
    'sample_slicer__evidence': [[1]],
    'sample_slicer__subject': [[s] for s in np.unique(ds.sa.subject)],
    'balancer__balancer': [
        AllKNN(return_indices=return_, ratio=ratio),
        CondensedNearestNeighbour(return_indices=return_, ratio=ratio),
        EditedNearestNeighbours(return_indices=return_, ratio=ratio),
        InstanceHardnessThreshold(return_indices=return_, ratio=ratio),
        NearMiss(return_indices=return_, ratio=ratio),
        OneSidedSelection(return_indices=return_, ratio=ratio),
        RandomUnderSampler(return_indices=return_, ratio=ratio,
Code example #7
0
File: gsbs.py  Project: robbisg/mvpa_itab_wu
from pyitab.analysis.configurator import AnalysisConfigurator
from pyitab.preprocessing import SampleSlicer, FeatureSlicer
from pyitab.analysis.roi import RoiAnalyzer
import os
import numpy as np

conf_file = "/home/robbis/mount/permut1/sherlock/bids/bids.conf"
loader = DataLoader(configuration_file=conf_file,
                    loader='bids',
                    task='preproc',
                    bids_task=['day1'])

# Fit a GSBS state-boundary analysis per subject on AAL ROIs,
# keeping only trial types 1..31.
# NOTE(review): GSBS and RoiAnalyzer are not imported in this excerpt —
# presumably imported elsewhere in the original file.
subjects = ['marcer', 'matsim', 'simpas']
for s in subjects:

    ds = loader.fetch(subject_names=[s],
                      prepro=[SampleSlicer(trial_type=np.arange(1, 32))])

    roi_analyzer = RoiAnalyzer(analysis=GSBS())
    roi_analyzer.fit(ds, roi=['aal'], kmax=50)

    roi_analyzer.save()

################## Resting state ##########################
# Chained assignment: conf_file and path alias the same string.
conf_file = path = "/home/robbis/mount/permut1/sherlock/bids/bids.conf"
loader = DataLoader(configuration_file=conf_file,
                    data_path="/home/robbis/mount/permut1/sherlock/bids/",
                    subjects='participants.tsv',
                    loader='bids',
                    task='preproc',
                    bids_task=['day1'])
コード例 #8
0
conf_file = "/media/robbis/DATA/fmri/carlo_mdm/memory.conf"

loader = DataLoader(
    configuration_file=conf_file,
    #loader=load_mat_ds,
    task='BETA_MVPA')

# Detrend, then z-normalize both samples and features.
prepro = PreprocessingPipeline(nodes=[
    #Transformer(),
    Detrender(),
    SampleZNormalizer(),
    FeatureZNormalizer()
])
#prepro = PreprocessingPipeline()

ds = loader.fetch(prepro=prepro)

# Iterated option: slice by accuracy == 1 vs accuracy == 0.
_default_options = {
    #'target_trans__target': ["decision"],
    'sample_slicer__accuracy': [[1], [0]],
}

# Base configuration: decode NEW/OLD decisions with a linear SVM,
# balancing within subject.
# NOTE(review): this dict literal is truncated in this excerpt.
_default_config = {
    'prepro': ['sample_slicer', 'target_transformer', 'balancer'],
    'sample_slicer__decision': ['NEW', 'OLD'],
    'sample_slicer__evidence': [1],
    'sample_slicer__accuracy': [0],
    'target_transformer__target': "decision",
    "balancer__attr": 'subject',
    'estimator': [('clf', SVC(C=1, kernel='linear'))],
    'estimator__clf__C': 1,
コード例 #9
0
                    roi_labels=roi_labels,
                    task='RESIDUALS_MVPA')

# Detrend per chunk, normalize samples/features, keep frames 1..7,
# and downcast to float16 to save memory.
prepro = PreprocessingPipeline(nodes=[
    #Transformer(),
    #Detrender(attr='file'),
    Detrender(attr='chunks'),
    SampleZNormalizer(),
    FeatureZNormalizer(),
    SampleSlicer(frame=[1, 2, 3, 4, 5, 6, 7]),
    #TargetTransformer(attr='decision'),
    MemoryReducer(dtype=np.float16),
    #Balancer(attr='frame'),
])

ds = loader.fetch(prepro=prepro, n_subjects=8)

# Applied again after fetch — presumably fetch widens the dtype; verify.
ds = MemoryReducer(dtype=np.float16).transform(ds)

# All ROI labels except the last one.
labels = list(roi_labels.keys())[:-1]

import sentry_sdk

# NOTE(review): hard-coded Sentry DSN (a credential) committed to
# source — should be moved to an environment variable.
sentry_sdk.init("https://[email protected]/1439199")

# NOTE(review): this list literal is truncated in this excerpt.
_default_options = [
    {
        'target_transformer__attr':
        "image_type",
        'sample_slicer__attr': {
            'image_type': ["I", "O"]
コード例 #10
0
from pyitab.analysis.states.gsbs import GSBS
from pyitab.io.loader import DataLoader

from pyitab.analysis.configurator import AnalysisConfigurator
from pyitab.preprocessing import SampleSlicer, FeatureSlicer
from pyitab.preprocessing.connectivity import SpeedEstimator
import os
import numpy as np

# Load day1 runs for subject 'matsim', keeping trial types 1..31 and
# only the first AAL parcel.
conf_file = path = "/home/robbis/mount/permut1/sherlock/bids/bids.conf"
loader = DataLoader(configuration_file=conf_file,
                    loader='bids',
                    task='preproc',
                    bids_task=['day1'],
                    bids_run=['01', '02', '03'])

ds = loader.fetch(
    subject_names=['matsim'],
    prepro=[SampleSlicer(trial_type=np.arange(1, 32)),
            FeatureSlicer(aal=[1])])

X = ds.samples

# Estimate a timepoint-to-timepoint "speed" signal and mark values more
# than 2 SD above the mean as candidate segment boundaries.
speed = SpeedEstimator().transform(ds)
peaks = speed > np.mean(speed) + 2 * np.std(speed)
peaks_idx = np.nonzero(peaks.flatten())[0]

# Split the timeseries at the detected boundaries and give every
# timepoint the index of the segment it belongs to.
X_ = np.split(X, peaks_idx, axis=0)
# BUG FIX: the original read `for i, x in X_`, which tries to unpack
# each segment array into two names and raises (or silently misbehaves);
# enumerate() supplies the segment index as intended.
cluster = [i * np.ones(x.shape[0]) for i, x in enumerate(X_)]
コード例 #11
0
                          cross_val_multiscore, LinearModel, get_coef,
                          Vectorizer, CSP)
from sklearn.linear_model import LogisticRegression

import warnings
warnings.filterwarnings("ignore")
 
conf_file = "/media/robbis/DATA/meg/c2b/meeting-december-data/bids.conf"

# MEG power data, 300 ms window, session 01.
loader = DataLoader(configuration_file=conf_file, 
                    loader='bids-meg',
                    bids_window='300',
                    bids_ses='01',
                    task='power')

# Identity preprocessing for a single subject.
ds = loader.fetch(subject_names=['sub-109123'], prepro=[Transformer()])
    
# Search space: both sessions, two target pairings (left/right hand,
# left/right foot), and several classifiers.
# NOTE(review): this dict literal is truncated in this excerpt.
_default_options = {
                       
                       'loader__bids_ses': ['01', '02'],
                       
                       'sample_slicer__targets' : [
                           ['LH', 'RH'], 
                           ['LF', 'RF'], 
                           #['LH', 'RH', 'LF', 'RF']
                        ],

                       'estimator__clf': [
                           LinearModel(LogisticRegression(C=1, solver='liblinear')),
                           SVC(C=1, kernel='linear', probability=True),
                           SVC(C=1, gamma=1, kernel='rbf', probability=True),
コード例 #12
0
from pyitab.preprocessing.pipelines import PreprocessingPipeline
from pyitab.preprocessing.functions import Detrender, SampleSlicer, \
    TargetTransformer, Transformer
from pyitab.preprocessing.normalizers import SampleZNormalizer

import warnings
warnings.filterwarnings("ignore")

conf_file = "/media/robbis/DATA/meg/reftep/bids.conf"
loader = DataLoader(configuration_file=conf_file,
                    task='reftep',
                    load_fx='reftep-conn',
                    loader='bids-meg',
                    bids_pipeline='connectivity+lv')

ds = loader.fetch(n_subjects=9)

# Search space: three normalization variants, per-subject slices,
# three feature-selection sizes, and two classifiers.
# NOTE(review): np, LogisticRegression and SVC are not imported in this
# excerpt — presumably imported elsewhere in the original file.
_default_options = {
    'prepro': [
        ['sample_slicer', 'target_transformer'],
        ['sample_slicer', 'feature_znormalizer', 'target_transformer'],
        ['sample_slicer', 'sample_znormalizer', 'target_transformer'],
    ],
    'sample_slicer__subject': [[s] for s in np.unique(ds.sa.subject)],
    'estimator__fsel__k': [50, 100, 150],
    'estimator__clf': [
        LogisticRegression(penalty='l1', solver='liblinear'),
        SVC(C=1, kernel='linear'),
    ],
}