Example 1
def test_spatiotemporal(path, subjects, conf_file, type_, **kwargs):
    
    conf = read_configuration(path, conf_file, type_)
    
    for arg in kwargs:
        conf[arg] = kwargs[arg]
        if arg == 'balance':
            balance = kwargs[arg]
    
    total_results = dict()
    
    data_path = conf['data_path']
    conf['analysis_type'] = 'spatiotemporal'
    conf['analysis_task'] = type_
    
    for subj in subjects:
        try:
            ds = load_dataset(data_path, subj, type_, **conf)
        except Exception as err:
            print err
            continue
        ds = detrend_dataset(ds, type_, **conf)
        
        if 'balance' in locals() and balance == True:
            if conf['label_included'] == 'all' and \
                conf['label_dropped'] == 'none':
                ds = balance_dataset_timewise(ds, 'fixation')
        
        r = spatiotemporal(ds, **conf)
        
        total_results[subj] = r
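A minimal invocation sketch follows (the study path, subject IDs and configuration file name are hypothetical placeholders; only the keyword handling mirrors the function above):

# Hypothetical call: every extra keyword argument is copied into the
# configuration dictionary, and balance=True enables the time-wise
# balancing of 'fixation' when no label filtering is configured.
test_spatiotemporal('/path/to/study',       # study root passed to read_configuration
                    ['subj01', 'subj02'],   # subject identifiers
                    'study.conf',           # configuration file name
                    'task',                 # type_, stored as conf['analysis_task']
                    balance=True)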
Example 2
def test_spatiotemporal(path, subjects, conf_file, type_, **kwargs):

    conf = read_configuration(path, conf_file, type_)

    for arg in kwargs:
        conf[arg] = kwargs[arg]
        if arg == 'balance':
            balance = kwargs[arg]

    total_results = dict()

    data_path = conf['data_path']
    conf['analysis_type'] = 'spatiotemporal'
    conf['analysis_task'] = type_

    for subj in subjects:
        try:
            ds = load_dataset(data_path, subj, type_, **conf)
        except Exception as err:
            print err
            continue
        ds = detrend_dataset(ds, type_, **conf)

        if 'balance' in locals() and balance == True:
            if conf['label_included'] == 'all' and \
                conf['label_dropped'] == 'none':
                ds = balance_dataset_timewise(ds, 'fixation')

        r = spatiotemporal(ds, **conf)

        total_results[subj] = r
Example 3
 def __init__(self, name="searchlight", **kwargs):
     """
     This method is used to set up the configuration of the 
     analysis. 
     """
     
     self.name = name
     
     self._default_conf = {   
                         'path':'/home/robbis/fmri/memory/', # Data
                         'configuration_file':"memory.conf", # Data
                         "project":"carlo_memory", # Data
                         "partecipants": "subjects.csv", # Data+Analysis
                         'data_type': 'BETA_MVPA', # Data
                         'n_folds':3, # Analysis
                         "condition_names":["evidence", "task"], # Data+Analysis
                         'evidence': 3, # Default_value (memory : 3) # Data+Analysis
                         'task':'decision', # Data
                         'split_attr':'subject', # Analysis
                         'mask_area':'intersect', # Data                          
                         'normalization':'both', # Analysis
                         "radius": 3, # Analysis
                         "n_balanced_ds": 1, # Analysis
                         "set_targets": carlo_memory_set_targets, # Data+Analysis (Outdated)
                         "classifier":LinearCSVMC(C=1) # Analysis (Not proper)
                         }
     
     
     
     
     self._default_conf.update(**kwargs)
     
     for k, v in self._default_conf.iteritems():
         setattr(self, "_"+k, v)
     
     
     # Setting default fields #
     if __debug__:
         debug.active += ["SLC"]
     
     
     self._conf = read_configuration(self._path, 
                                    self._configuration_file, 
                                    self._data_type)
     
     self._conf['analysis_type'] = 'searchlight'
     self._conf['analysis_task'] = self._project
     
     # Avoid non serializable objects saving
     self._default_conf.pop("set_targets")
     self._default_conf.pop("classifier")
     
     self._conf.update(**self._default_conf)
         
     self._data_path = self._conf['data_path']
     
     self.result_dict = dict()
     self.maps = []
     
     self._subjects, self._extra_sa = load_subject_file(os.path.join(self._path, self._partecipants))
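Assuming this __init__ belongs to a searchlight analysis class (the class name SearchlightAnalysis below is hypothetical), construction looks roughly like this; every keyword overrides an entry of _default_conf before the configuration file is read and is also exposed as a private attribute:

# Hypothetical instantiation: kwargs update _default_conf, each entry becomes
# an attribute such as self._radius, and read_configuration() is then called
# with the resulting path / configuration_file / data_type values.
analysis = SearchlightAnalysis(name="searchlight",
                               path='/path/to/study',           # placeholder study root
                               configuration_file='study.conf', # placeholder conf file
                               data_type='BETA_MVPA',
                               radius=5,                        # overrides the default of 3
                               n_folds=5)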
Example 4
def test_clustering(path,
                    subjects,
                    analysis,
                    conf_file,
                    source='task',
                    **kwargs):

    if source == 'task':
        target = 'rest'
    else:
        target = 'task'

    conf_src = read_configuration(path, conf_file, source)
    conf_tar = read_configuration(path, conf_file, target)

    ##############################################
    conf_src['label_included'] = 'all'
    conf_src['label_dropped'] = 'none'
    conf_src['mean_samples'] = 'True'
    ##############################################
    for arg in kwargs:
        conf_src[arg] = kwargs[arg]
        conf_tar[arg] = kwargs[arg]

    total_results = dict()

    data_path = conf_src['data_path']

    for subj in subjects:
        try:
            ds_src = load_dataset(data_path, subj, source, **conf_src)
            ds_tar = load_dataset(data_path, subj, target, **conf_tar)
        except Exception as err:
            print err
            continue

        ds_src = detrend_dataset(ds_src, source, **conf_src)
        ds_tar = detrend_dataset(ds_tar, target, **conf_tar)

        if conf_src['label_included'] == 'all' and \
                conf_src['label_dropped'] != 'fixation':
            ds_src = balance_dataset_timewise(ds_src, 'fixation')

        r = clustering_analysis(ds_src, ds_tar, analysis, **kwargs)

        total_results[subj] = r
Example 5
def test_searchlight_cross_decoding(path, subjects, conf_file, type, **kwargs):
    
    conf = read_configuration(path, conf_file, type)
    
    for arg in kwargs:
        conf[arg] = kwargs[arg]
        if arg == 'radius':
            radius = kwargs[arg]
    
    
    debug.active += ["SLC"]
    
    ds_merged = get_merged_ds(path, subjects, conf_file, type, **kwargs)
    
    clf = LinearCSVMC(C=1, probability=1, enable_ca=['probabilities'])
    cv = CrossValidation(clf, NFoldPartitioner(attr='task'))
    
    maps = []
    
    for ds in ds_merged:
                
        ds.targets[ds.targets == 'point'] = 'face'
        ds.targets[ds.targets == 'saccade'] = 'place'
        
        sl = sphere_searchlight(cv, radius, space = 'voxel_indices')
    
        sl_map = sl(ds)
    
        sl_map.samples *= -1
        sl_map.samples +=  1
    
        nif = map2nifti(sl_map, imghdr=ds.a.imghdr)
        
        maps.append(nif)
        
        
    datetime = get_time()
    analysis = 'cross_searchlight'
    mask = conf['mask_area']
    task = type
    
    new_dir = datetime+'_'+analysis+'_'+mask+'_'+task
    command = 'mkdir '+os.path.join(path, '0_results', new_dir)
    os.system(command)
    
    parent_dir = os.path.join(path, '0_results', new_dir)
    
    for s, map in zip(subjects, maps):
        name = s
        command = 'mkdir '+os.path.join(parent_dir, name)
        os.system(command)
        
        results_dir = os.path.join(parent_dir, name)
        fname = name+'_radius_'+str(radius)+'_searchlight_map.nii.gz'
        map.to_filename(os.path.join(results_dir, fname))
        
    
    return maps
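The result directories above are created by shelling out to mkdir, which fails silently when the directory already exists and breaks on paths containing spaces; a purely Python alternative (a sketch, not part of the original code) could be:

import os

# Create the per-subject results directory without spawning a shell;
# the isdir check emulates exist_ok, which is unavailable in Python 2.
results_dir = os.path.join(path, '0_results', new_dir, name)
if not os.path.isdir(results_dir):
    os.makedirs(results_dir)   # also creates missing intermediate directories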
Example 6
def test_searchlight_cross_decoding(path, subjects, conf_file, type, **kwargs):

    conf = read_configuration(path, conf_file, type)

    for arg in kwargs:
        conf[arg] = kwargs[arg]
        if arg == 'radius':
            radius = kwargs[arg]

    debug.active += ["SLC"]

    ds_merged = get_merged_ds(path, subjects, conf_file, type, **kwargs)

    clf = LinearCSVMC(C=1, probability=1, enable_ca=['probabilities'])
    cv = CrossValidation(clf, NFoldPartitioner(attr='task'))

    maps = []

    for ds in ds_merged:

        ds.targets[ds.targets == 'point'] = 'face'
        ds.targets[ds.targets == 'saccade'] = 'place'

        sl = sphere_searchlight(cv, radius, space='voxel_indices')

        sl_map = sl(ds)

        sl_map.samples *= -1
        sl_map.samples += 1

        nif = map2nifti(sl_map, imghdr=ds.a.imghdr)

        maps.append(nif)

    datetime = get_time()
    analysis = 'cross_searchlight'
    mask = conf['mask_area']
    task = type

    new_dir = datetime + '_' + analysis + '_' + mask + '_' + task
    command = 'mkdir ' + os.path.join(path, '0_results', new_dir)
    os.system(command)

    parent_dir = os.path.join(path, '0_results', new_dir)

    for s, map in zip(subjects, maps):
        name = s
        command = 'mkdir ' + os.path.join(parent_dir, name)
        os.system(command)

        results_dir = os.path.join(parent_dir, name)
        fname = name + '_radius_' + str(radius) + '_searchlight_map.nii.gz'
        map.to_filename(os.path.join(results_dir, fname))

    return maps
Example 7
def test_clustering(path, subjects, analysis, conf_file, source='task', **kwargs):    
    
    if source == 'task':
        target = 'rest'
    else:
        target = 'task'
     
    conf_src = read_configuration(path, conf_file, source)
    conf_tar = read_configuration(path, conf_file, target)
    
    ##############################################
    conf_src['label_included'] = 'all'
    conf_src['label_dropped'] = 'none'
    conf_src['mean_samples'] = 'True'
    ##############################################
    for arg in kwargs:
        conf_src[arg] = kwargs[arg]
        conf_tar[arg] = kwargs[arg]
        
    total_results = dict()
    
    data_path = conf_src['data_path']
    
    for subj in subjects:
        try:
            ds_src = load_dataset(data_path, subj, source, **conf_src)
            ds_tar = load_dataset(data_path, subj, target, **conf_tar)
        except Exception as err:
            print err
            continue
        
        ds_src = detrend_dataset(ds_src, source, **conf_src)
        ds_tar = detrend_dataset(ds_tar, target, **conf_tar) 
        
        if conf_src['label_included'] == 'all' and \
                conf_src['label_dropped'] != 'fixation':
                ds_src = balance_dataset_timewise(ds_src, 'fixation')
        
        r = clustering_analysis(ds_src, ds_tar, analysis, **kwargs)
        
        total_results[subj] = r
Example 8
def _test_spatial(path, subjects, conf_file, type_, **kwargs):
    warnings.warn("Deprecated use test_spatial.", DeprecationWarning)

    conf = read_configuration(path, conf_file, type_)
    conf['analysis_type'] = 'spatial'
    conf['analysis_task'] = type_

    for arg in kwargs:
        conf[arg] = kwargs[arg]

    total_results = dict()

    data_path = conf['data_path']

    summarizers = [rs.DecodingSummarizer()]
    savers = [rs.DecodingSaver()]
    result = rs.ResultsCollection(conf, path, summarizers)

    for subj in subjects:
        print '------'
        try:
            ds = load_dataset(data_path, subj, type_, **conf)
        except Exception as err:
            print err
            continue

        ds = detrend_dataset(ds, type_, **conf)

        balancer = balance_dataset(**conf)

        for i, ds_ in enumerate(balancer.generate(ds)):
            logger.info("Balanced dataset n. %d" % (i + 1))
            subj_ = "%s_%03d" % (subj, i + 1)

            ds_ = normalize_dataset(ds_, **conf)

            logger.info(ds_.summary())

            r = spatial(ds_, **conf)
            total_results[subj_] = r

            subj_result = rs.SubjectResult(subj_, r, savers)

            result.add(subj_result)

    #result.save()
    result.summarize()

    conf['classes'] = np.unique(ds.targets)
    #save_results()
    #save_results(path, total_results, conf)

    return total_results, subj_result
Example 9
def _test_spatial(path, subjects, conf_file, type_, **kwargs):
    warnings.warn("Deprecated use test_spatial.", DeprecationWarning)
    
    conf = read_configuration(path, conf_file, type_)
    conf['analysis_type'] = 'spatial'
    conf['analysis_task'] = type_
    
    for arg in kwargs:
        conf[arg] = kwargs[arg]
    
    total_results = dict()
    
    data_path = conf['data_path']
        
    summarizers = [rs.DecodingSummarizer()]
    savers = [rs.DecodingSaver()]
    result = rs.ResultsCollection(conf, path, summarizers)
    
    for subj in subjects:
        print '------'
        try:
            ds = load_dataset(data_path, subj, type_, **conf)
        except Exception as err:
            print err
            continue
        
        
        ds = detrend_dataset(ds, type_, **conf)
        
        balancer = balance_dataset(**conf)
        
        for i, ds_ in enumerate(balancer.generate(ds)):
            logger.info("Balanced dataset n. %d" % (i+1))
            subj_ = "%s_%03d" % (subj, i+1)
            
            ds_ = normalize_dataset(ds_, **conf)
            
            logger.info(ds_.summary())
            
            r = spatial(ds_, **conf)
            total_results[subj_] = r
        
            subj_result = rs.SubjectResult(subj_, r, savers)
        
            result.add(subj_result)
Example 10
def test_searchlight(path, subjects, conf_file, type_, **kwargs):
    
    
    conf = read_configuration(path, conf_file, type_)
    
    for arg in kwargs:
        conf[arg] = kwargs[arg]
    
    
    conf['analysis_type'] = 'searchlight'
    conf['analysis_task'] = type_
    
    total_results = dict()
    data_path = conf['data_path']
    
    #
    summarizers = [rs.SearchlightSummarizer()]
    savers = [rs.SearchlightSaver()]
    result = rs.ResultsCollection(conf, path, summarizers)
    #
    
    for subj in subjects:
        
        ds = load_dataset(data_path, subj, type_, **conf)
        ds = detrend_dataset(ds, type_, **conf)
        
        r = searchlight(ds, **kwargs)
        
        subj_result = rs.SubjectResult(subj, r, savers)
        total_results[subj] = r
        
        result.add(subj_result)
    
    result.summarize()
    

    conf['classes'] = np.unique(ds.targets)  
    #save_results()
    #save_results(path, total_results, conf)
    
    return result, r, subj_result
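A hedged usage sketch (path, subjects and configuration file are placeholders); note that the extra keyword arguments are forwarded verbatim to searchlight(ds, **kwargs) rather than through the merged conf, so they must match whatever searchlight() accepts:

# Hypothetical call, assuming searchlight() takes a 'radius' keyword
# as suggested by the other examples in this module.
result, r, subj_result = test_searchlight('/path/to/study',
                                          ['subj01', 'subj02'],
                                          'study.conf',
                                          'BETA_MVPA',   # type_, stored as conf['analysis_task']
                                          radius=3)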
Example 11
def test_searchlight(path, subjects, conf_file, type_, **kwargs):

    conf = read_configuration(path, conf_file, type_)

    for arg in kwargs:
        conf[arg] = kwargs[arg]

    conf['analysis_type'] = 'searchlight'
    conf['analysis_task'] = type_

    total_results = dict()
    data_path = conf['data_path']

    #
    summarizers = [rs.SearchlightSummarizer()]
    savers = [rs.SearchlightSaver()]
    result = rs.ResultsCollection(conf, path, summarizers)
    #

    for subj in subjects:

        ds = load_dataset(data_path, subj, type_, **conf)
        ds = detrend_dataset(ds, type_, **conf)

        r = searchlight(ds, **kwargs)

        subj_result = rs.SubjectResult(subj, r, savers)
        total_results[subj] = r

        result.add(subj_result)

    result.summarize()

    conf['classes'] = np.unique(ds.targets)
    #save_results()
    #save_results(path, total_results, conf)

    return result, r, subj_result
Example 12
 '111123roblan',
 '111129mirgra',
 '111202cincal',
 '111206marant',
 '111214angfav',
 '111214cardin',
 '111220fraarg',
 '111220martes',
 '120119maulig',
 '120126andspo',
 '120112jaclau']  
tasks = ['memory', 'decision']
res = []
result_dict = dict()
 
conf = read_configuration(path, 'memory.conf', 'BETA_MVPA')

conf['analysis_type'] = 'searchlight'
conf['analysis_task'] = 'memory'

summarizers = [rs.SearchlightSummarizer()]
savers = [rs.SearchlightSaver()]
collection = rs.ResultsCollection(conf, path, summarizers)


for subj in subjects:
    
    data_type = 'BETA_MVPA'
    conf = read_configuration(path, 'memory.conf', data_type)
    data_path = conf['data_path']
    ds_original = load_dataset(data_path, subj, data_type, **conf)
Example 13
def test_searchlight_similarity(path,
                                subjects, 
                                conf_file, 
                                type, 
                                dim=4,
                                measure=CorrelationThresholdMeasure(),
                                partitioner=TargetCombinationPartitioner(attr='targets'),
                                **kwargs):
    
    conf = read_configuration(path, conf_file, type)
    
    for arg in kwargs:
        conf[arg] = kwargs[arg]
        if arg == 'radius':
            radius = kwargs[arg]
        if arg == 'duration':
            duration = kwargs[arg]
            
    #########################################################
    datetime = get_time()
    analysis = 'searchlight_measure'
    mask = conf['mask_area']
    task = type
    
    new_dir = datetime+'_'+analysis+'_'+mask+'_'+task
    command = 'mkdir '+os.path.join(path, '0_results', new_dir)
    os.system(command)
    
    parent_dir = os.path.join(path, '0_results', new_dir)       
    ##########################################################
    
    debug.active += ["SLC"]
    
    ds_merged = get_merged_ds(path, subjects, conf_file, type, dim=dim, **kwargs)
    
    cv = CrossValidation(measure, 
                         partitioner,
                         splitter=Splitter(attr='partitions', attr_values=(3,2)),
                         errorfx=None
                         )
    
    maps = []
    kwa = dict(voxel_indices=Sphere(radius), 
               event_offsetidx=Sphere(duration))
    queryengine = IndexQueryEngine(**kwa)
        
    for s, ds in zip(subjects, ds_merged):
        
        print ds.samples.shape
        
        voxel_num = ds.samples.shape[1]/duration
        ids = np.arange(voxel_num)        
        sl = Searchlight(cv, queryengine=queryengine, roi_ids=ids)
    
        map = sl(ds)
            
        maps.append(map)

        name = s
        command = 'mkdir '+os.path.join(parent_dir, name)
        os.system(command)
        
        results_dir = os.path.join(parent_dir, name)
        fname = name+'_radius_'+str(radius)+'_searchlight_map.nii.gz'        
        map_ = ds.a.mapper[:2].reverse(map)
        map_ = np.rollaxis(map_.samples, 0, 4)
        img = ni.Nifti1Image(map_, ds.a.imghdr.get_base_affine())
        ni.save(img, os.path.join(results_dir, fname))
    
    return maps
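radius and duration are only bound when they appear in **kwargs, so both must be passed explicitly or the Sphere construction fails with a NameError; a hedged call sketch with placeholder values:

# Hypothetical call: radius (voxels) feeds the spatial sphere and duration
# (event offsets) the temporal sphere of the IndexQueryEngine.
maps = test_searchlight_similarity('/path/to/study',
                                   ['subj01', 'subj02'],
                                   'study.conf',
                                   'task',
                                   dim=4,
                                   radius=3,
                                   duration=4)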
Example 14
def test_transfer_learning(path, subjects, analysis,  conf_file, source='task', \
                           analysis_type='single', calculateSimilarity='True', **kwargs):

    if source == 'task':
        target = 'rest'
    else:
        if source == 'rest':
            target = 'task'

    if source == 'saccade':
        target = 'face'
    else:
        if source == 'face':
            target = 'saccade'

    p = kwargs['p']
    ##############################################
    ##############################################
    ##    conf_src['label_included'] = 'all'    ##
    ##    conf_src['label_dropped'] = 'none'    ##
    ##    conf_src['mean_samples'] = 'False'    ##
    ##############################################
    ##############################################

    if analysis_type == 'group':

        if path.__class__ == conf_file.__class__ == list:
            ds_src, _, conf_src = sources_merged_ds(path, subjects, conf_file,
                                                    source, **kwargs)
            ds_tar, subjects, conf_tar = sources_merged_ds(
                path, subjects, conf_file, target, **kwargs)

            conf_src['permutations'] = 0
            conf_tar['permutations'] = 0
        else:
            print 'In group analysis, path, subjects and conf_file must be lists: ' \
                  'check the configuration file and/or parameters!'

            return 0

    else:

        conf_src = read_configuration(path, conf_file, source)
        conf_tar = read_configuration(path, conf_file, target)

        for arg in kwargs:
            conf_src[arg] = kwargs[arg]
            conf_tar[arg] = kwargs[arg]

        data_path = conf_src['data_path']

    conf_src['analysis_type'] = 'transfer_learning'
    conf_src['analysis_task'] = source
    conf_src['analysis_func'] = analysis.func_name

    for arg in conf_src:
        if arg == 'map_list':
            map_list = conf_src[arg].split(',')
        if arg == 'p_dist':
            p = float(conf_src[arg])
            print p

    total_results = dict()

    summarizers = [
        rs.CrossDecodingSummarizer(),
        rs.SimilaritySummarizer(),
        rs.DecodingSummarizer(),
        rs.SignalDetectionSummarizer(),
    ]

    savers = [
        rs.CrossDecodingSaver(),
        rs.SimilaritySaver(),
        rs.DecodingSaver(),
        rs.SignalDetectionSaver(),
    ]

    collection = rs.ResultsCollection(conf_src, path, summarizers)

    for subj in subjects:
        print '-------------------'

        if (len(subjects) > 1) or (subj != 'group'):
            try:
                ds_src = load_dataset(data_path, subj, source, **conf_src)
                ds_tar = load_dataset(data_path, subj, target, **conf_tar)
            except Exception as err:
                print err
                continue

        # Evaluate whether it is correct to do further normalization after merging the two datasets.
        ds_src = detrend_dataset(ds_src, source, **conf_src)
        ds_tar = detrend_dataset(ds_tar, target, **conf_tar)

        if conf_src['label_included'] == 'all' and \
           conf_src['label_dropped'] != 'fixation':
            print 'Balancing dataset...'
            ds_src = balance_dataset_timewise(ds_src, 'fixation')

        # Make cross-decoding
        r = transfer_learning(ds_src, ds_tar, analysis, **conf_src)

        # Now that we have the cross-decoding results, we can process them
        pred = np.array(r['classifier'].ca.predictions)

        targets = r['targets']

        c_m = ConfusionMatrix(predictions=pred, targets=targets)
        c_m.compute()
        r['confusion_target'] = c_m

        c_new = cross_decoding_confusion(pred, targets, map_list)
        r['confusion_total'] = c_new

        print c_new

        # Similarity Analysis
        if calculateSimilarity == 'True':
            if 'p' not in locals():
                print 'Ciao!'

            mahala_data = similarity_measure(r['ds_tar'],
                                             r['ds_src'],
                                             r,
                                             p_value=p,
                                             method='mahalanobis')

            #r['mahalanobis_similarity'] = mahala_data
            for k_, v_ in mahala_data.items():
                r[k_] = v_
            r['confusion_mahala'] = mahala_data['confusion_mahalanobis']

        else:
            #r['mahalanobis_similarity'] = []
            r['confusion_mahala'] = 'Null'

        # Signal Detection Theory Analysis
        sdt_res = signal_detection_measures(c_new)

        for k_, v_ in sdt_res.items():
            r[k_] = v_
            '''
            Equivalent to:
        
            r['d_prime'] = d_prime
            r['beta'] = beta
            r['c'] = c
            '''

        total_results[subj] = r
        subj_result = rs.SubjectResult(subj, r, savers=savers)

        collection.add(subj_result)
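The function relies on a few inputs that are easy to miss: kwargs must contain 'p' (overridden by a 'p_dist' entry in the configuration, if present) and the configuration must provide a comma-separated 'map_list', otherwise cross_decoding_confusion() fails. A hedged single-subject call sketch with placeholder values:

# Hypothetical call; 'spatial' stands in for whatever decoding callable
# the pipeline expects (its func_name is stored in the configuration).
test_transfer_learning('/path/to/study',
                       ['subj01', 'subj02'],
                       spatial,                    # analysis callable
                       'study.conf',
                       source='task',              # target becomes 'rest'
                       analysis_type='single',
                       calculateSimilarity='True',
                       p=0.05)                     # required similarity p-value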
Example 15
def test_transfer_learning(path, subjects, analysis,  conf_file, source='task', \
                           analysis_type='single', calculateSimilarity='True', **kwargs):
    
    if source == 'task':
        target = 'rest'
    else:
        if source == 'rest':
            target = 'task'
    
    
    if source == 'saccade':
        target = 'face'
    else:
        if source == 'face':
            target = 'saccade'
    
    p = kwargs['p']
    ##############################################    
    ##############################################
    ##    conf_src['label_included'] = 'all'    ##   
    ##    conf_src['label_dropped'] = 'none'    ##
    ##    conf_src['mean_samples'] = 'False'    ##
    ##############################################
    ##############################################

    if analysis_type == 'group':
        
        if path.__class__ == conf_file.__class__ == list:  
            ds_src, _, conf_src = sources_merged_ds(path, subjects, conf_file, source, **kwargs)
            ds_tar, subjects, conf_tar = sources_merged_ds(path, subjects, conf_file, target, **kwargs)
            
            conf_src['permutations'] = 0
            conf_tar['permutations'] = 0
        else:
            print 'In group analysis, path, subjects and conf_file must be lists: ' \
                  'check the configuration file and/or parameters!'
            return 0
    
    else:
        
        conf_src = read_configuration(path, conf_file, source)
        conf_tar = read_configuration(path, conf_file, target)
    
        for arg in kwargs:
            conf_src[arg] = kwargs[arg]
            conf_tar[arg] = kwargs[arg]
        
        
        data_path = conf_src['data_path']
    
    
    conf_src['analysis_type'] = 'transfer_learning'
    conf_src['analysis_task'] = source
    conf_src['analysis_func'] = analysis.func_name
    
    
    for arg in conf_src:
        if arg == 'map_list':
            map_list = conf_src[arg].split(',')
        if arg == 'p_dist':
            p = float(conf_src[arg])
            print p
    
    
    total_results = dict()
    
    
    
    
    summarizers = [rs.CrossDecodingSummarizer(),
                   rs.SimilaritySummarizer(),
                   rs.DecodingSummarizer(),
                   rs.SignalDetectionSummarizer(),
                   ]
    
    savers = [rs.CrossDecodingSaver(),
                   rs.SimilaritySaver(),
                   rs.DecodingSaver(),
                   rs.SignalDetectionSaver(),
                   ]
    
    collection = rs.ResultsCollection(conf_src, path, summarizers)
    
    
    for subj in subjects:
        print '-------------------'
        
        if (len(subjects) > 1) or (subj != 'group'):
            try:
                ds_src = load_dataset(data_path, subj, source, **conf_src)
                ds_tar = load_dataset(data_path, subj, target, **conf_tar)
            except Exception as err:
                print err
                continue
         
        # Evaluate whether it is correct to do further normalization after merging the two datasets.
        ds_src = detrend_dataset(ds_src, source, **conf_src)
        ds_tar = detrend_dataset(ds_tar, target, **conf_tar) 
        
        if conf_src['label_included'] == 'all' and \
           conf_src['label_dropped'] != 'fixation':
                print 'Balancing dataset...'
                ds_src = balance_dataset_timewise(ds_src, 'fixation')        
        
        # Make cross-decoding
        r = transfer_learning(ds_src, ds_tar, analysis, **conf_src)
        
        
        
        
        # Now that we have the cross-decoding results, we can process them
        pred = np.array(r['classifier'].ca.predictions)

        targets = r['targets']
        
        c_m = ConfusionMatrix(predictions=pred, targets=targets)
        c_m.compute()
        r['confusion_target'] = c_m
        
        c_new = cross_decoding_confusion(pred, targets, map_list)
        r['confusion_total'] = c_new
        
        print c_new
        
        # Similarity Analysis
        if calculateSimilarity == 'True':
            if 'p' not in locals():
                print 'Ciao!'

            
            mahala_data = similarity_measure(r['ds_tar'], r['ds_src'], 
                                             r, p_value=p, method='mahalanobis')
            
            #r['mahalanobis_similarity'] = mahala_data
            for k_,v_ in mahala_data.items():
                r[k_] = v_
            r['confusion_mahala'] = mahala_data['confusion_mahalanobis']
        
        else:
            #r['mahalanobis_similarity'] = []
            r['confusion_mahala'] = 'Null'
        
        # Signal Detection Theory Analysis
        sdt_res = signal_detection_measures(c_new)
        
        for k_,v_ in sdt_res.items():
            r[k_] = v_
            
            '''
            Equivalent to:
        
            r['d_prime'] = d_prime
            r['beta'] = beta
            r['c'] = c
            '''
        
        total_results[subj] = r
        subj_result = rs.SubjectResult(subj, r, savers=savers)
        
        collection.add(subj_result)
Example 16
frames = [0]  #[1,2,3,4,5,6]
mask = ni.load(
    p.join(path, '1_single_ROIs',
           'cluster_conjunction_level-0-003_omnibus-0-001.nii.gz'))

subjects, extra_sa = load_subject_file(p.join(path, 'subjects.csv'))

target = 'decision'
selected_variables = {
    target: ['L', 'F'],
}

evidences = [1, 2, 3]

# Setup result utilities
conf = read_configuration(path, conf_file, task)
conf['analysis_type'] = 'searchlight'
conf['analysis_task'] = 'temporal_residual'
summarizers = [rs.SearchlightSummarizer()]
#savers = [rs.DecodingSaver(fields=['classifier', 'stats'])]
savers = [rs.SearchlightSaver()]
result = rs.ResultsCollection(conf, path, summarizers)

for subj in subjects[:1]:

    conf = read_configuration(path, conf_file, task)

    # Load dataset
    ds_orig = load_dataset(path,
                           subj,
                           task,
Example 17
def get_merged_ds(path, subjects, conf_file, source='task', dim=3, **kwargs):
    
    
    # Put source and target in the conf!
    if source == 'task':
        target = 'rest'
    else:
        if source == 'rest':
            target = 'task'
    
    
    if source == 'saccade':
        target = 'face'
    else:
        if source == 'face':
            target = 'saccade'
    
    ds_merged_list = []
    conf_src = read_configuration(path, conf_file, source)
    conf_tar = read_configuration(path, conf_file, target)
    
    ##############################################    
    ##############################################
    ##    conf_src['label_included'] = 'all'    ##   
    ##    conf_src['label_dropped'] = 'none'    ##
    ##    conf_src['mean_samples'] = 'False'    ##
    ##############################################
    ##############################################
    
    for arg in kwargs:
        conf_src[arg] = kwargs[arg]
        conf_tar[arg] = kwargs[arg]
    
    data_path = conf_src['data_path']
    
    for subj in subjects:
        print '--------'
        try:
            ds_src = load_dataset(data_path, subj, source, **conf_src)
            ds_tar = load_dataset(data_path, subj, target, **conf_tar)
        except Exception as err:
            print err
            continue
        
        ds_src = detrend_dataset(ds_src, source, **conf_src)
        ds_tar = detrend_dataset(ds_tar, target, **conf_tar) 

        if dim == 4:    
            duration = np.min([e['duration'] for e in ds_src.a.events])      
            ds_tar = build_events_ds(ds_tar, duration, overlap=duration-1)
            ds_src = load_spatiotemporal_dataset(ds_src, duration=duration)
        
        print ds_src.samples.shape
        print ds_tar.samples.shape 
        
        ds_src.sa['task'] = [source for s in range(ds_src.samples.shape[0])]
        ds_tar.sa['task'] = [target for s in range(ds_tar.samples.shape[0])]
        
        ds_merged = vstack((ds_src, ds_tar))
        ds_merged.a.update(ds_src.a)
        
        print ds_merged.sa.task
        
        ds_merged_list.append(ds_merged)
        '''
Example 18
def _group_transfer_learning(path,
                             subjects,
                             analysis,
                             conf_file,
                             source='task',
                             analysis_type='single',
                             **kwargs):

    if source == 'task':
        target = 'rest'
    else:
        if source == 'rest':
            target = 'task'

    if source == 'saccade':
        target = 'face'
    else:
        if source == 'face':
            target = 'saccade'

    ##############################################
    ##############################################
    ##    conf_src['label_included'] = 'all'    ##
    ##    conf_src['label_dropped'] = 'none'    ##
    ##    conf_src['mean_samples'] = 'False'    ##
    ##############################################
    ##############################################

    if analysis_type == 'group':

        if path.__class__ == conf_file.__class__ == list:
            ds_src, s, conf_src = sources_merged_ds(path, subjects, conf_file,
                                                    source, **kwargs)

            conf_src['permutations'] = 0

        else:
            print 'In group analysis, path, subjects and conf_file must be lists: ' \
                  'check the configuration file and/or parameters!'

            return 0

    else:

        conf_src = read_configuration(path, conf_file, source)

    for arg in conf_src:
        if arg == 'map_list':
            map_list = conf_src[arg].split(',')

    r_group = spatial(ds_src, **conf_src)

    total_results = dict()
    total_results['group'] = r_group

    clf = r_group['classifier']

    for subj_, conf_, path_ in zip(subjects, conf_file, path):
        for subj in subj_:
            print '-----------'
            r = dict()
            if len(subj_) > 1:
                conf_tar = read_configuration(path_, conf_, target)

                for arg in kwargs:

                    conf_tar[arg] = kwargs[arg]

                data_path = conf_tar['data_path']
                try:
                    ds_tar = load_dataset(data_path, subj, target, **conf_tar)
                except Exception as err:
                    print err
                    continue

            ds_tar = detrend_dataset(ds_tar, target, **conf_tar)

            if conf_src['label_included'] == 'all' and \
               conf_src['label_dropped'] != 'fixation':
                print 'Balancing dataset...'
                ds_src = balance_dataset_timewise(ds_src, 'fixation')

            predictions = clf.predict(ds_tar)

            pred = np.array(predictions)
            targets = ds_tar.targets

            for arg in r_group.keys():
                r[arg] = copy.copy(r_group[arg])

            r['targets'] = targets
            r['predictions'] = predictions

            r['fclf'] = clf

            c_m = ConfusionMatrix(predictions=pred, targets=targets)
            c_m.compute()
            r['confusion_target'] = c_m
            print c_m

            tr_pred = similarity_measure_mahalanobis(ds_tar, ds_src, r)
            r['mahalanobis_similarity'] = tr_pred

            #print tr_pred

            c_mat_mahala = ConfusionMatrix(predictions=tr_pred.T[1],
                                           targets=tr_pred.T[0])
            c_mat_mahala.compute()
            r['confusion_mahala'] = c_mat_mahala

            d_prime, beta, c, c_new = signal_detection_measures(
                pred, targets, map_list)
            r['d_prime'] = d_prime
            print d_prime
            r['beta'] = beta
            r['c'] = c
            r['confusion_total'] = c_new
            '''
            d_prime_maha, c_new_maha = d_prime_statistics(tr_pred.T[1], tr_pred.T[0], map_list)
            r['d_prime_maha'] = d_prime_maha
            r['confusion_tot_maha'] = c_new_maha
            '''

            total_results[subj] = r
Example 19
def get_merged_ds(path, subjects, conf_file, source='task', dim=3, **kwargs):

    # Put source and target in the conf!
    if source == 'task':
        target = 'rest'
    else:
        if source == 'rest':
            target = 'task'

    if source == 'saccade':
        target = 'face'
    else:
        if source == 'face':
            target = 'saccade'

    ds_merged_list = []
    conf_src = read_configuration(path, conf_file, source)
    conf_tar = read_configuration(path, conf_file, target)

    ##############################################
    ##############################################
    ##    conf_src['label_included'] = 'all'    ##
    ##    conf_src['label_dropped'] = 'none'    ##
    ##    conf_src['mean_samples'] = 'False'    ##
    ##############################################
    ##############################################

    for arg in kwargs:
        conf_src[arg] = kwargs[arg]
        conf_tar[arg] = kwargs[arg]

    data_path = conf_src['data_path']

    for subj in subjects:
        print '--------'
        try:
            ds_src = load_dataset(data_path, subj, source, **conf_src)
            ds_tar = load_dataset(data_path, subj, target, **conf_tar)
        except Exception as err:
            print err
            continue

        ds_src = detrend_dataset(ds_src, source, **conf_src)
        ds_tar = detrend_dataset(ds_tar, target, **conf_tar)

        if dim == 4:
            duration = np.min([e['duration'] for e in ds_src.a.events])
            ds_tar = build_events_ds(ds_tar, duration, overlap=duration - 1)
            ds_src = load_spatiotemporal_dataset(ds_src, duration=duration)

        print ds_src.samples.shape
        print ds_tar.samples.shape

        ds_src.sa['task'] = [source for s in range(ds_src.samples.shape[0])]
        ds_tar.sa['task'] = [target for s in range(ds_tar.samples.shape[0])]

        ds_merged = vstack((ds_src, ds_tar))
        ds_merged.a.update(ds_src.a)

        print ds_merged.sa.task

        ds_merged_list.append(ds_merged)
        '''
Example 20
def test_searchlight_similarity(
        path,
        subjects,
        conf_file,
        type,
        dim=4,
        measure=CorrelationThresholdMeasure(),
        partitioner=TargetCombinationPartitioner(attr='targets'),
        **kwargs):

    conf = read_configuration(path, conf_file, type)

    for arg in kwargs:
        conf[arg] = kwargs[arg]
        if arg == 'radius':
            radius = kwargs[arg]
        if arg == 'duration':
            duration = kwargs[arg]

    #########################################################
    datetime = get_time()
    analysis = 'searchlight_measure'
    mask = conf['mask_area']
    task = type

    new_dir = datetime + '_' + analysis + '_' + mask + '_' + task
    command = 'mkdir ' + os.path.join(path, '0_results', new_dir)
    os.system(command)

    parent_dir = os.path.join(path, '0_results', new_dir)
    ##########################################################

    debug.active += ["SLC"]

    ds_merged = get_merged_ds(path,
                              subjects,
                              conf_file,
                              type,
                              dim=dim,
                              **kwargs)

    cv = CrossValidation(measure,
                         partitioner,
                         splitter=Splitter(attr='partitions',
                                           attr_values=(3, 2)),
                         errorfx=None)

    maps = []
    kwa = dict(voxel_indices=Sphere(radius), event_offsetidx=Sphere(duration))
    queryengine = IndexQueryEngine(**kwa)

    for s, ds in zip(subjects, ds_merged):

        print ds.samples.shape

        voxel_num = ds.samples.shape[1] / duration
        ids = np.arange(voxel_num)
        sl = Searchlight(cv, queryengine=queryengine, roi_ids=ids)

        map = sl(ds)

        maps.append(map)

        name = s
        command = 'mkdir ' + os.path.join(parent_dir, name)
        os.system(command)

        results_dir = os.path.join(parent_dir, name)
        fname = name + '_radius_' + str(radius) + '_searchlight_map.nii.gz'
        map_ = ds.a.mapper[:2].reverse(map)
        map_ = np.rollaxis(map_.samples, 0, 4)
        img = ni.Nifti1Image(map_, ds.a.imghdr.get_base_affine())
        ni.save(img, os.path.join(results_dir, fname))

    return maps
Example 21
def _group_transfer_learning(path, subjects, analysis,  conf_file, source='task', analysis_type='single', **kwargs):
    
    if source == 'task':
        target = 'rest'
    else:
        if source == 'rest':
            target = 'task'
    
    
    if source == 'saccade':
        target = 'face'
    else:
        if source == 'face':
            target = 'saccade'
    
   
    ##############################################    
    ##############################################
    ##    conf_src['label_included'] = 'all'    ##   
    ##    conf_src['label_dropped'] = 'none'    ##
    ##    conf_src['mean_samples'] = 'False'    ##
    ##############################################
    ##############################################

    if analysis_type == 'group':
        
        if path.__class__ == conf_file.__class__ == list:  
            ds_src, s, conf_src = sources_merged_ds(path, subjects, conf_file, source, **kwargs)
            
            conf_src['permutations'] = 0
            
        else:
            print 'In group analysis, path, subjects and conf_file must be lists: ' \
                  'check the configuration file and/or parameters!'
            return 0
    
    else:
        
        conf_src = read_configuration(path, conf_file, source)
        
    
    
    for arg in conf_src:
        if arg == 'map_list':
            map_list = conf_src[arg].split(',')
    
    
    r_group = spatial(ds_src, **conf_src)
    
    total_results = dict()
    total_results['group'] = r_group
    
    clf = r_group['classifier']
    
    for subj_, conf_, path_ in zip(subjects, conf_file, path):
        for subj in subj_:
            print '-----------'
            r = dict()
            if len(subj_) > 1:
                conf_tar = read_configuration(path_, conf_, target)
        
                for arg in kwargs:
                    
                    conf_tar[arg] = kwargs[arg]
            
            
                data_path = conf_tar['data_path']
                try:
                    ds_tar = load_dataset(data_path, subj, target, **conf_tar)
                except Exception as err:
                    print err
                    continue
    
            
            ds_tar = detrend_dataset(ds_tar, target, **conf_tar) 
    
            if conf_src['label_included'] == 'all' and \
               conf_src['label_dropped'] != 'fixation':
                    print 'Balancing dataset...'
                    ds_src = balance_dataset_timewise(ds_src, 'fixation')       
            
            predictions = clf.predict(ds_tar)
           
            pred = np.array(predictions)
            targets = ds_tar.targets
            
            
            for arg in r_group.keys():
                r[arg] = copy.copy(r_group[arg])
            
            r['targets'] = targets
            r['predictions'] = predictions
            
            r['fclf'] = clf
            
            c_m = ConfusionMatrix(predictions=pred, targets=targets)
            c_m.compute()
            r['confusion_target'] = c_m
            print c_m
            
            tr_pred = similarity_measure_mahalanobis(ds_tar, ds_src, r)
            r['mahalanobis_similarity'] = tr_pred
            
            #print tr_pred
            
            c_mat_mahala = ConfusionMatrix(predictions=tr_pred.T[1], targets=tr_pred.T[0])
            c_mat_mahala.compute()
            r['confusion_mahala'] = c_mat_mahala
            
            d_prime, beta, c, c_new = signal_detection_measures(pred, targets, map_list)
            r['d_prime'] = d_prime
            print d_prime
            r['beta'] = beta
            r['c'] = c
            r['confusion_total'] = c_new
            
            '''
            d_prime_maha, c_new_maha = d_prime_statistics(tr_pred.T[1], tr_pred.T[0], map_list)
            r['d_prime_maha'] = d_prime_maha
            r['confusion_tot_maha'] = c_new_maha
            '''
            
            total_results[subj] = r
Example 22
from mvpa2.suite import mean_group_sample, map2nifti
from mvpa2.mappers.fx import BinaryFxNode
from mvpa2.misc.errorfx import mean_mismatch_error
import nibabel as ni
import numpy as np
from mvpa2.clfs.base import Classifier
from mvpa2.generators.resampling import Balancer
import mvpa_itab.results as rs
from mvpa2.misc.neighborhood import Sphere, IndexQueryEngine
from mvpa2.measures.searchlight import Searchlight



#path = '/media/robbis/DATA/fmri/memory/'

conf = read_configuration(path, 'remote_memory.conf', 'BETA_MVPA')

conf['analysis_type'] = 'searchlight'
conf['analysis_task'] = 'memory_regression_sample_wise'
conf['mask_area'] = 'total'
task_ = 'BETA_MVPA'
subj = '110929anngio'

partitioners = [NGroupPartitioner(k) for k in np.arange(2, 5)]
result_dict = dict()

summarizers = [rs.SearchlightSummarizer()]
savers = [rs.SearchlightSaver()]
collection = rs.ResultsCollection(conf, path, summarizers)
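The fragment stops after the collection setup; in the other examples the same objects are used per subject roughly as follows (a sketch assembled from the surrounding functions, not code present in this snippet):

# Typical per-subject pattern from the rest of this module: run the analysis,
# wrap the result together with the savers, add it to the collection, summarize.
r = searchlight(ds_original, **conf)              # hypothetical: dataset loaded as in Example 12
subj_result = rs.SubjectResult(subj, r, savers)
collection.add(subj_result)
collection.summarize()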