Example #1
def test_spatiotemporal(path, subjects, conf_file, type_, **kwargs):
    
    conf = read_configuration(path, conf_file, type_)
    
    for arg in kwargs:
        conf[arg] = kwargs[arg]
    balance = kwargs.get('balance', False)
    
    total_results = dict()
    
    data_path = conf['data_path']
    conf['analysis_type'] = 'spatiotemporal'
    conf['analysis_task'] = type_
    
    for subj in subjects:
        try:
            ds = load_dataset(data_path, subj, type_, **conf)
        except Exception as err:
            print err
            continue
        ds = detrend_dataset(ds, type_, **conf)
        
        if balance:
            if conf['label_included'] == 'all' and \
                conf['label_dropped'] == 'none':
                ds = balance_dataset_timewise(ds, 'fixation')
        
        r = spatiotemporal(ds, **conf)
        
        total_results[subj] = r
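A minimal call sketch for the function above. Every value below is a hypothetical placeholder (study path, configuration file, task label and subject codes), and balance=True exercises the optional timewise-balancing branch.

# Hypothetical invocation of test_spatiotemporal(); all values are placeholders.
path = '/media/DATA/fmri/my_study'
subjects = ['subj01', 'subj02']
test_spatiotemporal(path, subjects, 'my_study.conf', 'fmri', balance=True)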
Example #2
def test_spatiotemporal(path, subjects, conf_file, type_, **kwargs):

    conf = read_configuration(path, conf_file, type_)

    for arg in kwargs:
        conf[arg] = kwargs[arg]
    balance = kwargs.get('balance', False)

    total_results = dict()

    data_path = conf['data_path']
    conf['analysis_type'] = 'spatiotemporal'
    conf['analysis_task'] = type_

    for subj in subjects:
        try:
            ds = load_dataset(data_path, subj, type_, **conf)
        except Exception as err:
            print err
            continue
        ds = detrend_dataset(ds, type_, **conf)

        if balance:
            if conf['label_included'] == 'all' and \
                conf['label_dropped'] == 'none':
                ds = balance_dataset_timewise(ds, 'fixation')

        r = spatiotemporal(ds, **conf)

        total_results[subj] = r
Example #3
def test_clustering(path,
                    subjects,
                    analysis,
                    conf_file,
                    source='task',
                    **kwargs):

    if source == 'task':
        target = 'rest'
    else:
        target = 'task'

    conf_src = read_configuration(path, conf_file, source)
    conf_tar = read_configuration(path, conf_file, target)

    ##############################################
    conf_src['label_included'] = 'all'
    conf_src['label_dropped'] = 'none'
    conf_src['mean_samples'] = 'True'
    ##############################################
    for arg in kwargs:
        conf_src[arg] = kwargs[arg]
        conf_tar[arg] = kwargs[arg]

    total_results = dict()

    data_path = conf_src['data_path']

    for subj in subjects:
        try:
            ds_src = load_dataset(data_path, subj, source, **conf_src)
            ds_tar = load_dataset(data_path, subj, target, **conf_tar)
        except Exception as err:
            print err
            continue

        ds_src = detrend_dataset(ds_src, source, **conf_src)
        ds_tar = detrend_dataset(ds_tar, target, **conf_tar)

        if conf_src['label_included'] == 'all' and \
                conf_src['label_dropped'] != 'fixation':
            ds_src = balance_dataset_timewise(ds_src, 'fixation')

        r = clustering_analysis(ds_src, ds_tar, analysis, **kwargs)

        total_results[subj] = r
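A sketch of how test_clustering() might be called. The analysis argument is assumed to be whatever clustering_analysis() expects (a plain string is used here only as a placeholder); path, subject codes and configuration file are likewise hypothetical, and any extra keyword overrides the corresponding configuration entry.

# Hypothetical invocation; 'kmeans' is only a placeholder for the analysis argument.
test_clustering('/media/DATA/fmri/my_study',
                ['subj01', 'subj02'],
                'kmeans',
                'my_study.conf',
                source='task')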
Example #4
def test_clustering(path, subjects, analysis, conf_file, source='task', **kwargs):    
    
    if source == 'task':
        target = 'rest'
    else:
        target = 'task'
     
    conf_src = read_configuration(path, conf_file, source)
    conf_tar = read_configuration(path, conf_file, target)
    
    ##############################################
    conf_src['label_included'] = 'all'
    conf_src['label_dropped'] = 'none'
    conf_src['mean_samples'] = 'True'
    ##############################################
    for arg in kwargs:
        conf_src[arg] = kwargs[arg]
        conf_tar[arg] = kwargs[arg]
        
    total_results = dict()
    
    data_path = conf_src['data_path']
    
    for subj in subjects:
        try:
            ds_src = load_dataset(data_path, subj, source, **conf_src)
            ds_tar = load_dataset(data_path, subj, target, **conf_tar)
        except Exception as err:
            print err
            continue
        
        ds_src = detrend_dataset(ds_src, source, **conf_src)
        ds_tar = detrend_dataset(ds_tar, target, **conf_tar) 
        
        if conf_src['label_included'] == 'all' and \
                conf_src['label_dropped'] != 'fixation':
                ds_src = balance_dataset_timewise(ds_src, 'fixation')
        
        r = clustering_analysis(ds_src, ds_tar, analysis, **kwargs)
        
        total_results[subj] = r
Example #5
def _test_spatial(path, subjects, conf_file, type_, **kwargs):
    warnings.warn("Deprecated: use test_spatial instead.", DeprecationWarning)

    conf = read_configuration(path, conf_file, type_)
    conf['analysis_type'] = 'spatial'
    conf['analysis_task'] = type_

    for arg in kwargs:
        conf[arg] = kwargs[arg]

    total_results = dict()

    data_path = conf['data_path']

    summarizers = [rs.DecodingSummarizer()]
    savers = [rs.DecodingSaver()]
    result = rs.ResultsCollection(conf, path, summarizers)

    for subj in subjects:
        print '------'
        try:
            ds = load_dataset(data_path, subj, type_, **conf)
        except Exception as err:
            print err
            continue

        ds = detrend_dataset(ds, type_, **conf)

        balancer = balance_dataset(**conf)

        for i, ds_ in enumerate(balancer.generate(ds)):
            logger.info("Balanced dataset n. %d" % (i + 1))
            subj_ = "%s_%03d" % (subj, i + 1)

            ds_ = normalize_dataset(ds_, **conf)

            logger.info(ds_.summary())

            r = spatial(ds_, **conf)
            total_results[subj_] = r

            subj_result = rs.SubjectResult(subj_, r, savers)

            result.add(subj_result)

    #result.save()
    result.summarize()

    conf['classes'] = np.unique(ds.targets)
    #save_results()
    #save_results(path, total_results, conf)

    return total_results, subj_result
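The deprecated helper above returns the per-subject results dictionary together with the last rs.SubjectResult; a hypothetical call (path, subjects and configuration file are placeholders) looks like this.

# Hypothetical invocation; the function itself warns that test_spatial should be used instead.
total_results, last_subject_result = _test_spatial('/media/DATA/fmri/my_study',
                                                   ['subj01', 'subj02'],
                                                   'my_study.conf',
                                                   'fmri')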
Example #6
def _test_spatial(path, subjects, conf_file, type_, **kwargs):
    warnings.warn("Deprecated: use test_spatial instead.", DeprecationWarning)
    
    conf = read_configuration(path, conf_file, type_)
    conf['analysis_type'] = 'spatial'
    conf['analysis_task'] = type_
    
    for arg in kwargs:
        conf[arg] = kwargs[arg]
    
    total_results = dict()
    
    data_path = conf['data_path']
        
    summarizers = [rs.DecodingSummarizer()]
    savers = [rs.DecodingSaver()]
    result = rs.ResultsCollection(conf, path, summarizers)
    
    for subj in subjects:
        print '------'
        try:
            ds = load_dataset(data_path, subj, type_, **conf)
        except Exception as err:
            print err
            continue
        
        
        ds = detrend_dataset(ds, type_, **conf)
        
        balancer = balance_dataset(**conf)
        
        for i, ds_ in enumerate(balancer.generate(ds)):
            logger.info("Balanced dataset n. %d" % (i+1))
            subj_ = "%s_%03d" % (subj, i+1)
            
            ds_ = normalize_dataset(ds_, **conf)
            
            logger.info(ds_.summary())
            
            r = spatial(ds_, **conf)
            total_results[subj_] = r
        
            subj_result = rs.SubjectResult(subj_, r, savers)
        
            result.add(subj_result)
Example #7
def test_spatial(path, subjects, conf_file, type_, **kwargs):

    #conf = read_configuration(path, conf_file, type_)
    conf = read_json_configuration(path, conf_file, type_)

    for arg in kwargs:
        conf[arg] = kwargs[arg]

    total_results = dict()

    #data_path = conf['data_path']
    data_path = conf['path']['data_path']

    conf['analysis_type'] = 'spatial'
    conf['analysis_task'] = type_

    summarizers = [rs.DecodingSummarizer()]
    savers = [rs.DecodingSaver()]
    result = rs.ResultsCollection(conf, path, summarizers)

    for subj in subjects:
        print '------'
        try:
            ds = load_dataset(data_path, subj, type_, **conf)
        except Exception:
            continue

        #ds = preprocess_dataset(ds, type_, **conf)
        ds = get_preprocessing(**conf).transform(ds)

        r = spatial(ds, **conf)
        total_results[subj] = r

        subj_result = rs.SubjectResult(subj, r, savers)

        result.add(subj_result)

    #result.save()
    result.summarize()

    conf['classes'] = np.unique(ds.targets)
    #save_results()
    #save_results(path, total_results, conf)

    return total_results, subj_result
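This variant reads a JSON configuration, so the data path is looked up under conf['path']['data_path']. A hypothetical call, assuming the (placeholder) file my_study.json provides that section:

# Hypothetical invocation; 'my_study.json' is assumed to contain a 'path' section with 'data_path'.
total_results, last_subject_result = test_spatial('/media/DATA/fmri/my_study',
                                                  ['subj01'],
                                                  'my_study.json',
                                                  'fmri')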
Example #8
 def load_dataset(self, subj):
     
     self.subject = subj
     
     analysis_dict = {}
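     # Values also present in the default configuration override the stored ones;
     # everything except 'path' and 'task' is then forwarded to load_dataset().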
     for k in self._conf.keys():
         if k in self._default_conf.keys():
             self._conf[k] = self._default_conf[k]
         if k != 'path' and k != 'task':
             analysis_dict[k] = self._conf[k]
     
     
     self.ds_orig = load_dataset(self._data_path, 
                                 self.subject, 
                                 self._data_type, 
                                 **analysis_dict)
     
     return self.ds_orig
Example #9
def test_searchlight(path, subjects, conf_file, type_, **kwargs):
    
    
    conf = read_configuration(path, conf_file, type_)
    
    for arg in kwargs:
        conf[arg] = kwargs[arg]
    
    
    conf['analysis_type'] = 'searchlight'
    conf['analysis_task'] = type_
    
    total_results = dict()
    data_path = conf['data_path']
    
    #
    summarizers = [rs.SearchlightSummarizer()]
    savers = [rs.SearchlightSaver()]
    result = rs.ResultsCollection(conf, path, summarizers)
    #
    
    for subj in subjects:
        
        ds = load_dataset(data_path, subj, type_, **conf)
        ds = detrend_dataset(ds, type_, **conf)
        
        r = searchlight(ds, **kwargs)
        
        subj_result = rs.SubjectResult(subj, r, savers)
        total_results[subj] = r
        
        result.add(subj_result)
    
    result.summarize()
    

    conf['classes'] = np.unique(ds.targets)  
    #save_results()
    #save_results(path, total_results, conf)
    
    return result, r, subj_result
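In this helper the extra keyword arguments are merged into the configuration and also forwarded to searchlight(), so searchlight-specific options travel the same way. The sketch below is hypothetical: path, subjects, configuration file and the radius keyword are assumptions, not confirmed names.

# Hypothetical invocation; 'radius' is assumed to be a keyword understood by searchlight().
result, r, subj_result = test_searchlight('/media/DATA/fmri/my_study',
                                          ['subj01'],
                                          'my_study.conf',
                                          'fmri',
                                          radius=3)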
Example #10
def test_searchlight(path, subjects, conf_file, type_, **kwargs):

    conf = read_configuration(path, conf_file, type_)

    for arg in kwargs:
        conf[arg] = kwargs[arg]

    conf['analysis_type'] = 'searchlight'
    conf['analysis_task'] = type_

    total_results = dict()
    data_path = conf['data_path']

    #
    summarizers = [rs.SearchlightSummarizer()]
    savers = [rs.SearchlightSaver()]
    result = rs.ResultsCollection(conf, path, summarizers)
    #

    for subj in subjects:

        ds = load_dataset(data_path, subj, type_, **conf)
        ds = detrend_dataset(ds, type_, **conf)

        r = searchlight(ds, **kwargs)

        subj_result = rs.SubjectResult(subj, r, savers)
        total_results[subj] = r

        result.add(subj_result)

    result.summarize()

    conf['classes'] = np.unique(ds.targets)
    #save_results()
    #save_results(path, total_results, conf)

    return result, r, subj_result
Example #11
def test_spatial(path, subjects, conf_file, type_, **kwargs):
    
    #conf = read_configuration(path, conf_file, type_)
    conf = read_json_configuration(path, conf_file, type_)
    
    for arg in kwargs:
        conf[arg] = kwargs[arg]
    
    total_results = dict()
    
    #data_path = conf['data_path']
    data_path = conf['path']['data_path']
        
    conf['analysis_type'] = 'spatial'
    conf['analysis_task'] = type_
    
    summarizers = [rs.DecodingSummarizer()]
    savers = [rs.DecodingSaver()]
    result = rs.ResultsCollection(conf, path, summarizers)
    
    for subj in subjects:
        print '------'
        try:
            ds = load_dataset(data_path, subj, type_, **conf)
        except Exception as err:
            print err
            continue
        
        #ds = preprocess_dataset(ds, type_, **conf)
        ds = get_preprocessing(**conf).transform(ds)
                
        r = spatial(ds, **conf)
        total_results[subj] = r
        
        subj_result = rs.SubjectResult(subj, r, savers)
        
        result.add(subj_result)
Example #12
def test_transfer_learning(path, subjects, analysis,  conf_file, source='task', \
                           analysis_type='single', calculateSimilarity='True', **kwargs):
    
    if source == 'task':
        target = 'rest'
    elif source == 'rest':
        target = 'task'

    if source == 'saccade':
        target = 'face'
    elif source == 'face':
        target = 'saccade'
    
    p = kwargs['p']
    ##############################################    
    ##############################################
    ##    conf_src['label_included'] = 'all'    ##   
    ##    conf_src['label_dropped'] = 'none'    ##
    ##    conf_src['mean_samples'] = 'False'    ##
    ##############################################
    ##############################################

    if analysis_type == 'group':
        
        if path.__class__ == conf_file.__class__ == list:  
            ds_src, _, conf_src = sources_merged_ds(path, subjects, conf_file, source, **kwargs)
            ds_tar, subjects, conf_tar = sources_merged_ds(path, subjects, conf_file, target, **kwargs)
            
            conf_src['permutations'] = 0
            conf_tar['permutations'] = 0
        else:
            print 'In group analysis, path, subjects and conf_file must be lists: ' \
                  'check configuration file and/or parameters!'
            return 0
    
    else:
        
        conf_src = read_configuration(path, conf_file, source)
        conf_tar = read_configuration(path, conf_file, target)
    
        for arg in kwargs:
            conf_src[arg] = kwargs[arg]
            conf_tar[arg] = kwargs[arg]
        
        
        data_path = conf_src['data_path']
    
    
    conf_src['analysis_type'] = 'transfer_learning'
    conf_src['analysis_task'] = source
    conf_src['analysis_func'] = analysis.func_name
    
    
    for arg in conf_src:
        if arg == 'map_list':
            map_list = conf_src[arg].split(',')
        if arg == 'p_dist':
            p = float(conf_src[arg])
            print p
    
    
    total_results = dict()
    
    
    
    
    summarizers = [rs.CrossDecodingSummarizer(),
                   rs.SimilaritySummarizer(),
                   rs.DecodingSummarizer(),
                   rs.SignalDetectionSummarizer(),
                   ]
    
    savers = [rs.CrossDecodingSaver(),
                   rs.SimilaritySaver(),
                   rs.DecodingSaver(),
                   rs.SignalDetectionSaver(),
                   ]
    
    collection = rs.ResultsCollection(conf_src, path, summarizers)
    
    
    for subj in subjects:
        print '-------------------'
        
        if (len(subjects) > 1) or (subj != 'group'):
            try:
                ds_src = load_dataset(data_path, subj, source, **conf_src)
                ds_tar = load_dataset(data_path, subj, target, **conf_tar)
            except Exception as err:
                print err
                continue
         
        # Evaluate if is correct to do further normalization after merging two ds. 
        ds_src = detrend_dataset(ds_src, source, **conf_src)
        ds_tar = detrend_dataset(ds_tar, target, **conf_tar) 
        
        if conf_src['label_included'] == 'all' and \
           conf_src['label_dropped'] != 'fixation':
                print 'Balancing dataset...'
                ds_src = balance_dataset_timewise(ds_src, 'fixation')        
        
        # Make cross-decoding
        r = transfer_learning(ds_src, ds_tar, analysis, **conf_src)
        
        
        
        
        # Now we have cross-decoding results we could process it
        pred = np.array(r['classifier'].ca.predictions)

        targets = r['targets']
        
        c_m = ConfusionMatrix(predictions=pred, targets=targets)
        c_m.compute()
        r['confusion_target'] = c_m
        
        c_new = cross_decoding_confusion(pred, targets, map_list)
        r['confusion_total'] = c_new
        
        print c_new
        
        # Similarity Analysis
        if calculateSimilarity == 'True':
            if 'p' not in locals():
                print 'Warning: no p-value available for the similarity analysis'

            
            mahala_data = similarity_measure(r['ds_tar'], r['ds_src'], 
                                             r, p_value=p, method='mahalanobis')
            
            #r['mahalanobis_similarity'] = mahala_data
            for k_,v_ in mahala_data.items():
                r[k_] = v_
            r['confusion_mahala'] = mahala_data['confusion_mahalanobis']
        
        else:
            #r['mahalanobis_similarity'] = []
            r['confusion_mahala'] = 'Null'
        
        # Signal Detection Theory Analysis
        sdt_res = signal_detection_measures(c_new)
        
        for k_,v_ in sdt_res.items():
            r[k_] = v_
            
            '''
            Same code of:
        
            r['d_prime'] = d_prime
            r['beta'] = beta
            r['c'] = c
            '''
        
        total_results[subj] = r
        subj_result = rs.SubjectResult(subj, r, savers=savers)
        
        collection.add(subj_result)
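A single-subject sketch of the cross-decoding entry point above. It assumes analysis is a plain function (its func_name is stored in the configuration), here the spatial() helper shown in the other examples; p is the p-value consumed by the Mahalanobis similarity step, the configuration file is expected to provide map_list, and every path and file name is a placeholder.

# Hypothetical invocation; spatial is assumed to be an acceptable analysis function.
test_transfer_learning('/media/DATA/fmri/my_study',
                       ['subj01'],
                       spatial,
                       'my_study.conf',
                       source='task',
                       analysis_type='single',
                       calculateSimilarity='True',
                       p=0.05)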
Example #13
def _group_transfer_learning(path, subjects, analysis,  conf_file, source='task', analysis_type='single', **kwargs):
    
    if source == 'task':
        target = 'rest'
    elif source == 'rest':
        target = 'task'

    if source == 'saccade':
        target = 'face'
    elif source == 'face':
        target = 'saccade'
    
   
    ##############################################    
    ##############################################
    ##    conf_src['label_included'] = 'all'    ##   
    ##    conf_src['label_dropped'] = 'none'    ##
    ##    conf_src['mean_samples'] = 'False'    ##
    ##############################################
    ##############################################

    if analysis_type == 'group':
        
        if path.__class__ == conf_file.__class__ == list:  
            ds_src, s, conf_src = sources_merged_ds(path, subjects, conf_file, source, **kwargs)
            
            conf_src['permutations'] = 0
            
        else:
            print 'In group analysis, path, subjects and conf_file must be lists: ' \
                  'check configuration file and/or parameters!'
            return 0
    
    else:
        
        conf_src = read_configuration(path, conf_file, source)
        
    
    
    for arg in conf_src:
        if arg == 'map_list':
            map_list = conf_src[arg].split(',')
    
    
    r_group = spatial(ds_src, **conf_src)
    
    total_results = dict()
    total_results['group'] = r_group
    
    clf = r_group['classifier']
    
    for subj_, conf_, path_ in zip(subjects, conf_file, path):
        for subj in subj_:
            print '-----------'
            r = dict()
            if len(subj_) > 1:
                conf_tar = read_configuration(path_, conf_, target)
        
                for arg in kwargs:
                    
                    conf_tar[arg] = kwargs[arg]
            
            
                data_path = conf_tar['data_path']
                try:
                    ds_tar = load_dataset(data_path, subj, target, **conf_tar)
                except Exception as err:
                    print err
                    continue
    
            
            ds_tar = detrend_dataset(ds_tar, target, **conf_tar) 
    
            if conf_src['label_included'] == 'all' and \
               conf_src['label_dropped'] != 'fixation':
                    print 'Balancing dataset...'
                    ds_src = balance_dataset_timewise(ds_src, 'fixation')       
            
            predictions = clf.predict(ds_tar)
           
            pred = np.array(predictions)
            targets = ds_tar.targets
            
            
            for arg in r_group.keys():
                r[arg] = copy.copy(r_group[arg])
            
            r['targets'] = targets
            r['predictions'] = predictions
            
            r['fclf'] = clf
            
            c_m = ConfusionMatrix(predictions=pred, targets=targets)
            c_m.compute()
            r['confusion_target'] = c_m
            print c_m
            
            tr_pred = similarity_measure_mahalanobis(ds_tar, ds_src, r)
            r['mahalanobis_similarity'] = tr_pred
            
            #print tr_pred
            
            c_mat_mahala = ConfusionMatrix(predictions=tr_pred.T[1], targets=tr_pred.T[0])
            c_mat_mahala.compute()
            r['confusion_mahala'] = c_mat_mahala
            
            d_prime, beta, c, c_new = signal_detection_measures(pred, targets, map_list)
            r['d_prime'] = d_prime
            print d_prime
            r['beta'] = beta
            r['c'] = c
            r['confusion_total'] = c_new
            
            '''
            d_prime_maha, c_new_maha = d_prime_statistics(tr_pred.T[1], tr_pred.T[0], map_list)
            r['d_prime_maha'] = d_prime_maha
            r['confusion_tot_maha'] = c_new_maha
            '''
            
            total_results[subj] = r
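For the group variant, path, subjects and conf_file must be parallel lists (one entry per study), as the zip() in the subject loop shows. The sketch below uses entirely hypothetical names.

# Hypothetical group-level invocation; every path, file and subject code is a placeholder.
_group_transfer_learning(['/media/DATA/fmri/study_A', '/media/DATA/fmri/study_B'],
                         [['subj01', 'subj02'], ['subj03', 'subj04']],
                         spatial,
                         ['study_A.conf', 'study_B.conf'],
                         source='task',
                         analysis_type='group')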
Example #14
def _group_transfer_learning(path,
                             subjects,
                             analysis,
                             conf_file,
                             source='task',
                             analysis_type='single',
                             **kwargs):

    if source == 'task':
        target = 'rest'
    elif source == 'rest':
        target = 'task'

    if source == 'saccade':
        target = 'face'
    elif source == 'face':
        target = 'saccade'

    ##############################################
    ##############################################
    ##    conf_src['label_included'] = 'all'    ##
    ##    conf_src['label_dropped'] = 'none'    ##
    ##    conf_src['mean_samples'] = 'False'    ##
    ##############################################
    ##############################################

    if analysis_type == 'group':

        if path.__class__ == conf_file.__class__ == list:
            ds_src, s, conf_src = sources_merged_ds(path, subjects, conf_file,
                                                    source, **kwargs)

            conf_src['permutations'] = 0

        else:
            print 'In group analysis, path, subjects and conf_file must be lists: ' \
                  'check configuration file and/or parameters!'

            return 0

    else:

        conf_src = read_configuration(path, conf_file, source)

    for arg in conf_src:
        if arg == 'map_list':
            map_list = conf_src[arg].split(',')

    r_group = spatial(ds_src, **conf_src)

    total_results = dict()
    total_results['group'] = r_group

    clf = r_group['classifier']

    for subj_, conf_, path_ in zip(subjects, conf_file, path):
        for subj in subj_:
            print '-----------'
            r = dict()
            if len(subj_) > 1:
                conf_tar = read_configuration(path_, conf_, target)

                for arg in kwargs:

                    conf_tar[arg] = kwargs[arg]

                data_path = conf_tar['data_path']
                try:
                    ds_tar = load_dataset(data_path, subj, target, **conf_tar)
                except Exception as err:
                    print err
                    continue

            ds_tar = detrend_dataset(ds_tar, target, **conf_tar)

            if conf_src['label_included'] == 'all' and \
               conf_src['label_dropped'] != 'fixation':
                print 'Balancing dataset...'
                ds_src = balance_dataset_timewise(ds_src, 'fixation')

            predictions = clf.predict(ds_tar)

            pred = np.array(predictions)
            targets = ds_tar.targets

            for arg in r_group.keys():
                r[arg] = copy.copy(r_group[arg])

            r['targets'] = targets
            r['predictions'] = predictions

            r['fclf'] = clf

            c_m = ConfusionMatrix(predictions=pred, targets=targets)
            c_m.compute()
            r['confusion_target'] = c_m
            print c_m

            tr_pred = similarity_measure_mahalanobis(ds_tar, ds_src, r)
            r['mahalanobis_similarity'] = tr_pred

            #print tr_pred

            c_mat_mahala = ConfusionMatrix(predictions=tr_pred.T[1],
                                           targets=tr_pred.T[0])
            c_mat_mahala.compute()
            r['confusion_mahala'] = c_mat_mahala

            d_prime, beta, c, c_new = signal_detection_measures(
                pred, targets, map_list)
            r['d_prime'] = d_prime
            print d_prime
            r['beta'] = beta
            r['c'] = c
            r['confusion_total'] = c_new
            '''
            d_prime_maha, c_new_maha = d_prime_statistics(tr_pred.T[1], tr_pred.T[0], map_list)
            r['d_prime_maha'] = d_prime_maha
            r['confusion_tot_maha'] = c_new_maha
            '''

            total_results[subj] = r
Example #15
def get_merged_ds(path, subjects, conf_file, source='task', dim=3, **kwargs):

    # Put source and target in the configuration file!
    if source == 'task':
        target = 'rest'
    elif source == 'rest':
        target = 'task'

    if source == 'saccade':
        target = 'face'
    elif source == 'face':
        target = 'saccade'

    ds_merged_list = []
    conf_src = read_configuration(path, conf_file, source)
    conf_tar = read_configuration(path, conf_file, target)

    ##############################################
    ##############################################
    ##    conf_src['label_included'] = 'all'    ##
    ##    conf_src['label_dropped'] = 'none'    ##
    ##    conf_src['mean_samples'] = 'False'    ##
    ##############################################
    ##############################################

    for arg in kwargs:
        conf_src[arg] = kwargs[arg]
        conf_tar[arg] = kwargs[arg]

    data_path = conf_src['data_path']

    for subj in subjects:
        print '--------'
        try:
            ds_src = load_dataset(data_path, subj, source, **conf_src)
            ds_tar = load_dataset(data_path, subj, target, **conf_tar)
        except Exception as err:
            print err
            continue

        ds_src = detrend_dataset(ds_src, source, **conf_src)
        ds_tar = detrend_dataset(ds_tar, target, **conf_tar)

        if dim == 4:
            duration = np.min([e['duration'] for e in ds_src.a.events])
            ds_tar = build_events_ds(ds_tar, duration, overlap=duration - 1)
            ds_src = load_spatiotemporal_dataset(ds_src, duration=duration)

        print ds_src.samples.shape
        print ds_tar.samples.shape

        ds_src.sa['task'] = [source for s in range(ds_src.samples.shape[0])]
        ds_tar.sa['task'] = [target for s in range(ds_tar.samples.shape[0])]

        ds_merged = vstack((ds_src, ds_tar))
        ds_merged.a.update(ds_src.a)

        print ds_merged.sa.task

        ds_merged_list.append(ds_merged)
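get_merged_ds() stacks, per subject, the source and target datasets (rearranged into event-related samples when dim=4) and collects them in ds_merged_list, which the truncated snippet presumably returns. A hypothetical call:

# Hypothetical invocation; dim=4 builds spatiotemporal (event-related) samples before merging.
merged_list = get_merged_ds('/media/DATA/fmri/my_study',
                            ['subj01', 'subj02'],
                            'my_study.conf',
                            source='task',
                            dim=4)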
Example #16
def test_transfer_learning(path, subjects, analysis,  conf_file, source='task', \
                           analysis_type='single', calculateSimilarity='True', **kwargs):

    if source == 'task':
        target = 'rest'
    elif source == 'rest':
        target = 'task'

    if source == 'saccade':
        target = 'face'
    elif source == 'face':
        target = 'saccade'

    p = kwargs['p']
    ##############################################
    ##############################################
    ##    conf_src['label_included'] = 'all'    ##
    ##    conf_src['label_dropped'] = 'none'    ##
    ##    conf_src['mean_samples'] = 'False'    ##
    ##############################################
    ##############################################

    if analysis_type == 'group':

        if path.__class__ == conf_file.__class__ == list:
            ds_src, _, conf_src = sources_merged_ds(path, subjects, conf_file,
                                                    source, **kwargs)
            ds_tar, subjects, conf_tar = sources_merged_ds(
                path, subjects, conf_file, target, **kwargs)

            conf_src['permutations'] = 0
            conf_tar['permutations'] = 0
        else:
            print 'In group analysis, path, subjects and conf_file must be lists: ' \
                  'check configuration file and/or parameters!'

            return 0

    else:

        conf_src = read_configuration(path, conf_file, source)
        conf_tar = read_configuration(path, conf_file, target)

        for arg in kwargs:
            conf_src[arg] = kwargs[arg]
            conf_tar[arg] = kwargs[arg]

        data_path = conf_src['data_path']

    conf_src['analysis_type'] = 'transfer_learning'
    conf_src['analysis_task'] = source
    conf_src['analysis_func'] = analysis.func_name

    for arg in conf_src:
        if arg == 'map_list':
            map_list = conf_src[arg].split(',')
        if arg == 'p_dist':
            p = float(conf_src[arg])
            print p

    total_results = dict()

    summarizers = [
        rs.CrossDecodingSummarizer(),
        rs.SimilaritySummarizer(),
        rs.DecodingSummarizer(),
        rs.SignalDetectionSummarizer(),
    ]

    savers = [
        rs.CrossDecodingSaver(),
        rs.SimilaritySaver(),
        rs.DecodingSaver(),
        rs.SignalDetectionSaver(),
    ]

    collection = rs.ResultsCollection(conf_src, path, summarizers)

    for subj in subjects:
        print '-------------------'

        if (len(subjects) > 1) or (subj != 'group'):
            try:
                ds_src = load_dataset(data_path, subj, source, **conf_src)
                ds_tar = load_dataset(data_path, subj, target, **conf_tar)
            except Exception as err:
                print err
                continue

        # Evaluate if is correct to do further normalization after merging two ds.
        ds_src = detrend_dataset(ds_src, source, **conf_src)
        ds_tar = detrend_dataset(ds_tar, target, **conf_tar)

        if conf_src['label_included'] == 'all' and \
           conf_src['label_dropped'] != 'fixation':
            print 'Balancing dataset...'
            ds_src = balance_dataset_timewise(ds_src, 'fixation')

        # Make cross-decoding
        r = transfer_learning(ds_src, ds_tar, analysis, **conf_src)

        # Now we have cross-decoding results we could process it
        pred = np.array(r['classifier'].ca.predictions)

        targets = r['targets']

        c_m = ConfusionMatrix(predictions=pred, targets=targets)
        c_m.compute()
        r['confusion_target'] = c_m

        c_new = cross_decoding_confusion(pred, targets, map_list)
        r['confusion_total'] = c_new

        print c_new

        # Similarity Analysis
        if calculateSimilarity == 'True':
            if 'p' not in locals():
                print 'Warning: no p-value available for the similarity analysis'

            mahala_data = similarity_measure(r['ds_tar'],
                                             r['ds_src'],
                                             r,
                                             p_value=p,
                                             method='mahalanobis')

            #r['mahalanobis_similarity'] = mahala_data
            for k_, v_ in mahala_data.items():
                r[k_] = v_
            r['confusion_mahala'] = mahala_data['confusion_mahalanobis']

        else:
            #r['mahalanobis_similarity'] = []
            r['confusion_mahala'] = 'Null'

        # Signal Detection Theory Analysis
        sdt_res = signal_detection_measures(c_new)

        for k_, v_ in sdt_res.items():
            r[k_] = v_
            '''
            Same code of:
        
            r['d_prime'] = d_prime
            r['beta'] = beta
            r['c'] = c
            '''

        total_results[subj] = r
        subj_result = rs.SubjectResult(subj, r, savers=savers)

        collection.add(subj_result)
Example #17
def get_merged_ds(path, subjects, conf_file, source='task', dim=3, **kwargs):
    
    
    # Put source and target in the configuration file!
    if source == 'task':
        target = 'rest'
    elif source == 'rest':
        target = 'task'

    if source == 'saccade':
        target = 'face'
    elif source == 'face':
        target = 'saccade'
    
    ds_merged_list = []
    conf_src = read_configuration(path, conf_file, source)
    conf_tar = read_configuration(path, conf_file, target)
    
    ##############################################    
    ##############################################
    ##    conf_src['label_included'] = 'all'    ##   
    ##    conf_src['label_dropped'] = 'none'    ##
    ##    conf_src['mean_samples'] = 'False'    ##
    ##############################################
    ##############################################
    
    for arg in kwargs:
        conf_src[arg] = kwargs[arg]
        conf_tar[arg] = kwargs[arg]
    
    data_path = conf_src['data_path']
    
    for subj in subjects:
        print '--------'
        try:
            ds_src = load_dataset(data_path, subj, source, **conf_src)
            ds_tar = load_dataset(data_path, subj, target, **conf_tar)
        except Exception as err:
            print err
            continue
        
        ds_src = detrend_dataset(ds_src, source, **conf_src)
        ds_tar = detrend_dataset(ds_tar, target, **conf_tar) 

        if dim == 4:    
            duration = np.min([e['duration'] for e in ds_src.a.events])      
            ds_tar = build_events_ds(ds_tar, duration, overlap=duration-1)
            ds_src = load_spatiotemporal_dataset(ds_src, duration=duration)
        
        print ds_src.samples.shape
        print ds_tar.samples.shape 
        
        ds_src.sa['task'] = [source for s in range(ds_src.samples.shape[0])]
        ds_tar.sa['task'] = [target for s in range(ds_tar.samples.shape[0])]
        
        ds_merged = vstack((ds_src, ds_tar))
        ds_merged.a.update(ds_src.a)
        
        print ds_merged.sa.task
        
        ds_merged_list.append(ds_merged)
Example #18
conf = read_configuration(path, 'memory.conf', 'BETA_MVPA')

conf['analysis_type'] = 'searchlight'
conf['analysis_task'] = 'memory'

summarizers = [rs.SearchlightSummarizer()]
savers = [rs.SearchlightSaver()]
collection = rs.ResultsCollection(conf, path, summarizers)


for subj in subjects:
    
    data_type = 'BETA_MVPA'
    conf = read_configuration(path, 'memory.conf', data_type)
    data_path = conf['data_path']
    ds_original = load_dataset(data_path, subj, data_type, **conf)

    task_ = 'memory'
    ev = '3'
            
    print '---------------------------------'
    
    ev = str(ev)
    
    ds = ds_original.copy()
    '''
    # label managing
    if task_ == 'memory':
        field_ = 'stim'
        conf['label_dropped'] = 'F0'
        conf['label_included'] = 'N'+ev+','+'O'+ev
Example #19
conf = read_configuration(path, conf_file, task)
conf['analysis_type'] = 'searchlight'
conf['analysis_task'] = 'temporal_residual'
summarizers = [rs.SearchlightSummarizer()]
#savers = [rs.DecodingSaver(fields=['classifier', 'stats'])]
savers = [rs.SearchlightSaver()]
result = rs.ResultsCollection(conf, path, summarizers)

for subj in subjects[:1]:

    conf = read_configuration(path, conf_file, task)

    # Load dataset
    ds_orig = load_dataset(path,
                           subj,
                           task,
                           roi_labels={'conjunction': mask},
                           **conf)

    # Change the target
    ds_orig = change_target(ds_orig, target)

    # Process dataset
    ds_orig = detrend_dataset(ds_orig, task, **conf)

    # Balance dataset
    balancer = balance_dataset(balancer__count=5, **conf)

    for ev in evidences:
        for slice_condition in frames:
Example #20
conf['analysis_type'] = 'searchlight'
conf['analysis_task'] = 'memory_regression_sample_wise'
conf['mask_area'] = 'total'
task_ = 'BETA_MVPA'
subj = '110929anngio'

partitioners = [NGroupPartitioner(k) for k in np.arange(2, 5)]
result_dict = dict()

summarizers = [rs.SearchlightSummarizer()]
savers = [rs.SearchlightSaver()]
collection = rs.ResultsCollection(conf, path, summarizers)


for i, partitioner in enumerate(partitioners):
    ds = load_dataset(path, subj, task_, **conf)
    
    ds.sa['memory_evidence'] = np.ones_like(ds.targets, dtype=int)
    ds.sa.memory_evidence[ds.sa.stim == 'N'] = -1
    ds.sa.memory_evidence = ds.sa.memory_evidence * ds.sa.evidence
    
    ds.targets = [str(ii) for ii in ds.sa.memory_evidence]
    
    conf['label_dropped'] = '0'
    conf['label_included'] = ','.join([str(n) for n in np.array([-5,-3,-1,1,3,5])])
    
    ds = preprocess_dataset(ds, task_, **conf)
    ds.targets = np.float_(ds.targets)
    ds.targets = (ds.targets - np.mean(ds.targets))/np.std(ds.targets)
    cv = CrossValidation(slsim.RegressionMeasure(), 
                            partitioner,