Exemplo n.º 1
0
def _test_spatial(path, subjects, conf_file, type_, **kwargs):
    warnings.warn("Deprecated use test_spatial.", DeprecationWarning)

    conf = read_configuration(path, conf_file, type_)
    conf['analysis_type'] = 'spatial'
    conf['analysis_task'] = type_

    for arg in kwargs:
        conf[arg] = kwargs[arg]

    total_results = dict()

    data_path = conf['data_path']

    summarizers = [rs.DecodingSummarizer()]
    savers = [rs.DecodingSaver()]
    result = rs.ResultsCollection(conf, path, summarizers)

    for subj in subjects:
        print '------'
        try:
            ds = load_dataset(data_path, subj, type_, **conf)
        except Exception as err:
            print err
            continue

        ds = detrend_dataset(ds, type_, **conf)

        balancer = balance_dataset(**conf)

        for i, ds_ in enumerate(balancer.generate(ds)):
            logger.info("Balanced dataset n. %d" % (i + 1))
            subj_ = "%s_%03d" % (subj, i + 1)

            ds_ = normalize_dataset(ds_, **conf)

            logger.info(ds_.summary())

            r = spatial(ds_, **conf)
            total_results[subj_] = r

            subj_result = rs.SubjectResult(subj_, r, savers)

            result.add(subj_result)

    #result.save()
    result.summarize()

    conf['classes'] = np.unique(ds.targets)
    #save_results()
    #save_results(path, total_results, conf)

    return total_results, subj_result
Exemplo n.º 2
0
def test_spatial(path, subjects, conf_file, type_, **kwargs):

    #conf = read_configuration(path, conf_file, type_)
    conf = read_json_configuration(path, conf_file, type_)

    for arg in kwargs:
        conf[arg] = kwargs[arg]

    total_results = dict()

    #data_path = conf['data_path']
    data_path = conf['path']['data_path']

    conf['analysis_type'] = 'spatial'
    conf['analysis_task'] = type_

    summarizers = [rs.DecodingSummarizer()]
    savers = [rs.DecodingSaver()]
    result = rs.ResultsCollection(conf, path, summarizers)

    for subj in subjects:
        print '------'
        try:
            ds = load_dataset(data_path, subj, type_, **conf)
        except Exception:
            continue

        #ds = preprocess_dataset(ds, type_, **conf)
        ds = get_preprocessing(**conf).transform(ds)

        r = spatial(ds, **conf)
        total_results[subj] = r

        subj_result = rs.SubjectResult(subj, r, savers)

        result.add(subj_result)

    #result.save()
    result.summarize()

    conf['classes'] = np.unique(ds.targets)
    #save_results()
    #save_results(path, total_results, conf)

    return total_results, subj_result
Exemplo n.º 3
0
def test_searchlight(path, subjects, conf_file, type_, **kwargs):
    """Run the searchlight analysis for a list of subjects.

    For each subject, loads and detrends the dataset and runs the
    ``searchlight`` analysis, collecting per-subject results in a
    ``rs.ResultsCollection``.

    Parameters
    ----------
    path : str
        Root directory passed to configuration reading and result saving.
    subjects : iterable of str
        Subject identifiers to analyze.
    conf_file : str
        Configuration file name, read with ``read_configuration``.
    type_ : str
        Analysis task label, stored in the configuration and used for loading.
    **kwargs :
        Extra configuration entries; they override values from the file and
        are also forwarded to ``searchlight``.

    Returns
    -------
    (rs.ResultsCollection, object or None, rs.SubjectResult or None)
        The collection, the last searchlight result, and the last subject
        result (the latter two are ``None`` if no subject was processed).
    """
    conf = read_configuration(path, conf_file, type_)

    # Keyword overrides take precedence over the file configuration.
    conf.update(kwargs)

    conf['analysis_type'] = 'searchlight'
    conf['analysis_task'] = type_

    total_results = dict()
    data_path = conf['data_path']

    summarizers = [rs.SearchlightSummarizer()]
    savers = [rs.SearchlightSaver()]
    result = rs.ResultsCollection(conf, path, summarizers)

    # Pre-bind names used after the loop; the original raised NameError
    # when `subjects` was empty.
    ds = None
    r = None
    subj_result = None

    for subj in subjects:

        ds = load_dataset(data_path, subj, type_, **conf)
        ds = detrend_dataset(ds, type_, **conf)

        # NOTE(review): the sibling drivers pass **conf here, but this one
        # forwards only **kwargs — confirm this difference is intentional.
        r = searchlight(ds, **kwargs)

        subj_result = rs.SubjectResult(subj, r, savers)
        total_results[subj] = r

        result.add(subj_result)

    result.summarize()

    # Classes come from the last loaded dataset, if any.
    if ds is not None:
        conf['classes'] = np.unique(ds.targets)
    #save_results()
    #save_results(path, total_results, conf)

    return result, r, subj_result
Exemplo n.º 4
0
                        ds_roi = ds_[:, ds_.fa[roi] == label]
                        
                        ds_roi = normalize_dataset(ds_roi, normalization='both', chunk_number=7, **conf)
                                                
                        r = spatial(ds_roi, **conf)
                        r = searchlight
                
                        subj_ = "%s_roi_%s_ev_%s_ds_%s_tf_%s" %(subj,
                                                                str(int(label)),
                                                                str(ev), 
                                                                str(i+1), 
                                                                str(slice_condition))
                        #print selected_variables
                        print subj_
                        
                        subj_result = rs.SubjectResult(subj_, r, savers)
            
                        result.add(subj_result)
                        
                """

                r = searchlight(ds_)
                subj_ = "%s_roi_%s_ev_%s_ds_%s_tf_%s" % (subj, str(
                    int(0)), str(ev), str(i + 1), str(slice_condition))

                subj_result = rs.SubjectResult(subj_, r, savers)
                result.add(subj_result)

# Run Analysis
result.summarize()
Exemplo n.º 5
0
     
     print '-------- '+str(p_+1)+' of 100 ------------'
     
     y_perm = permutation(range(len(ds.targets)))
     
     ds.targets = ds.targets[y_perm]
     
     sl_map = sl(ds)
     sl_map.samples *= -1
     sl_map.samples +=  1
 
     map_ = map2nifti(sl_map, imghdr=ds.a.imghdr)
     ni.save(map_, os.path.join(path, subj+'_permutation_'+str(p_+1)+'.nii.gz'))
     permut_.append(map_.get_data())
     
     
 permut_ = np.array(permut_).mean(4)
 permut_ = np.rollaxis(permut_, 0, 4)
 perm_map = ni.Nifti1Image(permut_, map_.get_affine())
 ni.save(perm_map, 
         os.path.join(path, subj+'_permutation_'+str(i)+'_'+task_+'_'+ev+'.nii.gz'))
     
 maps.append(permut_)
     
 name = "%s_%s_%s_evidence_%s_balance_ds_%s" %(subj, task_, data_type, str(ev), str(i+1))
 result_dict['radius'] = 3
 result_dict['map'] = perm_map
     
 subj_result = rs.SubjectResult(name, result_dict, savers)
 collection.add(subj_result)
     
Exemplo n.º 6
0
def test_transfer_learning(path, subjects, analysis,  conf_file, source='task', \
                           analysis_type='single', calculateSimilarity='True', **kwargs):

    if source == 'task':
        target = 'rest'
    else:
        if source == 'rest':
            target = 'task'

    if source == 'saccade':
        target = 'face'
    else:
        if source == 'face':
            target = 'saccade'

    p = kwargs['p']
    ##############################################
    ##############################################
    ##    conf_src['label_included'] = 'all'    ##
    ##    conf_src['label_dropped'] = 'none'    ##
    ##    conf_src['mean_samples'] = 'False'    ##
    ##############################################
    ##############################################

    if analysis_type == 'group':

        if path.__class__ == conf_file.__class__ == list:
            ds_src, _, conf_src = sources_merged_ds(path, subjects, conf_file,
                                                    source, **kwargs)
            ds_tar, subjects, conf_tar = sources_merged_ds(
                path, subjects, conf_file, target, **kwargs)

            conf_src['permutations'] = 0
            conf_tar['permutations'] = 0
        else:
            print 'In group analysis path, subjects and conf_file must be lists: \
                    Check configuration file and/or parameters!!'

            return 0

    else:

        conf_src = read_configuration(path, conf_file, source)
        conf_tar = read_configuration(path, conf_file, target)

        for arg in kwargs:
            conf_src[arg] = kwargs[arg]
            conf_tar[arg] = kwargs[arg]

        data_path = conf_src['data_path']

    conf_src['analysis_type'] = 'transfer_learning'
    conf_src['analysis_task'] = source
    conf_src['analysis_func'] = analysis.func_name

    for arg in conf_src:
        if arg == 'map_list':
            map_list = conf_src[arg].split(',')
        if arg == 'p_dist':
            p = float(conf_src[arg])
            print p

    total_results = dict()

    summarizers = [
        rs.CrossDecodingSummarizer(),
        rs.SimilaritySummarizer(),
        rs.DecodingSummarizer(),
        rs.SignalDetectionSummarizer(),
    ]

    savers = [
        rs.CrossDecodingSaver(),
        rs.SimilaritySaver(),
        rs.DecodingSaver(),
        rs.SignalDetectionSaver(),
    ]

    collection = rs.ResultsCollection(conf_src, path, summarizers)

    for subj in subjects:
        print '-------------------'

        if (len(subjects) > 1) or (subj != 'group'):
            try:
                ds_src = load_dataset(data_path, subj, source, **conf_src)
                ds_tar = load_dataset(data_path, subj, target, **conf_tar)
            except Exception, err:
                print err
                continue

        # Evaluate if is correct to do further normalization after merging two ds.
        ds_src = detrend_dataset(ds_src, source, **conf_src)
        ds_tar = detrend_dataset(ds_tar, target, **conf_tar)

        if conf_src['label_included'] == 'all' and \
           conf_src['label_dropped'] != 'fixation':
            print 'Balancing dataset...'
            ds_src = balance_dataset_timewise(ds_src, 'fixation')

        # Make cross-decoding
        r = transfer_learning(ds_src, ds_tar, analysis, **conf_src)

        # Now we have cross-decoding results we could process it
        pred = np.array(r['classifier'].ca.predictions)

        targets = r['targets']

        c_m = ConfusionMatrix(predictions=pred, targets=targets)
        c_m.compute()
        r['confusion_target'] = c_m

        c_new = cross_decoding_confusion(pred, targets, map_list)
        r['confusion_total'] = c_new

        print c_new

        # Similarity Analysis
        if calculateSimilarity == 'True':
            if 'p' not in locals():
                print 'Ciao!'

            mahala_data = similarity_measure(r['ds_tar'],
                                             r['ds_src'],
                                             r,
                                             p_value=p,
                                             method='mahalanobis')

            #r['mahalanobis_similarity'] = mahala_data
            for k_, v_ in mahala_data.items():
                r[k_] = v_
            r['confusion_mahala'] = mahala_data['confusion_mahalanobis']

        else:
            #r['mahalanobis_similarity'] = []
            r['confusion_mahala'] = 'Null'

        # Signal Detection Theory Analysis
        sdt_res = signal_detection_measures(c_new)

        for k_, v_ in sdt_res.items():
            r[k_] = v_
            '''
            Same code of:
        
            r['d_prime'] = d_prime
            r['beta'] = beta
            r['c'] = c
            '''

        total_results[subj] = r
        subj_result = rs.SubjectResult(subj, r, savers=savers)

        collection.add(subj_result)