Example #1
def test_spatiotemporal(path, subjects, conf_file, type_, **kwargs):

    conf = read_configuration(path, conf_file, type_)

    # keyword arguments override values read from the configuration file
    balance = kwargs.get('balance', False)
    for arg in kwargs:
        conf[arg] = kwargs[arg]

    total_results = dict()

    data_path = conf['data_path']
    conf['analysis_type'] = 'spatiotemporal'
    conf['analysis_task'] = type_

    for subj in subjects:
        try:
            ds = load_dataset(data_path, subj, type_, **conf)
        except Exception as err:
            print err
            continue
        ds = detrend_dataset(ds, type_, **conf)

        # optionally balance the dataset timewise on the 'fixation' condition
        if balance and conf['label_included'] == 'all' \
                and conf['label_dropped'] == 'none':
            ds = balance_dataset_timewise(ds, 'fixation')

        r = spatiotemporal(ds, **conf)

        total_results[subj] = r
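A minimal usage sketch for test_spatiotemporal above. The study path, subject list and configuration file name are hypothetical placeholders, and balance=True merely exercises the optional keyword picked up from **kwargs.

# Hypothetical call: path, subjects and conf file are placeholders.
# Per-subject results are collected in the function's total_results dict.
test_spatiotemporal('/path/to/study',
                    ['subj01', 'subj02'],
                    'analysis.conf',
                    'task',
                    balance=True)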
Example #2
def test_spatiotemporal(path, subjects, conf_file, type_, **kwargs):
    
    conf = read_configuration(path, conf_file, type_)
    
    # keyword arguments override values read from the configuration file
    balance = kwargs.get('balance', False)
    for arg in kwargs:
        conf[arg] = kwargs[arg]
    
    total_results = dict()
    
    data_path = conf['data_path']
    conf['analysis_type'] = 'spatiotemporal'
    conf['analysis_task'] = type_
    
    for subj in subjects:
        try:
            ds = load_dataset(data_path, subj, type_, **conf)
        except Exception as err:
            print err
            continue
        ds = detrend_dataset(ds, type_, **conf)
        
        # optionally balance the dataset timewise on the 'fixation' condition
        if balance and conf['label_included'] == 'all' \
                and conf['label_dropped'] == 'none':
            ds = balance_dataset_timewise(ds, 'fixation')
        
        r = spatiotemporal(ds, **conf)
        
        total_results[subj] = r
Example #3
    def pre_operations(self, **options):
        
        # dictionary of conditions used
        self.conditions = {k: options[k] for k in self._condition_names}
        logger.debug(self._default_conf.keys())      

        # Change default options on the fly
        # (use with care: this overwrites the object's attributes)
        for k, v in options.iteritems():
            logger.debug(k)
            if k in self._default_conf.keys():
                setattr(self, "_"+k, v)
            else:
                setattr(self, k, v)


        ds = self.ds_orig.copy()
        
        ds = change_target(ds, options['task'])
        
        ds = detrend_dataset(ds, 
                            self._data_type, 
                            **self._conf)
        

        ds = slice_dataset(ds, options['condition'])
        
        ds = normalize_dataset(ds, **self._conf)

        return ds
Example #4
def test_clustering(path,
                    subjects,
                    analysis,
                    conf_file,
                    source='task',
                    **kwargs):

    if source == 'task':
        target = 'rest'
    else:
        target = 'task'

    conf_src = read_configuration(path, conf_file, source)
    conf_tar = read_configuration(path, conf_file, target)

    ##############################################
    conf_src['label_included'] = 'all'
    conf_src['label_dropped'] = 'none'
    conf_src['mean_samples'] = 'True'
    ##############################################
    for arg in kwargs:
        conf_src[arg] = kwargs[arg]
        conf_tar[arg] = kwargs[arg]

    total_results = dict()

    data_path = conf_src['data_path']

    for subj in subjects:
        try:
            ds_src = load_dataset(data_path, subj, source, **conf_src)
            ds_tar = load_dataset(data_path, subj, target, **conf_tar)
        except Exception as err:
            print err
            continue

        ds_src = detrend_dataset(ds_src, source, **conf_src)
        ds_tar = detrend_dataset(ds_tar, target, **conf_tar)

        if conf_src['label_included'] == 'all' and \
                conf_src['label_dropped'] != 'fixation':
            ds_src = balance_dataset_timewise(ds_src, 'fixation')

        r = clustering_analysis(ds_src, ds_tar, analysis, **kwargs)

        total_results[subj] = r
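A hedged usage sketch for test_clustering above. Every value is a placeholder; in particular the analysis argument is forwarded untouched to clustering_analysis(), whose expected type is not visible in this snippet.

# Hypothetical call; `my_analysis` stands in for whatever object
# clustering_analysis() expects, the other values are placeholders.
my_analysis = 'kmeans'
test_clustering('/path/to/study',
                ['subj01', 'subj02'],
                my_analysis,
                'analysis.conf',
                source='task')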
Example #5
def test_clustering(path, subjects, analysis, conf_file, source='task', **kwargs):    
    
    if source == 'task':
        target = 'rest'
    else:
        target = 'task'
     
    conf_src = read_configuration(path, conf_file, source)
    conf_tar = read_configuration(path, conf_file, target)
    
    ##############################################
    conf_src['label_included'] = 'all'
    conf_src['label_dropped'] = 'none'
    conf_src['mean_samples'] = 'True'
    ##############################################
    for arg in kwargs:
        conf_src[arg] = kwargs[arg]
        conf_tar[arg] = kwargs[arg]
        
    total_results = dict()
    
    data_path = conf_src['data_path']
    
    for subj in subjects:
        try:
            ds_src = load_dataset(data_path, subj, source, **conf_src)
            ds_tar = load_dataset(data_path, subj, target, **conf_tar)
        except Exception as err:
            print err
            continue
        
        ds_src = detrend_dataset(ds_src, source, **conf_src)
        ds_tar = detrend_dataset(ds_tar, target, **conf_tar) 
        
        if conf_src['label_included'] == 'all' and \
                conf_src['label_dropped'] != 'fixation':
            ds_src = balance_dataset_timewise(ds_src, 'fixation')
        
        r = clustering_analysis(ds_src, ds_tar, analysis, **kwargs)
        
        total_results[subj] = r
Example #6
def load_subjectwise_ds(path, 
                       subjects, 
                       conf_file, 
                       task, 
                       extra_sa=None,  
                       **kwargs):
    """
    extra_sa: dict or None, sample attributes added to the final dataset, they should be
    the same length as the subjects.
    
    subject: either a list of subjects or a csv file
    
    """
    
    conf = read_configuration(os.path.join(path, conf_file), task)
           
    conf.update(kwargs)
    logger.debug(conf)
    
    data_path = conf['data_path']
    
    
    if isinstance(subjects, str):        
        subjects, extra_sa = load_subject_file(subjects)
        
    
    logger.info('Merging subjects from '+data_path)
    
    for i, subj in enumerate(subjects):
        
        ds = load_dataset(data_path, subj, task, **conf)
        
        ds = detrend_dataset(ds, task, **conf)
        ds = normalize_dataset(ds, **conf)
        
        # add extra sample attributes
        if extra_sa is not None:
            for k, v in extra_sa.iteritems():
                if len(v) == len(subjects):
                    ds.sa[k] = [v[i] for _ in range(ds.samples.shape[0])]
        
        
        # First subject
        if i == 0:
            ds_merged = ds.copy()
        else:
            ds_merged = vstack((ds_merged, ds))
            ds_merged.a.update(ds.a)
            
        
        del ds

    return ds_merged, ['group'], conf
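A usage sketch for load_subjectwise_ds following its docstring. Paths and subject names are placeholders, and the 'group' key in extra_sa is only an assumption that mirrors the ['group'] value returned by the function.

# Hypothetical call: paths and subject names are placeholders;
# each extra_sa value has the same length as the subject list.
ds_merged, attrs, conf = load_subjectwise_ds('/path/to/study',
                                             ['subj01', 'subj02'],
                                             'analysis.conf',
                                             'task',
                                             extra_sa={'group': ['control', 'patient']})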
Example #7
def _test_spatial(path, subjects, conf_file, type_, **kwargs):
    warnings.warn("Deprecated use test_spatial.", DeprecationWarning)

    conf = read_configuration(path, conf_file, type_)
    conf['analysis_type'] = 'spatial'
    conf['analysis_task'] = type_

    for arg in kwargs:
        conf[arg] = kwargs[arg]

    total_results = dict()

    data_path = conf['data_path']

    summarizers = [rs.DecodingSummarizer()]
    savers = [rs.DecodingSaver()]
    result = rs.ResultsCollection(conf, path, summarizers)

    for subj in subjects:
        print '------'
        try:
            ds = load_dataset(data_path, subj, type_, **conf)
        except Exception as err:
            print err
            continue

        ds = detrend_dataset(ds, type_, **conf)

        balancer = balance_dataset(**conf)

        for i, ds_ in enumerate(balancer.generate(ds)):
            logger.info("Balanced dataset n. %d" % (i + 1))
            subj_ = "%s_%03d" % (subj, i + 1)

            ds_ = normalize_dataset(ds_, **conf)

            logger.info(ds_.summary())

            r = spatial(ds_, **conf)
            total_results[subj_] = r

            subj_result = rs.SubjectResult(subj_, r, savers)

            result.add(subj_result)

    #result.save()
    result.summarize()

    conf['classes'] = np.unique(ds.targets)
    #save_results()
    #save_results(path, total_results, conf)

    return total_results, subj_result
Example #8
def _test_spatial(path, subjects, conf_file, type_, **kwargs):
    warnings.warn("Deprecated use test_spatial.", DeprecationWarning)
    
    conf = read_configuration(path, conf_file, type_)
    conf['analysis_type'] = 'spatial'
    conf['analysis_task'] = type_
    
    for arg in kwargs:
        conf[arg] = kwargs[arg]
    
    total_results = dict()
    
    data_path = conf['data_path']
        
    summarizers = [rs.DecodingSummarizer()]
    savers = [rs.DecodingSaver()]
    result = rs.ResultsCollection(conf, path, summarizers)
    
    for subj in subjects:
        print '------'
        try:
            ds = load_dataset(data_path, subj, type_, **conf)
        except Exception as err:
            print err
            continue
        
        
        ds = detrend_dataset(ds, type_, **conf)
        
        balancer = balance_dataset(**conf)
        
        for i, ds_ in enumerate(balancer.generate(ds)):
            logger.info("Balanced dataset n. %d" % (i+1))
            subj_ = "%s_%03d" % (subj, i+1)
            
            ds_ = normalize_dataset(ds_, **conf)
            
            logger.info(ds_.summary())
            
            r = spatial(ds_, **conf)
            total_results[subj_] = r
        
            subj_result = rs.SubjectResult(subj_, r, savers)
        
            result.add(subj_result)
Example #9
def test_searchlight(path, subjects, conf_file, type_, **kwargs):
    
    
    conf = read_configuration(path, conf_file, type_)
    
    for arg in kwargs:
        conf[arg] = kwargs[arg]
    
    
    conf['analysis_type'] = 'searchlight'
    conf['analysis_task'] = type_
    
    total_results = dict()
    data_path = conf['data_path']
    
    #
    summarizers = [rs.SearchlightSummarizer()]
    savers = [rs.SearchlightSaver()]
    result = rs.ResultsCollection(conf, path, summarizers)
    #
    
    for subj in subjects:
        
        ds = load_dataset(data_path, subj, type_, **conf)
        ds = detrend_dataset(ds, type_, **conf)
        
        r = searchlight(ds, **kwargs)
        
        subj_result = rs.SubjectResult(subj, r, savers)
        total_results[subj] = r
        
        result.add(subj_result)
    
    result.summarize()
    

    conf['classes'] = np.unique(ds.targets)  
    #save_results()
    #save_results(path, total_results, conf)
    
    return result, r, subj_result
Example #10
def test_searchlight(path, subjects, conf_file, type_, **kwargs):

    conf = read_configuration(path, conf_file, type_)

    for arg in kwargs:
        conf[arg] = kwargs[arg]

    conf['analysis_type'] = 'searchlight'
    conf['analysis_task'] = type_

    total_results = dict()
    data_path = conf['data_path']

    #
    summarizers = [rs.SearchlightSummarizer()]
    savers = [rs.SearchlightSaver()]
    result = rs.ResultsCollection(conf, path, summarizers)
    #

    for subj in subjects:

        ds = load_dataset(data_path, subj, type_, **conf)
        ds = detrend_dataset(ds, type_, **conf)

        r = searchlight(ds, **kwargs)

        subj_result = rs.SubjectResult(subj, r, savers)
        total_results[subj] = r

        result.add(subj_result)

    result.summarize()

    conf['classes'] = np.unique(ds.targets)
    #save_results()
    #save_results(path, total_results, conf)

    return result, r, subj_result
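A usage sketch for test_searchlight above. All values are placeholders, and the radius keyword is only an assumption about what the underlying searchlight() call accepts, since **kwargs is forwarded to it unchanged.

# Hypothetical call; `radius` is an assumed keyword forwarded verbatim
# to searchlight() through **kwargs.
result, r, subj_result = test_searchlight('/path/to/study',
                                          ['subj01', 'subj02'],
                                          'analysis.conf',
                                          'task',
                                          radius=3)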
Example #11
def _group_transfer_learning(path, subjects, analysis,  conf_file, source='task', analysis_type='single', **kwargs):
    
    if source == 'task':
        target = 'rest'
    elif source == 'rest':
        target = 'task'

    if source == 'saccade':
        target = 'face'
    elif source == 'face':
        target = 'saccade'
    
   
    ##############################################    
    ##############################################
    ##    conf_src['label_included'] = 'all'    ##   
    ##    conf_src['label_dropped'] = 'none'    ##
    ##    conf_src['mean_samples'] = 'False'    ##
    ##############################################
    ##############################################

    if analysis_type == 'group':
        
        if isinstance(path, list) and isinstance(conf_file, list):
            ds_src, s, conf_src = sources_merged_ds(path, subjects, conf_file, source, **kwargs)
            
            conf_src['permutations'] = 0
            
        else:
            print 'In group analysis, path, subjects and conf_file must be lists: ' \
                  'check the configuration file and/or parameters!'
            return 0
    
    else:
        
        conf_src = read_configuration(path, conf_file, source)
        
    
    
    for arg in conf_src:
        if arg == 'map_list':
            map_list = conf_src[arg].split(',')
    
    
    r_group = spatial(ds_src, **conf_src)
    
    total_results = dict()
    total_results['group'] = r_group
    
    clf = r_group['classifier']
    
    for subj_, conf_, path_ in zip(subjects, conf_file, path):
        for subj in subj_:
            print '-----------'
            r = dict()
            if len(subj_) > 1:
                conf_tar = read_configuration(path_, conf_, target)
        
                for arg in kwargs:
                    
                    conf_tar[arg] = kwargs[arg]
            
            
                data_path = conf_tar['data_path']
                try:
                    ds_tar = load_dataset(data_path, subj, target, **conf_tar)
                except Exception as err:
                    print err
                    continue
    
            
            ds_tar = detrend_dataset(ds_tar, target, **conf_tar) 
    
            if conf_src['label_included'] == 'all' and \
               conf_src['label_dropped'] != 'fixation':
                print 'Balancing dataset...'
                ds_src = balance_dataset_timewise(ds_src, 'fixation')
            
            predictions = clf.predict(ds_tar)
           
            pred = np.array(predictions)
            targets = ds_tar.targets
            
            
            for arg in r_group.keys():
                r[arg] = copy.copy(r_group[arg])
            
            r['targets'] = targets
            r['predictions'] = predictions
            
            r['fclf'] = clf
            
            c_m = ConfusionMatrix(predictions=pred, targets=targets)
            c_m.compute()
            r['confusion_target'] = c_m
            print c_m
            
            tr_pred = similarity_measure_mahalanobis(ds_tar, ds_src, r)
            r['mahalanobis_similarity'] = tr_pred
            
            #print tr_pred
            
            c_mat_mahala = ConfusionMatrix(predictions=tr_pred.T[1], targets=tr_pred.T[0])
            c_mat_mahala.compute()
            r['confusion_mahala'] = c_mat_mahala
            
            d_prime, beta, c, c_new = signal_detection_measures(pred, targets, map_list)
            r['d_prime'] = d_prime
            print d_prime
            r['beta'] = beta
            r['c'] = c
            r['confusion_total'] = c_new
            
            '''
            d_prime_maha, c_new_maha = d_prime_statistics(tr_pred.T[1], tr_pred.T[0], map_list)
            r['d_prime_maha'] = d_prime_maha
            r['confusion_tot_maha'] = c_new_maha
            '''
            
            total_results[subj] = r
Example #12
def get_merged_ds(path, subjects, conf_file, source='task', dim=3, **kwargs):
    
    
    # TODO: put source and target in the configuration file!
    if source == 'task':
        target = 'rest'
    elif source == 'rest':
        target = 'task'

    if source == 'saccade':
        target = 'face'
    elif source == 'face':
        target = 'saccade'
    
    ds_merged_list = []
    conf_src = read_configuration(path, conf_file, source)
    conf_tar = read_configuration(path, conf_file, target)
    
    ##############################################    
    ##############################################
    ##    conf_src['label_included'] = 'all'    ##   
    ##    conf_src['label_dropped'] = 'none'    ##
    ##    conf_src['mean_samples'] = 'False'    ##
    ##############################################
    ##############################################
    
    for arg in kwargs:
        conf_src[arg] = kwargs[arg]
        conf_tar[arg] = kwargs[arg]
    
    data_path = conf_src['data_path']
    
    for subj in subjects:
        print '--------'
        try:
            ds_src = load_dataset(data_path, subj, source, **conf_src)
            ds_tar = load_dataset(data_path, subj, target, **conf_tar)
        except Exception as err:
            print err
            continue
        
        ds_src = detrend_dataset(ds_src, source, **conf_src)
        ds_tar = detrend_dataset(ds_tar, target, **conf_tar) 

        if dim == 4:    
            duration = np.min([e['duration'] for e in ds_src.a.events])      
            ds_tar = build_events_ds(ds_tar, duration, overlap=duration-1)
            ds_src = load_spatiotemporal_dataset(ds_src, duration=duration)
        
        print ds_src.samples.shape
        print ds_tar.samples.shape 
        
        ds_src.sa['task'] = [source for s in range(ds_src.samples.shape[0])]
        ds_tar.sa['task'] = [target for s in range(ds_tar.samples.shape[0])]
        
        ds_merged = vstack((ds_src, ds_tar))
        ds_merged.a.update(ds_src.a)
        
        print ds_merged.sa.task
        
        ds_merged_list.append(ds_merged)
        '''
Example #13
def test_transfer_learning(path, subjects, analysis,  conf_file, source='task', \
                           analysis_type='single', calculateSimilarity='True', **kwargs):

    if source == 'task':
        target = 'rest'
    elif source == 'rest':
        target = 'task'

    if source == 'saccade':
        target = 'face'
    elif source == 'face':
        target = 'saccade'

    p = kwargs['p']
    ##############################################
    ##############################################
    ##    conf_src['label_included'] = 'all'    ##
    ##    conf_src['label_dropped'] = 'none'    ##
    ##    conf_src['mean_samples'] = 'False'    ##
    ##############################################
    ##############################################

    if analysis_type == 'group':

        if isinstance(path, list) and isinstance(conf_file, list):
            ds_src, _, conf_src = sources_merged_ds(path, subjects, conf_file,
                                                    source, **kwargs)
            ds_tar, subjects, conf_tar = sources_merged_ds(
                path, subjects, conf_file, target, **kwargs)

            conf_src['permutations'] = 0
            conf_tar['permutations'] = 0
        else:
            print 'In group analysis, path, subjects and conf_file must be lists: ' \
                  'check the configuration file and/or parameters!'

            return 0

    else:

        conf_src = read_configuration(path, conf_file, source)
        conf_tar = read_configuration(path, conf_file, target)

        for arg in kwargs:
            conf_src[arg] = kwargs[arg]
            conf_tar[arg] = kwargs[arg]

        data_path = conf_src['data_path']

    conf_src['analysis_type'] = 'transfer_learning'
    conf_src['analysis_task'] = source
    conf_src['analysis_func'] = analysis.func_name

    for arg in conf_src:
        if arg == 'map_list':
            map_list = conf_src[arg].split(',')
        if arg == 'p_dist':
            p = float(conf_src[arg])
            print p

    total_results = dict()

    summarizers = [
        rs.CrossDecodingSummarizer(),
        rs.SimilaritySummarizer(),
        rs.DecodingSummarizer(),
        rs.SignalDetectionSummarizer(),
    ]

    savers = [
        rs.CrossDecodingSaver(),
        rs.SimilaritySaver(),
        rs.DecodingSaver(),
        rs.SignalDetectionSaver(),
    ]

    collection = rs.ResultsCollection(conf_src, path, summarizers)

    for subj in subjects:
        print '-------------------'

        if (len(subjects) > 1) or (subj != 'group'):
            try:
                ds_src = load_dataset(data_path, subj, source, **conf_src)
                ds_tar = load_dataset(data_path, subj, target, **conf_tar)
            except Exception as err:
                print err
                continue

        # Evaluate whether it is correct to do further normalization after merging the two datasets.
        ds_src = detrend_dataset(ds_src, source, **conf_src)
        ds_tar = detrend_dataset(ds_tar, target, **conf_tar)

        if conf_src['label_included'] == 'all' and \
           conf_src['label_dropped'] != 'fixation':
            print 'Balancing dataset...'
            ds_src = balance_dataset_timewise(ds_src, 'fixation')

        # Make cross-decoding
        r = transfer_learning(ds_src, ds_tar, analysis, **conf_src)

        # Now we have cross-decoding results we could process it
        pred = np.array(r['classifier'].ca.predictions)

        targets = r['targets']

        c_m = ConfusionMatrix(predictions=pred, targets=targets)
        c_m.compute()
        r['confusion_target'] = c_m

        c_new = cross_decoding_confusion(pred, targets, map_list)
        r['confusion_total'] = c_new

        print c_new

        # Similarity Analysis
        if calculateSimilarity == 'True':
            if 'p' not in locals():
                print 'Warning: no p-value defined for the similarity analysis'

            mahala_data = similarity_measure(r['ds_tar'],
                                             r['ds_src'],
                                             r,
                                             p_value=p,
                                             method='mahalanobis')

            #r['mahalanobis_similarity'] = mahala_data
            for k_, v_ in mahala_data.items():
                r[k_] = v_
            r['confusion_mahala'] = mahala_data['confusion_mahalanobis']

        else:
            #r['mahalanobis_similarity'] = []
            r['confusion_mahala'] = 'Null'

        # Signal Detection Theory Analysis
        sdt_res = signal_detection_measures(c_new)

        for k_, v_ in sdt_res.items():
            r[k_] = v_
            '''
            Equivalent to:
        
            r['d_prime'] = d_prime
            r['beta'] = beta
            r['c'] = c
            '''

        total_results[subj] = r
        subj_result = rs.SubjectResult(subj, r, savers=savers)

        collection.add(subj_result)
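A hedged usage sketch for test_transfer_learning above. Everything is a placeholder; note that kwargs['p'] is read unconditionally, so a p keyword must always be supplied, and analysis is expected to be a function (its func_name is stored in the configuration), so spatial from this module is used here for illustration.

# Hypothetical call; `spatial` (defined elsewhere in this module) stands in
# for the analysis function, and `p` is required because kwargs['p'] is read.
test_transfer_learning('/path/to/study',
                       ['subj01', 'subj02'],
                       spatial,
                       'analysis.conf',
                       source='task',
                       p=0.05)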
Example #14
def test_transfer_learning(path, subjects, analysis,  conf_file, source='task', \
                           analysis_type='single', calculateSimilarity='True', **kwargs):
    
    if source == 'task':
        target = 'rest'
    elif source == 'rest':
        target = 'task'

    if source == 'saccade':
        target = 'face'
    elif source == 'face':
        target = 'saccade'
    
    p = kwargs['p']
    ##############################################    
    ##############################################
    ##    conf_src['label_included'] = 'all'    ##   
    ##    conf_src['label_dropped'] = 'none'    ##
    ##    conf_src['mean_samples'] = 'False'    ##
    ##############################################
    ##############################################

    if analysis_type == 'group':
        
        if isinstance(path, list) and isinstance(conf_file, list):
            ds_src, _, conf_src = sources_merged_ds(path, subjects, conf_file, source, **kwargs)
            ds_tar, subjects, conf_tar = sources_merged_ds(path, subjects, conf_file, target, **kwargs)
            
            conf_src['permutations'] = 0
            conf_tar['permutations'] = 0
        else:
            print 'In group analysis, path, subjects and conf_file must be lists: ' \
                  'check the configuration file and/or parameters!'
            return 0
    
    else:
        
        conf_src = read_configuration(path, conf_file, source)
        conf_tar = read_configuration(path, conf_file, target)
    
        for arg in kwargs:
            conf_src[arg] = kwargs[arg]
            conf_tar[arg] = kwargs[arg]
        
        
        data_path = conf_src['data_path']
    
    
    conf_src['analysis_type'] = 'transfer_learning'
    conf_src['analysis_task'] = source
    conf_src['analysis_func'] = analysis.func_name
    
    
    for arg in conf_src:
        if arg == 'map_list':
            map_list = conf_src[arg].split(',')
        if arg == 'p_dist':
            p = float(conf_src[arg])
            print p
    
    
    total_results = dict()
    
    
    
    
    summarizers = [rs.CrossDecodingSummarizer(),
                   rs.SimilaritySummarizer(),
                   rs.DecodingSummarizer(),
                   rs.SignalDetectionSummarizer(),
                   ]
    
    savers = [rs.CrossDecodingSaver(),
                   rs.SimilaritySaver(),
                   rs.DecodingSaver(),
                   rs.SignalDetectionSaver(),
                   ]
    
    collection = rs.ResultsCollection(conf_src, path, summarizers)
    
    
    for subj in subjects:
        print '-------------------'
        
        if (len(subjects) > 1) or (subj != 'group'):
            try:
                ds_src = load_dataset(data_path, subj, source, **conf_src)
                ds_tar = load_dataset(data_path, subj, target, **conf_tar)
            except Exception as err:
                print err
                continue
         
        # Evaluate whether it is correct to do further normalization after merging the two datasets.
        ds_src = detrend_dataset(ds_src, source, **conf_src)
        ds_tar = detrend_dataset(ds_tar, target, **conf_tar) 
        
        if conf_src['label_included'] == 'all' and \
           conf_src['label_dropped'] != 'fixation':
            print 'Balancing dataset...'
            ds_src = balance_dataset_timewise(ds_src, 'fixation')
        
        # Make cross-decoding
        r = transfer_learning(ds_src, ds_tar, analysis, **conf_src)
        
        
        
        
        # Now we have cross-decoding results we could process it
        pred = np.array(r['classifier'].ca.predictions)

        targets = r['targets']
        
        c_m = ConfusionMatrix(predictions=pred, targets=targets)
        c_m.compute()
        r['confusion_target'] = c_m
        
        c_new = cross_decoding_confusion(pred, targets, map_list)
        r['confusion_total'] = c_new
        
        print c_new
        
        # Similarity Analysis
        if calculateSimilarity == 'True':
            if 'p' not in locals():
                print 'Warning: no p-value defined for the similarity analysis'

            
            mahala_data = similarity_measure(r['ds_tar'], r['ds_src'], 
                                             r, p_value=p, method='mahalanobis')
            
            #r['mahalanobis_similarity'] = mahala_data
            for k_,v_ in mahala_data.items():
                r[k_] = v_
            r['confusion_mahala'] = mahala_data['confusion_mahalanobis']
        
        else:
            #r['mahalanobis_similarity'] = []
            r['confusion_mahala'] = 'Null'
        
        # Signal Detection Theory Analysis
        sdt_res = signal_detection_measures(c_new)
        
        for k_,v_ in sdt_res.items():
            r[k_] = v_
            
            '''
            Equivalent to:
        
            r['d_prime'] = d_prime
            r['beta'] = beta
            r['c'] = c
            '''
        
        total_results[subj] = r
        subj_result = rs.SubjectResult(subj, r, savers=savers)
        
        collection.add(subj_result)
Example #15
for subj in subjects[:1]:

    conf = read_configuration(path, conf_file, task)

    # Load dataset
    ds_orig = load_dataset(path,
                           subj,
                           task,
                           roi_labels={'conjunction': mask},
                           **conf)

    # Change the target
    ds_orig = change_target(ds_orig, target)

    # Process dataset
    ds_orig = detrend_dataset(ds_orig, task, **conf)

    # Balance dataset
    balancer = balance_dataset(balancer__count=5, **conf)

    for ev in evidences:
        for slice_condition in frames:

            selected_variables.update({
                'frame': [1, 2, 3, 4, 5],
                'evidence': [ev]
            })

            print selected_variables
            ds = slice_dataset(ds_orig, selected_variables)
Example #16
def get_merged_ds(path, subjects, conf_file, source='task', dim=3, **kwargs):

    # TODO: put source and target in the configuration file!
    if source == 'task':
        target = 'rest'
    elif source == 'rest':
        target = 'task'

    if source == 'saccade':
        target = 'face'
    elif source == 'face':
        target = 'saccade'

    ds_merged_list = []
    conf_src = read_configuration(path, conf_file, source)
    conf_tar = read_configuration(path, conf_file, target)

    ##############################################
    ##############################################
    ##    conf_src['label_included'] = 'all'    ##
    ##    conf_src['label_dropped'] = 'none'    ##
    ##    conf_src['mean_samples'] = 'False'    ##
    ##############################################
    ##############################################

    for arg in kwargs:
        conf_src[arg] = kwargs[arg]
        conf_tar[arg] = kwargs[arg]

    data_path = conf_src['data_path']

    for subj in subjects:
        print '--------'
        try:
            ds_src = load_dataset(data_path, subj, source, **conf_src)
            ds_tar = load_dataset(data_path, subj, target, **conf_tar)
        except Exception as err:
            print err
            continue

        ds_src = detrend_dataset(ds_src, source, **conf_src)
        ds_tar = detrend_dataset(ds_tar, target, **conf_tar)

        if dim == 4:
            duration = np.min([e['duration'] for e in ds_src.a.events])
            ds_tar = build_events_ds(ds_tar, duration, overlap=duration - 1)
            ds_src = load_spatiotemporal_dataset(ds_src, duration=duration)

        print ds_src.samples.shape
        print ds_tar.samples.shape

        ds_src.sa['task'] = [source for s in range(ds_src.samples.shape[0])]
        ds_tar.sa['task'] = [target for s in range(ds_tar.samples.shape[0])]

        ds_merged = vstack((ds_src, ds_tar))
        ds_merged.a.update(ds_src.a)

        print ds_merged.sa.task

        ds_merged_list.append(ds_merged)
        '''
Example #17
def _group_transfer_learning(path,
                             subjects,
                             analysis,
                             conf_file,
                             source='task',
                             analysis_type='single',
                             **kwargs):

    if source == 'task':
        target = 'rest'
    elif source == 'rest':
        target = 'task'

    if source == 'saccade':
        target = 'face'
    elif source == 'face':
        target = 'saccade'

    ##############################################
    ##############################################
    ##    conf_src['label_included'] = 'all'    ##
    ##    conf_src['label_dropped'] = 'none'    ##
    ##    conf_src['mean_samples'] = 'False'    ##
    ##############################################
    ##############################################

    if analysis_type == 'group':

        if isinstance(path, list) and isinstance(conf_file, list):
            ds_src, s, conf_src = sources_merged_ds(path, subjects, conf_file,
                                                    source, **kwargs)

            conf_src['permutations'] = 0

        else:
            print 'In group analysis, path, subjects and conf_file must be lists: ' \
                  'check the configuration file and/or parameters!'

            return 0

    else:

        conf_src = read_configuration(path, conf_file, source)

    for arg in conf_src:
        if arg == 'map_list':
            map_list = conf_src[arg].split(',')

    r_group = spatial(ds_src, **conf_src)

    total_results = dict()
    total_results['group'] = r_group

    clf = r_group['classifier']

    for subj_, conf_, path_ in zip(subjects, conf_file, path):
        for subj in subj_:
            print '-----------'
            r = dict()
            if len(subj_) > 1:
                conf_tar = read_configuration(path_, conf_, target)

                for arg in kwargs:

                    conf_tar[arg] = kwargs[arg]

                data_path = conf_tar['data_path']
                try:
                    ds_tar = load_dataset(data_path, subj, target, **conf_tar)
                except Exception as err:
                    print err
                    continue

            ds_tar = detrend_dataset(ds_tar, target, **conf_tar)

            if conf_src['label_included'] == 'all' and \
               conf_src['label_dropped'] != 'fixation':
                print 'Balancing dataset...'
                ds_src = balance_dataset_timewise(ds_src, 'fixation')

            predictions = clf.predict(ds_tar)

            pred = np.array(predictions)
            targets = ds_tar.targets

            for arg in r_group.keys():
                r[arg] = copy.copy(r_group[arg])

            r['targets'] = targets
            r['predictions'] = predictions

            r['fclf'] = clf

            c_m = ConfusionMatrix(predictions=pred, targets=targets)
            c_m.compute()
            r['confusion_target'] = c_m
            print c_m

            tr_pred = similarity_measure_mahalanobis(ds_tar, ds_src, r)
            r['mahalanobis_similarity'] = tr_pred

            #print tr_pred

            c_mat_mahala = ConfusionMatrix(predictions=tr_pred.T[1],
                                           targets=tr_pred.T[0])
            c_mat_mahala.compute()
            r['confusion_mahala'] = c_mat_mahala

            d_prime, beta, c, c_new = signal_detection_measures(
                pred, targets, map_list)
            r['d_prime'] = d_prime
            print d_prime
            r['beta'] = beta
            r['c'] = c
            r['confusion_total'] = c_new
            '''
            d_prime_maha, c_new_maha = d_prime_statistics(tr_pred.T[1], tr_pred.T[0], map_list)
            r['d_prime_maha'] = d_prime_maha
            r['confusion_tot_maha'] = c_new_maha
            '''

            total_results[subj] = r