Example #1
# Results container from a project-specific module (rs).
collection = rs.ResultsCollection(conf, path, summarizers)


for i, partitioner in enumerate(partitioners):
    ds = load_dataset(path, subj, task_, **conf)
    
    # Sign the evidence by stimulus type: 'N' (new) trials get -1,
    # all others +1, yielding a signed memory-evidence value.
    ds.sa['memory_evidence'] = np.ones_like(ds.targets, dtype=int)
    ds.sa.memory_evidence[ds.sa.stim == 'N'] = -1
    ds.sa.memory_evidence = ds.sa.memory_evidence * ds.sa.evidence

    ds.targets = [str(ii) for ii in ds.sa.memory_evidence]
    
    conf['label_dropped'] = '0'
    conf['label_included'] = ','.join(str(n) for n in (-5, -3, -1, 1, 3, 5))
    
    ds = preprocess_dataset(ds, task_, **conf)

    # Z-score the continuous targets for the regression measure.
    ds.targets = np.asarray(ds.targets, dtype=float)
    ds.targets = (ds.targets - np.mean(ds.targets)) / np.std(ds.targets)
    cv = CrossValidation(slsim.RegressionMeasure(),
                         partitioner,  # was: NFoldPartitioner(cvtype=1)
                         errorfx=None)

    # Spherical searchlight neighborhoods with a 3-voxel radius.
    kwa = dict(voxel_indices=Sphere(3))
    queryengine = IndexQueryEngine(**kwa)
    
    sl = Searchlight(cv, queryengine=queryengine)
    
    map_ = sl(ds)
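
A natural next step, not shown in the snippet, is to reverse-map the per-sphere scores back into volume space. A minimal sketch, assuming ds was loaded from a NIfTI image so PyMVPA's map2nifti can recover the geometry (the output filename is hypothetical):

    from mvpa2.datasets.mri import map2nifti

    # Reverse-map the searchlight scores and write one image per partitioner.
    nifti = map2nifti(ds, map_.samples)
    nifti.to_filename('searchlight_regression_%02d.nii.gz' % i)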
Example #2
    # Commented-out alternative labeling: targets built from the
    # decision field concatenated with the evidence level.
    '''
        count_ = 1
    else:  # decision
        field_ = 'decision'
        conf['label_dropped'] = 'FIX0'
        conf['label_included'] = 'NEW' + ev + ',' + 'OLD' + ev
        count_ = 5

    ds.targets = np.core.defchararray.add(np.array(ds.sa[field_].value, dtype=str),
                                          np.array(ds.sa.evidence, dtype=str))
    '''

    ds.targets = ds.sa.memory_status

    conf['label_dropped'] = 'None'
    conf['label_included'] = 'all'
    ds = preprocess_dataset(ds, data_type, **conf)
    count_ = 1
    field_ = 'memory'
    # Draw `count_` balanced subsamples of the dataset.
    balancer = Balancer(count=count_, apply_selection=True, limit=None)
    gen = balancer.generate(ds)
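    # Assumed continuation (not shown in the snippet): each draw from
    # `gen` is one balanced copy of `ds`, e.g.
    #     ds_bal = next(gen)
    # with equal numbers of samples per target class.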
    
    # Project-specific container that accumulates per-fold results.
    cv_storage = StoreResults()

    # Linear C-SVM classifier (LIBSVM-backed in PyMVPA).
    clf = LinearCSVMC(C=1)

    # Binary label vector for the sklearn cross-validation:
    # 1 for the first unique target, 0 otherwise.
    y = np.zeros_like(ds.targets, dtype=np.int_)
    y[ds.targets == ds.uniquetargets[0]] = 1
    
    # Remap chunks so each sample is its own chunk; this lets sklearn
    # build leave-one-sample-out folds.
    ds.chunks = np.arange(len(ds.chunks))
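
The snippet ends before the sklearn cross-validation actually runs. A minimal sketch of how it might continue, with LinearSVC standing in for LinearCSVMC and a leave-one-group-out scheme over the remapped chunks (all of this is an assumed continuation, not the original code):

    from sklearn.model_selection import LeaveOneGroupOut, cross_val_score
    from sklearn.svm import LinearSVC

    # With one chunk per sample, LeaveOneGroupOut reduces to
    # leave-one-sample-out cross-validation.
    scores = cross_val_score(LinearSVC(C=1), ds.samples, y,
                             groups=ds.chunks, cv=LeaveOneGroupOut())
    print(scores.mean())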