Example #1
def load_spatiotemporal_dataset(ds, **kwargs):

    # Optional keyword arguments with sensible defaults.
    onset = kwargs.get('onset', 0)
    duration = kwargs.get('duration', None)
    enable_results = kwargs.get('enable_results', None)  # accepted but unused here

    events = find_events(targets=ds.sa.targets, chunks=ds.sa.chunks)

    #task_events = [e for e in events if e['targets'] in ['Vipassana','Samatha']]

    # Keep only events at least as long as the requested duration;
    # if none was requested, clip everything to the shortest event found.
    if duration is not None:
        events = [e for e in events if e['duration'] >= duration]
    else:
        duration = np.min([ev['duration'] for ev in events])

    for e in events:
        e['onset'] += onset
        e['duration'] = duration

    evds = eventrelated_dataset(ds, events=events)

    return evds
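A minimal usage sketch; the dataset name and keyword values are illustrative, and it assumes a PyMVPA dataset with targets and chunks sample attributes:

# Hypothetical call: shift every onset by 1 volume and clip events to 4 volumes.
evds = load_spatiotemporal_dataset(ds, onset=1, duration=4)
print(evds.shape)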
Example #2
def fx(dataset, behav_file, motion_file, polynomial_order, run_number):
    print("events      ->  %s" % behav_file)
    print("nuisance    ->  %s" % motion_file)

    tsds = dataset
    behav_txt = np.recfromcsv(behav_file, delimiter=',')
    # One event per CSV row: onset = run volume index * 2.0 (presumably the
    # TR in seconds), each event modeled with a fixed 6 s duration.
    events = [
        dict(onset=float(event['run_volume']) * 2.0,
             duration=6.0,
             targets=event['genre'],
             chunks=int(event['run']),
             stim=event['stim']) for event in behav_txt
    ]

    motion = np.loadtxt(motion_file)

    add_reg_names = ['tx', 'ty', 'tz', 'rx', 'ry', 'rz']
    hrf_estimates = eventrelated_dataset(
        tsds,
        events,
        model='hrf',
        time_attr='time_coords',
        condition_attr=('targets', 'chunks'),
        design_kwargs=dict(drift_model='polynomial',
                           drift_order=polynomial_order,
                           hrf_model='canonical with derivative',
                           add_regs=motion,
                           add_reg_names=add_reg_names),
        glmfit_kwargs=dict(model='ar1'))

    #hrf_estimates.sa['subj'] = [subject] * len(hrf_estimates)
    hrf_estimates.sa['run'] = [run_number] * len(hrf_estimates)

    # zscore voxelwise
    # XXX `hrf_estimates` has no chunks! hence zscoring is not performed run-wise!
    zscore(hrf_estimates)
    return hrf_estimates
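The XXX note above points out that hrf_estimates carries no chunks attribute, so zscore normalizes across all runs pooled together. A hedged sketch of one way to restore run-wise scaling, reusing the run attribute assigned above and assuming PyMVPA's zscore chunks_attr keyword:

# Sketch only: expose the per-run attribute as chunks so zscoring is run-wise.
hrf_estimates.sa['chunks'] = hrf_estimates.sa.run
zscore(hrf_estimates, chunks_attr='chunks')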
Example #3
#bandpass filter
nf = 0.5 / TR  # Nyquist frequency in Hz
ws = [(1 / lf) / nf, (1 / hf) / nf]  # band edges normalized to the Nyquist frequency
b, a = signal.butter(5, ws, btype='band')
S = [signal.filtfilt(b, a, x) for x in ds.samples.T]  # zero-phase filter per voxel
ds.samples = np.array(S).T
ds.samples = ds.samples.astype('float32')

#Create Event-related Dataset
onsets = np.arange(0, ds.nsamples - samples_size / TR, samples_size / TR)
events = []
for on in onsets:
    Ev = dict()
    Ev['onset'] = on
    Ev['duration'] = samples_size / TR
    Ev['target'] = on * TR
    Ev['subj'] = subj
    events.append(Ev)

evds = mvpa.eventrelated_dataset(ds, events=events)
evds.fa['1stidx'] = evds.fa.event_offsetidx==0

#Save PyMVPA dataset as HDF5 in the dataset directory
try:
    os.mkdir(os.path.join(path, 'dataset'))
except OSError:
    print('dataset directory already exists')

dsfile = subj + '_z' + str(zsc) + '_' + str(samples_size) + '_' + align
mvpa.h5save(os.path.join(path, 'dataset', dsfile + '.hdf5'), evds,
            compression='gzip')
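For concreteness, a hedged numeric sketch of the band-edge arithmetic; the TR and cutoff periods below are illustrative, not values from the original script:

import numpy as np
from scipy import signal

TR = 2.0                              # illustrative repetition time in seconds
nf = 0.5 / TR                         # Nyquist frequency: 0.25 Hz
lf, hf = 128.0, 8.0                   # illustrative cutoff periods in seconds
ws = [(1 / lf) / nf, (1 / hf) / nf]   # -> [0.03125, 0.5], both inside (0, 1)
b, a = signal.butter(5, ws, btype='band')
x = np.random.randn(200).astype('float32')  # one synthetic voxel time series
y = signal.filtfilt(b, a, x)                # zero-phase band-passed signal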
Example #4
def build_events_ds(ds, new_duration, **kwargs):
    """
    This function is used to convert a dataset in a event_related dataset. Used for
    transfer learning and clustering, thus a classifier has been trained on a 
    event related dataset and the prediction should be done on the same kind of the 
    dataset.
    
    Parameters    
    ----------
    
    ds : Dataset
        The dataset to be converted
    new_duration : integer
        Is the duration of the single event, if experiment events are of different
        length, it takes the events greater or equal to new_duration.
    kwarsg : dict
        win_number: is the number of window of one single event to be extracted,
        if it is not setted, it assumes the ratio between event duration and new_duration
        overlap:
        
    Returns
    -------
    
    Dataset:
        the event_related dataset
    """
    
    # Optional keyword arguments.
    win_number = kwargs.get('win_number', None)
    overlap = kwargs.get('overlap', None)

    events = find_events(targets=ds.sa.targets, chunks=ds.sa.chunks)
    labels = np.unique(ds.targets)
    # Assumes every event of a given label has one unique duration.
    current_duration = dict()
    for l in labels:
        d = [e['duration'] for e in events if e['targets'] == l]
        current_duration[l] = np.unique(d)[0]

    def calc_overlap(w, l, n):
        return w - np.floor((l - w) / (n - 1))

    def calc_win_number(w, l, o):
        return (l - w) / (w - o) + 1

    # Note: `l` is the last label from the loop above, so this assumes all
    # conditions share the same event duration.
    if overlap is None:
        overlap = calc_overlap(new_duration, current_duration[l], win_number)
    elif overlap >= new_duration:
        overlap = new_duration - 1

    if win_number is None:
        #win_number = np.ceil(current_duration[l]/np.float(new_duration))
        win_number = calc_win_number(new_duration, current_duration[l], overlap)
        
    new_event_list = []
    
    for e in events:
        onset = e['onset']
        chunks = e['chunks']
        targets = e['targets']
        duration = e['duration']

        for i in np.arange(win_number):
            new_onset = onset + i * (new_duration - overlap)
            
            new_event = dict()
            new_event['onset'] = new_onset
            new_event['duration'] = new_duration
            new_event['targets'] = targets
            new_event['chunks'] = chunks
            
            new_event_list.append(new_event)
    
    
    logger.info('Building new event related dataset...')
    evds = eventrelated_dataset(ds, events=new_event_list)
    
    return evds
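A worked example of the windowing arithmetic, with illustrative numbers: for an event lasting 12 volumes, new_duration = 4 and overlap = 2 give calc_win_number = (12 - 4) / (4 - 2) + 1 = 5 windows, whose onsets fall at offsets 0, 2, 4, 6 and 8 from the original event onset. A hedged usage sketch, assuming ds is a PyMVPA dataset with targets and chunks attributes:

# Sketch only: 4-volume windows overlapping by 2 volumes...
evds = build_events_ds(ds, 4, overlap=2)
# ...or, equivalently, fix the window count and let the overlap be derived.
evds = build_events_ds(ds, 4, win_number=5)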
Example #5
def spatiotemporal(ds, **kwargs):

    # Optional keyword arguments with sensible defaults.
    onset = kwargs.get('onset', 0)
    duration = kwargs.get('duration', None)
    enable_results = kwargs.get('enable_results', None)
    permutations = int(kwargs.get('permutations', 0))

    events = find_events(targets=ds.sa.targets, chunks=ds.sa.chunks)
    
    # Keep only events at least as long as the requested duration;
    # if none was requested, clip everything to the shortest event found.
    if duration is not None:
        events = [e for e in events if e['duration'] >= duration]
    else:
        duration = np.min([ev['duration'] for ev in events])

    for e in events:
        e['onset'] += onset           
        e['duration'] = duration
        
    evds = eventrelated_dataset(ds, events=events)
    
    [fclf, cvte] = setup_classifier(**kwargs)
    
    logger.info('Cross validation is performing ...')
    res = cvte(evds)
    
    print(cvte.ca.stats)
    
    
    if permutations != 0:
        print(cvte.ca.null_prob.samples)
        dist_len = len(cvte.null_dist.dists())
        err_arr = np.zeros(dist_len)
        for i in range(dist_len):
            err_arr[i] = 1 - cvte.ca.stats.stats['ACC']

        total_p_value = np.mean(cvte.null_dist.p(err_arr))
        p_value = cvte.ca.null_prob.samples
    else:
        total_p_value = 0.
        p_value = np.array([0, 0])
    
    
    # If the sensitivity analysis fails, fall back to returning the
    # results selected via enable_results.
    try:
        sensana = fclf.get_sensitivity_analyzer()
        res_sens = sensana(evds)
    except Exception as err:
        allowed_keys = ['map', 'sensitivities', 'stats', 
                        'mapper', 'classifier', 'ds', 
                        'perm_pvalue', 'p']
        
        allowed_results = [None, None, cvte.ca.stats, 
                           evds.a.mapper, fclf, evds, 
                           p_value, total_p_value]
        
        results_dict = dict(zip(allowed_keys, allowed_results))
        results = dict()
        if enable_results is None:
            enable_results = allowed_keys[:]
        for elem in enable_results:
            if elem in allowed_keys:
                results[elem] = results_dict[elem]
                
        return results
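A hedged usage sketch; the keyword names follow the function's own parsing, while the classifier-related kwargs are whatever setup_classifier expects and are omitted here:

# Sketch only: cross-validate with 100 permutations and keep a subset
# of the result entries defined in allowed_keys above.
results = spatiotemporal(ds, duration=4, permutations=100,
                         enable_results=['stats', 'p', 'perm_pvalue'])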