Example #1
    def _csv2ts(self):
        """ Read data from the in_file and generate a nitime TimeSeries object"""
        data, roi_names = self._read_csv()

        TS = TimeSeries(data=data,
                        sampling_interval=self.inputs.TR,
                        time_unit='s')

        TS.metadata = dict(ROIs=roi_names)

        return TS
Example #2
def get_bold_signals(image, mask, TR,
                     normalize=True,
                     ts_extraction='mean',
                     filter_par=None,
                     roi_values=None):
    '''
    Image and mask must be in nibabel format
    '''
    
    mask_data = np.int_(mask.get_data())
    if roi_values is None:
        labels = np.unique(mask_data)[1:]
    else:
        labels = np.int_(roi_values)
    
    final_data = []
    for v in labels:
        data = image.get_data()[mask_data == v]
        
        if normalize:
            data = zscore(data, axis=1)
            data[np.isnan(data)] = 0

        if ts_extraction == 'mean':
            data = data.mean(axis=0)
        elif ts_extraction == 'pca':
            if data.shape[0] > 0:
                data = PCA(n_components=1).fit_transform(data.T)
                data = np.squeeze(data)
            else:
                data = data.mean(axis=0)
                
        ts = TimeSeries(data, sampling_interval=float(TR))
        
        if filter_par is not None:
            
            upperf = filter_par['ub']
            lowerf = filter_par['lb']
            
            F = FilterAnalyzer(ts, ub=upperf, lb=lowerf)
            
            ts = TimeSeries(F.fir.data, sampling_interval=float(TR))
            
            del F
        
        final_data.append(ts.data)

    del data
    del mask_data
    del ts
    return TimeSeries(np.vstack(final_data), sampling_interval=float(TR))
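A minimal smoke test for get_bold_signals (mine, not from the source): the synthetic nibabel images and label values below are illustrative assumptions, and the function's own dependencies (numpy, scipy.stats.zscore, sklearn's PCA, nitime's TimeSeries and FilterAnalyzer, plus a nibabel version that still provides get_data) are assumed to be in scope.

import numpy as np
import nibabel as nib

rng = np.random.default_rng(0)
# Hypothetical 4x4x4 volume with 100 timepoints, and an atlas with labels 0-2
bold_img = nib.Nifti1Image(rng.standard_normal((4, 4, 4, 100)), affine=np.eye(4))
atlas_img = nib.Nifti1Image(rng.integers(0, 3, size=(4, 4, 4)).astype(np.int16),
                            affine=np.eye(4))

ts = get_bold_signals(bold_img, atlas_img, TR=2.0)
print(ts.data.shape)  # (n_labels, n_timepoints)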
Example #3
    def _csv2ts(self):
        """ Read data from the in_file and generate a nitime TimeSeries object"""
        data, roi_names = self._read_csv()
        
        TS = TimeSeries(data=data,
                        sampling_interval=self.inputs.TR,
                        time_unit='s')
        
        TS.metadata = dict(ROIs=roi_names)

        return TS
Example #4
def estimate_corr_coh(tc_array, tr):
    print('estimating correlation and coherence matrices...')
    TR = tr
    f_lb = 0.01
    f_ub = 0.15
    T = TimeSeries(tc_array.T, sampling_interval=TR)
    T.metadata['roi'] = tc_array.columns
    Corr = CorrelationAnalyzer(T)
    Coh = CoherenceAnalyzer(T)
    freq_idx = np.where((Coh.frequencies > f_lb) * \
                        (Coh.frequencies < f_ub))[0]
    return Corr.corrcoef, np.mean(Coh.coherence[:, :, freq_idx], -1)
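A hedged usage sketch: judging by tc_array.columns, the function expects a pandas DataFrame (timepoints x ROIs); TimeSeries, CorrelationAnalyzer and CoherenceAnalyzer are assumed to be imported from nitime as in the surrounding examples.

import numpy as np
import pandas as pd

rng = np.random.default_rng(1)
tc = pd.DataFrame(rng.standard_normal((200, 5)),
                  columns=['roi%d' % i for i in range(5)])
corr, coh = estimate_corr_coh(tc, tr=2.0)
print(corr.shape, coh.shape)  # (5, 5) (5, 5)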
Example #5
    def measure(self):

        vars_ = self.time_serie.data.shape[0]
        result = np.zeros((vars_, vars_))

        for i in range(vars_):
            ts_seed = TimeSeries(self.time_serie.data[i], sampling_interval=1.)
            ts_target = TimeSeries(self.time_serie.data[i + 1:],
                                   sampling_interval=1.)
            S = SeedAnalyzer(ts_seed, ts_target, self._measure)
            result[i, i + 1:] = S.measure

        return result
Example #6
def remove_bold_effect(bold_ts, distance_ts, ts_param, **kwargs):  
    
    for arg in kwargs:
        if arg == 'runs':
            n_runs = int(kwargs[arg])
        if arg == 'tr':
            TR = float(kwargs[arg])
    
    n_roi = bold_ts.data.shape[0]
 
    rl = bold_ts.data.shape[1] // n_runs - 1  # run length (minus one is the deconvolution effect)
    dl = distance_ts.data.shape[1] // n_runs  # distance length
    
    diff = rl - dl
       
    print(distance_ts.data.shape)
    deconv_ts = []
    for i in range(n_roi):
        deconv_distance = []
        for j in range(n_runs):
            full_data = np.ones(rl)
            full_data[diff:] = distance_ts.data[i][j*dl:(j+1)*dl]
            n_data = full_data/(bold_ts.data[i][j*rl:(j+1)*rl])
            deconv_distance.append(n_data[diff:])
        
        assert np.hstack(deconv_distance).shape[0] == dl*n_runs
        deconv_ts.append(np.hstack(deconv_distance))
    
    ts_deconv = TimeSeries(np.vstack(deconv_ts), sampling_interval=TR)
    
    
    return ts_deconv
Example #7
def compute_coherence(time_windows, TR, f_lb, f_ub, roi_names):
    n_timewindows = time_windows.shape[0]
    n_samples = time_windows.shape[1]
    n_rois = time_windows.shape[2]

    coherence_3Darray = np.zeros((n_timewindows, n_rois, n_rois))

    if n_rois == len(roi_names):

        for time_index in range(n_timewindows):

            ts = time_windows[time_index, :, :]
            data = np.zeros((n_rois, n_samples))

            for n_idx, roi in enumerate(roi_names):
                data[n_idx] = ts[:, n_idx]

            data = percent_change(data)
            T = TimeSeries(data, sampling_interval=TR)
            C = CoherenceAnalyzer(T)
            freq_idx = np.where(
                (C.frequencies > f_lb) * (C.frequencies < f_ub))[0]
            coh = np.mean(C.coherence[:, :, freq_idx], -1)

            coherence_3Darray[time_index] = coh
    else:
        raise Exception(
            "Number of ROIs in the 3D array does not match the number of ROI names provided."
        )

    return coherence_3Darray
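A quick synthetic call (not from the source), assuming percent_change comes from nitime.utils and CoherenceAnalyzer from nitime.analysis; the fake signals get a positive baseline because percent_change divides by the temporal mean.

import numpy as np

rng = np.random.default_rng(2)
windows = 100 + rng.standard_normal((4, 120, 6))  # n_timewindows x n_samples x n_rois
names = ['roi%d' % i for i in range(6)]
coh3d = compute_coherence(windows, TR=2.0, f_lb=0.02, f_ub=0.15, roi_names=names)
print(coh3d.shape)  # (4, 6, 6)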
Example #8
def get_state_frequencies(state_dynamics, method='spectrum_fourier'):
    """
    Returns the spectrum of the state occurrence for each subject.
    
    Parameters
    ----------
    state_dynamics :    n_states x n_subjects x n_timepoints array
                        The state dynamics output from fit_states
                        function.
                        
    method : a string, check nitime.spectral.SpectralAnalyzer for 
             allowed methods.
    
    
    Returns
    -------
    results : n_subjects list of tuple,
              first element is the array of frequencies,
              second element is the array n_states x frequencies
              of the spectrum.
    
    """
    
    results = []
    for s in state_dynamics:
        ts = TimeSeries(s, sampling_interval=1.)
        S = SpectralAnalyzer(ts)
        try:
            result = getattr(S, method)
        except AttributeError:
            result = S.spectrum_fourier
        
        results.append(result)
        
    return results
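A hedged sketch of a call with random dynamics (TimeSeries and SpectralAnalyzer assumed imported from nitime); note the function iterates over the first axis of the input array.

import numpy as np

rng = np.random.default_rng(3)
dyn = rng.random((2, 4, 256))  # iterated along the first axis
spectra = get_state_frequencies(dyn, method='psd')
freqs, spectrum = spectra[0]
print(freqs.shape, spectrum.shape)  # spectrum has one row per series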
Example #9
def hurstexp_welchper(data, samp=1.05, f_max=0, consider_fBm=False):
    """
    These functions compute the Hurst exponent of a signal using the
    Welch periodogram
    data : your signal
    samp : sampling rate in Hz 1 for an fMRI series
    f_max: the higher frequency you want to take into account
    """
    #data could be two dimensional(but no more...) in that cas time should
    #be on second position
    time_series = TimeSeries(data=data, sampling_rate=samp)
    spectral_analysis = SpectralAnalyzer(time_series)
    frq, pwr = spectral_analysis.psd
    # We keep only the low frequencies, but the exact cutoff is somewhat
    # arbitrary: we need alpha between 0 and 1
    if f_max == 0:
        masker = frq > 0
    else:
        masker = np.all([(frq > 0), (frq < f_max)], axis=0)
    log2frq = np.log2(frq[masker])
    log2pwr = np.log2(pwr.T[masker])
    tmp = np.polyfit(log2frq, log2pwr, deg=1)
    if consider_fBm:
        return (1 - tmp[0]) / 4, {
            'aest': tmp[1],
            'log2frq': log2frq,
            'log2pwr': log2pwr
        }
    return (1 - tmp[0]) / 2, {
        'aest': tmp[1],
        'log2frq': log2frq,
        'log2pwr': log2pwr
    }
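A sanity check of my own, not the source's: white noise has a flat spectrum, so the fitted log-log slope is near zero and the estimated Hurst exponent should come out close to 0.5.

import numpy as np

rng = np.random.default_rng(42)
white = rng.standard_normal(4096)
h, diagnostics = hurstexp_welchper(white, samp=1.0)
print(round(h, 2))  # approximately 0.5 for white noise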
Example #10
def process_enc_timestamps(masked_data, timestamp_run, TR=2.335):
    n_voxels = np.shape(masked_data)[1]
    ####   2. Apply a filter for each voxel
    data_filtered = np.zeros(np.shape(masked_data))
    for voxel in range(n_voxels):
        # data of the voxel along the session
        data_to_filter = masked_data[:, voxel]
        # apply the filter (upper and lower frequency boundaries)
        data_to_filter = TimeSeries(data_to_filter, sampling_interval=TR)
        F = FilterAnalyzer(data_to_filter, ub=0.15, lb=0.02)
        data_filtered[:, voxel] = F.filtered_boxcar.data

    ####   3. Subset of data corresponding to the delay times (all voxels)
    # empty matrix (n_trials, n_voxels)
    encoding_delay_activity = np.zeros((len(timestamp_run), n_voxels))
    for idx, t in enumerate(timestamp_run):  # in each trial
        # take the first scan of the delay and the next one
        delay_TRs = data_filtered[t:t + 2, :]
        # mean over the 2 TRs in each voxel
        delay_TRs_mean = np.mean(delay_TRs, axis=0)
        encoding_delay_activity[idx, :] = delay_TRs_mean

    ####   4. z-score each voxel in the temporal dimension (with the other 2-TR averages of the same session)
    for voxel in range(n_voxels):
        vx_act = encoding_delay_activity[:, voxel]
        vx_act_zs = np.array(stats.zscore(vx_act))
        encoding_delay_activity[:, voxel] = vx_act_zs  # replace previous activity

    return encoding_delay_activity
Example #11
    def transform(self, target_ds):

        seed_ds = self.seed_ds

        ts_seed = TimeSeries(seed_ds, sampling_interval=1.)
        ts_target = TimeSeries(target_ds, sampling_interval=1.)

        kwargs = self.kwargs

        seed_analyzer = self.seed_analyzer(ts_seed, ts_target, **kwargs)


        self._measure = seed_analyzer.measure

        return self._measure
Example #12
def get_similarity_timeserie(path, name, condition, time, **kwargs):
    
    TR = 1.
    for arg in kwargs:
        if arg == 'TR':
            TR = float(kwargs[arg])
            
    file_list = os.listdir(path)
    
    file_list = [f for f in file_list if f.find(name) != -1 
                                        and f.find('_'+condition) != -1 
                                        and f.find(time) != -1 
                                        ]

    total_data = []
    for f in file_list:
        
        print(os.path.join(path, f))
        
        data = np.loadtxt(os.path.join(path, f), delimiter=',')
        data = np.sqrt(data.T)
        
        data_z = zscore(data, axis=1)
        
        total_data.append(data_z)
    
    ts = TimeSeries(np.vstack(total_data), sampling_interval=TR)
    
    return ts
Example #13
def euclidean_measure(seed_ds, target_ds):

    roi_correlation = []

    for roi in np.unique(seed_ds.fa.roi_labels)[1:]:

        mask_roi = seed_ds.fa.roi_labels == roi

        seed_ts = TimeSeries(seed_ds[:, mask_roi], sampling_interval=1.)
        target_ts = TimeSeries(target_ds[:, mask_roi], sampling_interval=1.)

        S = SeedAnalyzer(seed_ts, target_ts)

        roi_correlation.append(S.measure)

    return roi_correlation
Example #14
def mask_fmri_process(fmri_path, masks, sys_use='unix'):
    ### 1. Load and mask the data
    fmri_path = ub_wind_path(fmri_path, system=sys_use)  # adapt the path format (Windows/Unix)

    mask_img_rh = masks[0]  # right hemisphere mask
    mask_img_rh = ub_wind_path(mask_img_rh, system=sys_use)
    mask_img_lh = masks[1]  # left hemisphere mask
    mask_img_lh = ub_wind_path(mask_img_lh, system=sys_use)
    
    #Apply the masks and concatenate   
    masked_data_rh = apply_mask(fmri_path, mask_img_rh)
    masked_data_lh = apply_mask(fmri_path, mask_img_lh)    
    masked_data = np.hstack([masked_data_rh, masked_data_lh])
    
    ### 2. Filter and z-score
    n_voxels = np.shape(masked_data)[1]
    for voxel in range(0, n_voxels):
        data_to_filter = masked_data[:,voxel]                        
        #apply the filter 
        data_to_filter = TimeSeries(data_to_filter, sampling_interval=2.335)
        F = FilterAnalyzer(data_to_filter, ub=0.15, lb=0.02)
        data_filtered = F.filtered_boxcar.data
        masked_data[:, voxel] = data_filtered
        # z-score
        masked_data[:, voxel] = np.array(stats.zscore(masked_data[:, voxel]))
    
    return masked_data
Example #15
    def execute(self,
                gsr=True,
                filter_params={
                    'ub': 0.08,
                    'lb': 0.009
                },
                tr=4.):

        # Get timeseries
        if not self.loadedSignals:
            self._load_signals(tr, gsr, filter_params=filter_params)
        elif self.loadedSignals['gsr'] != gsr or self.loadedSignals[
                'filter_params'] != filter_params:
            self._load_signals(tr, gsr, filter_params=filter_params)

        beta = glm(self.fmri_ts.data.T, self.regressors.T)

        residuals = self.fmri_ts.data.T - np.dot(self.regressors.T, beta)

        ts_residual = TimeSeries(residuals.T, sampling_interval=tr)
        '''
        ub = filter_params['ub']
        lb = filter_params['lb']
        
        F = FilterAnalyzer(ts_residual, ub=ub, lb=lb)
        '''
        residual_4d = np.zeros_like(self.bold.get_data())
        residual_4d[self.brain_mask.get_data() > 0] = ts_residual.data
        residual_4d[np.isnan(residual_4d)] = 0

        self._save(residual_4d, gsr=gsr)
Example #16
    def __init__(self, spike_data, **kwargs):

        sampling_rate = kwargs.pop('sampling_rate', 1000)
        starttime = float(kwargs.setdefault('starttime', 0))
        compute = kwargs.pop('compute', True)

        # Parse multi-trial spike time data as input
        if isinstance(spike_data, Timestamps):
            multi = spike_data
            data = multi.to_rate(sampling_rate, **kwargs)

        # Attempt to convert input to array
        else:
            multi = None
            data = asarray(spike_data)

        TimeSeries.__init__(self, data, sampling_rate=sampling_rate, t0=starttime)
        self.multi = multi
Example #17
def correlation_measure(seed_ds, target_ds):

    roi_correlation = []
    rois = [k for k in seed_ds.fa.keys() if k != 'voxel_indices']
    roi_values = seed_ds.fa[rois[0]].value

    for roi in np.unique(roi_values)[1:]:

        mask_roi = roi_values == roi

        seed_ts = TimeSeries(seed_ds[:, mask_roi], sampling_interval=1.)
        target_ts = TimeSeries(target_ds[:, mask_roi], sampling_interval=1.)

        S = SeedCorrelationAnalyzer(seed_ts, target_ts)

        roi_correlation.append(S.corrcoef)

    return roi_correlation
Example #18
def analyze_connectivity(imagelist, path_roi, roi_names, ts_param, **kwargs):
    
    TR = 1.
    for arg in kwargs:
        if arg == 'TR':
            TR = float(kwargs[arg])
    
    roi_list = os.listdir(path_roi)
    roi_list = [r for r in roi_list if r.find('.hdr') != -1 \
                                    or r.find('nii.gz') != -1]
    '''
    roi_list = ['lower_left_new_.nii.gz',
               'lower_right_new_.nii.gz',
               'upper_left_new_.nii.gz',
               'upper_right_new_.nii.gz',
               'roi_attention_all.4dfp.hdr',
               'roi_default_all.4dfp.hdr',
               'searchlight_separated.nii.gz'
               ]
    '''
    #roi_names = np.loadtxt('/media/DATA/fmri/learning/roi_labels', dtype=np.str)
    print('Length of image list is '+str(len(imagelist)))
    volume_shape = imagelist[0].get_data().shape[:-1]
    n_shape = list(volume_shape)
    n_shape.append(-1)
    coords = list(np.ndindex(volume_shape))
    coords = np.array(coords).reshape(n_shape)
    
    data_arr = []
    for roi in roi_list:
        r_mask = ni.load(os.path.join(path_roi, roi))
        mask = r_mask.get_data().squeeze()
        roi_filt = roi_names[roi_names.T[1] == roi]
        for label in np.unique(mask)[1:]:
            
            roi_m = np.int_(roi_filt.T[2]) == label
            if roi_m.any():
                print('Loading voxels from '+roi_filt[roi_m].T[0][0])
                time_serie = io.time_series_from_file([f.get_filename() for f in imagelist], \
                                                      coords[mask==label].T, \
                                                      TR=float(TR), \
                                                      normalize=ts_param['normalize'], \
                                                      average=ts_param['average'], \
                                                      filter=ts_param['filter'])
                data_arr.append(time_serie)
        del r_mask
    
    data = np.array(data_arr)
    ts = TimeSeries(data, sampling_interval=float(TR))
    del imagelist, time_serie
    C = nta.CorrelationAnalyzer(ts)
    
    return C, ts
Example #19
def connectivity_analysis(ts_condition, sampling_interval=1.):
    
    matrices = dict()
    for c in ts_condition:
        matrices[c] = []
        for i in range(len(ts_condition[c])):
            
            ts = TimeSeries(ts_condition[c][i], sampling_interval=sampling_interval)
            C = CorrelationAnalyzer(ts)
            
            matrices[c].append(C.corrcoef)
    
    return matrices
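An illustrative call (assumed shapes: each list entry is an n_rois x n_timepoints array; TimeSeries and CorrelationAnalyzer from nitime in scope):

import numpy as np

rng = np.random.default_rng(7)
ts_condition = {'rest': [rng.standard_normal((10, 150)) for _ in range(3)]}
matrices = connectivity_analysis(ts_condition, sampling_interval=2.)
print(len(matrices['rest']), matrices['rest'][0].shape)  # 3 (10, 10)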
Example #20
def roc(brain_ts, **kwargs):
    
    conditions = kwargs['conditions']
    
    #label_data = LabelEncoder().fit_transform(conditions_)
    label_data = np.zeros_like(conditions, dtype=int)
    label_data[conditions == "C"] = 1
    seed_ts = TimeSeries(label_data[np.newaxis,:], sampling_interval=1.)

    S = SeedAnalyzer(seed_ts, brain_ts, roc_auc_score)   
    measure = S.measure
    
    return measure
Example #21
def bold_convolution(bold_timeseries, duration, win_func=boxcar):
              
    window = win_func(duration)
    
    n_roi = bold_timeseries.data.shape[0]
    convolved_bold = []
    for i in range(n_roi):
        convolution = convolve(np.abs(bold_timeseries.data[i]), window)
        convolved_bold.append(convolution)
    
    ts_convolved = TimeSeries(np.vstack(convolved_bold),
                              sampling_interval=float(bold_timeseries.sampling_interval))
    
    return ts_convolved
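A usage sketch under the assumption that win_func is a SciPy window (scipy.signal.windows.boxcar here) and that the bare convolve name used in the function is NumPy's full-mode convolution, which lengthens each series by duration - 1 samples.

import numpy as np
from scipy.signal.windows import boxcar

convolve = np.convolve  # assumed binding for the bare name used in bold_convolution

rng = np.random.default_rng(5)
bold = TimeSeries(rng.standard_normal((4, 200)), sampling_interval=2.)
convolved = bold_convolution(bold, duration=8, win_func=boxcar)
print(convolved.data.shape)  # (4, 207) with 'full' convolution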
Example #22
def fit_states(X, centroids, distance=euclidean):
    """
    Returns the similarity of the dataset to each centroid,
    using a dissimilarity distance function.
    
    Parameters
    ----------
    X : n_samples x n_features array
        The full dataset used for clustering
    
    centroids : n_cluster x n_features array
        The cluster centroids.
        
    distance : a scipy.spatial.distance function | default: euclidean
        This is the dissimilarity measure; it should be a Python
        function, see scipy.spatial.distance.
        
    
    Returns
    -------
    results : n_samples x n_centroids array
        The result of the analysis.
    
    """
    

    ts_seed = TimeSeries(centroids, sampling_interval=1.)
    
    results_ = []
    
    for subj in X:
        ts_target = TimeSeries(subj, sampling_interval=1.)
        S = SeedAnalyzer(ts_seed, ts_target, distance)
        results_.append(S.measure)
        
    
    return results_
Example #23
def beta_series(brain_ts, **kwargs):
    
    seed_fname = kwargs['seed_fname'][0]
    img_data = kwargs['img_data']
    time_mask = kwargs['time_mask']
    
    seed = ni.load(seed_fname).get_data().squeeze()
    seed_data = img_data[np.nonzero(seed)][:,time_mask]
    seed_data = seed_data.mean(0)[np.newaxis,:]
    seed_ts = TimeSeries(seed_data, sampling_interval=1.)
    
    S = SeedCorrelationAnalyzer(seed_ts, brain_ts)
    measure = S.corrcoef
    
    return measure
Example #24
def getCorrelation(group_id, subject_id):
    # obtains the correlation between empirical functional connectivity and simulated functional connectivity
    input_path_sim = "fMRI.txt"
    input_path_em = get_path(group_id, subject_id, "T1") + "FC.mat"
    em_mat = scipy.io.loadmat(input_path_em)
    em_fc_matrix = em_mat["FC_cc_DK68"]
    sampling_interval = em_mat["TR"][0][0]
    uidx = np.triu_indices(68, 1)
    em_fc_z = np.arctanh(em_fc_matrix)
    em_fc = em_fc_z[uidx]
    tsr = np.loadtxt(input_path_sim)
    T = TimeSeries(tsr, sampling_interval=sampling_interval)
    C = CorrelationAnalyzer(T)
    sim_fc = np.arctanh(C.corrcoef)[uidx]
    sim_fc = np.nan_to_num(sim_fc)
    pearson_corr, _ = stat.pearsonr(sim_fc, em_fc)
    return pearson_corr
Example #25
def getCorrelation(run, encoding, subject_id):
    # obtains the correlation between empirical functional connectivity and simulated functional connectivity
    key = str(run) + "_" + str(encoding) + "_" + str(subject_id) + "_rsHRF"
    input_path_sim = "weights" + key + "fMRI.txt"
    em_fc_matrix = get_FC(run, encoding, subject_id)
    sampling_interval = 0.72
    uidx = np.triu_indices(379, 1)
    em_fc_z = np.arctanh(em_fc_matrix)
    em_fc = em_fc_z[uidx]
    tsr = np.loadtxt(input_path_sim)
    T = TimeSeries(tsr, sampling_interval=sampling_interval)
    C = CorrelationAnalyzer(T)
    sim_fc = np.arctanh(C.corrcoef)[uidx]
    sim_fc = np.nan_to_num(sim_fc)
    pearson_corr, _ = stat.pearsonr(sim_fc, em_fc)
    os.remove(input_path_sim)
    return pearson_corr
Example #26
def plot_spectrum(Timeseries, tr):
    from nitime.timeseries import TimeSeries
    from nitime.analysis import SpectralAnalyzer
    import matplotlib
    matplotlib.use('Agg')
    import matplotlib.pyplot as plt
    import os
    import numpy as np
    figure = []
    for i, timeseries in enumerate(Timeseries):
        #T = io.time_series_from_file(in_file,TR=tr)
        title = os.path.abspath('spectra')
        timeseries = np.asarray(timeseries[1:])
        timeseries = timeseries - np.mean(timeseries)
        T = TimeSeries(timeseries, sampling_interval=tr)
        S_original = SpectralAnalyzer(T)
        # Initialize a figure to put the results into:
        fig01 = plt.figure(figsize=(8, 3))
        ax01 = fig01.add_subplot(1, 1, 1)
        ax01.plot(S_original.psd[0],
            S_original.psd[1],
            label='Welch PSD')

        ax01.plot(S_original.spectrum_fourier[0],
            S_original.spectrum_fourier[1],
            label='FFT')

        ax01.plot(S_original.periodogram[0],
            S_original.periodogram[1],
            label='Periodogram')

        ax01.plot(S_original.spectrum_multi_taper[0],
            S_original.spectrum_multi_taper[1],
            label='Multi-taper')

        ax01.set_xlabel('Frequency (Hz)')
        ax01.set_ylabel('Power')

        ax01.legend()
        Figure = title + '%02d.png' % i
        plt.savefig(Figure, bbox_inches='tight')
        figure.append(Figure)
        plt.close()
    return figure
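A hedged example of driving plot_spectrum (it imports everything it needs internally and writes one 'spectraNN.png' per input series into the working directory):

import numpy as np

rng = np.random.default_rng(9)
series = [rng.standard_normal(256) for _ in range(2)]
figure_paths = plot_spectrum(series, tr=2.0)
print(figure_paths)  # absolute paths ending in spectra00.png, spectra01.png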
Example #27
def difference(brain_ts, **kwargs):
    
    seed_list = kwargs['seed_fname']
    img_data = kwargs['img_data']
    time_mask = kwargs['time_mask']
    
    multiseed_data = []
    
    for s in seed_list:
        seed = ni.load(s).get_data().squeeze()
        seed_data = img_data[np.nonzero(seed)][:,time_mask]
        seed_data = seed_data.mean(0)[np.newaxis,:]
        multiseed_data.append(seed_data)
        
    multiseed_data = np.vstack(multiseed_data)
    diff_ = np.abs(np.diff(multiseed_data, axis=0)[0])
    
    seed_ts = TimeSeries(diff_, sampling_interval=1.)
    S = SeedCorrelationAnalyzer(seed_ts, brain_ts)
    measure = S.corrcoef
    
    return measure
Example #28
"""
Smoke testing of the viz module.

"""

import numpy as np

from nitime.timeseries import TimeSeries
from nitime.analysis import CorrelationAnalyzer
from nitime.viz import drawmatrix_channels, drawgraph_channels, plot_xcorr


roi_names = ['a','b','c','d','e','f','g','h','i','j']
data = np.random.rand(10,1024)

T = TimeSeries(data, sampling_interval=np.pi)
T.metadata['roi'] = roi_names

#Initialize the correlation analyzer
C = CorrelationAnalyzer(T)

def test_drawmatrix_channels():
    fig01 = drawmatrix_channels(C.corrcoef, roi_names, size=[10., 10.], color_anchor=0)

def test_plot_xcorr():
    xc = C.xcorr_norm

    fig02 = plot_xcorr(xc,
                       ((0, 1),
                        (2, 3)),
                       line_labels=['a', 'b'])
Example #29
                            color_anchor=0,
                            title='MTM Coherence')
"""

.. image:: fig/multi_taper_coh_01.png

Next we perform the same analysis, using the nitime object-oriented interface.

We start by initializing a TimeSeries object with this data and with the
sampling_interval provided above. We set the metadata 'roi' field with the ROI
names.


"""

T = TimeSeries(pdata, sampling_interval=TR)
T.metadata['roi'] = roi_names
"""

We initialize an MTCoherenceAnalyzer object with the TimeSeries object

"""

C2 = MTCoherenceAnalyzer(T)
"""

The relevant indices in the Analyzer object are derived:

"""

freq_idx = np.where((C2.frequencies > 0.02) * (C2.frequencies < 0.15))[0]
Example #30
def granger_scores(timeseries, order):
    timeseries = TimeSeries(timeseries, sampling_interval=1)
    g = nta.GrangerAnalyzer(timeseries, order=order)
    g_xy_mat = np.mean(g.causality_xy, axis=-1)
    g_yx_mat = np.mean(g.causality_yx, axis=-1)
    return np.concatenate([g_xy_mat[np.tril_indices(3,-1)], g_yx_mat.T[np.triu_indices(3,1)]])
Example #31
            X = np.expand_dims(regressor, axis=1)
            glm_dist = GeneralLinearModel(X)
            glm_dist.fit(Y)
            beta_dist = glm_dist.get_beta()

            r_signal = np.dot(X, beta_dist)

            regressed_s = Y - r_signal

            save_fn = os.path.join(
                path_dist, 'similarity_regressed_' + name + '_' + condition +
                '_' + rest + '_.txt')
            #np.savetxt(save_fn, regressed_s, fmt='%.4f', delimiter=',')

            r_ts = TimeSeries(regressed_s.T,
                              sampling_interval=dist_ts.sampling_interval)

            C = CorrelationAnalyzer(r_ts)
            s_res.append(np.arctanh(C.corrcoef))

        res.append(s_res)

######################## Cross Correlation ###################################

    correlation_list = []
    connectivity_list = []

    coher_dist = []
    coher_bold = []

    for name in subjects:
Example #32
def connectivity(x):
    data = np.vstack(x)
    ts = TimeSeries(data, sampling_interval=1.)
    return CorrelationAnalyzer(ts).corrcoef
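For example (my data, not the source's; TimeSeries and CorrelationAnalyzer assumed in scope), connectivity stacks the 1D series it is given into rows and returns their correlation matrix:

import numpy as np

rng = np.random.default_rng(0)
cc = connectivity([rng.standard_normal(100) for _ in range(6)])
print(cc.shape)  # (6, 6)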
Example #33
def analysis(subjects, **kwargs):
    
    default_config = {
                      "path":'/root/robbis/fmri/carlo_ofp/',
                      "file_dir":"analysis_SEP/DE_ASS_noHP/SINGLE_TRIAL_MAGS_voxelwise/",
                      "fields": ["encoding", "level", "response"],
                      "conditions":{
                                    "encoding": ["F", "L"],
                                    "response": ["C"],
                                    "level": ["1","2","3"]
                                      },
                      "condition_dir_dict":{
                                            "encoding":0,
                                            "level":1,
                                            "response":2
                                            },
                      "cond_file":"%s_condition_list.txt",
                      "filename":"residuals.nii.gz",
                      "brain_mask":"glm_atlas_mask_333.nii",
                      "mask_dir":"1_single_ROIs",
                      "seed_mask": ["L_FFA.nii", "L_PPA.nii"],
                      "analysis":"beta"
                      }
    
    
    default_config.update(kwargs)
    
    mapper = {
              "beta": beta_series,
              "roc": roc,
              "abs_difference": difference, 
              }
    
    method = mapper[default_config["analysis"]]
    method_name = default_config["analysis"]
    
    path = default_config['path']
    filename = default_config['filename']
    cond_file = default_config['cond_file']
    brain_mask = default_config['brain_mask']
    seed_mask = default_config['seed_mask']
    mask_dir = default_config['mask_dir']
    file_dir = default_config["file_dir"]
    
    fields = default_config["fields"]
    conditions = default_config["conditions"]
    conditions_dict = default_config["condition_dir_dict"]
    
    
    mask_fname = os.path.join(path, mask_dir, brain_mask)
    seed_fname = [os.path.join(path, mask_dir, s) for s in seed_mask]
    default_config['seed_fname'] = seed_fname
    
    mask = ni.load(mask_fname)
    mask_data = mask.get_data().squeeze()
    
    total_results = []
    
    
    for s in subjects:

        condition_list = np.genfromtxt(os.path.join(path, s, file_dir, cond_file % s),
                                       dtype=str,
                                       delimiter=',')
        
        
        condition_mask = get_condition_mask(condition_list, 
                                            fields, 
                                            conditions, 
                                            conditions_dict)
        
        conditions_ = condition_list[condition_mask].T[-1]
        
        
        img_fname = os.path.join(path, s, file_dir, filename)
        
        time_mask = condition_mask
        default_config['time_mask'] = time_mask
        
        # Load brain
        img = ni.load(img_fname)
        img_data = img.get_data()
        default_config['img_data'] = img_data
        
        brain_img = img_data[np.nonzero(mask_data)][:,time_mask]
        brain_ts = TimeSeries(brain_img, sampling_interval=1.)    
            
            
        measure = method(brain_ts, **default_config)
        
        ### Save file
        result = np.zeros(mask.shape)
        result[...,0][np.nonzero(mask_data)] = measure
        
        condition_unique = np.unique([''.join(c) for c in condition_list[condition_mask]])
        
        fname = "%s_%s_%s_%s.nii.gz" % (method_name, '-'.join(seed_mask), '_'.join(condition_unique), s)
        ni.save(ni.Nifti1Image(result, mask.affine), os.path.join(path, "0_results", fname))
        
        total_results.append(result)
        
    total_results = np.concatenate(total_results, axis=3)
    
    fname = "0_%s_%s_%s_%s.nii.gz" % (method_name, '-'.join(seed_mask), '_'.join(condition_unique), "total")
    ni.save(ni.Nifti1Image(total_results, mask.affine), os.path.join(path, "0_results", fname))
    
    fname = "0_%s_%s_%s_%s.nii.gz" % (method_name, '-'.join(seed_mask), '_'.join(condition_unique), "total_avg")
    ni.save(ni.Nifti1Image(total_results.mean(3), mask.affine), os.path.join(path, "0_results", fname))
Example #34
In this case, we generate an order 2 AR process, with the following coefficients:


"""

coefs = np.array([0.9, -0.5])
"""

This generates the AR(2) time series:

"""

X, noise, _ = utils.ar_generator(npts, sigma, coefs, drop_transients)

ts_x = TimeSeries(X, sampling_rate=Fs, time_unit='s')
ts_noise = TimeSeries(noise, sampling_rate=1000, time_unit='s')
"""

We use the plot_tseries function in order to visualize the process:

"""

fig01 = plot_tseries(ts_x, label='AR signal')
fig01 = plot_tseries(ts_noise, fig=fig01, label='Noise')
fig01.axes[0].legend()
"""

.. image:: fig/ar_est_1var_01.png

Example #35
import nitime.viz

from importlib import reload
reload(nitime.viz)
from nitime.viz import drawmatrix_channels

# This time Import the coherence analyzer
from nitime.analysis import CoherenceAnalyzer

# This part is the same as before
TR = 1.89
data_rec = csv2rec("data/fmri_timeseries.csv")
roi_names = np.array(data_rec.dtype.names)
n_samples = data_rec.shape[0]
data = np.zeros((len(roi_names), n_samples))

for n_idx, roi in enumerate(roi_names):
    data[n_idx] = data_rec[roi]

data = percent_change(data)
T = TimeSeries(data, sampling_interval=TR)
T.metadata["roi"] = roi_names
C = CoherenceAnalyzer(T)

# We look only at frequencies between 0.02 and 0.15 (the physiologically
# relevant band, see http://imaging.mrc-cbu.cam.ac.uk/imaging/DesignEfficiency:
freq_idx = np.where((C.frequencies > 0.02) * (C.frequencies < 0.15))[0]

# Extract the coherence and average across these frequency bands:
coh = np.mean(C.coherence[:, :, freq_idx], -1)  # Averaging on the last dimension
drawmatrix_channels(coh, roi_names, size=[10.0, 10.0], color_anchor=0)