Example #1

            # Get the coordinates of the ROIs, while accounting for the
            # up-sampling:
            ROI_coords = [tsv.upsample_coords(tsv.getROIcoords(f), up_samp)
                          for f in ROI_files]

            # Initialize lists for each behavioral condition:
            t_fix = []
            t_left = []
            t_right = []

            for this_fix in this_s[1]['fix_nii']:
                # Read data from each ROI, from each voxel. If you want to
                # average across voxels, set the "average" kwarg to True:

                # Note: the "filter" kwarg applies a band-pass to each voxel's
                # time series; no averaging is done unless average=True is passed.
                t_fix.append(load_nii(nifti_path+this_fix,
                                      ROI_coords,
                                      TR,
                                      normalize='percent',
                                      filter=dict(method='iir',lb=f_lb,
                                                  ub=f_ub,
                                                  filt_order=50),
                                      verbose=True))
            for this_left in this_s[1]['left_nii']:
                t_left.append(load_nii(nifti_path+this_left,
                                       ROI_coords,
                                       TR,
                                       normalize='percent',
                                       filter=dict(method='iir',lb=f_lb,
                                                   ub=f_ub,
                                                   filt_order=50),
                                       verbose=True))

            for this_right in this_s[1]['right_nii']:
                t_right.append(load_nii(nifti_path+this_right,
                                        ROI_coords,
                                        TR,
                                        normalize='percent',
                                        filter=dict(method='iir',lb=f_lb,
                                                    ub=f_ub,
                                                    filt_order=50),
                                        verbose=True))

Example #2

        # Get the coordinates of the ROIs, while accounting for the
        # up-sampling:
        ROI_coords = [tsv.upsample_coords(tsv.getROIcoords(f),up_samp)
                           for f in ROI_files]
        
        # Initialize lists for each behavioral condition:
        t_fix = []
        t_left = []
        t_right = []
        nifti_path = fmri_path + sess[0] + '/%s_nifti/' % sess[0]

        # Plot the mean of the TS over SD (SNR) for each ROI
        # len(t_fix)= number of ROIs
        for runName in allRuns:
            for this_fix in sess[1][runName]:
                t_fix.append(load_nii(nifti_path+this_fix, ROI_coords,TR,
                                    normalize='percent', average=True, verbose=True))
        # Reshape into an ROI matrix
        allROIS = reshapeTS(t_fix)
        numRuns = allROIS.shape[1]

        corr_all[subject] = np.zeros((numRuns, len(rois), len(rois))) * np.nan
        coh_all[subject] = np.zeros((numRuns, len(rois), len(rois))) * np.nan
        
        # Get ROI correlations and coherence
        for runNum in range(allROIS.shape[1]):
            # Load the time series for this run
            fixTS = ts.TimeSeries(allROIS[:, runNum, :], sampling_interval=TR)
            fixTS.metadata['roi'] = roi_names

            # Get the plots and correlations
            C = CorrelationAnalyzer(fixTS)
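The snippet stops right after the CorrelationAnalyzer is constructed. A minimal, self-contained sketch of how the per-run correlation and coherence matrices might then be filled with nitime's CorrelationAnalyzer and CoherenceAnalyzer (the synthetic allROIS array, band limits, and sizes below are placeholders for the values computed earlier, not the original code):

import numpy as np
import nitime.timeseries as ts
from nitime.analysis import CorrelationAnalyzer, CoherenceAnalyzer

TR = 2.0
f_lb, f_ub = 0.01, 0.15          # placeholder band limits
n_rois, n_runs, n_trs = 4, 3, 120

# Placeholder for the (ROIs x runs x TRs) array returned by reshapeTS:
allROIS = np.random.randn(n_rois, n_runs, n_trs)

corr_per_run = np.zeros((n_runs, n_rois, n_rois)) * np.nan
coh_per_run = np.zeros((n_runs, n_rois, n_rois)) * np.nan

for runNum in range(n_runs):
    fixTS = ts.TimeSeries(allROIS[:, runNum, :], sampling_interval=TR)

    # Pearson correlation between every pair of ROI time series:
    corr_per_run[runNum] = CorrelationAnalyzer(fixTS).corrcoef

    # Coherence, averaged over the frequency band of interest:
    Coh = CoherenceAnalyzer(fixTS)
    in_band = (Coh.frequencies > f_lb) & (Coh.frequencies < f_ub)
    coh_per_run[runNum] = np.mean(Coh.coherence[:, :, in_band], axis=-1)
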
Example #3
            # Set up the input and output paths for this session:
            nifti_path = fmri_path + sessName[0] + '/%s_nifti/' % sessName[0]
            save_path = fmri_path + sessName[0] + '/regressors/'
            
            # Add filtering
            filterType='boxcar'

            #Go through each run and save out ROIs as nifti file
            # Band pass filtering and detrending are done on ROI average timeseries
            for runName in allRuns:
                print('Analyzing ' + runName)
                for this_fix in sessName[1][runName]:
                    t_all=[]
                    # Load the time series and average over ROI
                    t_all=load_nii(nifti_path+this_fix[:-4]+'_stc.nii.gz', ROI_coords,TR, normalize='percent', average=True, verbose=True)
                    
                    for roiNum in range(len(roi_names)):
                        print('Analyzing ' + roi_names[roiNum])
                        ts_roi=[]; ts_roidt=[]; ts_Box=[];
                        ts_roidv=[]; ts_roidv_dt=[]; ts_roidv_dtBox=[];

                        # Get each time series (1 x TRs)
                        ts_roi=t_all[roiNum].data
                        # Linearly detrend each ROI
                        ts_roidt=signal.detrend(ts_roi, axis=0)

                        # Band pass filter the data using boxcar filter
                        ts_Box=bp_data(ts_roidt, TR, f_ub, f_lb)
                   
                        # Get the derivative
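The example is cut off at the derivative step, and bp_data is a helper from this codebase whose implementation is not shown here. A hedged sketch of the detrend / band-pass / derivative sequence, assuming bp_data behaves roughly like nitime's boxcar FilterAnalyzer (the ts_roi data, TR, and band limits are placeholders):

import numpy as np
from scipy import signal
import nitime.timeseries as ts
from nitime.analysis import FilterAnalyzer

TR = 2.0
f_lb, f_ub = 0.02, 0.15          # placeholder band limits

# Placeholder for one ROI-averaged time series (1 x TRs):
ts_roi = np.random.randn(1, 120)

# Linearly detrend, as in the snippet above:
ts_roidt = signal.detrend(ts_roi, axis=-1)

# Boxcar band-pass between f_lb and f_ub (an assumption about what bp_data does):
F = FilterAnalyzer(ts.TimeSeries(ts_roidt, sampling_interval=TR),
                   lb=f_lb, ub=f_ub)
ts_Box = F.filtered_boxcar.data

# First temporal derivative (one plausible reading of "Get the derivative"):
ts_deriv = np.gradient(ts_Box, axis=-1)
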
Example #4

            # Add filtering
            filterType = 'boxcar'

            #Go through each run and save out ROIs as nifti file
            for runName in allRuns:
                print('Analyzing ' + runName)

                # Initialize lists for each condition:
                t_fix = []
                print(runName)
                saveFile=base_path+ 'fmri/Results/timeseries/'+subject+sessionName[sess]+'_'+runName+'_%sROIts_%sReg_meanROI_stc.pck' % (len(roi_names), len(nuisReg))
                for this_run in sessName[1][runName]:
                    run_rois=[]
                    # Load STC nifti
                    allData=load_nii(nifti_path+this_run[:-7]+'_stc.nii.gz', ROI_coords, TR, normalize='percent', average=False, verbose=True)
                    regMatrix=[]

                    # Load the nuisance variables into a matrix
                    for reg in nuisReg:
                        regFile=reg_path+this_run[:5]+'_'+reg
                        print('Regressor ' + reg)
                        regMatrix.append(np.loadtxt(regFile))
                    # Convert to array
                    regArray=np.array(regMatrix).transpose()

                    # Go through each ROI
                    for jj in range(len(ROI_coords)):
                        print('Analyzing ' + roi_names[jj])
                        roiData=[]; ts_roidt=[]; ts_Box=[]; ts_AvgBox=[]; meanROIts=[]
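The snippet breaks off after the nuisance regressors are stacked into regArray, before the regression itself. A minimal sketch of one way the nuisance variables could be regressed out of an ROI time series (ordinary least squares on placeholder data; the variable names mirror the snippet, but none of this code is taken from the original):

import numpy as np

n_trs, n_reg = 120, 3

# Placeholders for one ROI's percent-signal time series and the nuisance
# matrix (TRs x regressors) assembled from the regressor text files above:
roiData = np.random.randn(n_trs)
regArray = np.random.randn(n_trs, n_reg)

# Add a constant column and fit the nuisance model by ordinary least squares:
design = np.column_stack([regArray, np.ones(n_trs)])
beta, _, _, _ = np.linalg.lstsq(design, roiData, rcond=None)

# Keep the residuals as the "cleaned" ROI time series:
roiClean = roiData - design.dot(beta)
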
Example #5
            # Initialize lists for each behavioral condition:
            t_fix = []
            t_left = []
            t_right = []

            for this_fix in this_s[1]['fix_nii']:
                # Read data from each ROI, from each voxel. If you want to
                # average across voxels, set the "average" kwarg to True:

                # Note: the "filter" kwarg applies a band-pass to each voxel's
                # time series; no averaging is done unless average=True is passed.
                t_fix.append(
                    load_nii(nifti_path + this_fix,
                             ROI_coords,
                             TR,
                             normalize='percent',
                             filter=dict(method='iir',
                                         lb=f_lb,
                                         ub=f_ub,
                                         filt_order=50),
                             verbose=True))
            for this_left in this_s[1]['left_nii']:
                t_left.append(
                    load_nii(nifti_path + this_left,
                             ROI_coords,
                             TR,
                             normalize='percent',
                             filter=dict(method='iir',
                                         lb=f_lb,
                                         ub=f_ub,
                                         filt_order=50),
                             verbose=True))
Example #6

        # Get the coordinates of the ROIs, while accounting for the
        # up-sampling:
        ROI_coords = [
            tsv.upsample_coords(tsv.getROIcoords(f), up_samp)
            for f in ROI_files
        ]

        # Initialize lists for each behavioral condition:
        t_fix = []
        t_left = []
        t_right = []
        nifti_path = fmri_path + sess[0] + '/%s_nifti/' % sess[0]

        # Plot the mean of the TS over SD (SNR) for each ROI
        # len(t_fix)= number of ROIs
        for runName in allRuns:
            for this_fix in sess[1][runName]:
                t_fix.append(
                    load_nii(nifti_path + this_fix,
                             ROI_coords,
                             TR,
                             normalize='percent',
                             average=True,
                             verbose=True))
        # reshape ROI matrix
        allROIS = reshapeTS(t_fix)
        numRuns = allROIS.shape[1]

        corr_all[subject] = np.zeros((numRuns, len(rois), len(rois))) * np.nan
        coh_all[subject] = np.zeros((numRuns, len(rois), len(rois))) * np.nan

        # Get roi correlations and coherence
        for runNum in range(allROIS.shape[1]):
            #need to load timeseries by run
            fixTS = ts.TimeSeries(allROIS[:, runNum, :], sampling_interval=TR)
            fixTS.metadata['roi'] = roi_names
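reshapeTS is a project-specific helper that is not shown in these examples; judging from how its output is indexed afterwards (allROIS[:, runNum, :], with allROIS.shape[1] as the number of runs), it returns an (ROIs x runs x TRs) array. A hypothetical stand-in, purely for illustration:

import numpy as np

def reshape_ts_sketch(per_run_data):
    """Hypothetical stand-in for reshapeTS: stack per-run, ROI-averaged time
    series (each assumed to be an (n_rois, n_trs) array) into a single
    (n_rois, n_runs, n_trs) array."""
    return np.stack([np.asarray(run) for run in per_run_data], axis=1)

# Placeholder data: 4 ROIs, 3 runs, 120 TRs
fake_runs = [np.random.randn(4, 120) for _ in range(3)]
allROIS = reshape_ts_sketch(fake_runs)
print(allROIS.shape)   # (4, 3, 120)
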
Example #7
def get_flat_ts(flat_dir,nii_file,mr_session,TR,up_samp=[1,1,1],
                normalize='zscore',lb=0,ub=None):

    """

    Returns the flattened time-dependent data from a nifti file

    
    Parameters
    ----------
    flat_dir: str
        The full path to the flat directory containing the information for this
        flat patch

    nii_file: str,
        The full path to the nifti file with the data.

    mr_session: str
        Full path to the mrSESSION.mat file, from which the alignment will be
        pulled 
    
    TR: float
       The TR used to sample the data

    lb,ub: float
       The cutoff points for a boxcar filter
       
    Returns
    -------

    ts_out, flat_coords : list with [tseries_l,tseries_r]
    
    """
    coords_mat = sio.loadmat('%s/coords.mat'%flat_dir,squeeze_me=True)
    flat_coords = coords_mat['coords']
    gray_coords = coords_mat['grayCoords']
    
    # Add ones to the end, to make the shape work for the transformation with
    # the alignment matrix, below:
    gray_coords = [np.vstack([gray_coords[i],np.ones(gray_coords[i].shape[-1])])
                   for i in range(2)] # 2 hemispheres
 
    mrSESSION = sio.loadmat(mr_session,squeeze_me=True,struct_as_record=True)

    # The following extracts the alignment matrix from Inplane to Volume
    # coordinates:
    alignment = np.matrix(mrSESSION['mrSESSION']['alignment'][np.newaxis][0].squeeze())

    # The mrSESSION alignment matrix is the one that aligns from Inplane to
    # Volume. For this, we want the inverse:
    alignment = alignment.getI()

    # And we only need the top three rows (a 3 by 4 affine matrix):
    alignment = alignment[:3,:]

    # Do the transformation for both hemispheres, upsample, and then round, so
    # that we get the Inplane coords:
    inplane_coords = [np.round(upsample_coords(alignment * gray_coords[i],
                                               up_samp)) for i in range(2)]
       
    # Get the data from the nifti file in question, while boxcar filtering into
    # the frequency range defined by the input:
    tseries = load_nii(nii_file,inplane_coords,TR,normalize=normalize,
                       filter=dict(method='boxcar',lb=lb,ub=ub),
                       verbose=True)

    print('Assigning data to flat coordinates')
    im_size = tuple(coords_mat['imSize'])
    
    # Make the TimeSeries to fill with data (one for each hemisphere):
    ts_out = []

    # Loop over hemispheres: 
    for hemi_idx in range(2):
        # Add a TimeSeries with the right shape:
        ts_out.append(ts.TimeSeries(data=np.ones(
                      np.hstack([im_size,tseries[hemi_idx].shape[-1]]))*np.nan,
                                    sampling_interval=TR))
        
        idx = tuple(np.round(flat_coords[hemi_idx]-1).astype(int))
        my_t = tseries[hemi_idx].time
        for t in my_t:
            ts_out[-1].data[...,my_t.index_at(t)][idx]=tseries[hemi_idx].at(t)
    
    return ts_out,flat_coords
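A hypothetical call to get_flat_ts, just to show the expected inputs and outputs; every path and parameter value below is a placeholder (and np is assumed to be numpy imported as usual):

# Placeholder paths; substitute the real flat patch, nifti and mrSESSION files:
ts_lr, flat_coords = get_flat_ts(flat_dir='/data/subj1/flat_patch',
                                 nii_file='/data/subj1/epi01_mcf.nii.gz',
                                 mr_session='/data/subj1/mrSESSION.mat',
                                 TR=2.0,
                                 up_samp=[2, 2, 2],
                                 normalize='zscore',
                                 lb=0.02, ub=0.15)

# ts_lr[0] / ts_lr[1] hold the left / right flat-patch TimeSeries. Averaging
# over the time axis gives one mean flat map per hemisphere (NaN outside the
# patch, hence nanmean):
mean_flat_left = np.nanmean(ts_lr[0].data, axis=-1)
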
Example #8
def get_flat_ts(flat_dir,
                nii_file,
                mr_session,
                TR,
                up_samp=[1, 1, 1],
                normalize='zscore',
                lb=0,
                ub=None):
    """

    Returns the flattened time-dependent data from a nifti file

    
    Parameters
    ----------
    flat_dir: str
        The full path to the flat directory containing the information for this
        flat patch

    nii_file: str,
        The full path to the nifti file with the data.

    mr_session: str
        Full path to the mrSESSION.mat file, from which the alignment will be
        pulled 
    
    TR: float
       The TR used to sample the data

    lb,ub: float
       The cutoff points for a boxcar filter
       
    Returns
    -------

    ts_out, flat_coords : list with [tseries_l,tseries_r]
    
    """
    coords_mat = sio.loadmat('%s/coords.mat' % flat_dir, squeeze_me=True)
    flat_coords = coords_mat['coords']
    gray_coords = coords_mat['grayCoords']

    # Add ones to the end, to make the shape work for the transformation with
    # the alignment matrix, below:
    gray_coords = [
        np.vstack([gray_coords[i],
                   np.ones(gray_coords[i].shape[-1])]) for i in range(2)
    ]  # 2 hemispheres

    mrSESSION = sio.loadmat(mr_session, squeeze_me=True, struct_as_record=True)

    # The following extracts the alignment matrix from Inplane to Volume
    # coordinates:
    alignment = np.matrix(
        mrSESSION['mrSESSION']['alignment'][np.newaxis][0].squeeze())

    # The mrSESSION alignment matrix is the one that aligns from Inplane to
    # Volume. For this, we want the inverse:
    alignment = alignment.getI()

    # And we only need the top three rows (a 3 by 4 affine matrix):
    alignment = alignment[:3, :]

    # Do the transformation for both hemispheres, upsample, and then round, so
    # that we get the Inplane coords:
    inplane_coords = [
        np.round(upsample_coords(alignment * gray_coords[i], up_samp))
        for i in range(2)
    ]

    # Get the data from the nifti file in question, while boxcar filtering into
    # the frequency range defined by the input:
    tseries = load_nii(nii_file,
                       inplane_coords,
                       TR,
                       normalize=normalize,
                       filter=dict(method='boxcar', lb=lb, ub=ub),
                       verbose=True)

    print('Assigning data to flat coordinates')
    im_size = tuple(coords_mat['imSize'])

    # Make the TimeSeries to fill with data (one for each hemisphere):
    ts_out = []

    # Loop over hemispheres:
    for hemi_idx in range(2):
        # Add a TimeSeries with the right shape:
        ts_out.append(
            ts.TimeSeries(data=np.ones(
                np.hstack([im_size, tseries[hemi_idx].shape[-1]])) * np.nan,
                          sampling_interval=TR))

        idx = tuple(np.round(flat_coords[hemi_idx] - 1).astype(int))
        my_t = tseries[hemi_idx].time
        for t in my_t:
            ts_out[-1].data[...,
                            my_t.index_at(t)][idx] = tseries[hemi_idx].at(t)

    return ts_out, flat_coords
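The alignment step in both versions of get_flat_ts relies on homogeneous coordinates: a row of ones is appended to the 3 x n gray-matter coordinates so that a single matrix multiplication can apply rotation, scaling, and translation at once. A small numeric sketch of that step (the alignment matrix, the coordinates, and the per-axis scaling assumed for upsample_coords are all invented for illustration):

import numpy as np

# Invented 4x4 Inplane-to-Volume alignment (a pure translation here) and two
# gray-matter voxel coordinates as a 3 x n array:
align_ip2vol = np.eye(4)
align_ip2vol[:3, 3] = [10.0, -5.0, 2.0]
gray = np.array([[12.0, 40.0],
                 [7.0, 22.0],
                 [30.0, 15.0]])

# Append a row of ones so the 4x4 homogeneous matrix applies directly:
gray_h = np.vstack([gray, np.ones(gray.shape[-1])])        # 4 x n

# Invert (Volume -> Inplane) and keep only the top three rows (3 x 4 affine):
vol2ip = np.linalg.inv(align_ip2vol)[:3, :]

inplane = vol2ip @ gray_h                                   # 3 x n
# upsample_coords is assumed to scale each axis by its up-sampling factor:
up_samp = np.array([2.0, 2.0, 1.0])
inplane_up = np.round(inplane * up_samp[:, np.newaxis])
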
Example #9
            # Get session
            sess = subjects[subject][session]

            # Get ROIs
            roi_names = np.array(rois)
            ROI_files = []
            for roi in rois:
                ROI_files.append(fmri_path + sess[0] + '/Inplane/ROIs/' + roi + '.mat')

            # Get the coordinates of the ROIs, while accounting for the
            # up-sampling:
            ROI_coords = [tsv.upsample_coords(tsv.getROIcoords(f),up_samp) for f in ROI_files]

            # Initialize lists for each behavioral condition:
            t_fix = []
            nifti_path = fmri_path + sess[0] + '/%s_nifti/' % sess[0]


            niftiOrig = load_nii(nifti_path + 'epi04_mcf.nii.gz', ROI_coords, TR, average=True)
            niftiSTc = load_nii(nifti_path + 'epi04_mcf_stc.nii.gz', ROI_coords, TR, average=True)

            roiNum = 21
            roi1Orig = niftiOrig[roiNum]
            roi1ST = niftiSTc[roiNum]

            plt.figure()
            plt.plot(roi1Orig.data)
            plt.plot(roi1ST.data)
            plt.legend(('Original', 'Slice Time'))
            plt.title(rois[roiNum])

            1/0  # intentional error to halt the script here for interactive inspection
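Beyond eyeballing the overlaid plots, the effect of slice-time correction on an ROI average can be quantified quickly; a small sketch, with placeholder arrays standing in for roi1Orig.data and roi1ST.data:

import numpy as np

# Placeholders for the original and slice-time-corrected ROI averages:
orig = np.random.randn(160)
stc = orig + 0.05 * np.random.randn(160)

# Correlation and RMS difference between the two versions of the signal:
r = np.corrcoef(orig, stc)[0, 1]
rms_diff = np.sqrt(np.mean((orig - stc) ** 2))
print('correlation: %.3f, RMS difference: %.3f' % (r, rms_diff))
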
Example #10
        # Get the coordinates of the ROIs, while accounting for the
        # up-sampling:
        ROI_coords = [tsv.upsample_coords(tsv.getROIcoords(f),up_samp)
                           for f in ROI_files]

        # Initialize lists for each behavioral condition:
        t_fix = []
        nifti_path = fmri_path + sess[0] + '/%s_nifti/' % sess[0]

        # Plot the mean of the TS over SD (SNR) for each ROI
        # len(t_fix)= number of ROIs
        # Add filtering
        filterType = 'boxcar'
        for runName in allRuns:
            for this_fix in sess[1][runName]:
                t_fix.append(load_nii(nifti_path + this_fix, ROI_coords, TR,
                                      normalize='percent',
                                      filter=dict(lb=f_lb, ub=f_ub,
                                                  method=filterType,
                                                  filt_order=10),
                                      average=True, verbose=True))
        1/0  # intentional error to halt the script here for interactive inspection
        # Reshape into an ROI matrix
        allROIS = reshapeTS(t_fix)
        numRuns = allROIS.shape[1]

        corr_all[subject] = np.zeros((numRuns,len(rois),len(rois))) * np.nan
        coh_all[subject] = np.zeros((numRuns,len(rois),len(rois))) * np.nan


        # Average over all runs to get an ROI-by-timepoint array
        # (each averaged time series is 30 TRs long, with TR = 2 s)
        allROISorig = copy.deepcopy(allROIS)
        AvgRuns = np.mean(allROIS, axis=1)

        # Subtract out average
        if normalizeByMean: