Example 1
import os

import numpy
import nibabel
from nipy.modalities.fmri.hemodynamic_models import spm_hrf, compute_regressor

def mk_dcm_dataset(timepoints, z, noise_sd):
    # Forward-simulate the DCM by Euler integration, adding Gaussian noise at
    # each step; dcm_model, A, B, C, u, and stepsize are assumed to be defined
    # at module level.
    data = numpy.zeros((len(timepoints), len(z)))
    for i in range(1, len(timepoints)):
        data[i, :] = (data[i - 1, :]
                      + dcm_model(timepoints[i], data[i - 1, :], A, B, C, u)
                      + numpy.random.randn(len(z)) * noise_sd)
    # Convolve each region's neural signal with the SPM HRF to get BOLD-like data
    hrf = spm_hrf(stepsize, oversampling=1)
    data_conv = numpy.zeros(data.shape)
    for i in range(len(z)):
        data_conv[:, i] = numpy.convolve(data[:, i], hrf)[:data.shape[0]]
    return data, data_conv
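
# The dcm_model helper is not shown in this example. A minimal sketch of the
# bilinear DCM neural state equation it presumably evaluates is below; the
# signature, the scalar modulatory input, and the stepsize scaling are all
# assumptions, not the original implementation.
def dcm_model(t, z, A, B, C, u):
    # dz/dt = (A + u*B) z + C*u, advanced by one Euler step of size stepsize
    return stepsize * (numpy.dot(A + u * B, z) + C * u)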

outdir='../results/dcmfiles'
if not os.path.exists(outdir):
    os.mkdir(outdir)

dcmdata=numpy.load('../results/dcmdata.npz')
data_conv=dcmdata['data']

# convolve the design with the HRF, then downsample to a 1-second TR
# (stepsize is 0.01 s, so keep every int(1./0.01) = 100th sample)
u=numpy.convolve(dcmdata['u'],spm_hrf(0.01,oversampling=1))
u=u[range(0,data_conv.shape[0],int(1./0.01))]
ntp=u.shape[0]

# downsample the convolved data the same way
data=data_conv[range(0,data_conv.shape[0],int(1./0.01))]

roi_locations=[[2,2,2],[4,4,4],[6,6,6],[8,8,8],[10,10,10]]
datamat=numpy.zeros((12,12,12,ntp))
datamat[2:11,2:11,2:11,:]+=100
for i in range(5):
    datamat[roi_locations[i][0],roi_locations[i][1],roi_locations[i][2],:]+=data[:,i]

img=nibabel.Nifti1Image(datamat,numpy.identity(4))
img.to_filename(os.path.join(outdir,'all.nii'))

for i in range(ntp):
    tmp=datamat[:,:,:,i]
    img=nibabel.Nifti1Image(tmp,numpy.identity(4))
    img.to_filename(os.path.join(outdir,'img%03d.nii'%i))
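
# The loop above can equivalently use nibabel's built-in 4D-to-3D splitter;
# a brief sketch (rebuilding the 4D image saved to all.nii above):
import nibabel.funcs
img4d = nibabel.Nifti1Image(datamat, numpy.identity(4))
for i, vol in enumerate(nibabel.funcs.four_to_three(img4d)):
    vol.to_filename(os.path.join(outdir, 'img%03d.nii' % i))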
Example 3
#############################################################################
# Compute T-stats image for each block
#############################################################################

import numpy as np

# number_features is assumed to be defined earlier in the script
T = np.zeros((128, number_features))
betas = np.zeros((128, number_features))

subject = np.zeros((128))
subject_num = 0
t = 0

from nipy.modalities.fmri import hemodynamic_models
hrf = hemodynamic_models.spm_hrf(tr=3.0,
                                 oversampling=1,
                                 onset=0.0,
                                 time_length=400)

DesignMat = np.zeros((132, 8))
DesignMat[20:27, 0] = np.arange(1, 8)
DesignMat[34:41, 1] = np.arange(1, 8)
DesignMat[48:55, 2] = np.arange(1, 8)
DesignMat[62:69, 3] = np.arange(1, 8)
DesignMat[76:83, 4] = np.arange(1, 8)
DesignMat[90:97, 5] = np.arange(1, 8)
DesignMat[104:111, 6] = np.arange(1, 8)
DesignMat[118:125, 7] = np.arange(1, 8)

lab = np.zeros((132))
lab[20:27] = 1
lab[34:41] = 1
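
# The hrf computed above is not used in this fragment; a plausible next step
# (an assumption, not shown in the original) is to convolve each design
# column with it and trim back to the session length:
DesignMatConv = np.zeros_like(DesignMat)
for col in range(DesignMat.shape[1]):
    DesignMatConv[:, col] = np.convolve(DesignMat[:, col], hrf)[:DesignMat.shape[0]]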
Example 4
import os

# results_dir is assumed to be defined earlier in the notebook;
# sim_dcm_dataset, spm_hrf, and numpy are assumed to be imported already.
if not os.path.exists(results_dir):
    os.mkdir(results_dir)


# Load the data generated using the DCM forward model. In this model there is significant static connectivity from 1->2 and 1->3 (A matrix), a PPI for 0->2 and 0->4 (B matrix), and a significant input to ROI 0 (C matrix).

# In[2]:

_,data_conv,params=sim_dcm_dataset(verbose=True)

A_mtx=params['A']
B_mtx=params['B']
u=params['u']
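
# Quick structural check against the description above (illustrative only,
# assuming the usual DCM convention that A[i, j] is the influence of region j
# on region i):
print('A nonzero (to, from):', numpy.transpose(numpy.nonzero(A_mtx)))
print('B nonzero (to, from):', numpy.transpose(numpy.nonzero(B_mtx)))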

# downsample design to 1 second TR
u=numpy.convolve(params['u'],spm_hrf(params['stepsize'],oversampling=1))
u=u[range(0,data_conv.shape[0],int(1./params['stepsize']))]
ntp=u.shape[0]




# ### Generate a set of synthetic datasets, one per simulated subject

# In[3]:

tetrad_dir='/home/vagrant/data/tetrad_files'
if not os.path.exists(tetrad_dir):
    os.mkdir(tetrad_dir)

nfiles=10
Example 5
import glob
import numpy as np

def loadTaskTiming(subj, task, taskModel='canonical', nRegsFIR=25):
    # taskEV_Identifier, taskLength, basedir, trLength, framesToSkip, spm_hrf,
    # and _group_consecutives are assumed to be defined at module level.
    nRunsPerTask = 2

    taskkey = task[6:]  # Strip the prefix to get the task's string identifier
    taskEVs = taskEV_Identifier[taskkey]
    stimMat = np.zeros(
        (taskLength[taskkey] * nRunsPerTask, len(taskEV_Identifier[taskkey])))
    stimdir = basedir + 'HCP352Data/data/timingfiles3/'
    stimfiles = glob.glob(stimdir + subj + '*EV*' + taskkey + '*1D')

    for stimcount in range(len(taskEVs)):
        ev = taskEVs[stimcount] + 1
        stimfile = glob.glob(stimdir + subj + '*EV' + str(ev) + '_' + taskkey +
                             '*1D')
        stimMat[:, stimcount] = np.loadtxt(stimfile[0])

    nTRsPerRun = int(stimMat.shape[0] / nRunsPerTask)  # two runs concatenated

    if taskModel == 'FIR':
        # Build an FIR (finite impulse response) design matrix: one regressor
        # per TR of each block, plus nRegsFIR lag regressors for the HRF tail

        ## First set up FIR design matrix
        stim_index = []
        taskStims_FIR = []
        for stim in range(stimMat.shape[1]):
            taskStims_FIR.append([])
            time_ind = np.where(stimMat[:, stim] == 1)[0]
            blocks = _group_consecutives(
                time_ind)  # Get blocks (i.e., sets of consecutive TRs)
            # Identify the longest block - set FIR duration to longest block
            maxRegsForBlocks = 0
            for block in blocks:
                if len(block) > maxRegsForBlocks: maxRegsForBlocks = len(block)
            taskStims_FIR[stim] = np.zeros(
                (stimMat.shape[0], maxRegsForBlocks + nRegsFIR)
            )  # Task timing for this condition is TR x length of block + FIR lag
            stim_index.extend(np.repeat(stim, maxRegsForBlocks + nRegsFIR))
        stim_index = np.asarray(stim_index)

        ## Now fill in FIR design matrix
        # Make sure to cut-off FIR models for each run separately
        trcount = 0

        for run in range(nRunsPerTask):
            trstart = trcount
            trend = trstart + nTRsPerRun

            for stim in range(stimMat.shape[1]):
                time_ind = np.where(stimMat[:, stim] == 1)[0]
                blocks = _group_consecutives(
                    time_ind)  # Get blocks (i.e., sets of consecutive TRs)
                for block in blocks:
                    reg = 0
                    for tr in block:
                        # Set impulses for this run/task only
                        if trstart < tr < trend:
                            taskStims_FIR[stim][tr, reg] = 1
                            reg += 1

                    # If this block's TRs fall outside this run, skip its lag regressors
                    if not trstart < tr < trend:
                        continue

                    # Set lag due to HRF
                    for lag in range(1, nRegsFIR + 1):
                        # Set impulses for this run/task only
                        if trstart < tr + lag < trend:
                            taskStims_FIR[stim][tr + lag, reg] = 1
                            reg += 1
            trcount += nTRsPerRun

        taskStims_FIR2 = np.zeros((stimMat.shape[0], 1))
        task_index = []
        for stim in range(stimMat.shape[1]):
            task_index.extend(np.repeat(stim, taskStims_FIR[stim].shape[1]))
            taskStims_FIR2 = np.hstack((taskStims_FIR2, taskStims_FIR[stim]))

        taskStims_FIR2 = np.delete(taskStims_FIR2, 0, axis=1)

        #taskRegressors = np.asarray(taskStims_FIR)
        taskRegressors = taskStims_FIR2

        # To prevent an "SVD did not converge" error, make sure there are no all-zero columns
        zero_cols = np.where(np.sum(taskRegressors, axis=0) == 0)[0]
        taskRegressors = np.delete(taskRegressors, zero_cols, axis=1)
        stim_index = np.delete(stim_index, zero_cols)

    elif taskModel == 'canonical':
        ##
        # Convolve taskstim regressors based on SPM canonical HRF (likely period of task-induced activity)
        taskStims_HRF = np.zeros(stimMat.shape)
        spm_hrfTS = spm_hrf(trLength, oversampling=1)

        trcount = 0
        for run in range(nRunsPerTask):
            trstart = trcount
            trend = trstart + nTRsPerRun

            for stim in range(stimMat.shape[1]):

                # Perform convolution
                tmpconvolve = np.convolve(stimMat[trstart:trend, stim],
                                          spm_hrfTS)
                tmpconvolve_run = tmpconvolve[:nTRsPerRun]  # Cut off at the end of the run
                taskStims_HRF[trstart:trend, stim] = tmpconvolve_run

            trcount += nTRsPerRun

        taskRegressors = taskStims_HRF.copy()

        stim_index = np.arange(stimMat.shape[1])

    # Create temporal mask: skip the first framesToSkip frames of each run
    tMask = []
    tmp = np.ones((nTRsPerRun, ), dtype=bool)
    tmp[:framesToSkip] = False
    tMask.extend(tmp)
    tMask.extend(tmp)
    tMask = np.asarray(tMask, dtype=bool)

    output = {}
    # No further demeaning here: each run is demeaned before the data are loaded
    output['taskRegressors'] = taskRegressors[tMask, :]
    output['taskDesignMat'] = stimMat[tMask, :]
    output['stimIndex'] = stim_index

    return output
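
# _group_consecutives is used above but not defined in this fragment; a
# minimal sketch of its assumed behavior (split a sorted array of TR indices
# into runs of consecutive values):
def _group_consecutives(vals, step=1):
    blocks = []
    run = []
    expect = None
    for v in vals:
        if expect is None or v == expect:
            run.append(v)
        else:
            blocks.append(run)
            run = [v]
        expect = v + step
    if run:
        blocks.append(run)
    return blocks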