Example #1
class Threedcalc(AFNICommand):
    """
    For complete details, see the `3dcalc Documentation.
    <http://afni.nimh.nih.gov/pub/dist/doc/program_help/3dcalc.html>`_
    """

    @property
    def cmd(self):
        """Base command for Threedcalc"""
        return '3dcalc'

    def inputs_help(self):
        doc = """
        """
        print(doc)

    def _populate_inputs(self):
        """Initialize the inputs attribute."""

        self.inputs = Bunch(
            infile_a=None,
            expr=None,
            session=None,
            datum=None,
            outfile=None,
            )

    def _parseinputs(self):
        """Parse valid input options for Threedcalc command.

        Ignore options set to None.

        """

        out_inputs = []
        inputs = {k: v for k, v in self.inputs.items() if v is not None}

        if 'infile_a' in inputs:
            val = inputs.pop('infile_a')
            out_inputs.append('-a %s' % val)
        if 'expr' in inputs:
            val = inputs.pop('expr')
            out_inputs.append('-expr %s' % val)
        if 'session' in inputs:
            val = inputs.pop('session')
            out_inputs.append('-session %s' % val)
        if 'datum' in inputs:
            val = inputs.pop('datum')
            out_inputs.append('-datum %s' % val)
        if 'outfile' in inputs:
            val = inputs.pop('outfile')
            out_inputs.append('-prefix %s' % val)

        if len(inputs) > 0:
            print('%s: unsupported options: %s' % (
                self.__class__.__name__, list(inputs.keys())))

        return out_inputs
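
A minimal usage sketch (assuming the AFNICommand base class initializes the
inputs Bunch on construction; the dataset names are hypothetical):

calc = Threedcalc()
calc.inputs.infile_a = 'func+orig'  # hypothetical AFNI dataset
calc.inputs.expr = "'a*2'"
calc.inputs.outfile = 'func_doubled'
# _parseinputs() assembles the option strings that follow the base command
print([calc.cmd] + calc._parseinputs())
# ['3dcalc', '-a func+orig', "-expr 'a*2'", '-prefix func_doubled']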
Example #2
        feedback_duration_per_trial.append([events.Feedback_duration[i]])

    info = [
        Bunch(conditions=trials,
              onsets=feedback_onset_per_trial,
              durations=feedback_duration_per_trial,
              regressors=[
                  list(
                      confounds.FramewiseDisplacement.fillna(0)
                      [analysis_info['remove_TRs']:]),
                  list(confounds.X[analysis_info['remove_TRs']:]),
                  list(confounds.Y[analysis_info['remove_TRs']:]),
                  list(confounds.Z[analysis_info['remove_TRs']:]),
                  list(confounds.RotX[analysis_info['remove_TRs']:]),
                  list(confounds.RotY[analysis_info['remove_TRs']:]),
                  list(confounds.RotZ[analysis_info['remove_TRs']:]),
                  list(confounds.aCompCor00[analysis_info['remove_TRs']:]),
                  list(confounds.aCompCor01[analysis_info['remove_TRs']:]),
                  list(confounds.aCompCor02[analysis_info['remove_TRs']:]),
                  list(confounds.aCompCor03[analysis_info['remove_TRs']:]),
                  list(confounds.aCompCor04[analysis_info['remove_TRs']:]),
                  list(confounds.aCompCor05[analysis_info['remove_TRs']:])
              ],
              regressor_names=[
                  'FramewiseDisplacement', 'X', 'Y', 'Z', 'RotX', 'RotY',
                  'RotZ', 'aCompCor00', 'aCompCor01', 'aCompCor02',
                  'aCompCor03', 'aCompCor04', 'aCompCor05'
              ])
    ]

    # Preparing single trial contrasts (t-stats)
datasource.inputs.template = '%s%s/%s%s_%03d.img'
datasource.inputs.template_args = info
"""
Experimental paradigm specific components
-----------------------------------------

Here we create a structure that provides information
about the experimental paradigm. This is used by the
:class:`nipype.interfaces.spm.SpecifyModel` to create the information
necessary to generate an SPM design matrix.
"""

from nipype.interfaces.base import Bunch

subjectinfo = [
    Bunch(conditions=['Task'], onsets=[list(range(6, 84, 12))], durations=[[6]])
]
"""Setup the contrast structure that needs to be evaluated. This is a
list of lists. The inner list specifies the contrasts and has the
following format - [Name, Stat, [list of condition names], [weights on
those conditions]]. The condition names must match the `names` listed
in the `subjectinfo` function described above.
"""

cont1 = ('active > rest', 'T', ['Task'], [1])
contrasts = [cont1]
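
An F contrast can be expressed as a list of T contrast tuples (the same
convention appears in Example #8 below); for instance:

contf = ('Task F', 'F', [cont1])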

# set up node specific inputs
modelspecref = l1pipeline.inputs.analysis.modelspec
modelspecref.input_units = 'scans'
modelspecref.output_units = 'scans'
Example #5
def get_subject_info(subject_id):
    import numpy as np
    from nipype.interfaces.base import Bunch

    # Condition names
    condition_names = [
        'pmTarget', 'pmProbe', 'pmFeedback', 'pmTargProbe', 'nonPMtarget',
        'nonPMprobes', 'nonPMfeedback'
    ]

    subjectinfo = []
    for r in range(5):
        blockNum = r + 1
        evDir = '/Users/srk482-admin/Documents/forcemem_mriDat/forcemem_{0}/fsl_analysis/EVs/block{1}/'.format(
            subject_id, blockNum)
        #Reset the onset, duration, and amp lists
        onset = []
        duration = []
        amp = []
        #Read each condition's EV file (one onset, duration, and amplitude
        #per line), in the same order as condition_names above
        ev_files = [
            'pm_targetEVs.txt', 'pm_probeEVs.txt', 'pm_feedbackEVs.txt',
            'pm_targProbeEVs.txt', 'npm_targetEVs.txt', 'npm_probeEVs.txt',
            'npm_feedbackEVs.txt'
        ]
        for ev_file in ev_files:
            tmpOnset = []
            tmpDuration = []
            tmpAmp = []
            with open(evDir + ev_file) as tmpFile:
                for tmpLine in tmpFile:
                    tmpOnset.append(float(tmpLine.split()[0]))
                    tmpDuration.append(float(tmpLine.split()[1]))
                    tmpAmp.append(float(tmpLine.split()[2]))
            onset.append(tmpOnset)
            duration.append(tmpDuration)
            amp.append(tmpAmp)

        #Now turn these lists into object arrays (the per-condition lists can
        #have different lengths) and export from file.
        onset = np.asarray(onset, dtype=object)
        duration = np.asarray(duration, dtype=object)
        amp = np.asarray(amp, dtype=object)

        #return onset, duration, amp

        subjectinfo.insert(
            r,
            Bunch(conditions=condition_names,
                  onsets=onset,
                  durations=duration,
                  amplitudes=amp,
                  tmod=None,
                  pmod=None,
                  regressor_names=None,
                  regressors=None))
    return subjectinfo
Example #6
def test_modelgen_spm_concat():
    tempdir = mkdtemp()
    filename1 = os.path.join(tempdir, 'test1.nii')
    filename2 = os.path.join(tempdir, 'test2.nii')
    Nifti1Image(np.random.rand(10, 10, 10, 30),
                np.eye(4)).to_filename(filename1)
    Nifti1Image(np.random.rand(10, 10, 10, 30),
                np.eye(4)).to_filename(filename2)
    # Test case when only one duration is passed, as being the same for all onsets.
    s = SpecifySPMModel()
    s.inputs.input_units = 'secs'
    s.inputs.concatenate_runs = True
    setattr(s.inputs, 'output_units', 'secs')
    yield assert_equal, s.inputs.output_units, 'secs'
    s.inputs.functional_runs = [filename1, filename2]
    s.inputs.time_repetition = 6
    s.inputs.high_pass_filter_cutoff = 128.
    info = [
        Bunch(conditions=['cond1'],
              onsets=[[2, 50, 100, 170]],
              durations=[[1]]),
        Bunch(conditions=['cond1'],
              onsets=[[30, 40, 100, 150]],
              durations=[[1]])
    ]
    s.inputs.subject_info = deepcopy(info)
    res = s.run()
    yield assert_equal, len(res.outputs.session_info), 1
    yield assert_equal, len(res.outputs.session_info[0]['regress']), 1
    yield assert_equal, np.sum(
        res.outputs.session_info[0]['regress'][0]['val']), 30
    yield assert_equal, len(res.outputs.session_info[0]['cond']), 1
    yield assert_almost_equal, np.array(
        res.outputs.session_info[0]['cond'][0]['onset']), np.array(
            [2.0, 50.0, 100.0, 170.0, 210.0, 220.0, 280.0, 330.0])
    yield assert_almost_equal, np.array(
        res.outputs.session_info[0]['cond'][0]['duration']), np.array(
            [1., 1., 1., 1., 1., 1., 1., 1.])
    # Test case of scans as output units instead of seconds
    setattr(s.inputs, 'output_units', 'scans')
    yield assert_equal, s.inputs.output_units, 'scans'
    s.inputs.subject_info = deepcopy(info)
    res = s.run()
    yield assert_almost_equal, np.array(
        res.outputs.session_info[0]['cond'][0]['onset']), np.array(
            [2.0, 50.0, 100.0, 170.0, 210.0, 220.0, 280.0, 330.0]) / 6
    # Test case for no concatenation with seconds as output units
    s.inputs.concatenate_runs = False
    s.inputs.subject_info = deepcopy(info)
    s.inputs.output_units = 'secs'
    res = s.run()
    yield assert_almost_equal, np.array(
        res.outputs.session_info[0]['cond'][0]['onset']), np.array(
            [2.0, 50.0, 100.0, 170.0])
    # Test case for variable number of events in separate runs, sometimes unique.
    filename3 = os.path.join(tempdir, 'test3.nii')
    Nifti1Image(np.random.rand(10, 10, 10, 30),
                np.eye(4)).to_filename(filename3)
    s.inputs.functional_runs = [filename1, filename2, filename3]
    info = [
        Bunch(conditions=['cond1', 'cond2'],
              onsets=[[2, 3], [2]],
              durations=[[1, 1], [1]]),
        Bunch(conditions=['cond1', 'cond2'],
              onsets=[[2, 3], [2, 4]],
              durations=[[1, 1], [1, 1]]),
        Bunch(conditions=['cond1', 'cond2'],
              onsets=[[2, 3], [2]],
              durations=[[1, 1], [1]])
    ]
    s.inputs.subject_info = deepcopy(info)
    res = s.run()
    yield assert_almost_equal, np.array(
        res.outputs.session_info[0]['cond'][0]['duration']), np.array([1., 1.])
    yield assert_almost_equal, np.array(
        res.outputs.session_info[0]['cond'][1]['duration']), np.array([
            1.,
        ])
    yield assert_almost_equal, np.array(
        res.outputs.session_info[1]['cond'][1]['duration']), np.array([1., 1.])
    yield assert_almost_equal, np.array(
        res.outputs.session_info[2]['cond'][1]['duration']), np.array([
            1.,
        ])
    # Test case for variable number of events in concatenated runs, sometimes unique.
    s.inputs.concatenate_runs = True
    info = [
        Bunch(conditions=['cond1', 'cond2'],
              onsets=[[2, 3], [2]],
              durations=[[1, 1], [1]]),
        Bunch(conditions=['cond1', 'cond2'],
              onsets=[[2, 3], [2, 4]],
              durations=[[1, 1], [1, 1]]),
        Bunch(conditions=['cond1', 'cond2'],
              onsets=[[2, 3], [2]],
              durations=[[1, 1], [1]])
    ]
    s.inputs.subject_info = deepcopy(info)
    res = s.run()
    yield assert_almost_equal, np.array(
        res.outputs.session_info[0]['cond'][0]['duration']), np.array(
            [1., 1., 1., 1., 1., 1.])
    yield assert_almost_equal, np.array(
        res.outputs.session_info[0]['cond'][1]['duration']), np.array(
            [1., 1., 1., 1.])
    rmtree(tempdir)
Example #7
File: model.py Project: kiani/lyman
    def _run_interface(self, runtime):

        info = Bunch(self.inputs.info)

        # Load the anatomical template to get image geometry information
        anat_img = nib.load(self.inputs.anat_file)
        affine, header = anat_img.affine, anat_img.header

        # TODO define contrasts properly accounting for missing EVs
        result_directories = []
        for i, contrast_tuple in enumerate(info.contrasts):

            name, _, _ = contrast_tuple

            result_directories.append(op.abspath(name))
            os.makedirs(op.join(name, "qc"))

            con_frames = []
            var_frames = []

            # Load the parameter and variance data for each run/contrast.
            con_images = [nib.load(f) for f in self.inputs.contrast_files]
            var_images = [nib.load(f) for f in self.inputs.variance_files]

            # Files are input as a list of 4D images where list entries are
            # runs and the last axis is contrast; we want to concatenate runs
            # for each contrast, so we need to "transpose" the ordering.
            for con_img, var_img in zip(con_images, var_images):
                con_frames.append(con_img.get_data()[..., i])
                var_frames.append(var_img.get_data()[..., i])

            con_data = np.stack(con_frames, axis=-1)
            var_data = np.stack(var_frames, axis=-1)

            # Define a mask as voxels with nonzero variance in each run
            # and extract voxel data as arrays
            mask = (var_data > 0).all(axis=-1)
            mask_img = nib.Nifti1Image(mask.astype(np.int8), affine, header)
            con = con_data[mask]
            var = var_data[mask]

            # Compute the higher-level fixed effects parameters
            con_ffx, var_ffx, t_ffx = glm.contrast_fixed_effects(con, var)

            # Convert to image volume format
            con_img = matrix_to_image(con_ffx.T, mask_img)
            var_img = matrix_to_image(var_ffx.T, mask_img)
            t_img = matrix_to_image(t_ffx.T, mask_img)

            # Write out output images
            con_img.to_filename(op.join(name, "contrast.nii.gz"))
            var_img.to_filename(op.join(name, "variance.nii.gz"))
            t_img.to_filename(op.join(name, "tstat.nii.gz"))
            mask_img.to_filename(op.join(name, "mask.nii.gz"))

            # Contrast t statistic overlay
            stat_m = Mosaic(anat_img, t_img, mask_img, show_mask=True)
            stat_m.plot_overlay("coolwarm", -10, 10)
            stat_m.savefig(op.join(name, "qc", "tstat.png"), close=True)

            # Analysis mask
            mask_m = Mosaic(anat_img, mask_img)
            mask_m.plot_mask()
            mask_m.savefig(op.join(name, "qc", "mask.png"), close=True)

        # Output a list of directories with results.
        # This makes the connections in the workflow more opaque, but it
        # simplifies placing files in subdirectories named after contrasts.
        self._results["result_directories"] = result_directories

        return runtime
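
For orientation, fixed-effects combination of contrast estimates across runs
is typically inverse-variance weighting; a self-contained sketch of what a
function like glm.contrast_fixed_effects plausibly computes (an illustration
under that assumption, not lyman's actual implementation):

import numpy as np

def contrast_fixed_effects_sketch(con, var):
    """con, var: (n_voxels, n_runs) contrast estimates and their variances."""
    w = 1.0 / var                                # inverse-variance weights
    var_ffx = 1.0 / w.sum(axis=-1)               # combined fixed-effects variance
    con_ffx = (con * w).sum(axis=-1) * var_ffx   # precision-weighted mean
    t_ffx = con_ffx / np.sqrt(var_ffx)           # fixed-effects t statistic
    return con_ffx, var_ffx, t_ffx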
Example #8
datasource.inputs.sort_filelist = True

preproc = create_featreg_preproc(whichvol='first')
TR = 3.
preproc.inputs.inputspec.fwhm = 5
preproc.inputs.inputspec.highpass = 100. / TR

modelspec = pe.Node(interface=model.SpecifyModel(),
                    name="modelspec")
modelspec.inputs.input_units = 'secs'
modelspec.inputs.time_repetition = TR
modelspec.inputs.high_pass_filter_cutoff = 100
modelspec.inputs.subject_info = [Bunch(conditions=['Visual', 'Auditory'],
                                 onsets=[list(range(0, int(180*TR), 60)), list(range(0, int(180*TR), 90))],
                                 durations=[[30], [45]],
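                                 # a single duration per condition is applied
                                 # to every onset of that condition (as
                                 # exercised in Example #6 above)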
                                 amplitudes=None,
                                 tmod=None,
                                 pmod=None,
                                 regressor_names=None,
                                 regressors=None)]

modelfit = create_modelfit_workflow(f_contrasts=True)
modelfit.inputs.inputspec.interscan_interval = TR
modelfit.inputs.inputspec.model_serial_correlations = True
modelfit.inputs.inputspec.bases = {'dgamma': {'derivs': True}}
cont1 = ['Visual>Baseline', 'T', ['Visual', 'Auditory'], [1, 0]]
cont2 = ['Auditory>Baseline', 'T', ['Visual', 'Auditory'], [0, 1]]
cont3 = ['Task', 'F', [cont1, cont2]]
modelfit.inputs.inputspec.contrasts = [cont1, cont2, cont3]

registration = create_reg_workflow()
registration.inputs.inputspec.target_image = fsl.Info.standard_image('MNI152_T1_2mm.nii.gz')
Example #9
def subjectinfo(subject_id):
    import os
    from nipype.interfaces.base import Bunch
    from copy import deepcopy
    import numpy as np
    base_proj_dir = '/home/data/madlab/data/mri/wmaze/scanner_behav'
    # Empty array to contain info from each run (index 1-6)
    output = []

    # For the current run, of which there are 6
    model_counter = 0  #MODEL COUNTER WILL UPDATE FOR EACH TRIAL OF A GIVEN CONDITION
    for curr_run in range(1, 7):
        data_FX_before_COND_corr = np.genfromtxt(
            base_proj_dir +
            '/{0}/model_LSS2/run{1}_all_before_B_corr.txt'.format(
                subject_id, curr_run),
            dtype=str)
        data_FX_before_COND_incorr = np.genfromtxt(
            base_proj_dir +
            '/{0}/model_LSS2/run{1}_all_before_B_incorr.txt'.format(
                subject_id, curr_run),
            dtype=str)
        data_all_remaining = np.genfromtxt(
            base_proj_dir + '/{0}/model_LSS2/run{1}_all_remaining.txt'.format(
                subject_id, curr_run),
            dtype=str)

        # CONSOLIDATE ALL FIXED B4 CONDITIONAL DATA INTO A DICTIONARY FOR ITERATION
        # USE THE FOLLOWING CONDITIONALS TO DEAL WITH THE FACT THAT SOMETIMES PEOPLE DIDN'T MAKE MISTAKES
        if data_FX_before_COND_incorr.size > 0:
            orig_all_fixed_b4_cond_data = {
                'FX_before_COND_corr': data_FX_before_COND_corr,
                'FX_before_COND_incorr': data_FX_before_COND_incorr
            }
        else:
            orig_all_fixed_b4_cond_data = {
                'FX_before_COND_corr': data_FX_before_COND_corr
            }

        # ITERATE OVER THE KEYS OF THE DICTIONARY TO ISOLATE THE CONDITIONS OF INTEREST
        for curr_key in orig_all_fixed_b4_cond_data.keys():
            # ESTABLISH TRIAL COUNTER FOR NAMING OF REGRESSORS
            trial_counter = 1
            # ISOLATE CURRENT CONDITION DATA USING POP FUNCTION
            # DICTIONARY WILL NO LONGER HAVE THAT KEY
            # I USE THAT FUNCTIONALITY TO ESTABLISH THE PENDING KEYS (NOT YET ITERATED OVER)
            copy_all_fixed_b4_cond_data = dict(orig_all_fixed_b4_cond_data)
            curr_condition_data = copy_all_fixed_b4_cond_data.pop(curr_key)

            if curr_condition_data.size == 3:  # ONLY ONE EVENT OF THIS CONDITION DURING THIS RUN
                names = [
                    curr_key + '_run%d_trl%d_onset%0.2f' %
                    (curr_run, trial_counter, float(curr_condition_data[0]))
                ]
                onsets = [[float(curr_condition_data[0])]]
                durations = [[float(curr_condition_data[1])]]
                amplitudes = [[float(curr_condition_data[2])]]
                # DEAL WITH THE REMAINING DATA THAT HASN'T BEEN ITERATED THROUGH YET (AKA PENDING)
                for pending_key in copy_all_fixed_b4_cond_data.keys():
                    names.append(pending_key)
                    pending_data = copy_all_fixed_b4_cond_data[pending_key]
                    if pending_data.size == 3:  #ONLY ONE EVENT OF THIS CONDITION
                        onsets.append([float(pending_data[0])])
                        durations.append([float(pending_data[1])])
                        amplitudes.append([float(pending_data[2])])
                    else:
                        onsets.append(list(map(float, pending_data[:, 0])))
                        durations.append(list(map(float, pending_data[:, 1])))
                        amplitudes.append(list(map(float, pending_data[:, 2])))
                # INSERT THE ALL REMAINING EV INTO THE MODEL
                names.append('all_remaining')
                onsets.append(list(map(float, data_all_remaining[:, 0])))
                durations.append(list(map(float, data_all_remaining[:, 1])))
                amplitudes.append(list(map(float, data_all_remaining[:, 2])))

                # UPDATE TRIAL COUNTER
                trial_counter = trial_counter + 1

                # Insert the contents of each run at the index of model_counter
                output.insert(
                    model_counter,
                    Bunch(conditions=names,
                          onsets=deepcopy(onsets),
                          durations=deepcopy(durations),
                          amplitudes=deepcopy(amplitudes),
                          tmod=None,
                          pmod=None,
                          regressor_names=None,
                          regressors=None))

                # UPDATE MODEL COUNTER
                model_counter = model_counter + 1
            else:  # THERE IS MORE THAN ONE EVENT OF THIS CONDITION DURING THIS RUN
                # ITERATE OVER THE NUMBER OF TRIALS WITHIN THAT CONDITION
                for curr_cond_trl in range(len(curr_condition_data)):
                    # ESTABLISH THE LISTS FOR NAMES, ONSETS, DURATIONS, AND AMPLITUDES FOR ALL MODELS
                    # WE WILL HAVE AS MANY MODELS AS TRIALS ACROSS RUNS FOR THE DIFFERENT CONDITIONS
                    names = []
                    onsets = []
                    durations = []
                    amplitudes = []
                    curr_cond_trl_name = curr_key + '_run%d_trl%d_onset%0.2f' % (
                        curr_run, trial_counter,
                        float(curr_condition_data[curr_cond_trl][0]))
                    curr_cond_trl_onset = [
                        float(curr_condition_data[curr_cond_trl][0])
                    ]
                    curr_cond_trl_dur = [
                        float(curr_condition_data[curr_cond_trl][1])
                    ]
                    curr_cond_trl_amp = [
                        float(curr_condition_data[curr_cond_trl][2])
                    ]

                    names.append(curr_cond_trl_name)
                    onsets.append(curr_cond_trl_onset)
                    durations.append(curr_cond_trl_dur)
                    amplitudes.append(curr_cond_trl_amp)

                    # ISOLATE THE REMAINING TRIALS FOR THE CURRENT CONDITION USING THE NUMPY DELETE FUNCTION
                    # THIS FUNCTION WILL NOT MODIFY THE ORIGINAL VARIABLE LIKE POP DOES ABOVE
                    curr_cond_remaining_data = np.delete(
                        curr_condition_data, curr_cond_trl, 0)
                    curr_cond_remaining_name = curr_key + '_allbut_run%d_trl%d' % (
                        curr_run, trial_counter)
                    curr_cond_remaining_onsets = list(
                        map(float, curr_cond_remaining_data[:, 0]))
                    curr_cond_remaining_durs = list(
                        map(float, curr_cond_remaining_data[:, 1]))
                    curr_cond_remaining_amps = list(
                        map(float, curr_cond_remaining_data[:, 2]))

                    names.append(curr_cond_remaining_name)
                    onsets.append(curr_cond_remaining_onsets)
                    durations.append(curr_cond_remaining_durs)
                    amplitudes.append(curr_cond_remaining_amps)

                    # DEAL WITH THE PENDING DATA THAT HASN'T BEEN ITERATED THROUGH YET
                    # THIS IS WHERE THAT POP FUNCTION ABOVE CAME IN HANDY
                    for pending_key in copy_all_fixed_b4_cond_data.keys():
                        names.append(pending_key)
                        pending_data = copy_all_fixed_b4_cond_data[pending_key]
                        if pending_data.size == 3:  #ONLY ONE EVENT OF THIS CONDITION
                            onsets.append([float(pending_data[0])])
                            durations.append([float(pending_data[1])])
                            amplitudes.append([float(pending_data[2])])
                        else:
                            onsets.append(list(map(float, pending_data[:, 0])))
                            durations.append(list(map(float, pending_data[:, 1])))
                            amplitudes.append(list(map(float, pending_data[:, 2])))

                    # INSERT THE ALL REMAINING EV INTO THE MODEL
                    names.append('all_remaining')
                    onsets.append(list(map(float, data_all_remaining[:, 0])))
                    durations.append(list(map(float, data_all_remaining[:, 1])))
                    amplitudes.append(list(map(float, data_all_remaining[:, 2])))

                    # UPDATE TRIAL COUNTER
                    trial_counter = trial_counter + 1

                    # Insert the contents of each run at the index of model_counter
                    output.insert(
                        model_counter,
                        Bunch(conditions=names,
                              onsets=deepcopy(onsets),
                              durations=deepcopy(durations),
                              amplitudes=deepcopy(amplitudes),
                              tmod=None,
                              pmod=None,
                              regressor_names=None,
                              regressors=None))

                    # UPDATE MODEL COUNTER
                    model_counter = model_counter + 1
    return output
Example #10
                       return_type='file')[0]

## Getting experiment info from the event file, into a Bunch object
trialInfo = pd.read_csv(fileEvent, sep='\t')
conditions = sorted(list(set(trialInfo.Stimulus)))
onsets = []
durations = []

for itrial in conditions:
    onsets.append(list(trialInfo[trialInfo.Stimulus == itrial].onset))
    durations.append(list(trialInfo[trialInfo.Stimulus == itrial].duration))

subject_info = [
    Bunch(
        conditions=conditions,
        onsets=onsets,
        durations=durations,
    )
]

## Defining contrasts
cont01 = ['congruent', 'T', conditions, [1, 0]]
cont02 = ['incongruent', 'T', conditions, [0, 1]]
cont03 = ['cong>incong', 'T', conditions, [1, -1]]
cont04 = ['incong>cong', 'T', conditions, [-1, 1]]
cont05 = ['average', 'T', conditions, [0.5, 0.5]]

contrast_list = [cont01, cont02, cont03, cont04, cont05]

###########
#
        onsets = []
        durations = []

        for group in trialinfo.groupby('Stim'):
            conditions.append(group[0])
            onsets.append(list(group[1].Onset))
            durations.append(group[1].Duration.tolist())

        subject_info = Bunch(
            conditions=conditions,
            onsets=onsets,
            durations=durations,
            regressors=[
                list(confoundinfo.framewise_displacement.fillna(0)),
                list(confoundinfo.trans_x),
                list(confoundinfo.trans_y),
                list(confoundinfo.trans_z),
                list(confoundinfo.rot_x),
                list(confoundinfo.rot_y),
                list(confoundinfo.rot_z),
                list(confoundinfo.csf),
                list(confoundinfo.white_matter),
                list(np.ones_like(confoundinfo.white_matter))  # INTERCEPT
            ])

        session_info[session][run] = subject_info

# Get imaging data
base_dir = '/central/groups/mobbslab/toby/foraging/data/derivatives/'

fMRI = []
Example #12
def tsv2subjectinfo(events_file,
                    confounds_file=None,
                    exclude=None,
                    trim_indices=None):
    """
    Function to go from events tsv + confounds tsv to subjectinfo,
    which can then be passed to model setup functions.

    events_file, confounds_file: paths to the events TSV and confounds TSV files
    exclude: trial_types in the events_file to be ignored (does not apply to confounds)
    trim_indices: either None or a tuple that will be used to slice the confounds
                    to conform to the length of the timeseries that is passed to the GLM
                    (TRs/volumes, not seconds)

    TODO: currently the event files are basically handmade, so they artificially reflect
            the (hardcoded) trim values for the hemifield task (6 volumes up front, 1 at the end)
            instead these should be generated by the experiment code and reflect the untrimmed data,
            and then the trimming of fMRI data AND the confounds/events timeseries done in nipype (here)

    SO: for now the trim_indices only apply to the confounds (which reflect the untrimmed data)
        but in the near future they'll be applied to the events as well.
    """
    import numpy as np
    import pandas as pd
    from nipype.interfaces.base import Bunch

    # Events first
    events = pd.read_csv(events_file, sep="\t")
    if exclude is not None:  # not tested
        events.drop(exclude, axis=1, inplace=True)

    conditions = sorted(events['trial_type'].unique())
    onsets = [
        events['onset'][events['trial_type'] == tt].tolist()
        for tt in conditions
    ]
    durations = [
        events['duration'][events['trial_type'] == tt].tolist()
        for tt in conditions
    ]
    if 'weight' in events.columns:
        amplitudes = [
            events['weight'][events['trial_type'] == tt].tolist()
            for tt in conditions
        ]
    else:
        amplitudes = [np.ones(len(d)) for d in durations]

    # Confounds next
    if confounds_file is None or confounds_file == '':
        regressor_names = []
        regressors = []
    else:
        confounds = pd.read_csv(confounds_file, sep="\t",
                                na_values="n/a")  # fmriprep confounds file
        regressor_names = [  # 'white_matter', 'global_signal','framewise_displacement',
            # 'a_comp_cor_00',
            # 'a_comp_cor_01',
            # 'a_comp_cor_02',
            # 'a_comp_cor_03',
            # 'a_comp_cor_04',
            # 'a_comp_cor_05',
            'trans_x',
            'trans_y',
            'trans_z',
            'rot_x',
            'rot_y',
            'rot_z'
        ]

        if trim_indices is None:
            regressors = [
                list(confounds[reg].fillna(0)) for reg in regressor_names
            ]
        else:
            assert len(trim_indices) == 2
            # if nothing is to be trimmed from the end, 0 will be passed in,
            # but slice() wants None
            if trim_indices[-1] == 0:
                regressors = [
                    list(confounds[reg].fillna(0))[slice(
                        trim_indices[0], None)] for reg in regressor_names
                ]
            else:
                regressors = [
                    list(confounds[reg].fillna(0))[slice(*trim_indices)]
                    for reg in regressor_names
                ]

    bunch = Bunch(conditions=conditions,
                  onsets=onsets,
                  durations=durations,
                  amplitudes=amplitudes,
                  regressor_names=regressor_names,
                  regressors=regressors)

    return bunch
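
A hypothetical invocation (the file names are placeholders, not paths from
the original project):

info = tsv2subjectinfo(
    events_file='sub-01_task-hemifield_events.tsv',
    confounds_file='sub-01_task-hemifield_desc-confounds_timeseries.tsv',
    trim_indices=(6, -1))  # trim 6 volumes up front and 1 at the end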
Example #13
def subjectinfo(subject_id):
    import os
    from nipype.interfaces.base import Bunch
    from copy import deepcopy
    import numpy as np
    base_proj_dir = '/home/data/madlab/data/mri/wmaze/scanner_behav'
    output = []

    for curr_run in range(1, 7):
        names = []
        onsets = []
        durations = []
        amplitudes = []

        #EV files
        data_fixed_before_cond_corr = np.genfromtxt(
            base_proj_dir +
            '/{0}/model_GLM3/run{1}_fixed_before_cond_corr.txt'.format(
                subject_id, curr_run),
            dtype=str)
        data_fixed_before_cond_incorr = np.genfromtxt(
            base_proj_dir +
            '/{0}/model_GLM3/run{1}_fixed_before_cond_incorr.txt'.format(
                subject_id, curr_run),
            dtype=str)
        data_same = np.genfromtxt(
            base_proj_dir + '/{0}/model_GLM3/run{1}_fixed_same.txt'.format(
                subject_id, curr_run),
            dtype=str)
        data_change = np.genfromtxt(
            base_proj_dir + '/{0}/model_GLM3/run{1}_fixed_change.txt'.format(
                subject_id, curr_run),
            dtype=str)
        data_lost = np.genfromtxt(
            base_proj_dir + '/{0}/model_GLM3/run{1}_fixed_lost.txt'.format(
                subject_id, curr_run),
            dtype=str)
        data_nonresponse = np.genfromtxt(
            base_proj_dir + '/{0}/model_GLM3/run{1}_nonresponse.txt'.format(
                subject_id, curr_run),
            dtype=str)

        #obtain name, onsets, durations, and amplitudes from fixed_before_cond trials
        corr_array_name = data_fixed_before_cond_corr
        incorr_array_name = data_fixed_before_cond_incorr
        if incorr_array_name.size > 0:  #more than 1 incorrect
            curr_names = ['fixed_before_cond_corr', 'fixed_before_cond_incorr']
            curr_corr_onsets = list(map(float, corr_array_name[:, 0]))
            curr_corr_durations = list(map(float, corr_array_name[:, 1]))
            curr_corr_amplitudes = list(map(float, corr_array_name[:, 2]))
            if incorr_array_name.size == 3:  #only 1 incorrect
                curr_incorr_onsets = [float(incorr_array_name[0])]
                curr_incorr_durations = [float(incorr_array_name[1])]
                curr_incorr_amplitudes = [float(incorr_array_name[2])]
            else:
                curr_incorr_onsets = list(map(float, incorr_array_name[:, 0]))
                curr_incorr_durations = list(map(float, incorr_array_name[:, 1]))
                curr_incorr_amplitudes = list(map(float, incorr_array_name[:, 2]))
            curr_onsets = [curr_corr_onsets, curr_incorr_onsets]
            curr_durations = [curr_corr_durations, curr_incorr_durations]
            curr_amplitudes = [curr_corr_amplitudes, curr_incorr_amplitudes]
        else:  #no incorrect
            curr_names = ['fixed_before_cond_corr']
            curr_corr_onsets = list(map(float, corr_array_name[:, 0]))
            curr_corr_durations = list(map(float, corr_array_name[:, 1]))
            curr_corr_amplitudes = list(map(float, corr_array_name[:, 2]))
            curr_onsets = [curr_corr_onsets]
            curr_durations = [curr_corr_durations]
            curr_amplitudes = [curr_corr_amplitudes]
        names.append(curr_names)
        onsets.append(curr_onsets)
        durations.append(curr_durations)
        amplitudes.append(curr_amplitudes)

        #obtain name, onsets, durations, and amplitudes from same, change, and lost trials
        sequence = ['same', 'change', 'lost']
        data_by_type = {'same': data_same, 'change': data_change, 'lost': data_lost}
        for curr_type in sequence:
            array_name = data_by_type[curr_type]
            if array_name.size > 0:  #at least one event of this type
                curr_names = ['{0}'.format(curr_type)]
                if array_name.size == 3:  #only 1
                    curr_onsets = [float(array_name[0])]
                    curr_durations = [float(array_name[1])]
                    curr_amplitudes = [float(array_name[2])]
                else:
                    curr_onsets = list(map(float, array_name[:, 0]))
                    curr_durations = list(map(float, array_name[:, 1]))
                    curr_amplitudes = list(map(float, array_name[:, 2]))
                curr_onsets = [curr_onsets]
                curr_durations = [curr_durations]
                curr_amplitudes = [curr_amplitudes]
                #append inside the conditional so that runs without events of
                #this type do not re-append stale values from the previous one
                names.append(curr_names)
                onsets.append(curr_onsets)
                durations.append(curr_durations)
                amplitudes.append(curr_amplitudes)

        if data_nonresponse.size > 0:
            curr_names = ['nonresponse']
            if data_nonresponse.size == 3:
                curr_onsets = [float(data_nonresponse[0])]
                curr_durations = [float(data_nonresponse[1])]
                curr_amplitudes = [float(data_nonresponse[2])]
            else:  #MORE THAN ONE TRIAL
                curr_onsets = list(map(float, data_nonresponse[:, 0]))
                curr_durations = list(map(float, data_nonresponse[:, 1]))
                curr_amplitudes = list(map(float, data_nonresponse[:, 2]))
            curr_onsets = [curr_onsets]
            curr_durations = [curr_durations]
            curr_amplitudes = [curr_amplitudes]
            names.append(curr_names)
            onsets.append(curr_onsets)
            durations.append(curr_durations)
            amplitudes.append(curr_amplitudes)

        if any(isinstance(el, list)
               for el in names):  #unpacks subarrays into one mega array
            names = [el for sublist in names for el in sublist]
        if any(isinstance(el, list) for el in onsets):
            onsets = [el_o for sublist_o in onsets for el_o in sublist_o]
        if any(isinstance(el, list) for el in durations):
            durations = [el_d for sublist_d in durations for el_d in sublist_d]
        if any(isinstance(el, list) for el in amplitudes):
            amplitudes = [
                el_a for sublist_a in amplitudes for el_a in sublist_a
            ]

        output.insert(
            curr_run,  #insert the contents of each run at the index of curr_run (1-6) 
            Bunch(conditions=names,
                  onsets=deepcopy(onsets),
                  durations=deepcopy(durations),
                  amplitudes=deepcopy(amplitudes),
                  tmod=None,
                  pmod=None,
                  regressor_names=None,
                  regressors=None))
    return output
Example #14
#Session 2
condnames = ['hap', 'sad', 'neu', 'hor', 'ver', 'chk']
o2 = [
    np.arange(97.2, 502.2, 194.4).tolist(),
    np.arange(32.4, 502.2, 194.4).tolist(),
    np.arange(162, 502.2, 194.4).tolist(),
    np.arange(0, 502.2, 194.4).tolist(),
    np.arange(129.6, 502.2, 194.4).tolist(),
    np.arange(64.8, 502.2, 194.4).tolist()
]
d2 = [[16.8] * 3, [16.8] * 3, [16.8] * 2, [16.8] * 3, [16.8] * 2, [16.8] * 3]

#Create list of Bunch objects
design = [
    Bunch(conditions=condnames, onsets=o1, durations=d1),
    Bunch(conditions=condnames, onsets=o2, durations=d2)
]

#Input model specifications
modelspec = Node(interface=modelgen.SpecifySPMModel(), name='modelspec')
modelspec.inputs.input_units = 'secs'
modelspec.inputs.high_pass_filter_cutoff = 100.0
modelspec.inputs.concatenate_runs = False
modelspec.inputs.subject_info = design

#Design first level model
level1design = Node(interface=spm.Level1Design(), name='level1design')
level1design.inputs.interscan_interval = 3.0
level1design.inputs.timing_units = 'secs'
level1design.inputs.model_serial_correlations = 'AR(1)'
Example #15
def eprime2dm(eprime, pppi):
    import os
    import re
    import scipy.io as sp
    import glob as gl
    import numpy
    import nipype.interfaces.matlab as mlab
    from nipype.interfaces.base import Bunch

    # convert a numpy data array into a list of lists
    def convert_numpy(ar, toString=False):
        lst = []
        for a in ar.tolist():
            if toString:
                lst.append(str(a))
            elif type(a) == numpy.ndarray:
                if a.size > 1:
                    lst.append(a.tolist())
                else:
                    lst.append([a.tolist()])
            else:
                lst.append([a])
        return lst

    m = re.search(r"eprime_([a-z]+)\.txt", eprime)
    sequence = m.group(1)

    # get nDM .mat files
    mat = os.path.join(os.path.dirname(eprime), "nDM*" + sequence + ".mat")
    mat = gl.glob(mat)
    if len(mat) > 0:
        mat = mat[0]
    else:
        # execute matlab script to generate the nDM file
        m = mlab.MatlabCommand()
        m.inputs.mfile = False
        m.inputs.script = sequence + "_eprime2dm_embarcs('" + eprime + "');"
        m.run()

        # get nDM file (again)
        mat = os.path.join(os.path.dirname(eprime), 'nDM*.mat')
        mat = gl.glob(mat)
        if len(mat) > 0:
            mat = mat[0]

    dm = sp.loadmat(mat, squeeze_me=True)

    names = convert_numpy(dm.get('names'), True)
    onsets = convert_numpy(dm.get('onsets'))
    durations = convert_numpy(dm.get('durations'))

    # load up values and convert them:
    # for PPPI remove the last column for the reward PPI; for ert the last
    # three columns (error, posterror, misc)
    if pppi:
        trim = 3
        if sequence == "reward":
            trim = 1
        names = names[0:(len(names) - trim)]
        durations = durations[0:(len(durations) - trim)]
        onsets = onsets[0:(len(onsets) - trim)]

    # create bunch to return
    bunch = Bunch(conditions=names, onsets=onsets, durations=durations)
    if 'pmod' in dm:
        pmod = []
        for i in range(0, len(dm.get('pmod'))):
            if dm['pmod']['name'][i].size == 0:
                pmod.append(None)
            else:
                name = str(dm['pmod']['name'][i])
                param = dm['pmod']['param'][i].tolist()
                poly = dm['pmod']['poly'][i]
                pmod.append(Bunch(name=[name], param=[param], poly=[poly]))
        bunch.pmod = pmod

    return bunch
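
A hypothetical call (placeholder path; the file name must match the
eprime_<sequence>.txt pattern that the regular expression expects):

bunch = eprime2dm('/data/subj01/eprime_reward.txt', pppi=True)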
Example #16
class ThreedBrickStat(AFNICommand):
    """
    For complete details, see the `3dBrickStat Documentation.
    <http://afni.nimh.nih.gov/pub/dist/doc/program_help/3dBrickStat.html>`_
    """

    @property
    def cmd(self):
        """Base command for ThreedBrickStat"""
        return '3dBrickStat'

    def inputs_help(self):
        doc = """
        """
        print(doc)

    def _populate_inputs(self):
        """Initialize the inputs attribute."""

        self.inputs = Bunch(automask=None,
                            percentile=None,
                            infile=None)

    def _parseinputs(self):
        """Parse valid input options for ThreedBrickStat command.

        Ignore options set to None.

        """

        out_inputs = []
        inputs = {k: v for k, v in self.inputs.items() if v is not None}

        if 'automask' in inputs:
            inputs.pop('automask')
            out_inputs.append('-automask')
        if 'percentile' in inputs:
            val = inputs.pop('percentile')
            inputssub = {k: v for k, v in val.items() if v is not None}

            if 'p0' in inputssub:
                valsub = inputssub.pop('p0')
                out_inputs.append('-percentile %s' % str(valsub))
            else:
                valsub = None
                print('Warning: value \'p0\' required for percentile')
            if 'pstep' in inputssub:
                valsub = inputssub.pop('pstep')
                out_inputs.append('%s' % str(valsub))
            else:
                valsub = None
                print('Warning: value \'pstep\' required for percentile')
            if 'p1' in inputssub:
                valsub = inputssub.pop('p1')
                out_inputs.append('%s' % str(valsub))
            else:
                valsub = None
                print('Warning: value \'p1\' required for percentile')

        if 'infile' in inputs:
            val = inputs.pop('infile')
            out_inputs.append('%s' % val)

        if len(inputs) > 0:
            print('%s: unsupported options: %s' % (
                self.__class__.__name__, list(inputs.keys())))

        return out_inputs
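
A minimal usage sketch (assuming, as in Example #1, that the AFNICommand
base class initializes the inputs Bunch on construction; the dataset name
is hypothetical):

stat = ThreedBrickStat()
stat.inputs.automask = True
stat.inputs.percentile = dict(p0=0, pstep=25, p1=100)
stat.inputs.infile = 'func+orig'  # hypothetical AFNI dataset
print([stat.cmd] + stat._parseinputs())
# ['3dBrickStat', '-automask', '-percentile 0', '25', '100', 'func+orig']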
Example #17
def read_tsv_train_mod(tsv_files):
    import configparser
    import csv
    import nipype
    import numpy as np
    from nipype.interfaces.base import Bunch
    from io import StringIO
    cfg = configparser.ConfigParser()
    # cfg.read(r'/home/benjamin/Projects/biggan_encoder/scripts/fmri/glm_with_modulation.ini')
    cfg.read(r'/home/ahmad/Project/fmri/fmri/glm_with_modulation.ini')
    latent_size = int(cfg['params']['latent_size'])
    latent_tsv_file = cfg['params']['latent_tsv']

    condition_names = ['rest', 'oneback', 'stimulus']
    pmod_names = [f'Z{s:05d}' for s in range(latent_size)]
    pmod_poly = [1] * len(pmod_names)
    with open(latent_tsv_file) as tsv:
        all_latent = list(
            csv.DictReader(tsv, skipinitialspace=True, delimiter=str('\t')))
    all_latent_dict = {l['stim_id']: l['latent_vector'] for l in all_latent}

    subject_info = []
    for i in range(len(tsv_files)):
        onsets = {'rest': [], 'stimulus': [], 'oneback': []}
        durs = {'rest': [], 'stimulus': [], 'oneback': []}
        amps = {'rest': [], 'stimulus': [], 'oneback': []}
        latents = []
        with open(tsv_files[i]) as tsv:
            reader = list(
                csv.DictReader(tsv, skipinitialspace=True,
                               delimiter=str('\t')))
            for idx in range(len(reader)):
                cond = reader[idx]['event_type']
                if cond == 'stimulus' and idx > 0:
                    # a repeat of the previous stim_id marks a oneback event
                    if reader[idx]['stim_id'] == reader[idx - 1]['stim_id']:
                        cond = 'oneback'

                onsets[cond].append(float(reader[idx]['onset']))
                durs[cond].append(float(reader[idx]['duration']))
                amps[cond].append(1.0)

                if cond == 'stimulus':
                    latents.append(
                        np.genfromtxt(StringIO(
                            all_latent_dict[reader[idx]['stim_id']]),
                                      delimiter=","))

        pmod_param = np.array(latents)
        pmod_param_c = [pmod_param[:, z].tolist() for z in range(latent_size)]
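        # pmod entries align with condition_names: 'rest' and 'oneback' are
        # unmodulated (None); only 'stimulus' carries the latent modulators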
        pmod = [
            None, None,
            Bunch(name=pmod_names, param=pmod_param_c, poly=pmod_poly)
        ]
        subject_info.append(
            Bunch(conditions=condition_names,
                  onsets=[onsets[k] for k in condition_names],
                  durations=[durs[k] for k in condition_names],
                  amplitudes=[amps[k] for k in condition_names],
                  tmod=None,
                  pmod=pmod,
                  regressor_names=None,
                  regressors=None))
    return subject_info
Example #18
def get_subject_info(events, confounds):
    """
    FUNCTION TO GET THE SUBJECT-SPECIFIC INFORMATION
    :param events: list with paths to events files
    :param confounds: list with paths to confounds files
    :return: Bunch object with event onsets, durations and regressors
    """

    # import libraries (needed to be done in the function):
    import pandas as pd
    from nipype.interfaces.base import Bunch

    # event types we consider:
    event_spec = {
        'correct_rejection': {
            'target': 0,
            'key_down': 0
        },
        'hit': {
            'target': 1,
            'key_down': 1
        },
        'false_alarm': {
            'target': 0,
            'key_down': 1
        },
        'miss': {
            'target': 1,
            'key_down': 0
        },
    }

    #event_names = ['correct_rejection']

    # read the events and confounds files of the current run:
    #events = selectfiles_results.outputs.events[0]
    #confounds = selectfiles_results.outputs.confounds[0]
    run_events = pd.read_csv(events, sep="\t")
    run_confounds = pd.read_csv(confounds, sep="\t")

    # define confound prefixes to include as regressors (renamed so the
    # parameter `confounds` is not shadowed):
    confound_prefixes = ['trans', 'rot', 'a_comp_cor', 'framewise_displacement']

    # search for confounds of interest in the confounds data frame:
    regressor_names = [
        col for col in run_confounds.columns
        if any(conf in col for conf in confound_prefixes)
    ]

    def replace_nan(regressor_values):
        # calculate the mean value of the regressor:
        mean_value = regressor_values.mean(skipna=True)
        # replace all values containing nan with the mean value:
        regressor_values[regressor_values.isnull()] = mean_value
        # return list of the regressor values:
        return list(regressor_values)

    # create a nested list with regressor values
    regressors = [replace_nan(run_confounds[conf]) for conf in regressor_names]

    onsets = []
    durations = []
    event_names = []

    for event in event_spec:

        onset_list = list(run_events['onset'][
            (run_events['condition'] == 'oddball')
            & (run_events['target'] == event_spec[event]['target']) &
            (run_events['key_down'] == event_spec[event]['key_down'])])

        duration_list = list(run_events['duration'][
            (run_events['condition'] == 'oddball')
            & (run_events['target'] == event_spec[event]['target']) &
            (run_events['key_down'] == event_spec[event]['key_down'])])

        if (onset_list != []) & (duration_list != []):
            event_names.append(event)
            onsets.append(onset_list)
            durations.append(duration_list)

    # create a bunch for each run:
    subject_info = Bunch(conditions=event_names,
                         onsets=onsets,
                         durations=durations,
                         regressor_names=regressor_names,
                         regressors=regressors)

    return subject_info, sorted(event_names)
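
A hypothetical call (placeholder paths):

subject_info, event_names = get_subject_info(
    'sub-01_task-oddball_events.tsv',
    'sub-01_task-oddball_desc-confounds_timeseries.tsv')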
Example #19
def subjectinfo(subject_id):
    base_proj_dir = "/home/data/madlab/data/mri/wmaze"
    import os
    from nipype.interfaces.base import Bunch
    from copy import deepcopy
    import numpy as np
    
    output = []
    for curr_run in range(1,7):
        names = []
        onsets = []
        durations = []
        amplitudes = []

        data_baseline = np.genfromtxt(base_proj_dir + "/scanner_behav/{0}/model_ABC/run{1}_BL.txt".format(subject_id, curr_run), dtype=str)
        for stim in ['A', 'B', 'C']:
            data_corr = np.genfromtxt(base_proj_dir + "/scanner_behav/{0}/model_ABC/run{1}_{2}_corr.txt".format(subject_id,curr_run,stim),dtype=str)
            data_incorr = np.genfromtxt(base_proj_dir +"/scanner_behav/{0}/model_ABC/run{1}_{2}_incorr.txt".format(subject_id,curr_run,stim),dtype=str)	

            if data_incorr.size > 0:
                curr_names = [stim + '_corr', stim + '_incorr']
                curr_corr_onsets = list(map(float, data_corr[:,0]))
                curr_corr_durations = list(map(float, data_corr[:,1]))
                curr_corr_amplitudes = list(map(float, data_corr[:,2]))
                if data_incorr.size == 3: #ONLY ONE ERROR WAS MADE
                    curr_incorr_onsets = [float(data_incorr[0])]
                    curr_incorr_durations = [float(data_incorr[1])]
                    curr_incorr_amplitudes = [float(data_incorr[2])]
                else:
                    curr_incorr_onsets = list(map(float, data_incorr[:,0]))
                    curr_incorr_durations = list(map(float, data_incorr[:,1]))
                    curr_incorr_amplitudes = list(map(float, data_incorr[:,2]))
                curr_onsets = [curr_corr_onsets, curr_incorr_onsets]
                curr_durations = [curr_corr_durations, curr_incorr_durations]
                curr_amplitudes = [curr_corr_amplitudes, curr_incorr_amplitudes]
            else: #NO ERRORS WERE MADE
                curr_names = [stim + '_corr']
                curr_corr_onsets = list(map(float, data_corr[:,0]))
                curr_corr_durations = list(map(float, data_corr[:,1]))
                curr_corr_amplitudes = list(map(float, data_corr[:,2]))
                curr_onsets = [curr_corr_onsets]
                curr_durations = [curr_corr_durations]
                curr_amplitudes = [curr_corr_amplitudes]
            names.append(curr_names) 
            onsets.append(curr_onsets)
            durations.append(curr_durations)
            amplitudes.append(curr_amplitudes)
 
        curr_names = ['baseline']
        curr_corr_onsets = list(map(float, data_baseline[:,0]))
        curr_corr_durations = list(map(float, data_baseline[:,1]))
        curr_corr_amplitudes = list(map(float, data_baseline[:,2]))
        curr_onsets = [curr_corr_onsets]
        curr_durations = [curr_corr_durations]
        curr_amplitudes = [curr_corr_amplitudes]         
        names.append(curr_names)  
        onsets.append(curr_onsets)
        durations.append(curr_durations)
        amplitudes.append(curr_amplitudes) 

        if any(isinstance(el, list) for el in names):
            names_list = names
            names = [el for sublist in names_list for el in sublist]
        if any(isinstance(el, list) for el in onsets):
            onsets_list = onsets
            onsets = [el_o for sublist_o in onsets_list for el_o in sublist_o]
        if any(isinstance(el, list) for el in durations):
            durations_list = durations
            durations = [el_d for sublist_d in durations_list for el_d in sublist_d]
        if any(isinstance(el, list) for el in amplitudes):
            amplitudes_list = amplitudes
            amplitudes = [el_a for sublist_a in amplitudes_list for el_a in sublist_a]

        output.insert(curr_run,
                      Bunch(conditions = names,
                            onsets = deepcopy(onsets),
                            durations = deepcopy(durations),
                            amplitudes = deepcopy(amplitudes),
                            tmod = None, pmod = None,
                            regressor_names = None, regressors = None))
    return output
Example #20
File: model.py Project: kiani/lyman
    def _run_interface(self, runtime):

        subject = self.inputs.subject
        session = self.inputs.session
        run = self.inputs.run
        info = Bunch(self.inputs.info)
        data_dir = self.inputs.data_dir

        # Load the timeseries
        ts_img = nib.load(self.inputs.ts_file)
        affine, header = ts_img.affine, ts_img.header

        # Load the anatomical segmentation and fine analysis mask
        run_mask = nib.load(self.inputs.mask_file).get_data() > 0
        seg_img = nib.load(self.inputs.seg_file)
        seg = seg_img.get_data()
        mask = (seg > 0) & (seg < 5) & run_mask
        n_vox = mask.sum()
        mask_img = nib.Nifti1Image(mask.astype(np.int8), affine, header)

        # Load the noise segmentation
        # TODO implement noisy voxel removal
        noise_img = nib.load(self.inputs.noise_file)

        # Spatially filter the data
        fwhm = info.smooth_fwhm
        # TODO use smooth_segmentation instead?
        signals.smooth_volume(ts_img, fwhm, mask_img, noise_img, inplace=True)

        if info.surface_smoothing:
            # TODO this is double smoothing the surface voxels!
            vert_data = nib.load(self.inputs.surf_file).get_data()
            for i, mesh_file in enumerate(self.inputs.mesh_files):
                sm = surface.SurfaceMeasure.from_file(mesh_file)
                vert_img = nib.Nifti1Image(vert_data[..., i], affine)
                signals.smooth_surface(ts_img, vert_img, sm, fwhm, noise_img,
                                       inplace=True)

        # Compute the mean image for later
        # TODO limit to gray matter voxels?
        data = ts_img.get_data()
        mean = data.mean(axis=-1)
        mean_img = nib.Nifti1Image(mean, affine, header)

        # Temporally filter the data
        n_tp = ts_img.shape[-1]
        hpf_matrix = glm.highpass_filter_matrix(n_tp,
                                                info.hpf_cutoff,
                                                info.tr)
        data[mask] = np.dot(hpf_matrix, data[mask].T).T

        # TODO remove the mean from the data
        # data[gray_mask] += mean[gray_mask, np.newaxis]
        data[~mask] = 0  # TODO this is done within smoothing actually

        # Define confound regressors from various sources
        # TODO
        mc_data = pd.read_csv(self.inputs.mc_file)

        # Detect artifact frames
        # TODO

        # Convert to percent signal change?
        # TODO

        # Build the design matrix
        # TODO move out of moss and simplify
        design_file = op.join(data_dir, subject, "design",
                              info.model_name + ".csv")
        design = pd.read_csv(design_file)
        run_rows = (design.session == session) & (design.run == run)
        design = design.loc[run_rows]
        # TODO better error when this fails (maybe check earlier too)
        assert len(design) > 0
        dmat = mossglm.DesignMatrix(design, ntp=n_tp, tr=info.tr)
        X = dmat.design_matrix.values

        # Save out the design matrix
        design_file = self.define_output("design_file", "design.csv")
        dmat.design_matrix.to_csv(design_file, index=False)

        # Prewhiten the data
        ts_img = nib.Nifti1Image(data, affine)
        WY, WX = glm.prewhiten_image_data(ts_img, mask_img, X)

        # Fit the final model
        B, SS, XtXinv, E = glm.iterative_ols_fit(WY, WX)

        # TODO should we re-compute the tSNR on the residuals?

        # Convert outputs to image format
        beta_img = matrix_to_image(B.T, mask_img)
        error_img = matrix_to_image(SS, mask_img)
        XtXinv_flat = XtXinv.reshape(n_vox, -1)
        ols_img = matrix_to_image(XtXinv_flat.T, mask_img)
        resid_img = matrix_to_image(E, mask_img, ts_img)

        # Write out the results
        self.write_image("mask_file", "mask.nii.gz", mask_img)
        self.write_image("beta_file", "beta.nii.gz", beta_img)
        self.write_image("error_file", "error.nii.gz", error_img)
        self.write_image("ols_file", "ols.nii.gz", ols_img)
        if info.save_residuals:
            self.write_image("resid_file", "resid.nii.gz", resid_img)

        # Make some QC plots
        # We want a version of the resid data with an intact mean so that
        # the carpet plot can compute percent signal change.
        # (Maybe carpetplot should accept a mean image and handle that
        # internally)?
        # TODO standardize the representation of mean in this method
        resid_data = np.zeros(ts_img.shape, np.float32)
        resid_data += np.expand_dims(mean * mask, axis=-1)
        resid_data[mask] += E.T
        resid_img = nib.Nifti1Image(resid_data, affine, header)

        p = CarpetPlot(resid_img, seg_img, mc_data)
        self.write_visualization("resid_plot", "resid.png", p)

        # Plot the design matrix
        # TODO update when improving design matrix code
        design_plot = self.define_output("design_plot", "design.png")
        dmat.plot(fname=design_plot, close=True)

        # Plot the sigma squares image for QC
        error_m = Mosaic(mean_img, error_img, mask_img)
        error_m.plot_overlay("cube:.8:.2", 0, fmt=".0f")
        self.write_visualization("error_plot", "error.png", error_m)

        return runtime
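The temporal filtering step in _run_interface works because a linear high-pass filter can be written as a single (n_tp x n_tp) matrix applied to every voxel's time series at once. A minimal sketch of one way to build such a matrix, as a residual-forming projection off a low-frequency cosine basis; this illustrates the idea only and is not lyman's glm.highpass_filter_matrix:

import numpy as np

def dct_highpass_matrix(n_tp, cutoff, tr):
    """Residual-forming matrix that removes drifts slower than `cutoff` seconds."""
    # Number of DCT regressors with period longer than the cutoff
    order = int(np.floor(2 * n_tp * tr / cutoff))
    t = np.arange(n_tp)
    # Low-frequency cosine regressors (constant term deliberately excluded;
    # include k=0 if the mean should be removed as well)
    C = np.column_stack([np.cos(np.pi * (k + 1) * (t + 0.5) / n_tp)
                         for k in range(order)])
    # Project out the span of C:  F = I - C (C^T C)^{-1} C^T
    return np.eye(n_tp) - C @ np.linalg.pinv(C)

# Apply to a (n_vox, n_tp) matrix, mirroring data[mask] above
n_vox, n_tp = 10, 200
data = np.random.randn(n_vox, n_tp)
F = dct_highpass_matrix(n_tp, cutoff=128, tr=2.0)
filtered = (F @ data.T).T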
Exemplo n.º 21
 def _agg(objekt, runtime):
     outputs = Bunch(BrainExtractionMask=os.path.join(
         datadir, 'testBrainExtractionRPTBrainExtractionMask.nii.gz'))
     return outputs
Exemplo n.º 22
def test_modelgen1():
    tempdir = mkdtemp()
    filename1 = os.path.join(tempdir, 'test1.nii')
    filename2 = os.path.join(tempdir, 'test2.nii')
    Nifti1Image(np.random.rand(10, 10, 10, 200),
                np.eye(4)).to_filename(filename1)
    Nifti1Image(np.random.rand(10, 10, 10, 200),
                np.eye(4)).to_filename(filename2)
    s = SpecifyModel()
    s.inputs.input_units = 'scans'
    set_output_units = lambda: setattr(s.inputs, 'output_units', 'scans')
    yield assert_raises, TraitError, set_output_units
    s.inputs.functional_runs = [filename1, filename2]
    s.inputs.time_repetition = 6
    s.inputs.high_pass_filter_cutoff = 128.
    info = [
        Bunch(conditions=['cond1'],
              onsets=[[2, 50, 100, 180]],
              durations=[[1]],
              amplitudes=None,
              pmod=None,
              regressors=None,
              regressor_names=None,
              tmod=None),
        Bunch(conditions=['cond1'],
              onsets=[[30, 40, 100, 150]],
              durations=[[1]],
              amplitudes=None,
              pmod=None,
              regressors=None,
              regressor_names=None,
              tmod=None)
    ]
    s.inputs.subject_info = info
    res = s.run()
    yield assert_equal, len(res.outputs.session_info), 2
    yield assert_equal, len(res.outputs.session_info[0]['regress']), 0
    yield assert_equal, len(res.outputs.session_info[0]['cond']), 1
    yield assert_almost_equal, np.array(
        res.outputs.session_info[0]['cond'][0]['onset']), np.array(
            [12, 300, 600, 1080])
    info = [
        Bunch(conditions=['cond1'], onsets=[[2]], durations=[[1]]),
        Bunch(conditions=['cond1'], onsets=[[3]], durations=[[1]])
    ]
    s.inputs.subject_info = deepcopy(info)
    res = s.run()
    yield assert_almost_equal, np.array(
        res.outputs.session_info[0]['cond'][0]['duration']), np.array([6.])
    yield assert_almost_equal, np.array(
        res.outputs.session_info[1]['cond'][0]['duration']), np.array([6.])
    info = [
        Bunch(conditions=['cond1', 'cond2'],
              onsets=[[2, 3], [2]],
              durations=[[1, 1], [1]]),
        Bunch(conditions=['cond1', 'cond2'],
              onsets=[[2, 3], [2, 4]],
              durations=[[1, 1], [1, 1]])
    ]
    s.inputs.subject_info = deepcopy(info)
    s.inputs.input_units = 'scans'
    res = s.run()
    yield assert_almost_equal, np.array(
        res.outputs.session_info[0]['cond'][0]['duration']), np.array([6., 6.])
    yield assert_almost_equal, np.array(
        res.outputs.session_info[0]['cond'][1]['duration']), np.array([
            6.,
        ])
    yield assert_almost_equal, np.array(
        res.outputs.session_info[1]['cond'][1]['duration']), np.array([6., 6.])
    rmtree(tempdir)
Exemplo n.º 23
def pick_onsets(subject_id):
    '''Picks onsets and durations per condition and adds them to lists.
    This function picks onsets, durations, and parametric gain/loss
    weights for the 'gamble' condition of the MGT task, run by run.
    The function reads BIDS-format event files.

    'subject_id' is a string, e.g., 'sub-001'
    '''

    cond_names = ['gamble']
    onset = {}
    duration = {}
    weights_gain = {}
    weights_loss = {}
    runs = ['01', '02', '03', '04']

    for r in range(len(runs)):  # Loop over number of runs.
        onset.update({s + '_run' + str(r+1): [] for s in cond_names})
        duration.update({s + '_run' + str(r+1): [] for s in cond_names})
        weights_gain.update({'gain_run' + str(r+1): []})
        weights_loss.update({'loss_run' + str(r+1): []})

    base_name = '/data/pt_nmc002/other/narps/event_tsvs/'
    # subject_id = 'sub-001'
    for ir, run in enumerate(runs):
        f_events = base_name + subject_id + \
            '_task-MGT_run-' + runs[ir] + '_events.tsv'
        with open(f_events, 'rt') as f:
            next(f)  # skip the header
            for line in f:
                info = line.strip().split()
                for cond in cond_names:
                    val = cond + '_run' + str(ir+1)
                    val_gain = 'gain_run' + str(ir+1)
                    val_loss = 'loss_run' + str(ir+1)
                    onset[val].append(float(info[0]))
                    duration[val].append(float(info[1]))
                    weights_gain[val_gain].append(float(info[2]))
                    weights_loss[val_loss].append(float(info[3]))
    #                if cond == 'gain':
    #                    weights[val].append(float(info[2]))
    #                elif cond == 'loss':
    #                    weights[val].append(float(info[3]))
    #                elif cond == 'task-activ':
    #                    weights[val].append(float(1))
    from nipype.interfaces.base import Bunch

    # Bunching is done per run, i.e. cond1_run1, cond2_run1, etc.
    subjectinfo = []
    for r in range(len(runs)):

        cond = [c + '_run' + str(r+1) for c in cond_names]
        gain = 'gain_run' + str(r+1)
        loss = 'loss_run' + str(r+1)

        subjectinfo.insert(r,
                           Bunch(conditions=cond,
                                 onsets=[onset[k] for k in cond],
                                 durations=[duration[k] for k in cond],
                                 amplitudes=None,
                                 tmod=None,
                                 pmod=[Bunch(name=[gain, loss],
                                             poly=[1, 1],
                                             param=[weights_gain[gain],
                                                    weights_loss[loss]])],
                                 regressor_names=None,
                                 regressors=None))

    return subjectinfo
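The per-run Bunch list returned by pick_onsets is typically wired into a model-specification node through a Function interface. A minimal wiring sketch; the node names, TR, and workflow are hypothetical, and functional_runs would come from preprocessing:

import nipype.pipeline.engine as pe
import nipype.algorithms.modelgen as model
from nipype.interfaces.utility import Function

getonsets = pe.Node(Function(input_names=['subject_id'],
                             output_names=['subjectinfo'],
                             function=pick_onsets),
                    name='getonsets')
getonsets.inputs.subject_id = 'sub-001'

modelspec = pe.Node(model.SpecifyModel(), name='modelspec')
modelspec.inputs.input_units = 'secs'
modelspec.inputs.time_repetition = 1.0  # assumed TR, not from the original script
modelspec.inputs.high_pass_filter_cutoff = 128.
# modelspec.inputs.functional_runs would be connected from preprocessing

wf = pe.Workflow(name='l1_sketch')
wf.connect(getonsets, 'subjectinfo', modelspec, 'subject_info')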
Exemplo n.º 24
    def get_subj_info(task_file, design_col, confounds, conditions):
        '''
        Makes a Bunch, giving all necessary data about conditions, onsets, and durations to
            the FSL first-level model. Needs a task file to run.
        Inputs:
            task_file [string], path to the subject events.tsv file, as per BIDS format.
            design_col [string], column name within the task file, identifying event conditions to model.
            confounds [pandas dataframe], pd.df of confounds, gathered from the get_confounds node.
            conditions [list],
                e.g. ['condition1',
                      'condition2',
                     ['condition1', 'parametric1', 'no_cent', 'no_norm'],
                     ['condition2', 'parametric2', 'cent', 'norm']]
                     each string entry (e.g. 'condition1') specifies an event condition in the design_col column.
                     each list entry includes 4 strings:
                         entry 1 is a condition within the design_col column
                         entry 2 is a column in the events file, which will be used for parametric weightings.
                         entry 3 is either 'no_cent' or 'cent', indicating whether to center the parametric variable.
                         entry 4 is either 'no_norm' or 'norm', indicating whether to normalize the parametric variable.
                 Onsets and durations are taken from the corresponding values for entry 1;
                 the parametric weighting specified by entry 2, scaled/centered as specified, is then
                 appended to the design matrix.
        '''
        from nipype.interfaces.base import Bunch
        import pandas as pd
        import numpy as np
        from sklearn.preprocessing import scale

        onsets = []
        durations = []
        amplitudes = []
        df = pd.read_csv(task_file, sep='\t', parse_dates=False)
        for idx, cond in enumerate(conditions):
            if isinstance(cond, list):
                if cond[2] == 'no_cent': # determine whether to center/scale
                    c = False
                elif cond[2] == 'cent':
                    c = True
                if cond[3] == 'no_norm':
                    n = False
                elif cond[3] == 'norm':
                    n = True
                # grab parametric terms.
                onsets.append(list(df[df[design_col] == cond[0]].onset))
                durations.append(list(df[df[design_col] == cond[0]].duration))
                amp_temp = list(scale(df[df[design_col] == cond[0]][cond[1]].tolist(),
                                   with_mean=c, with_std=n)) # scale
                amp_temp = pd.Series(amp_temp, dtype=object).fillna(0).tolist() # fill na
                amplitudes.append(amp_temp) # append
                conditions[idx] = cond[0]+'_'+cond[1] # combine condition/parametric names and replace.
            elif isinstance(cond, str):
                onsets.append(list(df[df[design_col] == cond].onset))
                durations.append(list(df[df[design_col] == cond].duration))
                # dummy code 1's for non-parametric conditions.
                amplitudes.append(list(np.repeat(1, len(df[df[design_col] == cond].onset))))
            else:
                print('cannot identify condition:', cond)
        #             return None
        output = Bunch(conditions= conditions,
                           onsets=onsets,
                           durations=durations,
                           amplitudes=amplitudes,
                           tmod=None,
                           pmod=None,
                           regressor_names=confounds.columns.values,
                           regressors=confounds.T.values.tolist()) # movement regressors added here. List of lists.
        return output
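A hedged usage example for get_subj_info, writing a tiny stand-in events file so the call actually runs; the column names and values are placeholders, not from the original study:

import pandas as pd

events = pd.DataFrame({'onset': [0.0, 10.0, 20.0],
                       'duration': [1.0, 1.0, 1.0],
                       'trial_type': ['cue', 'cue', 'cue'],
                       'gain': [10, 20, 40]})
events.to_csv('events.tsv', sep='\t', index=False)
confounds = pd.DataFrame({'trans_x': [0.0] * 3, 'trans_y': [0.0] * 3})

subj_info = get_subj_info('events.tsv', 'trial_type', confounds,
                          ['cue',                                # simple condition
                           ['cue', 'gain', 'cent', 'no_norm']])  # parametric modulation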
Exemplo n.º 25
def setup(taskname, run_number):
    global aroma
    # events = pd.read_csv(os.path.join(bidsdir, "task-" + taskname + "_events.tsv"), sep="\t")  # maybe use BIDSLayout for this?
    events_file = layout.get(task=taskname, extension='tsv')[0]
    events = pd.read_csv(events_file.path, sep="\t")

    # Get session and subject from *FMRIPREP* directory structure
    subject_path = session_path = None
    fmriprep_subject = fmriprep_session = None
    try:
        subject_paths = [
            fn for fn in glob.glob(fmriprepdir + '/sub-*')
            if not os.path.basename(fn).endswith('html')
        ]
        subject_path = subject_paths[0]
        fmriprep_subject = subject_path.split('-')[1]
        session_path = glob.glob(subject_path + '/ses-*')[0]
        fmriprep_session = session_path.split('-')[2]
    except IndexError as e:
        print(glob.glob(fmriprepdir + '/sub-*'))
        print(subject_path if subject_path else "no subject path")
        print(fmriprep_subject if fmriprep_subject else "no fmriprep subject")
        print(
            glob.glob(subject_path +
                      '/ses-*') if subject_path else "no subject path")
        print(session_path if session_path else "no session path")
        print(fmriprep_session if fmriprep_session else "no fmriprep session")
        print(e)

    confounds_path = os.path.join(
        fmriprepdir, "sub-" + fmriprep_subject, "ses-" + fmriprep_session,
        "func",
        "sub-" + fmriprep_subject + "_ses-" + fmriprep_session + "_task-" +
        taskname + "_run-" + run_number + "_desc-confounds_regressors.tsv")
    aroma_path = os.path.join(
        fmriprepdir, "sub-" + fmriprep_subject, "ses-" + fmriprep_session,
        "func", "sub-" + fmriprep_subject + "_ses-" + fmriprep_session +
        "_task-" + taskname + "_run-" + run_number +
        "_space-MNI152NLin6Asym_desc-smoothAROMAnonaggr_bold.nii.gz")

    simple_design = False
    confounds = ''

    # Always use AROMA-denoised images for the scenemem task (if they're there)
    if taskname == 'scenemem' and os.path.isfile(aroma_path):
        aroma = True

    # Check if AROMA-denoised images exist. If they don't and the aroma config is selected, that's bad news
    if aroma and not os.path.isfile(aroma_path):
        print(
            "You selected the AROMA configuration, but no AROMA-denoised images were found."
            " Using standard preprocessed bold images.")
        aroma = False

    # Check if confounds files actually exist and what to do if they don't
    if os.path.isfile(confounds_path):
        print("Found confounds tsv at %s" % confounds_path)
        confounds = pd.read_csv(confounds_path, sep="\t", na_values="n/a")
    elif not os.path.isfile(confounds_path) and os.path.isfile(aroma_path):
        print("Confounds file not found...using AROMA-denoised image")
        aroma = True
    else:
        print(
            "Could not find confounds file or AROMA-denoised image."
            " Using simplest design matrix. WARNING: resulting maps will be noisy."
        )
        simple_design = True

    if aroma and os.path.isfile(
            aroma_path):  # if AROMA config selected and the images exist
        print("AROMA config selected. Using ICA-AROMA denoised image.")
        subject_info = [
            Bunch(conditions=[taskname, 'baseline'],  # two conditions to match the onset/duration lists
                  onsets=[
                      list(events[events.trial_type == 'stimulus'].onset),
                      list(events[events.trial_type == 'baseline'].onset)
                  ],
                  durations=[
                      list(events[events.trial_type == 'stimulus'].duration),
                      list(events[events.trial_type == 'baseline'].duration)
                  ])
        ]

        prepped_img = os.path.join(
            fmriprepdir, "sub-" + fmriprep_subject, "ses-" + fmriprep_session,
            "func", "sub-" + fmriprep_subject + "_ses-" + fmriprep_session +
            "_task-" + taskname + "_run-" + run_number +
            "_space-MNI152NLin6Asym_desc-smoothAROMAnonaggr_bold.nii.gz")
    else:
        if simple_design:
            subject_info = [
                Bunch(
                    conditions=[taskname, 'baseline'],  # match the two onset/duration lists
                    onsets=[
                        list(events[events.trial_type == 'stimulus'].onset),
                        list(events[events.trial_type == 'baseline'].onset)
                    ],
                    durations=[
                        list(events[events.trial_type == 'stimulus'].duration),
                        list(events[events.trial_type == 'baseline'].duration)
                    ])
            ]
        else:
            subject_info = [
                Bunch(
                    conditions=[taskname, 'baseline'],  # match the two onset/duration lists
                    onsets=[
                        list(events[events.trial_type == 'stimulus'].onset),
                        list(events[events.trial_type == 'baseline'].onset)
                    ],
                    durations=[
                        list(events[events.trial_type == 'stimulus'].duration),
                        list(events[events.trial_type == 'baseline'].duration)
                    ],
                    regressors=[
                        confounds['global_signal'],
                        confounds['csf'],
                        confounds['white_matter'],
                        confounds['a_comp_cor_00'],
                        confounds['a_comp_cor_01'],
                        confounds['a_comp_cor_02'],
                        confounds['a_comp_cor_03'],
                        confounds['a_comp_cor_04'],
                        confounds['a_comp_cor_05'],
                        confounds['trans_x'],
                        confounds['trans_y'],
                        confounds['trans_z'],
                        confounds['rot_x'],
                        confounds['rot_y'],
                        confounds['rot_z'],
                    ],
                    regressor_names=[
                        'global_signal', 'csf', 'white_matter',
                        'a_comp_cor_00', 'a_comp_cor_01', 'a_comp_cor_02',
                        'a_comp_cor_03', 'a_comp_cor_04', 'a_comp_cor_05',
                        'trans_x', 'trans_y', 'trans_z', 'rot_x', 'rot_y',
                        'rot_z'
                    ])
            ]

        prepped_img = os.path.join(
            fmriprepdir, "sub-" + fmriprep_subject, "ses-" + fmriprep_session,
            "func", "sub-" + fmriprep_subject + "_ses-" + fmriprep_session +
            "_task-" + taskname + "_run-" + run_number +
            "_space-MNI152NLin2009cAsym_desc-preproc_bold.nii.gz")

    mask_file = os.path.join(
        fmriprepdir, "sub-" + fmriprep_subject, "ses-" + fmriprep_session,
        "func", "sub-" + fmriprep_subject + "_ses-" + fmriprep_session +
        "_task-" + taskname + "_run-" + run_number +
        "_space-MNI152NLin2009cAsym_desc-brain_mask.nii.gz")

    print('Using ' + os.path.basename(prepped_img) + ' as preprocessed image.')

    return prepped_img, subject_info, confounds, mask_file
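A usage sketch, assuming the module-level globals this function relies on (layout, fmriprepdir, aroma) have been configured; the task and run values are placeholders:

import os

for run in ['01', '02']:
    prepped_img, subject_info, confounds, mask_file = setup('scenemem', run)
    print(os.path.basename(prepped_img), len(subject_info))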
Exemplo n.º 26
                ]:
                    new_onsets[c] += onsets[n]
                    new_durations[c] += durations[n]

    # Make sure conditions are in the same order to facilitate lining up across runs
    new_conditions = sorted(new_conditions)

    session_info = Bunch(
        conditions=new_conditions,
        onsets=[new_onsets[k] for k in new_conditions],
        durations=[new_durations[k] for k in new_conditions],
        amplitudes=[list(scale(amplitudes[k]) + 1) for k in new_conditions
                    ],  # Mean center, mean of 1 (Mumford et al., 2015)
        regressors=[
            list(confoundinfo.framewise_displacement.fillna(0)),
            list(confoundinfo.trans_x),
            list(confoundinfo.trans_y),
            list(confoundinfo.trans_z),
            list(confoundinfo.rot_x),
            list(confoundinfo.rot_y),
            list(confoundinfo.rot_z),
            list(confoundinfo.csf),
            list(confoundinfo.white_matter),
            list(np.ones_like(confoundinfo.white_matter))  # INTERCEPT
        ])

    # print(session_info.conditions)

    # Load fMRI data
    fMRI_data = os.path.join(
        base_dir,
        'fmriprep/sub-{0}/ses-{1}/func/sub-{0}_ses-{1}_task-mos_run-0{2}_'
Exemplo n.º 27
def subjectinfo(subject_id):
    """define individual subject info"""
    import pandas as pd
    from nipype.interfaces.base import Bunch

    def construct_sj(trialinfo, subject_id, run_num, cond_name):
        """construct df"""
        df_sj = trialinfo[(trialinfo['subject'] == int(subject_id))
                          & (trialinfo['session'] == int(run_num))]
        sj_info = pd.DataFrame()
        sj_info['onset'] = df_sj['runtime']
        sj_info['duration'] = 0.
        sj_info['weight'] = 1.
        trial_type = df_sj['seq'].replace({1: 'Low', 2: 'High'})
        sj_info['trial_type'] = trial_type
        sj_info_cond = sj_info[sj_info['trial_type'] == cond_name]
        return sj_info_cond

    def select_confounds(subject_id, run_num):
        """import confounds tsv files"""
        confounds_dir = '/data/sub-%02d/func/' % int(subject_id)
        confounds_file = confounds_dir + 'sub-%02d_task-tsl_run-%d_desc-confounds_timeseries.tsv' % (
            int(subject_id), int(run_num))
        conf_df = pd.read_csv(confounds_file, sep='\t')
        return conf_df

    def confounds_regressor(conf_df, conf_names):
        """select confounds for regressors"""
        conf_select = conf_df[conf_names].loc[4:].fillna(
            0)  # ignore first 4 dummy scans
        conf_select_list = [
            conf_select[col].values.tolist() for col in conf_select
        ]
        return conf_select_list

    def find_runs(subject_id):
        """find available runs from func"""
        from glob import glob
        func_dir = '/output/smooth_nomask/preproc/sub-%02d/' % int(subject_id)
        func_files = glob(func_dir + '*bold.nii')
        runs = []
        for f in func_files:
            tmp = f.split('/')
            run = tmp[-1].split('_')[2].split('-')[1]
            runs.append(int(run))
        return sorted(runs)

    conf_names = [
        'csf', 'white_matter', 'global_signal', 'dvars', 'std_dvars',
        'framewise_displacement', 'rmsd', 'a_comp_cor_00', 'a_comp_cor_01',
        'a_comp_cor_02', 'a_comp_cor_03', 'a_comp_cor_04', 'a_comp_cor_05',
        'cosine00', 'cosine01', 'cosine02', 'cosine03', 'cosine04', 'cosine05',
        'trans_x', 'trans_y', 'trans_z', 'rot_x', 'rot_y', 'rot_z'
    ]

    alltrialinfo = pd.read_csv('/code/data/fmri_behavioural_new.csv')
    alltrialinfo.head()

    subject_info = []
    onset_list = []
    condition_names = ['High', 'Low']
    runs = find_runs(subject_id)
    print(runs)
    for run in runs:
        for cond in condition_names:
            run_cond = construct_sj(alltrialinfo, subject_id, run, cond)
            onset_run_cond = run_cond['onset'].values
            onset_list.append(sorted(onset_run_cond))

    subject_info = []
    for r in range(len(runs)):
        onsets = [onset_list[r * 2], onset_list[r * 2 + 1]]
        regressors_all = select_confounds(subject_id, runs[r])
        regressors = confounds_regressor(regressors_all, conf_names)

        subject_info.insert(
            r,
            Bunch(conditions=condition_names,
                  onsets=onsets,
                  durations=[[0], [0]],
                  regressors=regressors,
                  regressor_names=conf_names,
                  amplitudes=None,
                  tmod=None,
                  pmod=None))

    return subject_info  # this output will later be returned to infosource
Exemplo n.º 28
datasource.inputs.sort_filelist = True


"""
Experimental paradigm specific components
-----------------------------------------

Here we create a structure that provides information
about the experimental paradigm. This is used by the
:class:`nipype.interfaces.spm.SpecifyModel` to create the information
necessary to generate an SPM design matrix.
"""

from nipype.interfaces.base import Bunch
subjectinfo = [Bunch(conditions=['Task'],
                     onsets=[list(range(6, 84, 12))],
                     durations=[[6]])]

"""Setup the contrast structure that needs to be evaluated. This is a
list of lists. The inner list specifies the contrasts and has the
following format - [Name,Stat,[list of condition names],[weights on
those conditions]. The condition names must match the `names` listed
in the `subjectinfo` function described above.
"""

cont1 = ('active > rest', 'T', ['Task'], [1])
contrasts = [cont1]

# set up node specific inputs
modelspecref = l1pipeline.inputs.analysis.modelspec
modelspecref.input_units = 'scans'
Exemplo n.º 29
def test_modelgen1(tmpdir):
    filename1 = tmpdir.join('test1.nii').strpath
    filename2 = tmpdir.join('test2.nii').strpath
    Nifti1Image(np.random.rand(10, 10, 10, 200),
                np.eye(4)).to_filename(filename1)
    Nifti1Image(np.random.rand(10, 10, 10, 200),
                np.eye(4)).to_filename(filename2)
    s = SpecifyModel()
    s.inputs.input_units = 'scans'
    set_output_units = lambda: setattr(s.inputs, 'output_units', 'scans')
    with pytest.raises(TraitError):
        set_output_units()
    s.inputs.functional_runs = [filename1, filename2]
    s.inputs.time_repetition = 6
    s.inputs.high_pass_filter_cutoff = 128.
    info = [
        Bunch(conditions=['cond1'],
              onsets=[[2, 50, 100, 180]],
              durations=[[1]],
              amplitudes=None,
              pmod=None,
              regressors=None,
              regressor_names=None,
              tmod=None),
        Bunch(conditions=['cond1'],
              onsets=[[30, 40, 100, 150]],
              durations=[[1]],
              amplitudes=None,
              pmod=None,
              regressors=None,
              regressor_names=None,
              tmod=None)
    ]
    s.inputs.subject_info = info
    res = s.run()
    assert len(res.outputs.session_info) == 2
    assert len(res.outputs.session_info[0]['regress']) == 0
    assert len(res.outputs.session_info[0]['cond']) == 1
    npt.assert_almost_equal(
        np.array(res.outputs.session_info[0]['cond'][0]['onset']),
        np.array([12, 300, 600, 1080]))
    info = [
        Bunch(conditions=['cond1'], onsets=[[2]], durations=[[1]]),
        Bunch(conditions=['cond1'], onsets=[[3]], durations=[[1]])
    ]
    s.inputs.subject_info = deepcopy(info)
    res = s.run()
    npt.assert_almost_equal(
        np.array(res.outputs.session_info[0]['cond'][0]['duration']),
        np.array([6.]))
    npt.assert_almost_equal(
        np.array(res.outputs.session_info[1]['cond'][0]['duration']),
        np.array([6.]))
    info = [
        Bunch(conditions=['cond1', 'cond2'],
              onsets=[[2, 3], [2]],
              durations=[[1, 1], [1]]),
        Bunch(conditions=['cond1', 'cond2'],
              onsets=[[2, 3], [2, 4]],
              durations=[[1, 1], [1, 1]])
    ]
    s.inputs.subject_info = deepcopy(info)
    s.inputs.input_units = 'scans'
    res = s.run()
    npt.assert_almost_equal(
        np.array(res.outputs.session_info[0]['cond'][0]['duration']),
        np.array([6., 6.]))
    npt.assert_almost_equal(
        np.array(res.outputs.session_info[0]['cond'][1]['duration']),
        np.array([
            6.,
        ]))
    npt.assert_almost_equal(
        np.array(res.outputs.session_info[1]['cond'][1]['duration']),
        np.array([6., 6.]))
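The expected onsets follow from the unit conversion SpecifyModel performs: onsets given in scans are multiplied by the repetition time (TR = 6 s here) to express them in seconds. A quick check:

TR = 6
onsets_scans = [2, 50, 100, 180]
onsets_secs = [o * TR for o in onsets_scans]
assert onsets_secs == [12, 300, 600, 1080]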
Exemplo n.º 30
def load_design_matrix(mat_file,trim=0):
	import os
	import re
	import scipy.io as sp
	import glob as gl
	import numpy
	import nipype.interfaces.matlab as mlab 
	from nipype.interfaces.base import Bunch

	# convert numpy data array
	def convert_numpy(ar,toString = False):
		lst = []
		for a in ar.tolist():
			if toString:
				lst.append(str(a))
			elif type(a) == numpy.ndarray:
				if a.size > 1:			
					lst.append(a.tolist())
				else:
					lst.append([a.tolist()])
			else:
				lst.append([a])
		return lst

	# if list of mat_files, then do a list
	mat_files = []	
	if isinstance(mat_file,list):
		mat_files = mat_file
	else:
		mat_files = [mat_file]
		
	bunches = []

	# go over mat files
	for mat_file in mat_files:
		# load design matrix 
		dm = sp.loadmat(mat_file,squeeze_me=True)

		names  = convert_numpy(dm.get('names'),True)
		onsets = convert_numpy(dm.get('onsets'))
		durations = convert_numpy(dm.get('durations'))
		
		# load up values and convert them
		# for PPPI remove last column for reward PPI; for ert last 3 columns
		# error, posterror, misc
		if trim > 0:
			names = names[0:(len(names)-trim)]
			durations = durations[0:(len(durations)-trim)]
			onsets = onsets[0:(len(onsets)-trim)]

		# create bunch to return
		bunch = Bunch(conditions=names,onsets=onsets,durations=durations)
		if 'pmod' in dm:
			pmod = []
			for i in range(0,len(dm.get('pmod'))):
				if isinstance(dm['pmod']['name'][i], str) or dm['pmod']['name'][i].size == 1:
					name = str(dm['pmod']['name'][i])
					param = dm['pmod']['param'][i].tolist()
					poly = dm['pmod']['poly'][i]
					pmod.append(Bunch(name=[name],param=[param],poly=[poly]))
				elif dm['pmod']['name'][i].size >  1:
					names = []
					params = []
					polys = []
					for j in range(0,dm['pmod']['name'][i].size):
						names.append(str(dm['pmod']['name'][i][j]))
						params.append(dm['pmod']['param'][i][j].tolist())
						polys.append(dm['pmod']['poly'][i][j])
					pmod.append(Bunch(name=names,param=params,poly=polys))
				else:
					pmod.append(None)
			bunch.pmod = pmod
		bunches.append(bunch)
	return bunches
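A hypothetical call, assuming SPM-style .mat files that hold names/onsets/durations (and optionally pmod); the paths are placeholders:

mat_files = ['/data/sub-001/run1_design.mat', '/data/sub-001/run2_design.mat']
# One Bunch per run; trim=1 drops the last condition (e.g. an unused PPI column)
bunches = load_design_matrix(mat_files, trim=1)
for b in bunches:
    print(b.conditions)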
Exemplo n.º 31
                      select_smoothed_files, 'index')

rename = pe.MapNode(util.Rename(format_string="%(orig)s"),
                    name="rename",
                    iterfield=['in_file'])
rename.inputs.parse_string = "(?P<orig>.*)"

preprocessing.connect(select_smoothed_files, 'out', rename, 'in_file')

specify_model = pe.Node(interface=model.SpecifyModel(), name="specify_model")
specify_model.inputs.input_units = 'secs'
specify_model.inputs.time_repetition = 3.
specify_model.inputs.high_pass_filter_cutoff = 120
specify_model.inputs.subject_info = [
    Bunch(conditions=['Task-Odd', 'Task-Even'],
          onsets=[list(range(15, 240, 60)), list(range(45, 240, 60))],
          durations=[[15], [15]])
] * 4

level1design = pe.Node(interface=spm.Level1Design(), name="level1design")
level1design.inputs.bases = {'hrf': {'derivs': [0, 0]}}
level1design.inputs.timing_units = 'secs'
level1design.inputs.interscan_interval = specify_model.inputs.time_repetition

level1estimate = pe.Node(interface=spm.EstimateModel(), name="level1estimate")
level1estimate.inputs.estimation_method = {'Classical': 1}

contrastestimate = pe.Node(interface=spm.EstimateContrast(),
                           name="contrastestimate")
contrastestimate.inputs.contrasts = [('Task>Baseline', 'T',
                                      ['Task-Odd', 'Task-Even'], [0.5, 0.5])]
Exemplo n.º 32
    def _populate_inputs(self):
        """Initialize the inputs attribute."""

        self.inputs = Bunch(outfile=None,
                            infile=None)
Exemplo n.º 33
def subjectinfo(subject_id):
    import os
    from nipype.interfaces.base import Bunch
    from copy import deepcopy
    import numpy as np
    base_proj_dir = '/home/data/madlab/data/mri/wmaze/scanner_behav'
    # Empty array to contain info from each run (index 1-6)
    output = []

    # For the current run, of which there are 6
    for curr_run in range(1, 7):
        names = []
        onsets = []
        durations = []
        amplitudes = []

        data_all_before_B_corr = np.genfromtxt(
            base_proj_dir +
            '/{0}/model_GLM1.2/run{1}_all_before_B_corr.txt'.format(
                subject_id, curr_run),
            dtype=str)
        data_all_before_B_incorr = np.genfromtxt(
            base_proj_dir +
            '/{0}/model_GLM1.2/run{1}_all_before_B_incorr.txt'.format(
                subject_id, curr_run),
            dtype=str)
        data_all_remaining = np.genfromtxt(
            base_proj_dir +
            '/{0}/model_GLM1.2/run{1}_all_remaining.txt'.format(
                subject_id, curr_run),
            dtype=str)

        sequence = ['all_before_B']
        for curr_type in sequence:
            corr_array_name = eval('data_{0}_corr'.format(curr_type))
            incorr_array_name = eval('data_{0}_incorr'.format(curr_type))
            if incorr_array_name.size > 0:  # AT LEAST ONE MISTAKE MADE
                curr_names = [
                    '{0}_corr'.format(curr_type),
                    '{0}_incorr'.format(curr_type)
                ]
                curr_corr_onsets = list(map(float, corr_array_name[:, 0]))
                curr_corr_durations = list(map(float, corr_array_name[:, 1]))
                curr_corr_amplitudes = list(map(float, corr_array_name[:, 2]))
                if incorr_array_name.size == 3:  #ONLY ONE ERROR
                    curr_incorr_onsets = [float(incorr_array_name[0])]
                    curr_incorr_durations = [float(incorr_array_name[1])]
                    curr_incorr_amplitudes = [float(incorr_array_name[2])]
                else:  #MORE THAN ONE ERROR
                    curr_incorr_onsets = list(map(float, incorr_array_name[:, 0]))
                    curr_incorr_durations = list(map(float, incorr_array_name[:, 1]))
                    curr_incorr_amplitudes = list(map(float, incorr_array_name[:, 2]))
                curr_onsets = [curr_corr_onsets, curr_incorr_onsets]
                curr_durations = [curr_corr_durations, curr_incorr_durations]
                curr_amplitudes = [
                    curr_corr_amplitudes, curr_incorr_amplitudes
                ]
            else:  #NO MISTAKES WERE MADE
                curr_names = ['{0}_corr'.format(curr_type)]
                curr_corr_onsets = list(map(float, corr_array_name[:, 0]))
                curr_corr_durations = list(map(float, corr_array_name[:, 1]))
                curr_corr_amplitudes = list(map(float, corr_array_name[:, 2]))
                curr_onsets = [curr_corr_onsets]
                curr_durations = [curr_corr_durations]
                curr_amplitudes = [curr_corr_amplitudes]
            names.append(curr_names)
            onsets.append(curr_onsets)
            durations.append(curr_durations)
            amplitudes.append(curr_amplitudes)

        ## ALL REMAINING TRIALS ##
        curr_names = ['all_remaining']
        curr_corr_onsets = list(map(float, data_all_remaining[:, 0]))
        curr_corr_durations = list(map(float, data_all_remaining[:, 1]))
        curr_corr_amplitudes = list(map(float, data_all_remaining[:, 2]))
        curr_onsets = [curr_corr_onsets]
        curr_durations = [curr_corr_durations]
        curr_amplitudes = [curr_corr_amplitudes]
        names.append(curr_names)
        onsets.append(curr_onsets)
        durations.append(curr_durations)
        amplitudes.append(curr_amplitudes)

        # If any element in names is a list instead of a single value, for those elements
        if any(isinstance(el, list) for el in names):
            names = [el for sublist in names for el in sublist]
        if any(isinstance(el, list) for el in onsets):
            onsets = [el_o for sublist_o in onsets for el_o in sublist_o]
        if any(isinstance(el, list) for el in durations):
            durations = [el_d for sublist_d in durations for el_d in sublist_d]
        if any(isinstance(el, list) for el in amplitudes):
            amplitudes = [
                el_a for sublist_a in amplitudes for el_a in sublist_a
            ]

        output.insert(
            curr_run,
            Bunch(conditions=names,
                  onsets=deepcopy(onsets),
                  durations=deepcopy(durations),
                  amplitudes=deepcopy(amplitudes),
                  tmod=None,
                  pmod=None,
                  regressor_names=None,
                  regressors=None))
    return output
Exemplo n.º 34
    def _populate_inputs(self):
        """Initialize the inputs attribute."""

        self.inputs = Bunch(automask=None,
                            percentile=None,
                            infile=None)
Exemplo n.º 35
preproc = create_featreg_preproc(whichvol='first')
TR = 3.
preproc.inputs.inputspec.fwhm = 5
preproc.inputs.inputspec.highpass = 100 / TR

modelspec = pe.Node(interface=model.SpecifyModel(), name="modelspec")
modelspec.inputs.input_units = 'secs'
modelspec.inputs.time_repetition = TR
modelspec.inputs.high_pass_filter_cutoff = 100
modelspec.inputs.subject_info = [
    Bunch(conditions=['Visual', 'Auditory'],
          onsets=[list(range(0, int(180 * TR), 60)),
                  list(range(0, int(180 * TR), 90))],
          durations=[[30], [45]],
          amplitudes=None,
          tmod=None,
          pmod=None,
          regressor_names=None,
          regressors=None)
]

modelfit = create_modelfit_workflow(f_contrasts=True)
modelfit.inputs.inputspec.interscan_interval = TR
modelfit.inputs.inputspec.model_serial_correlations = True
modelfit.inputs.inputspec.bases = {'dgamma': {'derivs': True}}
cont1 = ['Visual>Baseline', 'T', ['Visual', 'Auditory'], [1, 0]]
cont2 = ['Auditory>Baseline', 'T', ['Visual', 'Auditory'], [0, 1]]
cont3 = ['Task', 'F', [cont1, cont2]]
modelfit.inputs.inputspec.contrasts = [cont1, cont2, cont3]
Exemplo n.º 36
 def _make_matlab_command(self, _):
     """validates spm options and generates job structure
     """
     contrasts = []
     cname = []
     for i, cont in enumerate(self.inputs.contrasts):
         cname.insert(i, cont[0])
         contrasts.insert(i, Bunch(name=cont[0],
                                  stat=cont[1],
                                  conditions=cont[2],
                                  weights=None,
                                  sessions=None))
         if len(cont) >= 4:
             contrasts[i].weights = cont[3]
         if len(cont) >= 5:
             contrasts[i].sessions = cont[4]
     script = "% generated by nipype.interfaces.spm\n"
     script += "spm_defaults;\n"
     script += "jobs{1}.stats{1}.con.spmmat  = {'%s'};\n" % self.inputs.spm_mat_file
     script += "load(jobs{1}.stats{1}.con.spmmat{:});\n"
     script += "SPM.swd = '%s';\n" % os.getcwd()
     script += "save(jobs{1}.stats{1}.con.spmmat{:},'SPM');\n"
     script += "names = SPM.xX.name;\n"
     # get names for columns
     if isdefined(self.inputs.group_contrast) and self.inputs.group_contrast:
         script += "condnames=names;\n"
     else:
         if self.inputs.use_derivs:
             script += "pat = 'Sn\([0-9*]\) (.*)';\n"
         else:
             script += "pat = 'Sn\([0-9*]\) (.*)\*bf\(1\)|Sn\([0-9*]\) .*\*bf\([2-9]\)|Sn\([0-9*]\) (.*)';\n"
         script += "t = regexp(names,pat,'tokens');\n"
         # get sessidx for columns
         script += "pat1 = 'Sn\(([0-9].*)\)\s.*';\n"
         script += "t1 = regexp(names,pat1,'tokens');\n"
         script += "for i0=1:numel(t),condnames{i0}='';condsess(i0)=0;if ~isempty(t{i0}{1}),condnames{i0} = t{i0}{1}{1};condsess(i0)=str2num(t1{i0}{1}{1});end;end;\n"
     # BUILD CONTRAST SESSION STRUCTURE
     for i, contrast in enumerate(contrasts):
         if contrast.stat == 'T':
             script += "consess{%d}.tcon.name   = '%s';\n" % (i + 1, contrast.name)
             script += "consess{%d}.tcon.convec = zeros(1,numel(names));\n" % (i + 1)
             for c0, cond in enumerate(contrast.conditions):
                 script += "idx = strmatch('%s',condnames,'exact');\n" % (cond)
                 script += "if isempty(idx), throw(MException('CondName:Chk', sprintf('Condition %%s not found in design','%s'))); end;\n" % cond
                 if contrast.sessions:
                     for sno, sw in enumerate(contrast.sessions):
                         script += "sidx = find(condsess(idx)==%d);\n" % (sno + 1)
                         script += "consess{%d}.tcon.convec(idx(sidx)) = %f;\n" % (i + 1, sw * contrast.weights[c0])
                 else:
                     script += "consess{%d}.tcon.convec(idx) = %f;\n" % (i + 1, contrast.weights[c0])
     for i, contrast in enumerate(contrasts):
         if contrast.stat == 'F':
             script += "consess{%d}.fcon.name   =  '%s';\n" % (i + 1, contrast.name)
             for cl0, fcont in enumerate(contrast.conditions):
                 try:
                     tidx = cname.index(fcont[0])
                 except ValueError:
                     raise Exception("Contrast Estimate: could not get index of"
                                     " T contrast; probably not defined prior"
                                     " to the F contrasts")
                 script += "consess{%d}.fcon.convec{%d} = consess{%d}.tcon.convec;\n" % (i + 1, cl0 + 1, tidx + 1)
     script += "jobs{1}.stats{1}.con.consess = consess;\n"
     script += "if strcmp(spm('ver'),'SPM8'), spm_jobman('initcfg');jobs=spm_jobman('spm5tospm8',{jobs});end\n"
     script += "spm_jobman('run',jobs);"
     return script
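For reference, the contrasts parsed above are (name, stat, conditions, weights[, session weights]) tuples, matching the len(cont) >= 4 / >= 5 checks. A hedged illustration of what self.inputs.contrasts might hold:

contrasts = [
    ('Task>Baseline', 'T', ['Task'], [1.0]),                 # simple T contrast
    ('Odd>Even', 'T', ['Task-Odd', 'Task-Even'], [1, -1]),
    # Session-weighted variant: the fifth element weights each session's copy
    ('Task>Baseline(run1)', 'T', ['Task'], [1.0], [1, 0]),
]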
Exemplo n.º 37
class SubjectInfoOutputSpec(TraitedSpec):
    information = traits.List(Bunch())