Example #1
def test_modelgen_spm_concat():
    tempdir = mkdtemp()
    filename1 = os.path.join(tempdir, 'test1.nii')
    filename2 = os.path.join(tempdir, 'test2.nii')
    Nifti1Image(np.random.rand(10, 10, 10, 50), np.eye(4)).to_filename(filename1)
    Nifti1Image(np.random.rand(10, 10, 10, 50), np.eye(4)).to_filename(filename2)
    s = SpecifySPMModel()
    s.inputs.input_units = 'secs'
    s.inputs.concatenate_runs = True
    setattr(s.inputs, 'output_units', 'scans')
    yield assert_equal, s.inputs.output_units, 'scans'
    s.inputs.functional_runs = [filename1, filename2]
    s.inputs.time_repetition = 6
    s.inputs.high_pass_filter_cutoff = 128.
    info = [Bunch(conditions=['cond1'], onsets=[[2, 50, 100, 180]], durations=[[1]], amplitudes=None,
                  pmod=None, regressors=None, regressor_names=None, tmod=None),
            Bunch(conditions=['cond1'], onsets=[[30, 40, 100, 150]], durations=[[1]], amplitudes=None,
                  pmod=None, regressors=None, regressor_names=None, tmod=None)]
    s.inputs.subject_info = info
    res = s.run()
    yield assert_equal, len(res.outputs.session_info), 1
    yield assert_equal, len(res.outputs.session_info[0]['regress']), 1
    yield assert_equal, len(res.outputs.session_info[0]['cond']), 1
    yield assert_almost_equal, res.outputs.session_info[0]['cond'][0]['onset'], [2.0, 50.0, 100.0, 180.0, 330.0, 340.0, 400.0, 450.0]
    rmtree(tempdir)
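A quick sanity check of the concatenation arithmetic asserted above: each run holds 50 volumes at a TR of 6 s, so the second run's onsets are shifted by 50 * 6 = 300 s on the concatenated timeline. A minimal sketch:

# Offset applied to run 2 when runs are concatenated: n_volumes * TR.
n_volumes, tr = 50, 6
offset = n_volumes * tr  # 300 s
run2_onsets = [30, 40, 100, 150]
print([onset + offset for onset in run2_onsets])  # [330, 340, 400, 450]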
Example #2
def test_modelgen_spm_concat():
    tempdir = mkdtemp()
    filename1 = os.path.join(tempdir, 'test1.nii')
    filename2 = os.path.join(tempdir, 'test2.nii')
    Nifti1Image(np.random.rand(10, 10, 10, 30), np.eye(4)).to_filename(filename1)
    Nifti1Image(np.random.rand(10, 10, 10, 30), np.eye(4)).to_filename(filename2)
    s = SpecifySPMModel()
    s.inputs.input_units = 'secs'
    s.inputs.concatenate_runs = True
    setattr(s.inputs, 'output_units', 'secs')
    yield assert_equal, s.inputs.output_units, 'secs'
    s.inputs.functional_runs = [filename1, filename2]
    s.inputs.time_repetition = 6
    s.inputs.high_pass_filter_cutoff = 128.
    info = [Bunch(conditions=['cond1'], onsets=[[2, 50, 100, 170]], durations=[[1]]),
            Bunch(conditions=['cond1'], onsets=[[30, 40, 100, 150]], durations=[[1]])]
    s.inputs.subject_info = deepcopy(info)
    res = s.run()
    yield assert_equal, len(res.outputs.session_info), 1
    yield assert_equal, len(res.outputs.session_info[0]['regress']), 1
    yield assert_equal, np.sum(res.outputs.session_info[0]['regress'][0]['val']), 30
    yield assert_equal, len(res.outputs.session_info[0]['cond']), 1
    yield assert_almost_equal, np.array(res.outputs.session_info[0]['cond'][0]['onset']), np.array([2.0, 50.0, 100.0, 170.0, 210.0, 220.0, 280.0, 330.0])
    setattr(s.inputs, 'output_units', 'scans')
    yield assert_equal, s.inputs.output_units, 'scans'
    s.inputs.subject_info = deepcopy(info)
    res = s.run()
    yield assert_almost_equal, np.array(res.outputs.session_info[0]['cond'][0]['onset']), np.array([2.0, 50.0, 100.0, 170.0, 210.0, 220.0, 280.0, 330.0])/6
    s.inputs.concatenate_runs = False
    s.inputs.subject_info = deepcopy(info)
    s.inputs.output_units = 'secs'
    res = s.run()
    yield assert_almost_equal, np.array(res.outputs.session_info[0]['cond'][0]['onset']), np.array([2.0, 50.0, 100.0, 170.0])
    rmtree(tempdir)
Example #3
def test_modelgen_spm_concat():
    tempdir = mkdtemp()
    filename1 = os.path.join(tempdir, 'test1.nii')
    filename2 = os.path.join(tempdir, 'test2.nii')
    Nifti1Image(np.random.rand(10, 10, 10, 30),
                np.eye(4)).to_filename(filename1)
    Nifti1Image(np.random.rand(10, 10, 10, 30),
                np.eye(4)).to_filename(filename2)
    s = SpecifySPMModel()
    s.inputs.input_units = 'secs'
    s.inputs.concatenate_runs = True
    setattr(s.inputs, 'output_units', 'secs')
    yield assert_equal, s.inputs.output_units, 'secs'
    s.inputs.functional_runs = [filename1, filename2]
    s.inputs.time_repetition = 6
    s.inputs.high_pass_filter_cutoff = 128.
    info = [
        Bunch(conditions=['cond1'],
              onsets=[[2, 50, 100, 170]],
              durations=[[1]]),
        Bunch(conditions=['cond1'],
              onsets=[[30, 40, 100, 150]],
              durations=[[1]])
    ]
    s.inputs.subject_info = deepcopy(info)
    res = s.run()
    yield assert_equal, len(res.outputs.session_info), 1
    yield assert_equal, len(res.outputs.session_info[0]['regress']), 1
    yield assert_equal, np.sum(
        res.outputs.session_info[0]['regress'][0]['val']), 30
    yield assert_equal, len(res.outputs.session_info[0]['cond']), 1
    yield assert_almost_equal, np.array(
        res.outputs.session_info[0]['cond'][0]['onset']), np.array(
            [2.0, 50.0, 100.0, 170.0, 210.0, 220.0, 280.0, 330.0])
    setattr(s.inputs, 'output_units', 'scans')
    yield assert_equal, s.inputs.output_units, 'scans'
    s.inputs.subject_info = deepcopy(info)
    res = s.run()
    yield assert_almost_equal, np.array(
        res.outputs.session_info[0]['cond'][0]['onset']), np.array(
            [2.0, 50.0, 100.0, 170.0, 210.0, 220.0, 280.0, 330.0]) / 6
    s.inputs.concatenate_runs = False
    s.inputs.subject_info = deepcopy(info)
    s.inputs.output_units = 'secs'
    res = s.run()
    yield assert_almost_equal, np.array(
        res.outputs.session_info[0]['cond'][0]['onset']), np.array(
            [2.0, 50.0, 100.0, 170.0])
    rmtree(tempdir)
def test_SpecifySPMModel_outputs():
    output_map = dict(session_info=dict(), )
    outputs = SpecifySPMModel.output_spec()

    for key, metadata in output_map.items():
        for metakey, value in metadata.items():
            yield assert_equal, getattr(outputs.traits()[key], metakey), value
def test_SpecifySPMModel_inputs():
    input_map = dict(
        concatenate_runs=dict(usedefault=True, ),
        event_files=dict(
            mandatory=True,
            xor=['subject_info'],
        ),
        functional_runs=dict(
            copyfile=False,
            mandatory=True,
        ),
        high_pass_filter_cutoff=dict(mandatory=True, ),
        ignore_exception=dict(
            nohash=True,
            usedefault=True,
        ),
        input_units=dict(mandatory=True, ),
        outlier_files=dict(copyfile=False, ),
        output_units=dict(usedefault=True, ),
        realignment_parameters=dict(copyfile=False, ),
        subject_info=dict(
            mandatory=True,
            xor=['event_files'],
        ),
        time_repetition=dict(mandatory=True, ),
    )
    inputs = SpecifySPMModel.input_spec()

    for key, metadata in input_map.items():
        for metakey, value in metadata.items():
            yield assert_equal, getattr(inputs.traits()[key], metakey), value
def test_SpecifySPMModel_inputs():
    input_map = dict(
        concatenate_runs=dict(usedefault=True, ),
        event_files=dict(
            mandatory=True,
            xor=['subject_info', 'event_files'],
        ),
        functional_runs=dict(
            copyfile=False,
            mandatory=True,
        ),
        high_pass_filter_cutoff=dict(mandatory=True, ),
        ignore_exception=dict(
            nohash=True,
            usedefault=True,
        ),
        input_units=dict(mandatory=True, ),
        outlier_files=dict(copyfile=False, ),
        output_units=dict(usedefault=True, ),
        realignment_parameters=dict(copyfile=False, ),
        subject_info=dict(
            mandatory=True,
            xor=['subject_info', 'event_files'],
        ),
        time_repetition=dict(mandatory=True, ),
    )
    inputs = SpecifySPMModel.input_spec()

    for key, metadata in input_map.items():
        for metakey, value in metadata.items():
            yield assert_equal, getattr(inputs.traits()[key], metakey), value
Example #8
def test_modelgen_spm_concat():
    tempdir = mkdtemp()
    filename1 = os.path.join(tempdir, 'test1.nii')
    filename2 = os.path.join(tempdir, 'test2.nii')
    Nifti1Image(np.random.rand(10, 10, 10, 30), np.eye(4)).to_filename(filename1)
    Nifti1Image(np.random.rand(10, 10, 10, 30), np.eye(4)).to_filename(filename2)
    # Test case where a single duration is passed and applied to all onsets.
    s = SpecifySPMModel()
    s.inputs.input_units = 'secs'
    s.inputs.concatenate_runs = True
    setattr(s.inputs, 'output_units', 'secs')
    yield assert_equal, s.inputs.output_units, 'secs'
    s.inputs.functional_runs = [filename1, filename2]
    s.inputs.time_repetition = 6
    s.inputs.high_pass_filter_cutoff = 128.
    info = [Bunch(conditions=['cond1'], onsets=[[2, 50, 100, 170]], durations=[[1]]),
            Bunch(conditions=['cond1'], onsets=[[30, 40, 100, 150]], durations=[[1]])]
    s.inputs.subject_info = deepcopy(info)
    res = s.run()
    yield assert_equal, len(res.outputs.session_info), 1
    yield assert_equal, len(res.outputs.session_info[0]['regress']), 1
    yield assert_equal, np.sum(res.outputs.session_info[0]['regress'][0]['val']), 30
    yield assert_equal, len(res.outputs.session_info[0]['cond']), 1
    yield assert_almost_equal, np.array(res.outputs.session_info[0]['cond'][0]['onset']), np.array([2.0, 50.0, 100.0, 170.0, 210.0, 220.0, 280.0, 330.0])
    yield assert_almost_equal, np.array(res.outputs.session_info[0]['cond'][0]['duration']), np.array([1., 1., 1., 1., 1., 1., 1., 1.])
    # Test case of scans as output units instead of seconds
    setattr(s.inputs, 'output_units', 'scans')
    yield assert_equal, s.inputs.output_units, 'scans'
    s.inputs.subject_info = deepcopy(info)
    res = s.run()
    yield assert_almost_equal, np.array(res.outputs.session_info[0]['cond'][0]['onset']), np.array([2.0, 50.0, 100.0, 170.0, 210.0, 220.0, 280.0, 330.0]) / 6
    # Test case for no concatenation with seconds as output units
    s.inputs.concatenate_runs = False
    s.inputs.subject_info = deepcopy(info)
    s.inputs.output_units = 'secs'
    res = s.run()
    yield assert_almost_equal, np.array(res.outputs.session_info[0]['cond'][0]['onset']), np.array([2.0, 50.0, 100.0, 170.0])
    # Test case: variable number of events across separate runs, with some conditions having a single event.
    filename3 = os.path.join(tempdir, 'test3.nii')
    Nifti1Image(np.random.rand(10, 10, 10, 30), np.eye(4)).to_filename(filename3)
    s.inputs.functional_runs = [filename1, filename2, filename3]
    info = [Bunch(conditions=['cond1', 'cond2'], onsets=[[2, 3], [2]], durations=[[1, 1], [1]]),
            Bunch(conditions=['cond1', 'cond2'], onsets=[[2, 3], [2, 4]], durations=[[1, 1], [1, 1]]),
            Bunch(conditions=['cond1', 'cond2'], onsets=[[2, 3], [2]], durations=[[1, 1], [1]])]
    s.inputs.subject_info = deepcopy(info)
    res = s.run()
    yield assert_almost_equal, np.array(res.outputs.session_info[0]['cond'][0]['duration']), np.array([1., 1.])
    yield assert_almost_equal, np.array(res.outputs.session_info[0]['cond'][1]['duration']), np.array([1.,])
    yield assert_almost_equal, np.array(res.outputs.session_info[1]['cond'][1]['duration']), np.array([1., 1.])
    yield assert_almost_equal, np.array(res.outputs.session_info[2]['cond'][1]['duration']), np.array([1.,])
    # Test case: variable number of events across concatenated runs, with some conditions having a single event.
    s.inputs.concatenate_runs = True
    info = [Bunch(conditions=['cond1', 'cond2'], onsets=[[2, 3], [2]], durations=[[1, 1], [1]]),
            Bunch(conditions=['cond1', 'cond2'], onsets=[[2, 3], [2, 4]], durations=[[1, 1], [1, 1]]),
            Bunch(conditions=['cond1', 'cond2'], onsets=[[2, 3], [2]], durations=[[1, 1], [1]])]
    s.inputs.subject_info = deepcopy(info)
    res = s.run()
    yield assert_almost_equal, np.array(res.outputs.session_info[0]['cond'][0]['duration']), np.array([1., 1., 1., 1., 1., 1.])
    yield assert_almost_equal, np.array(res.outputs.session_info[0]['cond'][1]['duration']), np.array([1., 1., 1., 1.])
    rmtree(tempdir)
def spm_model_specification(behavioral_data, fmri_sessions, onset_name,
                            condition_name, duration_name, time_repetition,
                            realignment_parameters, delimiter, start,
                            concatenate_runs, high_pass_filter_cutoff,
                            output_directory):
    """ Specify the SPM model used in the GLM and estimate the design matrix.

    .. note::

        * `fmri_sessions` and `behavioral_data` must have the same number
          of elements.
        * `onsets` and `durations` values must be expressed in the same
          units as the TR used in the processing (i.e., seconds).

    <unit>
        <input name="behavioral_data" type="List" content="File" desc="list of
            .csv session behavioral data." />
        <input name="fmri_sessions" type="List" content="File" desc="list of
            path to fMRI sessions." />
        <input name="onset_name" type="String" desc="the name of the column
            in the `behavioral_data` file containing the onsets."/>
        <input name="condition_name" type="String" desc="the name of the
            column in the `behavioral_data` file containing the conditions."/>
        <input name="duration_name" type="String" desc="the name of the column
            in the `behavioral_data` file containing the condition durations.
            "/>
        <input name="time_repetition" type="Float" desc="the repetition time
            in seconds (in seconds)."/>
        <input name="realignment_parameters" type="File" desc="path to the SPM
            realign output parameters."/>
        <input name="delimiter" type="String" desc="separator used to split
            the `behavioral_data` file."/>
        <input name="start" type="Int" desc="line from which we start reading
            the `behavioral_data` file."/>
        <input name="concatenate_runs" type="Bool" desc="concatenate all runs
            to look like a single session."/>
        <input name="high_pass_filter_cutoff" type="Float" desc="high-pass
            filter cutoff in secs."/>
        <input name="output_directory" type="Directory" desc="Where to store
            the output file"/>
        <output name="session_info" type="Any" desc="session info to leverage
            the first level design."/>
        <output name="model_specifications" type="File" desc="file containing
            all model specifications" />
    </unit>
    """
    # Local imports
    from nipype.interfaces.base import Bunch
    from nipype.algorithms.modelgen import SpecifySPMModel

    # Check that we have one behavioral data file per session
    if len(behavioral_data) != len(fmri_sessions):
        raise ValueError("One behavioral data file per session is required, "
                         "got {0} behavioral data files and {1} sessions.".format(
                             len(behavioral_data), len(fmri_sessions)))

    # Get each session acquisition conditions
    info = []
    for csvfile in behavioral_data:

        # Parse the behavioural file
        all_onsets = get_onsets(csvfile,
                                condition_name, onset_name, duration_name,
                                delimiter, start)

        # Create a nipype Bunch (dictionary-like) structure
        conditions = []
        onsets = []
        durations = []
        for condition_name, item in all_onsets.items():
            conditions.append(condition_name)
            onsets.append([float(x) for x in item["onsets"]])
            durations.append([float(x) for x in item["durations"]])
        info.append(
            Bunch(conditions=conditions, onsets=onsets, durations=durations))

    # Make a model specification compatible with spm designer
    spec_interface = SpecifySPMModel(
        concatenate_runs=concatenate_runs,
        input_units="secs",
        output_units="secs",
        time_repetition=time_repetition,
        high_pass_filter_cutoff=high_pass_filter_cutoff,
        functional_runs=fmri_sessions,
        subject_info=info,
        realignment_parameters=realignment_parameters)
    spec_interface.run()

    # The previous interface uses numpy double precision. To keep the result
    # JSON-serializable, all float items must be cast explicitly.
    def cast_to_float(obj):
        """ Recursive method that cast numpy.double items.

        Parameters
        ----------
        obj: object
            a generic python object.

        Returns
        -------
        out: object
            the float-casted input object.
        """
        # Deal with dictionary
        if isinstance(obj, dict):
            out = {}
            for key, val in obj.items():
                out[key] = cast_to_float(val)

        # Deal with tuple and list
        elif isinstance(obj, (list, tuple)):
            out = []
            for val in obj:
                out.append(cast_to_float(val))
            if isinstance(obj, tuple):
                out = tuple(out)

        # Otherwise cast if it is a numpy.double
        else:
            out = obj
            if isinstance(obj, float):
                out = float(obj)

        return out

    session_info = cast_to_float(spec_interface.aggregate_outputs().get()[
        "session_info"])

    model_specifications = os.path.join(output_directory,
                                        "model_specifications.json")

    # save the design parameters
    with open(model_specifications, "w") as _file:
        json.dump(session_info, _file, indent=4)

    return session_info, model_specifications
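The `get_onsets` helper used above is not part of this snippet. A minimal sketch of what it might look like, assuming a delimited text file whose rows hold one event each and whose columns are named by `condition_name`, `onset_name`, and `duration_name` (the `start` handling is an assumption):

import csv

def get_onsets(csvfile, condition_name, onset_name, duration_name,
               delimiter, start):
    """Hypothetical parser: group event onsets/durations by condition."""
    all_onsets = {}
    with open(csvfile) as fobj:
        rows = list(csv.DictReader(fobj, delimiter=delimiter))
    for row in rows[start:]:  # skip the first `start` data rows (assumed semantics)
        item = all_onsets.setdefault(row[condition_name],
                                     {"onsets": [], "durations": []})
        item["onsets"].append(row[onset_name])
        item["durations"].append(row[duration_name])
    return all_onsets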
Example #10
# Reconstructed imports: the original statement was truncated. FEATModel and
# FILMGLS live in nipype.interfaces.fsl; `fsl_design` below is assumed to
# alias the FSL Level1Design interface.
from nipype import Node
from nipype.interfaces.utility import IdentityInterface
from nipype.algorithms.modelgen import SpecifySPMModel
from nipype.interfaces.fsl import MathsCommand, FEATModel, FILMGLS
from nipype.interfaces.fsl import Level1Design as fsl_design

input_node = Node(IdentityInterface(fields=[
    'bold',
    'events',
    ]), name='input')

output_node = Node(IdentityInterface(fields=[
    'T_image',
    ]), name='output')


# node design matrix
model = Node(interface=SpecifySPMModel(), name='design_matrix')
model.inputs.input_units = 'secs'
model.inputs.output_units = 'secs'
model.inputs.high_pass_filter_cutoff = 128.
model.inputs.time_repetition = .85
model.inputs.bids_condition_column = 'trial_name'


def create_workflow_temporalpatterns_fsl():

    replace_nan = Node(interface=MathsCommand(), name='replace_nan')
    replace_nan.inputs.nan2zeros = True

    # GLM
    design = Node(interface=fsl_design(), name='design')
    design.inputs.interscan_interval = .85
mean_func_file = preprocessed_heroes['mean ASL'][0]

# Define directory to save .m scripts and outputs to
subject_directory = os.path.relpath(func_file, subjects_parent_directory)
subject_directory = subject_directory.split(os.sep)[0]
working_directory = os.path.join('/tmp', subject_directory)
if not os.path.exists(working_directory):
    os.mkdir(working_directory)

os.chdir(working_directory)

#  Generate SPM-specific Model
from nipype.algorithms.modelgen import SpecifySPMModel
tr = 2.5
modelspec = SpecifySPMModel(input_units='secs',
                            time_repetition=tr,
                            high_pass_filter_cutoff=128)
modelspec.inputs.realignment_parameters = realignment_parameters
modelspec.inputs.functional_runs = func_file
# Read the conditions
import numpy as np
from nipype.interfaces.base import Bunch
paradigm = np.recfromcsv(paradigm_file)
conditions = np.unique(paradigm['name'])
onsets = [paradigm['onset'][paradigm['name'] == condition].tolist()
          for condition in conditions]
durations = [paradigm['duration'][paradigm['name'] == condition].tolist()
             for condition in conditions]
modelspec.inputs.subject_info = Bunch(conditions=conditions, onsets=onsets,
                                      durations=durations)
out_modelspec = modelspec.run()
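The resulting `session_info` is what SPM's first-level design expects; a sketch of the typical next step (mirroring the Level1Design usage in the other examples here), reusing `tr` from above:

from nipype.interfaces.spm import Level1Design

level1design = Level1Design(bases={'hrf': {'derivs': [0, 0]}},
                            timing_units='secs',
                            interscan_interval=tr)
level1design.inputs.session_info = out_modelspec.outputs.session_info
out_level1design = level1design.run()  # produces the SPM.mat design file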
Example #12
gunzip = MapNode(Gunzip(), name='gunzip', iterfield=['in_file'])
firstlev.connect(selectderivs, 'func', gunzip, 'in_file')

# Smooth warped functionals. Watch out: it smooths again if you stop here!
smooth = Node(Smooth(),
              overwrite=False,
              name="smooth")
smooth.iterables = ("fwhm", fwhmlist)
firstlev.connect(gunzip, 'out_file', smooth, 'in_files')

getsubinforuns = Node(Function(input_names=["subject_id"],
                               output_names=["subject_info"],
                               function=pick_onsets),
                      name='getsubinforuns')

modelspec = Node(SpecifySPMModel(),
                 overwrite=False,
                 name='modelspec')
modelspec.inputs.concatenate_runs = False
modelspec.inputs.input_units = 'secs'
modelspec.inputs.output_units = 'secs'
modelspec.inputs.time_repetition = TR
modelspec.inputs.high_pass_filter_cutoff = 128

firstlev.connect([
    (infosource, getsubinforuns, [('subject_id', 'subject_id')]),
    (getsubinforuns, modelspec, [('subject_info', 'subject_info')]),
    (selectderivs, modelspec, [('conf',
                                'realignment_parameters')]),
    (smooth, modelspec, [('smoothed_files', 'functional_runs')])
])
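`pick_onsets` is referenced but not shown in this excerpt. A hypothetical sketch of the kind of function the `getsubinforuns` node expects (the event timings are purely illustrative):

def pick_onsets(subject_id):
    # Imports must live inside the function body for nipype Function nodes.
    from nipype.interfaces.base import Bunch
    # A real version would look the timings up per subject; these are made up.
    return [Bunch(conditions=['task'],
                  onsets=[[10, 40, 70]],
                  durations=[[5]])]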
Example #13
def main():

    #######################
    # Commandline Arguments
    #######################
    # list of subject identifiers
    task_name = "Training" if training else "Test"
    print(project_folder, subject_list, task_name, nb_prc)

    #############################################################
    # Extracting fMRI Params (Only works with Kamitani's Dataset)
    #############################################################
    TR = 3.0
    voxel_size = (3, 3, 3)
    number_of_slices = 50
    json_file1 = opj(project_folder,
                     "dataset/ds001246-download/task-imagery_bold.json")
    json_file2 = opj(project_folder,
                     "dataset/ds001246-download/task-perception_bold.json")

    with open(json_file1) as file:
        data = json.load(file)
    slice_timing1 = data['SliceTiming']

    with open(json_file2) as file:
        data = json.load(file)
    slice_timing2 = data['SliceTiming']

    sorted1 = np.argsort(slice_timing1)
    sorted2 = np.argsort(slice_timing2)
    print(np.all(sorted1 == sorted2))

    slice_order = list(sorted1 + 1)
    print("Slice order:", slice_order)

    ##########################
    # Creating essential nodes
    ##########################
    # Model Spec
    modelspec_node = Node(SpecifySPMModel(concatenate_runs=True,
                                          input_units='secs',
                                          output_units='secs',
                                          time_repetition=TR,
                                          high_pass_filter_cutoff=128),
                          name='modelspec')

    # Level1Design - Generates a SPM design matrix
    level1design_node = Node(Level1Design(bases={'hrf': {
        'derivs': [0, 0]
    }},
                                          timing_units='secs',
                                          interscan_interval=TR,
                                          model_serial_correlations='AR(1)',
                                          mask_threshold='-Inf'),
                             name="level1design")

    # EstimateModel - estimate the parameters of the model (GLM)
    level1estimate_node = Node(
        EstimateModel(estimation_method={'Classical': 1}),
        name="level1estimate")

    # Infosource - a function-free node to iterate over the list of subject names
    infosrc_subjects = Node(IdentityInterface(fields=['subject_id']),
                            name="infosrc_subjects")
    infosrc_subjects.iterables = [('subject_id', subject_list)]

    # SelectFiles - selects files based on template matching
    tsv_file = opj('dataset', 'ds001246-download', '{subject_id}',
                   'ses-p*' + task_name + '*', 'func',
                   '{subject_id}_ses-p*' + task_name + '*_task-*_events.tsv')
    reg_file = opj('preprocess', '_subject_id_{subject_id}',
                   '_session_id_ses-p*' + task_name + '*', 'Realign',
                   'rp_a{subject_id}_ses-p*' + task_name + '*_task-*_bold.txt')
    func_file = opj(
        'preprocess', '_subject_id_{subject_id}',
        '_session_id_ses-p*' + task_name + '*', 'Coregister',
        'rara{subject_id}_ses-p*' + task_name + '*_task-*_bold.nii')
    mask_file = opj('datasink', 'preprocessed_masks', '{subject_id}',
                    '{subject_id}_full_mask.nii')

    templates = {
        'tsv': tsv_file,
        'reg': reg_file,
        'func': func_file,
        'mask': mask_file
    }

    selectfiles = Node(SelectFiles(templates, base_directory=project_folder),
                       name="selectfiles")

    # Subject Info
    subject_info_node = Node(Function(
        input_names=['tsv_files'],
        output_names=['subject_info'],
        function=read_tsv_train if training else read_tsv_test),
                             name='subject_info')

    # Datasink - creates output folder for important outputs
    datasink_node = Node(DataSink(base_directory=project_folder,
                                  container='datasink'),
                         name="datasink")

    substitutions = [('_subject_id_', '')]
    datasink_node.inputs.substitutions = substitutions

    #####################
    # Create the workflow
    #####################
    wf_name = 'glm_train_nomod' if training else 'glm_test'
    glm = Workflow(name=wf_name)
    glm.base_dir = project_folder

    # connect infosource to selectfile
    glm.connect([(infosrc_subjects, selectfiles, [('subject_id', 'subject_id')
                                                  ])])
    glm.connect([(selectfiles, subject_info_node, [('tsv', 'tsv_files')])])

    # connect infos to modelspec
    glm.connect([(subject_info_node, modelspec_node, [('subject_info',
                                                       'subject_info')])])
    glm.connect([(selectfiles, modelspec_node, [('reg',
                                                 'realignment_parameters')])])
    glm.connect([(selectfiles, modelspec_node, [('func', 'functional_runs')])])

    # connect modelspec to level1design
    glm.connect([(modelspec_node, level1design_node, [('session_info',
                                                       'session_info')])])
    glm.connect([(selectfiles, level1design_node, [('mask', 'mask_image')])])

    # connect design to estimate
    glm.connect([(level1design_node, level1estimate_node, [('spm_mat_file',
                                                            'spm_mat_file')])])

    # keeping estimate files params
    glm.connect([(level1estimate_node, datasink_node,
                  [('mask_image', f'{wf_name}.@mask_img')])])
    glm.connect([(level1estimate_node, datasink_node,
                  [('beta_images', f'{wf_name}.@beta_imgs')])])
    glm.connect([(level1estimate_node, datasink_node,
                  [('residual_image', f'{wf_name}.@res_img')])])
    glm.connect([(level1estimate_node, datasink_node,
                  [('RPVimage', f'{wf_name}.@rpv_img')])])
    glm.connect([(level1estimate_node, datasink_node,
                  [('spm_mat_file', f'{wf_name}.@spm_mat_file')])])

    glm.write_graph(graph2use='flat', format='png', simple_form=True)
    #     from IPython.display import Image
    #     Image(filename=opj(glm.base_dir, wf_name, 'graph_detailed.png'))

    ##################
    # Run the workflow
    ##################
    glm.run('MultiProc', plugin_args={'n_procs': nb_prc})
Example #14
# ======================================================================
# DEFINE NODE: LEAVE-ONE-RUN-OUT SELECTION OF DATA
# ======================================================================
leave_one_run_out = Node(Function(
    input_names=['subject_info', 'event_names', 'data_func', 'run'],
    output_names=['subject_info', 'data_func', 'contrasts'],
    function=leave_one_out),
                         name='leave_one_run_out')
# define the number of rows as an iterable:
leave_one_run_out.iterables = ('run', range(num_runs))
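`leave_one_out` is defined elsewhere; a hypothetical sketch consistent with the node's input and output names, where the held-out run is dropped from both the model specification and the functional data:

def leave_one_out(subject_info, event_names, data_func, run):
    # Drop the held-out run from the design and the data.
    subject_info = [info for i, info in enumerate(subject_info) if i != run]
    data_func = [f for i, f in enumerate(data_func) if i != run]
    # One simple t-contrast per event type (illustrative weights).
    contrasts = [(name, 'T', [name], [1.0]) for name in event_names]
    return subject_info, data_func, contrasts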
# ======================================================================
# DEFINE NODE: SPECIFY SPM MODEL (GENERATE SPM-SPECIFIC MODEL)
# ======================================================================
# function: makes a model specification compatible with spm designers
# adds SPM specific options to SpecifyModel
l1model = Node(SpecifySPMModel(), name="l1model")
# input: concatenate runs to a single session (boolean, default: False):
l1model.inputs.concatenate_runs = False
# input: units of event onsets and durations (secs or scans):
l1model.inputs.input_units = 'secs'
# input: units of design event onsets and durations (secs or scans):
l1model.inputs.output_units = 'secs'
# input: time of repetition (a float):
l1model.inputs.time_repetition = time_repetition
# high-pass filter cutoff in secs (a float, default = 128 secs):
l1model.inputs.high_pass_filter_cutoff = 128
# ======================================================================
# DEFINE NODE: LEVEL 1 DESIGN (GENERATE AN SPM DESIGN MATRIX)
# ======================================================================
# function: generate an SPM design matrix
l1design = Node(Level1Design(), name="l1design")
def spm_model_specification(behavioral_data, fmri_sessions, onset_name,
                            condition_name, duration_name, time_repetition,
                            realignment_parameters, delimiter, start,
                            concatenate_runs, high_pass_filter_cutoff):
    """ Specify the SPM model used in the GLM and estimate the design matrix.

    .. note::

        * `fmri_sessions` and `behavioral_data` must have the same number
          of elements.
        * `onsets` and `durations` values must be expressed in the same
          units as the TR used in the processing (i.e., seconds).

    <process>
        <return name="session_info" type="List" desc="session info to leverage
            the first level design."/>
        <input name="behavioral_data" type="List_File" desc="list of .csv
            session behavioral data." />
        <input name="fmri_sessions" type="List_File" desc="list of path to
            fMRI sessions." />
        <input name="onset_name" type="String" desc="the name of the column
            in the `behavioral_data` file containing the onsets."/>
        <input name="condition_name" type="String" desc="the name of the
            column in the `behavioral_data` file containing the conditions."/>
        <input name="duration_name" type="String" desc="the name of the column
            in the `behavioral_data` file containing the condition durations."/>
        <input name="time_repetition" type="Float" desc="the repetition time
            in seconds (in seconds)."/>
        <input name="realignment_parameters" type="File" desc="path to the SPM
            realign output parameters."/>
        <input name="delimiter" type="String" desc="separator used to split
            the `behavioral_data` file."/>
        <input name="start" type="Int" desc="line from which we start reading
            the `behavioral_data` file."/>
        <input name="concatenate_runs" type="Bool" desc="concatenate all runs
            to look like a single session."/>
        <input name="high_pass_filter_cutoff" type="Float" desc="high-pass
            filter cutoff in secs."/>
    </process>
    """
    # Local imports
    from nipype.interfaces.base import Bunch
    from nipype.algorithms.modelgen import SpecifySPMModel

    # Check that we have one behavioral data file per session
    if len(behavioral_data) != len(fmri_sessions):
        raise ValueError("One behavioral data file per session is required, "
                         "got {0} behavioral data files and {1} sessions.".format(
                             len(behavioral_data), len(fmri_sessions)))

    # Get each session acquisition conditions
    info = []
    for csvfile in behavioral_data:

        # Parse the behavioural file
        all_onsets = get_onsets(csvfile, condition_name, onset_name,
                                duration_name, delimiter, start)

        # Create a nipype Bunch (dictionary-like) structure
        conditions = []
        onsets = []
        durations = []
        for condition_name, item in all_onsets.items():
            conditions.append(condition_name)
            onsets.append([float(x) for x in item["onsets"]])
            durations.append([float(x) for x in item["durations"]])
        info.append(
            Bunch(conditions=conditions, onsets=onsets, durations=durations))

    # Make a model specification compatible with spm designer
    spec_interface = SpecifySPMModel(
        concatenate_runs=concatenate_runs,
        input_units="secs",
        output_units="secs",
        time_repetition=time_repetition,
        high_pass_filter_cutoff=high_pass_filter_cutoff,
        functional_runs=fmri_sessions,
        subject_info=info,
        realignment_parameters=realignment_parameters)
    spec_interface.run()

    # The previous interface uses numpy double precision. To keep the result
    # JSON-serializable, all float items must be cast explicitly.
    def cast_to_float(obj):
        """ Recursive method that cast numpy.double items.

        Parameters
        ----------
        obj: object
            a generic python object.

        Returns
        -------
        out: object
            the float-casted input object.
        """
        # Deal with dictionary
        if isinstance(obj, dict):
            out = {}
            for key, val in obj.items():
                out[key] = cast_to_float(val)

        # Deal with tuple and list
        elif isinstance(obj, (list, tuple)):
            out = []
            for val in obj:
                out.append(cast_to_float(val))
            if isinstance(obj, tuple):
                out = tuple(out)

        # Otherwise cast if it is a numpy.double
        else:
            out = obj
            if isinstance(obj, float):
                out = float(obj)

        return out

    session_info = cast_to_float(
        spec_interface.aggregate_outputs().get()["session_info"])

    return session_info
def batch_paramatric_GLM(nii_root_dir, sub_num_list, total_session_num,
                         all_sub_dataframe, params_name, contrast_list,
                         cache_folder, result_folder, parallel_cores):

    from nipype import Node, Workflow, Function
    from nipype.interfaces.spm import Level1Design, EstimateModel, EstimateContrast
    from nipype.algorithms.modelgen import SpecifySPMModel
    from nipype.interfaces.utility import IdentityInterface
    from nipype import DataSink

    # Define the helper functions

    def nii_selector(root_dir,
                     sub_num,
                     session_num,
                     all_sub_dataframe,
                     data_type="Smooth_8mm"):
        import os
        import glob
        session_list = ["session" + str(i) for i in range(1, session_num + 1)]
        sub_name = "sub" + str(sub_num)
        # print(file_path)
        nii_list = []
        for s in session_list:
            file_path = os.path.join(root_dir, sub_name, data_type, s)
            nii_list.append(glob.glob(file_path + "/*.nii"))
        single_sub_data = all_sub_dataframe[all_sub_dataframe.Subject_num ==
                                            sub_num]
        return (nii_list, single_sub_data, sub_name)

    def condition_generator(single_sub_data, params_name, duration=2):
        from nipype.interfaces.base import Bunch
        run_num = set(single_sub_data.run)
        subject_info = []
        for i in run_num:
            tmp_table = single_sub_data[single_sub_data.run == i]
            tmp_onset = tmp_table.onset.values.tolist()

            pmod_names = []
            pmod_params = []
            pmod_poly = []
            for param in params_name:
                pmod_params.append(tmp_table[param].values.tolist())
                pmod_names.append(param)
                pmod_poly.append(1)

            tmp_Bunch = Bunch(conditions=["trial_onset_run" + str(i)],
                              onsets=[tmp_onset],
                              durations=[[duration]],
                              pmod=[
                                  Bunch(name=pmod_names,
                                        poly=pmod_poly,
                                        param=pmod_params)
                              ])
            subject_info.append(tmp_Bunch)

        return subject_info

    # Define each node in the workflow

    NiiSelector = Node(Function(
        input_names=[
            "root_dir", "sub_num", "session_num", "all_sub_dataframe",
            "data_type"
        ],
        output_names=["nii_list", "single_sub_data", "sub_name"],
        function=nii_selector),
                       name="NiiSelector")

    ConditionGenerator = Node(Function(
        input_names=["single_sub_data", "params_name", "duration"],
        output_names=["subject_info"],
        function=condition_generator),
                              name="ConditionGenerator")

    glm_input = Node(IdentityInterface(
        fields=['nii_list', 'single_sub_data', 'params_name', 'contrast_list'],
        mandatory_inputs=True),
                     name="glm_input")

    # SpecifyModel - Generates SPM-specific Model
    modelspec = Node(SpecifySPMModel(concatenate_runs=False,
                                     input_units='scans',
                                     output_units='scans',
                                     time_repetition=2,
                                     high_pass_filter_cutoff=128),
                     name="modelspec")

    # Level1Design - Generates an SPM design matrix
    level1design = Node(Level1Design(bases={'hrf': {
        'derivs': [0, 0]
    }},
                                     timing_units='scans',
                                     interscan_interval=2),
                        name="level1design")

    level1estimate = Node(EstimateModel(estimation_method={'Classical': 1}),
                          name="level1estimate")

    level1conest = Node(EstimateContrast(), name="level1conest")

    OutputNode = Node(DataSink(), name="OutputNode")

    # Define the attributes of those nodes

    NiiSelector.inputs.root_dir = nii_root_dir
    NiiSelector.iterables = ("sub_num", sub_num_list)
    NiiSelector.inputs.session_num = total_session_num
    NiiSelector.inputs.data_type = "Smooth_8mm"
    NiiSelector.inputs.all_sub_dataframe = all_sub_dataframe

    glm_input.inputs.params_name = params_name
    glm_input.inputs.contrast_list = contrast_list

    OutputNode.inputs.base_directory = result_folder

    # Define the workflows

    single_sub_GLM_wf = Workflow(name='single_sub_GLM_wf')
    single_sub_GLM_wf.connect([
        (glm_input, ConditionGenerator, [('single_sub_data',
                                          'single_sub_data'),
                                         ('params_name', 'params_name')]),
        (glm_input, modelspec, [('nii_list', 'functional_runs')]),
        (glm_input, level1conest, [('contrast_list', 'contrasts')]),
        (ConditionGenerator, modelspec, [('subject_info', 'subject_info')]),
        (modelspec, level1design, [('session_info', 'session_info')]),
        (level1design, level1estimate, [('spm_mat_file', 'spm_mat_file')]),
        (level1estimate, level1conest, [('spm_mat_file', 'spm_mat_file'),
                                        ('beta_images', 'beta_images'),
                                        ('residual_image', 'residual_image')])
    ])

    batch_GLM_wf = Workflow(name="batch_GLM_wf", base_dir=cache_folder)
    batch_GLM_wf.connect([(NiiSelector, single_sub_GLM_wf, [
        ('nii_list', 'glm_input.nii_list'),
        ('single_sub_data', 'glm_input.single_sub_data')
    ]), (NiiSelector, OutputNode, [('sub_name', 'container')]),
                          (single_sub_GLM_wf, OutputNode,
                           [('level1conest.spm_mat_file', '1stLevel.@spm_mat'),
                            ('level1conest.spmT_images', '1stLevel.@T'),
                            ('level1conest.con_images', '1stLevel.@con'),
                            ('level1conest.spmF_images', '1stLevel.@F'),
                            ('level1conest.ess_images', '1stLevel.@ess')])])

    # Execute the workflow
    batch_GLM_wf.run(plugin='MultiProc',
                     plugin_args={'n_procs': parallel_cores})
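A hedged usage sketch for the batch function above; every path and parameter below is a placeholder:

import pandas as pd

all_sub_df = pd.read_csv('/data/behavior/all_subjects.csv')  # placeholder
batch_paramatric_GLM(nii_root_dir='/data/nii',               # placeholder
                     sub_num_list=[1, 2, 3],
                     total_session_num=4,
                     all_sub_dataframe=all_sub_df,
                     params_name=['rt'],  # parametric modulator column(s)
                     contrast_list=[('trial_onset_run1', 'T',
                                     ['trial_onset_run1'], [1.0])],
                     cache_folder='/tmp/glm_cache',
                     result_folder='/data/results',
                     parallel_cores=4)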
Example #17
def test_modelgen_spm_concat():
    tempdir = mkdtemp()
    filename1 = os.path.join(tempdir, 'test1.nii')
    filename2 = os.path.join(tempdir, 'test2.nii')
    Nifti1Image(np.random.rand(10, 10, 10, 30),
                np.eye(4)).to_filename(filename1)
    Nifti1Image(np.random.rand(10, 10, 10, 30),
                np.eye(4)).to_filename(filename2)
    # Test case where a single duration is passed and applied to all onsets.
    s = SpecifySPMModel()
    s.inputs.input_units = 'secs'
    s.inputs.concatenate_runs = True
    setattr(s.inputs, 'output_units', 'secs')
    yield assert_equal, s.inputs.output_units, 'secs'
    s.inputs.functional_runs = [filename1, filename2]
    s.inputs.time_repetition = 6
    s.inputs.high_pass_filter_cutoff = 128.
    info = [
        Bunch(conditions=['cond1'],
              onsets=[[2, 50, 100, 170]],
              durations=[[1]]),
        Bunch(conditions=['cond1'],
              onsets=[[30, 40, 100, 150]],
              durations=[[1]])
    ]
    s.inputs.subject_info = deepcopy(info)
    res = s.run()
    yield assert_equal, len(res.outputs.session_info), 1
    yield assert_equal, len(res.outputs.session_info[0]['regress']), 1
    yield assert_equal, np.sum(
        res.outputs.session_info[0]['regress'][0]['val']), 30
    yield assert_equal, len(res.outputs.session_info[0]['cond']), 1
    yield assert_almost_equal, np.array(
        res.outputs.session_info[0]['cond'][0]['onset']), np.array(
            [2.0, 50.0, 100.0, 170.0, 210.0, 220.0, 280.0, 330.0])
    yield assert_almost_equal, np.array(
        res.outputs.session_info[0]['cond'][0]['duration']), np.array(
            [1., 1., 1., 1., 1., 1., 1., 1.])
    # Test case of scans as output units instead of seconds
    setattr(s.inputs, 'output_units', 'scans')
    yield assert_equal, s.inputs.output_units, 'scans'
    s.inputs.subject_info = deepcopy(info)
    res = s.run()
    yield assert_almost_equal, np.array(
        res.outputs.session_info[0]['cond'][0]['onset']), np.array(
            [2.0, 50.0, 100.0, 170.0, 210.0, 220.0, 280.0, 330.0]) / 6
    # Test case for no concatenation with seconds as output units
    s.inputs.concatenate_runs = False
    s.inputs.subject_info = deepcopy(info)
    s.inputs.output_units = 'secs'
    res = s.run()
    yield assert_almost_equal, np.array(
        res.outputs.session_info[0]['cond'][0]['onset']), np.array(
            [2.0, 50.0, 100.0, 170.0])
    # Test case: variable number of events across separate runs, with some conditions having a single event.
    filename3 = os.path.join(tempdir, 'test3.nii')
    Nifti1Image(np.random.rand(10, 10, 10, 30),
                np.eye(4)).to_filename(filename3)
    s.inputs.functional_runs = [filename1, filename2, filename3]
    info = [
        Bunch(conditions=['cond1', 'cond2'],
              onsets=[[2, 3], [2]],
              durations=[[1, 1], [1]]),
        Bunch(conditions=['cond1', 'cond2'],
              onsets=[[2, 3], [2, 4]],
              durations=[[1, 1], [1, 1]]),
        Bunch(conditions=['cond1', 'cond2'],
              onsets=[[2, 3], [2]],
              durations=[[1, 1], [1]])
    ]
    s.inputs.subject_info = deepcopy(info)
    res = s.run()
    yield assert_almost_equal, np.array(
        res.outputs.session_info[0]['cond'][0]['duration']), np.array([1., 1.])
    yield assert_almost_equal, np.array(
        res.outputs.session_info[0]['cond'][1]['duration']), np.array([
            1.,
        ])
    yield assert_almost_equal, np.array(
        res.outputs.session_info[1]['cond'][1]['duration']), np.array([1., 1.])
    yield assert_almost_equal, np.array(
        res.outputs.session_info[2]['cond'][1]['duration']), np.array([
            1.,
        ])
    # Test case: variable number of events across concatenated runs, with some conditions having a single event.
    s.inputs.concatenate_runs = True
    info = [
        Bunch(conditions=['cond1', 'cond2'],
              onsets=[[2, 3], [2]],
              durations=[[1, 1], [1]]),
        Bunch(conditions=['cond1', 'cond2'],
              onsets=[[2, 3], [2, 4]],
              durations=[[1, 1], [1, 1]]),
        Bunch(conditions=['cond1', 'cond2'],
              onsets=[[2, 3], [2]],
              durations=[[1, 1], [1]])
    ]
    s.inputs.subject_info = deepcopy(info)
    res = s.run()
    yield assert_almost_equal, np.array(
        res.outputs.session_info[0]['cond'][0]['duration']), np.array(
            [1., 1., 1., 1., 1., 1.])
    yield assert_almost_equal, np.array(
        res.outputs.session_info[0]['cond'][1]['duration']), np.array(
            [1., 1., 1., 1.])
    rmtree(tempdir)
Example #18
def combine_wkflw(c, prep_c=foo, name='work_dir'):  # `foo`: a preprocessing config assumed to be defined upstream
    import nipype.interfaces.utility as util    # utility
    import nipype.pipeline.engine as pe         # pypeline engine
    import nipype.interfaces.io as nio          # input/output
    from nipype.algorithms.modelgen import SpecifySPMModel, SpecifySparseModel
    import numpy as np
    modelflow = pe.Workflow(name=name)
    modelflow.base_dir = os.path.join(c.working_dir)
    
    preproc = c.datagrabber.create_dataflow()#preproc_datagrabber(prep_c)
    
    #infosource = pe.Node(util.IdentityInterface(fields=['subject_id']),
    #                     name='subject_names')

    #if c.test_mode:
    #    infosource.iterables = ('subject_id', [c.subjects[0]])
    #else:
    #    infosource.iterables = ('subject_id', c.subjects)

    infosource = preproc.get_node('subject_id_iterable')
    #modelflow.connect(infosource,'subject_id',preproc,'subject_id')
    #preproc.iterables = ('fwhm', prep_c.fwhm)

    subjectinfo = pe.Node(util.Function(input_names=['subject_id'], output_names=['output']), name='subjectinfo')
    subjectinfo.inputs.function_str = c.subjectinfo

    def getsubs(subject_id,cons,info,fwhm):
        #from config import getcontrasts, get_run_numbers, subjectinfo, fwhm
        subs = [('_subject_id_%s/'%subject_id,''),
                ('_plot_type_',''),
                ('_fwhm','fwhm'),
                ('_dtype_mcf_mask_mean','_mean'),
                ('_dtype_mcf_mask_smooth_mask_gms_tempfilt','_smoothed_preprocessed'),
                ('_dtype_mcf_mask_gms_tempfilt','_unsmoothed_preprocessed'),
                ('_dtype_mcf','_mcf')]
        
        for i in range(4):
            subs.append(('_plot_motion%d'%i, ''))
            subs.append(('_highpass%d/'%i, ''))
            subs.append(('_realign%d/'%i, ''))
            subs.append(('_meanfunc2%d/'%i, ''))
        runs = range(len(info))
        for i, run in enumerate(runs):
            subs.append(('_modelestimate%d/'%i, '_run_%d_%02d_'%(i,run)))
            subs.append(('_modelgen%d/'%i, '_run_%d_%02d_'%(i,run)))
            subs.append(('_conestimate%d/'%i,'_run_%d_%02d_'%(i,run)))
        for i, con in enumerate(cons):
            subs.append(('cope%d.'%(i+1), 'cope%02d_%s.'%(i+1,con[0])))
            subs.append(('varcope%d.'%(i+1), 'varcope%02d_%s.'%(i+1,con[0])))
            subs.append(('zstat%d.'%(i+1), 'zstat%02d_%s.'%(i+1,con[0])))
            subs.append(('tstat%d.'%(i+1), 'tstat%02d_%s.'%(i+1,con[0])))
        """for i, name in enumerate(info[0].conditions):
            subs.append(('pe%d.'%(i+1), 'pe%02d_%s.'%(i+1,name)))
        for i in range(len(info[0].conditions), 256):
            subs.append(('pe%d.'%(i+1), 'others/pe%02d.'%(i+1)))"""
        for i in fwhm:
            subs.append(('_register%d/'%(i),''))

        for j in range(0,20):
            subs.append(('_convert%d'%j,''))        

        return subs

    get_substitutions = pe.Node(util.Function(input_names=['subject_id',
                                                           'cons',
                                                           'info',
                                                           'fwhm'],
        output_names=['subs'], function=getsubs), name='getsubs')

    # create a node to create the subject info
    if not c.is_sparse:
        s = pe.Node(SpecifySPMModel(),name='s')
    else:
        s = pe.Node(SpecifySparseModel(model_hrf=c.model_hrf,
                                       stimuli_as_impulses=c.stimuli_as_impulses,
                                       use_temporal_deriv=c.use_temporal_deriv,
                                       volumes_in_cluster=c.volumes_in_cluster, 
                                       scan_onset=c.scan_onset,scale_regressors=c.scale_regressors),
            name='s')
        s.inputs.time_acquisition = c.ta
    s.inputs.input_units = c.input_units
    s.inputs.output_units = 'secs'
    s.inputs.time_repetition = c.tr
    if c.hpcutoff < 0:
        c.hpcutoff = np.inf
    s.inputs.high_pass_filter_cutoff = c.hpcutoff
    #s.inputs.concatenate_runs = False
    #subjinfo =                                          subjectinfo(subj)
    
    
    # create a node to add the traditional (MCFLIRT-derived) motion regressors to 
    # the subject info
    trad_motn = pe.Node(util.Function(input_names=['subinfo',
                                                   'files'],
                                      output_names=['subinfo'],
                                      function=trad_mot),
                        name='trad_motn')

    
    modelflow.connect(infosource,'subject_id', subjectinfo, 'subject_id')
    modelflow.connect(subjectinfo, 'output', trad_motn, 'subinfo')


    # create a node to add the principal components of the noise regressors to
    # the subject info
    noise_motn = pe.Node(util.Function(input_names=['subinfo',
                                                    'files',
                                                    'num_noise_components',
                                                    "use_compcor"],
                                       output_names=['subinfo'],
                                       function=noise_mot),
                         name='noise_motn')
    noise_motn.inputs.use_compcor = c.use_compcor
    # generate first level analysis workflow
    modelfit = create_first()
    modelfit.inputs.inputspec.interscan_interval = c.interscan_interval
    modelfit.inputs.inputspec.estimation_method = {c.estimation_method: ''}
    
    contrasts = pe.Node(util.Function(input_names=['subject_id'], output_names=['contrasts']), name='getcontrasts')
    contrasts.inputs.function_str = c.contrasts

    modelflow.connect(infosource,'subject_id', 
                     contrasts, 'subject_id')
    modelflow.connect(contrasts,'contrasts', modelfit, 'inputspec.contrasts')
    
    modelfit.inputs.inputspec.bases =                   c.bases
    modelfit.inputs.inputspec.model_serial_correlations = 'AR(1)'
    noise_motn.inputs.num_noise_components = prep_c.num_noise_components
    
    # make a data sink
    sinkd = pe.Node(nio.DataSink(), name='sinkd')
    sinkd.inputs.base_directory = os.path.join(c.sink_dir)
        
    modelflow.connect(infosource, 'subject_id', sinkd, 'container')
    #modelflow.connect(infosource, ('subject_id',getsubs, getcontrasts, subjectinfo, prep_c.fwhm), sinkd, 'substitutions')
    modelflow.connect(infosource, 'subject_id', get_substitutions, 'subject_id')
    modelflow.connect(contrasts, 'contrasts', get_substitutions, 'cons')
    modelflow.connect(subjectinfo,'output',get_substitutions,'info')
    get_substitutions.inputs.fwhm = prep_c.fwhm
    modelflow.connect(get_substitutions,'subs', sinkd, 'substitutions')


    sinkd.inputs.regexp_substitutions = [('mask/fwhm_%d/_threshold([0-9]*)/.*nii'%x,'mask/fwhm_%d/funcmask.nii'%x) for x in prep_c.fwhm]
    sinkd.inputs.regexp_substitutions.append(('realigned/fwhm_([0-9])/_copy_geom([0-9]*)/','realigned/'))
    sinkd.inputs.regexp_substitutions.append(('motion/fwhm_([0-9])/','motion/'))
    sinkd.inputs.regexp_substitutions.append(('bbreg/fwhm_([0-9])/','bbreg/'))

    def gunzipper(in_files):
        from nipype.algorithms.misc import Gunzip
        if isinstance(in_files,list):
            outputs = []
            for i in in_files:
                if i.endswith('.gz'):
                    res = Gunzip(in_file=i).run()
                    outputs.append(res.outputs.out_file)
                else: outputs.append(i)
        else:
            if in_files.endswith('.gz'):
                res = Gunzip(in_file=in_files).run()
                outputs = res.outputs.out_file  
            else: outputs = in_files
        return outputs      

    gunzip1 = pe.Node(util.Function(input_names=['in_files'],output_names=['outputs'],function=gunzipper),name='gunzipper')
    gunzip2 = gunzip1.clone('gunzipper2')     
    # make connections
    modelflow.connect(preproc, 'datagrabber.motion_parameters',      trad_motn,  'files')
    modelflow.connect(preproc, 'datagrabber.noise_components',       noise_motn, 'files')
    modelflow.connect(preproc, 'datagrabber.highpassed_files', gunzip2, "in_files")
    modelflow.connect(gunzip2,'outputs', s, 'functional_runs')

    modelflow.connect(preproc, 'datagrabber.outlier_files',          s,          'outlier_files')
    modelflow.connect(trad_motn,'subinfo',                          noise_motn, 'subinfo')
    modelflow.connect(noise_motn,'subinfo',                         s,          'subject_info')
    modelflow.connect(s,'session_info',                             modelfit,   'inputspec.session_info')

    modelflow.connect(preproc, 'datagrabber.mask',gunzip1,'in_files') 
    modelflow.connect(gunzip1, 'outputs', modelfit, 'inputspec.mask')

    modelflow.connect(modelfit, 'outputspec.spm_mat_file',   sinkd,      'modelfit_spm.contrasts.@spm_mat')
    modelflow.connect(modelfit, 'outputspec.residual_image',              sinkd,      'modelfit_spm.design.@residual')
    modelflow.connect(modelfit, 'outputspec.con_images',                 sinkd,      'modelfit_spm.contrasts.@cons')
    modelflow.connect(modelfit, 'outputspec.ess_images',              sinkd,      'modelfit_spm.contrasts.@ess')
    modelflow.connect(modelfit, 'outputspec.spmF_images',                sinkd,      'modelfit_spm.contrasts.@spmF')
    modelflow.connect(modelfit, 'outputspec.spmT_images',                sinkd,      'modelfit_spm.contrasts.@spmT')
    modelflow.connect(modelfit, 'outputspec.RPVimage',              sinkd,      'modelfit_spm.design.@rpv')
    modelflow.connect(modelfit, 'outputspec.beta_images',           sinkd,      'modelfit_spm.design.@beta')
    modelflow.connect(modelfit, 'outputspec.mask_image',            sinkd,      'modelfit_spm.design.@mask')
    
    return modelflow
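`trad_mot` and `noise_mot` are imported from elsewhere in this project. A hypothetical sketch of the shape `trad_mot` likely has, appending MCFLIRT motion parameters to each run's subject info as regressors:

def trad_mot(subinfo, files):
    # Imports must live inside the function body for nipype Function nodes.
    import numpy as np
    for info, f in zip(subinfo, files):
        params = np.genfromtxt(f)  # one row per volume, six motion columns
        info.regressor_names = ['motion%d' % i for i in range(params.shape[1])]
        info.regressors = [params[:, i].tolist() for i in range(params.shape[1])]
    return subinfo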
Example #19
def first_level(TR,
                contrast_list,
                subject_list,
                experiment_dir,
                output_dir,
                subjectinfo_func,
                working_dir='workingdir'):
    """define first level model"""
    # SpecifyModel - Generates SPM-specific Model
    modelspec = Node(SpecifySPMModel(concatenate_runs=False,
                                     input_units='secs',
                                     output_units='secs',
                                     time_repetition=TR,
                                     high_pass_filter_cutoff=128),
                     name="modelspec")

    # Level1Design - Generates an SPM design matrix
    level1design = Node(Level1Design(bases={'hrf': {
        'derivs': [0, 0]
    }},
                                     timing_units='secs',
                                     interscan_interval=TR,
                                     model_serial_correlations='FAST'),
                        name="level1design")

    # EstimateModel - estimate the parameters of the model
    level1estimate = Node(EstimateModel(estimation_method={'Classical': 1}),
                          name="level1estimate")

    # EstimateContrast - estimates contrasts
    level1conest = Node(EstimateContrast(), name="level1conest")

    # Get Subject Info - get subject specific condition information
    getsubjectinfo = Node(Function(input_names=['subject_id'],
                                   output_names=['subject_info'],
                                   function=subjectinfo_func),
                          name='getsubjectinfo')

    # Infosource - a function-free node to iterate over the list of subject names
    infosource = Node(IdentityInterface(fields=['subject_id', 'contrasts'],
                                        contrasts=contrast_list),
                      name="infosource")
    infosource.iterables = [('subject_id', subject_list)]

    # SelectFiles - to grab the data (an alternative to DataGrabber)
    smooth_dir = opj(experiment_dir, 'smooth_nomask', 'preproc')
    templates = {
        'func': opj(smooth_dir, 'sub-{subject_id}', '*run-*_fwhm-8_bold.nii')
    }

    selectfiles = Node(SelectFiles(templates,
                                   base_directory=experiment_dir,
                                   sort_filelist=True),
                       name="selectfiles")

    # Datasink - creates output folder for important outputs
    datasink = Node(DataSink(base_directory=experiment_dir,
                             container=output_dir),
                    name="datasink")

    # Use the following DataSink output substitutions
    substitutions = [('_subject_id_', 'sub-')]
    datasink.inputs.substitutions = substitutions
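    # This renames DataSink output folders: the iterable-derived
    # '_subject_id_01' directory becomes 'sub-01'.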

    # Initiation of the 1st-level analysis workflow
    l1analysis = Workflow(name='l1analysis')
    l1analysis.base_dir = opj(experiment_dir, working_dir)

    # Connect up the 1st-level analysis components
    l1analysis.connect([
        (infosource, selectfiles, [('subject_id', 'subject_id')]),
        (infosource, getsubjectinfo, [('subject_id', 'subject_id')]),
        (getsubjectinfo, modelspec, [('subject_info', 'subject_info')]),
        (infosource, level1conest, [('contrasts', 'contrasts')]),
        (selectfiles, modelspec, [('func', 'functional_runs')]),
        (modelspec, level1design, [('session_info', 'session_info')]),
        (level1design, level1estimate, [('spm_mat_file', 'spm_mat_file')]),
        (level1estimate, level1conest, [('spm_mat_file', 'spm_mat_file'),
                                        ('beta_images', 'beta_images'),
                                        ('residual_image', 'residual_image')]),
        (level1conest, datasink, [
            ('spm_mat_file', '1stLevel.@spm_mat'),
            ('spmT_images', '1stLevel.@T'),
            ('con_images', '1stLevel.@con'),
            ('spmF_images', '1stLevel.@F'),
            ('ess_images', '1stLevel.@ess'),
        ]),
    ])
    return l1analysis
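
# A minimal usage sketch for the factory above; every value below is a
# hypothetical placeholder, not taken from the original example.
def subjectinfo(subject_id):
    # Placeholder: a real function would load per-subject event files here.
    from nipype.interfaces.base import Bunch
    return [Bunch(conditions=['task'],
                  onsets=[[10.0, 70.0, 130.0]],
                  durations=[[15.0]])]

contrasts = [('task > rest', 'T', ['task'], [1.0])]
l1 = first_level(TR=2.0,
                 contrast_list=contrasts,
                 subject_list=['01', '02'],
                 experiment_dir='/data/experiment',  # hypothetical path
                 output_dir='output_1stLevel',
                 subjectinfo_func=subjectinfo)
l1.run('MultiProc', plugin_args={'n_procs': 2})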
Example #20
0
def test_modelgen_spm_concat(tmpdir):
    filename1 = tmpdir.join("test1.nii").strpath
    filename2 = tmpdir.join("test2.nii").strpath
    Nifti1Image(np.random.rand(10, 10, 10, 30),
                np.eye(4)).to_filename(filename1)
    Nifti1Image(np.random.rand(10, 10, 10, 30),
                np.eye(4)).to_filename(filename2)

    # Test the case where a single duration is passed and applied to all onsets.
    s = SpecifySPMModel()
    s.inputs.input_units = "secs"
    s.inputs.concatenate_runs = True
    setattr(s.inputs, "output_units", "secs")
    assert s.inputs.output_units == "secs"
    s.inputs.functional_runs = [filename1, filename2]
    s.inputs.time_repetition = 6
    s.inputs.high_pass_filter_cutoff = 128.0
    info = [
        Bunch(conditions=["cond1"],
              onsets=[[2, 50, 100, 170]],
              durations=[[1]]),
        Bunch(conditions=["cond1"],
              onsets=[[30, 40, 100, 150]],
              durations=[[1]]),
    ]
    s.inputs.subject_info = deepcopy(info)
    res = s.run()
    assert len(res.outputs.session_info) == 1
    assert len(res.outputs.session_info[0]["regress"]) == 1
    assert np.sum(res.outputs.session_info[0]["regress"][0]["val"]) == 30
    assert len(res.outputs.session_info[0]["cond"]) == 1
    npt.assert_almost_equal(
        np.array(res.outputs.session_info[0]["cond"][0]["onset"]),
        np.array([2.0, 50.0, 100.0, 170.0, 210.0, 220.0, 280.0, 330.0]),
    )
    npt.assert_almost_equal(
        np.array(res.outputs.session_info[0]["cond"][0]["duration"]),
        np.array([1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0]),
    )

    # Test case of scans as output units instead of seconds
    setattr(s.inputs, "output_units", "scans")
    assert s.inputs.output_units == "scans"
    s.inputs.subject_info = deepcopy(info)
    res = s.run()
    npt.assert_almost_equal(
        np.array(res.outputs.session_info[0]["cond"][0]["onset"]),
        np.array([2.0, 50.0, 100.0, 170.0, 210.0, 220.0, 280.0, 330.0]) / 6,
    )
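
    # Arithmetic behind the expected values: each run has 30 volumes at
    # TR = 6 s (180 s of data), so the run-2 onsets [30, 40, 100, 150] are
    # shifted by 180 s to [210, 220, 280, 330] when the runs are
    # concatenated; with output_units='scans' the onsets are divided by
    # TR (6). The single regressor is consistent with a run-2 indicator
    # (1 for each of run 2's 30 scans), matching the sum of 30 asserted
    # earlier.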

    # Test case for no concatenation with seconds as output units
    s.inputs.concatenate_runs = False
    s.inputs.subject_info = deepcopy(info)
    s.inputs.output_units = "secs"
    res = s.run()
    npt.assert_almost_equal(
        np.array(res.outputs.session_info[0]["cond"][0]["onset"]),
        np.array([2.0, 50.0, 100.0, 170.0]),
    )

    # Test a variable number of events across separate runs, where some runs
    # have only a single event for a condition.
    filename3 = tmpdir.join("test3.nii").strpath
    Nifti1Image(np.random.rand(10, 10, 10, 30),
                np.eye(4)).to_filename(filename3)
    s.inputs.functional_runs = [filename1, filename2, filename3]
    info = [
        Bunch(conditions=["cond1", "cond2"],
              onsets=[[2, 3], [2]],
              durations=[[1, 1], [1]]),
        Bunch(
            conditions=["cond1", "cond2"],
            onsets=[[2, 3], [2, 4]],
            durations=[[1, 1], [1, 1]],
        ),
        Bunch(conditions=["cond1", "cond2"],
              onsets=[[2, 3], [2]],
              durations=[[1, 1], [1]]),
    ]
    s.inputs.subject_info = deepcopy(info)
    res = s.run()
    npt.assert_almost_equal(
        np.array(res.outputs.session_info[0]["cond"][0]["duration"]),
        np.array([1.0, 1.0]),
    )
    npt.assert_almost_equal(
        np.array(res.outputs.session_info[0]["cond"][1]["duration"]),
        np.array([1.0]))
    npt.assert_almost_equal(
        np.array(res.outputs.session_info[1]["cond"][1]["duration"]),
        np.array([1.0, 1.0]),
    )
    npt.assert_almost_equal(
        np.array(res.outputs.session_info[2]["cond"][1]["duration"]),
        np.array([1.0]))
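
    # With concatenate_runs=False there is one session_info entry per run,
    # so the [0], [1], [2] indices above address runs 1-3 individually.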

    # Test the same variable-event pattern with the runs concatenated.
    s.inputs.concatenate_runs = True
    info = [
        Bunch(conditions=["cond1", "cond2"],
              onsets=[[2, 3], [2]],
              durations=[[1, 1], [1]]),
        Bunch(
            conditions=["cond1", "cond2"],
            onsets=[[2, 3], [2, 4]],
            durations=[[1, 1], [1, 1]],
        ),
        Bunch(conditions=["cond1", "cond2"],
              onsets=[[2, 3], [2]],
              durations=[[1, 1], [1]]),
    ]
    s.inputs.subject_info = deepcopy(info)
    res = s.run()
    npt.assert_almost_equal(
        np.array(res.outputs.session_info[0]["cond"][0]["duration"]),
        np.array([1.0, 1.0, 1.0, 1.0, 1.0, 1.0]),
    )
    npt.assert_almost_equal(
        np.array(res.outputs.session_info[0]["cond"][1]["duration"]),
        np.array([1.0, 1.0, 1.0, 1.0]),
    )
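
    # With concatenation, each condition's events are pooled across runs:
    # cond1 contributes 2 + 2 + 2 = 6 events and cond2 contributes
    # 1 + 2 + 1 = 4 events, which the duration lengths above reflect.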
Example #21
0
# define the FSL output type:
trim.inputs.output_type = 'NIFTI'
# set expected thread and memory usage for the node:
trim.interface.num_threads = 1
trim.interface.estimated_memory_gb = 3
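# num_threads and estimated_memory_gb are scheduling hints: resource-aware
# execution plugins such as MultiProc use them to decide how many nodes can
# run concurrently without oversubscribing CPU or memory.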

# ======================================================================
# DEFINE NODE: SPECIFY SPM MODEL (GENERATE SPM-SPECIFIC MODEL)
# ======================================================================
# function: makes a model specification compatible with SPM designers;
# adds SPM-specific options to SpecifyModel

# SpecifyModel - Generates SPM-specific Model
modelspec = Node(SpecifySPMModel(concatenate_runs=False,
                                 input_units='secs',
                                 output_units='secs',
                                 time_repetition=TR,
                                 high_pass_filter_cutoff=128),
                 name="modelspec")

l1model = Node(model.SpecifyModel(input_units='secs',
                                  time_repetition=TR,
                                  high_pass_filter_cutoff=128),
               name="l1model")
# input: concatenate runs to a single session (boolean, default: False):
# l1model.inputs.concatenate_runs = False
# input: units of event onsets and durations (secs or scans):
# l1model.inputs.input_units = 'secs'
# input: units of design event onsets and durations (secs or scans):
# l1model.inputs.output_units = 'secs'
# input: time of repetition (a float):