Beispiel #1
0
def test_modelgen_sparse(tmpdir):
    """Smoke-test SpecifySparseModel: sparse design generation, impulse vs
    block stimuli, HRF modelling and temporal derivatives.

    NOTE: the same interface instance is re-run after each input toggle, so
    the order of the ``s.inputs`` assignments and ``s.run()`` calls matters.
    """
    tempdir = str(tmpdir)
    filename1 = os.path.join(tempdir, 'test1.nii')
    filename2 = os.path.join(tempdir, 'test2.nii')
    # two random 4D images (10x10x10, 50 volumes) act as the functional runs
    Nifti1Image(np.random.rand(10, 10, 10, 50), np.eye(4)).to_filename(filename1)
    Nifti1Image(np.random.rand(10, 10, 10, 50), np.eye(4)).to_filename(filename2)
    s = SpecifySparseModel()
    s.inputs.input_units = 'secs'
    s.inputs.functional_runs = [filename1, filename2]
    s.inputs.time_repetition = 6
    info = [Bunch(conditions=['cond1'], onsets=[[0, 50, 100, 180]], durations=[[2]]),
            Bunch(conditions=['cond1'], onsets=[[30, 40, 100, 150]], durations=[[1]])]
    s.inputs.subject_info = info
    s.inputs.volumes_in_cluster = 1
    s.inputs.time_acquisition = 2
    s.inputs.high_pass_filter_cutoff = np.inf
    res = s.run()
    # sparse designs are emitted as regressors, not as SPM-style conditions
    assert len(res.outputs.session_info) == 2
    assert len(res.outputs.session_info[0]['regress']) == 1
    assert len(res.outputs.session_info[0]['cond']) == 0

    # modelling stimuli as blocks (not impulses) saturates the regressor
    s.inputs.stimuli_as_impulses = False
    res = s.run()
    assert res.outputs.session_info[0]['regress'][0]['val'][0] == 1.0

    # convolving with the HRF scales the regressor values down
    s.inputs.model_hrf = True
    res = s.run()
    npt.assert_almost_equal(res.outputs.session_info[0]['regress'][0]['val'][0], 0.016675298129743384)
    assert len(res.outputs.session_info[0]['regress']) == 1
    # adding the temporal derivative doubles the number of regressors
    s.inputs.use_temporal_deriv = True
    res = s.run()

    assert len(res.outputs.session_info[0]['regress']) == 2
    npt.assert_almost_equal(res.outputs.session_info[0]['regress'][0]['val'][0], 0.016675298129743384)
    npt.assert_almost_equal(res.outputs.session_info[1]['regress'][1]['val'][5], 0.007671459162258378)
    def condition_generator(single_sub_data, params_name, duration=2):
        """Build one Bunch per run with trial onsets and one linear
        parametric modulator per requested parameter column.

        ``single_sub_data`` is expected to have ``run`` and ``onset``
        columns plus one column per entry in ``params_name``.
        """
        from nipype.interfaces.base import Bunch

        subject_info = []
        for run_id in set(single_sub_data.run):
            run_rows = single_sub_data[single_sub_data.run == run_id]
            run_onsets = run_rows.onset.values.tolist()

            # one poly=1 (linear) modulator per parameter column
            modulators = Bunch(
                name=[param for param in params_name],
                poly=[1 for _ in params_name],
                param=[run_rows[param].values.tolist()
                       for param in params_name],
            )

            subject_info.append(
                Bunch(conditions=["trial_onset_run" + str(run_id)],
                      onsets=[run_onsets],
                      durations=[[duration]],
                      pmod=[modulators]))

        return subject_info
def make_bunch_and_contrasts(blocked_design_onsets_dicts,
                             n_cycles=20,
                             dur_per_digit=5.12,
                             subtractive_contrast=False):
    """
    Produce subject_info as required input of SpecifyModel (Bunch containing
    conditions, onsets, durations) and contrasts as input for the modelfit
    workflow.

    Subtractive contrasts weight the regressor of interest with +4 and all
    others with -1; in this case we skip the last contrast (because it would
    be a linear combination of the others).
    Non-subtractive contrast (i.e. one-sample t-test) weights the regressor
    of interest with 1 and all others with 0.

    Returns (subject_info, contrasts) where contrasts is the list of
    t-contrasts followed by an F-contrast over all of them.
    """
    from nipype.interfaces.base import Bunch
    cycle_dur = dur_per_digit * 5

    # in periodic stimulation runs: onsets are the same for both conditions,
    # just the order of regressors is flipped
    periodic_onsets = [[0 + (digit_idx * dur_per_digit) + (cycle_idx * cycle_dur)
                        for cycle_idx in range(n_cycles)]
                       for digit_idx in range(5)]
    durations = [[dur_per_digit] * n_cycles for _ in range(5)]
    d1_d5_conditions = ['D_%i' % i for i in range(1, 6)]
    d5_d1_conditions = ['D_%i' % i for i in range(5, 0, -1)]

    # blocked_design conditions and onsets
    blocked1_onsets = blocked_design_onsets_dicts['blocked_design1']
    blocked2_onsets = blocked_design_onsets_dicts['blocked_design2']

    subject_info = [Bunch(conditions=d1_d5_conditions, onsets=periodic_onsets, durations=durations),
                    Bunch(conditions=d5_d1_conditions, onsets=periodic_onsets, durations=durations),
                    Bunch(conditions=d1_d5_conditions, onsets=blocked1_onsets, durations=durations),
                    Bunch(conditions=d1_d5_conditions, onsets=blocked2_onsets, durations=durations)]
    # t-contrasts
    t_contrasts = []
    for cond_idx, cond_name in enumerate(d1_d5_conditions):
        if subtractive_contrast:
            # BUG FIX: the original compared against len(cond_name) - 1
            # (length of the condition *string*, always 2), which skipped
            # the third contrast instead of the last one.
            if cond_idx == len(d1_d5_conditions) - 1:
                continue
            contrast_vector = [-1, -1, -1, -1]
            contrast_vector.insert(cond_idx, 4)
            t_contrasts.append(('tcon_%s' % cond_name, 'T', d1_d5_conditions, contrast_vector))
        else:
            contrast_vector = [0, 0, 0, 0]
            contrast_vector.insert(cond_idx, 1)
            t_contrasts.append(('tcon_%s' % cond_name, 'T', d1_d5_conditions, contrast_vector))
    # f-contrast over all t-contrasts
    f_contrast = [('All_Digits', 'F', t_contrasts)]
    contrasts = t_contrasts + f_contrast
    return subject_info, contrasts
Beispiel #4
0
def test_modelgen_spm_concat():
    """Test SpecifySPMModel run concatenation and output-unit conversion.

    NOTE: written in the legacy nose ``yield assert_*`` generator style;
    assertions execute in order and each ``s.run()`` depends on the input
    toggles set immediately before it.
    """
    tempdir = mkdtemp()
    filename1 = os.path.join(tempdir, 'test1.nii')
    filename2 = os.path.join(tempdir, 'test2.nii')
    # two random 30-volume runs serve as the functional data
    Nifti1Image(np.random.rand(10, 10, 10, 30),
                np.eye(4)).to_filename(filename1)
    Nifti1Image(np.random.rand(10, 10, 10, 30),
                np.eye(4)).to_filename(filename2)
    s = SpecifySPMModel()
    s.inputs.input_units = 'secs'
    s.inputs.concatenate_runs = True
    setattr(s.inputs, 'output_units', 'secs')
    yield assert_equal, s.inputs.output_units, 'secs'
    s.inputs.functional_runs = [filename1, filename2]
    s.inputs.time_repetition = 6
    s.inputs.high_pass_filter_cutoff = 128.
    info = [
        Bunch(conditions=['cond1'],
              onsets=[[2, 50, 100, 170]],
              durations=[[1]]),
        Bunch(conditions=['cond1'],
              onsets=[[30, 40, 100, 150]],
              durations=[[1]])
    ]
    # deepcopy: the interface mutates subject_info, and info is reused below
    s.inputs.subject_info = deepcopy(info)
    res = s.run()
    # concatenation collapses both runs into a single session
    yield assert_equal, len(res.outputs.session_info), 1
    yield assert_equal, len(res.outputs.session_info[0]['regress']), 1
    yield assert_equal, np.sum(
        res.outputs.session_info[0]['regress'][0]['val']), 30
    yield assert_equal, len(res.outputs.session_info[0]['cond']), 1
    # run 2 onsets are shifted by run 1's duration (30 vols * TR 6 s = 180 s)
    yield assert_almost_equal, np.array(
        res.outputs.session_info[0]['cond'][0]['onset']), np.array(
            [2.0, 50.0, 100.0, 170.0, 210.0, 220.0, 280.0, 330.0])
    setattr(s.inputs, 'output_units', 'scans')
    yield assert_equal, s.inputs.output_units, 'scans'
    s.inputs.subject_info = deepcopy(info)
    res = s.run()
    # 'scans' output divides the onset times by the TR
    yield assert_almost_equal, np.array(
        res.outputs.session_info[0]['cond'][0]['onset']), np.array(
            [2.0, 50.0, 100.0, 170.0, 210.0, 220.0, 280.0, 330.0]) / 6
    s.inputs.concatenate_runs = False
    s.inputs.subject_info = deepcopy(info)
    s.inputs.output_units = 'secs'
    res = s.run()
    # without concatenation, run 1's onsets come back unshifted
    yield assert_almost_equal, np.array(
        res.outputs.session_info[0]['cond'][0]['onset']), np.array(
            [2.0, 50.0, 100.0, 170.0])
    rmtree(tempdir)
Beispiel #5
0
def get_info(events, confounds):
    """Assemble per-subject, per-run Bunches combining task events with the
    standard fmriprep nuisance regressors (FD, aCompCor, motion)."""
    nuisance_names = [
        'FramewiseDisplacement', 'aCompCor00', 'aCompCor01', 'aCompCor02',
        'aCompCor03', 'aCompCor04', 'aCompCor05', 'X', 'Y', 'Z', 'RotX',
        'RotY', 'RotZ'
    ]
    info = []
    for s in range(len(SUBJECTS)):
        info.append([])
        for r in range(num_runs):
            event = events[s][r]
            conf = confounds[s][r]
            # FD has NaN for the first volume; replace with 0.  The other
            # columns are taken as-is, in the order of nuisance_names.
            nuisance_values = [list(conf.FramewiseDisplacement.fillna(0))]
            nuisance_values.extend(
                list(getattr(conf, name)) for name in nuisance_names[1:])
            run_bunch = Bunch(
                conditions=conditions,
                onsets=onsets(event),
                durations=durations(event),
                regressors=nuisance_values,
                regressor_names=list(nuisance_names))
            info[s].append([run_bunch])
    return info
Beispiel #6
0
    def make_pmod(df, conditions, pmods=None, normalize='mean'):
        """Build an SPM parametric-modulator (pmod) list, one entry per
        condition.

        Parameters
        ----------
        df : DataFrame with a ``trial_type`` column and modulator columns
        conditions : condition names to build modulators for
        pmods : dict mapping condition name -> modulator column name
            (conditions without an entry get ``None``)
        normalize : 'mean' to mean-center the modulator values

        Returns a list aligned with ``conditions``; entries are Bunches or
        ``None`` (no modulator requested, or modulator has zero variance).
        """
        # BUG FIX: the default was a mutable ``{}`` shared across calls
        if pmods is None:
            pmods = {}

        pmod = []

        for cond in conditions:

            pmod_name = pmods.get(cond)
            if not pmod_name:
                pmod.append(None)
                continue

            # copy so normalization never mutates the caller's DataFrame
            # (also avoids pandas SettingWithCopyWarning)
            df2 = df[df.trial_type == cond].copy()

            # a constant modulator column carries no information; skip it
            if df2[pmod_name].var() == 0:
                pmod.append(None)
                continue

            # APPLY NORMALIZATION
            if normalize == 'mean':
                df2[pmod_name] = df2[pmod_name] - df2[pmod_name].mean()

            pmod.append(
                Bunch(name=[pmod_name],
                      param=[df2[pmod_name].values.tolist()],
                      poly=[1]))

        return pmod
Beispiel #7
0
def get_subjectinfo(subject_delay, scan_type, scan_types):
    """Query the LabbookDB stimulation protocol matching ``scan_type`` and
    return a one-element list with a Bunch of per-stimulus onsets, shifted
    earlier by ``subject_delay`` seconds."""
    import pandas as pd
    from copy import deepcopy
    import sys
    sys.path.append('/home/chymera/src/LabbookDB/db/')
    from query import loadSession
    from common_classes import LaserStimulationProtocol

    db_path = "~/meta.db"
    session, engine = loadSession(db_path)

    sql_query = session.query(LaserStimulationProtocol).filter(
        LaserStimulationProtocol.code == scan_types[scan_type])
    mydf = pd.read_sql_query(sql_query.statement, engine)
    delay = int(mydf["stimulation_onset"][0])
    inter_stimulus_duration = int(mydf["inter_stimulus_duration"][0])
    stimulus_duration = mydf["stimulus_duration"][0]
    stimulus_repetitions = mydf["stimulus_repetitions"][0]

    # one named condition per stimulus repetition
    names = ["s" + str(i + 1) for i in range(stimulus_repetitions)]
    onsets = [[delay + (inter_stimulus_duration + stimulus_duration) * i]
              for i in range(stimulus_repetitions)]
    # floating point values don't add up nicely, so we have to round
    # (https://docs.python.org/2/tutorial/floatingpoint.html)
    onsets = [[round(onset - subject_delay, 2) for onset in run_onsets]
              for run_onsets in onsets]

    return [Bunch(conditions=names,
                  onsets=deepcopy(onsets),
                  durations=[[stimulus_duration]] * stimulus_repetitions)]
Beispiel #8
0
def gen_info(run_event_files):
    """Generate subject_info structure from a list of event files.

    Each event file contributes one condition per run.  Event files are
    whitespace-separated with columns: onset [, duration [, amplitude]].
    If any file in a run lacks the amplitude column, the run's Bunch ends
    up without an ``amplitudes`` attribute.
    """
    info = []
    for i, event_files in enumerate(run_event_files):
        runinfo = Bunch(conditions=[], onsets=[], durations=[], amplitudes=[])
        for event_file in event_files:
            # condition name = file name up to '.runNNN' or '.txt'
            _, name = os.path.split(event_file)
            if '.run' in name:
                name, _ = name.split('.run%03d' % (i + 1))
            elif '.txt' in name:
                name, _ = name.split('.txt')
            runinfo.conditions.append(name)
            event_info = np.atleast_2d(np.loadtxt(event_file))
            runinfo.onsets.append(event_info[:, 0].tolist())
            if event_info.shape[1] > 1:
                runinfo.durations.append(event_info[:, 1].tolist())
            else:
                runinfo.durations.append([0])
            if event_info.shape[1] > 2:
                runinfo.amplitudes.append(event_info[:, 2].tolist())
            elif hasattr(runinfo, 'amplitudes'):
                # BUG FIX: the original called delattr unconditionally,
                # raising AttributeError when a second file in the same run
                # also lacked an amplitude column.
                delattr(runinfo, 'amplitudes')
        info.append(runinfo)
    return info
Beispiel #9
0
    def _glob_fsl_feeds_data(subject_dir):
        """glob data from subject_dir.

        Returns a Bunch with 'func'/'anat' paths, or None when expected
        files are missing (after one retry under 'feeds/data').
        """

        if not os.path.exists(subject_dir):
            return None

        subject_data = {}
        subject_data["subject_dir"] = subject_dir
        for file_name in FSL_FEEDS_DATA_FILES:
            file_path = os.path.join(subject_dir, file_name)
            # BUG FIX: rstrip(".gz") strips any trailing '.', 'g' or 'z'
            # characters, not the ".gz" suffix; use an explicit suffix check.
            if file_path.endswith(".gz"):
                uncompressed_path = file_path[:-len(".gz")]
            else:
                uncompressed_path = file_path
            if os.path.exists(file_path) or os.path.exists(uncompressed_path):
                file_name = re.sub(r"(?:\.nii\.gz|\.txt)", "", file_name)
                subject_data[file_name] = file_path
            else:
                # retry once in the conventional 'feeds/data' subfolder
                if not os.path.basename(subject_dir) == 'data':
                    return _glob_fsl_feeds_data(
                        os.path.join(subject_dir, 'feeds/data'))
                # BUG FIX: Python 2 print statement -> print() call
                print("%s missing from filelist!" % file_name)
                return None

        _subject_data = {
            "func": os.path.join(subject_dir, "fmri.nii.gz"),
            "anat": os.path.join(subject_dir, "structural_brain.nii.gz")
        }

        return Bunch(**_subject_data)
Beispiel #10
0
    def _run_interface(self, runtime):
        """Parse the condition file and build subject_info, optionally
        restricted to (and ordered by) ``condition_names``; conditions not
        found in the file get empty onset/duration lists."""
        conditions, onsets, durations = parse_condition_file(
            in_any=self.inputs.in_any)

        if isdefined(self.inputs.condition_names):
            # need a traits-free representation for the Bunch
            wanted = [str(name) for name in self.inputs.condition_names]
            selected_onsets = []
            selected_durations = []
            for name in wanted:
                if name in conditions:
                    idx = conditions.index(name)
                    selected_onsets.append(onsets[idx])
                    selected_durations.append(durations[idx])
                else:
                    # unknown condition -> no events
                    selected_onsets.append([])
                    selected_durations.append([])
            conditions = wanted
            onsets = selected_onsets
            durations = selected_durations

        self._results["subject_info"] = Bunch(conditions=conditions,
                                              onsets=onsets,
                                              durations=durations)

        return runtime
Beispiel #11
0
def TaskEvents(fileEvent):
    """Read a BIDS-style events TSV and return (subject_info, contrast_list)
    for a single activation/deactivation model over all trial types."""
    import pandas as pd
    from nipype.interfaces.base import Bunch

    ## Getting experiment info from the event file, into a Bunch object
    trial_info = pd.read_csv(fileEvent, sep='\t')
    conditions = sorted(set(trial_info.trial_type))

    onsets = [list(trial_info.onset[trial_info.trial_type == cond])
              for cond in conditions]
    durations = [list(trial_info.duration[trial_info.trial_type == cond])
                 for cond in conditions]

    subject_info = [
        Bunch(
            conditions=conditions,
            onsets=onsets,
            durations=durations,
        )
    ]

    ## Defining contrasts
    contrast_list = [
        ['activation', 'T', conditions, [1]],
        ['deactivation', 'T', conditions, [-1]],
    ]

    return subject_info, contrast_list
    def run(self):
        """Threshold each spmT image at the q = 0.05 FDR level (empirical
        null) and save the thresholded map plus a histogram figure.

        NOTE(review): uses the deprecated nibabel accessors get_data /
        get_affine / get_header, and writes fixed filenames
        ('histogram.pdf', 'thresholded_map.nii') in the current working
        directory — each input image overwrites the previous one's output.
        """
        for fname in self.inputs.spmT_images:
            img = nifti.load(fname)
            data = np.array(img.get_data())

            # fit the empirical null distribution, derive the FDR threshold
            fdr = ENN(data.ravel())
            th = fdr.threshold(0.05)

            # diagnostic plot of the null fit
            plt.figure()
            ax = plt.subplot(1, 1, 1)
            fdr.plot(mpaxes=ax)
            plt.savefig("histogram.pdf")

            # keep only supra-threshold voxels; everything else is zeroed
            active_map = data > th

            thresholded_map = np.zeros(data.shape)
            thresholded_map[active_map] = data[active_map]

            thresholded_map = np.reshape(thresholded_map, data.shape)

            new_img = nifti.Nifti1Image(thresholded_map, img.get_affine(),
                                        img.get_header())
            nifti.save(new_img, 'thresholded_map.nii')

        # synthesize a successful nipype-style result with no typed outputs
        runtime = Bunch(returncode=0, messages=None, errmessages=None)
        outputs = None
        return InterfaceResult(deepcopy(self), runtime, outputs=outputs)
Beispiel #13
0
def evt_info(cond_events):
    """Convert per-run condition-event tables into nipype Bunches with
    onsets/durations for the PreSwitchCurves and ResponseCues conditions."""
    output = []

    # one Bunch per run
    for run_events in cond_events:
        from nipype.interfaces.base import Bunch
        from copy import deepcopy

        names = ['PreSwitchCurves', 'ResponseCues']
        run_results = Bunch(
            conditions=names,
            onsets=[deepcopy(run_events[name].time) for name in names],
            durations=[deepcopy(run_events[name].dur) for name in names],
        )

        output.append(run_results)
    return output
Beispiel #14
0
    def _glob_spm_auditory_data(subject_dir):
        """glob data from subject_dir.

        Returns a Bunch with 'func' (sorted fM00223 volumes) and 'anat'
        (the sM00223_002 structural), or None if any expected file is
        missing.
        """

        if not os.path.exists(subject_dir):
            return None

        subject_data = {}
        for file_name in SPM_AUDITORY_DATA_FILES:
            file_path = os.path.join(subject_dir, file_name)
            if not os.path.exists(file_path):
                print("%s missing from filelist!" % file_name)
                return None
            subject_data[file_name] = file_path

        func_pattern = re.compile(r"^fM00223_0\d\d\.img$")
        anat_pattern = re.compile(r"^sM00223_002\.img$")

        _subject_data = {}
        _subject_data["func"] = sorted(
            subject_data[key] for key in subject_data.keys()
            if func_pattern.match(os.path.basename(key)))

        _subject_data["anat"] = [
            subject_data[key] for key in subject_data.keys()
            if anat_pattern.match(os.path.basename(key))
        ][0]

        return Bunch(**_subject_data)
def TaskEvents(fileEvent):
    """Read an events TSV (keyed on the 'Stimulus' column) and return
    subject_info plus congruent/incongruent T-contrasts."""
    import pandas as pd
    from nipype.interfaces.base import Bunch

    ## Getting experiment info from the event file, into a Bunch object
    trial_info = pd.read_csv(fileEvent, sep='\t')
    conditions = sorted(set(trial_info.Stimulus))

    onsets = [list(trial_info.onset[trial_info.Stimulus == cond])
              for cond in conditions]
    durations = [list(trial_info.duration[trial_info.Stimulus == cond])
                 for cond in conditions]

    subject_info = [
        Bunch(
            conditions=conditions,
            onsets=onsets,
            durations=durations,
        )
    ]

    ## Defining contrasts over the two conditions (congruent, incongruent)
    contrast_list = [
        ['congruent', 'T', conditions, [1, 0]],
        ['incongruent', 'T', conditions, [0, 1]],
        ['cong>incong', 'T', conditions, [1, -1]],
        ['incong>cong', 'T', conditions, [-1, 1]],
        ['average', 'T', conditions, [0.5, 0.5]],
    ]

    return subject_info, contrast_list
Beispiel #16
0
    def __init__(self, datasink, TR, num_vol):
        """Assemble an SPM first-level analysis workflow.

        Parameters
        ----------
        datasink : DataSink node that receives contrast/T images
        TR : repetition time in seconds
        num_vol : number of volumes; one 'None' event is placed per volume
        """
        # specify input and output nodes
        self.datasink = datasink
        self.TR = TR
        self.num_vol = num_vol

        # specify nodes
        # SpecifyModel - Generates SPM-specific Model
        self.modelspec = pe.Node(interface=model.SpecifySPMModel(),
                                 name='model_specification')
        self.modelspec.inputs.input_units = 'secs'
        self.modelspec.inputs.output_units = 'secs'
        self.modelspec.inputs.time_repetition = self.TR
        self.modelspec.inputs.high_pass_filter_cutoff = 128
        # single dummy condition: a 0.5 s event at every volume onset
        subjectinfo = [
            Bunch(conditions=['None'],
                  onsets=[list(range(self.num_vol))],
                  durations=[[0.5]])
        ]
        self.modelspec.inputs.subject_info = subjectinfo

        # Level1Design - Generates an SPM design matrix
        self.level1design = pe.Node(interface=spm.Level1Design(),
                                    name='first_level_design')
        # canonical HRF plus time and dispersion derivatives
        self.level1design.inputs.bases = {'hrf': {'derivs': [1, 1]}}
        self.level1design.inputs.interscan_interval = self.TR
        self.level1design.inputs.timing_units = 'secs'

        # EstimateModel - estimate the parameters of the model
        # method can be 'Classical', 'Bayesian' or 'Bayesian2'
        self.level1estimate = pe.Node(interface=spm.EstimateModel(),
                                      name="first_level_estimate")
        self.level1estimate.inputs.estimation_method = {'Classical': 1}

        # thresholding node is created but currently left unconnected (see
        # the commented-out connection below)
        self.threshold = pe.Node(interface=spm.Threshold(), name="threshold")
        self.threshold.inputs.contrast_index = 1

        # EstimateContrast - estimates contrasts
        self.contrast_estimate = pe.Node(interface=spm.EstimateContrast(),
                                         name="contrast_estimate")
        # single T-contrast on the dummy 'None' regressor
        cont1 = ('active > rest', 'T', ['None'], [1])
        contrasts = [cont1]
        self.contrast_estimate.inputs.contrasts = contrasts

        # specify workflow instance
        self.workflow = pe.Workflow(name='first_level_analysis_workflow')

        # connect nodes: modelspec -> design -> estimate -> contrasts -> sink
        self.workflow.connect([
            (self.modelspec, self.level1design, [('session_info',
                                                  'session_info')]),
            (self.level1design, self.level1estimate, [('spm_mat_file',
                                                       'spm_mat_file')]),
            (self.level1estimate, self.contrast_estimate,
             [('spm_mat_file', 'spm_mat_file'), ('beta_images', 'beta_images'),
              ('residual_image', 'residual_image')]),
            # (self.contrast_estimate, self.threshold, [('spm_mat_file', 'spm_mat_file'), ('spmT_images', 'stat_image')]),
            (self.contrast_estimate, self.datasink,
             [('con_images', 'contrast_img'), ('spmT_images', 'contrast_T')])
        ])
Beispiel #17
0
    def test_cwd(self):
        # `pwd` executed with cwd=$HOME should report $HOME on stdout
        home_dir = op.expanduser("~")
        runtime = Bunch(cwd=home_dir, environ=os.environ)
        out = submission.submit_cmdline(runtime, ["pwd"])
        assert out.stdout == home_dir + "\n"
def paradigm_info(design_text_file):
    """Create the model design parameters based on a csv design file.

    The expected column titles for the csv are 'name' for the specific task
    (e.g., 'fp_run1'), 'conditions' (e.g., 'highCal'), 'onsets', and
    'durations'.

    Returns a dict mapping task name -> Bunch(conditions, onsets, durations),
    or None (with a printed message) if parsing fails.
    """
    from nipype.interfaces.base import Bunch
    import pandas as pd
    try:
        df = pd.DataFrame(pd.read_csv(design_text_file))
        # 'onsets'/'durations' cells arrive as strings such as "[1, 2, 3]"
        # or "1 2 3"; convert each to a list of ints
        for col in ('onsets', 'durations'):
            for ix, row in enumerate(df[col]):
                text = str(row).strip('[]').strip()
                parts = text.split(',') if ',' in row else text.split()
                df.loc[ix, col] = [int(x) for x in parts]

        par_info = {}
        for task in sorted(set(df['name'])):
            # limiting to the applicable data
            tmp = df.loc[df['name'] == task, :]
            par_info[task] = Bunch(conditions=tmp['conditions'].to_list(),
                                   onsets=tmp['onsets'].to_list(),
                                   durations=tmp['durations'].to_list())

        return par_info
    except Exception:
        # BUG FIX: was a bare `except:`, which also swallowed SystemExit and
        # KeyboardInterrupt; still best-effort, but only for real errors.
        # TODO test to see what types of errors may be common (lists in the
        # durations, no column name, etc.)
        print('Error in paradigm info.')
Beispiel #19
0
def tsv2subjectinfo(in_file, exclude=None):
    """Convert a BIDS events TSV into a nipype Bunch.

    Parameters
    ----------
    in_file : path to a tab-separated events file with at least
        'trial_type', 'onset' and 'duration' columns; an optional 'weight'
        column supplies per-event amplitudes.
    exclude : column label(s) to drop before parsing (not tested).
    """
    import pandas as pd
    from nipype.interfaces.base import Bunch
    import numpy as np

    events = pd.read_csv(in_file, sep=str('\t'))

    if exclude is not None:  # not tested
        events.drop(exclude, axis=1, inplace=True)

    conditions = sorted(events['trial_type'].unique())
    onsets = [events['onset'][events['trial_type'] == tt].tolist()
              for tt in conditions]
    durations = [events['duration'][events['trial_type'] == tt].tolist()
                 for tt in conditions]

    # WHITESPACE FIX: this branch used mixed tabs and spaces in the original.
    if 'weight' in events.columns:
        amplitudes = [events['weight'][events['trial_type'] == tt].tolist()
                      for tt in conditions]
    else:
        # CONSISTENCY FIX: return plain lists (as the 'weight' branch does)
        # instead of numpy arrays; default amplitude is 1 per event
        amplitudes = [np.ones(len(d)).tolist() for d in durations]

    bunch = Bunch(conditions=conditions,
                  onsets=onsets,
                  durations=durations,
                  amplitudes=amplitudes)

    return bunch
Beispiel #20
0
    def evt_info(cond_events):
        """Build one Bunch per run from event tables, keeping only the
        conditions that actually have events in that run."""
        output = []

        # for each run
        for ev in cond_events:
            from nipype.interfaces.base import Bunch
            from copy import deepcopy

            # conditions with at least one event in this run
            names = [name for name in ev.keys() if ev[name].shape[0] > 0]

            # DEAD-CODE FIX: the original computed onsets/durations/
            # amplitudes (with vacuous `else []` branches, since `names`
            # already excludes empty conditions) and then ignored them,
            # re-deriving the same values inline in the Bunch call.
            onsets = [deepcopy(ev[name].time) for name in names]
            durations = [deepcopy(ev[name].dur) for name in names]
            amplitudes = [deepcopy(ev[name].amplitude) for name in names]

            run_results = Bunch(
                conditions=names,
                onsets=onsets,
                durations=durations,
                amplitudes=amplitudes)

            output.append(run_results)
        return output
Beispiel #21
0
def test_sc_populate_inputs():
    # a fresh StimulusCorrelation interface must expose exactly these inputs
    expected = Bunch(realignment_parameters=None,
                     intensity_values=None,
                     spm_mat_file=None,
                     concatenated_design=None)
    sc = ra.StimulusCorrelation()
    yield assert_equal, sc.inputs.__dict__.keys(), expected.__dict__.keys()
Beispiel #22
0
def subjectinfo(meantsfile):
    """Load a mean-timeseries text file and wrap it as a single
    'MeanIntensity' regressor Bunch (one-element list)."""
    import numpy as np
    from nipype.interfaces.base import Bunch
    mean_ts = np.loadtxt(meantsfile)
    bunch = Bunch(regressor_names=['MeanIntensity'],
                  regressors=[mean_ts.tolist()])
    return [bunch]
Beispiel #23
0
    def test_stdout_addition(self):
        # submit_cmdline must append the new command's stdout to whatever
        # stdout is already recorded on the runtime Bunch
        runtime = Bunch(stdout="hello test\n",
                        cwd=os.getcwd(),
                        environ=os.environ)
        out = submission.submit_cmdline(runtime, ["echo", "oh why hello"])
        assert out.stdout == "hello test\noh why hello\n"
 def _agg(objekt, runtime):
     """Fake aggregation of FAST outputs: point at pre-baked report files
     under ``datadir`` instead of running the interface."""
     class_map = os.path.join(datadir, 'testFASTRPT-tissue_class_map.nii.gz')
     class_files = [
         os.path.join(datadir,
                      'testFASTRPT-tissue_class_files%d.nii.gz' % index)
         for index in range(3)
     ]
     return Bunch(tissue_class_map=class_map,
                  tissue_class_files=class_files)
Beispiel #25
0
    def test_submit(self):
        # submitting a `touch` command must create the target file
        scratch_dir = tempfile.gettempdir()
        target = op.join(scratch_dir, "tmp_{}".format(time.time()))
        runtime = Bunch(cwd=os.getcwd(), environ=os.environ)
        submission.submit_cmdline(runtime, ["touch", target])
        assert op.exists(target)
        os.remove(target)
Beispiel #26
0
def fetch_msdl_atlas(data_dir=None, url=None, resume=True, verbose=0):
    """Download and load the MSDL brain atlas.

    Parameters
    ----------
    data_dir: string, optional
        Path of the data directory. Used to force data storage in a
        specified location. Default: None

    url: string, optional
        Override download URL. Used for test only (or if you setup a mirror
        of the data).

    Returns
    -------
    data: sklearn.datasets.base.Bunch
        Dictionary-like object, the interest attributes are :
        - 'labels': str. Path to csv file containing labels.
        - 'maps': str. path to nifti file containing regions definition.

    References
    ----------
    :Download:
        https://team.inria.fr/parietal/files/2013/05/MSDL_rois.zip

    :Paper to cite:
        `Multi-subject dictionary learning to segment an atlas of brain
        spontaneous activity <http://hal.inria.fr/inria-00588898/en>`_
        Gaël Varoquaux, Alexandre Gramfort, Fabian Pedregosa, Vincent Michel,
        Bertrand Thirion. Information Processing in Medical Imaging, 2011,
        pp. 562-573, Lecture Notes in Computer Science.

    :Other references:
        `Learning and comparing functional connectomes across subjects
        <http://hal.inria.fr/hal-00812911/en>`_.
        Gaël Varoquaux, R.C. Craddock NeuroImage, 2013.

    """
    dataset_name = "msdl_atlas"
    archive = 'MSDL_rois.zip'
    # the two atlas files live inside the "MSDL_rois" folder created by
    # unzipping the archive
    paths = [os.path.join("MSDL_rois", fname)
             for fname in ('msdl_rois_labels.csv', 'msdl_rois.nii')]

    try:
        # fast path: data already present locally
        files = _get_dataset(dataset_name, paths, data_dir=data_dir)
    except IOError:
        # not cached yet: download, then resolve the paths again
        if url is None:
            url = 'https://team.inria.fr/parietal/files/2013/05/' + archive
        _fetch_dataset(dataset_name, [url],
                       data_dir=data_dir,
                       resume=resume,
                       verbose=verbose)
        files = _get_dataset(dataset_name, paths, data_dir=data_dir)

    return Bunch(labels=files[0], maps=files[1])
Beispiel #27
0
    def _run_interface(self, runtime):
        """Parse the condition file(s) and expose the result as a
        subject_info Bunch."""
        parsed = parse_condition_file(in_any=self.inputs.in_any)
        conditions, onsets, durations = parsed

        self._results["subject_info"] = Bunch(
            conditions=conditions, onsets=onsets, durations=durations)

        return runtime
Beispiel #28
0
    def _run_interface(self, runtime):
        """Build subject_info from condition files, dropping empty or
        malformed conditions, and rewrite the requested contrasts so they
        only reference conditions that survived the filtering.
        """
        # parse input files
        cf = ConditionFile(data=self.inputs.in_any)
        conditions = cf.conditions
        onsets = cf.onsets
        durations = cf.durations

        # use only selected conditions
        if isdefined(self.inputs.condition_names):
            conditions, onsets, durations = cf.select(
                self.inputs.condition_names)

        # remove empty or invalid conditions: onset/duration lists must be
        # the same length and non-empty
        filtered_conditions = [
            (condition, onset, duration) for condition, onset, duration in zip(
                conditions, onsets, durations)
            if len(onset) == len(duration) and len(onset) > 0
        ]
        assert len(filtered_conditions) > 0, "No events found"
        conditions, onsets, durations = zip(*filtered_conditions)

        # filter and re-write contrasts based on available conditions
        if isdefined(self.inputs.contrasts):
            contrasts = self.inputs.contrasts
            new_contrasts = list()
            for name, contrast_type, contrast_conditions, contrast_values in contrasts:
                # drop the whole contrast if it puts nonzero weight on a
                # condition that was filtered out above
                if any(condition not in conditions  # is missing
                       and not isclose(value, 0)  # but is part of the contrast
                       for condition, value in zip(contrast_conditions,
                                                   contrast_values)):
                    continue

                # keep only (condition, weight) pairs for surviving conditions
                filtered_contrast = [(condition, value)
                                     for condition, value in zip(
                                         contrast_conditions, contrast_values)
                                     if condition in conditions]
                contrast_conditions, contrast_values = map(
                    list, zip(*filtered_contrast))

                new_contrasts.append((
                    name,
                    contrast_type,
                    contrast_conditions,
                    contrast_values,
                ))

            self._results["contrast_names"] = [
                name for name, _, _, _ in new_contrasts
            ]
            self._results["contrasts"] = new_contrasts

        self._results["condition_names"] = list(conditions)
        self._results["subject_info"] = Bunch(conditions=conditions,
                                              onsets=onsets,
                                              durations=durations)

        return runtime
def get_subject_info(subject_id):
    """Read per-run/per-condition onset files for ``subject_id`` and return
    two subject_info Bunches (one per run) with sorted 'congruent' and
    'incongruent' onsets.

    Onset files hold "<onset> <value>" lines; lines whose value is '0.00'
    are skipped.  Conditions 01/02 are congruent, 03/04 incongruent.
    """
    from os.path import join as opj, expanduser
    # BUG FIX: open() does not expand '~'; expand the home directory
    # explicitly so the onset files can actually be found.
    path = expanduser('~/nipype_tutorial/data/%s' % subject_id)
    onset_info = []
    for run in ['01', '02']:
        for cond in ['01', '02', '03', '04']:
            onset_file = opj(path, 'onset_run0%s_cond0%s.txt' % (run, cond))
            with open(onset_file, 'rt') as f:
                for line in f:
                    info = line.strip().split()
                    # keep only events with a nonzero second column
                    if info[1] != '0.00':
                        onset_info.append(
                            ['cond0%s' % cond,
                             'run0%s' % run,
                             float(info[0])])

    # group onsets by (run, congruency); conds 001/002 -> congruent,
    # conds 003/004 -> incongruent
    grouped = {('run001', True): [], ('run001', False): [],
               ('run002', True): [], ('run002', False): []}
    for cond, run, onset in onset_info:
        if cond in ('cond001', 'cond002'):
            grouped[(run, True)].append(onset)
        elif cond in ('cond003', 'cond004'):
            grouped[(run, False)].append(onset)

    onset_list = [
        sorted(grouped[('run001', True)]),
        sorted(grouped[('run001', False)]),
        sorted(grouped[('run002', True)]),
        sorted(grouped[('run002', False)])
    ]

    from nipype.interfaces.base import Bunch
    condition_names = ['congruent', 'incongruent']

    subjectinfo = []
    for r in range(2):
        # per run: [congruent onsets, incongruent onsets]
        onsets = [onset_list[r * 2], onset_list[r * 2 + 1]]
        subjectinfo.insert(
            r,
            Bunch(conditions=condition_names,
                  onsets=onsets,
                  durations=[[0], [0]],
                  amplitudes=None,
                  tmod=None,
                  pmod=None,
                  regressor_names=None,
                  regressors=None))
    return subjectinfo
Beispiel #30
0
    def test_runtime_error(self):
        # `cat` on a nonexistent file should raise RuntimeError through
        # submit_cmdline; the trailing asserts only run if it doesn't.
        missing = op.join(tempfile.gettempdir(), "i_am_not_a_file")
        runtime = Bunch(cwd=os.getcwd(), environ=os.environ)
        with nt.assert_raises(RuntimeError):
            out = submission.submit_cmdline(runtime, ["cat", missing])
            error = "cat: {}: No such file or directory\n".format(missing)
            assert out.stderr == error