Example #1
def weird_convert_dti_dcm(in_dcm):

    import os
    import numpy as np
    import re
    from nipype.interfaces.dcm2nii import Dcm2nii
    from nipype.interfaces.fsl import Merge

    subjid = re.search('R[0-9X]+', in_dcm).group()
    year = re.search('_201[1234]', in_dcm).group()[1:]
    visit_dict = {'2012': 1, '2013': 2, '2014': 3, '2011': 4}
    visit = visit_dict[year]
    scanid = re.search('S[0-9]+', in_dcm).group()
    ton_dir = '/data1/cooked/TONf'
    test_fn = os.path.join(ton_dir, subjid, 'visit_{}'.format(visit), 'DTI',
                           '_'.join([subjid, 'visit', str(visit), 'DTI',
                                     scanid])) + '.bvals'
    if os.path.exists(test_fn):
        assert np.all(np.loadtxt(test_fn) != 0)
    converter = Dcm2nii()
    converter.inputs.source_names = in_dcm
    converter.inputs.gzip_output = True
    converter.inputs.output_dir = os.getcwd()

    convert_result = converter.run()

    merger = Merge()
    merger.inputs.in_files = convert_result.outputs.converted_files
    merger.inputs.dimension = 't'
    merged_result = merger.run()
    fn_base = os.path.basename(in_dcm).split('.')[0]

    merged_file = os.path.join(os.getcwd(), fn_base + '.nii.gz')
    os.rename(merged_result.outputs.merged_file, merged_file)

    in_bval = convert_result.outputs.bvals[0]
    in_bvec = convert_result.outputs.bvecs[0]
    b0_idx = 0
    assert np.all(np.loadtxt(in_bval) != 0)

    # Load (and transpose!!)
    bvec_arr = np.loadtxt(in_bvec).T
    out_bvec = np.zeros((bvec_arr.shape[0] + 1,
                         bvec_arr.shape[1]))
    out_bvec[:] = np.nan
    out_bvec[b0_idx, :] = 0
    out_bvec[np.where(np.isnan(out_bvec))] = bvec_arr.flatten()

    bval_arr = np.loadtxt(in_bval)
    out_bval = np.zeros((bval_arr.shape[0] + 1,))
    out_bval[:] = np.nan
    out_bval[b0_idx] = 0
    out_bval[np.isnan(out_bval)] = bval_arr

    out_bvec_fn = os.path.join(os.getcwd(), fn_base + '.bvecs')
    np.savetxt(out_bvec_fn, out_bvec, fmt='%.8f')

    out_bval_fn = os.path.join(os.getcwd(), fn_base + '.bvals')

    np.savetxt(out_bval_fn, out_bval, fmt='%.6f')
    return merged_file, out_bvec_fn, out_bval_fn
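A quick sanity check of the NaN-fill trick above (pre-seed the b0 row with zeros, then pour the original gradient table into the remaining slots); a minimal sketch on a toy array, independent of nipype:

import numpy as np

# toy 3-direction gradient table (3 rows x 3 components)
bvec_arr = np.arange(1.0, 10.0).reshape(3, 3)

out = np.full((bvec_arr.shape[0] + 1, bvec_arr.shape[1]), np.nan)
out[0, :] = 0.0                          # pinned b0 row
out[np.isnan(out)] = bvec_arr.flatten()  # remaining slots filled in row-major order
print(out)
# [[0. 0. 0.]
#  [1. 2. 3.]
#  [4. 5. 6.]
#  [7. 8. 9.]]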
Example #2
def __init__(self, in_files=['path'],
             dimension='t', **options):
    # 'dimension' must be one of 't', 'x', 'y', 'z', 'a'
    from nipype.interfaces.fsl import Merge
    fu = Merge()
    fu.inputs.in_files = in_files
    fu.inputs.dimension = dimension
    for ef in options:
        setattr(fu.inputs, ef, options[ef])
    self.res = fu.run()
Example #3
def merge_files(output_dir, subject, file_list):
    import os
    from nipype.interfaces.fsl import Merge

    merger = Merge()
    merger.inputs.in_files = [os.path.join(output_dir, subject, f) for f in file_list]
    merger.inputs.dimension = 't'
    merger.inputs.output_type = 'NIFTI_GZ'
    merger.inputs.merged_file = os.path.join(output_dir, subject, 'DWI_concat.nii.gz')
    result = merger.run()
    print('Merging complete')
    return None
Example #4
def getonefile(files, name):
    from nipype.interfaces.fsl import Merge

    merger = Merge()
    merger.inputs.in_files = files
    merger.inputs.dimension = "t"
    merger.inputs.tr = 2.00
    merger.inputs.output_type = "NIFTI_GZ"
    merger.inputs.merged_file = "/media/phoenix/SeagateDrive/Dataset/Outputs/Belief_Updating/Higher_level_inputs/%s.nii.gz" % (
        name)
    merger.run()
Example #5
def fslmath_Merge(filelist, output_prefix):
    from nipype.interfaces.fsl import Merge
    import os

    mergeM = Merge()
    mergeM.inputs.in_files = filelist
    mergeM.inputs.dimension = 't'
    mergeM.inputs.output_type = 'NIFTI_GZ'
    print "Merge [" + os.path.basename(filelist[0]) + ".." + os.path.basename(filelist[len(filelist)-1]) + "]:" + mergeM.cmdline
    res = mergeM.run()
    outfile = d2s.move_to_results(res.outputs.merged_file, output_prefix)
    return outfile
Example #6
def cope_merge_wf(subject_id, sink_directory, name='cope_merge_wf'):
    cope_merge_wf = Workflow(name='cope_merge_wf')

    info = dict(
        learning_cope=[['subject_id']],  #dictionary for Datagrabber
        nonlearning_cope=[['subject_id']])

    #node to grab corr and incorr cope files
    datasource = Node(DataGrabber(infields=['subject_id'],
                                  outfields=info.keys()),
                      name='datasource')
    datasource.inputs.template = '*'
    datasource.inputs.subject_id = subject_id
    datasource.inputs.base_directory = os.path.abspath(
        '/home/data/madlab/data/mri/wmaze/frstlvl/model_LSS2')
    datasource.inputs.field_template = dict(
        learning_cope='%s/deriv/learn/*.nii.gz',
        nonlearning_cope='%s/deriv/nonlearn/*.nii.gz')
    datasource.inputs.template_args = info
    datasource.inputs.sort_filelist = True
    datasource.inputs.ignore_exception = False
    datasource.inputs.raise_on_empty = True

    #node to merge learning trials across all 6 runs
    merge_learning = Node(Merge(), name='merge_learning')
    merge_learning.inputs.dimension = 't'
    merge_learning.inputs.output_type = 'NIFTI_GZ'
    merge_learning.inputs.merged_file = 'cope_learning.nii.gz'
    merge_learning.inputs.tr = 2.00
    cope_merge_wf.connect(datasource, 'learning_cope', merge_learning,
                          'in_files')

    #node to merge nonlearning trials across all 6 runs
    merge_nonlearning = Node(Merge(), name='merge_nonlearning')
    merge_nonlearning.inputs.dimension = 't'
    merge_nonlearning.inputs.output_type = 'NIFTI_GZ'
    merge_nonlearning.inputs.merged_file = 'cope_nonlearning.nii.gz'
    merge_nonlearning.inputs.tr = 2.00
    cope_merge_wf.connect(datasource, 'nonlearning_cope', merge_nonlearning,
                          'in_files')

    #node to output data
    dsink = Node(DataSink(), name='dsink')
    dsink.inputs.base_directory = sink_directory
    dsink.inputs.container = subject_id
    cope_merge_wf.connect(merge_learning, 'merged_file', dsink,
                          'merged.@learning')
    cope_merge_wf.connect(merge_nonlearning, 'merged_file', dsink,
                          'merged.@nonlearning')

    return cope_merge_wf
Example #7
def coregistration_4D(source_file, ref, out_file=None, spm_path=None):
    '''
    Coregistration with SPM + FSL for 4D files.
    Why? Neither SPM nor FSL can do this by default.
    :param source_file: path to input 4D file
    :param ref: reference file to co-register the source-file to
    :param out_file: output file
    :param spm_path: path to spm
    :return: path to coregistered file
    '''
    if spm_path is not None:
        mlab.MatlabCommand.set_default_paths(spm_path)
    if spm.SPMCommand().version is None:
        raise Exception('SPM path not set correctly:', spm_path,
                        spm.SPMCommand().version)
    main_dir, source_file_name = os.path.split(source_file)
    if out_file is None:
        out_file = os.path.join(main_dir, 'r' + source_file_name)

    split_folder = os.path.join(main_dir, '4D_split')
    if not os.path.exists(split_folder):
        os.mkdir(split_folder)
    split = Split(in_file=source_file, dimension='t')
    split.inputs.out_base_name = os.path.join(split_folder, '4D_vol_')
    split.inputs.output_type = 'NIFTI'
    split = split.run()

    split_files = split.outputs.out_files
    index_file = split_files.pop(0)

    coreg = spm.Coregister()
    coreg.inputs.target = ref
    coreg.inputs.source = index_file
    coreg.inputs.apply_to_files = split_files
    coreg = coreg.run()

    merger = Merge()
    merger.inputs.in_files = coreg.outputs.coregistered_files
    merger.inputs.dimension = 't'
    merger.inputs.output_type = 'NIFTI_GZ'
    merger.inputs.merged_file = out_file
    merger = merger.run()

    shutil.rmtree(split_folder)

    return merger.outputs.merged_file
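A hypothetical invocation of the function above; every path is a placeholder:

reg_file = coregistration_4D('/data/sub-01/func_4D.nii',
                             ref='/data/sub-01/anat.nii',
                             spm_path='/opt/spm12')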
Example #8
def cale(input_dir, output_dir):

    import numpy as np
    import os.path as op
    from glob import glob
    from nipype.interfaces.fsl import BinaryMaths, MeanImage, Merge, Threshold

    fns = glob(op.join(input_dir, '*.nii.gz'))

    merger = Merge()
    merger.inputs.in_files = fns
    merger.inputs.dimension = 't'
    merger.inputs.merged_file = op.join(output_dir, 'cALE.nii.gz')
    merger.run()

    meanimg = MeanImage()
    meanimg.inputs.in_file = op.join(output_dir, 'cALE.nii.gz')
    meanimg.inputs.dimension = 'T'
    meanimg.inputs.out_file = op.join(output_dir, 'cALE.nii.gz')
    meanimg.run()

    # scale the temporal mean back up by the number of inputs
    # (BinaryMaths with a scalar operand replaces the original
    # MultiImageMaths call, which requires operand files)
    maths = BinaryMaths()
    maths.inputs.in_file = op.join(output_dir, 'cALE.nii.gz')
    maths.inputs.operation = 'mul'
    maths.inputs.operand_value = float(len(fns))
    maths.inputs.out_file = op.join(output_dir, 'cALE.nii.gz')
    maths.run()

    thresh = Threshold()
    thresh.inputs.in_file = op.join(output_dir, 'cALE.nii.gz')
    thresh.inputs.thresh = np.floor(len(fns) / 2)
    thresh.inputs.direction = 'below'
    thresh.inputs.out_file = op.join(
        output_dir, 'cALE_thresh-{0}.nii.gz'.format(np.floor(len(fns) / 2)))
    thresh.run()
Example #9
def make_full_workflow(session='7TGE', n_fmap=10):
    n_in = Node(IdentityInterface(fields=[
        'T1w',
        'func',
        'fmap',
        'subject',
    ]),
                name='input')

    n_out = Node(IdentityInterface(fields=[
        'func1',
        'func2',
        'filtered1',
        'filtered2',
        'mat_func2struct',
    ]),
                 name='output')

    n_merge = Node(interface=Merge(), name='merge')
    n_merge.inputs.dimension = 't'

    w_preproc = make_workflow(n_fmap)

    w_smooth1 = make_w_smooth('1')
    w_smooth2 = make_w_smooth('2')

    w = Workflow(session)

    w.connect(n_in, 'func', n_merge, 'in_files')
    w.connect(n_merge, 'merged_file', w_preproc, 'input.func')
    w.connect(n_in, 'fmap', w_preproc, 'input.fmap')
    w.connect(w_preproc, 'output.func1', n_out, 'func1')
    w.connect(w_preproc, 'output.func2', n_out, 'func2')
    w.connect(w_preproc, 'output.func1', w_smooth1, 'input.func')
    w.connect(w_preproc, 'output.func2', w_smooth2, 'input.func')
    w.connect(w_smooth1, 'output.func', n_out, 'filtered1')
    w.connect(w_smooth2, 'output.func', n_out, 'filtered2')

    if session.startswith('7T'):
        w_coreg_7T = make_w_coreg_7T()
        w.connect(n_in, 'T1w', w_coreg_7T, 'input.T1w')
        w.connect(w_preproc, 'output.mean', w_coreg_7T, 'input.mean')
        w.connect(w_coreg_7T, 'output.mat_ants', n_out, 'mat_func2struct')

    else:
        w_coreg = make_w_freesurfer2func()
        w_coreg_3T = make_w_coreg_3T_ants()
        """
        w.connect(n_in, 'T1w', w_coreg, 'input.T1w')
        w.connect(n_in, 'subject', w_coreg, 'input.subject')
        w.connect(w_preproc, 'output.mean', w_coreg, 'input.mean')
        """

        w.connect(n_in, 'T1w', w_coreg_3T, 'input.T1w')
        w.connect(w_preproc, 'output.mean', w_coreg_3T, 'input.mean')
        w.connect(w_coreg_3T, 'output.mat_func2struct', n_out,
                  'mat_func2struct')

    return w
Example #10
def merge_regs(out_dir, out_file):
    import os
    import shlex
    import subprocess
    from nipype.interfaces.fsl import Merge

    folder_list = [os.path.join(out_dir, folder) for folder in os.listdir(out_dir)]
    print(folder_list)
    file_list = [
        os.path.join(folder, os.listdir(folder)[0]) for folder in folder_list
    ]
    merger = Merge()
    merger.inputs.in_files = file_list
    merger.inputs.dimension = 't'
    merger.inputs.merged_file = out_file
    print(merger.cmdline)
    command = shlex.split(merger.cmdline)
    subprocess.call(command)
Example #11
def make_w_topup():
    n_in = Node(IdentityInterface(fields=[
        'func',  # after motion correction
        'fmap',
        ]), name='input')

    n_out = Node(IdentityInterface(fields=[
        'func',
        ]), name='output')

    n_mean_func = Node(MeanImage(), name='mean_func')

    n_mc_fmap = Node(MCFLIRT(), name='motion_correction_fmap')
    n_mean_fmap = Node(MeanImage(), name='mean_fmap')

    n_list = Node(Merge_list(2), name='list')
    n_merge = Node(Merge(), name='merge')
    n_merge.inputs.dimension = 't'

    n_topup = Node(TOPUP(), name='topup')
    n_topup.inputs.encoding_file = _generate_acqparams()
    n_topup.inputs.subsamp = 1  # slower, but it accounts for odd number of slices

    n_acqparam = Node(function_acq_params, name='acquisition_parameters')

    n_apply = Node(ApplyTOPUP(), name='topup_apply')
    n_apply.inputs.method = 'jac'

    w = Workflow('topup')
    w.connect(n_in, 'fmap', n_mc_fmap, 'in_file')
    w.connect(n_mc_fmap, 'out_file', n_mean_fmap, 'in_file')
    w.connect(n_in, 'func', n_mean_func, 'in_file')
    w.connect(n_mean_func, 'out_file', n_list, 'in1')
    w.connect(n_mean_fmap, 'out_file', n_list, 'in2')
    w.connect(n_list, 'out', n_merge, 'in_files')
    w.connect(n_merge, 'merged_file', n_topup, 'in_file')

    w.connect(n_in, 'func', n_apply, 'in_files')
    w.connect(n_topup, 'out_fieldcoef', n_apply, 'in_topup_fieldcoef')
    w.connect(n_topup, 'out_movpar', n_apply, 'in_topup_movpar')
    w.connect(n_in, 'func', n_acqparam, 'in_file')
    w.connect(n_acqparam, 'encoding_file', n_apply, 'encoding_file')

    w.connect(n_apply, 'out_corrected', n_out, 'func')

    return w
Example #12
runWithRandomise = True
nperms = 10000
# runPair=True
run1Sample = True

if run1Sample:
    for fb in [0, 1]:
        x = []
        for subj in subject_list:
            fbLoc = subjectinfo(subj, fb)
            fname = '/home/jmuraskin/Projects/CCD/CPAC-out/pipeline_CCD_v1/%s_data_/dr_tempreg_maps_files_to_standard_smooth/_scan_feedback_%d/_csf_threshold_0.96/_gm_threshold_0.7/_wm_threshold_0.96/_compcor_ncomponents_5_selector_pc10.linear1.wm0.global0.motion1.quadratic1.gm0.compcor1.csf1/_spatial_map_PNAS_Smith09_rsn10/_fwhm_6/_dr_tempreg_maps_files_smooth_03/temp_reg_map_0003_antswarp_maths.nii.gz' % (subj, fbLoc)
            # fname = '/home/jmuraskin/Projects/CCD/CPAC-out/pipeline_CCD_v1/%s_data_/dr_tempreg_maps_files_to_standard_smooth/_scan_feedback_%d/%s%d.nii.gz' % (fbLoc,subj,t,i)
            x.append(fname)
        subjs = len(x)
        merger = Merge()
        merger.inputs.in_files = x
        merger.inputs.dimension = 't'
        merger.inputs.output_type = 'NIFTI_GZ'
        merger.inputs.merged_file = './DMN_merged_%s.nii.gz' % fbNames[fb]
        merger.run()
        # get mean FD values for each subject and add as covariate
        meanFD = zscore(motionTest[motionTest.FB == fbNames[fb]][motionTest.Subject_ID.isin(subject_list)]['meanFD'])
        model = MultipleRegressDesign()
        model.inputs.contrasts = [['group mean', 'T',['reg1'],[1]],['group neg mean', 'T',['reg1'],[-1]]]
        model.inputs.regressors = dict(reg1=list(motionTest[motionTest.FB==fbNames[fb]][motionTest.Subject_ID.isin(subject_list)]['scanorder']-1.5),FD=list(meanFD))
        model.run()

        if runWithRandomise:
            os.mkdir(fbNames[fb])
            randomiseCommand='./randomise_forpython.sh -i %s -o ./%s/fb -D -d design.mat -t design.con -e design.grp -m %s -T -n %d' % ('DMN_merged_%s.nii.gz' % fbNames[fb],fbNames[fb],'/usr/share/fsl/5.0/data/standard/MNI152_T1_3mm_brain_mask.nii.gz',nperms)
Example #13
    readout_vec.shape
    b0_acq_param = np.concatenate((acq_param_ap, acq_param_pa), axis=0)
    b0_acq_param = np.concatenate((b0_acq_param, readout_vec), axis=1)
    len(b0_acq_param)

    np.savetxt(os.path.join(bids_dir,
                            'derivatives/dtiprep/config/acq_param.txt'),
               b0_acq_param,
               fmt='%s')
    print('created acquisition parameters file')

#merge ap and pa nifti files
niftis = np.array(sorted(glob.glob('*acq-71??_dwi.nii.gz')))

from nipype.interfaces.fsl import Merge
merger = Merge(in_files=niftis.tolist(), dimension='t', output_type='NIFTI_GZ')
cmd = merger.cmdline
print(cmd)
os.system(cmd)

output = os.path.abspath(glob.glob('*acq-71??_dwi_merged.nii.gz')[0])
os.rename(
    output,
    os.path.join(os.path.dirname(output), 'sub-%s_AP_PA_dwi.nii.gz' % subject))
print('merged AP and PA into single image')

#split nifti files into separate volumes
from nipype.interfaces.fsl import Split
split = Split(in_file='sub-%s_acq-71AP_dwi.nii.gz' % subject,
              dimension='t',
              out_base_name='ap_vol')
Example #14
def group_multregress_openfmri(dataset_dir,
                               model_id=None,
                               task_id=None,
                               l1output_dir=None,
                               out_dir=None,
                               no_reversal=False,
                               plugin=None,
                               plugin_args=None,
                               flamemodel='flame1',
                               nonparametric=False,
                               use_spm=False):

    meta_workflow = Workflow(name='mult_regress')
    meta_workflow.base_dir = work_dir
    for task in task_id:
        task_name = get_taskname(dataset_dir, task)
        cope_ids = l1_contrasts_num(model_id, task_name, dataset_dir)
        regressors_needed, contrasts, groups, subj_list = get_sub_vars(
            dataset_dir, task_name, model_id)
        for idx, contrast in enumerate(contrasts):
            wk = Workflow(name='model_%03d_task_%03d_contrast_%s' %
                          (model_id, task, contrast[0][0]))

            info = Node(util.IdentityInterface(
                fields=['model_id', 'task_id', 'dataset_dir', 'subj_list']),
                        name='infosource')
            info.inputs.model_id = model_id
            info.inputs.task_id = task
            info.inputs.dataset_dir = dataset_dir

            dg = Node(DataGrabber(infields=['model_id', 'task_id', 'cope_id'],
                                  outfields=['copes', 'varcopes']),
                      name='grabber')
            dg.inputs.template = os.path.join(
                l1output_dir,
                'model%03d/task%03d/%s/%scopes/%smni/%scope%02d.nii%s')
            if use_spm:
                dg.inputs.template_args['copes'] = [[
                    'model_id', 'task_id', subj_list, '', 'spm/', '',
                    'cope_id', ''
                ]]
                dg.inputs.template_args['varcopes'] = [[
                    'model_id', 'task_id', subj_list, 'var', 'spm/', 'var',
                    'cope_id', '.gz'
                ]]
            else:
                dg.inputs.template_args['copes'] = [[
                    'model_id', 'task_id', subj_list, '', '', '', 'cope_id',
                    '.gz'
                ]]
                dg.inputs.template_args['varcopes'] = [[
                    'model_id', 'task_id', subj_list, 'var', '', 'var',
                    'cope_id', '.gz'
                ]]
            dg.iterables = ('cope_id', cope_ids)
            dg.inputs.sort_filelist = False

            wk.connect(info, 'model_id', dg, 'model_id')
            wk.connect(info, 'task_id', dg, 'task_id')

            model = Node(MultipleRegressDesign(), name='l2model')
            model.inputs.groups = groups
            model.inputs.contrasts = contrasts[idx]
            model.inputs.regressors = regressors_needed[idx]

            mergecopes = Node(Merge(dimension='t'), name='merge_copes')
            wk.connect(dg, 'copes', mergecopes, 'in_files')

            if flamemodel != 'ols':
                mergevarcopes = Node(Merge(dimension='t'),
                                     name='merge_varcopes')
                wk.connect(dg, 'varcopes', mergevarcopes, 'in_files')

            mask_file = fsl.Info.standard_image(
                'MNI152_T1_2mm_brain_mask.nii.gz')
            flame = Node(FLAMEO(), name='flameo')
            flame.inputs.mask_file = mask_file
            flame.inputs.run_mode = flamemodel
            #flame.inputs.infer_outliers = True

            wk.connect(model, 'design_mat', flame, 'design_file')
            wk.connect(model, 'design_con', flame, 't_con_file')
            wk.connect(mergecopes, 'merged_file', flame, 'cope_file')
            if flamemodel != 'ols':
                wk.connect(mergevarcopes, 'merged_file', flame,
                           'var_cope_file')
            wk.connect(model, 'design_grp', flame, 'cov_split_file')

            if nonparametric:
                palm = Node(Function(input_names=[
                    'cope_file', 'design_file', 'contrast_file', 'group_file',
                    'mask_file', 'cluster_threshold'
                ],
                                     output_names=['palm_outputs'],
                                     function=run_palm),
                            name='palm')
                palm.inputs.cluster_threshold = 3.09
                palm.inputs.mask_file = mask_file
                palm.plugin_args = {
                    'sbatch_args': '-p om_all_nodes -N1 -c2 --mem=10G',
                    'overwrite': True
                }
                wk.connect(model, 'design_mat', palm, 'design_file')
                wk.connect(model, 'design_con', palm, 'contrast_file')
                wk.connect(mergecopes, 'merged_file', palm, 'cope_file')
                wk.connect(model, 'design_grp', palm, 'group_file')

            smoothest = Node(SmoothEstimate(), name='smooth_estimate')
            wk.connect(flame, 'zstats', smoothest, 'zstat_file')
            smoothest.inputs.mask_file = mask_file

            cluster = Node(Cluster(), name='cluster')
            wk.connect(smoothest, 'dlh', cluster, 'dlh')
            wk.connect(smoothest, 'volume', cluster, 'volume')
            cluster.inputs.connectivity = 26
            cluster.inputs.threshold = 2.3
            cluster.inputs.pthreshold = 0.05
            cluster.inputs.out_threshold_file = True
            cluster.inputs.out_index_file = True
            cluster.inputs.out_localmax_txt_file = True

            wk.connect(flame, 'zstats', cluster, 'in_file')

            ztopval = Node(ImageMaths(op_string='-ztop', suffix='_pval'),
                           name='z2pval')
            wk.connect(flame, 'zstats', ztopval, 'in_file')

            sinker = Node(DataSink(), name='sinker')
            sinker.inputs.base_directory = os.path.join(
                out_dir, 'task%03d' % task, contrast[0][0])
            sinker.inputs.substitutions = [('_cope_id', 'contrast'),
                                           ('_maths_', '_reversed_')]

            wk.connect(flame, 'zstats', sinker, 'stats')
            wk.connect(cluster, 'threshold_file', sinker, 'stats.@thr')
            wk.connect(cluster, 'index_file', sinker, 'stats.@index')
            wk.connect(cluster, 'localmax_txt_file', sinker, 'stats.@localmax')
            if nonparametric:
                wk.connect(palm, 'palm_outputs', sinker, 'stats.palm')

            if not no_reversal:
                zstats_reverse = Node(BinaryMaths(), name='zstats_reverse')
                zstats_reverse.inputs.operation = 'mul'
                zstats_reverse.inputs.operand_value = -1
                wk.connect(flame, 'zstats', zstats_reverse, 'in_file')

                cluster2 = cluster.clone(name='cluster2')
                wk.connect(smoothest, 'dlh', cluster2, 'dlh')
                wk.connect(smoothest, 'volume', cluster2, 'volume')
                wk.connect(zstats_reverse, 'out_file', cluster2, 'in_file')

                ztopval2 = ztopval.clone(name='ztopval2')
                wk.connect(zstats_reverse, 'out_file', ztopval2, 'in_file')

                wk.connect(zstats_reverse, 'out_file', sinker, 'stats.@neg')
                wk.connect(cluster2, 'threshold_file', sinker,
                           'stats.@neg_thr')
                wk.connect(cluster2, 'index_file', sinker, 'stats.@neg_index')
                wk.connect(cluster2, 'localmax_txt_file', sinker,
                           'stats.@neg_localmax')
            meta_workflow.add_nodes([wk])
    return meta_workflow
import os 
Example #16
def Couple_Preproc_Pipeline(base_dir=None,
                            output_dir=None,
                            subject_id=None,
                            spm_path=None):
    """ Create a preprocessing workflow for the Couples Conflict Study using nipype

    Args:
        base_dir: path to data folder where raw subject folder is located
        output_dir: path to where key output files should be saved
        subject_id: subject_id (str)
        spm_path: path to spm folder

    Returns:
        workflow: a nipype workflow that can be run
        
    """

    from nipype.interfaces.dcm2nii import Dcm2nii
    from nipype.interfaces.fsl import Merge, TOPUP, ApplyTOPUP
    import nipype.interfaces.io as nio
    import nipype.interfaces.utility as util
    from nipype.interfaces.utility import Merge as Merge_List
    from nipype.pipeline.engine import Node, Workflow
    from nipype.interfaces.fsl.maths import UnaryMaths
    from nipype.interfaces.nipy.preprocess import Trim
    from nipype.algorithms.rapidart import ArtifactDetect
    from nipype.interfaces import spm
    from nipype.interfaces.spm import Normalize12
    from nipype.algorithms.misc import Gunzip
    from nipype.interfaces.nipy.preprocess import ComputeMask
    import nipype.interfaces.matlab as mlab
    from nltools.utils import get_resource_path, get_vox_dims, get_n_volumes
    from nltools.interfaces import Plot_Coregistration_Montage, PlotRealignmentParameters, Create_Covariates
    import os
    import glob

    ########################################
    ## Setup Paths and Nodes
    ########################################

    # Specify Paths
    canonical_file = os.path.join(spm_path, 'canonical', 'single_subj_T1.nii')
    template_file = os.path.join(spm_path, 'tpm', 'TPM.nii')

    # Set the way matlab should be called
    mlab.MatlabCommand.set_default_matlab_cmd("matlab -nodesktop -nosplash")
    mlab.MatlabCommand.set_default_paths(spm_path)

    # Get File Names for different types of scans.  Parse into separate processing streams
    datasource = Node(interface=nio.DataGrabber(
        infields=['subject_id'], outfields=['struct', 'ap', 'pa']),
                      name='datasource')
    datasource.inputs.base_directory = base_dir
    datasource.inputs.template = '*'
    datasource.inputs.field_template = {
        'struct': '%s/Study*/t1w_32ch_mpr_08mm*',
        'ap': '%s/Study*/distortion_corr_32ch_ap*',
        'pa': '%s/Study*/distortion_corr_32ch_pa*'
    }
    datasource.inputs.template_args = {
        'struct': [['subject_id']],
        'ap': [['subject_id']],
        'pa': [['subject_id']]
    }
    datasource.inputs.subject_id = subject_id
    datasource.inputs.sort_filelist = True

    # iterate over functional scans to define paths
    scan_file_list = glob.glob(
        os.path.join(base_dir, subject_id, 'Study*', '*'))
    func_list = [s for s in scan_file_list if "romcon_ap_32ch_mb8" in s]
    func_list = [s for s in func_list
                 if "SBRef" not in s]  # Exclude sbref for now.
    func_source = Node(interface=util.IdentityInterface(fields=['scan']),
                       name="func_source")
    func_source.iterables = ('scan', func_list)

    # Create Separate Converter Nodes for each different type of file. (dist corr scans need to be done before functional)
    ap_dcm2nii = Node(interface=Dcm2nii(), name='ap_dcm2nii')
    ap_dcm2nii.inputs.gzip_output = True
    ap_dcm2nii.inputs.output_dir = '.'
    ap_dcm2nii.inputs.date_in_filename = False

    pa_dcm2nii = Node(interface=Dcm2nii(), name='pa_dcm2nii')
    pa_dcm2nii.inputs.gzip_output = True
    pa_dcm2nii.inputs.output_dir = '.'
    pa_dcm2nii.inputs.date_in_filename = False

    f_dcm2nii = Node(interface=Dcm2nii(), name='f_dcm2nii')
    f_dcm2nii.inputs.gzip_output = True
    f_dcm2nii.inputs.output_dir = '.'
    f_dcm2nii.inputs.date_in_filename = False

    s_dcm2nii = Node(interface=Dcm2nii(), name='s_dcm2nii')
    s_dcm2nii.inputs.gzip_output = True
    s_dcm2nii.inputs.output_dir = '.'
    s_dcm2nii.inputs.date_in_filename = False

    ########################################
    ## Setup Nodes for distortion correction
    ########################################

    # merge output files into list
    merge_to_file_list = Node(interface=Merge_List(2),
                              infields=['in1', 'in2'],
                              name='merge_to_file_list')

    # fsl merge AP + PA files (depends on direction)
    merger = Node(interface=Merge(dimension='t'), name='merger')
    merger.inputs.output_type = 'NIFTI_GZ'

    # use topup to create distortion correction map
    topup = Node(interface=TOPUP(), name='topup')
    topup.inputs.encoding_file = os.path.join(get_resource_path(),
                                              'epi_params_APPA_MB8.txt')
    topup.inputs.output_type = "NIFTI_GZ"
    topup.inputs.config = 'b02b0.cnf'

    # apply topup to all functional images
    apply_topup = Node(interface=ApplyTOPUP(), name='apply_topup')
    apply_topup.inputs.in_index = [1]
    apply_topup.inputs.encoding_file = os.path.join(get_resource_path(),
                                                    'epi_params_APPA_MB8.txt')
    apply_topup.inputs.output_type = "NIFTI_GZ"
    apply_topup.inputs.method = 'jac'
    apply_topup.inputs.interp = 'spline'

    # Clear out Zeros from spline interpolation using absolute value.
    abs_maths = Node(interface=UnaryMaths(), name='abs_maths')
    abs_maths.inputs.operation = 'abs'

    ########################################
    ## Preprocessing
    ########################################

    # Trim - remove first 10 TRs
    n_vols = 10
    trim = Node(interface=Trim(), name='trim')
    trim.inputs.begin_index = n_vols

    #Realignment - 6 parameters - realign to first image of very first series.
    realign = Node(interface=spm.Realign(), name="realign")
    realign.inputs.register_to_mean = True

    #Coregister - 12 parameters
    coregister = Node(interface=spm.Coregister(), name="coregister")
    coregister.inputs.jobtype = 'estwrite'

    #Plot Realignment
    plot_realign = Node(interface=PlotRealignmentParameters(),
                        name="plot_realign")

    #Artifact Detection
    art = Node(interface=ArtifactDetect(), name="art")
    art.inputs.use_differences = [True, False]
    art.inputs.use_norm = True
    art.inputs.norm_threshold = 1
    art.inputs.zintensity_threshold = 3
    art.inputs.mask_type = 'file'
    art.inputs.parameter_source = 'SPM'

    # Gunzip - unzip the functional and structural images
    gunzip_struc = Node(Gunzip(), name="gunzip_struc")
    gunzip_func = Node(Gunzip(), name="gunzip_func")

    # Normalize - normalizes functional and structural images to the MNI template
    normalize = Node(interface=Normalize12(jobtype='estwrite',
                                           tpm=template_file),
                     name="normalize")

    #Plot normalization Check
    plot_normalization_check = Node(interface=Plot_Coregistration_Montage(),
                                    name="plot_normalization_check")
    plot_normalization_check.inputs.canonical_img = canonical_file

    #Create Mask
    compute_mask = Node(interface=ComputeMask(), name="compute_mask")
    #remove lower 5% of histogram of mean image
    compute_mask.inputs.m = .05

    #Smooth
    #implicit masking (.im) = 0, dtype = 0
    smooth = Node(interface=spm.Smooth(), name="smooth")
    smooth.inputs.fwhm = 6

    #Create Covariate matrix
    make_cov = Node(interface=Create_Covariates(), name="make_cov")

    # Create a datasink to clean up output files
    datasink = Node(interface=nio.DataSink(), name='datasink')
    datasink.inputs.base_directory = output_dir
    datasink.inputs.container = subject_id

    ########################################
    # Create Workflow
    ########################################

    workflow = Workflow(name='Preprocessed')
    workflow.base_dir = os.path.join(base_dir, subject_id)
    workflow.connect([
        (datasource, ap_dcm2nii, [('ap', 'source_dir')]),
        (datasource, pa_dcm2nii, [('pa', 'source_dir')]),
        (datasource, s_dcm2nii, [('struct', 'source_dir')]),
        (func_source, f_dcm2nii, [('scan', 'source_dir')]),
        (ap_dcm2nii, merge_to_file_list, [('converted_files', 'in1')]),
        (pa_dcm2nii, merge_to_file_list, [('converted_files', 'in2')]),
        (merge_to_file_list, merger, [('out', 'in_files')]),
        (merger, topup, [('merged_file', 'in_file')]),
        (topup, apply_topup, [('out_fieldcoef', 'in_topup_fieldcoef'),
                              ('out_movpar', 'in_topup_movpar')]),
        (f_dcm2nii, trim, [('converted_files', 'in_file')]),
        (trim, apply_topup, [('out_file', 'in_files')]),
        (apply_topup, abs_maths, [('out_corrected', 'in_file')]),
        (abs_maths, gunzip_func, [('out_file', 'in_file')]),
        (gunzip_func, realign, [('out_file', 'in_files')]),
        (s_dcm2nii, gunzip_struc, [('converted_files', 'in_file')]),
        (gunzip_struc, coregister, [('out_file', 'source')]),
        (coregister, normalize, [('coregistered_source', 'image_to_align')]),
        (realign, coregister, [('mean_image', 'target'),
                               ('realigned_files', 'apply_to_files')]),
        (realign, normalize, [(('mean_image', get_vox_dims),
                               'write_voxel_sizes')]),
        (coregister, normalize, [('coregistered_files', 'apply_to_files')]),
        (normalize, smooth, [('normalized_files', 'in_files')]),
        (realign, compute_mask, [('mean_image', 'mean_volume')]),
        (compute_mask, art, [('brain_mask', 'mask_file')]),
        (realign, art, [('realignment_parameters', 'realignment_parameters'),
                        ('realigned_files', 'realigned_files')]),
        (realign, plot_realign, [('realignment_parameters',
                                  'realignment_parameters')]),
        (normalize, plot_normalization_check, [('normalized_files', 'wra_img')
                                               ]),
        (realign, make_cov, [('realignment_parameters',
                              'realignment_parameters')]),
        (art, make_cov, [('outlier_files', 'spike_id')]),
        (normalize, datasink, [('normalized_files', 'structural.@normalize')]),
        (coregister, datasink, [('coregistered_source', 'structural.@struct')
                                ]),
        (topup, datasink, [('out_fieldcoef', 'distortion.@fieldcoef')]),
        (topup, datasink, [('out_movpar', 'distortion.@movpar')]),
        (smooth, datasink, [('smoothed_files', 'functional.@smooth')]),
        (plot_realign, datasink, [('plot', 'functional.@plot_realign')]),
        (plot_normalization_check, datasink,
         [('plot', 'functional.@plot_normalization')]),
        (make_cov, datasink, [('covariates', 'functional.@covariates')])
    ])
    return workflow
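The returned workflow is only configured, not executed; a minimal sketch of running it, with placeholder paths:

wf = Couple_Preproc_Pipeline(base_dir='/data/raw',
                             output_dir='/data/processed',
                             subject_id='sub-01',
                             spm_path='/opt/spm12')
wf.run()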
Example #17
selectfiles = Node(DataGrabber(infields=['subject_path'],
                               outfields=['func', 'motion'],
                               base_directory='/',
                               template='%s/%s',
                               template_args=info,
                               sort_filelist=True),
                   name='selectfiles')

# For merging seed and nuisance mask paths and then distributing them downstream
seed_plus_nuisance = Node(utilMerge(2), name='seed_plus_nuisance')
seed_plus_nuisance.inputs.in2 = nuisance_masks

# 1. Obtain timeseries for seed and nuisance variables
# 1a. Merge all 3D functional images into a single 4D image
merge = Node(Merge(dimension='t', output_type='NIFTI', tr=TR), name='merge')

# 1b. Take mean of all voxels in each roi at each timepoint
ts = MapNode(ImageMeants(), name='ts', iterfield=['mask'])


# 1c. - Merge nuisance ts with motion parameters to create nuisance_regressors.txt.
#     - Take temporal derivatives of nuisance_regressors.txt and append to nuisance_regressors.txt
#       to create nuisance_regressors_tempderiv.txt
#     - Square nuisance_regressors_tempderiv.txt and append to nuisance_regressors_tempderiv.txt,
#       then append seed timeseries in front to create seed_nuisance_regressors.txt
def make_regressors_files(regressors_ts_list, mot_params, func):
    import numpy as np
    import os
    num_timepoints = len(func)
    num_nuisance = len(regressors_ts_list) - 1
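The function body is truncated above; a minimal sketch of the regressor construction the comments describe, on plain numpy arrays (using np.gradient as the temporal derivative is an assumption):

import numpy as np

def build_regressors(seed_ts, nuisance_ts_list, mot_params):
    # nuisance timeseries plus motion parameters, one column each
    nuisance = np.column_stack(list(nuisance_ts_list) + [mot_params])
    # append temporal derivatives -> nuisance_regressors_tempderiv
    tempderiv = np.column_stack([nuisance, np.gradient(nuisance, axis=0)])
    # append squared terms, then put the seed timeseries in front
    return np.column_stack([seed_ts, tempderiv, tempderiv ** 2])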
Example #18
from nipype.interfaces.fsl import Merge

merger = Merge()
merger.inputs.in_files = ['bold_001.nii.gz', 'bold_002.nii.gz']
merger.inputs.dimension = 't'
merger.inputs.output_type = 'NIFTI_GZ'
merger.inputs.tr = 2.5
merger.inputs.merged_file = 'merged.nii.gz'
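
Configuring inputs does not execute anything; to run the merge and retrieve the result, something like:

result = merger.run()
print(result.outputs.merged_file)  # absolute path of the merged 4D image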


Example #19
    ConstrainedSphericalDeconvolution,
    DWIBiasCorrect,
    DWIDenoise,
    Generate5tt,
    MRDeGibbs,
    ResponseSD,
)

#: A dictionary that should be imported in the project's settings and included
#: within the *ANALYSIS_INTERFACES* setting.
interfaces = {
    "apply_topup": {ApplyTOPUP().version: ApplyTOPUP},
    "binary_maths": {BinaryMaths().version: BinaryMaths},
    "BET": {BET().version: BET},
    "CAT12 Segmentation": {"12.7": Cat12Segmentation},
    "fslmerge": {Merge().version: Merge},
    "fslreorient2std": {Reorient2Std().version: Reorient2Std},
    "fslroi": {ExtractROI().version: ExtractROI},
    "FAST": {FastWrapper.version: FastWrapper},
    "FLIRT": {FLIRT().version: FLIRT},
    "FNIRT": {FNIRT().version: FNIRT},
    "FSL Anatomical Processing Script": {FslAnat.__version__: FslAnat},
    "mean_image": {MeanImage().version: MeanImage},
    "robustfov": {RobustFOV().version: RobustFOV},
    "ReconAll": {ReconAll().version: ReconAll},
    "SUSAN": {SUSAN().version: SUSAN},
    "topup": {TopupWrapper.version: TopupWrapper},
    "eddy": {Eddy().version: Eddy},
    "denoise": {DWIDenoise().version: DWIDenoise},
    "degibbs": {MRDeGibbs().version: MRDeGibbs},
    "bias_correct": {DWIBiasCorrect().version: DWIBiasCorrect},
Example #20
def group_onesample_openfmri(dataset_dir,
                             model_id=None,
                             task_id=None,
                             l1output_dir=None,
                             out_dir=None,
                             no_reversal=False):

    wk = Workflow(name='one_sample')
    wk.base_dir = os.path.abspath(work_dir)

    info = Node(
        util.IdentityInterface(fields=['model_id', 'task_id', 'dataset_dir']),
        name='infosource')
    info.inputs.model_id = model_id
    info.inputs.task_id = task_id
    info.inputs.dataset_dir = dataset_dir

    num_copes = contrasts_num(model_id, task_id, dataset_dir)

    dg = Node(DataGrabber(infields=['model_id', 'task_id', 'cope_id'],
                          outfields=['copes', 'varcopes']),
              name='grabber')
    dg.inputs.template = os.path.join(
        l1output_dir, 'model%03d/task%03d/*/%scopes/mni/%scope%02d.nii.gz')
    dg.inputs.template_args['copes'] = [[
        'model_id', 'task_id', '', '', 'cope_id'
    ]]
    dg.inputs.template_args['varcopes'] = [[
        'model_id', 'task_id', 'var', 'var', 'cope_id'
    ]]
    dg.iterables = ('cope_id', num_copes)

    dg.inputs.sort_filelist = True

    wk.connect(info, 'model_id', dg, 'model_id')
    wk.connect(info, 'task_id', dg, 'task_id')

    model = Node(L2Model(), name='l2model')

    wk.connect(dg, ('copes', get_len), model, 'num_copes')

    mergecopes = Node(Merge(dimension='t'), name='merge_copes')
    wk.connect(dg, 'copes', mergecopes, 'in_files')

    mergevarcopes = Node(Merge(dimension='t'), name='merge_varcopes')
    wk.connect(dg, 'varcopes', mergevarcopes, 'in_files')

    mask_file = fsl.Info.standard_image('MNI152_T1_2mm_brain_mask.nii.gz')
    flame = Node(FLAMEO(), name='flameo')
    flame.inputs.mask_file = mask_file
    flame.inputs.run_mode = 'flame1'

    wk.connect(model, 'design_mat', flame, 'design_file')
    wk.connect(model, 'design_con', flame, 't_con_file')
    wk.connect(mergecopes, 'merged_file', flame, 'cope_file')
    wk.connect(mergevarcopes, 'merged_file', flame, 'var_cope_file')
    wk.connect(model, 'design_grp', flame, 'cov_split_file')

    smoothest = Node(SmoothEstimate(), name='smooth_estimate')
    wk.connect(flame, 'zstats', smoothest, 'zstat_file')
    smoothest.inputs.mask_file = mask_file

    cluster = Node(Cluster(), name='cluster')
    wk.connect(smoothest, 'dlh', cluster, 'dlh')
    wk.connect(smoothest, 'volume', cluster, 'volume')
    cluster.inputs.connectivity = 26
    cluster.inputs.threshold = 2.3
    cluster.inputs.pthreshold = 0.05
    cluster.inputs.out_threshold_file = True
    cluster.inputs.out_index_file = True
    cluster.inputs.out_localmax_txt_file = True

    wk.connect(flame, 'zstats', cluster, 'in_file')

    ztopval = Node(ImageMaths(op_string='-ztop', suffix='_pval'),
                   name='z2pval')
    wk.connect(flame, 'zstats', ztopval, 'in_file')

    sinker = Node(DataSink(), name='sinker')
    sinker.inputs.base_directory = os.path.abspath(out_dir)
    sinker.inputs.substitutions = [('_cope_id', 'contrast'),
                                   ('_maths__', '_reversed_')]

    wk.connect(flame, 'zstats', sinker, 'stats')
    wk.connect(cluster, 'threshold_file', sinker, 'stats.@thr')
    wk.connect(cluster, 'index_file', sinker, 'stats.@index')
    wk.connect(cluster, 'localmax_txt_file', sinker, 'stats.@localmax')

    if not no_reversal:
        zstats_reverse = Node(BinaryMaths(), name='zstats_reverse')
        zstats_reverse.inputs.operation = 'mul'
        zstats_reverse.inputs.operand_value = -1
        wk.connect(flame, 'zstats', zstats_reverse, 'in_file')

        cluster2 = cluster.clone(name='cluster2')
        wk.connect(smoothest, 'dlh', cluster2, 'dlh')
        wk.connect(smoothest, 'volume', cluster2, 'volume')
        wk.connect(zstats_reverse, 'out_file', cluster2, 'in_file')

        ztopval2 = ztopval.clone(name='ztopval2')
        wk.connect(zstats_reverse, 'out_file', ztopval2, 'in_file')

        wk.connect(zstats_reverse, 'out_file', sinker, 'stats.@neg')
        wk.connect(cluster2, 'threshold_file', sinker, 'stats.@neg_thr')
        wk.connect(cluster2, 'index_file', sinker, 'stats.@neg_index')
        wk.connect(cluster2, 'localmax_txt_file', sinker,
                   'stats.@neg_localmax')

    return wk
Example #21
    tr = int(
        subprocess.check_output(['cat', subject_dir + '/rest/Rest_TR.txt'
                                 ]).decode().strip('\n').strip(' ').split('.')[0])
    print(nvols, tr)
    print(len(outlier_vols))
    print('Length outliers: ' + str(len(outlier_vols)))

    proc = subprocess.Popen(['fslsplit', str(bptf)])
    proc.wait()

    s = []
    for i in range(nvols):
        if i not in outlier_vols:
            s += ['vol' + str(i).zfill(4) + '.nii.gz']

    merger = Merge()
    merger.inputs.in_files = s
    merger.inputs.dimension = 't'
    merger.inputs.tr = tr
    merger.run()
    wait10()

    for j in s:
        os.remove(j)

    temp_ofile = os.path.join(subject_dir,
                              s[0].replace('.nii.gz', '_merged.nii.gz'))

    if os.path.exists(temp_ofile):
        os.rename(temp_ofile, 'RestVW_bptf_good_vols.nii.gz')
Example #22
def fsl_motion_correction(name='realign'):
    """Realign a time series to the middle volume using spline interpolation

    Uses MCFLIRT to realign the time series and ApplyWarp to apply the rigid
    body transformations using spline interpolation (unknown order).

    Nipype Inputs
    -------------
    realign_input.func: existing file
        The path to the fMRI file.

    Nipype Outputs
    --------------
    realign_output.realigned_file: existing file
        The path to the realigned fMRI file.

    realign_output.realign_params: mat file
        .mat file with the affine transformation parameters.

    Example
    -------
    >>> wf = fsl_motion_correction()
    >>> wf.inputs.inputspec.func = 'f3.nii'
    >>> wf.run() # doctest: +SKIP
    """
    wf = Workflow(name=name)

    # input node
    input = setup_node(IdentityInterface(fields=['func']),
                       name='realign_input')
    realigner = setup_node(MCFLIRT(save_mats=True, stats_imgs=True),
                           name='mcflirt')
    splitter = setup_node(Split(dimension='t'), name='splitter')
    warper = MapNode(ApplyWarp(interp='spline'),
                     iterfield=['in_file', 'premat'],
                     name='warper')

    joiner = setup_node(Merge(dimension='t'), name='joiner')

    # output node
    output = setup_node(IdentityInterface(fields=[
        'realigned_file',
        'realign_params',
    ]),
                        name='realign_output')

    wf.connect([
        # input
        (input, realigner, [
            ("func", "in_file"),
            (("func", select_volume, 'middle'), "ref_vol"),
        ]),
        # split
        (realigner, splitter, [("out_file", "in_file")]),
        (realigner, warper, [
            ("mat_file", "premat"),
            ("variance_img", "ref_file"),
        ]),
        # warp
        (splitter, warper, [("out_files", "in_file")]),
        (warper, joiner, [("out_file", "in_files")]),
        # output
        (joiner, output, [("merged_file", "realigned_file")]),
        (realigner, output, [("mat_file", "realign_params")]),
    ])
    return wf
Example #23
     "versions": [{
         "title": SUSAN().version or "1.0",
         "description":
         f"Default SUSAN version for nipype {_NIPYPE_VERSION}.",  # noqa: E501
         "input": SUSAN_INPUT_SPECIFICATION,
         "output": SUSAN_OUTPUT_SPECIFICATION,
         "nested_results_attribute": "outputs.get_traitsfree",
     }],
 },
 {
     "title":
     "fslmerge",
     "description":
     "Concatenates images along specified dimension.",
     "versions": [{
         "title": Merge().version or "1.0",
         "description":
         f"Default fslmerge version for nipype {_NIPYPE_VERSION}.",  # noqa: E501
         "input": FSLMERGE_INPUT_SPECIFICATION,
         "output": FSLMERGE_OUTPUT_SPECIFICATION,
         "nested_results_attribute": "outputs.get_traitsfree",
     }],
 },
 {
     "title":
     "fslroi",
     "description":
     "Extracts specific ROI from image.",
     "versions": [{
         "title": ExtractROI().version or "1.0",
         "description":
Example #24
def gnl_correction(input,
                   file_bash,
                   file_coeff,
                   python3_env,
                   python2_env,
                   path_output,
                   cleanup=True):
    """
    The purpose of the following function is to correct for gradient nonlinearities. A corrected
    file is written using spline interpolation. The function needs FSL to be included in the search
    path.
    Inputs:
        *input: filename of input image.
        *file_bash: filename of bash script which calls the gradient unwarping toolbox.
        *file_coeff: filename of siemens coefficient file.
        *python3_env: name of python3 virtual environment.
        *python2_env: name of python2 virtual environment.
        *path_output: path where output is written.
        *cleanup: delete intermediate files.

    created by Daniel Haenelt
    Date created: 10-01-2020             
    Last modified: 10-01-2020  
    """
    import os
    import shutil as sh
    import numpy as np
    import nibabel as nb
    from nipype.interfaces.fsl import ConvertWarp, Merge
    from nipype.interfaces.fsl.maths import MeanImage
    from nipype.interfaces.fsl.preprocess import ApplyWarp
    from lib.io.get_filename import get_filename
    from lib.cmap.generate_coordinate_mapping import generate_coordinate_mapping

    # get fileparts
    path, name, ext = get_filename(input)

    # make subfolders
    path_grad = os.path.join(path_output, "grad")
    if not os.path.exists(path_grad):
        os.makedirs(path_grad)

    # parse arguments
    file_output = os.path.join(path_output, name + "_gnlcorr" + ext)
    file_warp = os.path.join(path_grad, "warp.nii.gz")
    file_jacobian = os.path.join(path_grad, "warp_jacobian.nii.gz")

    # run gradient unwarp
    os.system("bash " + file_bash  + \
              " " + python3_env + \
              " " + python2_env + \
              " " + path_grad + \
              " " + input + \
              " trilinear.nii.gz" + \
              " " + file_coeff)

    # now create an appropriate warpfield output (relative convention)
    convertwarp = ConvertWarp()
    convertwarp.inputs.reference = os.path.join(path_grad, "trilinear.nii.gz")
    convertwarp.inputs.warp1 = os.path.join(path_grad, "fullWarp_abs.nii.gz")
    convertwarp.inputs.abswarp = True
    convertwarp.inputs.out_relwarp = True
    convertwarp.inputs.out_file = file_warp
    convertwarp.inputs.args = "--jacobian=" + file_jacobian
    convertwarp.run()

    # convertwarp's jacobian output has 8 frames, each combination of one-sided differences, so average them
    calcmean = MeanImage()
    calcmean.inputs.in_file = file_jacobian
    calcmean.inputs.dimension = "T"
    calcmean.inputs.out_file = file_jacobian
    calcmean.run()

    # apply warp to first volume
    applywarp = ApplyWarp()
    applywarp.inputs.in_file = input
    applywarp.inputs.ref_file = input
    applywarp.inputs.relwarp = True
    applywarp.inputs.field_file = file_warp
    applywarp.inputs.output_type = "NIFTI"
    applywarp.inputs.out_file = file_output
    applywarp.inputs.interp = "spline"
    applywarp.run()

    # normalise warped output image to initial intensity range
    data_img = nb.load(input)
    data_array = data_img.get_fdata()
    max_data = np.max(data_array)
    min_data = np.min(data_array)

    data_img = nb.load(file_output)
    data_array = data_img.get_fdata()
    data_array[data_array < min_data] = 0
    data_array[data_array > max_data] = max_data

    output = nb.Nifti1Image(data_array, data_img.affine, data_img.header)
    nb.save(output, file_output)

    # calculate gradient deviations
    os.system("calc_grad_perc_dev" + \
              " --fullwarp=" + file_warp + \
              " -o " + os.path.join(path_grad,"grad_dev"))

    # merge directions
    merger = Merge()
    merger.inputs.in_files = [
        os.path.join(path_grad, "grad_dev_x.nii.gz"),
        os.path.join(path_grad, "grad_dev_y.nii.gz"),
        os.path.join(path_grad, "grad_dev_z.nii.gz")
    ]
    merger.inputs.dimension = 't'
    merger.inputs.merged_file = os.path.join(path_grad, "grad_dev.nii.gz")
    merger.run()

    # convert from % deviation to absolute
    data_img = nb.load(os.path.join(path_grad, "grad_dev.nii.gz"))
    data_array = data_img.get_fdata()
    data_array = data_array / 100

    output = nb.Nifti1Image(data_array, data_img.affine, data_img.header)
    nb.save(output, os.path.join(path_grad, "grad_dev.nii.gz"))

    # warp coordinate mapping
    generate_coordinate_mapping(input,
                                0,
                                path_grad,
                                suffix="gnl",
                                time=False,
                                write_output=True)

    applywarp = ApplyWarp()
    applywarp.inputs.in_file = os.path.join(path_grad, "cmap_gnl.nii")
    applywarp.inputs.ref_file = input
    applywarp.inputs.relwarp = True
    applywarp.inputs.field_file = file_warp
    applywarp.inputs.out_file = os.path.join(path_grad, "cmap_gnl.nii")
    applywarp.inputs.interp = "trilinear"
    applywarp.inputs.output_type = "NIFTI"
    applywarp.run()

    # clean intermediate files
    if cleanup:
        sh.rmtree(path_grad, ignore_errors=True)
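
A hypothetical invocation of gnl_correction; every path below is a placeholder:

gnl_correction('/data/sub-01/mean_epi.nii',
               file_bash='/scripts/gradunwarp.sh',
               file_coeff='/data/coeff_AS82.grad',
               python3_env='gradunwarp3',
               python2_env='gradunwarp2',
               path_output='/data/sub-01/gnl')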