Example #1
0
def dcm_to_nifti(dicom_dir, nifti_dir, split=True, tool_used='dcm2niix'):
    """Convert per-patient DICOM folders to NIfTI.

    Parameters
    ----------
    dicom_dir : str
        Directory containing one sub-folder per patient with DICOM data.
    nifti_dir : str
        Output directory; a sub-folder is created per patient.
    split : bool
        If True, each patient folder contains one sub-folder per series
        and every series is converted separately (filenames 'x_%d');
        if False, the whole patient folder is converted at once ('%d').
    tool_used : str
        Either 'dcm2niix' or 'dcm2nii'.

    Raises
    ------
    ValueError
        If ``tool_used`` is neither 'dcm2nii' nor 'dcm2niix'.
        (Was ``raise Warning`` — a Warning is not meant to be raised for
        invalid arguments.)
    """
    def _convert(source_dir, out_dir, out_filename):
        # Configure and run the selected nipype converter for one folder.
        if tool_used == 'dcm2niix':
            converter = Dcm2niix()
            converter.inputs.source_dir = source_dir
            converter.inputs.compression = 5
            converter.inputs.merge_imgs = True
            converter.inputs.out_filename = out_filename
            converter.inputs.output_dir = out_dir
            converter.run()
        elif tool_used == 'dcm2nii':
            converter = Dcm2nii()
            converter.inputs.source_dir = source_dir
            converter.inputs.gzip_output = True
            converter.inputs.output_dir = out_dir
            converter.run()
        else:
            raise ValueError(
                "tool used does not exist, please enter dcm2nii or dcm2niix")

    for patient in os.listdir(dicom_dir):
        path_dicom = os.path.join(dicom_dir, patient)
        path_nifti = os.path.join(nifti_dir, patient)
        # make a sub-folder for each patient
        os.makedirs(path_nifti, exist_ok=True)
        if not split:
            _convert(path_dicom, path_nifti, '%d')
        else:
            # one conversion per series sub-folder
            for series in os.listdir(path_dicom):
                _convert(os.path.join(path_dicom, series), path_nifti, 'x_%d')

#dcm_to_nifti(dicom_split_dir, nifti_dir, True, 'dcm2nii')
#dcm_to_nifti(dicom_split_dir, nifti_dir, True, 'dcm2niix')
Example #2
0
def test_Dcm2nii_inputs():
    """Yield one equality check per expected metadata entry of every
    Dcm2nii input trait."""
    expected_inputs = dict(
        anonymize=dict(argstr="-a", usedefault=True),
        args=dict(argstr="%s"),
        collapse_folders=dict(argstr="-c", usedefault=True),
        config_file=dict(argstr="-b %s", genfile=True),
        convert_all_pars=dict(argstr="-v", usedefault=True),
        date_in_filename=dict(argstr="-d", usedefault=True),
        environ=dict(nohash=True, usedefault=True),
        events_in_filename=dict(argstr="-e", usedefault=True),
        gzip_output=dict(argstr="-g", usedefault=True),
        id_in_filename=dict(argstr="-i", usedefault=True),
        ignore_exception=dict(nohash=True, usedefault=True),
        nii_output=dict(argstr="-n", usedefault=True),
        output_dir=dict(argstr="-o %s", genfile=True),
        protocol_in_filename=dict(argstr="-p", usedefault=True),
        reorient=dict(argstr="-r"),
        reorient_and_crop=dict(argstr="-x", usedefault=True),
        source_dir=dict(argstr="%s", mandatory=True, position=-1, xor=["source_names"]),
        source_in_filename=dict(argstr="-f", usedefault=True),
        source_names=dict(argstr="%s", copyfile=False, mandatory=True, position=-1, xor=["source_dir"]),
        spm_analyze=dict(argstr="-s", xor=["nii_output"]),
        terminal_output=dict(mandatory=True, nohash=True),
    )
    trait_specs = Dcm2nii.input_spec().traits()

    for trait_name, meta in expected_inputs.items():
        for attr, want in meta.items():
            yield assert_equal, getattr(trait_specs[trait_name], attr), want
Example #3
0
def make_and_upload_nifti(subject_name, subject_XNAT, scan_sequence):
    """Convert one scan sequence's DICOMs to NIfTI and upload it to XNAT.

    Parameters
    ----------
    subject_name : str
        Subject folder name under the module-level ``DICOM_folder``.
    subject_XNAT :
        XNAT subject handle, passed through to ``upload_nifti``.
    scan_sequence : str
        Sub-folder of the subject directory holding the sequence's DICOMs;
        also used as the output file base name.
    """
    # BUG FIX: the original referenced an undefined name ``i_subject``
    # (leftover loop variable from the calling script); the parameter
    # here is ``subject_name``.
    subject_directory = os.path.join(DICOM_folder, subject_name)
    dicom_directory = os.path.join(DICOM_folder, subject_name, scan_sequence)

    # Configure dcm2nii: gzip output, id-only file naming, log to file.
    converter = Dcm2nii()
    converter.inputs.source_dir = dicom_directory
    converter.inputs.gzip_output = True
    converter.inputs.output_dir = subject_directory
    converter.inputs.source_in_filename = False
    converter.inputs.protocol_in_filename = False
    converter.inputs.date_in_filename = False
    converter.inputs.events_in_filename = False
    converter.inputs.id_in_filename = True
    converter.inputs.terminal_output = 'file'
    convert_result = converter.run()

    nifti_file = convert_result.outputs.converted_files

    # Rename the converted file to <sequence>.nii.gz inside the subject dir.
    target = os.path.join(subject_directory, scan_sequence + '.nii.gz')
    shutil.move(nifti_file, target)

    upload_nifti(subject_XNAT, scan_sequence, target, 'NIFTI',
                 'image.nii.gz')
    return
Example #4
0
def dcm2nii_converter(source_names=traits.Undefined):
    """ Create a configured dcm2nii interface.

    It does:
    - dcm2nii

    Nipype Inputs
    -------------
    source_names: traits.File
        path to the DICOM files or series folder.

    Returns
    -------
    dcm2nii: nipype Dcm2nii interface
        DOC FIX: this returns the configured *interface*, not a Workflow
        (the original docstring claimed a Workflow). Wrap it in a Node to
        use it inside a pipeline.

    Note
    ----
    For more info: http://www.mit.edu/~satra/nipype-nightly/interfaces/generated/nipype.interfaces.dcm2nii.html
    # TODO: an attach function for this node.
    """
    dcm2nii = Dcm2nii()

    # gzip output, write into the current working directory, log to file
    dcm2nii.inputs.gzip_output = True
    dcm2nii.inputs.output_dir = '.'
    dcm2nii.inputs.terminal_output = 'file'
    dcm2nii.inputs.source_names = source_names

    return dcm2nii
Example #5
0
def weird_convert_dti_dcm(in_dcm):
    """Convert a DTI DICOM series and prepend a synthetic b0 entry to the
    bvals/bvecs tables.

    Parses subject id, visit year and scan id out of the input path,
    converts with Dcm2nii, merges the converted volumes along time with
    FSL Merge, then writes <basename>.bvecs / <basename>.bvals with an
    extra all-zero row/entry inserted at index 0 (the b0 volume).

    Returns ``(merged_file, out_bvec_fn, out_bval_fn)``.
    """

    import os
    import numpy as np
    import re

    # e.g. subject code 'R123X', year tag '_2013', scan id 'S45' in the path
    subjid = re.search('R[0-9X]+', in_dcm).group()
    year = re.search('_201[1234]', in_dcm).group()[1:]
    visit_dict = {'2012': 1, '2013': 2, '2014': 3, '2011': 4}
    visit = visit_dict[year]
    scanid = re.search('S[0-9]+', in_dcm).group()
    ton_dir = '/data1/cooked/TONf'
    test_fn = os.path.join(ton_dir, subjid, 'visit_{}'.format(visit), 'DTI',
                           '_'.join([subjid, 'visit', str(visit), 'DTI',
                                     scanid])) + '.bvals'
    # sanity check: a previously cooked bvals file must contain no zeros
    if os.path.exists(test_fn):
        assert np.all(np.loadtxt(test_fn) != 0)
    converter = Dcm2nii()
    converter.inputs.source_names = in_dcm
    converter.inputs.gzip_output = True
    converter.inputs.output_dir = os.getcwd()

    converter.run()

    # Merge the converted volumes along the time axis.
    # NOTE(review): relies on Dcm2nii exposing .output_files/.bvals/.bvecs
    # as attributes on the interface instance after run() -- confirm this
    # against the installed nipype version.
    merger = Merge()
    merger.inputs.in_files = converter.output_files
    merger.inputs.dimension = 't'
    merged_result = merger.run()
    fn_base = os.path.basename(in_dcm).split('.')[0]

    merged_file = os.path.join(os.getcwd(), fn_base + '.nii.gz')
    os.rename(merged_result.outputs.merged_file, merged_file)

    in_bval = converter.bvals[0]
    in_bvec = converter.bvecs[0]
    b0_idx = 0
    assert np.all(np.loadtxt(in_bval) != 0)

    # Load (and transpose!!)
    bvec_arr = np.loadtxt(in_bvec).T
    # Build a table with one extra row: fill everything with NaN, zero the
    # b0 row, then pour the original values into the remaining NaN slots.
    out_bvec = np.zeros((bvec_arr.shape[0] + 1,
                         bvec_arr.shape[1]))
    out_bvec[:] = np.nan
    out_bvec[b0_idx, :] = 0
    out_bvec[np.where(np.isnan(out_bvec))] = bvec_arr.flatten()

    # Same NaN-fill trick for bvals: insert a 0 entry at b0_idx.
    bval_arr = np.loadtxt(in_bval)
    out_bval = np.zeros((bval_arr.shape[0] + 1,))
    out_bval[:] = np.nan
    out_bval[b0_idx] = 0
    out_bval[np.isnan(out_bval)] = bval_arr

    out_bvec_fn = os.path.join(os.getcwd(), fn_base + '.bvecs')
    np.savetxt(out_bvec_fn, out_bvec, fmt='%.8f')

    out_bval_fn = os.path.join(os.getcwd(), fn_base + '.bvals')

    np.savetxt(out_bval_fn, out_bval, fmt='%.6f')
    return merged_file, out_bvec_fn, out_bval_fn
Example #6
0
 def __init__(self, source_names=None, source_dir='path', **options):
     """Configure and immediately run a Dcm2nii conversion.

     BUG FIX: ``source_names=['path']`` was a mutable default argument,
     shared across all calls; use None and substitute the original
     default inside, so behavior for callers is unchanged.

     Parameters
     ----------
     source_names : list or None
         DICOM file names for Dcm2nii; defaults to ['path'].
     source_dir : str
         Source directory for Dcm2nii.
     **options
         Any extra Dcm2nii input traits, set verbatim on the interface.
     """
     from nipype.interfaces.dcm2nii import Dcm2nii
     if source_names is None:
         source_names = ['path']
     self.converter = Dcm2nii()
     self.converter.inputs.source_names = source_names
     self.converter.inputs.source_dir = source_dir
     for trait_name, value in options.items():
         setattr(self.converter.inputs, trait_name, value)
     self.converter.run()
Example #7
0
 def __init__(self, source_names=None):
     """Convert the given DICOMs to gzipped NIfTI in the current directory.

     BUG FIX: ``source_names=['', '']`` was a mutable default argument,
     shared across all calls; use None and substitute the original
     placeholder default inside, preserving caller-visible behavior.
     """
     from nipype.interfaces.dcm2nii import Dcm2nii
     if source_names is None:
         source_names = ['', '']
     converter = Dcm2nii()
     converter.inputs.source_names = source_names
     converter.inputs.gzip_output = True
     converter.inputs.output_dir = '.'
     converter.cmdline  # kept from original: assembles (and discards) the command line
     converter.run()
def dcm_to_nifti(dicom_dir, nifti_dir, split=True, tool_used='dcm2niix'):
    """Convert per-patient DICOM folders to NIfTI with dcm2niix or dcm2nii.

    dcm2niix is invoked as an external command; dcm2nii goes through the
    nipype interface. With ``split=True`` each series sub-folder of a
    patient is converted separately.

    Fixes over the original:
    - ``subprocess.call`` gets an argument list (no ``shell=True`` string
      interpolation, so no quoting/injection problems),
    - the bare ``except: pass`` now catches only ``OSError`` (missing or
      unrunnable binary) and reports it instead of silently hiding every
      error,
    - an unknown ``tool_used`` raises ``ValueError`` instead of Warning.
    """
    for patient in os.listdir(dicom_dir):
        path_dicom = os.path.join(dicom_dir, patient)
        path_nifti = os.path.join(nifti_dir, patient)
        # make subfolder for each patient
        os.makedirs(path_nifti, exist_ok=True)

        # one source per conversion: the patient folder itself, or each
        # series sub-folder when splitting
        if not split:
            sources = [path_dicom]
        else:
            sources = [os.path.join(path_dicom, series)
                       for series in os.listdir(path_dicom)]

        for source in sources:
            if tool_used == 'dcm2niix':
                try:
                    subprocess.call(['dcm2niix', '-z', 'y', '-f', '%d',
                                     '-o', path_nifti, source])
                except OSError as exc:
                    # best effort, as before: keep converting the rest
                    print('dcm2niix failed for %s: %s' % (source, exc))
            elif tool_used == 'dcm2nii':
                converter = Dcm2nii()
                converter.inputs.source_dir = source
                converter.inputs.gzip_output = True
                converter.inputs.output_dir = path_nifti
                converter.run()
            else:
                raise ValueError(
                    "tool used does not exist, please enter dcm2nii or dcm2niix"
                )
Example #9
0
def test_Dcm2nii_outputs():
    """Yield checks over the metadata of every declared Dcm2nii output
    trait (all metadata dicts are empty, so this mainly pins the names)."""
    expected_outputs = {
        name: {}
        for name in ('bvals', 'bvecs', 'converted_files',
                     'reoriented_and_cropped_files', 'reoriented_files')
    }
    spec = Dcm2nii.output_spec()

    for trait_name, meta in expected_outputs.items():
        for attr, want in meta.items():
            yield assert_equal, getattr(spec.traits()[trait_name], attr), want
Example #10
0
def create_converter_diffusion_pipeline(working_dir,
                                        ds_dir,
                                        name='converter_diffusion'):
    """Build a nipype workflow that converts diffusion DICOMs to NIfTI.

    Wiring: inputnode.dMRI_dicom -> Dcm2nii -> rename to DTI_mx_137.* ->
    fsl.Reorient2Std -> outputnode.dMRI, with the reoriented image and the
    renamed bvals/bvecs sunk into <ds_dir>/raw_niftis via a DataSink.

    Parameters
    ----------
    working_dir : str
        Base directory for workflow working files (under 'LeiCA_resting').
    ds_dir : str
        DataSink base directory; files land under 'raw_niftis'.
    name : str
        Name of the returned workflow.

    Returns
    -------
    converter_wf : nipype Workflow
    """
    # initiate workflow
    converter_wf = Workflow(name=name)
    converter_wf.base_dir = os.path.join(working_dir, 'LeiCA_resting')

    # set fsl output
    fsl.FSLCommand.set_default_output_type('NIFTI_GZ')

    # inputnode
    inputnode = Node(util.IdentityInterface(fields=['dMRI_dicom']),
                     name='inputnode')

    outputnode = Node(util.IdentityInterface(fields=['dMRI']),
                      name='outputnode')

    niftisink = Node(nio.DataSink(), name='niftisink')
    niftisink.inputs.base_directory = os.path.join(ds_dir, 'raw_niftis')

    #######

    # gzip the NIfTI output and keep original (non-anonymized) headers
    converter_dMRI = Node(Dcm2nii(), name="converter_dMRI")
    converter_dMRI.inputs.gzip_output = True
    converter_dMRI.inputs.nii_output = True
    converter_dMRI.inputs.anonymize = False
    # scheduler hint (e.g. HTCondor): request 2 GB for the conversion job
    converter_dMRI.plugin_args = {'submit_specs': 'request_memory = 2000'}
    converter_wf.connect(inputnode, 'dMRI_dicom', converter_dMRI,
                         'source_names')

    # fixed output names expected by downstream steps
    dMRI_rename = Node(util.Rename(format_string='DTI_mx_137.nii.gz'),
                       name='dMRI_rename')
    converter_wf.connect(converter_dMRI, 'converted_files', dMRI_rename,
                         'in_file')

    bvecs_rename = Node(util.Rename(format_string='DTI_mx_137.bvecs'),
                        name='bvecs_rename')
    converter_wf.connect(converter_dMRI, 'bvecs', bvecs_rename, 'in_file')

    bvals_rename = Node(util.Rename(format_string='DTI_mx_137.bvals'),
                        name='bvals_rename')
    converter_wf.connect(converter_dMRI, "bvals", bvals_rename, 'in_file')

    # reorient to standard orientation
    reor_2_std = Node(fsl.Reorient2Std(), name='reor_2_std')
    converter_wf.connect(dMRI_rename, 'out_file', reor_2_std, 'in_file')
    converter_wf.connect(reor_2_std, 'out_file', outputnode, 'dMRI')

    # save original niftis
    converter_wf.connect(reor_2_std, 'out_file', niftisink, 'dMRI.@dwi')
    converter_wf.connect(bvals_rename, 'out_file', niftisink, 'dMRI.@bvals')
    converter_wf.connect(bvecs_rename, 'out_file', niftisink, 'dMRI.@bvecs')

    converter_wf.write_graph(dotfilename='converter_struct',
                             graph2use='flat',
                             format='pdf')
    return converter_wf
Example #11
0
def test_Dcm2nii_inputs():
    """Yield one equality check per expected metadata entry of every
    Dcm2nii input trait (older interface version with positional args)."""
    expected_inputs = dict(
        anonymize=dict(argstr='-a', position=2),
        args=dict(argstr='%s', position=9),
        config_file=dict(argstr='-b %s', genfile=True, position=7),
        convert_all_pars=dict(argstr='-v', position=8),
        environ=dict(nohash=True, usedefault=True),
        gzip_output=dict(argstr='-g', position=0, usedefault=True),
        id_in_filename=dict(argstr='-i', position=3, usedefault=True),
        ignore_exception=dict(nohash=True, usedefault=True),
        nii_output=dict(argstr='-n', position=1, usedefault=True),
        output_dir=dict(argstr='-o %s', genfile=True, position=6),
        reorient=dict(argstr='-r', position=4),
        reorient_and_crop=dict(argstr='-x', position=5),
        source_names=dict(argstr='%s', copyfile=False, mandatory=True,
                          position=10),
        terminal_output=dict(mandatory=True, nohash=True),
    )
    trait_specs = Dcm2nii.input_spec().traits()

    for trait_name, meta in expected_inputs.items():
        for attr, want in meta.items():
            yield assert_equal, getattr(trait_specs[trait_name], attr), want
def dcm_to_nifti(dicom_dir, nifti_dir, split=True, tool_used='dcm2niix'):
    """Convert per-patient DICOM folders to NIfTI.

    Non-split conversions use the nipype interfaces; split conversions with
    dcm2niix call a locally-built binary directly, one series sub-folder at
    a time.

    Fixes over the original:
    - ``subprocess.call`` gets an argument list (no ``shell=True`` string
      interpolation, so no quoting/injection problems),
    - the bare ``except: pass`` now catches only ``OSError`` and reports
      it instead of silently hiding every error,
    - an unknown ``tool_used`` raises ``ValueError`` instead of Warning.
    """
    # locally-built dcm2niix used for the split branch (as in the original)
    dcm2niix_bin = '/home/harryzhang/toolbox/dcm2niix/console/dcm2niix'
    for patient in os.listdir(dicom_dir):
        path_dicom = os.path.join(dicom_dir, patient)
        path_nifti = os.path.join(nifti_dir, patient)
        # make subfolder for each patient
        os.makedirs(path_nifti, exist_ok=True)
        if not split:
            if tool_used == 'dcm2niix':
                converter = Dcm2niix()
                converter.inputs.source_dir = path_dicom
                converter.inputs.compression = 5
                converter.inputs.merge_imgs = True
                converter.inputs.out_filename = '%d'
                converter.inputs.output_dir = path_nifti
                converter.run()
            elif tool_used == 'dcm2nii':
                converter = Dcm2nii()
                converter.inputs.source_dir = path_dicom
                converter.inputs.gzip_output = True
                converter.inputs.output_dir = path_nifti
                converter.run()
            else:
                raise ValueError(
                    "tool used does not exist, please enter dcm2nii or dcm2niix")

        else:
            for series in os.listdir(path_dicom):
                source = os.path.join(path_dicom, series)
                if tool_used == 'dcm2niix':
                    try:
                        subprocess.call([dcm2niix_bin, '-m', 'y', '-5',
                                         '-f', '%d', '-o', path_nifti,
                                         source])
                    except OSError as exc:
                        # best effort, as before: keep converting the rest
                        print('dcm2niix failed for %s: %s' % (source, exc))
                elif tool_used == 'dcm2nii':
                    converter = Dcm2nii()
                    converter.inputs.source_dir = source
                    converter.inputs.gzip_output = True
                    converter.inputs.output_dir = path_nifti
                    converter.run()
                else:
                    raise ValueError(
                        "tool used does not exist, please enter dcm2nii or dcm2niix")
Example #13
0
def dcm2nii_wrapper(input_dir, output_dir):
    """Run dcm2nii on *input_dir*, writing uncompressed, unreoriented
    NIfTI files (protocol name only in filenames) into *output_dir*,
    and return the produced files."""
    conv = Dcm2nii()
    settings = {
        'source_dir': input_dir,
        'output_dir': output_dir,
        'gzip_output': False,
        'reorient_and_crop': False,
        'reorient': False,
        'events_in_filename': False,
        'date_in_filename': False,
        'protocol_in_filename': True,
    }
    for trait_name, value in settings.items():
        setattr(conv.inputs, trait_name, value)
    conv.run()
    return conv.output_files
Example #14
0
def test_Dcm2nii_outputs():
    """Yield checks over the metadata of every declared Dcm2nii output
    trait (all metadata dicts are empty, so this mainly pins the names)."""
    expected_outputs = {
        name: {}
        for name in ('bvals', 'bvecs', 'converted_files',
                     'reoriented_and_cropped_files', 'reoriented_files')
    }
    spec = Dcm2nii.output_spec()

    for trait_name, meta in expected_outputs.items():
        for attr, want in meta.items():
            yield assert_equal, getattr(spec.traits()[trait_name], attr), want
Example #15
0
def main(args):
    """Run Dcm2nii on ``args.source_dir``, writing into ``args.output_dir``.

    Expects ``args`` to carry: source_dir, output_dir, anonymize,
    nii_output, gzip_output (e.g. an argparse Namespace).
    """
    print('\n>> CLI Parameters ...\n')
    print(args)

    # FIX: race-free directory creation instead of isdir-then-makedirs
    os.makedirs(args.output_dir, exist_ok=True)

    converter = Dcm2nii()
    converter.inputs.source_dir = args.source_dir
    converter.inputs.output_dir = args.output_dir
    converter.inputs.anonymize = args.anonymize
    converter.inputs.nii_output = args.nii_output
    converter.inputs.gzip_output = args.gzip_output

    print('\n>> Running Dcm2nii ... \n')
    print(converter.cmdline)

    converter.run()
def extract_nac_pet(dicom_folder):
    """
    Extract the Non attenuation Corrected PET from a DICOM exam.

    The function extract the last frame of the DICOM exam.

    The DICOM exam is expected to be of the dynamic PET scan itself. It also expects the image dimension
    in the transversal acquisition direction to be of 127

    It uses dcm2nii to convert the DICOM subset into a nifti file

    :param dicom_folder: The input DICOM folder of the dynamic PET scan
    :return: the nifti converted image file (the last frame of the dynamic PET scan)
    """
    from glob import glob
    import os
    import shutil
    import re
    from nipype.interfaces.dcm2nii import Dcm2nii

    def atoi(text):
        # numeric path components compare as ints, the rest as strings
        return int(text) if text.isdigit() else text

    def natural_keys(text):
        # FIX: raw string -- '\d' in a normal literal is an invalid escape
        # sequence (SyntaxWarning on recent Pythons)
        return [atoi(c) for c in re.split(r'(\d+)', text)]

    files = glob(os.path.join(os.path.abspath(dicom_folder), '*'))
    sorted_files = sorted(files, key=natural_keys)
    # last 127 slices == final frame of the dynamic acquisition
    nac_pet_files = sorted_files[-127:]
    for f in nac_pet_files:
        shutil.copy(f, os.getcwd())
    dcm2nii = Dcm2nii()
    dcm2nii.inputs.source_dir = os.getcwd()
    nii_outputs = dcm2nii.run().outputs.converted_files
    print(nii_outputs)
    # NOTE(review): if converted_files is a single str (one output image),
    # nii_outputs[0] yields its first *character* -- confirm against the
    # installed nipype version's output type.
    return nii_outputs[0]
Example #17
0
# Script fragment: create per-subject data directories and set up the
# conversion nodes of the preparation pipeline. Names such as
# subjects_data_list, experiment_dir, data_dir_name, pe and util are
# defined earlier in the full script (not visible here).
for subject in subjects_data_list:
    # create <experiment_dir>/<data_dir_name>/<subject> if it is missing
    if not os.path.exists(experiment_dir + '/' + data_dir_name + '/' +
                          subject):
        os.makedirs(experiment_dir + '/' + data_dir_name + '/' + subject)
"""
Define nodes to use
"""

#Node: Infosource - we use IdentityInterface to create our own node, to
#                   specify the list of subjects the pipeline should be
#                   executed on
infosource = pe.Node(interface=util.IdentityInterface(fields=['subject_id']),
                     name="infosource")
infosource.iterables = [('subject_id', subjects_data_list)]

# dcm2nii node: gzip the output, skip the reorient-and-crop pass
dcm2nii_converter = pe.Node(interface=Dcm2nii(), name='dcm2nii')
dcm2nii_converter.inputs.gzip_output = True
dcm2nii_converter.inputs.reorient_and_crop = False

# Initiation of the preparation pipeline
prepareflow = pe.Workflow(name="prepareflow")

# Define where the workingdir of the all_consuming_workflow should be stored
# at
prepareflow.base_dir = experiment_dir + '/workingdir_prepareflow'
#Define pathfinder function
def pathfinder(subject, experiment_dir, foldername):
    import os
    from glob import glob
# Build DICOM scourse Direciry
def sourceFinder(subjectname):
    """Return the DICOMDIR path for *subjectname* under the raw-data root."""
    import os
    raw_data_root = '/Users/Dalton/Documents/FSL/ValuePilot/RawData'
    return os.path.join(raw_data_root, subjectname, 'DICOMDIR')


def outputFinder(subjectname):
    """Return the 'niis' output path for *subjectname* under the raw-data root."""
    import os
    raw_data_root = '/Users/Dalton/Documents/FSL/ValuePilot/RawData'
    return os.path.join(raw_data_root, subjectname, 'niis')


# Script fragment: wire subject ids through the path-finder helpers into a
# Dcm2nii node and run the pipeline. infosource, pe and experiment_dir are
# defined earlier in the full script (not visible here).
converter = pe.Node(Dcm2nii(), name="converter")
# extra raw dcm2nii flag: '-d n' (passed through the generic args input)
converter.inputs.args = "-d n"

#Initiation of the preparation pipeline
prepareflow = pe.Workflow(name="prepareflow")

#Define where the workingdir of the all_consuming_workflow should be stored at
prepareflow.base_dir = experiment_dir + '/workingdir_prepareflow'

# subject_id is mapped through sourceFinder/outputFinder to produce the
# converter's source_names and output_dir for each subject
prepareflow.connect([(infosource, converter,
                      [(('subject_id', sourceFinder), 'source_names'),
                       (('subject_id', outputFinder), 'output_dir')])])

prepareflow.write_graph(graph2use='orig')
prepareflow.run(plugin='MultiProc', plugin_args={'n_procs': 8})
Example #19
0
    ]]
    datagrabber.inputs.template_args['1400'] = [[
        'subject_id', 'session_1/RfMRI_mx_1400'
    ]]
    datagrabber.inputs.template_args['2500'] = [[
        'subject_id', 'session_1/RfMRI_std_2500'
    ]]
    datagrabber.inputs.template_args['dwi'] = [[
        'subject_id', ['session_1/DTI_mx_137/*.dcm']
    ]]
    datagrabber.inputs.sort_filelist = True
    datagrabber.inputs.raise_on_empty = False

    wf.connect(subjects_infosource, "subject_id", datagrabber, "subject_id")

    dcm2nii_dwi = pe.Node(Dcm2nii(), name="dcm2nii_dwi")
    dcm2nii_dwi.inputs.gzip_output = True
    dcm2nii_dwi.inputs.nii_output = True
    dcm2nii_dwi.inputs.anonymize = False
    dcm2nii_dwi.plugin_args = {'submit_specs': 'request_memory = 2000'}
    wf.connect(datagrabber, "dwi", dcm2nii_dwi, "source_names")

    dwi_rename = pe.Node(util.Rename(format_string="DTI_mx_137.nii.gz"),
                         name="dwi_rename")
    wf.connect(dcm2nii_dwi, "converted_files", dwi_rename, "in_file")

    bvecs_rename = pe.Node(util.Rename(format_string="DTI_mx_137.bvecs"),
                           name="bvecs_rename")
    wf.connect(dcm2nii_dwi, "bvecs", bvecs_rename, "in_file")

    bvals_rename = pe.Node(util.Rename(format_string="DTI_mx_137.bvals"),
Example #20
0
def convert(items, anonymizer=None, symlink=True, converter=None):
    """Convert grouped DICOM series into the requested output formats.

    Parameters
    ----------
    items : iterable of tuples
        Each item is ``(prefix, outtype_or_list, dicom_files)`` with
        outtypes drawn from {'dicom', 'nii', 'nii.gz'}. 'dicom' exports
        the files into ``<prefix>_dicom`` (symlinked or hard-linked);
        'nii'/'nii.gz' converts via the selected backend, records nipype
        provenance to ``<prefix>_prov.ttl`` and embeds DICOM metadata into
        the NIfTI via ``embed_nifti``.
    anonymizer :
        Accepted for interface compatibility; not used in this function.
    symlink : bool
        Symlink (True) or hard-link (False) exported DICOMs.
    converter : str
        'mri_convert' or 'dcm2nii'.

    BUG FIXES: this body was Python 2 (`print converter` statement and
    `0440` octal literals) while the rest of the file is Python 3; both
    are syntax errors under Python 3 and have been modernized.
    """
    prov_files = []
    tmpdir = mkdtemp()

    for item in items:
        if isinstance(item[1], (list, tuple)):
            outtypes = item[1]
        else:
            outtypes = [item[1]]
        prefix = item[0]
        print('Converting %s' % prefix)
        dirname = os.path.dirname(prefix + '.ext')
        print(dirname)
        if not os.path.exists(dirname):
            os.makedirs(dirname)
        for outtype in outtypes:
            print(outtype)
            if outtype == 'dicom':
                # re-create the export dir from scratch, then link files in
                dicomdir = prefix + '_dicom'
                if os.path.exists(dicomdir):
                    shutil.rmtree(dicomdir)
                os.mkdir(dicomdir)
                for filename in item[2]:
                    outfile = os.path.join(dicomdir, os.path.split(filename)[1])
                    if not os.path.islink(outfile):
                        if symlink:
                            os.symlink(filename, outfile)
                        else:
                            os.link(filename, outfile)
            elif outtype in ['nii', 'nii.gz']:
                outname = prefix + '.' + outtype
                scaninfo = prefix + '_scaninfo.json'
                if not os.path.exists(outname):
                    from nipype import config
                    config.enable_provenance()
                    from nipype import Function, Node
                    from nipype.interfaces.base import isdefined
                    print(converter)  # was a Python 2 print statement
                    if converter == 'mri_convert':
                        from nipype.interfaces.freesurfer.preprocess import MRIConvert
                        convertnode = Node(MRIConvert(), name='convert')
                        convertnode.base_dir = tmpdir
                        if outtype == 'nii.gz':
                            convertnode.inputs.out_type = 'niigz'
                        convertnode.inputs.in_file = item[2][0]
                        convertnode.inputs.out_file = outname
                        res = convertnode.run()
                    elif converter == 'dcm2nii':
                        from nipype.interfaces.dcm2nii import Dcm2nii
                        convertnode = Node(Dcm2nii(), name='convert')
                        convertnode.base_dir = tmpdir
                        convertnode.inputs.source_names = item[2]
                        convertnode.inputs.gzip_output = outtype == 'nii.gz'
                        convertnode.inputs.terminal_output = 'allatonce'
                        res = convertnode.run()
                        if isinstance(res.outputs.converted_files, list):
                            print("Cannot convert dicom files - series likely has multiple orientations: ", item[2])
                            continue
                        else:
                            shutil.copyfile(res.outputs.converted_files, outname)
                        if isdefined(res.outputs.bvecs):
                            outname_bvecs = prefix + '.bvecs'
                            outname_bvals = prefix + '.bvals'
                            shutil.copyfile(res.outputs.bvecs, outname_bvecs)
                            shutil.copyfile(res.outputs.bvals, outname_bvals)
                    # keep the provenance record produced by the conversion
                    prov_file = prefix + '_prov.ttl'
                    shutil.copyfile(os.path.join(convertnode.base_dir,
                                                 convertnode.name,
                                                 'provenance.ttl'),
                                    prov_file)
                    prov_files.append(prov_file)
                    # embed DICOM metadata into the NIfTI
                    embedfunc = Node(Function(input_names=['dcmfiles',
                                                           'niftifile',
                                                           'infofile',
                                                           'force'],
                                              output_names=['outfile',
                                                            'meta'],
                                              function=embed_nifti),
                                     name='embedder')
                    embedfunc.inputs.dcmfiles = item[2]
                    embedfunc.inputs.niftifile = outname

                    embedfunc.inputs.infofile = scaninfo
                    embedfunc.inputs.force = True
                    embedfunc.base_dir = tmpdir
                    res = embedfunc.run()
                    # merge the embedder's provenance into the prov file
                    g = res.provenance.rdf()
                    g.parse(prov_file,
                            format='turtle')
                    g.serialize(prov_file, format='turtle')
                    # read-only outputs (0440 was the Python 2 octal form)
                    os.chmod(outname, 0o440)
                    os.chmod(scaninfo, 0o440)
                    os.chmod(prov_file, 0o440)

    shutil.rmtree(tmpdir)
Example #21
0
## quick & basic converter for my own MRI files to swap into nifti from dicoms
## I deserve this for sitting in MRI scanner for an hour

from nipype.interfaces.dcm2nii import Dcm2nii
import networkx

converter = Dcm2nii()

# throw inputs here
converter.inputs.source_names = ['./../ute/E5466S1I1.DCM']

# says this is a good idea online bc of size/compression
converter.inputs.gzip_output = True
converter.inputs.output_dir = '.'

# NOTE(review): the next two lines look like an interactive-session paste:
# the bare attribute access assembles the command line and the string
# below is its echoed value -- both are no-ops when run as a script.
converter.cmdline
'dcm2nii -a y -c y -b config.ini -v y -d y -e y -g y -i n -n y -o . -p y -x n -f n /../ute/E5466S1I1.DCM'
Example #22
0
def Couple_Preproc_Pipeline(base_dir=None,
                            output_dir=None,
                            subject_id=None,
                            spm_path=None):
    """ Create a preprocessing workflow for the Couples Conflict Study using nipype

    The pipeline: DICOM conversion -> TOPUP distortion correction ->
    trim/realign/coregister -> SPM12 normalization -> smoothing, with
    artifact detection and QA plots, all collected into a DataSink.

    Args:
        base_dir: path to data folder where raw subject folder is located
        output_dir: path to where key output files should be saved
        subject_id: subject_id (str)
        spm_path: path to spm folder

    Returns:
        workflow: a nipype workflow that can be run
        
    """

    from nipype.interfaces.dcm2nii import Dcm2nii
    from nipype.interfaces.fsl import Merge, TOPUP, ApplyTOPUP
    import nipype.interfaces.io as nio
    import nipype.interfaces.utility as util
    from nipype.interfaces.utility import Merge as Merge_List
    from nipype.pipeline.engine import Node, Workflow
    from nipype.interfaces.fsl.maths import UnaryMaths
    from nipype.interfaces.nipy.preprocess import Trim
    from nipype.algorithms.rapidart import ArtifactDetect
    from nipype.interfaces import spm
    from nipype.interfaces.spm import Normalize12
    from nipype.algorithms.misc import Gunzip
    from nipype.interfaces.nipy.preprocess import ComputeMask
    import nipype.interfaces.matlab as mlab
    from nltools.utils import get_resource_path, get_vox_dims, get_n_volumes
    from nltools.interfaces import Plot_Coregistration_Montage, PlotRealignmentParameters, Create_Covariates
    import os
    import glob

    ########################################
    ## Setup Paths and Nodes
    ########################################

    # Specify Paths
    canonical_file = os.path.join(spm_path, 'canonical', 'single_subj_T1.nii')
    template_file = os.path.join(spm_path, 'tpm', 'TPM.nii')

    # Set the way matlab should be called
    mlab.MatlabCommand.set_default_matlab_cmd("matlab -nodesktop -nosplash")
    mlab.MatlabCommand.set_default_paths(spm_path)

    # Get File Names for different types of scans.  Parse into separate processing streams
    datasource = Node(interface=nio.DataGrabber(
        infields=['subject_id'], outfields=['struct', 'ap', 'pa']),
                      name='datasource')
    datasource.inputs.base_directory = base_dir
    datasource.inputs.template = '*'
    # Glob patterns relative to base_directory; %s is filled from template_args below.
    datasource.inputs.field_template = {
        'struct': '%s/Study*/t1w_32ch_mpr_08mm*',
        'ap': '%s/Study*/distortion_corr_32ch_ap*',
        'pa': '%s/Study*/distortion_corr_32ch_pa*'
    }
    datasource.inputs.template_args = {
        'struct': [['subject_id']],
        'ap': [['subject_id']],
        'pa': [['subject_id']]
    }
    datasource.inputs.subject_id = subject_id
    datasource.inputs.sort_filelist = True

    # iterate over functional scans to define paths
    scan_file_list = glob.glob(
        os.path.join(base_dir, subject_id, 'Study*', '*'))
    func_list = [s for s in scan_file_list if "romcon_ap_32ch_mb8" in s]
    func_list = [s for s in func_list
                 if "SBRef" not in s]  # Exclude sbref for now.
    func_source = Node(interface=util.IdentityInterface(fields=['scan']),
                       name="func_source")
    # One workflow iteration per functional scan directory.
    func_source.iterables = ('scan', func_list)

    # Create Separate Converter Nodes for each different type of file. (dist corr scans need to be done before functional)
    ap_dcm2nii = Node(interface=Dcm2nii(), name='ap_dcm2nii')
    ap_dcm2nii.inputs.gzip_output = True
    ap_dcm2nii.inputs.output_dir = '.'
    ap_dcm2nii.inputs.date_in_filename = False

    pa_dcm2nii = Node(interface=Dcm2nii(), name='pa_dcm2nii')
    pa_dcm2nii.inputs.gzip_output = True
    pa_dcm2nii.inputs.output_dir = '.'
    pa_dcm2nii.inputs.date_in_filename = False

    f_dcm2nii = Node(interface=Dcm2nii(), name='f_dcm2nii')
    f_dcm2nii.inputs.gzip_output = True
    f_dcm2nii.inputs.output_dir = '.'
    f_dcm2nii.inputs.date_in_filename = False

    s_dcm2nii = Node(interface=Dcm2nii(), name='s_dcm2nii')
    s_dcm2nii.inputs.gzip_output = True
    s_dcm2nii.inputs.output_dir = '.'
    s_dcm2nii.inputs.date_in_filename = False

    ########################################
    ## Setup Nodes for distortion correction
    ########################################

    # merge output files into list
    # NOTE(review): 'infields' is not a Node constructor argument (Merge(2)
    # already exposes in1/in2); confirm it is harmless and consider removing.
    merge_to_file_list = Node(interface=Merge_List(2),
                              infields=['in1', 'in2'],
                              name='merge_to_file_list')

    # fsl merge AP + PA files (depends on direction)
    merger = Node(interface=Merge(dimension='t'), name='merger')
    merger.inputs.output_type = 'NIFTI_GZ'

    # use topup to create distortion correction map
    topup = Node(interface=TOPUP(), name='topup')
    topup.inputs.encoding_file = os.path.join(get_resource_path(),
                                              'epi_params_APPA_MB8.txt')
    topup.inputs.output_type = "NIFTI_GZ"
    topup.inputs.config = 'b02b0.cnf'

    # apply topup to all functional images
    apply_topup = Node(interface=ApplyTOPUP(), name='apply_topup')
    # in_index=[1]: use the first row of the encoding file for the input series
    apply_topup.inputs.in_index = [1]
    apply_topup.inputs.encoding_file = os.path.join(get_resource_path(),
                                                    'epi_params_APPA_MB8.txt')
    apply_topup.inputs.output_type = "NIFTI_GZ"
    apply_topup.inputs.method = 'jac'
    apply_topup.inputs.interp = 'spline'

    # Clear out Zeros from spline interpolation using absolute value.
    abs_maths = Node(interface=UnaryMaths(), name='abs_maths')
    abs_maths.inputs.operation = 'abs'

    ########################################
    ## Preprocessing
    ########################################

    # Trim - remove first 10 TRs
    n_vols = 10
    trim = Node(interface=Trim(), name='trim')
    trim.inputs.begin_index = n_vols

    #Realignment - 6 parameters - realign to first image of very first series.
    realign = Node(interface=spm.Realign(), name="realign")
    realign.inputs.register_to_mean = True

    #Coregister - 12 parameters
    coregister = Node(interface=spm.Coregister(), name="coregister")
    coregister.inputs.jobtype = 'estwrite'

    #Plot Realignment
    plot_realign = Node(interface=PlotRealignmentParameters(),
                        name="plot_realign")

    #Artifact Detection
    art = Node(interface=ArtifactDetect(), name="art")
    # use_differences: [motion, intensity] — scan-to-scan differences for motion only
    art.inputs.use_differences = [True, False]
    art.inputs.use_norm = True
    art.inputs.norm_threshold = 1
    art.inputs.zintensity_threshold = 3
    art.inputs.mask_type = 'file'
    art.inputs.parameter_source = 'SPM'

    # Gunzip - unzip the functional and structural images
    gunzip_struc = Node(Gunzip(), name="gunzip_struc")
    gunzip_func = Node(Gunzip(), name="gunzip_func")

    # Normalize - normalizes functional and structural images to the MNI template
    normalize = Node(interface=Normalize12(jobtype='estwrite',
                                           tpm=template_file),
                     name="normalize")

    #Plot normalization Check
    plot_normalization_check = Node(interface=Plot_Coregistration_Montage(),
                                    name="plot_normalization_check")
    plot_normalization_check.inputs.canonical_img = canonical_file

    #Create Mask
    compute_mask = Node(interface=ComputeMask(), name="compute_mask")
    #remove lower 5% of histogram of mean image
    compute_mask.inputs.m = .05

    #Smooth
    #implicit masking (.im) = 0, dtype = 0
    smooth = Node(interface=spm.Smooth(), name="smooth")
    smooth.inputs.fwhm = 6

    #Create Covariate matrix
    make_cov = Node(interface=Create_Covariates(), name="make_cov")

    # Create a datasink to clean up output files
    datasink = Node(interface=nio.DataSink(), name='datasink')
    datasink.inputs.base_directory = output_dir
    datasink.inputs.container = subject_id

    ########################################
    # Create Workflow
    ########################################

    workflow = Workflow(name='Preprocessed')
    workflow.base_dir = os.path.join(base_dir, subject_id)
    # Wire the graph: each tuple is (source_node, dest_node, [(out, in), ...]).
    workflow.connect([
        (datasource, ap_dcm2nii, [('ap', 'source_dir')]),
        (datasource, pa_dcm2nii, [('pa', 'source_dir')]),
        (datasource, s_dcm2nii, [('struct', 'source_dir')]),
        (func_source, f_dcm2nii, [('scan', 'source_dir')]),
        (ap_dcm2nii, merge_to_file_list, [('converted_files', 'in1')]),
        (pa_dcm2nii, merge_to_file_list, [('converted_files', 'in2')]),
        (merge_to_file_list, merger, [('out', 'in_files')]),
        (merger, topup, [('merged_file', 'in_file')]),
        (topup, apply_topup, [('out_fieldcoef', 'in_topup_fieldcoef'),
                              ('out_movpar', 'in_topup_movpar')]),
        (f_dcm2nii, trim, [('converted_files', 'in_file')]),
        (trim, apply_topup, [('out_file', 'in_files')]),
        (apply_topup, abs_maths, [('out_corrected', 'in_file')]),
        (abs_maths, gunzip_func, [('out_file', 'in_file')]),
        (gunzip_func, realign, [('out_file', 'in_files')]),
        (s_dcm2nii, gunzip_struc, [('converted_files', 'in_file')]),
        (gunzip_struc, coregister, [('out_file', 'source')]),
        (coregister, normalize, [('coregistered_source', 'image_to_align')]),
        (realign, coregister, [('mean_image', 'target'),
                               ('realigned_files', 'apply_to_files')]),
        # get_vox_dims runs as a connection function on the mean image
        (realign, normalize, [(('mean_image', get_vox_dims),
                               'write_voxel_sizes')]),
        (coregister, normalize, [('coregistered_files', 'apply_to_files')]),
        (normalize, smooth, [('normalized_files', 'in_files')]),
        (realign, compute_mask, [('mean_image', 'mean_volume')]),
        (compute_mask, art, [('brain_mask', 'mask_file')]),
        (realign, art, [('realignment_parameters', 'realignment_parameters'),
                        ('realigned_files', 'realigned_files')]),
        (realign, plot_realign, [('realignment_parameters',
                                  'realignment_parameters')]),
        (normalize, plot_normalization_check, [('normalized_files', 'wra_img')
                                               ]),
        (realign, make_cov, [('realignment_parameters',
                              'realignment_parameters')]),
        (art, make_cov, [('outlier_files', 'spike_id')]),
        (normalize, datasink, [('normalized_files', 'structural.@normalize')]),
        (coregister, datasink, [('coregistered_source', 'structural.@struct')
                                ]),
        (topup, datasink, [('out_fieldcoef', 'distortion.@fieldcoef')]),
        (topup, datasink, [('out_movpar', 'distortion.@movpar')]),
        (smooth, datasink, [('smoothed_files', 'functional.@smooth')]),
        (plot_realign, datasink, [('plot', 'functional.@plot_realign')]),
        (plot_normalization_check, datasink,
         [('plot', 'functional.@plot_normalization')]),
        (make_cov, datasink, [('covariates', 'functional.@covariates')])
    ])
    return workflow
def test_Dcm2nii_inputs():
    """Check that every Dcm2nii input trait carries the expected metadata."""

    # Most boolean switches share one shape — a flag argstr plus
    # usedefault=True — so build those with a small local helper.
    def _flag(argstr):
        return dict(argstr=argstr, usedefault=True)

    input_map = dict(
        anonymize=_flag('-a'),
        args=dict(argstr='%s'),
        collapse_folders=_flag('-c'),
        config_file=dict(argstr='-b %s', genfile=True),
        convert_all_pars=_flag('-v'),
        date_in_filename=_flag('-d'),
        environ=dict(nohash=True, usedefault=True),
        events_in_filename=_flag('-e'),
        gzip_output=_flag('-g'),
        id_in_filename=_flag('-i'),
        ignore_exception=dict(nohash=True, usedefault=True),
        nii_output=_flag('-n'),
        output_dir=dict(argstr='-o %s', genfile=True),
        protocol_in_filename=_flag('-p'),
        reorient=dict(argstr='-r'),
        reorient_and_crop=_flag('-x'),
        source_dir=dict(argstr='%s', mandatory=True, position=-1,
                        xor=['source_names']),
        source_in_filename=_flag('-f'),
        source_names=dict(argstr='%s', copyfile=False, mandatory=True,
                          position=-1, xor=['source_dir']),
        spm_analyze=dict(argstr='-s', xor=['nii_output']),
        terminal_output=dict(mandatory=True, nohash=True),
    )
    inputs = Dcm2nii.input_spec()

    # Yield one assertion per (trait, metadata key) pair, nose-generator style.
    for key, metadata in input_map.items():
        trait = inputs.traits()[key]
        for metakey, expected in metadata.items():
            yield assert_equal, getattr(trait, metakey), expected
### Grey Matter / White Matter post processing pipeline for T1/T2 weighted images
## based on commandline protocol

from nipype.interfaces.dcm2nii import Dcm2nii
import nipype.interfaces.fsl as fsl
import nipype.pipeline.engine as pe
import nipype.interfaces.io as nio

# Standalone converter instance; configured here but not run directly
# (the MapNode below is the one wired into the workflow).
converter = Dcm2nii()
converter.inputs.source_names = ['E5466S1I1.DCM']

# DICOM -> NIfTI conversion, one invocation per entry in 'source_names'
convert = pe.MapNode(interface=Dcm2nii(), name='conv', iterfield=['source_names'])

# skull stripping at three fractional-intensity thresholds
bet = pe.MapNode(interface=fsl.BET(), name='bet', iterfield=['frac'])
bet.inputs.frac = [0.8, 0.5, 0.2]

# segmentation
fast = pe.MapNode(interface=fsl.FAST(), name='fast', iterfield=['in_files'])

# registration
flirt = pe.MapNode(interface=fsl.FLIRT(), name='flirt', iterfield=['in_file'])

# DataSink is a plain Node; the original passed iterfield=['in_files'],
# which is a MapNode-only argument and is not honored here — dropped.
ds = pe.Node(interface=nio.DataSink(), name="ds")
ds.inputs.base_directory = '.'

workflow = pe.Workflow(name='T1_T2_Segmentation')
workflow.base_dir = './output'
## Example #25
# <codecell>

from glob import glob
from nipype.interfaces.dcm2nii import Dcm2nii
from nipype.interfaces.fsl import DTIFit, BET

import nipype.pipeline.engine as pe

# <markdowncell>

# ### Version 2: Mapnodes

# <codecell>

# DICOM -> NIfTI conversion, one run per source-names entry
convert = pe.MapNode(Dcm2nii(),
                     name='convert_dicom',
                     iterfield=['source_names'])
# BET skull stripping; mask=True also emits a binary brain mask
skull_stripper = pe.MapNode(BET(mask=True),
                            name='skull_stripper',
                            iterfield=['in_file'])
# diffusion tensor fitting
dtifit = pe.MapNode(DTIFit(),
                    name='dtifit',
                    iterfield=['dwi', 'bvals', 'bvecs', 'mask'])

convert_flow = pe.Workflow(name='convert_and_fit_mapnode')
# NOTE: the original snippet was truncated mid-call; the closing '])' has
# been restored so the statement parses.
convert_flow.connect([
    (convert, dtifit, [('converted_files', 'dwi'), ('bvals', 'bvals'),
                       ('bvecs', 'bvecs')]),
    (convert, skull_stripper, [('converted_files', 'in_file')]),
    (skull_stripper, dtifit, [('mask_file', 'mask')]),
])
## Example #26
# Convert the T1 mgz image to nifti format for later usage
mriConverter = Node(freesurfer.preprocess.MRIConvert(),
                    name='convertAparcAseg')
#convertT1.inputs.out_file = subPath + reconallFolderName + '/mri/aparc+aseg.nii.gz'
# 'niigz' = gzipped NIfTI output; force RAS orientation for consistency
mriConverter.inputs.out_type = 'niigz'
mriConverter.inputs.out_orientation = 'RAS'

# ### Diffusion Data (dwMRI) preprocessing

# In[ ]:

# First extract the diffusion vectors and the pulse intensity (bvec and bval)
# Use dcm2nii for this task
#dwiFinderNode = t1FinderNode.clone('dwiFinder')

# Keep date/event tokens out of the output filenames so downstream nodes
# get stable, predictable names.
dcm2niiNode = Node(Dcm2nii(), name='dcm2niiAndBvecs')
dcm2niiNode.inputs.gzip_output = True
dcm2niiNode.inputs.date_in_filename = False
dcm2niiNode.inputs.events_in_filename = False

# Extract the first image of the DTI series i.e. the b0 image
# (extractB0 is defined elsewhere in this file — wrapped as a nipype Function node)
extrctB0Node = Node(Function(input_names=['dwMriFile'],
                             output_names=['b0'],
                             function=extractB0),
                    name='Extract_b0')

# Perform the registration between subject T1 space and dwMRI space
# FSL-based initialization; 't2' contrast is appropriate for EPI/b0 targets
bbregNode = Node(freesurfer.preprocess.BBRegister(), name='BBRegister')
bbregNode.inputs.init = "fsl"
bbregNode.inputs.contrast_type = "t2"
bbregNode.inputs.epi_mask = True
def create_gif_pseudoct_workflow(in_ute_echo2_file,
                                 in_ute_umap_dir,
                                 in_db_file,
                                 cpp_dir,
                                 in_t1_file=None,
                                 in_t2_file=None,
                                 in_mask_file=None,
                                 in_nac_pet_dir=None,
                                 name='gif_pseudoct'):
    """create_gif_pseudoct_workflow.

    Build a pseudo-CT workflow: bias-correct the T1/T2 inputs, run GIF to
    synthesise a pseudo-CT, convert it to a umap, and (optionally) produce
    NAC-PET-aligned umap variants.  At least one of in_t1_file / in_t2_file
    must be provided.

    @param in_ute_echo2_file  input UTE echo file
    @param in_ute_umap_dir    input UTE umap file
    @param in_db_file         input database xml file for the GIF algorithm
    @param cpp_dir            cpp directory
    @param in_t1_file         input T1 target file
    @param in_t2_file         input T2 target file
    @param in_mask_file       optional input mask for the target T1 file
    @param in_nac_pet_dir     optional directory of non-attenuation-corrected PET DICOMs
    @param name               optional name of the pipeline
    """

    # Prefer T1 as the structural reference; fall back to T2.
    in_file = in_t1_file if in_t1_file else in_t2_file
    subject_id = split_filename(os.path.basename(in_file))[1]

    workflow = pe.Workflow(name=name)
    workflow.base_output_dir = name

    gif = pe.Node(interface=Gif(database_file=in_db_file,
                                cpp_dir=cpp_dir,
                                lncc_ker=3,
                                regNMI=True,
                                regBE=0.01),
                  name='gif')
    if in_mask_file:
        gif.inputs.mask_file = in_mask_file

    # Create empty masks for the bias correction to cover the full image
    t1_full_mask = pe.Node(interface=niu.Function(input_names=['in_file'],
                                                  output_names=['out_file'],
                                                  function=create_full_mask),
                           name='t1_full_mask')
    t1_full_mask.inputs.in_file = in_t1_file
    t2_full_mask = pe.Node(interface=niu.Function(input_names=['in_file'],
                                                  output_names=['out_file'],
                                                  function=create_full_mask),
                           name='t2_full_mask')
    t2_full_mask.inputs.in_file = in_t2_file

    # Create bias correction nodes that are adapted to our needs. i.e. Boost the T2 bias correction
    bias_correction_t1 = pe.Node(interface=N4BiasCorrection(),
                                 name='bias_correction_t1')
    if in_t1_file:
        bias_correction_t1.inputs.in_file = in_t1_file

    # Create bias correction nodes that are adapted to our needs. i.e. Boost the T2 bias correction
    bias_correction_t2 = pe.Node(interface=N4BiasCorrection(
        in_maxiter=300, in_convergence=0.0001),
                                 name='bias_correction_t2')
    if in_t2_file:
        bias_correction_t2.inputs.in_file = in_t2_file

    # Only connect the nodes if the input image exist respectively
    if in_t1_file:
        workflow.connect(t1_full_mask, 'out_file', bias_correction_t1,
                         'mask_file')
    if in_t2_file:
        workflow.connect(t2_full_mask, 'out_file', bias_correction_t2,
                         'mask_file')

    if in_t1_file and in_t2_file:
        # Both modalities available: affinely align T2 to T1, resample, and
        # feed the merged 4D pair into GIF.
        affine_mr_target = pe.Node(interface=niftyreg.RegAladin(maxit_val=10),
                                   name='affine_mr_target')
        workflow.connect(bias_correction_t1, 'out_file', affine_mr_target,
                         'ref_file')
        workflow.connect(bias_correction_t2, 'out_file', affine_mr_target,
                         'flo_file')
        resample_mr_target = pe.Node(
            interface=niftyreg.RegResample(pad_val=float('nan')),
            name='resample_MR_target')
        workflow.connect(bias_correction_t1, 'out_file', resample_mr_target,
                         'ref_file')
        workflow.connect(bias_correction_t2, 'out_file', resample_mr_target,
                         'flo_file')
        lister = pe.Node(interface=niu.Merge(2), name='lister')
        merger = pe.Node(interface=fsl.Merge(dimension='t',
                                             output_type='NIFTI_GZ'),
                         name='fsl_merge')
        workflow.connect(affine_mr_target, 'aff_file', resample_mr_target,
                         'trans_file')
        workflow.connect(bias_correction_t1, 'out_file', lister, 'in1')
        workflow.connect(resample_mr_target, 'out_file', lister, 'in2')
        workflow.connect(lister, 'out', merger, 'in_files')
        workflow.connect(merger, 'merged_file', gif, 'in_file')
    else:
        # Single-modality case: whichever bias-corrected image exists goes to GIF.
        if in_t1_file:
            workflow.connect(bias_correction_t1, 'out_file', gif, 'in_file')
        if in_t2_file:
            workflow.connect(bias_correction_t2, 'out_file', gif, 'in_file')

    # Convert the synthesised pseudo-CT from Hounsfield units to a umap.
    pct_hu_to_umap = pe.Node(interface=niu.Function(
        input_names=['pCT_file', 'structural_mri_file', 'ute_echo2_file'],
        output_names=['pct_umap_file'],
        function=convert_pct_hu_to_umap),
                             name='pCT_HU_to_umap')
    pct_hu_to_umap.inputs.structural_mri_file = in_file
    pct_hu_to_umap.inputs.ute_echo2_file = in_ute_echo2_file
    workflow.connect(gif, 'synth_file', pct_hu_to_umap, 'pCT_file')

    pct2dcm_pct_umap = pe.Node(interface=Pct2Dcm(in_umap_name='pCT_umap'),
                               name='pct2dcm_pct_umap')
    workflow.connect(pct_hu_to_umap, 'pct_umap_file', pct2dcm_pct_umap,
                     'in_umap_file')
    pct2dcm_pct_umap.inputs.in_ute_umap_dir = os.path.abspath(in_ute_umap_dir)

    # Baseline: two outputs (GIF synth + pCT umap); grows to four when the
    # NAC-PET branch below is active.
    merger_output_number = 2

    pct2dcm_ute_umap_end = None
    pct2dcm_pct_umap_end = None
    if in_nac_pet_dir:

        # Optional branch: register UTE to the NAC PET and resample both the
        # UTE umap and the pCT umap into that space.
        ute_umap_dcm2nii = pe.Node(
            interface=Dcm2nii(source_dir=in_ute_umap_dir),
            name='ute_umap_dcm2nii')
        first_item_selector = pe.Node(interface=niu.Select(index=0),
                                      name='first_item_selector')
        workflow.connect(ute_umap_dcm2nii, 'converted_files',
                         first_item_selector, 'inlist')

        nac_extractor = pe.Node(interface=niu.Function(
            input_names=['dicom_folder'],
            output_names=['nifti_file'],
            function=extract_nac_pet),
                                name='nac_extractor')
        nac_extractor.inputs.dicom_folder = in_nac_pet_dir

        # Rigid-only registration of the UTE echo to the NAC PET.
        ute_to_nac_registration = pe.Node(
            interface=niftyreg.RegAladin(rig_only_flag=True),
            name='ute_to_nac_registration')
        workflow.connect(nac_extractor, 'nifti_file', ute_to_nac_registration,
                         'ref_file')
        ute_to_nac_registration.inputs.flo_file = in_ute_echo2_file

        ute_resample = pe.Node(interface=niftyreg.RegResample(),
                               name='ute_resample')
        workflow.connect(first_item_selector, 'out', ute_resample, 'ref_file')
        workflow.connect(first_item_selector, 'out', ute_resample, 'flo_file')
        workflow.connect(ute_to_nac_registration, 'aff_file', ute_resample,
                         'aff_file')

        pct2dcm_ute_umap_end = pe.Node(
            interface=Pct2Dcm(in_umap_name='UTE_umap_end'),
            name='pct2dcm_ute_umap_end')
        workflow.connect(ute_resample, 'res_file', pct2dcm_ute_umap_end,
                         'in_umap_file')
        pct2dcm_ute_umap_end.inputs.in_ute_umap_dir = os.path.abspath(
            in_ute_umap_dir)

        pct_resample = pe.Node(interface=niftyreg.RegResample(),
                               name='pct_resample')
        workflow.connect(pct_hu_to_umap, 'pct_umap_file', pct_resample,
                         'ref_file')
        workflow.connect(pct_hu_to_umap, 'pct_umap_file', pct_resample,
                         'flo_file')
        workflow.connect(ute_to_nac_registration, 'aff_file', pct_resample,
                         'aff_file')

        pct2dcm_pct_umap_end = pe.Node(
            interface=Pct2Dcm(in_umap_name='pCT_umap_end'),
            name='pct2dcm_pct_umap_end')
        workflow.connect(pct_resample, 'res_file', pct2dcm_pct_umap_end,
                         'in_umap_file')
        pct2dcm_pct_umap_end.inputs.in_ute_umap_dir = os.path.abspath(
            in_ute_umap_dir)

        merger_output_number = 4

    # merge output
    output_merger = pe.Node(
        interface=niu.Merge(numinputs=merger_output_number),
        name='output_merger')
    workflow.connect(gif, 'synth_file', output_merger, 'in1')
    workflow.connect(pct2dcm_pct_umap, 'output_file', output_merger, 'in2')

    # Rename outputs to '<subject_id>_<type>' keeping the file extension.
    renamer = pe.Node(interface=niu.Rename(format_string=subject_id +
                                           "_%(type)s",
                                           keep_ext=True),
                      name='renamer')
    if in_nac_pet_dir:
        workflow.connect(pct2dcm_ute_umap_end, 'output_file', output_merger,
                         'in3')
        workflow.connect(pct2dcm_pct_umap_end, 'output_file', output_merger,
                         'in4')
        renamer.inputs.type = ['synth', 'pct', 'ute_end', 'pct_end']
    else:
        renamer.inputs.type = ['synth', 'pct']
    workflow.connect(output_merger, 'out', renamer, 'in_file')

    return workflow
## Example #28
def test_Dcm2nii_inputs():
    """Verify the metadata declared on each Dcm2nii input trait."""
    input_map = dict(
        anonymize=dict(argstr='-a', usedefault=True),
        args=dict(argstr='%s'),
        collapse_folders=dict(argstr='-c', usedefault=True),
        config_file=dict(argstr='-b %s', genfile=True),
        convert_all_pars=dict(argstr='-v', usedefault=True),
        date_in_filename=dict(argstr='-d', usedefault=True),
        environ=dict(nohash=True, usedefault=True),
        events_in_filename=dict(argstr='-e', usedefault=True),
        gzip_output=dict(argstr='-g', usedefault=True),
        id_in_filename=dict(argstr='-i', usedefault=True),
        ignore_exception=dict(nohash=True, usedefault=True),
        nii_output=dict(argstr='-n', usedefault=True),
        output_dir=dict(argstr='-o %s', genfile=True),
        protocol_in_filename=dict(argstr='-p', usedefault=True),
        reorient=dict(argstr='-r'),
        reorient_and_crop=dict(argstr='-x', usedefault=True),
        source_dir=dict(argstr='%s', mandatory=True, position=-1,
                        xor=['source_names']),
        source_in_filename=dict(argstr='-f', usedefault=True),
        source_names=dict(argstr='%s', copyfile=False, mandatory=True,
                          position=-1, xor=['source_dir']),
        spm_analyze=dict(argstr='-s', xor=['nii_output']),
        terminal_output=dict(nohash=True),
    )
    inputs = Dcm2nii.input_spec()
    traits = inputs.traits()

    # One nose-style generated assertion per (trait, metadata key) pair.
    for key, metadata in input_map.items():
        for metakey, value in metadata.items():
            yield assert_equal, getattr(traits[key], metakey), value