Example #1
def mgh2nii(filename, path_output, out_type="nii"):
    """
    This function converts a volume file from freesurfer mgh to nifti format.
    Inputs:
        *filename: full path of the input file.
        *path_output: path where output is written.
        *out_type: target type of file.
    
    created by Daniel Haenelt
    Date created: 06-01-2020             
    Last modified: 24-07-2020
    """
    import os
    from nipype.interfaces.freesurfer.preprocess import MRIConvert
    from lib.io.get_filename import get_filename

    # get filename
    path, name, ext = get_filename(filename)

    # convert volume to nifti format
    mc = MRIConvert()
    mc.inputs.in_file = filename
    mc.inputs.out_file = os.path.join(path_output, name + "." + out_type)
    mc.inputs.in_type = ext.replace('.', '')
    mc.inputs.out_type = out_type
    mc.run()
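A minimal usage sketch for mgh2nii; the paths below are hypothetical and a working FreeSurfer/nipype environment is assumed.

# hypothetical paths: convert a FreeSurfer orig.mgz to NIfTI in an output folder
mgh2nii("/data/freesurfer/sub-01/mri/orig.mgz", "/data/nifti", out_type="nii")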
Example #2
def upsample_mask_tissues(in_file):
    """
    To upsample the 1,2,3 compartment segmented volume in SPM
    Args:
        in_file: a list containing the three compartments

    Returns:
    """
    import os
    from nipype.interfaces.freesurfer.preprocess import MRIConvert

    out_file = []

    if isinstance(in_file, str):  # just one compartment
        out_mask = os.path.basename(in_file).split('.nii.gz')[0] + '_upsample.nii.gz'
        mc = MRIConvert()
        mc.inputs.in_file = in_file
        mc.inputs.out_file = out_mask
        mc.inputs.vox_size = (1, 1, 1)
        mc.run()

        out_file.append(os.path.abspath(os.path.join(os.getcwd(), out_mask)))
    else: # more than one compartment
        for tissue in in_file:
            out_mask = os.path.basename(tissue).split('.nii.gz')[0] + '_upsample.nii.gz'
            mc = MRIConvert()
            mc.inputs.in_file = tissue
            mc.inputs.out_file = out_mask
            mc.inputs.vox_size = (1, 1, 1)
            mc.run()

            out_file.append(os.path.abspath(os.path.join(os.getcwd(), out_mask)))

    return out_file
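A hedged usage sketch; the file names below mimic SPM's c1/c2/c3 segmentation outputs and are hypothetical.

# hypothetical SPM tissue compartments (c1: GM, c2: WM, c3: CSF), upsampled to 1 mm isotropic
tissues = ["c1T1w.nii.gz", "c2T1w.nii.gz", "c3T1w.nii.gz"]
upsampled_files = upsample_mask_tissues(tissues)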
Example #3
def preprocess(input_file,
               output_dir,
               conform=True,
               bias_correct=True,
               skullstrip=True):
    # imports assumed to live at module level in the original source
    from nipype import Node, Workflow
    from nipype.interfaces.ants import N4BiasFieldCorrection
    from nipype.interfaces.freesurfer.preprocess import MRIConvert
    # ROBEX is assumed to be a custom nipype interface provided by the surrounding package

    preprocess_flow = Workflow(name='preprocess', base_dir=output_dir)

    conform = Node(MRIConvert(conform=True,
                              out_type='niigz',
                              out_file='conformed.nii.gz'),
                   name='conform')
    n4 = Node(N4BiasFieldCorrection(dimension=3,
                                    bspline_fitting_distance=300,
                                    shrink_factor=3,
                                    n_iterations=[50, 50, 30, 20],
                                    output_image='n4.nii.gz'),
              name='n4')
    robex = Node(ROBEX(seed=1729, stripped_image='brain.nii.gz'), name='robex')

    preprocess_flow.connect([(conform, n4, [('out_file', 'input_image')]),
                             (n4, robex, [('output_image', 'input_image')])])

    preprocess_flow.write_graph(graph2use='orig')
    conform.inputs.in_file = input_file
    preprocess_flow.run('MultiProc', plugin_args={'n_procs': 5})
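A hedged invocation sketch for the workflow above; the input path is hypothetical, and FreeSurfer, ANTs, and ROBEX are assumed to be installed.

# hypothetical input: conform, bias-correct, and skull-strip a T1-weighted image
preprocess("/data/sub-01_T1w.nii.gz", "/data/preprocess_out")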
Example #4
def test_MRIConvert_outputs():
    output_map = dict(out_file=dict(), )
    outputs = MRIConvert.output_spec()

    for key, metadata in output_map.items():
        for metakey, value in metadata.items():
            yield assert_equal, getattr(outputs.traits()[key], metakey), value
Example #5
def test_MRIConvert_outputs():
    output_map = dict(out_file=dict(),
    )
    outputs = MRIConvert.output_spec()

    for key, metadata in output_map.items():
        for metakey, value in metadata.items():
            yield assert_equal, getattr(outputs.traits()[key], metakey), value
Example #6
def convert(items, anonymizer=None, symlink=True, converter=None):
    # imports assumed to live at module level in the original source;
    # embed_nifti is assumed to be defined elsewhere in that module
    import os
    import shutil
    from tempfile import mkdtemp

    prov_files = []
    tmpdir = mkdtemp()

    for item in items:
        if isinstance(item[1], (list, tuple)):
            outtypes = item[1]
        else:
            outtypes = [item[1]]
        prefix = item[0]
        print('Converting %s' % prefix)
        dirname = os.path.dirname(prefix + '.ext')
        print(dirname)
        if not os.path.exists(dirname):
            os.makedirs(dirname)
        for outtype in outtypes:
            print(outtype)
            if outtype == 'dicom':
                dicomdir = prefix + '_dicom'
                if os.path.exists(dicomdir):
                    shutil.rmtree(dicomdir)
                os.mkdir(dicomdir)
                for filename in item[2]:
                    outfile = os.path.join(dicomdir, os.path.split(filename)[1])
                    if not os.path.islink(outfile):
                        if symlink:
                            os.symlink(filename, outfile)
                        else:
                            os.link(filename, outfile)
            elif outtype in ['nii', 'nii.gz']:
                outname = prefix + '.' + outtype
                scaninfo = prefix + '_scaninfo.json'
                if not os.path.exists(outname):
                    from nipype import config
                    config.enable_provenance()
                    from nipype import Function, Node
                    from nipype.interfaces.base import isdefined
                    print(converter)
                    if converter == 'mri_convert':
                        from nipype.interfaces.freesurfer.preprocess import MRIConvert
                        convertnode = Node(MRIConvert(), name='convert')
                        convertnode.base_dir = tmpdir
                        if outtype == 'nii.gz':
                            convertnode.inputs.out_type = 'niigz'
                        convertnode.inputs.in_file = item[2][0]
                        convertnode.inputs.out_file = outname
                        #cmd = 'mri_convert %s %s' % (item[2][0], outname)
                        #print(cmd)
                        #os.system(cmd)
                        res = convertnode.run()
                    elif converter == 'dcm2nii':
                        from nipype.interfaces.dcm2nii import Dcm2nii
                        convertnode = Node(Dcm2nii(), name='convert')
                        convertnode.base_dir = tmpdir
                        convertnode.inputs.source_names = item[2]
                        convertnode.inputs.gzip_output = outtype == 'nii.gz'
                        convertnode.inputs.terminal_output = 'allatonce'
                        res = convertnode.run()
                        if isinstance(res.outputs.converted_files, list):
                            print("Cannot convert dicom files - series likely has multiple orientations: ", item[2])
                            continue
                        else:
                            shutil.copyfile(res.outputs.converted_files, outname)
                        if isdefined(res.outputs.bvecs):
                            outname_bvecs = prefix + '.bvecs'
                            outname_bvals = prefix + '.bvals'
                            shutil.copyfile(res.outputs.bvecs, outname_bvecs)
                            shutil.copyfile(res.outputs.bvals, outname_bvals)
                    prov_file = prefix + '_prov.ttl'
                    shutil.copyfile(os.path.join(convertnode.base_dir,
                                                 convertnode.name,
                                                 'provenance.ttl'),
                                    prov_file)
                    prov_files.append(prov_file)
                    embedfunc = Node(Function(input_names=['dcmfiles',
                                                           'niftifile',
                                                           'infofile',
                                                           'force'],
                                              output_names=['outfile',
                                                            'meta'],
                                              function=embed_nifti),
                                     name='embedder')
                    embedfunc.inputs.dcmfiles = item[2]
                    embedfunc.inputs.niftifile = outname

                    embedfunc.inputs.infofile = scaninfo
                    embedfunc.inputs.force = True
                    embedfunc.base_dir = tmpdir
                    res = embedfunc.run()
                    g = res.provenance.rdf()
                    g.parse(prov_file,
                            format='turtle')
                    g.serialize(prov_file, format='turtle')
                    #out_file, meta_dict = embed_nifti(item[2], outname, force=True)
                    os.chmod(outname, 0o440)
                    os.chmod(scaninfo, 0o440)
                    os.chmod(prov_file, 0o440)
    
    shutil.rmtree(tmpdir)
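A hedged usage sketch for convert; the item layout (output prefix, output type(s), DICOM file list) is inferred from how item[0], item[1], and item[2] are used above, and all paths are hypothetical.

# each item: (output_prefix, outtype or list of outtypes, list of DICOM files)
items = [("/out/sub-01/anat/sub-01_T1w",
          "nii.gz",
          ["/dicom/sub-01/anat/IM-0001.dcm", "/dicom/sub-01/anat/IM-0002.dcm"])]
convert(items, converter="mri_convert")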
Example #7
def psacnn_workflow(input_file,
                    output_dir,
                    use_preprocess=True,
                    model_file=None,
                    contrast='t1w',
                    use_gpu=True,
                    gpu_id=0,
                    save_label_image=False,
                    save_prob_image=False,
                    patch_size=96,
                    batch_size=4,
                    sample_rate=20000):

    subprocess.call(['mkdir', '-p', output_dir])
    if not use_gpu:
        os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
        os.environ["CUDA_VISIBLE_DEVICES"] = ""
        gpu_id = -1
        batch_size = 16
        sample_rate = 40000
    else:
        os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
        os.environ["CUDA_VISIBLE_DEVICES"] = str(gpu_id)
        batch_size = 4
        sample_rate = 20000

    if use_preprocess:
        preprocess_flow = Workflow(name='preprocess', base_dir=output_dir)

        conform = Node(MRIConvert(conform=True,
                                  out_type='niigz',
                                  out_file='conformed.nii.gz'),
                       name='conform')
        n4 = Node(N4BiasFieldCorrection(dimension=3,
                                        bspline_fitting_distance=300,
                                        shrink_factor=3,
                                        n_iterations=[50, 50, 30, 20],
                                        output_image='n4.nii.gz'),
                  name='n4')
        robex = Node(ROBEX(seed=1729, stripped_image='brain.nii.gz'),
                     name='robex')

        psacnn = Node(PSACNN(output_dir=output_dir,
                             contrast=contrast,
                             patch_size=patch_size,
                             batch_size=batch_size,
                             save_label_image=save_label_image,
                             save_prob_image=save_prob_image,
                             sample_rate=sample_rate),
                      name='psacnn')

        preprocess_flow.connect([
            (conform, n4, [('out_file', 'input_image')]),
            (n4, robex, [('output_image', 'input_image')]),
            (robex, psacnn, [('stripped_image', 'input_image')])
        ])

        preprocess_flow.write_graph(graph2use='orig')
        conform.inputs.in_file = input_file
        preprocess_flow.run('MultiProc', plugin_args={'n_procs': 16})
    else:

        psacnn = PSACNN(input_image=input_file,
                        output_dir=output_dir,
                        contrast=contrast,
                        patch_size=patch_size,
                        batch_size=batch_size,
                        save_label_image=save_label_image,
                        save_prob_image=save_prob_image,
                        sample_rate=sample_rate)
        # psacnn.inputs.input_image = input_file
        # psacnn.inputs.output_dir = output_dir
        # psacnn.inputs.contrast = contrast
        # psacnn.inputs.patch_size = patch_size
        # psacnn.inputs.batch_size = batch_size
        # psacnn.inputs.save_label_image = save_label_image
        # psacnn.inputs.save_prob_image = save_prob_image
        # psacnn.inputs.sample_rate = sample_rate

        psacnn.run()
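A minimal invocation sketch for psacnn_workflow; the paths are hypothetical and the example assumes a CPU-only run.

# hypothetical call: preprocess and segment a T1-weighted image without a GPU
psacnn_workflow("/data/sub-01_T1w.nii.gz", "/data/psacnn_out",
                use_preprocess=True, contrast="t1w", use_gpu=False)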
Example #8
def test_MRIConvert_inputs():
    input_map = dict(apply_inv_transform=dict(argstr='--apply_inverse_transform %s',
    ),
    apply_transform=dict(argstr='--apply_transform %s',
    ),
    args=dict(argstr='%s',
    ),
    ascii=dict(argstr='--ascii',
    ),
    autoalign_matrix=dict(argstr='--autoalign %s',
    ),
    color_file=dict(argstr='--color_file %s',
    ),
    conform=dict(argstr='--conform',
    ),
    conform_min=dict(argstr='--conform_min',
    ),
    conform_size=dict(argstr='--conform_size %s',
    ),
    crop_center=dict(argstr='--crop %d %d %d',
    ),
    crop_gdf=dict(argstr='--crop_gdf',
    ),
    crop_size=dict(argstr='--cropsize %d %d %d',
    ),
    cut_ends=dict(argstr='--cutends %d',
    ),
    devolve_transform=dict(argstr='--devolvexfm %s',
    ),
    drop_n=dict(argstr='--ndrop %d',
    ),
    environ=dict(nohash=True,
    usedefault=True,
    ),
    fill_parcellation=dict(argstr='--fill_parcellation',
    ),
    force_ras=dict(argstr='--force_ras_good',
    ),
    frame=dict(argstr='--frame %d',
    ),
    frame_subsample=dict(argstr='--fsubsample %d %d %d',
    ),
    fwhm=dict(argstr='--fwhm %f',
    ),
    ignore_exception=dict(nohash=True,
    usedefault=True,
    ),
    in_center=dict(argstr='--in_center %s',
    ),
    in_file=dict(argstr='--input_volume %s',
    mandatory=True,
    position=-2,
    ),
    in_i_dir=dict(argstr='--in_i_direction %f %f %f',
    ),
    in_i_size=dict(argstr='--in_i_size %d',
    ),
    in_info=dict(argstr='--in_info',
    ),
    in_j_dir=dict(argstr='--in_j_direction %f %f %f',
    ),
    in_j_size=dict(argstr='--in_j_size %d',
    ),
    in_k_dir=dict(argstr='--in_k_direction %f %f %f',
    ),
    in_k_size=dict(argstr='--in_k_size %d',
    ),
    in_like=dict(argstr='--in_like %s',
    ),
    in_matrix=dict(argstr='--in_matrix',
    ),
    in_orientation=dict(argstr='--in_orientation %s',
    ),
    in_scale=dict(argstr='--scale %f',
    ),
    in_stats=dict(argstr='--in_stats',
    ),
    in_type=dict(argstr='--in_type %s',
    ),
    invert_contrast=dict(argstr='--invert_contrast %f',
    ),
    midframe=dict(argstr='--mid-frame',
    ),
    no_change=dict(argstr='--nochange',
    ),
    no_scale=dict(argstr='--no_scale 1',
    ),
    no_translate=dict(argstr='--no_translate',
    ),
    no_write=dict(argstr='--no_write',
    ),
    out_center=dict(argstr='--out_center %f %f %f',
    ),
    out_datatype=dict(argstr='--out_data_type %s',
    ),
    out_file=dict(argstr='--output_volume %s',
    genfile=True,
    position=-1,
    ),
    out_i_count=dict(argstr='--out_i_count %d',
    ),
    out_i_dir=dict(argstr='--out_i_direction %f %f %f',
    ),
    out_i_size=dict(argstr='--out_i_size %d',
    ),
    out_info=dict(argstr='--out_info',
    ),
    out_j_count=dict(argstr='--out_j_count %d',
    ),
    out_j_dir=dict(argstr='--out_j_direction %f %f %f',
    ),
    out_j_size=dict(argstr='--out_j_size %d',
    ),
    out_k_count=dict(argstr='--out_k_count %d',
    ),
    out_k_dir=dict(argstr='--out_k_direction %f %f %f',
    ),
    out_k_size=dict(argstr='--out_k_size %d',
    ),
    out_matrix=dict(argstr='--out_matrix',
    ),
    out_orientation=dict(argstr='--out_orientation %s',
    ),
    out_scale=dict(argstr='--out-scale %d',
    ),
    out_stats=dict(argstr='--out_stats',
    ),
    out_type=dict(argstr='--out_type %s',
    ),
    parse_only=dict(argstr='--parse_only',
    ),
    read_only=dict(argstr='--read_only',
    ),
    reorder=dict(argstr='--reorder %d %d %d',
    ),
    resample_type=dict(argstr='--resample_type %s',
    ),
    reslice_like=dict(argstr='--reslice_like %s',
    ),
    sdcm_list=dict(argstr='--sdcmlist %s',
    ),
    skip_n=dict(argstr='--nskip %d',
    ),
    slice_bias=dict(argstr='--slice-bias %f',
    ),
    slice_crop=dict(argstr='--slice-crop %d %d',
    ),
    slice_reverse=dict(argstr='--slice-reverse',
    ),
    smooth_parcellation=dict(argstr='--smooth_parcellation',
    ),
    sphinx=dict(argstr='--sphinx',
    ),
    split=dict(argstr='--split',
    ),
    status_file=dict(argstr='--status %s',
    ),
    subject_name=dict(argstr='--subject_name %s',
    ),
    subjects_dir=dict(),
    template_info=dict(),
    template_type=dict(argstr='--template_type %s',
    ),
    terminal_output=dict(nohash=True,
    ),
    unwarp_gradient=dict(argstr='--unwarp_gradient_nonlinearity',
    ),
    vox_size=dict(argstr='-voxsize %f %f %f',
    ),
    zero_ge_z_offset=dict(argstr='--zero_ge_z_offset',
    ),
    zero_outlines=dict(argstr='--zero_outlines',
    ),
    )
    inputs = MRIConvert.input_spec()

    for key, metadata in input_map.items():
        for metakey, value in metadata.items():
            yield assert_equal, getattr(inputs.traits()[key], metakey), value
Example #9
def create_t1_based_unwarp(name='unwarp'):
    """
    Unwarp an fMRI time series based on non-linear registration to T1.
        NOTE: AS IT STANDS THIS METHOD DID NOT PRODUCE ACCEPTABLE RESULTS
        IF BRAIN COVERAGE IS NOT COMPLETE ON THE EPI IMAGE.
        ALSO: NEED TO ADD AUTOMATIC READING OF EPI RESOLUTION TO GET

    """

    unwarpflow = pe.Workflow(name=name)
    inputnode = pe.Node(
        interface=util.IdentityInterface(fields=['epi', 'T1W']),
        name='inputspec')
    outputnode = pe.Node(interface=util.IdentityInterface(
        fields=['unwarped_func', 'warp_files']),
                         name='outputspec')

    tmedian = pe.Node(interface=ImageMaths(), name='tmedian')
    tmedian.inputs.op_string = '-Tmedian'
    epi_brain_ext = pe.Node(interface=util.Function(
        function=epi_brain_extract,
        input_names=['in_file'],
        output_names=['out_vol', 'out_mask']),
                            name='epi_brain_ext')

    fast_debias = pe.Node(interface=FAST(), name='FAST_debias')
    fast_debias.inputs.output_biascorrected = True

    robex = pe.Node(interface=util.Function(
        function=my_robex,
        input_names=['in_file'],
        output_names=['out_file', 'out_mask']),
                    name='robex')

    downsample_T1 = pe.Node(MRIConvert(), name='downsample_dti')
    downsample_T1.inputs.vox_size = (3.438, 3.438, 3.000)
    downsample_T1.inputs.out_type = 'niigz'

    contrast_invert = pe.Node(interface=util.Function(
        function=invert_contrast,
        input_names=['in_t1_brain', 'in_b0_brain'],
        output_names=['out_fn']),
                              name='contrast_invert')

    ants_syn = pe.Node(interface=util.Function(
        function=my_ants_registration_syn,
        input_names=['in_T1W', 'in_epi'],
        output_names=['out_transforms']),
                       name='ants_syn')
    ants_warp = pe.Node(interface=WarpTimeSeriesImageMultiTransform(),
                        name='ants_warp')
    '''connections'''
    # unwarpflow.connect(inputnode, 'T1W', robex, 'in_file')
    unwarpflow.connect(inputnode, 'T1W', fast_debias, 'in_files')
    # unwarpflow.connect(robex, 'out_file', fast_debias, 'in_files')
    unwarpflow.connect(fast_debias, 'restored_image', robex, 'in_file')
    # unwarpflow.connect(fast_debias, 'restored_image', downsample_T1, 'in_file')
    unwarpflow.connect(robex, 'out_file', downsample_T1, 'in_file')
    unwarpflow.connect(downsample_T1, 'out_file', contrast_invert,
                       'in_t1_brain')
    unwarpflow.connect(inputnode, 'epi', tmedian, 'in_file')
    unwarpflow.connect(tmedian, 'out_file', epi_brain_ext, 'in_file')
    unwarpflow.connect(epi_brain_ext, 'out_vol', contrast_invert,
                       'in_b0_brain')
    unwarpflow.connect(contrast_invert, 'out_fn', ants_syn, 'in_T1W')
    unwarpflow.connect(epi_brain_ext, 'out_vol', ants_syn, 'in_epi')
    # NOTE: the outputnode defines no 'out_transforms' field; the transforms
    # are exposed below via the 'warp_files' field instead.

    unwarpflow.connect(inputnode, 'epi', ants_warp, 'input_image')
    unwarpflow.connect(contrast_invert, 'out_fn', ants_warp, 'reference_image')
    unwarpflow.connect(ants_syn, 'out_transforms', ants_warp,
                       'transformation_series')

    unwarpflow.connect(ants_syn, 'out_transforms', outputnode, 'warp_files')
    unwarpflow.connect(ants_warp, 'output_image', outputnode, 'unwarped_func')

    return unwarpflow
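A hedged usage sketch: build the workflow and feed it an EPI time series and a T1-weighted image (paths hypothetical).

# hypothetical inputs for the unwarping workflow defined above
unwarp_wf = create_t1_based_unwarp(name="unwarp")
unwarp_wf.inputs.inputspec.epi = "/data/sub-01_task-rest_bold.nii.gz"
unwarp_wf.inputs.inputspec.T1W = "/data/sub-01_T1w.nii.gz"
unwarp_wf.run()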
Example #10
def get_flash2orig(file_flash,
                   file_inv2,
                   file_orig,
                   path_output,
                   cleanup=False):
    """
    This function computes the deformation field for the registration between a partial coverage
    GRE image and the freesurfer orig file. The following steps are performed: (1) set output 
    folder structure, (2) get scanner transform GRE <-> inv2 and inv2 -> orig, (3) generate flash
    cmap, (4) apply scanner transform inv2 -> GRE, (5) get flirt registration GRE -> inv2, (6) apply
    flirt to GRE cmap, (7) apply scanner transform to GRE cmap, (8) apply final deformation to GRE.
    The function needs the FSL environment set.
    Inputs:
        *file_flash: input path for GRE image.
        *file_inv2: input path for MP2RAGE INV2 image.
        *file_orig: input path for freesurfer orig image.
        *path_output: path where output is saved.
        *cleanup: delete intermediate files (boolean).
    
    created by Daniel Haenelt
    Date created: 18-04-2019
    Last modified: 16-05-2019
    """
    import os
    import shutil as sh
    from nipype.interfaces.fsl import FLIRT
    from nipype.interfaces.fsl.preprocess import ApplyXFM
    from nipype.interfaces.freesurfer.preprocess import MRIConvert
    from nighres.registration import apply_coordinate_mappings
    from lib.cmap import generate_coordinate_mapping
    from lib.registration.get_scanner_transform import get_scanner_transform
    """
    set folder structure
    """
    path_temp = os.path.join(path_output, "temp")

    if not os.path.exists(path_output):
        os.makedirs(path_output)

    if not os.path.exists(path_temp):
        os.makedirs(path_temp)

    # copy input files
    sh.copyfile(file_inv2, os.path.join(path_temp, "inv2.nii"))
    sh.copyfile(file_flash, os.path.join(path_temp, "flash.nii"))
    """
    convert orig to nifti
    """
    mc = MRIConvert()
    mc.inputs.in_file = file_orig
    mc.inputs.out_file = os.path.join(path_temp, "orig.nii")
    mc.inputs.in_type = "mgz"
    mc.inputs.out_type = "nii"
    mc.run()
    """
    scanner transformation
    """
    get_scanner_transform(os.path.join(path_temp, "inv2.nii"),
                          os.path.join(path_temp, "flash.nii"), path_temp,
                          False)
    get_scanner_transform(os.path.join(path_temp, "flash.nii"),
                          os.path.join(path_temp, "inv2.nii"), path_temp,
                          False)
    get_scanner_transform(os.path.join(path_temp, "inv2.nii"),
                          os.path.join(path_temp, "orig.nii"), path_temp,
                          False)
    """
    generate coordinate mapping
    """
    generate_coordinate_mapping(os.path.join(path_temp, "flash.nii"), 0,
                                path_temp, "flash", False, True)
    """
    scanner transform inv2 to flash
    """
    apply_coordinate_mappings(
        os.path.join(path_temp, "inv2.nii"),  # input 
        os.path.join(path_temp, "inv2_2_flash_scanner.nii"),  # cmap
        interpolation="linear",  # nearest or linear
        padding="zero",  # closest, zero or max
        save_data=True,  # save output data to file (boolean)
        overwrite=True,  # overwrite existing results (boolean)
        output_dir=path_temp,  # output directory
        file_name=
        "inv2_apply_scanner"  # base name with file extension for output
    )
    """
    flirt flash to inv2
    """
    os.chdir(path_temp)
    flirt = FLIRT()
    flirt.inputs.cost_func = "mutualinfo"
    flirt.inputs.dof = 6
    flirt.inputs.interp = "trilinear"  # trilinear, nearestneighbour, sinc or spline
    flirt.inputs.in_file = os.path.join(path_temp, "flash.nii")
    flirt.inputs.reference = os.path.join(path_temp,
                                          "inv2_apply_scanner_def-img.nii.gz")
    flirt.inputs.output_type = "NIFTI_GZ"
    flirt.inputs.out_file = os.path.join(path_temp,
                                         "flash_apply_flirt_def-img.nii.gz")
    flirt.inputs.out_matrix_file = os.path.join(path_temp, "flirt_matrix.txt")
    flirt.run()
    """
    apply flirt to flash cmap
    """
    applyxfm = ApplyXFM()
    applyxfm.inputs.in_file = os.path.join(path_temp, "cmap_flash.nii")
    applyxfm.inputs.reference = os.path.join(path_temp, "flash.nii")
    applyxfm.inputs.in_matrix_file = os.path.join(path_temp,
                                                  "flirt_matrix.txt")
    applyxfm.inputs.interp = "trilinear"
    applyxfm.inputs.padding_size = 0
    applyxfm.inputs.output_type = "NIFTI_GZ"
    applyxfm.inputs.out_file = os.path.join(path_temp,
                                            "cmap_apply_flirt_def-img.nii.gz")
    applyxfm.inputs.apply_xfm = True
    applyxfm.run()
    """
    apply scanner transform to flash cmap
    """
    apply_coordinate_mappings(
        os.path.join(path_temp, "cmap_apply_flirt_def-img.nii.gz"),
        os.path.join(path_temp, "flash_2_inv2_scanner.nii"),  # cmap 1
        os.path.join(path_temp, "inv2_2_orig_scanner.nii"),  # cmap 2
        interpolation="linear",  # nearest or linear
        padding="zero",  # closest, zero or max
        save_data=True,  # save output data to file (boolean)
        overwrite=True,  # overwrite existing results (boolean)
        output_dir=path_temp,  # output directory
        file_name=
        "cmap_apply_scanner"  # base name with file extension for output
    )
    """
    apply deformation to source image
    """
    apply_coordinate_mappings(
        os.path.join(path_temp, "flash.nii"),  # input 
        os.path.join(path_temp, "cmap_apply_scanner_def-img.nii.gz"),
        interpolation="linear",  # nearest or linear
        padding="zero",  # closest, zero or max
        save_data=True,  # save output data to file (boolean)
        overwrite=True,  # overwrite existing results (boolean)
        output_dir=path_temp,  # output directory
        file_name=
        "flash_apply_deformation"  # base name with file extension for output
    )

    # rename final deformation field and transformed example image
    os.rename(os.path.join(path_temp, "cmap_apply_scanner_def-img.nii.gz"),
              os.path.join(path_output, "flash2orig.nii.gz"))
    os.rename(
        os.path.join(path_temp, "flash_apply_deformation_def-img.nii.gz"),
        os.path.join(path_output, "flash2orig_example.nii.gz"))

    # clean intermediate files
    if cleanup:
        sh.rmtree(path_temp, ignore_errors=True)
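A hedged usage sketch with hypothetical paths; FSL, FreeSurfer, and nighres are assumed to be configured.

# hypothetical call: register a partial-coverage GRE image to the FreeSurfer orig volume
get_flash2orig(file_flash="/data/flash.nii",
               file_inv2="/data/mp2rage_inv2.nii",
               file_orig="/data/freesurfer/sub-01/mri/orig.mgz",
               path_output="/data/flash2orig",
               cleanup=True)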
Example #11
    datasink = Node(DataSink(base_directory=t2dir, container=output_dir),
                    name="datasink")
    substitutions = [('_subject_id_', '')]
    datasink.inputs.substitutions = substitutions

    # t2dir = '/autofs/space/bhim_001/users/aj660/PSACNN/data/IXI/T2/'
    # t2_file_list = sorted(glob.glob(os.path.join(t1dir, '*T2.nii.gz')))
    #
    # input_file = '/autofs/space/bhim_001/users/aj660/PSACNN/data/IXI/T2/IXI002-Guys-0828-T2.nii.gz'
    # output_dir = '/autofs/space/bhim_001/users/aj660/psacnn_brain_segmentation/test_output/IXI002-Guys-0828-T2'
    # subprocess.call(['mkdir', '-p', output_dir])

    preprocess_flow = Workflow(name='preprocess', base_dir=output_dir)

    conform = Node(MRIConvert(conform=True,
                              out_type='niigz',
                              out_file='conformed.nii.gz'),
                   name='conform')
    n4 = Node(N4BiasFieldCorrection(dimension=3,
                                    bspline_fitting_distance=300,
                                    shrink_factor=3,
                                    n_iterations=[50, 50, 30, 20],
                                    output_image='n4.nii.gz'),
              name='n4')
    robex = Node(ROBEX(seed=1729, stripped_image='brain.nii.gz'), name='robex')

    preprocess_flow.connect([
        (infosource, selectfiles, [('subject_id', 'subject_id')]),
        (selectfiles, conform, [('anat', 'in_file')]),
        (conform, n4, [('out_file', 'input_image')]),
        (n4, robex, [('output_image', 'input_image')]),
Example #12
def get_nuisance_mask(input,
                      pathSPM,
                      deformation,
                      path_output,
                      nerode_white=1,
                      nerode_csf=1,
                      segmentation=True,
                      cleanup=True):
    """
    This function calculates WM and CSF masks in space of the functional time series. It uses SPM
    to compute WM and CSF probability maps. These maps are masked with a skullstrip mask and 
    transformed to native epi space.
    Inputs:
        *input: input anatomy (orig.mgz).
        *pathSPM: path to spm toolbox.
        *deformation: coordinate mapping for ana to epi transformation.
        *path_output: path where output is saved.
        *nerode_white: number of wm mask eroding steps.
        *nerode_csf: number of csf mask eroding steps.
        *segmentation: if False, skip the SPM segmentation step and reuse existing masks.
        *cleanup: delete intermediate files.

    created by Daniel Haenelt
    Date created: 01-03-2019
    Last modified: 01-03-2019
    """
    import os
    import shutil as sh
    import nibabel as nb
    from scipy.ndimage.morphology import binary_erosion
    from nipype.interfaces.fsl import BET
    from nipype.interfaces.freesurfer.preprocess import MRIConvert
    from nighres.registration import apply_coordinate_mappings
    from lib.skullstrip.skullstrip_spm12 import skullstrip_spm12

    # make output folder
    if not os.path.exists(path_output):
        os.mkdir(path_output)

    # get filename without file extension of input file
    file = os.path.splitext(os.path.basename(input))[0]

    # convert to nifti format
    mc = MRIConvert()
    mc.inputs.in_file = input
    mc.inputs.out_file = os.path.join(path_output, file + ".nii")
    mc.inputs.out_type = "nii"
    mc.run()

    # bet skullstrip mask
    btr = BET()
    btr.inputs.in_file = os.path.join(path_output, file + ".nii")
    btr.inputs.frac = 0.5
    btr.inputs.mask = True
    btr.inputs.no_output = True
    btr.inputs.out_file = os.path.join(path_output, "bet")
    btr.inputs.output_type = "NIFTI"
    btr.run()

    # segmentation
    if segmentation:
        skullstrip_spm12(os.path.join(path_output, file + ".nii"), pathSPM,
                         path_output)

    # load tissue maps
    wm_array = nb.load(os.path.join(path_output, "skull",
                                    "c2" + file + ".nii")).get_fdata()
    csf_array = nb.load(
        os.path.join(path_output, "skull", "c3" + file + ".nii")).get_fdata()
    mask_array = nb.load(os.path.join(path_output, "bet_mask.nii")).get_fdata()

    # binarize
    wm_array[wm_array > 0] = 1
    csf_array[csf_array > 0] = 1

    # apply brain mask
    wm_array = wm_array * mask_array
    csf_array = csf_array * mask_array

    # erode wm
    wm_array = binary_erosion(
        wm_array,
        structure=None,
        iterations=nerode_white,
        mask=None,
        output=None,
        border_value=0,
        origin=0,
        brute_force=False,
    )

    # erode csf
    csf_array = binary_erosion(
        csf_array,
        structure=None,
        iterations=nerode_csf,
        mask=None,
        output=None,
        border_value=0,
        origin=0,
        brute_force=False,
    )

    # write wm and csf mask
    data_img = nb.load(input)
    wm_out = nb.Nifti1Image(wm_array, data_img.affine, data_img.header)
    nb.save(wm_out, os.path.join(path_output, "wm_mask_orig.nii"))
    csf_out = nb.Nifti1Image(csf_array, data_img.affine, data_img.header)
    nb.save(csf_out, os.path.join(path_output, "csf_mask_orig.nii"))

    # apply deformation to mask
    apply_coordinate_mappings(
        os.path.join(path_output, "wm_mask_orig.nii"),  # input 
        deformation,  # cmap
        interpolation="nearest",  # nearest or linear
        padding="zero",  # closest, zero or max
        save_data=True,  # save output data to file (boolean)
        overwrite=True,  # overwrite existing results (boolean)
        output_dir=path_output,  # output directory
        file_name="wm_mask"  # base name with file extension for output
    )

    apply_coordinate_mappings(
        os.path.join(path_output, "csf_mask_orig.nii"),  # input 
        deformation,  # cmap
        interpolation="nearest",  # nearest or linear
        padding="zero",  # closest, zero or max
        save_data=True,  # save output data to file (boolean)
        overwrite=True,  # overwrite existing results (boolean)
        output_dir=path_output,  # output directory
        file_name="csf_mask"  # base name with file extension for output
    )

    # rename transformed masks
    os.rename(os.path.join(path_output, "wm_mask_def-img.nii.gz"),
              os.path.join(path_output, "wm_mask.nii.gz"))
    os.rename(os.path.join(path_output, "csf_mask_def-img.nii.gz"),
              os.path.join(path_output, "csf_mask.nii.gz"))

    # cleanup
    if cleanup:
        os.remove(os.path.join(path_output, "bet_mask.nii"))
        os.remove(os.path.join(path_output, "csf_mask_orig.nii"))
        os.remove(os.path.join(path_output, "wm_mask_orig.nii"))
        os.remove(os.path.join(path_output, "orig.nii"))
        sh.rmtree(os.path.join(path_output, "skull"), ignore_errors=True)
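A hedged usage sketch; the SPM path, deformation field, and anatomy below are hypothetical.

# hypothetical call: derive WM and CSF nuisance masks in native EPI space
get_nuisance_mask("/data/freesurfer/sub-01/mri/orig.mgz",
                  "/opt/spm12",
                  "/data/deformation/ana2epi.nii.gz",
                  "/data/nuisance",
                  nerode_white=1,
                  nerode_csf=1)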
Example #13
def test_MRIConvert_inputs():
    input_map = dict(
        apply_inv_transform=dict(argstr='--apply_inverse_transform %s', ),
        apply_transform=dict(argstr='--apply_transform %s', ),
        args=dict(argstr='%s', ),
        ascii=dict(argstr='--ascii', ),
        autoalign_matrix=dict(argstr='--autoalign %s', ),
        color_file=dict(argstr='--color_file %s', ),
        conform=dict(argstr='--conform', ),
        conform_min=dict(argstr='--conform_min', ),
        conform_size=dict(argstr='--conform_size %s', ),
        crop_center=dict(argstr='--crop %d %d %d', ),
        crop_gdf=dict(argstr='--crop_gdf', ),
        crop_size=dict(argstr='--cropsize %d %d %d', ),
        cut_ends=dict(argstr='--cutends %d', ),
        devolve_transform=dict(argstr='--devolvexfm %s', ),
        drop_n=dict(argstr='--ndrop %d', ),
        environ=dict(
            nohash=True,
            usedefault=True,
        ),
        fill_parcellation=dict(argstr='--fill_parcellation', ),
        force_ras=dict(argstr='--force_ras_good', ),
        frame=dict(argstr='--frame %d', ),
        frame_subsample=dict(argstr='--fsubsample %d %d %d', ),
        fwhm=dict(argstr='--fwhm %f', ),
        ignore_exception=dict(
            nohash=True,
            usedefault=True,
        ),
        in_center=dict(argstr='--in_center %s', ),
        in_file=dict(
            argstr='--input_volume %s',
            mandatory=True,
            position=-2,
        ),
        in_i_dir=dict(argstr='--in_i_direction %f %f %f', ),
        in_i_size=dict(argstr='--in_i_size %d', ),
        in_info=dict(argstr='--in_info', ),
        in_j_dir=dict(argstr='--in_j_direction %f %f %f', ),
        in_j_size=dict(argstr='--in_j_size %d', ),
        in_k_dir=dict(argstr='--in_k_direction %f %f %f', ),
        in_k_size=dict(argstr='--in_k_size %d', ),
        in_like=dict(argstr='--in_like %s', ),
        in_matrix=dict(argstr='--in_matrix', ),
        in_orientation=dict(argstr='--in_orientation %s', ),
        in_scale=dict(argstr='--scale %f', ),
        in_stats=dict(argstr='--in_stats', ),
        in_type=dict(argstr='--in_type %s', ),
        invert_contrast=dict(argstr='--invert_contrast %f', ),
        midframe=dict(argstr='--mid-frame', ),
        no_change=dict(argstr='--nochange', ),
        no_scale=dict(argstr='--no_scale 1', ),
        no_translate=dict(argstr='--no_translate', ),
        no_write=dict(argstr='--no_write', ),
        out_center=dict(argstr='--out_center %f %f %f', ),
        out_datatype=dict(argstr='--out_data_type %s', ),
        out_file=dict(
            argstr='--output_volume %s',
            genfile=True,
            position=-1,
        ),
        out_i_count=dict(argstr='--out_i_count %d', ),
        out_i_dir=dict(argstr='--out_i_direction %f %f %f', ),
        out_i_size=dict(argstr='--out_i_size %d', ),
        out_info=dict(argstr='--out_info', ),
        out_j_count=dict(argstr='--out_j_count %d', ),
        out_j_dir=dict(argstr='--out_j_direction %f %f %f', ),
        out_j_size=dict(argstr='--out_j_size %d', ),
        out_k_count=dict(argstr='--out_k_count %d', ),
        out_k_dir=dict(argstr='--out_k_direction %f %f %f', ),
        out_k_size=dict(argstr='--out_k_size %d', ),
        out_matrix=dict(argstr='--out_matrix', ),
        out_orientation=dict(argstr='--out_orientation %s', ),
        out_scale=dict(argstr='--out-scale %d', ),
        out_stats=dict(argstr='--out_stats', ),
        out_type=dict(argstr='--out_type %s', ),
        parse_only=dict(argstr='--parse_only', ),
        read_only=dict(argstr='--read_only', ),
        reorder=dict(argstr='--reorder %d %d %d', ),
        resample_type=dict(argstr='--resample_type %s', ),
        reslice_like=dict(argstr='--reslice_like %s', ),
        sdcm_list=dict(argstr='--sdcmlist %s', ),
        skip_n=dict(argstr='--nskip %d', ),
        slice_bias=dict(argstr='--slice-bias %f', ),
        slice_crop=dict(argstr='--slice-crop %d %d', ),
        slice_reverse=dict(argstr='--slice-reverse', ),
        smooth_parcellation=dict(argstr='--smooth_parcellation', ),
        sphinx=dict(argstr='--sphinx', ),
        split=dict(argstr='--split', ),
        status_file=dict(argstr='--status %s', ),
        subject_name=dict(argstr='--subject_name %s', ),
        subjects_dir=dict(),
        template_info=dict(),
        template_type=dict(argstr='--template_type %s', ),
        terminal_output=dict(nohash=True, ),
        unwarp_gradient=dict(argstr='--unwarp_gradient_nonlinearity', ),
        vox_size=dict(argstr='-voxsize %f %f %f', ),
        zero_ge_z_offset=dict(argstr='--zero_ge_z_offset', ),
        zero_outlines=dict(argstr='--zero_outlines', ),
    )
    inputs = MRIConvert.input_spec()

    for key, metadata in input_map.items():
        for metakey, value in metadata.items():
            yield assert_equal, getattr(inputs.traits()[key], metakey), value
Example #14
def t1_b0_registration(
        participant_id, session_id,
        caps_directory, working_directory=None,
        name="t1_b0_registration"):
    """
    Perform rigid registration of the T1-weighted image onto the B0 image.

    Given a subject, this pipeline registers its T1-weighted image onto its
    1 mm upsampled B0 image. Once done, the estimated transformation matrix is
    used to align the binary mask of the white-matter segmentation (obtained
    with FSL fast) from the anatomical space to the diffusion space. The same
    operation is performed on the Desikan and Destrieux parcellations
    (obtained with FreeSurfer recon-all), except that the latter are not
    resliced onto the B0 image.

    These steps prepare the data for the DWI processing pipelines.

    Args:
         participant_id (str): Subject (participant) ID in a BIDS format
            ('sub-<participant_label>').
         session_id (str): Session ID in a BIDS format ('ses-<session_label>').
         caps_directory (str): Directory where the results are stored
            in a CAPS hierarchy.
         working_directory (Optional[str]): Directory where the temporary
            results are stored. If not specified, it is automatically generated
            (generally in /tmp/).
         name (Optional[str]): Name of the pipeline.

    Inputnode:
        in_bias_corrected_bet_t1 (str): File containing the bias corrected
            brain extracted T1-weighted image. It corresponds to the
            out_brain_extracted file from FSL-T1 pipelines.
        in_preprocessed_dwi (str): File containing the preprocessed DWI
            dataset. It assumes that the reference b0 is the first volume in
            the dataset (which is the case if you are using Clinica).
        in_b0_mask (str): File containing the mask of the b0 image. It assumes
            that `in_b0_mask` has the same header as `in_preprocessed_dwi`
            (which is the case if you are using Clinica).
        in_white_matter_binary_mask (str): File containing the binary
            segmentation of the white matter (obtained with FSL fast). You can
            use the one generated by FreeSurfer recon-all but you must convert
            your image in FreeSurfer space to Native space first.
        in_desikan_parcellation (str): File containing the Desikan parcellation
            (obtained with FreeSurfer recon-all). The file is usually located
            in ${participant_id}/mri/aparc+aseg.mgz.
        in_destrieux_parcellation (str): File containing the Destrieux
            parcellation (obtained with FreeSurfer recon-all). The file is
            usually located in ${participant_id}/mri/aparc.a2009s+aseg.mgz.

    Outputnode:
        out_registered_t1 (str): File containing the registration of the
            T1-weighted image onto the diffusion space.
        out_flirt_matrix (str): File containing the transformation matrix
            estimated by FSL flirt.
        out_wm_mask_in_diffusion_space (str): File containing the segmentation
            of the white matter in diffusion space.
        out_mrtrix_matrix (str): File containing the transformation matrix in
            MRtrix format (can be used with the mrtransform command).
        out_desikan_in_diffusion_space (str): File containing the Desikan
            parcellation in diffusion space.
        out_destrieux_in_diffusion_space (str): File containing the Destrieux
            parcellation in diffusion space.

    Example:
        >>> from clinica.pipelines.dwi.dwi_registration import t1_b0_registration_pipeline
        >>> t1_b0_registration = t1_b0_registration_pipeline(participant_id='sub-CLNC01', session_id='ses-M00', caps_directory='/path/to/output/results')
        >>> t1_b0_registration.inputs.inputnode.in_bias_corrected_bet_t1 = 'sub-CLNC01_ses-M00_bias_corrected_brain_extracted_t1.nii'
        >>> t1_b0_registration.inputs.inputnode.in_preprocessed_dwi = 'sub-CLNC01_ses-M00_preprocessed_dwi.nii'
        >>> t1_b0_registration.inputs.inputnode.in_b0_mask = 'sub-CLNC01_ses-M00_b0_mask.nii'
        >>> t1_b0_registration.inputs.inputnode.in_white_matter_binary_mask = 'sub-CLNC01_ses-M00_id_wm_mask.nii'
        >>> t1_b0_registration.inputs.inputnode.in_desikan_parcellation = 'sub-CLNC01_ses-M00/mri/aparc+aseg.mgz'
        >>> t1_b0_registration.inputs.inputnode.in_destrieux_parcellation = 'sub-CLNC01_ses-M00/mri/aparc.a2009s+aseg.mgz'
        >>> t1_b0_registration.run()
    """
    from os.path import join
    import nipype.interfaces.fsl as fsl
    import nipype.interfaces.io as nio
    import nipype.interfaces.utility as niu
    import nipype.pipeline.engine as pe
    from nipype.interfaces.freesurfer.preprocess import MRIConvert
    from clinica.utils.freesurfer import freesurfer_volume_to_native_volume
    from clinica.utils.mri_registration import convert_flirt_transformation_to_mrtrix_transformation
    from clinica.utils.mri_registration import apply_mrtrix_transform_without_resampling
    import tempfile
    from clinica.utils.check_dependency import check_freesurfer, check_fsl, check_mrtrix

    if working_directory is None:
        working_directory = tempfile.mkdtemp()

    check_freesurfer()
    check_fsl()
    check_mrtrix()

    caps_identifier = participant_id + '_' + session_id

    inputnode = pe.Node(niu.IdentityInterface(
        fields=['in_bias_corrected_bet_t1', 'in_preprocessed_dwi',
                'in_b0_mask', 'in_white_matter_binary_mask',
                'in_desikan_parcellation', 'in_destrieux_parcellation']),
        name='inputnode')

    get_b0 = pe.Node(fsl.ExtractROI(args='0 1'), name='get_b0')

    upsample_b0 = pe.Node(
        MRIConvert(vox_size=(1, 1, 1), out_type='niigz'),
        name='upsample_b0')

    upsample_b0_mask = pe.Node(
        MRIConvert(vox_size=(1, 1, 1), out_type='niigz'),
        name='upsample_b0_mask')

    registration_t1_to_b0 = pe.Node(fsl.FLIRT(
        dof=6, interp='spline', cost='normmi', cost_func='normmi',
        out_matrix_file=caps_identifier + '_t1-to-b0_withResampling.mat'),
        name='registration_t1_to_b0')

    apply_flirt_registration = pe.Node(
        fsl.ApplyXfm(apply_xfm=True, interp='spline'),
        name='apply_flirt_registration')
    apply_flirt_registration.inputs.out_file = \
        caps_identifier + '_binarymask-whitematter_reslicedOnDiffusionSpace.nii.gz'

    convert_flirt_to_mrtrix = pe.Node(interface=niu.Function(
        input_names=['in_source_image', 'in_reference_image',
                     'in_flirt_matrix', 'name_output_matrix'],
        output_names=['out_mrtrix_matrix'],
        function=convert_flirt_transformation_to_mrtrix_transformation),
        name='convert_flirt_to_mrtrix')
    convert_flirt_to_mrtrix.inputs.name_output_matrix = \
        caps_identifier + '_t1-to-b0_withoutResampling.mat'

    desikan_in_native_space = pe.Node(interface=niu.Function(
        input_names=['freesurfer_volume', 'native_volume', 'name_output_volume'],
        output_names=['out_volume'],
        function=freesurfer_volume_to_native_volume),
        name='desikan_in_native_space')
    destrieux_in_native_space = pe.Node(interface=niu.Function(
        input_names=['freesurfer_volume', 'native_volume', 'name_output_volume'],
        output_names=['out_volume'],
        function=freesurfer_volume_to_native_volume),
        name='destrieux_in_native_space')

    desikan_in_diffusion_space = pe.Node(interface=niu.Function(
        input_names=['in_image', 'in_mrtrix_matrix', 'name_output_image'],
        output_names=['out_deformed_image'],
        function=apply_mrtrix_transform_without_resampling),
        name='desikan_in_diffusion_space')
    desikan_in_diffusion_space.inputs.name_output_image = \
        caps_identifier + '_parcellation-desikan_onDiffusionSpace.nii.gz'
    destrieux_in_diffusion_space = pe.Node(interface=niu.Function(
        input_names=['in_image', 'in_mrtrix_matrix', 'name_output_image'],
        output_names=['out_deformed_image'],
        function=apply_mrtrix_transform_without_resampling),
        name='destrieux_in_diffusion_space')
    destrieux_in_diffusion_space.inputs.name_output_image = \
        caps_identifier + '_parcellation-destrieux_onDiffusionSpace.nii.gz'

    outputnode = pe.Node(niu.IdentityInterface(
        fields=['out_registered_t1', 'out_flirt_matrix',
                'out_wm_mask_in_diffusion_space', 'out_mrtrix_matrix',
                'out_desikan_in_diffusion_space',
                'out_destrieux_in_diffusion_space']),
        name='outputnode')

    datasink = pe.Node(nio.DataSink(), name='datasink')
    datasink.inputs.base_directory = join(caps_directory, 'subjects',
                                          participant_id, session_id)
#    datasink.inputs.substitutions = [('fast_pve_0.nii.gz', caps_identifier + '_binary-csf.nii.gz')]

    wf = pe.Workflow(name=name)
    wf.base_dir = working_directory
    wf.connect([
        # Get b0 from DWI:
        (inputnode, get_b0, [('in_preprocessed_dwi', 'in_file')]),
        # Upsample at 1mm the b0 image:
        (get_b0, upsample_b0, [('roi_file', 'in_file')]),
        # Upsample at 1mm the b0 mask:
        (inputnode, upsample_b0_mask, [('in_b0_mask', 'in_file')]),
        # Register the T1 image onto the b0:
        (inputnode,        registration_t1_to_b0, [('in_bias_corrected_bet_t1', 'in_file')]),
        (upsample_b0,      registration_t1_to_b0, [('out_file', 'reference')]),
        (upsample_b0_mask, registration_t1_to_b0, [('out_file', 'ref_weight')]),
        # Apply flirt registration to WM mask:
        (inputnode,             apply_flirt_registration, [('in_white_matter_binary_mask', 'in_file')]),
        (upsample_b0,           apply_flirt_registration, [('out_file', 'reference')]),
        (registration_t1_to_b0, apply_flirt_registration, [('out_matrix_file', 'in_matrix_file')]),
        # Convert flirt matrix to MRtrix matrix:
        (inputnode,             convert_flirt_to_mrtrix, [('in_bias_corrected_bet_t1', 'in_source_image')]),
        (upsample_b0,           convert_flirt_to_mrtrix, [('out_file', 'in_reference_image')]),
        (registration_t1_to_b0, convert_flirt_to_mrtrix, [('out_matrix_file', 'in_flirt_matrix')]),
        # Convert FreeSurfer parcellations into native space:
        (inputnode, desikan_in_native_space, [('in_desikan_parcellation', 'freesurfer_volume'),
                                              ('in_bias_corrected_bet_t1', 'native_volume')]),
        (inputnode, destrieux_in_native_space, [('in_destrieux_parcellation', 'freesurfer_volume'),
                                                ('in_bias_corrected_bet_t1', 'native_volume')]),
        # Apply registration without resampling on Desikan & Destrieux parcellations:
        (desikan_in_native_space, desikan_in_diffusion_space, [('out_volume', 'in_image')]),  # noqa
        (convert_flirt_to_mrtrix, desikan_in_diffusion_space, [('out_mrtrix_matrix', 'in_mrtrix_matrix')]),  # noqa
        (destrieux_in_native_space, destrieux_in_diffusion_space, [('out_volume', 'in_image')]),  # noqa
        (convert_flirt_to_mrtrix,   destrieux_in_diffusion_space, [('out_mrtrix_matrix', 'in_mrtrix_matrix')]),  # noqa
        # Outputnode:
        (registration_t1_to_b0,        outputnode, [('out_file',                          'out_registered_t1')]),  # noqa
        (registration_t1_to_b0,        outputnode, [('out_matrix_file',                    'out_flirt_matrix')]),  # noqa
        (apply_flirt_registration,     outputnode, [('out_file',             'out_wm_mask_in_diffusion_space')]),  # noqa
        (convert_flirt_to_mrtrix,      outputnode, [('out_mrtrix_matrix',                 'out_mrtrix_matrix')]),  # noqa
        (desikan_in_diffusion_space,   outputnode, [('out_deformed_image',   'out_desikan_in_diffusion_space')]),  # noqa
        (destrieux_in_diffusion_space, outputnode, [('out_deformed_image', 'out_destrieux_in_diffusion_space')]),  # noqa
        # Datasink:
        (registration_t1_to_b0,        datasink, [('out_file',                          'dwi.@out_registered_t1')]),  # noqa
        (registration_t1_to_b0,        datasink, [('out_matrix_file',                    'dwi.@out_flirt_matrix')]),  # noqa
        (apply_flirt_registration,     datasink, [('out_file',              'dwi.@out_wm_mask_in_diffusion_mask')]),  # noqa
        (convert_flirt_to_mrtrix,      datasink, [('out_mrtrix_matrix',                 'dwi.@out_mrtrix_matrix')]),  # noqa
        (desikan_in_diffusion_space,   datasink, [('out_deformed_image',   'dwi.@out_desikan_in_diffusion_space')]),  # noqa
        (destrieux_in_diffusion_space, datasink, [('out_deformed_image', 'dwi.@out_destrieux_in_diffusion_space')])   # noqa
    ])
    return wf
Example #15
def map2surface(input_surf,
                input_vol,
                hemi,
                path_output,
                input_white=None,
                input_ind=None,
                cleanup=True):
    """
    This function samples data from the input volume to the input surface and optionally maps those
    values to a target surface if an index file is given.
    Inputs:
        *input_surf: surface mesh onto which volume data is sampled.
        *input_vol: volume from which data is sampled.
        *hemi: hemisphere.
        *path_output: path where to save output.
        *input_white: white surface in target surface space (only necessary if index file is given).
        *input_ind: textfile with mapping of vertex indices to target space.
        *cleanup: remove intermediate files.
            
    created by Daniel Haenelt
    Date created: 06-02-2019      
    Last modified: 03-08-2020
    """
    import os
    import numpy as np
    import nibabel as nb
    import shutil as sh
    from nibabel.freesurfer.io import read_geometry
    from nipype.interfaces.freesurfer import SampleToSurface
    from nipype.interfaces.freesurfer.preprocess import MRIConvert

    # set freesurfer path environment
    os.environ["SUBJECTS_DIR"] = path_output

    # freesurfer subject
    tmp = np.random.randint(0, 10, 5)
    tmp_string = ''.join(str(i) for i in tmp)
    sub = "tmp_" + tmp_string

    # make output folder
    if not os.path.exists(path_output):
        os.makedirs(path_output)

    # mimic freesurfer folder structure (with some additional folder for intermediate files)
    path_sub = os.path.join(path_output, sub)
    path_mri = os.path.join(path_sub, "mri")
    path_surf = os.path.join(path_sub, "surf")

    os.makedirs(path_sub)
    os.makedirs(path_mri)
    os.makedirs(path_surf)

    # copy input volume as orig.mgz to mimicked freesurfer folder
    if os.path.splitext(os.path.basename(input_vol))[1] != ".mgz":
        mc = MRIConvert()
        mc.inputs.in_file = input_vol
        mc.inputs.out_file = os.path.join(path_mri, "orig.mgz")
        mc.inputs.out_type = 'mgz'
        mc.run()
    else:
        sh.copyfile(input_vol, os.path.join(path_mri, "orig.mgz"))

    # copy input surface to mimicked freesurfer folder
    sh.copyfile(input_surf, os.path.join(path_surf, hemi + ".source"))

    # input volume file name
    if os.path.splitext(os.path.basename(input_vol))[1] == ".gz":
        name_vol = os.path.splitext(
            os.path.splitext(os.path.basename(input_vol))[0])[0]
    else:
        name_vol = os.path.splitext(os.path.basename(input_vol))[0]
    name_surf = os.path.basename(input_surf).split('.')[1]

    # mri_vol2surf
    sampler = SampleToSurface()
    sampler.inputs.subject_id = sub
    sampler.inputs.reg_header = True
    sampler.inputs.hemi = hemi
    sampler.inputs.source_file = input_vol
    sampler.inputs.surface = "source"
    sampler.inputs.sampling_method = "point"
    sampler.inputs.sampling_range = 0
    sampler.inputs.sampling_units = "mm"
    sampler.inputs.interp_method = "nearest"  # or trilinear
    sampler.inputs.out_type = "mgh"
    sampler.inputs.out_file = os.path.join(path_surf,
                                           hemi + "." + "sampled.mgh")
    sampler.run()

    if input_ind:
        # read ind
        ind_orig = np.loadtxt(input_ind, dtype=int)

        # read white
        vtx_orig, _ = read_geometry(input_white)

        # read sampled morph data
        vals_img = nb.load(os.path.join(path_surf, hemi + "." + "sampled.mgh"))
        vals_array = vals_img.get_fdata()

        # create a zero array with one entry per vertex of the white surface
        vals_orig = np.zeros([len(vtx_orig[:, 0]), 1, 1])

        # insert the sampled data at the mapped vertex indices
        vals_orig[ind_orig] = vals_array

        # write sampled data in anatomical space
        vals_img.header["dims"][0] = len(vals_orig[:, 0])
        vals_img.header["Mdc"] = np.eye(3)
        res_img = nb.Nifti1Image(vals_orig, vals_img.affine, vals_img.header)
        nb.save(
            res_img,
            os.path.join(
                path_output,
                hemi + "." + name_vol + "_" + name_surf + "_def_trans.mgh"))
    else:
        # write sampled data in epi space
        sh.copyfile(
            os.path.join(path_surf, hemi + "." + "sampled.mgh"),
            os.path.join(path_output,
                         hemi + "." + name_vol + "_" + name_surf + "_def.mgh"))

    # delete intermediate files
    if cleanup:
        sh.rmtree(path_sub, ignore_errors=True)
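A hedged usage sketch for map2surface; the surface, volume, and index files below are hypothetical.

# hypothetical call: sample a mean EPI volume onto a left-hemisphere surface mesh
map2surface("/data/surf/lh.layer5",
            "/data/func/mean_epi.nii",
            "lh",
            "/data/sampled",
            input_white="/data/surf/lh.white",
            input_ind="/data/surf/lh_ind.txt")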
Example #16
if not os.path.exists(path_syn):
    os.makedirs(path_syn)

# copy input files
sh.copyfile(file_orig, os.path.join(path_orig, "orig.mgz"))
sh.copyfile(file_mean_epi, os.path.join(path_epi, "epi.nii"))
sh.copyfile(file_t1, os.path.join(path_t1, "T1.nii"))
"""
mask preparation
"""

# convert to nifti
if os.path.splitext(file_mask)[1] == ".mgh":
    sh.copyfile(file_mask, os.path.join(path_t1, "mask.mgh"))
    mc = MRIConvert()
    mc.inputs.in_file = os.path.join(path_t1, "mask.mgh")
    mc.inputs.out_file = os.path.join(path_t1, "mask.nii")
    mc.inputs.in_type = "mgh"
    mc.inputs.out_type = "nii"
    mc.run()
elif os.path.splitext(file_mask)[1] == ".mgz":
    sh.copyfile(file_mask, os.path.join(path_t1, "mask.mgz"))
    mc = MRIConvert()
    mc.inputs.in_file = os.path.join(path_t1, "mask.mgz")
    mc.inputs.out_file = os.path.join(path_t1, "mask.nii")
    mc.inputs.in_type = "mgz"
    mc.inputs.out_type = "nii"
    mc.run()
elif os.path.splitext(file_mask)[1] == ".gz":
    sh.copyfile(file_mask, os.path.join(path_t1, "mask.nii.gz"))