Example #1
 def __init__(self, in_file='path', mask_file='path', **options):
     from nipype.interfaces.fsl import ApplyMask
     mf = ApplyMask()
     mf.inputs.in_file = in_file
     mf.inputs.mask_file = mask_file
     for ef, value in options.items():
         setattr(mf.inputs, ef, value)
     self.res = mf.run()
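A minimal usage sketch for the wrapper above, assuming the `__init__` belongs to a small wrapper class (hypothetically named `MaskRunner` here) and that FSL is installed; the file names are placeholders:

# MaskRunner is a hypothetical name for the class wrapping the __init__ above
masked = MaskRunner('sub-01_T1w.nii.gz', 'sub-01_brain_mask.nii.gz',
                    output_type='NIFTI_GZ')
# the nipype result is stored on .res; out_file is the masked image
print(masked.res.outputs.out_file)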
Example #2
 def t1_brain_extraction_pipeline(self, **kwargs):
     """
     Masks the T1 image using the coregistered T2 brain mask as the brain
     mask from T2 is usually more reliable (using BET in any case)
     """
     pipeline = self.create_pipeline(
         name='t1_brain_extraction_pipeline',
         inputs=[
             DatasetSpec('t1', nifti_gz_format),
             DatasetSpec('brain_mask', nifti_gz_format)
         ],
         outputs=[DatasetSpec('t1_brain', nifti_gz_format)],
         version=1,
         desc="Mask T1 with T2 brain mask",
         citations=[fsl_cite],
         **kwargs)
     # Create apply mask node
     apply_mask = pipeline.create_node(ApplyMask(),
                                       name='apply_mask',
                                       requirements=[fsl5_req])
     apply_mask.inputs.output_type = 'NIFTI_GZ'
     # Connect inputs
     pipeline.connect_input('t1', apply_mask, 'in_file')
     pipeline.connect_input('brain_mask', apply_mask, 'mask_file')
     # Connect outputs
     pipeline.connect_output('t1_brain', apply_mask, 'out_file')
     # Check and return
     return pipeline
Example #3
def smoothing_skullstrip(
    fmriprep_dir,
    output_dir,
    work_dir,
    subject_list,
    task,
    run,
    fwhm=6.0,
    name="smoothing_skullstrip",
):
    """
    FSL smooth fMRIprep output
    """
    workflow = pe.Workflow(name=name)
    workflow.base_dir = work_dir

    template = {
        "bolds": "sub-{subject}/func/sub-{subject}_task-{task}_run-{run}_space-MNI152NLin2009cAsym_desc-preproc_bold.nii.gz",
        "mask": "sub-{subject}/func/sub-{subject}_task-{task}_run-{run}_space-MNI152NLin2009cAsym_desc-brain_mask.nii.gz",
    }

    bg = pe.Node(SelectFiles(template, base_directory=fmriprep_dir), name="datagrabber")
    bg.iterables = [("subject", subject_list), ("task", task), ("run", run)]

    # Create DataSink object
    sinker = pe.Node(DataSink(), name="sinker")
    sinker.inputs.base_directory = output_dir
    sinker.inputs.substitutions = [
        ("_run_1_subject_", "sub-"),
        ("_skip0", "func"),
        ("desc-preproc_bold_smooth_masked_roi", f"desc-preproc-fwhm{int(fwhm)}mm_bold"),
    ]

    # Smoothing
    susan = create_susan_smooth()
    susan.inputs.inputnode.fwhm = fwhm

    # masking the smoothed output
    # note: the susan workflow returns a list, but ApplyMask only accepts a single file path
    mask_results = pe.MapNode(
        ApplyMask(), name="mask_results", iterfield=["in_file", "mask_file"]
    )

    # remove first five volumes
    skip = pe.MapNode(fsl.ExtractROI(), name="skip", iterfield=["in_file"])
    skip.inputs.t_min = 5
    skip.inputs.t_size = -1

    workflow.connect(
        [
            (
                bg,
                susan,
                [("bolds", "inputnode.in_files"), ("mask", "inputnode.mask_file")],
            ),
            (bg, mask_results, [("mask", "mask_file")]),
            (susan, mask_results, [("outputnode.smoothed_files", "in_file")]),
            (mask_results, skip, [("out_file", "in_file")]),
            (skip, sinker, [("roi_file", f"func_smooth-{int(fwhm)}mm.@out_file")]),
        ]
    )
    return workflow
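A usage sketch for the workflow factory above, with placeholder paths and labels, executed through nipype's MultiProc plugin:

# all directories, subject labels, task and run values below are placeholders
wf = smoothing_skullstrip(
    fmriprep_dir='/data/derivatives/fmriprep',
    output_dir='/data/derivatives/smoothing',
    work_dir='/scratch/work',
    subject_list=['01', '02'],
    task=['rest'],
    run=['1'],
    fwhm=6.0,
)
wf.run(plugin='MultiProc', plugin_args={'n_procs': 4})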
Example #4
def canonical(
    subjects_participants,
    regdir,
    f2s,
    template="~/GitHub/mriPipeline/templates/waxholm/WHS_SD_rat_T2star_v1.01_downsample3.nii.gz",
    f_file_format="~/GitHub/mripipeline/base/preprocessing/generic_work/_subject_session_{subject}.{session}/_scan_type_SE_EPI/f_bru2nii/",
    s_file_format="~/GitHub/mripipeline/base/preprocessing/generic_work/_subject_session_{subject}.{session}/_scan_type_T2_TurboRARE/s_bru2nii/",
):
    """Warp a functional image based on the functional-to-structural and the structural-to-template registrations.
	Currently this approach is failing because the functiona-to-structural registration pushes the brain stem too far down.
	This may be

	"""
    template = os.path.expanduser(template)
    for subject_participant in subjects_participants:
        # `participant` and `i` (the session label) are used below to name the output files
        participant = subject_participant["subject"]
        i = subject_participant["session"]
        func_image_dir = os.path.expanduser(
            f_file_format.format(**subject_participant))
        struct_image_dir = os.path.expanduser(
            s_file_format.format(**subject_participant))
        try:
            for myfile in os.listdir(func_image_dir):
                if myfile.endswith((".nii.gz", ".nii")):
                    func_image = os.path.join(func_image_dir, myfile)
            for myfile in os.listdir(struct_image_dir):
                if myfile.endswith((".nii.gz", ".nii")):
                    struct_image = os.path.join(struct_image_dir, myfile)
        except FileNotFoundError:
            pass
        else:
            #struct
            n4 = ants.N4BiasFieldCorrection()
            n4.inputs.dimension = 3
            n4.inputs.input_image = struct_image
            # a bias-correction artifact is introduced (along the z-axis) if the following value is set below 85; this is likely contingent on resolution.
            n4.inputs.bspline_fitting_distance = 100
            n4.inputs.shrink_factor = 2
            n4.inputs.n_iterations = [200, 200, 200, 200]
            n4.inputs.convergence_threshold = 1e-11
            n4.inputs.output_image = '{}/ss_n4_{}_ofM{}.nii.gz'.format(
                regdir, participant, i)
            n4_res = n4.run()

            _n4 = ants.N4BiasFieldCorrection()
            _n4.inputs.dimension = 3
            _n4.inputs.input_image = struct_image
            # a bias-correction artifact is introduced (along the z-axis) if the following value is set below 85; this is likely contingent on resolution.
            _n4.inputs.bspline_fitting_distance = 95
            _n4.inputs.shrink_factor = 2
            _n4.inputs.n_iterations = [500, 500, 500, 500]
            _n4.inputs.convergence_threshold = 1e-14
            _n4.inputs.output_image = '{}/ss__n4_{}_ofM{}.nii.gz'.format(
                regdir, participant, i)
            _n4_res = _n4.run()

            # the cutoff is applied to a separate bias-corrected image, created to remove hyperintensities and thereby prevent brain regions from being caught by the negative threshold
            struct_cutoff = ImageMaths()
            struct_cutoff.inputs.op_string = "-thrP 20 -uthrp 98"
            struct_cutoff.inputs.in_file = _n4_res.outputs.output_image
            struct_cutoff_res = struct_cutoff.run()

            struct_BET = BET()
            struct_BET.inputs.mask = True
            struct_BET.inputs.frac = 0.3
            struct_BET.inputs.robust = True
            struct_BET.inputs.in_file = struct_cutoff_res.outputs.out_file
            struct_BET_res = struct_BET.run()

            struct_mask = ApplyMask()
            struct_mask.inputs.in_file = n4_res.outputs.output_image
            struct_mask.inputs.mask_file = struct_BET_res.outputs.mask_file
            struct_mask_res = struct_mask.run()

            struct_registration = ants.Registration()
            struct_registration.inputs.fixed_image = template
            struct_registration.inputs.output_transform_prefix = "output_"
            struct_registration.inputs.transforms = ['Affine', 'SyN']  ##
            struct_registration.inputs.transform_parameters = [(1.0, ),
                                                               (1.0, 3.0, 5.0)
                                                               ]  ##
            struct_registration.inputs.number_of_iterations = [[
                2000, 1000, 500
            ], [100, 100, 100]]  #
            struct_registration.inputs.dimension = 3
            struct_registration.inputs.write_composite_transform = True
            struct_registration.inputs.collapse_output_transforms = True
            struct_registration.inputs.initial_moving_transform_com = True
            # Tested on Affine transform: CC takes too long; Demons does not tilt, but moves the slices too far caudally; GC tilts too much on
            struct_registration.inputs.metric = ['MeanSquares', 'Mattes']
            struct_registration.inputs.metric_weight = [1, 1]
            struct_registration.inputs.radius_or_number_of_bins = [16, 32]  #
            struct_registration.inputs.sampling_strategy = ['Random', None]
            struct_registration.inputs.sampling_percentage = [0.3, 0.3]
            struct_registration.inputs.convergence_threshold = [1.e-11,
                                                                1.e-8]  #
            struct_registration.inputs.convergence_window_size = [20, 20]
            struct_registration.inputs.smoothing_sigmas = [[4, 2, 1],
                                                           [4, 2, 1]]
            struct_registration.inputs.sigma_units = ['vox', 'vox']
            struct_registration.inputs.shrink_factors = [[3, 2, 1], [3, 2, 1]]
            struct_registration.inputs.use_estimate_learning_rate_once = [
                True, True
            ]
            # if the fixed_image is not acquired similarly to the moving_image (e.g. RARE to histological (e.g. AMBMC)) this should be False
            struct_registration.inputs.use_histogram_matching = [False, False]
            struct_registration.inputs.winsorize_lower_quantile = 0.005
            struct_registration.inputs.winsorize_upper_quantile = 0.98
            struct_registration.inputs.args = '--float'
            struct_registration.inputs.num_threads = 6

            struct_registration.inputs.moving_image = struct_mask_res.outputs.out_file
            struct_registration.inputs.output_warped_image = '{}/s_{}_ofM{}.nii.gz'.format(
                regdir, participant, i)
            struct_registration_res = struct_registration.run()

            #func
            func_n4 = ants.N4BiasFieldCorrection()
            func_n4.inputs.dimension = 3
            func_n4.inputs.input_image = func_image
            func_n4.inputs.bspline_fitting_distance = 100
            func_n4.inputs.shrink_factor = 2
            func_n4.inputs.n_iterations = [200, 200, 200, 200]
            func_n4.inputs.convergence_threshold = 1e-11
            func_n4.inputs.output_image = '{}/f_n4_{}_ofM{}.nii.gz'.format(
                regdir, participant, i)
            func_n4_res = func_n4.run()

            func_registration = ants.Registration()
            func_registration.inputs.fixed_image = n4_res.outputs.output_image
            func_registration.inputs.output_transform_prefix = "func_"
            func_registration.inputs.transforms = [f2s]
            func_registration.inputs.transform_parameters = [(0.1, )]
            func_registration.inputs.number_of_iterations = [[40, 20, 10]]
            func_registration.inputs.dimension = 3
            func_registration.inputs.write_composite_transform = True
            func_registration.inputs.collapse_output_transforms = True
            func_registration.inputs.initial_moving_transform_com = True
            func_registration.inputs.metric = ['MeanSquares']
            func_registration.inputs.metric_weight = [1]
            func_registration.inputs.radius_or_number_of_bins = [16]
            func_registration.inputs.sampling_strategy = ["Regular"]
            func_registration.inputs.sampling_percentage = [0.3]
            func_registration.inputs.convergence_threshold = [1.e-2]
            func_registration.inputs.convergence_window_size = [8]
            func_registration.inputs.smoothing_sigmas = [[4, 2,
                                                          1]]  # [1,0.5,0]
            func_registration.inputs.sigma_units = ['vox']
            func_registration.inputs.shrink_factors = [[3, 2, 1]]
            func_registration.inputs.use_estimate_learning_rate_once = [True]
            func_registration.inputs.use_histogram_matching = [False]
            func_registration.inputs.winsorize_lower_quantile = 0.005
            func_registration.inputs.winsorize_upper_quantile = 0.995
            func_registration.inputs.args = '--float'
            func_registration.inputs.num_threads = 6

            func_registration.inputs.moving_image = func_n4_res.outputs.output_image
            func_registration.inputs.output_warped_image = '{}/f_{}_ofM{}.nii.gz'.format(
                regdir, participant, i)
            func_registration_res = func_registration.run()

            warp = ants.ApplyTransforms()
            warp.inputs.reference_image = template
            warp.inputs.input_image_type = 3
            warp.inputs.interpolation = 'Linear'
            warp.inputs.invert_transform_flags = [False, False]
            warp.inputs.terminal_output = 'file'
            warp.inputs.output_image = '{}/{}_ofM{}.nii.gz'.format(
                regdir, participant, i)
            warp.num_threads = 6

            warp.inputs.input_image = func_image
            warp.inputs.transforms = [
                func_registration_res.outputs.composite_transform,
                struct_registration_res.outputs.composite_transform
            ]
            warp.run()
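A hedged call sketch for `canonical`; the subject/session values and the registration directory are placeholders, and `f2s` names the transform type used for the functional-to-structural registration:

# each dict supplies the {subject} and {session} placeholders in the path templates
subjects_participants = [{'subject': '4001', 'session': 'ofM'}]  # placeholder values
canonical(subjects_participants, regdir='/tmp/reg', f2s='Affine')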
Example #5
def structural_to_functional_per_participant_test(
    subjects_sessions,
    template="~/GitHub/mriPipeline/templates/waxholm/new/WHS_SD_masked.nii.gz",
    f_file_format="~/GitHub/mripipeline/base/preprocessing/generic_work/_subject_session_{subject}.{session}/_scan_type_SE_EPI/f_bru2nii/",
    s_file_format="~/GitHub/mripipeline/base/preprocessing/generic_work/_subject_session_{subject}.{session}/_scan_type_T2_TurboRARE/s_bru2nii/",
    num_threads=3,
):

    template = os.path.expanduser(template)
    for subject_session in subjects_sessions:
        func_image_dir = os.path.expanduser(
            f_file_format.format(**subject_session))
        struct_image_dir = os.path.expanduser(
            s_file_format.format(**subject_session))
        try:
            for myfile in os.listdir(func_image_dir):
                if myfile.endswith((".nii.gz", ".nii")):
                    func_image = os.path.join(func_image_dir, myfile)
            for myfile in os.listdir(struct_image_dir):
                if myfile.endswith((".nii.gz", ".nii")):
                    struct_image = os.path.join(struct_image_dir, myfile)
        except FileNotFoundError:
            pass
        else:
            n4 = ants.N4BiasFieldCorrection()
            n4.inputs.dimension = 3
            n4.inputs.input_image = struct_image
            # a bias-correction artifact is introduced (along the z-axis) if the following value is set below 85; this is likely contingent on resolution.
            n4.inputs.bspline_fitting_distance = 100
            n4.inputs.shrink_factor = 2
            n4.inputs.n_iterations = [200, 200, 200, 200]
            n4.inputs.convergence_threshold = 1e-11
            n4.inputs.output_image = '{}_{}_1_biasCorrection_forRegistration.nii.gz'.format(
                *subject_session.values())
            n4_res = n4.run()

            _n4 = ants.N4BiasFieldCorrection()
            _n4.inputs.dimension = 3
            _n4.inputs.input_image = struct_image
            # a bias-correction artifact is introduced (along the z-axis) if the following value is set below 85; this is likely contingent on resolution.
            _n4.inputs.bspline_fitting_distance = 95
            _n4.inputs.shrink_factor = 2
            _n4.inputs.n_iterations = [500, 500, 500, 500]
            _n4.inputs.convergence_threshold = 1e-14
            _n4.inputs.output_image = '{}_{}_1_biasCorrection_forMasking.nii.gz'.format(
                *subject_session.values())
            _n4_res = _n4.run()

            # the cutoff is applied to a separate bias-corrected image, created to remove hyperintensities and thereby prevent brain regions from being caught by the negative threshold
            struct_cutoff = ImageMaths()
            struct_cutoff.inputs.op_string = "-thrP 20 -uthrp 98"
            struct_cutoff.inputs.in_file = _n4_res.outputs.output_image
            struct_cutoff_res = struct_cutoff.run()

            struct_BET = BET()
            struct_BET.inputs.mask = True
            struct_BET.inputs.frac = 0.3
            struct_BET.inputs.robust = True
            struct_BET.inputs.in_file = struct_cutoff_res.outputs.out_file
            struct_BET.inputs.out_file = '{}_{}_2_brainExtraction.nii.gz'.format(
                *subject_session.values())
            struct_BET_res = struct_BET.run()

            # we need not (and cannot) apply a hole fill, because the "holes", if any, will be at the rostral edge (touching it, and thus not counting as holes)
            struct_mask = ApplyMask()
            struct_mask.inputs.in_file = n4_res.outputs.output_image
            struct_mask.inputs.mask_file = struct_BET_res.outputs.mask_file
            struct_mask.inputs.out_file = '{}_{}_3_brainMasked.nii.gz'.format(
                *subject_session.values())
            struct_mask_res = struct_mask.run()

            struct_registration = ants.Registration()
            struct_registration.inputs.fixed_image = template
            struct_registration.inputs.output_transform_prefix = "output_"
            struct_registration.inputs.transforms = ['Affine', 'SyN']  ##
            struct_registration.inputs.transform_parameters = [(1.0, ),
                                                               (1.0, 3.0, 5.0)
                                                               ]  ##
            struct_registration.inputs.number_of_iterations = [[
                2000, 1000, 500
            ], [100, 100, 100]]  #
            struct_registration.inputs.dimension = 3
            struct_registration.inputs.write_composite_transform = True
            struct_registration.inputs.collapse_output_transforms = True
            struct_registration.inputs.initial_moving_transform_com = True
            # Tested on Affine transform: CC takes too long; Demons does not tilt, but moves the slices too far caudally; GC tilts too much on
            struct_registration.inputs.metric = ['MeanSquares', 'Mattes']
            struct_registration.inputs.metric_weight = [1, 1]
            struct_registration.inputs.radius_or_number_of_bins = [16, 32]  #
            struct_registration.inputs.sampling_strategy = ['Random', None]
            struct_registration.inputs.sampling_percentage = [0.3, 0.3]
            struct_registration.inputs.convergence_threshold = [1.e-11,
                                                                1.e-8]  #
            struct_registration.inputs.convergence_window_size = [20, 20]
            struct_registration.inputs.smoothing_sigmas = [[4, 2, 1],
                                                           [4, 2, 1]]
            struct_registration.inputs.sigma_units = ['vox', 'vox']
            struct_registration.inputs.shrink_factors = [[3, 2, 1], [3, 2, 1]]
            struct_registration.inputs.use_estimate_learning_rate_once = [
                True, True
            ]
            # if the fixed_image is not acquired similarly to the moving_image (e.g. RARE to histological (e.g. AMBMC)) this should be False
            struct_registration.inputs.use_histogram_matching = [False, False]
            struct_registration.inputs.winsorize_lower_quantile = 0.005
            struct_registration.inputs.winsorize_upper_quantile = 0.98
            struct_registration.inputs.args = '--float'
            struct_registration.inputs.num_threads = num_threads

            struct_registration.inputs.moving_image = struct_mask_res.outputs.out_file
            struct_registration.inputs.output_warped_image = '{}_{}_4_structuralRegistration.nii.gz'.format(
                *subject_session.values())
            struct_registration_res = struct_registration.run()

            warp = ants.ApplyTransforms()
            warp.inputs.reference_image = template
            warp.inputs.input_image_type = 3
            warp.inputs.interpolation = 'Linear'
            warp.inputs.invert_transform_flags = [False]
            warp.inputs.terminal_output = 'file'
            warp.inputs.output_image = '{}_{}_5_functionalWarp.nii.gz'.format(
                *subject_session.values())
            warp.num_threads = num_threads

            warp.inputs.input_image = func_image
            warp.inputs.transforms = struct_registration_res.outputs.composite_transform
            warp.run()
Example #6
def func_img_proc(T1C_original, T2_original, FLAIR_original, 
                  T1C_bet, T2_bet, FLAIR_bet, mask_T1C_bet,
                  T1C_isovoxel, T2_isovoxel, FLAIR_isovoxel, mask_T1C_bet_iso,
                  T1C_corrected, T2_corrected, FLAIR_corrected, T1C_bet_temp):
    
    ##Skull Stripping

    t1c_isovoxel = func_resample_isovoxel(T1C_original)
    sitk.WriteImage(t1c_isovoxel, T1C_isovoxel)
    print("resampling T1C_original - completed")
    
    func_register(T2_original, T1C_isovoxel, T2_isovoxel)
    print("register T2_original to T1C_isovoxel - completed")
    
    func_register(FLAIR_original, T1C_isovoxel, FLAIR_isovoxel)
    print("register FLAIR_original to T1C_isovoxel - completed")
    
    bet_t1gd_iso = BET(in_file = T1C_isovoxel,
                       frac = 0.4,
                       mask = True,  # brain tissue mask is stored with '_mask' suffix after T1C_bet.
                       reduce_bias = True,
                       out_file = T1C_bet_temp)
    bet_t1gd_iso.run()
    print("Acquired BET mask...")
    os.remove(T1C_bet_temp)
    
    brain_mask_file = T1C_bet_temp[:len(T1C_bet_temp)-len('.nii.gz')] + '_mask.nii.gz'
    
    ApplyBet_T1C = ApplyMask(in_file = T1C_isovoxel,
                                  mask_file= brain_mask_file,
                                  out_file= T1C_bet)
    ApplyBet_T1C.run()
    
    ApplyBet_T2 = ApplyMask(in_file = T2_isovoxel,
                                  mask_file= brain_mask_file,
                                  out_file=T2_bet)
    ApplyBet_T2.run()
    
    ApplyBet_FLAIR = ApplyMask(in_file = FLAIR_isovoxel,
                                  mask_file= brain_mask_file,
                                  out_file=FLAIR_bet)
    ApplyBet_FLAIR.run()
    
    print("Skull stripping of T1C, T2, FLAIR... - done")

    ### Resampling, registering BET files

    t1c_isovoxel = func_resample_isovoxel(T1C_bet, isseg=False)
    sitk.WriteImage(t1c_isovoxel, T1C_isovoxel)
    
    bmask_isovoxel = func_resample_isovoxel(mask_T1C_bet, isseg=True)
    sitk.WriteImage(bmask_isovoxel, mask_T1C_bet_iso)
    
    print("resampling T1C & brain mask - completed")
    
    func_register(T2_bet, T1C_isovoxel, T2_isovoxel)
    print("register T2 to T1C_isovoxel - completed")
    
    func_register(FLAIR_bet, T1C_isovoxel, FLAIR_isovoxel)
    print("register FLAIR to T1C_isovoxel - completed")
    
    ### Corrections

    func_n4bias(T1C_isovoxel, T1C_corrected)
    print("T1C bias correction done...")
    func_n4bias(T2_isovoxel, T2_corrected)
    print("T2 bias correction done...")
    func_n4bias(FLAIR_isovoxel, FLAIR_corrected)
    print("FLAIR bias correction done...")
Example #7
def get_wf_tissue_masks(name='wf_tissue_masks'):
    '''
    This function returns a workflow that segments the resampled T1 brain with
    FAST, transforms the CSF and WM tissue maps to functional (T2*) space,
    thresholds them at the given value, and masks each with an eroded brain
    mask (a bitwise AND between the two maps).
    '''
    # csf_tissue_prior_path, gm_tissue_prior_path, wm_tissue_prior_path,
    # threshold = 0.5

    wf_tissue_masks = Workflow(name=name)

    inputspec = Node(IdentityInterface(fields=[
        'resampled_anat_file_path', 'func2anat_mat_path', 'std2func_mat_path',
        'reference_func_file_path', 'brain_mask_eroded', 'threshold'
    ]),
                     name="inputspec")

    # FSL FAST node to segment the T1 brain
    fast = Node(FAST(out_basename='fast_'), name='fast')
    # probability_maps=True,segments=True,
    wf_tissue_masks.connect(inputspec, 'resampled_anat_file_path', fast,
                            'in_files')

    #  Invert the func2anat matrix to get anat2func
    inv_mat = Node(ConvertXFM(invert_xfm=True), name='inv_mat')
    wf_tissue_masks.connect(inputspec, 'func2anat_mat_path', inv_mat,
                            'in_file')

    # Transform the above segmented tissue masks to the functional space using the inverse matrix
    anat2func_xform_csf = Node(FLIRT(output_type='NIFTI',
                                     apply_xfm=True,
                                     interp='sinc'),
                               name='anat2func_xform_csf')

    wf_tissue_masks.connect(inputspec, 'reference_func_file_path',
                            anat2func_xform_csf, 'reference')
    wf_tissue_masks.connect(inv_mat, 'out_file', anat2func_xform_csf,
                            'in_matrix_file')

    anat2func_xform_wm = Node(FLIRT(output_type='NIFTI',
                                    apply_xfm=True,
                                    interp='sinc'),
                              name='anat2func_xform_wm')
    wf_tissue_masks.connect(inputspec, 'reference_func_file_path',
                            anat2func_xform_wm, 'reference')
    wf_tissue_masks.connect(inv_mat, 'out_file', anat2func_xform_wm,
                            'in_matrix_file')

    std2func_xform_eroded_brain = Node(FLIRT(output_type='NIFTI',
                                             apply_xfm=True,
                                             interp='nearestneighbour'),
                                       name='std2func_xform_eroded_brain')
    wf_tissue_masks.connect(inputspec, 'reference_func_file_path',
                            std2func_xform_eroded_brain, 'reference')
    wf_tissue_masks.connect(inputspec, 'std2func_mat_path',
                            std2func_xform_eroded_brain, 'in_matrix_file')

    def select_item_from_array(arr, index=0):
        import numpy as np
        arr = np.array(arr)
        return arr[index]

    wf_tissue_masks.connect(
        fast, ('partial_volume_files', select_item_from_array, 0),
        anat2func_xform_csf, 'in_file')
    wf_tissue_masks.connect(
        fast, ('partial_volume_files', select_item_from_array, 2),
        anat2func_xform_wm, 'in_file')
    wf_tissue_masks.connect(inputspec, 'brain_mask_eroded',
                            std2func_xform_eroded_brain, 'in_file')

    # Threshold

    def get_opstring(threshold):
        op = '-thr ' + str(threshold) + ' -bin'
        return op

    # print(inputspec.outputs)
    # ----- CSF ------

    threshold_csf = Node(interface=ImageMaths(suffix='_thresh'),
                         name='threshold_csf')
    # threshold_csf.inputs.op_string = '-thresh '+str(inputspec.outputs.threshold)+' -bin'
    wf_tissue_masks.connect(inputspec, ('threshold', get_opstring),
                            threshold_csf, 'op_string')
    wf_tissue_masks.connect(anat2func_xform_csf, 'out_file', threshold_csf,
                            'in_file')

    # ------- GM --------

    # threshold_gm = Node(interface=ImageMaths(op_string='-thresh',
    #                                             suffix='_thresh'),
    #                    name='threshold_gm')
    #
    #
    # wf_tissue_priors.connect(inputspec, ('threshold', get_opstring), threshold_gm, 'op_string' )
    # wf_tissue_priors.connect(fast, partial_volume_map[1], threshold_gm, 'in_file')
    #
    # -------- WM --------

    threshold_wm = Node(interface=ImageMaths(suffix='_thresh'),
                        name='threshold_wm')
    wf_tissue_masks.connect(inputspec, ('threshold', get_opstring),
                            threshold_wm, 'op_string')
    wf_tissue_masks.connect(anat2func_xform_wm, 'out_file', threshold_wm,
                            'in_file')

    #  -------------------

    #
    # wf_tissue_masks.connect(threshold_csf, 'out_file', std2func_xform_csf, 'in_file')
    # wf_tissue_masks.connect(threshold_wm, 'out_file', std2func_xform_wm, 'in_file')

    # Masking the outer brain CSF

    csf_mask = Node(interface=ApplyMask(), name='csf_mask')
    wf_tissue_masks.connect(threshold_csf, 'out_file', csf_mask, 'in_file')
    wf_tissue_masks.connect(std2func_xform_eroded_brain, 'out_file', csf_mask,
                            'mask_file')

    # Masking the outer brain WM that might be present due to poor BET

    wm_mask = Node(interface=ApplyMask(), name='wm_mask')
    wf_tissue_masks.connect(threshold_wm, 'out_file', wm_mask, 'in_file')
    wf_tissue_masks.connect(std2func_xform_eroded_brain, 'out_file', wm_mask,
                            'mask_file')

    # wm_mask = Node(interface=ApplyMask(),
    #                    name='wm_mask')
    # wf_tissue_masks.connect(std2func_xform_wm, 'out_file', wm_mask, 'in_file')
    # wf_tissue_masks.connect(std2func_xform_wm_prior, 'out_file', wm_mask, 'mask_file')

    outputspec = Node(IdentityInterface(fields=['csf_mask', 'wm_mask']),
                      name="outputspec")

    wf_tissue_masks.connect(csf_mask, 'out_file', outputspec, 'csf_mask')
    # wf_tissue_priors.connect(threshold_gm, 'out_file', outputspec, 'gm_tissue_prior_path')
    wf_tissue_masks.connect(wm_mask, 'out_file', outputspec, 'wm_mask')

    return wf_tissue_masks
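A brief usage sketch; every file path below is a placeholder for the corresponding `inputspec` field:

wf = get_wf_tissue_masks()
wf.base_dir = '/scratch/work'  # placeholder working directory
wf.inputs.inputspec.resampled_anat_file_path = 'anat_resampled.nii.gz'
wf.inputs.inputspec.func2anat_mat_path = 'func2anat.mat'
wf.inputs.inputspec.std2func_mat_path = 'std2func.mat'
wf.inputs.inputspec.reference_func_file_path = 'mean_func.nii.gz'
wf.inputs.inputspec.brain_mask_eroded = 'brain_mask_eroded.nii.gz'
wf.inputs.inputspec.threshold = 0.5
wf.run()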
Example #8
def structural_per_participant_test(
    participant,
    conditions=["", "_aF", "_cF1", "_cF2", "_pF"],
    template="/home/chymera/ni_data/templates/ds_QBI_chr.nii.gz",
):

    for i in conditions:
        image_dir = "/home/chymera/ni_data/ofM.dr/preprocessing/generic_work/_subject_session_{}.ofM{}/_scan_type_T2_TurboRARE/s_bru2nii/".format(
            participant, i)
        print(image_dir)
        try:
            for myfile in os.listdir(image_dir):
                if myfile.endswith(".nii"):
                    mimage = os.path.join(image_dir, myfile)
        except FileNotFoundError:
            pass
        else:
            n4 = ants.N4BiasFieldCorrection()
            n4.inputs.dimension = 3
            n4.inputs.input_image = mimage
            # a bias-correction artifact is introduced (along the z-axis) if the following value is set below 85; this is likely contingent on resolution.
            n4.inputs.bspline_fitting_distance = 100
            n4.inputs.shrink_factor = 2
            n4.inputs.n_iterations = [200, 200, 200, 200]
            n4.inputs.convergence_threshold = 1e-11
            n4.inputs.output_image = 'ss_n4_{}_ofM{}.nii.gz'.format(
                participant, i)
            n4_res = n4.run()

            _n4 = ants.N4BiasFieldCorrection()
            _n4.inputs.dimension = 3
            _n4.inputs.input_image = mimage
            # a bias-correction artifact is introduced (along the z-axis) if the following value is set below 85; this is likely contingent on resolution.
            _n4.inputs.bspline_fitting_distance = 95
            _n4.inputs.shrink_factor = 2
            _n4.inputs.n_iterations = [500, 500, 500, 500]
            _n4.inputs.convergence_threshold = 1e-14
            _n4.inputs.output_image = 'ss__n4_{}_ofM{}.nii.gz'.format(
                participant, i)
            _n4_res = _n4.run()

            # the cutoff is applied to a separate bias-corrected image, created to remove hyperintensities and thereby prevent brain regions from being caught by the negative threshold
            struct_cutoff = ImageMaths()
            struct_cutoff.inputs.op_string = "-thrP 20 -uthrp 98"
            struct_cutoff.inputs.in_file = _n4_res.outputs.output_image
            struct_cutoff_res = struct_cutoff.run()

            struct_BET = BET()
            struct_BET.inputs.mask = True
            struct_BET.inputs.frac = 0.3
            struct_BET.inputs.robust = True
            struct_BET.inputs.in_file = struct_cutoff_res.outputs.out_file
            struct_BET_res = struct_BET.run()

            mask = ApplyMask()
            mask.inputs.in_file = n4_res.outputs.output_image
            mask.inputs.mask_file = struct_BET_res.outputs.mask_file
            mask_res = mask.run()

            struct_registration = ants.Registration()
            struct_registration.inputs.fixed_image = template
            struct_registration.inputs.output_transform_prefix = "output_"
            struct_registration.inputs.transforms = ['Rigid', 'Affine',
                                                     'SyN']  ##
            struct_registration.inputs.transform_parameters = [(.1, ), (1.0, ),
                                                               (1.0, 3.0, 5.0)
                                                               ]  ##
            struct_registration.inputs.number_of_iterations = [[
                150, 100, 50
            ], [2000, 1000, 500], [100, 100, 100]]  #
            struct_registration.inputs.dimension = 3
            struct_registration.inputs.write_composite_transform = True
            struct_registration.inputs.collapse_output_transforms = True
            struct_registration.inputs.initial_moving_transform_com = True
            # Tested on Affine transform: CC takes too long; Demons does not tilt, but moves the slices too far caudally; GC tilts too much on
            struct_registration.inputs.metric = [
                'MeanSquares', 'MeanSquares', 'Mattes'
            ]
            struct_registration.inputs.metric_weight = [1, 1, 1]
            struct_registration.inputs.radius_or_number_of_bins = [16, 16,
                                                                   32]  #
            struct_registration.inputs.sampling_strategy = [
                'Random', 'Random', None
            ]
            struct_registration.inputs.sampling_percentage = [0.3, 0.3, 0.3]
            struct_registration.inputs.convergence_threshold = [
                1.e-10, 1.e-11, 1.e-8
            ]  #
            struct_registration.inputs.convergence_window_size = [20, 20, 20]
            struct_registration.inputs.smoothing_sigmas = [[4, 2,
                                                            1], [4, 2, 1],
                                                           [4, 2, 1]]
            struct_registration.inputs.sigma_units = ['vox', 'vox', 'vox']
            struct_registration.inputs.shrink_factors = [[3, 2, 1], [3, 2, 1],
                                                         [3, 2, 1]]
            struct_registration.inputs.use_estimate_learning_rate_once = [
                True, True, True
            ]
            # if the fixed_image is not acquired similarly to the moving_image (e.g. RARE to histological (e.g. AMBMC)) this should be False
            struct_registration.inputs.use_histogram_matching = [
                False, False, False
            ]
            struct_registration.inputs.winsorize_lower_quantile = 0.005
            struct_registration.inputs.winsorize_upper_quantile = 0.98
            struct_registration.inputs.args = '--float'
            struct_registration.inputs.num_threads = 6

            struct_registration.inputs.moving_image = mask_res.outputs.out_file
            struct_registration.inputs.output_warped_image = 'ss_{}_ofM{}.nii.gz'.format(
                participant, i)
            res = struct_registration.run()
Example #9
def anatPipeline(resultsDir, workDir, subDir, subid):

	pnf = join(subDir,subid+".ipynb")
	pnb = getIPythonNB(pnf, True)

	text= "# Anatomic Pipeline results for subject " + subid
	code= ""
	addCell(pnb, text,code)

	print("\n ** PIPELINE : starting anatomic pipeline.\n")
	ANAT_DIR = abspath(join(subDir, 'anat'))
	ANAT_T1W = abspath(join(ANAT_DIR,  subid + '_T1w.nii.gz'))
	ANAT_BET_T1W=abspath(join(resultsDir, subid + '_T1w_bet.nii.gz'))

	text="Anatomic T1W image"
	code=("%pylab inline\nimport nibabel as nb;img = nb.load('"
		+ANAT_T1W+"');data = img.get_data();"
	"plt.imshow(np.rot90(data[...,100]),cmap='gray');"
	"plt.gca().set_axis_off()")
	addCell(pnb, text,code)

	#A. PREPROCESSING PIPELINE

	#1. SKULL STRIPPING WITH BET
	betNodeInputs={}
	betNodeInputs['in_file']=ANAT_T1W
	#betNodeInputs['out_file']=ANAT_BET_T1W #very strange that this stops it working in workflow mode
	betNodeInputs['mask']=True
	betNodeInputs['frac']=0.5
	betNode = createNiPypeNode(BET(),'betNode',betNodeInputs)
	#n_file=ANAT_T1W
	#betNode = Node(BET(in_file=in_file,mask=True), name="betNode")
	#print(betNode.inputs)
	#print(betNode._interface.cmdline)
	#print(betNode.outputs)
	#betResults = betNode.run()


	#print('** debug ** command line output')
	#print(betNode._interface.cmdline)
	#print('** debug ** inputs')
	#print(betNode.inputs)
	#print('** debug ** outputs')
	#print(tempresult.outputs)

	#2. SMOOTH ORIGINAL IMAGE WITH ISOTROPIC SMOOTH
	smoothNodeInputs = {}
	smoothNodeInputs['in_file']=ANAT_T1W
	smoothNodeInputs['fwhm']=4
	smoothNode = createNiPypeNode(IsotropicSmooth(),'smoothNode',smoothNodeInputs)
	#smoothResults =smoothNode.run()

	#3. MASK SMOOTHED IMAGE WITH APPLYMASK
	maskNodeInputs = {}
	maskNode = createNiPypeNode(ApplyMask(),'maskNode',maskNodeInputs)

	# ILLUSTRATING USING WORKFLOW WITH APPLYMASK
	#4. create workflow
	wfName='smoothflow'
	wfGraph='smoothWorkflow_graph.dot'
	wfGraphDetailed='smoothWorkflow_graph_detailed.dot'
	wf = Workflow(name=wfName,base_dir=workDir)
	WF_DIR=abspath(join(workDir, wfName))
	wf.connect(betNode, "mask_file",maskNode, "mask_file")
	wf.connect([(smoothNode,maskNode,[("out_file", "in_file")])])
	wf.write_graph(wfGraph, graph2use='colored')
	wfImg = plt.imread(WF_DIR + '/' + wfGraph+'.png')
	plt.imshow(wfImg)
	#plt.show(block=False) #set to true if you want to see this graph
	wf.write_graph(wfGraph, graph2use='flat')
	wfImgDetailed = plt.imread(WF_DIR + '/' + wfGraphDetailed+'.png')
	#plt.imshow(wfImgDetailed)
	#plt.show(block=False)

	# run the workflow
	wf.run()
	print(wf.inputs)
	print(wf.outputs)
	#print(betNode.inputs)
	#print(betNode.outputs)
	#print(wf.get_node('betNode').inputs)
	#print(wf.get_node('betNode').outputs)

	pnames=[]
	wfInputs = getWorkFlowInputs(wf,True)
	if len(wfInputs)> 0:
		for node in wf.list_node_names():
			for key in wfInputs[node]:
				filename = wfInputs[node][key]
				filecomps = filename.split('.')
				if filecomps[-1]=='nii' or (filecomps[-1]=='gz' and filecomps[-2]=='nii'):
					pnames.append(filename)

	wfOutputs = getWorkFlowOutputs(wf,True)
	if len(wfOutputs)> 0:
		for node in wf.list_node_names():
			for key in wfOutputs[node]:
				filename = wfOutputs[node][key]
				filecomps = filename.split('.')
				if filecomps[-1]=='nii' or (filecomps[-1]=='gz' and filecomps[-2]=='nii'):
					pnames.append(filename)

	pnames = set(pnames)

	#
	#%pylab inline
	#from nilearn import plotting

	#niplot.plot_anat(betNode.inputs.in_file,title='T1W in-file', cut_coords=(10,10,10), display_mode='ortho', dim=-1, draw_cross=False, annotate=False)
	#niplot.show() #need a better way to display
	#plt.show(block=False)

	#niplot.plot_anat(smoothResults.outputs.out_file,title='T1W in-file', cut_coords=(10,10,10), display_mode='ortho', dim=-1, draw_cross=False, annotate=False)
	#niplot.show() #need a better way to display

	#niplot.plot_anat(betResults.outputs.out_file,title='T1W skull-stripped out-file', cut_coords=(10,10,10), display_mode='ortho', dim=-1, draw_cross=False, annotate=False)
	#niplot.show() #need a better way to display that doesn't hold up process
	#niplot.show() #for now just do this at the end.
	#plt.show(block=False)

	#plot images vertically
	#fnames=[]
	#fnames.append(betResults.outputs.out_file)
	#fnames.append(smoothResults.outputs.out_file)

	ranges = [30, 138, 180]
	plot_slices(pnames,ranges, 'v')
	plt.show()

	#plot images horizontally
	#fnames=[]
	#fnames.append(betResults.outputs.out_file)
	#fnames.append(smoothResults.outputs.out_file)

	ranges=range(130,136)
	plot_slices(pnames,ranges, 'h')
	plt.show()

	# ILLUSTRATING USING NESTED WORKFLOW WITH PROVIDED SUSAN FLOW for Non-linear smoothing
	# 5. Create Susan workflow and display
	# Note: to control the working location for SUSAN, it needs to be embedded into another workflow.
	wfName='susan_smooth'
	wfGraph='susan_smooth_graph.dot'
	WF_DIR=abspath(join(workDir, wfName))
	susanWf = create_susan_smooth(name='susan_smooth', separate_masks=False)
	#print(susanWf.inputs)
	#print(susanWf.outputs)
	#print(susanWf.inputs.inputnode) # this specifies the visible inputs/outputs to external
	#print(susanWf.outputs.outputnode)
	graphLoc=join(WF_DIR,wfGraph)
	susanWf.write_graph(graphLoc,graph2use='colored')
	susanWfImg = plt.imread(join(WF_DIR,wfGraph+'.png'))
	plt.imshow(susanWfImg)
	plt.gca().set_axis_off()
	plt.show()

	# 6. Create new Workflow and use Susan as the smoothing step
	# Initiate workflow with name and base directory
	wfName='smoothSusanFlow'
	wfGraph='smoothSusanFlow_graph.dot'
	WF_DIR=abspath(join(workDir, wfName))
	wf2 = Workflow(name=wfName, base_dir=workDir)

	# Create new skullstrip and mask nodes
	betNodeInputs={}
	betNodeInputs['in_file']=ANAT_T1W
	betNodeInputs['mask']=True
	betNodeInputs['frac']=0.5
	betNode2 = createNiPypeNode(BET(),'betNode2',betNodeInputs)

	maskNodeInputs = {}
	maskNode2 = createNiPypeNode(ApplyMask(),'maskNode2',maskNodeInputs)


	# Connect the nodes to each other and to the susan workflow
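	# NOTE: list_extract is not defined in this snippet; it is presumably a helper node
	# (e.g. a Function node with input 'list_out' and output 'out_file') that picks the
	# single smoothed file out of the list returned by the SUSAN workflow.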
	wf2.connect([(betNode2, maskNode2, [("mask_file", "mask_file")]),
             (betNode2, susanWf, [("mask_file", "inputnode.mask_file")]),
             (susanWf, list_extract, [("outputnode.smoothed_files",
                                     "list_out")]),
             (list_extract, maskNode2, [("out_file", "in_file")])
             ])

	# Specify the remaining input variables for the susan workflow
	susanWf.inputs.inputnode.in_files = abspath(ANAT_T1W)
	susanWf.inputs.inputnode.fwhm = 4

	#detailed graph showing workflow details embedded in overall workflow
	graphLoc=join(WF_DIR,wfGraph)
	wf2.write_graph(graphLoc, graph2use='colored')
	graphImgLoc=join(WF_DIR,wfGraph+'.png')
	wf2Img = plt.imread(graphImgLoc)
	plt.imshow(wf2Img)
	plt.gca().set_axis_off()
	plt.show()


	text="Demonstrating Nilearn plotting"
	plotTitle = 'dynamic title'
	code=("%pylab inline\nfrom nilearn import plotting;plotting.plot_anat('"+ ANAT_T1W + 
		"',title='" + plotTitle +"', cut_coords=(" + "10,10,10"+"), display_mode='ortho', dim=-1, draw_cross=False, annotate=False)")
	addCell(pnb, text,code)

	#niplot.plot_anat(betNode.inputs.in_file,title='T1W in-file', cut_coords=(10,10,10), display_mode='ortho', dim=-1, draw_cross=False, annotate=False)
	#niplot.show() #need a better way to display
	#plt.show(block=False)

	text="Anatomic Workflow"
	code=("%pylab inline\nimg=matplotlib.image.imread('"+ graphImgLoc + 
		"');imgplot = plt.imshow(img)")
	addCell(pnb, text,code)
	closeIPythonNB(pnf, pnb, True)

	#graph showing summary of embedded workflow
	wf2.write_graph(join(WF_DIR,wfGraph), graph2use='orig')
	wf2Img = plt.imread(join(WF_DIR,wfGraph+'.png'))
	plt.imshow(wf2Img)
	plt.gca().set_axis_off()
	plt.show()

	#run the new workflow with embedded susan
	wf2.run()

	print(wf2.inputs)
	print(wf2.outputs)
	print(str(wf2.list_node_names()))
	print(str(susanWf.list_node_names()))

	# Surely there has to be an easier way than this to get the output and input files generically from a workflow?
	pnames=[]
	wfInputs = getWorkFlowInputs(wf2,True)
	if len(wfInputs)> 0:
		for node in wf2.list_node_names():
			if node in wfInputs:
				for key in wfInputs[node]:
					filename = wfInputs[node][key]
					filecomps = filename.split('.')
					if filecomps[-1]=='nii' or (filecomps[-1]=='gz' and filecomps[-2]=='nii'):
						pnames.append(filename)

	wfOutputs = getWorkFlowOutputs(wf2,True)
	if len(wfOutputs)> 0:
		for node in wf2.list_node_names():
			if node in wfOutputs:
				for key in wfOutputs[node]:
					filename = wfOutputs[node][key]
					filecomps = filename.split('.')
					if filecomps[-1]=='nii' or (filecomps[-1]=='gz' and filecomps[-2]=='nii'):
						pnames.append(filename)

	pnames = set(pnames)
	ranges = [30, 138, 180]
	plot_slices(pnames,ranges, 'v')
	plt.show()

	#plot images horizontally
	#fnames=[]
	#fnames.append(betResults.outputs.out_file)
	#fnames.append(smoothResults.outputs.out_file)

	ranges=range(130,136)
	plot_slices(pnames,ranges, 'h')
	plt.show()


	# 7. Demonstrate efficient recomputing of workflows - only dependent steps need to be recomputed
	#original workflow
	wf.inputs.smoothNode.fwhm = 1
	wf.run()

	pnames=[]
	wfOutputs = getWorkFlowOutputs(wf,True)
	if len(wfOutputs)> 0:
		for node in wf.list_node_names():
			for key in wfOutputs[node]:
				filename = wfOutputs[node][key]
				filecomps = filename.split('.')
				if filecomps[-1]=='nii' or (filecomps[-1]=='gz' and filecomps[-2]=='nii'):
					pnames.append(filename)
	pnames = set(pnames)
	ranges = [30, 138]
	plot_slices(pnames,ranges, 'h')
	plt.show()

	#susan workflow
	wf2.inputs.susan_smooth.inputnode.fwhm = 1
	wf2.run()

	pnames=[]
	wfOutputs = getWorkFlowOutputs(wf2,True)
	if len(wfOutputs)> 0:
		for node in wf2.list_node_names():
			if node in wfOutputs:
				for key in wfOutputs[node]:
					filename = wfOutputs[node][key]
					filecomps = filename.split('.')
					if filecomps[-1]=='nii' or (filecomps[-1]=='gz' and filecomps[-2]=='nii'):
						pnames.append(filename)

	pnames = set(pnames)
	ranges = [30, 138]
	plot_slices(pnames,ranges, 'h')
	plt.show()