Example #1
    def mask_image(self,folder,in_path,out_path):
        """
        This function for creating mask_image.
        It is dependent on the class name: func.
        folder : read the string, new folder's name.
        in_path : read the list, element: string, directory address of data.
        out_path : read the string, name of the new data.

        For complete details, see the BET() documentation.
        <https://nipype.readthedocs.io/en/latest/interfaces.html>
        """
        print('#################_Mask_image_started_#######################')
        preprocessing = time.time()

        # Create the output directory if it does not already exist
        if not os.path.isdir(folder):
            func.newfolder(folder)

        # Create and save the skull-stripped anatomy and the whole-brain mask image
        for i in range(len(in_path)):
            output = folder+'/'+out_path[i]
            print(in_path[i], "mask image started")
            skullstrip = BET(in_file=in_path[i],
                             out_file=output,
                             mask=True)
            skullstrip.run()
            print(output,"mask image completed")
        print("computation_time :","%.2fs" %(time.time() - preprocessing))
        print('#################_Mask_image_completed_#####################')
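A minimal standalone sketch of the same per-file loop, with made-up file names; it mirrors the method body without the class and the ``func`` helper.

import os
from nipype.interfaces.fsl import BET

folder = 'masks'                                         # output directory (assumption)
in_path = ['sub-01_T1w.nii.gz', 'sub-02_T1w.nii.gz']     # hypothetical input images
out_path = ['sub-01_brain.nii.gz', 'sub-02_brain.nii.gz']

os.makedirs(folder, exist_ok=True)
for src, dst in zip(in_path, out_path):
    # mask=True also writes a binary whole-brain mask next to the output
    BET(in_file=src, out_file=os.path.join(folder, dst), mask=True).run()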
Example #2
def mask_CTBrain(CT_dir, T1_dir, frac):

    # nav to dir with T1 images
    os.chdir(T1_dir)

    # run BET to extract brain from T1
    bet = BET()
    bet.inputs.in_file = T1_dir + '/T1.nii'
    bet.inputs.frac = frac
    bet.inputs.robust = True
    bet.inputs.mask = True
    print "::: Extracting Brain mask from T1 using BET :::"
    bet.run()

    # use FLIRT to coregister CT to T1
    flirt = FLIRT()
    flirt.inputs.in_file = CT_dir + '/CT.nii'
    flirt.inputs.reference = T1_dir + '/T1.nii'
    flirt.inputs.cost_func = 'mutualinfo'
    print "::: Estimating corregistration from CT to T1 using FLIRT :::"
    flirt.run()

    # get inverse of estimated coreg of CT to T1
    print("::: Estimating inverse affine for Brain mask of CT :::")
    os.system('convert_xfm -omat inv_CT_flirt.mat -inverse CT_flirt.mat')
    os.system('mv inv_CT_flirt.mat %s' % (CT_dir))

    # apply inverse affine coreg to get brain mask in CT space
    applyxfm = ApplyXfm()
    applyxfm.inputs.in_file = T1_dir + '/T1_brain_mask.nii.gz'
    applyxfm.inputs.in_matrix_file = CT_dir + '/inv_CT_flirt.mat'
    applyxfm.inputs.out_file = CT_dir + '/CT_mask.nii.gz'
    applyxfm.inputs.reference = CT_dir + '/CT.nii'
    applyxfm.inputs.apply_xfm = True
    print "::: Applying inverse affine to Brain mask to get CT mask :::"
    applyxfm.run()

    # dilate brain mask to make sure all elecs are in final masked img
    CT_mask = nib.load(CT_dir + '/CT_mask.nii.gz')
    # get_data()/get_header()/get_affine() are removed in recent nibabel versions
    CT_mask_dat = np.asanyarray(CT_mask.dataobj).astype(np.uint8)
    kernel = np.ones((5, 5), np.uint8)

    print("::: Dilating CT mask :::")
    dilated = cv2.dilate(CT_mask_dat, kernel, iterations=1)
    hdr = CT_mask.header
    affine = CT_mask.affine
    N = nib.Nifti1Image(dilated, affine, hdr)
    new_fname = CT_dir + '/dilated_CT_mask.nii'
    N.to_filename(new_fname)

    # apply mask to CT
    os.chdir(CT_dir)
    print "::: masking CT with dilated brain mask :::"
    os.system(
        "fslmaths 'CT.nii' -mas 'dilated_CT_mask.nii.gz' 'masked_CT.nii'")
    os.system('gunzip masked_CT.nii.gz')
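The two os.system calls above can also be expressed with nipype interfaces. A hedged sketch using ConvertXFM (wrapping convert_xfm) and ApplyMask (wrapping fslmaths -mas); the directory is a placeholder and the matrix name follows the *_flirt.mat convention used in the shell commands above.

from nipype.interfaces.fsl import ApplyMask, ConvertXFM

CT_dir = '/data/subj01/CT'  # hypothetical directory, standing in for the CT_dir argument

# Invert the CT-to-T1 affine estimated by FLIRT above.
invt = ConvertXFM(in_file='CT_flirt.mat',
                  invert_xfm=True,
                  out_file=CT_dir + '/inv_CT_flirt.mat')
invt.run()

# Mask the CT with the dilated brain mask, replacing the fslmaths shell call.
masker = ApplyMask(in_file=CT_dir + '/CT.nii',
                   mask_file=CT_dir + '/dilated_CT_mask.nii',
                   out_file=CT_dir + '/masked_CT.nii.gz')
masker.run()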
Example #3
def mask_CTBrain(CT_dir, T1_dir, frac):

    # nav to dir with T1 images
    os.chdir(T1_dir)

    # run BET to extract brain from T1
    bet = BET()
    bet.inputs.in_file = T1_dir + "/T1.nii"
    bet.inputs.frac = frac
    bet.inputs.robust = True
    bet.inputs.mask = True
    print "::: Extracting Brain mask from T1 using BET :::"
    bet.run()

    # use FLIRT to coregister CT to T1
    flirt = FLIRT()
    flirt.inputs.in_file = CT_dir + "/CT.nii"
    flirt.inputs.reference = T1_dir + "/T1.nii"
    flirt.inputs.cost_func = "mutualinfo"
    print "::: Estimating corregistration from CT to T1 using FLIRT :::"
    flirt.run()

    # get inverse of estimated coreg of CT to T1
    print("::: Estimating inverse affine for Brain mask of CT :::")
    os.system("convert_xfm -omat inv_CT_flirt.mat -inverse CT_flirt.mat")
    os.system("mv inv_CT_flirt.mat %s" % (CT_dir))

    # apply inverse affine coreg to get brain mask in CT space
    applyxfm = ApplyXfm()
    applyxfm.inputs.in_file = T1_dir + "/T1_brain_mask.nii.gz"
    applyxfm.inputs.in_matrix_file = CT_dir + "/inv_CT_flirt.mat"
    applyxfm.inputs.out_file = CT_dir + "/CT_mask.nii.gz"
    applyxfm.inputs.reference = CT_dir + "/CT.nii"
    applyxfm.inputs.apply_xfm = True
    print "::: Applying inverse affine to Brain mask to get CT mask :::"
    applyxfm.run()

    # dilate brain mask to make sure all elecs are in final masked img
    CT_mask = nib.load(CT_dir + "/CT_mask.nii.gz")
    # get_data()/get_header()/get_affine() are removed in recent nibabel versions
    CT_mask_dat = np.asanyarray(CT_mask.dataobj).astype(np.uint8)
    kernel = np.ones((5, 5), np.uint8)

    print("::: Dilating CT mask :::")
    dilated = cv2.dilate(CT_mask_dat, kernel, iterations=1)
    hdr = CT_mask.header
    affine = CT_mask.affine
    N = nib.Nifti1Image(dilated, affine, hdr)
    new_fname = CT_dir + "/dilated_CT_mask.nii"
    N.to_filename(new_fname)

    # apply mask to CT
    os.chdir(CT_dir)
    print "::: masking CT with dilated brain mask :::"
    os.system("fslmaths 'CT.nii' -mas 'dilated_CT_mask.nii.gz' 'masked_CT.nii'")
    os.system("gunzip masked_CT.nii.gz")
Example #4
def bet(record):
    import nipype
    from nipype.interfaces.fsl import BET
    from nipype.utils.filemanip import hash_infile

    nipype.config.enable_provenance()

    in_file_uri = record['t1_uri']
    os.chdir('/tmp')
    fname = 'anatomy.nii.gz'

    with open(fname, 'wb') as fd:
        response = requests.get(in_file_uri, stream=True)
        if not response.ok:
            response.raise_for_status()
        for chunk in response.iter_content(1024):
            fd.write(chunk)

    # Check if interface has run with this input before
    sha = hash_infile(os.path.abspath(fname), crypto=hashlib.sha512)
    select = SelectQuery(config=app.config)
    res = select.execute_select('E0921842-1EDB-49F8-A4B3-BA51B85AD407')
    sha_recs = res[res.sha512.str.contains(sha)]
    bet_recs = res[res.interface.str.contains('BET')]
    results = dict()
    if sha_recs.empty or \
            (not sha_recs.empty and bet_recs.empty):
        better = BET()
        better.inputs.in_file = os.path.abspath(fname)
        result = better.run()
        prov = result.provenance.rdf().serialize(format='json-ld')
        results.update({'prov': prov})
    else:
        results.update({'prov': res.to_json(orient='records')})
    return results
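A hedged sketch of a call to bet(); the URI is made up, and os, requests, hashlib, SelectQuery, and app are assumed to be provided by the surrounding service module.

# Hypothetical record: only the 't1_uri' key is read by bet().
record = {'t1_uri': 'https://example.org/data/sub-01_T1w.nii.gz'}
results = bet(record)
# 'prov' holds either fresh JSON-LD provenance or the cached query results.
print(list(results.keys()))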
Example #5
def skullstrip(infile: Path) -> Tuple[Path, Path]:
    cmd = BET()
    cmd.inputs.in_file = str(infile)
    cmd.inputs.out_file = str(infile).replace(SLICETIME_SUFFIX, BET_SUFFIX)
    cmd.inputs.output_type = "NIFTI_GZ"
    cmd.inputs.functional = True
    cmd.inputs.mask = True
    results = cmd.run()
    return results.outputs.out_file, results.outputs.mask_file
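A hedged usage sketch; SLICETIME_SUFFIX and BET_SUFFIX are module-level constants in the original file, so the values below are only assumptions.

from pathlib import Path

SLICETIME_SUFFIX = '_st.nii.gz'   # assumed value of the module constant
BET_SUFFIX = '_bet.nii.gz'        # assumed value of the module constant

# The output name is derived by swapping the slice-timing suffix for the BET suffix.
brain_file, mask_file = skullstrip(Path('sub-01_task-rest_bold_st.nii.gz'))
print(brain_file, mask_file)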
Example #6
def test_skull_stripping_ids_only():

    data_dir = os.path.abspath('.') + '/data/ds114'
    pipeline_name = 'test_skullStrippingTransformer_ds114'

    IDS = ['01', '02', '03']
    sessions = ['test', 'retest']

    transformer = SkullStrippingTransformer(
        data_dir=data_dir,
        pipeline_name=pipeline_name,
        search_param=dict(extensions='T1w.nii.gz'),
        variant='skullStrippingIDsOnly')
    transformer.fit_transform(IDS)

    for subject in IDS:
        for session in sessions:

            in_file = build_T1w_in_file_path(data_dir, subject, session)

            out_file = build_T1w_out_file_path(data_dir, pipeline_name,
                                               subject, session,
                                               'skullStrippingIDsOnly',
                                               'neededBrain')

            dirname = os.path.dirname(out_file)
            if not os.path.exists(dirname):
                os.makedirs(dirname)

            betfsl = BET(in_file=in_file, out_file=out_file)
            betfsl.run()

            res_file = build_T1w_out_file_path(data_dir, pipeline_name,
                                               subject, session,
                                               'skullStrippingIDsOnly',
                                               'brain')

            result_mat = nib.load(res_file).affine
            output_mat = nib.load(out_file).affine
            assert np.allclose(result_mat, output_mat)
            os.remove(res_file)
            os.remove(out_file)
Example #7
def run_bet(robust: bool = True, skip_existing: bool = True):
    scans = get_scans(LOCATION_DICT["raw"])
    for scan in scans:
        print(f"\nCurrent series: {scan}")
        if skip_existing:
            print("Checking for existing skull-stripping output...", end="\t")
        dest = get_default_destination(scan)
        if skip_existing and os.path.isfile(dest):
            print(f"\u2714")
            continue
        print(f"\u2718")
        print("Running skull-stripping with BET...", end="\t")
        try:
            bet = BET(robust=robust)  # honour the function argument instead of hard-coding True
            bet.inputs.in_file = scan
            bet.inputs.out_file = dest
            bet.run()
            print(f"\u2714\tDone!")
        except Exception as e:
            print(f"\u2718")
            print(e.args)
            break
Example #8
def functional_per_participant_test():
    for i in ["", "_aF", "_cF1", "_cF2", "_pF"]:
        template = "~/ni_data/templates/ds_QBI_chr.nii.gz"
        participant = "4008"
        image_dir = "~/ni_data/ofM.dr/preprocessing/generic_work/_subject_session_{}.ofM{}/_scan_type_7_EPI_CBV/temporal_mean/".format(
            participant, i)
        try:
            for myfile in os.listdir(image_dir):
                if myfile.endswith(".nii.gz"):
                    mimage = os.path.join(image_dir, myfile)
        except FileNotFoundError:
            pass
        else:
            n4 = ants.N4BiasFieldCorrection()
            n4.inputs.dimension = 3
            n4.inputs.input_image = mimage
            n4.inputs.bspline_fitting_distance = 100
            n4.inputs.shrink_factor = 2
            n4.inputs.n_iterations = [200, 200, 200, 200]
            n4.inputs.convergence_threshold = 1e-11
            n4.inputs.output_image = 'n4_{}_ofM{}.nii.gz'.format(
                participant, i)
            n4_res = n4.run()

            functional_cutoff = ImageMaths()
            functional_cutoff.inputs.op_string = "-thrP 30"
            functional_cutoff.inputs.in_file = n4_res.outputs.output_image
            functional_cutoff_res = functional_cutoff.run()

            functional_BET = BET()
            functional_BET.inputs.mask = True
            functional_BET.inputs.frac = 0.5
            functional_BET.inputs.in_file = functional_cutoff_res.outputs.out_file
            functional_BET_res = functional_BET.run()

            registration = ants.Registration()
            registration.inputs.fixed_image = template
            registration.inputs.output_transform_prefix = "output_"
            registration.inputs.transforms = ['Affine', 'SyN']
            registration.inputs.transform_parameters = [(0.1, ),
                                                        (3.0, 3.0, 5.0)]
            registration.inputs.number_of_iterations = [[10000, 10000, 10000],
                                                        [100, 100, 100]]
            registration.inputs.dimension = 3
            registration.inputs.write_composite_transform = True
            registration.inputs.collapse_output_transforms = True
            registration.inputs.initial_moving_transform_com = True
            registration.inputs.metric = ['Mattes'] * 2 + [['Mattes', 'CC']]
            registration.inputs.metric_weight = [1] * 2 + [[0.5, 0.5]]
            registration.inputs.radius_or_number_of_bins = [32] * 2 + [[32, 4]]
            registration.inputs.sampling_strategy = ['Regular'] * 2 + [[
                None, None
            ]]
            registration.inputs.sampling_percentage = [0.3] * 2 + [[
                None, None
            ]]
            registration.inputs.convergence_threshold = [1.e-8] * 2 + [-0.01]
            registration.inputs.convergence_window_size = [20] * 2 + [5]
            registration.inputs.smoothing_sigmas = [[4, 2, 1]] * 2 + [[
                1, 0.5, 0
            ]]
            registration.inputs.sigma_units = ['vox'] * 3
            registration.inputs.shrink_factors = [[3, 2, 1]] * 2 + [[4, 2, 1]]
            registration.inputs.use_estimate_learning_rate_once = [True] * 3
            registration.inputs.use_histogram_matching = [False] * 2 + [True]
            registration.inputs.winsorize_lower_quantile = 0.005
            registration.inputs.winsorize_upper_quantile = 0.995
            registration.inputs.args = '--float'
            registration.inputs.num_threads = 4
            registration.plugin_args = {
                'qsub_args': '-pe orte 4',
                'sbatch_args': '--mem=6G -c 4'
            }

            registration.inputs.moving_image = functional_BET_res.outputs.out_file
            registration.inputs.output_warped_image = '{}_ofM{}.nii.gz'.format(
                participant, i)
            res = registration.run()
Example #9
def structural_to_functional_per_participant_test(
    subjects_sessions,
    template="~/GitHub/mriPipeline/templates/waxholm/new/WHS_SD_masked.nii.gz",
    f_file_format="~/GitHub/mripipeline/base/preprocessing/generic_work/_subject_session_{subject}.{session}/_scan_type_SE_EPI/f_bru2nii/",
    s_file_format="~/GitHub/mripipeline/base/preprocessing/generic_work/_subject_session_{subject}.{session}/_scan_type_T2_TurboRARE/s_bru2nii/",
    num_threads=3,
):

    template = os.path.expanduser(template)
    for subject_session in subjects_sessions:
        func_image_dir = os.path.expanduser(
            f_file_format.format(**subject_session))
        struct_image_dir = os.path.expanduser(
            s_file_format.format(**subject_session))
        try:
            for myfile in os.listdir(func_image_dir):
                if myfile.endswith((".nii.gz", ".nii")):
                    func_image = os.path.join(func_image_dir, myfile)
            for myfile in os.listdir(struct_image_dir):
                if myfile.endswith((".nii.gz", ".nii")):
                    struct_image = os.path.join(struct_image_dir, myfile)
        except FileNotFoundError:
            pass
        else:
            n4 = ants.N4BiasFieldCorrection()
            n4.inputs.dimension = 3
            n4.inputs.input_image = struct_image
            # correction bias is introduced (along the z-axis) if the following value is set to under 85. This is likely contingent on resolution.
            n4.inputs.bspline_fitting_distance = 100
            n4.inputs.shrink_factor = 2
            n4.inputs.n_iterations = [200, 200, 200, 200]
            n4.inputs.convergence_threshold = 1e-11
            n4.inputs.output_image = '{}_{}_1_biasCorrection_forRegistration.nii.gz'.format(
                *subject_session.values())
            n4_res = n4.run()

            _n4 = ants.N4BiasFieldCorrection()
            _n4.inputs.dimension = 3
            _n4.inputs.input_image = struct_image
            # correction bias is introduced (along the z-axis) if the following value is set to under 85. This is likely contingent on resolution.
            _n4.inputs.bspline_fitting_distance = 95
            _n4.inputs.shrink_factor = 2
            _n4.inputs.n_iterations = [500, 500, 500, 500]
            _n4.inputs.convergence_threshold = 1e-14
            _n4.inputs.output_image = '{}_{}_1_biasCorrection_forMasking.nii.gz'.format(
                *subject_session.values())
            _n4_res = _n4.run()

            #we do this on a separate bias-corrected image to remove hyperintensities which we have to create in order to prevent brain regions being caught by the negative threshold
            struct_cutoff = ImageMaths()
            struct_cutoff.inputs.op_string = "-thrP 20 -uthrp 98"
            struct_cutoff.inputs.in_file = _n4_res.outputs.output_image
            struct_cutoff_res = struct_cutoff.run()

            struct_BET = BET()
            struct_BET.inputs.mask = True
            struct_BET.inputs.frac = 0.3
            struct_BET.inputs.robust = True
            struct_BET.inputs.in_file = struct_cutoff_res.outputs.out_file
            struct_BET.inputs.out_file = '{}_{}_2_brainExtraction.nii.gz'.format(
                *subject_session.values())
            struct_BET_res = struct_BET.run()

            # we need/can not apply a fill, because the "holes" if any, will be at the rostral edge (touching it, and thus not counting as holes)
            struct_mask = ApplyMask()
            struct_mask.inputs.in_file = n4_res.outputs.output_image
            struct_mask.inputs.mask_file = struct_BET_res.outputs.mask_file
            struct_mask.inputs.out_file = '{}_{}_3_brainMasked.nii.gz'.format(
                *subject_session.values())
            struct_mask_res = struct_mask.run()

            struct_registration = ants.Registration()
            struct_registration.inputs.fixed_image = template
            struct_registration.inputs.output_transform_prefix = "output_"
            struct_registration.inputs.transforms = ['Affine', 'SyN']  ##
            struct_registration.inputs.transform_parameters = [(1.0, ),
                                                               (1.0, 3.0, 5.0)
                                                               ]  ##
            struct_registration.inputs.number_of_iterations = [[
                2000, 1000, 500
            ], [100, 100, 100]]  #
            struct_registration.inputs.dimension = 3
            struct_registration.inputs.write_composite_transform = True
            struct_registration.inputs.collapse_output_transforms = True
            struct_registration.inputs.initial_moving_transform_com = True
            # Tested on Affine transform: CC takes too long; Demons does not tilt, but moves the slices too far caudally; GC tilts too much on
            struct_registration.inputs.metric = ['MeanSquares', 'Mattes']
            struct_registration.inputs.metric_weight = [1, 1]
            struct_registration.inputs.radius_or_number_of_bins = [16, 32]  #
            struct_registration.inputs.sampling_strategy = ['Random', None]
            struct_registration.inputs.sampling_percentage = [0.3, 0.3]
            struct_registration.inputs.convergence_threshold = [1.e-11,
                                                                1.e-8]  #
            struct_registration.inputs.convergence_window_size = [20, 20]
            struct_registration.inputs.smoothing_sigmas = [[4, 2, 1],
                                                           [4, 2, 1]]
            struct_registration.inputs.sigma_units = ['vox', 'vox']
            struct_registration.inputs.shrink_factors = [[3, 2, 1], [3, 2, 1]]
            struct_registration.inputs.use_estimate_learning_rate_once = [
                True, True
            ]
            # if the fixed_image is not acquired similarly to the moving_image (e.g. RARE to histological (e.g. AMBMC)) this should be False
            struct_registration.inputs.use_histogram_matching = [False, False]
            struct_registration.inputs.winsorize_lower_quantile = 0.005
            struct_registration.inputs.winsorize_upper_quantile = 0.98
            struct_registration.inputs.args = '--float'
            struct_registration.inputs.num_threads = num_threads

            struct_registration.inputs.moving_image = struct_mask_res.outputs.out_file
            struct_registration.inputs.output_warped_image = '{}_{}_4_structuralRegistration.nii.gz'.format(
                *subject_session.values())
            struct_registration_res = struct_registration.run()

            warp = ants.ApplyTransforms()
            warp.inputs.reference_image = template
            warp.inputs.input_image_type = 3
            warp.inputs.interpolation = 'Linear'
            warp.inputs.invert_transform_flags = [False]
            warp.inputs.terminal_output = 'file'
            warp.inputs.output_image = '{}_{}_5_functionalWarp.nii.gz'.format(
                *subject_session.values())
            warp.num_threads = num_threads

            warp.inputs.input_image = func_image
            warp.inputs.transforms = struct_registration_res.outputs.composite_transform
            warp.run()
Example #10
def canonical(subjects_participants, regdir, f2s,
	template = "~/GitHub/mriPipeline/templates/waxholm/WHS_SD_rat_T2star_v1.01_downsample3.nii.gz",
	f_file_format = "~/GitHub/mripipeline/base/preprocessing/generic_work/_subject_session_{subject}.{session}/_scan_type_SE_EPI/f_bru2nii/",
	s_file_format = "~/GitHub/mripipeline/base/preprocessing/generic_work/_subject_session_{subject}.{session}/_scan_type_T2_TurboRARE/s_bru2nii/",
	):

	"""Warp a functional image based on the functional-to-structural and the structural-to-template registrations.
	Currently this approach is failing because the functional-to-structural registration pushes the brain stem too far down.
	This may be

	"""
	template = os.path.expanduser(template)
	for subject_participant in subjects_participants:
		func_image_dir = os.path.expanduser(f_file_format.format(**subject_participant))
		struct_image_dir = os.path.expanduser(s_file_format.format(**subject_participant))
		try:
			for myfile in os.listdir(func_image_dir):
				if myfile.endswith((".nii.gz", ".nii")):
					func_image = os.path.join(func_image_dir,myfile)
			for myfile in os.listdir(struct_image_dir):
				if myfile.endswith((".nii.gz", ".nii")):
					struct_image = os.path.join(struct_image_dir,myfile)
		except FileNotFoundError:
			pass
		else:
			#struct
			n4 = ants.N4BiasFieldCorrection()
			n4.inputs.dimension = 3
			n4.inputs.input_image = struct_image
			# correction bias is introduced (along the z-axis) if the following value is set to under 85. This is likely contingent on resolution.
			n4.inputs.bspline_fitting_distance = 100
			n4.inputs.shrink_factor = 2
			n4.inputs.n_iterations = [200,200,200,200]
			n4.inputs.convergence_threshold = 1e-11
			n4.inputs.output_image = '{}/ss_n4_{}_ofM{}.nii.gz'.format(regdir,participant,i)
			n4_res = n4.run()

			_n4 = ants.N4BiasFieldCorrection()
			_n4.inputs.dimension = 3
			_n4.inputs.input_image = struct_image
			# correction bias is introduced (along the z-axis) if the following value is set to under 85. This is likely contingent on resolution.
			_n4.inputs.bspline_fitting_distance = 95
			_n4.inputs.shrink_factor = 2
			_n4.inputs.n_iterations = [500,500,500,500]
			_n4.inputs.convergence_threshold = 1e-14
			_n4.inputs.output_image = '{}/ss__n4_{}_ofM{}.nii.gz'.format(regdir,participant,i)
			_n4_res = _n4.run()

			#we do this on a separate bias-corrected image to remove hyperintensities which we have to create in order to prevent brain regions being caught by the negative threshold
			struct_cutoff = ImageMaths()
			struct_cutoff.inputs.op_string = "-thrP 20 -uthrp 98"
			struct_cutoff.inputs.in_file = _n4_res.outputs.output_image
			struct_cutoff_res = struct_cutoff.run()

			struct_BET = BET()
			struct_BET.inputs.mask = True
			struct_BET.inputs.frac = 0.3
			struct_BET.inputs.robust = True
			struct_BET.inputs.in_file = struct_cutoff_res.outputs.out_file
			struct_BET_res = struct_BET.run()

			struct_mask = ApplyMask()
			struct_mask.inputs.in_file = n4_res.outputs.output_image
			struct_mask.inputs.mask_file = struct_BET_res.outputs.mask_file
			struct_mask_res = struct_mask.run()

			struct_registration = ants.Registration()
			struct_registration.inputs.fixed_image = template
			struct_registration.inputs.output_transform_prefix = "output_"
			struct_registration.inputs.transforms = ['Affine', 'SyN'] ##
			struct_registration.inputs.transform_parameters = [(1.0,), (1.0, 3.0, 5.0)] ##
			struct_registration.inputs.number_of_iterations = [[2000, 1000, 500], [100, 100, 100]] #
			struct_registration.inputs.dimension = 3
			struct_registration.inputs.write_composite_transform = True
			struct_registration.inputs.collapse_output_transforms = True
			struct_registration.inputs.initial_moving_transform_com = True
			# Tested on Affine transform: CC takes too long; Demons does not tilt, but moves the slices too far caudally; GC tilts too much on
			struct_registration.inputs.metric = ['MeanSquares', 'Mattes']
			struct_registration.inputs.metric_weight = [1, 1]
			struct_registration.inputs.radius_or_number_of_bins = [16, 32] #
			struct_registration.inputs.sampling_strategy = ['Random', None]
			struct_registration.inputs.sampling_percentage = [0.3, 0.3]
			struct_registration.inputs.convergence_threshold = [1.e-11, 1.e-8] #
			struct_registration.inputs.convergence_window_size = [20, 20]
			struct_registration.inputs.smoothing_sigmas = [[4, 2, 1], [4, 2, 1]]
			struct_registration.inputs.sigma_units = ['vox', 'vox']
			struct_registration.inputs.shrink_factors = [[3, 2, 1],[3, 2, 1]]
			struct_registration.inputs.use_estimate_learning_rate_once = [True, True]
			# if the fixed_image is not acquired similarly to the moving_image (e.g. RARE to histological (e.g. AMBMC)) this should be False
			struct_registration.inputs.use_histogram_matching = [False, False]
			struct_registration.inputs.winsorize_lower_quantile = 0.005
			struct_registration.inputs.winsorize_upper_quantile = 0.98
			struct_registration.inputs.args = '--float'
			struct_registration.inputs.num_threads = 6

			struct_registration.inputs.moving_image = struct_mask_res.outputs.out_file
			struct_registration.inputs.output_warped_image = '{}/s_{}_ofM{}.nii.gz'.format(regdir,participant,i)
			struct_registration_res = struct_registration.run()

			#func
			func_n4 = ants.N4BiasFieldCorrection()
			func_n4.inputs.dimension = 3
			func_n4.inputs.input_image = func_image
			func_n4.inputs.bspline_fitting_distance = 100
			func_n4.inputs.shrink_factor = 2
			func_n4.inputs.n_iterations = [200,200,200,200]
			func_n4.inputs.convergence_threshold = 1e-11
			func_n4.inputs.output_image = '{}/f_n4_{}_ofM{}.nii.gz'.format(regdir,participant,i)
			func_n4_res = func_n4.run()

			func_registration = ants.Registration()
			func_registration.inputs.fixed_image = n4_res.outputs.output_image
			func_registration.inputs.output_transform_prefix = "func_"
			func_registration.inputs.transforms = [f2s]
			func_registration.inputs.transform_parameters = [(0.1,)]
			func_registration.inputs.number_of_iterations = [[40, 20, 10]]
			func_registration.inputs.dimension = 3
			func_registration.inputs.write_composite_transform = True
			func_registration.inputs.collapse_output_transforms = True
			func_registration.inputs.initial_moving_transform_com = True
			func_registration.inputs.metric = ['MeanSquares']
			func_registration.inputs.metric_weight = [1]
			func_registration.inputs.radius_or_number_of_bins = [16]
			func_registration.inputs.sampling_strategy = ["Regular"]
			func_registration.inputs.sampling_percentage = [0.3]
			func_registration.inputs.convergence_threshold = [1.e-2]
			func_registration.inputs.convergence_window_size = [8]
			func_registration.inputs.smoothing_sigmas = [[4, 2, 1]] # [1,0.5,0]
			func_registration.inputs.sigma_units = ['vox']
			func_registration.inputs.shrink_factors = [[3, 2, 1]]
			func_registration.inputs.use_estimate_learning_rate_once = [True]
			func_registration.inputs.use_histogram_matching = [False]
			func_registration.inputs.winsorize_lower_quantile = 0.005
			func_registration.inputs.winsorize_upper_quantile = 0.995
			func_registration.inputs.args = '--float'
			func_registration.inputs.num_threads = 6

			func_registration.inputs.moving_image = func_n4_res.outputs.output_image
			func_registration.inputs.output_warped_image = '{}/f_{}_ofM{}.nii.gz'.format(regdir,participant,i)
			func_registration_res = func_registration.run()

			warp = ants.ApplyTransforms()
			warp.inputs.reference_image = template
			warp.inputs.input_image_type = 3
			warp.inputs.interpolation = 'Linear'
			warp.inputs.invert_transform_flags = [False, False]
			warp.inputs.terminal_output = 'file'
			warp.inputs.output_image = '{}/{}_ofM{}.nii.gz'.format(regdir,participant,i)
			warp.num_threads = 6

			warp.inputs.input_image = func_image
			warp.inputs.transforms = [func_registration_res.outputs.composite_transform, struct_registration_res.outputs.composite_transform]
			warp.run()
Example #11
def func_img_proc(T1C_original, T2_original, FLAIR_original, 
                  T1C_bet, T2_bet, FLAIR_bet, mask_T1C_bet,
                  T1C_isovoxel, T2_isovoxel, FLAIR_isovoxel, mask_T1C_bet_iso,
                  T1C_corrected, T2_corrected, FLAIR_corrected, T1C_bet_temp):
    
    ##Skull Stripping

    t1c_isovoxel = func_resample_isovoxel(T1C_original)
    sitk.WriteImage(t1c_isovoxel, T1C_isovoxel)
    print("resampling T1C_original - completed")
    
    func_register(T2_original, T1C_isovoxel, T2_isovoxel)
    print("register T2_original to T1C_isovoxel - completed")
    
    func_register(FLAIR_original, T1C_isovoxel, FLAIR_isovoxel)
    print("register FLAIR_original to T1C_isovoxel - completed")
    
    bet_t1gd_iso = BET(in_file = T1C_isovoxel,
                       frac = 0.4,
                       mask = True,  # brain tissue mask is stored with '_mask' suffix after T1C_bet.
                       reduce_bias = True,
                       out_file = T1C_bet_temp)
    bet_t1gd_iso.run()
    print("Acquired BET mask...")
    os.remove(T1C_bet_temp)
    
    brain_mask_file = T1C_bet_temp[:len(T1C_bet_temp)-len('.nii.gz')] + '_mask.nii.gz'
    
    ApplyBet_T1C = ApplyMask(in_file = T1C_isovoxel,
                                  mask_file= brain_mask_file,
                                  out_file= T1C_bet)
    ApplyBet_T1C.run()
    
    ApplyBet_T2 = ApplyMask(in_file = T2_isovoxel,
                                  mask_file= brain_mask_file,
                                  out_file=T2_bet)
    ApplyBet_T2.run()
    
    ApplyBet_FLAIR = ApplyMask(in_file = FLAIR_isovoxel,
                                  mask_file= brain_mask_file,
                                  out_file=FLAIR_bet)
    ApplyBet_FLAIR.run()
    
    print("Skull stripping of T1C, T2, FLAIR... - done")

    ### Resampling and registering BET files

    t1c_isovoxel = func_resample_isovoxel(T1C_bet, isseg=False)
    sitk.WriteImage(t1c_isovoxel, T1C_isovoxel)
    
    bmask_isovoxel = func_resample_isovoxel(mask_T1C_bet, isseg=True)
    sitk.WriteImage(bmask_isovoxel, mask_T1C_bet_iso)
    
    print("resampling T1C & brain mask - completed")
    
    func_register(T2_bet, T1C_isovoxel, T2_isovoxel)
    print("register T2 to T1C_isovoxel - completed")
    
    func_register(FLAIR_bet, T1C_isovoxel, FLAIR_isovoxel)
    print("register FLAIR to T1C_isovoxel - completed")
    
    ### Corrections

    func_n4bias(T1C_isovoxel, T1C_corrected)
    print("T1C bias correction done...")
    func_n4bias(T2_isovoxel, T2_corrected)
    print("T2 bias correction done...")
    func_n4bias(FLAIR_isovoxel, FLAIR_corrected)
    print("FLAIR bias correction done...")
Example #12
#from mriqc.motion import plot_frame_displacement
#
#test_workflow = Workflow(name="qc_workflow")
#
#test_workflow.base_dir = work_dir


#from nipype import SelectFiles
#templates = dict(T1="*_{subject_id}_*/T1w_MPR_BIC_v1/*00001.nii*")
#file_list = Node(SelectFiles(templates), name = "EPI_and_T1_File_Selection")
#file_list.inputs.base_directory = data_dir
#file_list.iterables = [("subject_id", subject_list), ("p_dir", ["LR", "RL"])]


from nipype.interfaces.fsl import BET

mask_gen = BET()
mask_gen.inputs.in_file = "/Users/craigmoodie/Documents/AA_Connectivity_Psychosis/AA_Connectivity_Analysis/10_Subject_ICA_test/Subject_1/T1w_MPR_BIC_v1/S1543TWL_P126317_1_19_00001.nii"
mask_gen.inputs.mask = True
mask_gen.inputs.out_file = "bet_mean"
mask_gen.run()


#mask_gen = MapNode(BET(), name="Mask_Generation", iterfield="in_file")



#from spatial_qc import summary_mask, ghost_all, ghost_direction, ghost_mask, fwhm, artifacts, efc, cnr, snr
#
#
#summary_mask()
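The commented-out MapNode line above hints at running BET over several files at once; a minimal sketch of that idea, with placeholder paths and working directory:

from nipype import MapNode, Workflow
from nipype.interfaces.fsl import BET

# One BET run per entry of in_file; iterfield expands the node over the list.
mask_gen = MapNode(BET(mask=True), name="Mask_Generation", iterfield=["in_file"])
mask_gen.inputs.in_file = ["Subject_1/T1w.nii", "Subject_2/T1w.nii"]  # placeholder paths

wf = Workflow(name="qc_workflow", base_dir="/tmp/qc_work")  # placeholder working directory
wf.add_nodes([mask_gen])
wf.run()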
Example #13
    def transform(self, X, y=None):

        in_files_search_param = self.gather_steps[1]

        if type(X[0]) == str and  \
           self.backend == 'fsl' and \
           self.gather_steps[0] != 'source':

            in_files_dir = os.path.join(self.project_path, 'derivatives',
                                        self.pipeline_name, 'steps',
                                        self.gather_steps[0])

            layout = BIDSLayout(in_files_dir)

            X = X.copy()

            for subject in X:

                in_files = []
                for path in layout.get(subject=subject,
                                       **in_files_search_param):
                    path = path.filename
                    in_files.append(path)

                for in_file in in_files:

                    path, filename = os.path.split(in_file)
                    path, directory = os.path.split(path)

                    inner_structure_path = list()
                    inner_structure_path.append(directory)
                    while 'sub-' not in directory:
                        path, directory = os.path.split(path)
                        inner_structure_path.append(directory)
                    inner_structure_path = reversed(inner_structure_path)

                    out_dir = os.path.join(self.project_path, 'derivatives',
                                           self.pipeline_name, 'steps',
                                           self.transformer_name,
                                           *inner_structure_path)

                    if not os.path.exists(out_dir):
                        os.makedirs(out_dir)

                    out_filename = get_brain_filename(filename)
                    out_file = os.path.join(out_dir, out_filename)

                    betfsl = BET(in_file=in_file,
                                 out_file=out_file,
                                 **self.backend_param)
                    betfsl.run()

        if type(X[0]) == str and  \
           self.backend == 'fsl' and \
           self.gather_steps[0] == 'source':

            in_files_dir = self.project_path

            layout = BIDSLayout(in_files_dir)

            X = X.copy()

            for subject in X:

                in_files = []
                for path in layout.get(subject=subject,
                                       **in_files_search_param):
                    path = path.filename
                    if 'derivatives' not in path.split(os.sep):
                        in_files.append(path)

                for in_file in in_files:

                    path, filename = os.path.split(in_file)
                    path, directory = os.path.split(path)

                    inner_structure_path = list()
                    inner_structure_path.append(directory)
                    while 'sub-' not in directory:
                        path, directory = os.path.split(path)
                        inner_structure_path.append(directory)
                    inner_structure_path = reversed(inner_structure_path)

                    out_dir = os.path.join(self.project_path, 'derivatives',
                                           self.pipeline_name, 'steps',
                                           self.transformer_name,
                                           *inner_structure_path)

                    if not os.path.exists(out_dir):
                        os.makedirs(out_dir)

                    out_filename = get_brain_filename(filename)
                    out_file = os.path.join(out_dir, out_filename)
                    print(out_file)

                    betfsl = BET(in_file=in_file,
                                 out_file=out_file,
                                 **self.backend_param)
                    betfsl.run()

        return X
Example #14
"""Runs bet twice to improve skull-stripping with center of gravity"""
from nipype.interfaces.fsl import BET

anat = "../../data/ds000171/sub-control01/anat/sub-control01_T1w.nii.gz"

bet = BET()

bet.inputs.in_file = anat
bet.inputs.frac = 0.5

brain = '../output/better-bet-anat/anat_bet.nii.gz'
bet.inputs.out_file = brain
bet.inputs.mask = True
bet.run()

# Load mask as numpy array
import nibabel as nib
mask_path = '../output/better-bet-anat/anat_bet_mask.nii.gz'
mask = nib.load(mask_path)
mask_array = mask.get_fdata()

# Get center of mass with scipy
from scipy.ndimage import center_of_mass  # scipy.ndimage.measurements is deprecated
cog = [int(x)
       for x in list(center_of_mass(mask_array))]  # list comprehension yo!

bet2 = BET()
bet2.inputs.in_file = anat
bet2.inputs.frac = 0.3
bet2.inputs.center = cog
bet2.inputs.out_file = '../output/better-bet-anat/anat_bet2.nii.gz'
bet2.run()  # second pass, re-centred on the mask's centre of gravity
Example #15
def structural_to_functional_per_participant_test(subjects_sessions,
	template = "~/GitHub/mriPipeline/templates/waxholm/new/WHS_SD_masked.nii.gz",
	f_file_format = "~/GitHub/mripipeline/base/preprocessing/generic_work/_subject_session_{subject}.{session}/_scan_type_SE_EPI/f_bru2nii/",
	s_file_format = "~/GitHub/mripipeline/base/preprocessing/generic_work/_subject_session_{subject}.{session}/_scan_type_T2_TurboRARE/s_bru2nii/",
	num_threads = 3,
	):

	template = os.path.expanduser(template)
	for subject_session in subjects_sessions:
		func_image_dir = os.path.expanduser(f_file_format.format(**subject_session))
		struct_image_dir = os.path.expanduser(s_file_format.format(**subject_session))
		try:
			for myfile in os.listdir(func_image_dir):
				if myfile.endswith((".nii.gz", ".nii")):
					func_image = os.path.join(func_image_dir,myfile)
			for myfile in os.listdir(struct_image_dir):
				if myfile.endswith((".nii.gz", ".nii")):
					struct_image = os.path.join(struct_image_dir,myfile)
		except FileNotFoundError:
			pass
		else:
			n4 = ants.N4BiasFieldCorrection()
			n4.inputs.dimension = 3
			n4.inputs.input_image = struct_image
			# correction bias is introduced (along the z-axis) if the following value is set to under 85. This is likely contingent on resolution.
			n4.inputs.bspline_fitting_distance = 100
			n4.inputs.shrink_factor = 2
			n4.inputs.n_iterations = [200,200,200,200]
			n4.inputs.convergence_threshold = 1e-11
			n4.inputs.output_image = '{}_{}_1_biasCorrection_forRegistration.nii.gz'.format(*subject_session.values())
			n4_res = n4.run()

			_n4 = ants.N4BiasFieldCorrection()
			_n4.inputs.dimension = 3
			_n4.inputs.input_image = struct_image
			# correction bias is introduced (along the z-axis) if the following value is set to under 85. This is likely contingent on resolution.
			_n4.inputs.bspline_fitting_distance = 95
			_n4.inputs.shrink_factor = 2
			_n4.inputs.n_iterations = [500,500,500,500]
			_n4.inputs.convergence_threshold = 1e-14
			_n4.inputs.output_image = '{}_{}_1_biasCorrection_forMasking.nii.gz'.format(*subject_session.values())
			_n4_res = _n4.run()

			#we do this on a separate bias-corrected image to remove hyperintensities which we have to create in order to prevent brain regions being caught by the negative threshold
			struct_cutoff = ImageMaths()
			struct_cutoff.inputs.op_string = "-thrP 20 -uthrp 98"
			struct_cutoff.inputs.in_file = _n4_res.outputs.output_image
			struct_cutoff_res = struct_cutoff.run()

			struct_BET = BET()
			struct_BET.inputs.mask = True
			struct_BET.inputs.frac = 0.3
			struct_BET.inputs.robust = True
			struct_BET.inputs.in_file = struct_cutoff_res.outputs.out_file
			struct_BET.inputs.out_file = '{}_{}_2_brainExtraction.nii.gz'.format(*subject_session.values())
			struct_BET_res = struct_BET.run()

			# we need/can not apply a fill, because the "holes" if any, will be at the rostral edge (touching it, and thus not counting as holes)
			struct_mask = ApplyMask()
			struct_mask.inputs.in_file = n4_res.outputs.output_image
			struct_mask.inputs.mask_file = struct_BET_res.outputs.mask_file
			struct_mask.inputs.out_file = '{}_{}_3_brainMasked.nii.gz'.format(*subject_session.values())
			struct_mask_res = struct_mask.run()

			struct_registration = ants.Registration()
			struct_registration.inputs.fixed_image = template
			struct_registration.inputs.output_transform_prefix = "output_"
			struct_registration.inputs.transforms = ['Affine', 'SyN'] ##
			struct_registration.inputs.transform_parameters = [(1.0,), (1.0, 3.0, 5.0)] ##
			struct_registration.inputs.number_of_iterations = [[2000, 1000, 500], [100, 100, 100]] #
			struct_registration.inputs.dimension = 3
			struct_registration.inputs.write_composite_transform = True
			struct_registration.inputs.collapse_output_transforms = True
			struct_registration.inputs.initial_moving_transform_com = True
			# Tested on Affine transform: CC takes too long; Demons does not tilt, but moves the slices too far caudally; GC tilts too much on
			struct_registration.inputs.metric = ['MeanSquares', 'Mattes']
			struct_registration.inputs.metric_weight = [1, 1]
			struct_registration.inputs.radius_or_number_of_bins = [16, 32] #
			struct_registration.inputs.sampling_strategy = ['Random', None]
			struct_registration.inputs.sampling_percentage = [0.3, 0.3]
			struct_registration.inputs.convergence_threshold = [1.e-11, 1.e-8] #
			struct_registration.inputs.convergence_window_size = [20, 20]
			struct_registration.inputs.smoothing_sigmas = [[4, 2, 1], [4, 2, 1]]
			struct_registration.inputs.sigma_units = ['vox', 'vox']
			struct_registration.inputs.shrink_factors = [[3, 2, 1],[3, 2, 1]]
			struct_registration.inputs.use_estimate_learning_rate_once = [True, True]
			# if the fixed_image is not acquired similarly to the moving_image (e.g. RARE to histological (e.g. AMBMC)) this should be False
			struct_registration.inputs.use_histogram_matching = [False, False]
			struct_registration.inputs.winsorize_lower_quantile = 0.005
			struct_registration.inputs.winsorize_upper_quantile = 0.98
			struct_registration.inputs.args = '--float'
			struct_registration.inputs.num_threads = num_threads

			struct_registration.inputs.moving_image = struct_mask_res.outputs.out_file
			struct_registration.inputs.output_warped_image = '{}_{}_4_structuralRegistration.nii.gz'.format(*subject_session.values())
			struct_registration_res = struct_registration.run()

			warp = ants.ApplyTransforms()
			warp.inputs.reference_image = template
			warp.inputs.input_image_type = 3
			warp.inputs.interpolation = 'Linear'
			warp.inputs.invert_transform_flags = [False]
			warp.inputs.terminal_output = 'file'
			warp.inputs.output_image = '{}_{}_5_functionalWarp.nii.gz'.format(*subject_session.values())
			warp.num_threads = num_threads

			warp.inputs.input_image = func_image
			warp.inputs.transforms = struct_registration_res.outputs.composite_transform
			warp.run()
Example #16
def canonical(
    subjects_participants,
    regdir,
    f2s,
    template="~/GitHub/mriPipeline/templates/waxholm/WHS_SD_rat_T2star_v1.01_downsample3.nii.gz",
    f_file_format="~/GitHub/mripipeline/base/preprocessing/generic_work/_subject_session_{subject}.{session}/_scan_type_SE_EPI/f_bru2nii/",
    s_file_format="~/GitHub/mripipeline/base/preprocessing/generic_work/_subject_session_{subject}.{session}/_scan_type_T2_TurboRARE/s_bru2nii/",
):
    """Warp a functional image based on the functional-to-structural and the structural-to-template registrations.
	Currently this approach is failing because the functional-to-structural registration pushes the brain stem too far down.
	This may be

	"""
    template = os.path.expanduser(template)
    for subject_participant in subjects_participants:
        func_image_dir = os.path.expanduser(
            f_file_format.format(**subject_participant))
        struct_image_dir = os.path.expanduser(
            s_file_format.format(**subject_participant))
        try:
            for myfile in os.listdir(func_image_dir):
                if myfile.endswith((".nii.gz", ".nii")):
                    func_image = os.path.join(func_image_dir, myfile)
            for myfile in os.listdir(struct_image_dir):
                if myfile.endswith((".nii.gz", ".nii")):
                    struct_image = os.path.join(struct_image_dir, myfile)
        except FileNotFoundError:
            pass
        else:
            #struct
            n4 = ants.N4BiasFieldCorrection()
            n4.inputs.dimension = 3
            n4.inputs.input_image = struct_image
            # correction bias is introduced (along the z-axis) if the following value is set to under 85. This is likely contingent on resolution.
            n4.inputs.bspline_fitting_distance = 100
            n4.inputs.shrink_factor = 2
            n4.inputs.n_iterations = [200, 200, 200, 200]
            n4.inputs.convergence_threshold = 1e-11
            n4.inputs.output_image = '{}/ss_n4_{}_ofM{}.nii.gz'.format(
                regdir, participant, i)
            n4_res = n4.run()

            _n4 = ants.N4BiasFieldCorrection()
            _n4.inputs.dimension = 3
            _n4.inputs.input_image = struct_image
            # correction bias is introduced (along the z-axis) if the following value is set to under 85. This is likely contingent on resolution.
            _n4.inputs.bspline_fitting_distance = 95
            _n4.inputs.shrink_factor = 2
            _n4.inputs.n_iterations = [500, 500, 500, 500]
            _n4.inputs.convergence_threshold = 1e-14
            _n4.inputs.output_image = '{}/ss__n4_{}_ofM{}.nii.gz'.format(
                regdir, participant, i)
            _n4_res = _n4.run()

            #we do this on a separate bias-corrected image to remove hyperintensities which we have to create in order to prevent brain regions being caught by the negative threshold
            struct_cutoff = ImageMaths()
            struct_cutoff.inputs.op_string = "-thrP 20 -uthrp 98"
            struct_cutoff.inputs.in_file = _n4_res.outputs.output_image
            struct_cutoff_res = struct_cutoff.run()

            struct_BET = BET()
            struct_BET.inputs.mask = True
            struct_BET.inputs.frac = 0.3
            struct_BET.inputs.robust = True
            struct_BET.inputs.in_file = struct_cutoff_res.outputs.out_file
            struct_BET_res = struct_BET.run()

            struct_mask = ApplyMask()
            struct_mask.inputs.in_file = n4_res.outputs.output_image
            struct_mask.inputs.mask_file = struct_BET_res.outputs.mask_file
            struct_mask_res = struct_mask.run()

            struct_registration = ants.Registration()
            struct_registration.inputs.fixed_image = template
            struct_registration.inputs.output_transform_prefix = "output_"
            struct_registration.inputs.transforms = ['Affine', 'SyN']  ##
            struct_registration.inputs.transform_parameters = [(1.0, ),
                                                               (1.0, 3.0, 5.0)
                                                               ]  ##
            struct_registration.inputs.number_of_iterations = [[
                2000, 1000, 500
            ], [100, 100, 100]]  #
            struct_registration.inputs.dimension = 3
            struct_registration.inputs.write_composite_transform = True
            struct_registration.inputs.collapse_output_transforms = True
            struct_registration.inputs.initial_moving_transform_com = True
            # Tested on Affine transform: CC takes too long; Demons does not tilt, but moves the slices too far caudally; GC tilts too much on
            struct_registration.inputs.metric = ['MeanSquares', 'Mattes']
            struct_registration.inputs.metric_weight = [1, 1]
            struct_registration.inputs.radius_or_number_of_bins = [16, 32]  #
            struct_registration.inputs.sampling_strategy = ['Random', None]
            struct_registration.inputs.sampling_percentage = [0.3, 0.3]
            struct_registration.inputs.convergence_threshold = [1.e-11,
                                                                1.e-8]  #
            struct_registration.inputs.convergence_window_size = [20, 20]
            struct_registration.inputs.smoothing_sigmas = [[4, 2, 1],
                                                           [4, 2, 1]]
            struct_registration.inputs.sigma_units = ['vox', 'vox']
            struct_registration.inputs.shrink_factors = [[3, 2, 1], [3, 2, 1]]
            struct_registration.inputs.use_estimate_learning_rate_once = [
                True, True
            ]
            # if the fixed_image is not acquired similarly to the moving_image (e.g. RARE to histological (e.g. AMBMC)) this should be False
            struct_registration.inputs.use_histogram_matching = [False, False]
            struct_registration.inputs.winsorize_lower_quantile = 0.005
            struct_registration.inputs.winsorize_upper_quantile = 0.98
            struct_registration.inputs.args = '--float'
            struct_registration.inputs.num_threads = 6

            struct_registration.inputs.moving_image = struct_mask_res.outputs.out_file
            struct_registration.inputs.output_warped_image = '{}/s_{}_ofM{}.nii.gz'.format(
                regdir, participant, i)
            struct_registration_res = struct_registration.run()

            #func
            func_n4 = ants.N4BiasFieldCorrection()
            func_n4.inputs.dimension = 3
            func_n4.inputs.input_image = func_image
            func_n4.inputs.bspline_fitting_distance = 100
            func_n4.inputs.shrink_factor = 2
            func_n4.inputs.n_iterations = [200, 200, 200, 200]
            func_n4.inputs.convergence_threshold = 1e-11
            func_n4.inputs.output_image = '{}/f_n4_{}_ofM{}.nii.gz'.format(
                regdir, participant, i)
            func_n4_res = func_n4.run()

            func_registration = ants.Registration()
            func_registration.inputs.fixed_image = n4_res.outputs.output_image
            func_registration.inputs.output_transform_prefix = "func_"
            func_registration.inputs.transforms = [f2s]
            func_registration.inputs.transform_parameters = [(0.1, )]
            func_registration.inputs.number_of_iterations = [[40, 20, 10]]
            func_registration.inputs.dimension = 3
            func_registration.inputs.write_composite_transform = True
            func_registration.inputs.collapse_output_transforms = True
            func_registration.inputs.initial_moving_transform_com = True
            func_registration.inputs.metric = ['MeanSquares']
            func_registration.inputs.metric_weight = [1]
            func_registration.inputs.radius_or_number_of_bins = [16]
            func_registration.inputs.sampling_strategy = ["Regular"]
            func_registration.inputs.sampling_percentage = [0.3]
            func_registration.inputs.convergence_threshold = [1.e-2]
            func_registration.inputs.convergence_window_size = [8]
            func_registration.inputs.smoothing_sigmas = [[4, 2,
                                                          1]]  # [1,0.5,0]
            func_registration.inputs.sigma_units = ['vox']
            func_registration.inputs.shrink_factors = [[3, 2, 1]]
            func_registration.inputs.use_estimate_learning_rate_once = [True]
            func_registration.inputs.use_histogram_matching = [False]
            func_registration.inputs.winsorize_lower_quantile = 0.005
            func_registration.inputs.winsorize_upper_quantile = 0.995
            func_registration.inputs.args = '--float'
            func_registration.inputs.num_threads = 6

            func_registration.inputs.moving_image = func_n4_res.outputs.output_image
            func_registration.inputs.output_warped_image = '{}/f_{}_ofM{}.nii.gz'.format(
                regdir, participant, i)
            func_registration_res = func_registration.run()

            warp = ants.ApplyTransforms()
            warp.inputs.reference_image = template
            warp.inputs.input_image_type = 3
            warp.inputs.interpolation = 'Linear'
            warp.inputs.invert_transform_flags = [False, False]
            warp.inputs.terminal_output = 'file'
            warp.inputs.output_image = '{}/{}_ofM{}.nii.gz'.format(
                regdir, participant, i)
            warp.num_threads = 6

            warp.inputs.input_image = func_image
            warp.inputs.transforms = [
                func_registration_res.outputs.composite_transform,
                struct_registration_res.outputs.composite_transform
            ]
            warp.run()
Example #17
def structural_per_participant_test(
    participant,
    conditions=["", "_aF", "_cF1", "_cF2", "_pF"],
    template="/home/chymera/ni_data/templates/ds_QBI_chr.nii.gz",
):

    for i in conditions:
        image_dir = "/home/chymera/ni_data/ofM.dr/preprocessing/generic_work/_subject_session_{}.ofM{}/_scan_type_T2_TurboRARE/s_bru2nii/".format(
            participant, i)
        print(image_dir)
        try:
            for myfile in os.listdir(image_dir):
                if myfile.endswith(".nii"):
                    mimage = os.path.join(image_dir, myfile)
        except FileNotFoundError:
            pass
        else:
            n4 = ants.N4BiasFieldCorrection()
            n4.inputs.dimension = 3
            n4.inputs.input_image = mimage
            # correction bias is introduced (along the z-axis) if the following value is set to under 85. This is likely contingent on resolution.
            n4.inputs.bspline_fitting_distance = 100
            n4.inputs.shrink_factor = 2
            n4.inputs.n_iterations = [200, 200, 200, 200]
            n4.inputs.convergence_threshold = 1e-11
            n4.inputs.output_image = 'ss_n4_{}_ofM{}.nii.gz'.format(
                participant, i)
            n4_res = n4.run()

            _n4 = ants.N4BiasFieldCorrection()
            _n4.inputs.dimension = 3
            _n4.inputs.input_image = mimage
            # correction bias is introduced (along the z-axis) if the following value is set to under 85. This is likely contingent on resolution.
            _n4.inputs.bspline_fitting_distance = 95
            _n4.inputs.shrink_factor = 2
            _n4.inputs.n_iterations = [500, 500, 500, 500]
            _n4.inputs.convergence_threshold = 1e-14
            _n4.inputs.output_image = 'ss__n4_{}_ofM{}.nii.gz'.format(
                participant, i)
            _n4_res = _n4.run()

            #we do this on a separate bias-corrected image to remove hyperintensities which we have to create in order to prevent brain regions being caught by the negative threshold
            struct_cutoff = ImageMaths()
            struct_cutoff.inputs.op_string = "-thrP 20 -uthrp 98"
            struct_cutoff.inputs.in_file = _n4_res.outputs.output_image
            struct_cutoff_res = struct_cutoff.run()

            struct_BET = BET()
            struct_BET.inputs.mask = True
            struct_BET.inputs.frac = 0.3
            struct_BET.inputs.robust = True
            struct_BET.inputs.in_file = struct_cutoff_res.outputs.out_file
            struct_BET_res = struct_BET.run()

            mask = ApplyMask()
            mask.inputs.in_file = n4_res.outputs.output_image
            mask.inputs.mask_file = struct_BET_res.outputs.mask_file
            mask_res = mask.run()

            struct_registration = ants.Registration()
            struct_registration.inputs.fixed_image = template
            struct_registration.inputs.output_transform_prefix = "output_"
            struct_registration.inputs.transforms = ['Rigid', 'Affine',
                                                     'SyN']  ##
            struct_registration.inputs.transform_parameters = [(.1, ), (1.0, ),
                                                               (1.0, 3.0, 5.0)
                                                               ]  ##
            struct_registration.inputs.number_of_iterations = [[
                150, 100, 50
            ], [2000, 1000, 500], [100, 100, 100]]  #
            struct_registration.inputs.dimension = 3
            struct_registration.inputs.write_composite_transform = True
            struct_registration.inputs.collapse_output_transforms = True
            struct_registration.inputs.initial_moving_transform_com = True
            # Tested on Affine transform: CC takes too long; Demons does not tilt, but moves the slices too far caudally; GC tilts too much on
            struct_registration.inputs.metric = ['MeanSquares', 'MeanSquares', 'Mattes']
            struct_registration.inputs.metric_weight = [1, 1, 1]
            struct_registration.inputs.radius_or_number_of_bins = [16, 16, 32]
            struct_registration.inputs.sampling_strategy = ['Random', 'Random', None]
            struct_registration.inputs.sampling_percentage = [0.3, 0.3, 0.3]
            struct_registration.inputs.convergence_threshold = [1.e-10, 1.e-11, 1.e-8]
            struct_registration.inputs.convergence_window_size = [20, 20, 20]
            struct_registration.inputs.smoothing_sigmas = [[4, 2, 1], [4, 2, 1], [4, 2, 1]]
            struct_registration.inputs.sigma_units = ['vox', 'vox', 'vox']
            struct_registration.inputs.shrink_factors = [[3, 2, 1], [3, 2, 1], [3, 2, 1]]
            struct_registration.inputs.use_estimate_learning_rate_once = [True, True, True]
            # if the fixed_image is not acquired similarly to the moving_image (e.g. RARE to histological (e.g. AMBMC)) this should be False
            struct_registration.inputs.use_histogram_matching = [False, False, False]
            struct_registration.inputs.winsorize_lower_quantile = 0.005
            struct_registration.inputs.winsorize_upper_quantile = 0.98
            struct_registration.inputs.args = '--float'
            struct_registration.inputs.num_threads = 6

            struct_registration.inputs.moving_image = mask_res.outputs.out_file
            struct_registration.inputs.output_warped_image = 'ss_{}_ofM{}.nii.gz'.format(
                participant, i)
            res = struct_registration.run()
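
Since write_composite_transform is enabled above, antsRegistration should also write an inverse composite transform next to the forward one. The snippet below is only a hypothetical sketch of how that inverse could be used to pull a template-space label image back into the subject's structural space; every filename is a placeholder, and the name 'output_InverseComposite.h5' merely follows from output_transform_prefix = "output_".

from nipype.interfaces import ants

# Hypothetical sketch: map a template-space atlas into subject space using the
# inverse composite transform written by the registration above. All filenames
# are placeholders.
atlas_to_subject = ants.ApplyTransforms()
atlas_to_subject.inputs.dimension = 3
atlas_to_subject.inputs.input_image = 'template_atlas_labels.nii.gz'
atlas_to_subject.inputs.reference_image = 'ss_n4_subject.nii.gz'
atlas_to_subject.inputs.transforms = ['output_InverseComposite.h5']
atlas_to_subject.inputs.interpolation = 'NearestNeighbor'  # preserve integer labels
atlas_to_subject.inputs.output_image = 'atlas_in_subject_space.nii.gz'
atlas_to_subject.run()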
Ejemplo n.º 18
0
"""Test FSL's Brain Extraction Interface"""

import os

from nipype.interfaces.fsl import BET

bet = BET()

bet.inputs.in_file = 'test-data/haxby2001/subj2/anat.nii.gz'
bet.inputs.out_file = 'output-fsl-bet/anat_bet.nii.gz'

# make sure the output directory exists, then show the command before running it
os.makedirs('output-fsl-bet', exist_ok=True)
print(bet.cmdline)
bet.run()
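
As a small extension of this test (not part of the original snippet), one could also ask BET for the binary brain mask and inspect it with nibabel; the sketch below reuses the same hypothetical input path.

import nibabel as nib
from nipype.interfaces.fsl import BET

# Sketch: request the brain mask as well and report how many voxels it contains.
bet_mask = BET()
bet_mask.inputs.in_file = 'test-data/haxby2001/subj2/anat.nii.gz'
bet_mask.inputs.out_file = 'output-fsl-bet/anat_bet.nii.gz'
bet_mask.inputs.mask = True  # also writes anat_bet_mask.nii.gz
res = bet_mask.run()

mask_img = nib.load(res.outputs.mask_file)
print('brain voxels:', int(mask_img.get_fdata().sum()))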
Ejemplo n.º 19
0
def get_nuisance_mask(input,
                      pathSPM,
                      deformation,
                      path_output,
                      nerode_white=1,
                      nerode_csf=1,
                      segmentation=True,
                      cleanup=True):
    """
    This function calculates WM and CSF masks in the space of the functional time series. It uses
    SPM to compute WM and CSF probability maps, masks them with a skullstrip mask, and transforms
    them to native EPI space.
    Inputs:
        *input: input anatomy (orig.mgz).
        *pathSPM: path to the SPM toolbox.
        *deformation: coordinate mapping for the ana -> epi transformation.
        *path_output: path where output is saved.
        *nerode_white: number of WM mask erosion steps.
        *nerode_csf: number of CSF mask erosion steps.
        *segmentation: if False, reuse existing tissue probability maps instead of recomputing them.
        *cleanup: delete intermediate files.

    created by Daniel Haenelt
    Date created: 01-03-2019
    Last modified: 01-03-2019
    """
    import os
    import shutil as sh
    import nibabel as nb
    from scipy.ndimage.morphology import binary_erosion
    from nipype.interfaces.fsl import BET
    from nipype.interfaces.freesurfer.preprocess import MRIConvert
    from nighres.registration import apply_coordinate_mappings
    from lib.skullstrip.skullstrip_spm12 import skullstrip_spm12

    # make output folder
    if not os.path.exists(path_output):
        os.mkdir(path_output)

    # get filename without file extension of input file
    file = os.path.splitext(os.path.basename(input))[0]

    # convert to nifti format
    mc = MRIConvert()
    mc.inputs.in_file = input
    mc.inputs.out_file = os.path.join(path_output, file + ".nii")
    mc.inputs.out_type = "nii"
    mc.run()

    # bet skullstrip mask
    btr = BET()
    btr.inputs.in_file = os.path.join(path_output, file + ".nii")
    btr.inputs.frac = 0.5
    btr.inputs.mask = True
    btr.inputs.no_output = True
    btr.inputs.out_file = os.path.join(path_output, "bet")
    btr.inputs.output_type = "NIFTI"
    btr.run()

    # segmentation
    if segmentation:
        skullstrip_spm12(os.path.join(path_output, file + ".nii"), pathSPM,
                         path_output)

    # load tissue maps
    wm_array = nb.load(os.path.join(path_output, "skull",
                                    "c2" + file + ".nii")).get_fdata()
    csf_array = nb.load(
        os.path.join(path_output, "skull", "c3" + file + ".nii")).get_fdata()
    mask_array = nb.load(os.path.join(path_output, "bet_mask.nii")).get_fdata()

    # binarize
    wm_array[wm_array > 0] = 1
    csf_array[csf_array > 0] = 1

    # apply brain mask
    wm_array = wm_array * mask_array
    csf_array = csf_array * mask_array

    # erode wm
    wm_array = binary_erosion(
        wm_array,
        structure=None,
        iterations=nerode_white,
        mask=None,
        output=None,
        border_value=0,
        origin=0,
        brute_force=False,
    )

    # erode csf
    csf_array = binary_erosion(
        csf_array,
        structure=None,
        iterations=nerode_csf,
        mask=None,
        output=None,
        border_value=0,
        origin=0,
        brute_force=False,
    )

    # write wm and csf mask
    data_img = nb.load(input)
    wm_out = nb.Nifti1Image(wm_array, data_img.affine, data_img.header)
    nb.save(wm_out, os.path.join(path_output, "wm_mask_orig.nii"))
    csf_out = nb.Nifti1Image(csf_array, data_img.affine, data_img.header)
    nb.save(csf_out, os.path.join(path_output, "csf_mask_orig.nii"))

    # apply deformation to mask
    apply_coordinate_mappings(
        os.path.join(path_output, "wm_mask_orig.nii"),  # input 
        deformation,  # cmap
        interpolation="nearest",  # nearest or linear
        padding="zero",  # closest, zero or max
        save_data=True,  # save output data to file (boolean)
        overwrite=True,  # overwrite existing results (boolean)
        output_dir=path_output,  # output directory
        file_name="wm_mask"  # base name with file extension for output
    )

    apply_coordinate_mappings(
        os.path.join(path_output, "csf_mask_orig.nii"),  # input 
        deformation,  # cmap
        interpolation="nearest",  # nearest or linear
        padding="zero",  # closest, zero or max
        save_data=True,  # save output data to file (boolean)
        overwrite=True,  # overwrite existing results (boolean)
        output_dir=path_output,  # output directory
        file_name="csf_mask"  # base name with file extension for output
    )

    # rename transformed masks
    os.rename(os.path.join(path_output, "wm_mask_def-img.nii.gz"),
              os.path.join(path_output, "wm_mask.nii.gz"))
    os.rename(os.path.join(path_output, "csf_mask_def-img.nii.gz"),
              os.path.join(path_output, "csf_mask.nii.gz"))

    # cleanup
    if cleanup:
        os.remove(os.path.join(path_output, "bet_mask.nii"))
        os.remove(os.path.join(path_output, "csf_mask_orig.nii"))
        os.remove(os.path.join(path_output, "wm_mask_orig.nii"))
        os.remove(os.path.join(path_output, "orig.nii"))
        sh.rmtree(os.path.join(path_output, "skull"), ignore_errors=True)
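
A call to this function might look roughly as follows; every path below is a placeholder and depends on the local FreeSurfer, SPM12, and output layout.

# Hypothetical usage sketch; all paths are placeholders.
get_nuisance_mask(
    "/data/subj1/mri/orig.mgz",                # FreeSurfer anatomy
    "/opt/spm12",                              # local SPM12 installation
    "/data/subj1/deform/ana2epi.nii.gz",       # ana -> epi coordinate mapping
    "/data/subj1/nuisance",                    # output directory
    nerode_white=3,
    nerode_csf=3,
    segmentation=True,
    cleanup=True,
)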
Ejemplo n.º 20
0
import os
from nipype.interfaces import ants
from nipype.interfaces.fsl import BET, ImageMaths

def functional_per_participant_test():
	for i in ["","_aF","_cF1","_cF2","_pF"]:
		template = os.path.expanduser("~/ni_data/templates/ds_QBI_chr.nii.gz")
		participant = "4008"
		image_dir = os.path.expanduser("~/ni_data/ofM.dr/preprocessing/generic_work/_subject_session_{}.ofM{}/_scan_type_7_EPI_CBV/temporal_mean/".format(participant,i))
		try:
			for myfile in os.listdir(image_dir):
				if myfile.endswith(".nii.gz"):
					mimage = os.path.join(image_dir,myfile)
		except FileNotFoundError:
			pass
		else:
			n4 = ants.N4BiasFieldCorrection()
			n4.inputs.dimension = 3
			n4.inputs.input_image = mimage
			n4.inputs.bspline_fitting_distance = 100
			n4.inputs.shrink_factor = 2
			n4.inputs.n_iterations = [200,200,200,200]
			n4.inputs.convergence_threshold = 1e-11
			n4.inputs.output_image = 'n4_{}_ofM{}.nii.gz'.format(participant,i)
			n4_res = n4.run()

			functional_cutoff = ImageMaths()
			functional_cutoff.inputs.op_string = "-thrP 30"
			functional_cutoff.inputs.in_file = n4_res.outputs.output_image
			functional_cutoff_res = functional_cutoff.run()

			functional_BET = BET()
			functional_BET.inputs.mask = True
			functional_BET.inputs.frac = 0.5
			functional_BET.inputs.in_file = functional_cutoff_res.outputs.out_file
			functional_BET_res = functional_BET.run()

			registration = ants.Registration()
			registration.inputs.fixed_image = template
			registration.inputs.output_transform_prefix = "output_"
			registration.inputs.transforms = ['Affine', 'SyN']
			registration.inputs.transform_parameters = [(0.1,), (3.0, 3.0, 5.0)]
			registration.inputs.number_of_iterations = [[10000, 10000, 10000], [100, 100, 100]]
			registration.inputs.dimension = 3
			registration.inputs.write_composite_transform = True
			registration.inputs.collapse_output_transforms = True
			registration.inputs.initial_moving_transform_com = True
			# per-stage settings below: one entry each for the two transforms declared above (Affine, SyN)
			registration.inputs.metric = ['Mattes', ['Mattes', 'CC']]
			registration.inputs.metric_weight = [1, [0.5, 0.5]]
			registration.inputs.radius_or_number_of_bins = [32, [32, 4]]
			registration.inputs.sampling_strategy = ['Regular', [None, None]]
			registration.inputs.sampling_percentage = [0.3, [None, None]]
			registration.inputs.convergence_threshold = [1.e-8, -0.01]
			registration.inputs.convergence_window_size = [20, 5]
			registration.inputs.smoothing_sigmas = [[4, 2, 1], [1, 0.5, 0]]
			registration.inputs.sigma_units = ['vox'] * 2
			registration.inputs.shrink_factors = [[3, 2, 1], [4, 2, 1]]
			registration.inputs.use_estimate_learning_rate_once = [True] * 2
			registration.inputs.use_histogram_matching = [False, True]
			registration.inputs.winsorize_lower_quantile = 0.005
			registration.inputs.winsorize_upper_quantile = 0.995
			registration.inputs.args = '--float'
			registration.inputs.num_threads = 4
			registration.plugin_args = {'qsub_args': '-pe orte 4', 'sbatch_args': '--mem=6G -c 4'}

			registration.inputs.moving_image = functional_BET_res.outputs.out_file
			registration.inputs.output_warped_image = '{}_ofM{}.nii.gz'.format(participant,i)
			res = registration.run()
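
The composite transform written by this registration could then be applied to the full 4D functional series rather than just the temporal mean. The sketch below is only illustrative, with placeholder filenames; the name 'output_Composite.h5' follows from output_transform_prefix, and input_image_type=3 marks the input as a time series.

from nipype.interfaces import ants

# Illustrative sketch: warp a 4D functional series with the composite transform
# produced by the registration above (all filenames are placeholders).
warp_ts = ants.ApplyTransforms()
warp_ts.inputs.dimension = 3
warp_ts.inputs.input_image_type = 3  # 3 = time series
warp_ts.inputs.input_image = 'func_timeseries.nii.gz'
warp_ts.inputs.reference_image = 'ds_QBI_chr.nii.gz'
warp_ts.inputs.transforms = ['output_Composite.h5']
warp_ts.inputs.interpolation = 'Linear'
warp_ts.inputs.output_image = 'func_timeseries_warped.nii.gz'
warp_ts.run()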