Code Example #1
def _test_be(moving_image_id, reg):
    img = image_registration.img_data(moving_image_id, util.DATA_FOLDER,
                                      util.TEMP_FOLDER_PATH)
    img = image_registration.pre_process(img, False)

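    # Derive output paths for the brain-extracted (_bet) volume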
    resampled_file = img.pre_processed_filepath
    name = splitext(splitext(basename(resampled_file))[0])[0] + "_bet"
    img.pre_processed_filepath = util.TEMP_FOLDER_PATH + "/res/" +\
        splitext(basename(resampled_file))[0] +\
        '_bet.nii.gz'

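    # reg is an ants.Registration node supplied by the caller; only the
    # per-image inputs and output paths are set here.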
    reg.inputs.fixed_image = resampled_file
    reg.inputs.fixed_image_mask = img.label_inv_filepath
    reg.inputs.output_transform_prefix = util.TEMP_FOLDER_PATH + name
    reg.inputs.output_warped_image = util.TEMP_FOLDER_PATH + name + '_betReg.nii'
    transform = util.TEMP_FOLDER_PATH + name + 'InverseComposite.h5'

    reg.run()

    img.init_transform = transform

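    # Bring the resampled volume into template space with the inverse composite transform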
    reg_volume = util.transform_volume(resampled_file, transform)

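    # Skull-strip: multiply the registered volume by the template brain mask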
    mult = ants.MultiplyImages()
    mult.inputs.dimension = 3
    mult.inputs.first_input = reg_volume
    mult.inputs.second_input = image_registration.TEMPLATE_MASK
    mult.inputs.output_product_image = img.pre_processed_filepath
    mult.run()

    util.generate_image(img.pre_processed_filepath,
                        image_registration.TEMPLATE_VOLUME)
Code Example #2
def skullstrip_standard_node():
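    """Build a Node that multiplies the MNI152 template by its brain mask,
    producing a skull-stripped standard-space template."""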
    skullstrip_standard = Node(ants.MultiplyImages(
        dimension=3,
        first_input='/flywheel/v0/templates/mni_icbm152_nlin_asym_09a/mni_icbm152_t1_tal_nlin_asym_09a.nii',
        second_input='/flywheel/v0/templates/mni_icbm152_nlin_asym_09a/mni_icbm152_t1_tal_nlin_asym_09a_mask.nii',
        output_product_image='/flywheel/v0/templates/mni_icbm152_nlin_asym_09a/mni_icbm152_t1_tal_nlin_asym_09a_brain.nii'),
        name='skullstrip_standard_node')

    return skullstrip_standard
Code Example #3
File: util.py  Project: leb/NeuroImageRegistration
def prepare_template(template_vol, template_mask, overwrite=False):
    """ prepare template volumemoving"""
    # pylint: disable= global-statement,
    global TEMPLATE_MASKED_VOLUME

    TEMPLATE_MASKED_VOLUME = TEMP_FOLDER_PATH + "masked_template.nii"
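    # Multiply the template by its brain mask to produce the masked template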
    mult = ants.MultiplyImages()
    mult.inputs.dimension = 3
    mult.inputs.first_input = template_vol
    mult.inputs.second_input = template_mask
    mult.inputs.output_product_image = TEMPLATE_MASKED_VOLUME
    if not overwrite and os.path.exists(mult.inputs.output_product_image):
        return
    mult.run()
Code Example #4
def post_calculations(moving_dataset_image_ids, result=None):
    """ Transform images and calculate avg"""
    if result is None:
        result = {}

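    # Transform each image into template space and skull-strip it with the template mask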
    for _id in moving_dataset_image_ids:
        img = img_data(_id, util.DATA_FOLDER, util.TEMP_FOLDER_PATH)
        img.load_db_transforms()

        img_pre = img_data(img.fixed_image, util.DATA_FOLDER,
                           util.TEMP_FOLDER_PATH)
        img_pre.load_db_transforms()

        reg_vol = util.transform_volume(img.reg_img_filepath,
                                        img_pre.get_transforms())
        vol = util.TEMP_FOLDER_PATH + util.get_basename(
            basename(reg_vol)) + '_BE.nii.gz'

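        # Apply the template brain mask to the transformed volume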
        mult = ants.MultiplyImages()
        mult.inputs.dimension = 3
        mult.inputs.first_input = reg_vol
        mult.inputs.second_input = util.TEMPLATE_MASK
        mult.inputs.output_product_image = vol
        mult.run()

        label = "img"
        if label in result:
            result[label].append(vol)
        else:
            result[label] = [vol]

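        # Also transform any registered label images for this id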
        for (segmentation, label) in util.find_reg_label_images(_id):
            segmentation = util.transform_volume(segmentation,
                                                 img_pre.get_transforms(),
                                                 label_img=True)
            if label in result:
                result[label].append(segmentation)
            else:
                result[label] = [segmentation]
    return result
Code Example #5
def nonlinear_alignment_iteration(iternum=0, gradient_step=0.2):
    """
    Takes a template image and a set of input images, does
    a linear alignment to the template and updates it with the
    inverse of the average affine transform to the new template

    Returns a workflow

    """
    iteration_wf = Workflow(name="nl_iterative_alignment_%03d" % iternum)
    input_node_fields = ["image_paths", "template_image", "iteration_num"]
    inputnode = pe.Node(
        niu.IdentityInterface(fields=input_node_fields), name='inputnode')
    inputnode.inputs.iteration_num = iternum
    outputnode = pe.Node(
        niu.IdentityInterface(fields=["registered_image_paths", "affine_transforms",
                                      "warp_transforms", "composite_transforms",
                                      "updated_template"]), name='outputnode')
    ants_settings = pkgrf("qsiprep", "data/intramodal_nonlinear.json")
    reg = ants.Registration(from_file=ants_settings)
    iter_reg = pe.MapNode(
        reg, name="nlreg_%03d" % iternum, iterfield=["moving_image"])

    # Average the images
    averaged_images = pe.Node(
        ants.AverageImages(normalize=True, dimension=3),
        name="averaged_images")

    # Make an automask
    mask_average = pe.Node(afni.Automask(), name='mask_average')

    # Shape update to template:
    # Average the affines so that the inverse can be applied to the template
    affines_to_list = pe.Node(niu.Merge(1), name="affines_to_list")
    warps_to_list = pe.Node(niu.Merge(1), name="warps_to_list")
    avg_affines = pe.Node(
        ants.AverageAffineTransform(dimension=3,
                                    output_affine_transform="AveragedAffines.mat"),
        name="avg_affines")

    # Average the warps:
    average_warps = pe.Node(
        ants.AverageImages(dimension=3, normalize=False), name="average_warps")
    # Scale by the gradient step
    scale_warp = pe.Node(
        ants.MultiplyImages(dimension=3, second_input=gradient_step,
                            output_product_image="scaled_warp.nii.gz"),
        name="scale_warp")
    # Align the warps to the template image
    align_warp = pe.Node(
        ants.ApplyTransforms(
            input_image_type=1, invert_transform_flags=[True]),
        name="align_warp")

    # transform the template for the shape update
    shape_update_template = pe.Node(
        ants.ApplyTransforms(interpolation="LanczosWindowedSinc",
                             invert_transform_flags=[True, False, False, False, False]),
        name="shape_update_template")
    shape_update_merge = pe.Node(niu.Merge(5), name="shape_update_merge")

    # Helpers to split the (affine, warp) pairs in forward_transforms
    def get_first(input_pairs):
        return [input_pair[0] for input_pair in input_pairs]

    def get_second(input_pairs):
        return [input_pair[1] for input_pair in input_pairs]

    iteration_wf.connect([
        (inputnode, iter_reg, [
            ('image_paths', 'moving_image'),
            ('template_image', 'fixed_image')]),
        (iter_reg, affines_to_list, [(('forward_transforms', get_first), 'in1')]),
        (affines_to_list, avg_affines, [('out', 'transforms')]),
        (iter_reg, warps_to_list, [(('forward_transforms', get_second), 'in1')]),
        (iter_reg, averaged_images, [('warped_image', 'images')]),

        # Average the warps, scale them, and transform to be aligned with the template
        (warps_to_list, average_warps, [('out', 'images')]),
        (average_warps, scale_warp, [('output_average_image', 'first_input')]),
        (scale_warp, align_warp, [
            ('output_product_image', 'input_image')]),
        (avg_affines, align_warp, [('affine_transform', 'transforms')]),
        (inputnode, align_warp, [('template_image', 'reference_image')]),
        (avg_affines, shape_update_merge, [('affine_transform', 'in1')]),
        (align_warp, shape_update_merge, [
            ('output_image', 'in2'), ('output_image', 'in3'),
            ('output_image', 'in4'), ('output_image', 'in5')]),
        (shape_update_merge, shape_update_template, [('out', 'transforms')]),
        (averaged_images, shape_update_template, [
            ('output_average_image', 'input_image'),
            ('output_average_image', 'reference_image')]),
        (shape_update_template, outputnode, [('output_image', 'updated_template')]),
        (iter_reg, outputnode, [
            ('forward_transforms', 'affine_transforms'),
            ('warped_image', 'registered_image_paths')])
    ])

    return iteration_wf
Code Example #6
def pre_process(img, do_bet=True, slice_size=1, reg_type=None, be_method=None):
    # pylint: disable= too-many-statements, too-many-locals, too-many-branches
    """ Pre process the data"""
    path = img.temp_data_path

    input_file = img.img_filepath
    n4_file = path + util.get_basename(input_file) + '_n4.nii.gz'
    norm_file = path + util.get_basename(n4_file) + '_norm.nii.gz'
    resampled_file = path + util.get_basename(norm_file) + '_resample.nii.gz'
    name = util.get_basename(resampled_file) + "_be"
    img.pre_processed_filepath = path + name + '.nii.gz'

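    # N4 bias field correction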
    n4bias = ants.N4BiasFieldCorrection()
    n4bias.inputs.dimension = 3
    n4bias.inputs.num_threads = NUM_THREADS_ANTS
    n4bias.inputs.input_image = input_file
    n4bias.inputs.output_image = n4_file
    n4bias.run()

    # normalization [0,100], same as template
    normalize_img = nib.load(n4_file)
    temp_data = np.asanyarray(normalize_img.dataobj)  # get_data() was removed in nibabel 5
    temp_img = nib.Nifti1Image(temp_data / np.amax(temp_data) * 100,
                               normalize_img.affine, normalize_img.header)
    temp_img.to_filename(norm_file)
    del temp_img

    # resample volume to 1 mm slices
    target_affine_3x3 = np.eye(3) * slice_size
    img_3d_affine = resample_img(norm_file, target_affine=target_affine_3x3)
    nib.save(img_3d_affine, resampled_file)

    if not do_bet:
        img.pre_processed_filepath = resampled_file
        return img

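    # be_method 0: register the template to the subject, move the subject into
    # template space with the inverse transform, then apply the template mask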
    if be_method == 0:
        img.init_transform = path + name + '_InitRegTo' + str(
            img.fixed_image) + '.h5'

        reg = ants.Registration()
        # reg.inputs.args = "--verbose 1"
        reg.inputs.collapse_output_transforms = True
        reg.inputs.fixed_image = resampled_file
        reg.inputs.moving_image = util.TEMPLATE_VOLUME
        reg.inputs.fixed_image_mask = img.label_inv_filepath

        reg.inputs.num_threads = NUM_THREADS_ANTS
        reg.inputs.initial_moving_transform_com = True

        if reg_type == RIGID:
            reg.inputs.transforms = ['Rigid', 'Rigid']
        elif reg_type == COMPOSITEAFFINE:
            reg.inputs.transforms = ['Rigid', 'CompositeAffine']
        elif reg_type == SIMILARITY:
            reg.inputs.transforms = ['Rigid', 'Similarity']
        else:
            reg.inputs.transforms = ['Rigid', 'Affine']
        reg.inputs.metric = ['MI', 'MI']
        reg.inputs.radius_or_number_of_bins = [32, 32]
        reg.inputs.metric_weight = [1, 1]
        reg.inputs.convergence_window_size = [5, 5]
        reg.inputs.number_of_iterations = ([[
            15000, 12000, 10000, 10000, 10000, 5000, 5000
        ], [10000, 10000, 5000, 5000]])
        reg.inputs.shrink_factors = [[19, 16, 12, 9, 5, 3, 1], [9, 5, 3, 1]]
        reg.inputs.smoothing_sigmas = [[10, 10, 10, 8, 4, 1, 0], [8, 4, 1, 0]]
        reg.inputs.convergence_threshold = [1.e-6] * 2
        reg.inputs.transform_parameters = [(0.25, ), (0.25, )]
        reg.inputs.sigma_units = ['vox'] * 2
        reg.inputs.use_estimate_learning_rate_once = [True, True]

        reg.inputs.write_composite_transform = True
        reg.inputs.output_transform_prefix = path + name
        reg.inputs.output_warped_image = path + name + '_beReg.nii.gz'

        transform = path + name + 'InverseComposite.h5'
        util.LOGGER.info("starting be registration")
        reg.run()
        util.LOGGER.info("Finished be registration")

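        # Move the subject volume into template space with the inverse transform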
        reg_volume = util.transform_volume(resampled_file, transform)
        shutil.copy(transform, img.init_transform)

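        # Skull-strip by multiplying with the template brain mask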
        mult = ants.MultiplyImages()
        mult.inputs.dimension = 3
        mult.inputs.first_input = reg_volume
        mult.inputs.second_input = util.TEMPLATE_MASK
        mult.inputs.output_product_image = img.pre_processed_filepath
        mult.run()

        util.generate_image(img.pre_processed_filepath, reg_volume)
    elif be_method == 1:
        # http://fsl.fmrib.ox.ac.uk/fsl/fslwiki/BET/UserGuide#Main_bet2_options:
        bet = fsl.BET(command=BET_COMMAND)
        bet.inputs.in_file = resampled_file
        # pylint: disable= pointless-string-statement
        """ fractional intensity threshold (0->1); default=0.5;
        smaller values give larger brain outline estimates"""
        bet.inputs.frac = 0.25
        """ vertical gradient in fractional intensity threshold (-1->1);
        default=0; positive values give larger brain outline at bottom,
        smaller at top """
        bet.inputs.vertical_gradient = 0
        """  This attempts to reduce image bias, and residual neck voxels.
        This can be useful when running SIENA or SIENAX, for example.
        Various stages involving FAST segmentation-based bias field removal
        and standard-space masking are combined to produce a result which
        can often give better results than just running bet2."""
        # bet.inputs.reduce_bias = True
        bet.inputs.mask = True

        bet.inputs.out_file = img.pre_processed_filepath

        bet.run()
        util.generate_image(img.pre_processed_filepath, resampled_file)
    elif be_method == 2:
        if BET_FRAC > 0:
            name = util.get_basename(resampled_file) + "_bet"
            # http://fsl.fmrib.ox.ac.uk/fsl/fslwiki/BET/UserGuide#Main_bet2_options:
            bet = fsl.BET(command=BET_COMMAND)
            bet.inputs.in_file = resampled_file
            # pylint: disable= pointless-string-statement
            """ fractional intensity threshold (0->1); default=0.5;
            smaller values give larger brain outline estimates"""
            bet.inputs.frac = BET_FRAC
            """ vertical gradient in fractional intensity threshold (-1->1);
            default=0; positive values give larger brain outline at bottom,
            smaller at top """
            bet.inputs.vertical_gradient = 0
            """  This attempts to reduce image bias, and residual neck voxels.
            This can be useful when running SIENA or SIENAX, for example.
            Various stages involving FAST segmentation-based bias field removal
            and standard-space masking are combined to produce a result which
            can often give better results than just running bet2."""
            bet.inputs.reduce_bias = True
            bet.inputs.mask = True
            bet.inputs.out_file = path + name + '.nii.gz'
            util.LOGGER.info("starting bet registration")
            start_time = datetime.datetime.now()
            util.LOGGER.info(bet.cmdline)
            if not os.path.exists(bet.inputs.out_file):
                bet.run()
            util.LOGGER.info("Finished bet registration 0: ")
            util.LOGGER.info(datetime.datetime.now() - start_time)
            name += "_be"
            moving_image = util.TEMPLATE_MASKED_VOLUME
            fixed_image = bet.inputs.out_file
        else:
            name = util.get_basename(resampled_file) + "_be"
            moving_image = util.TEMPLATE_VOLUME
            fixed_image = resampled_file
        img.init_transform = path + name + '_InitRegTo' + str(
            img.fixed_image) + '.h5'
        img.pre_processed_filepath = path + name + '.nii.gz'
        reg = ants.Registration()
        # reg.inputs.args = "--verbose 1"
        reg.inputs.collapse_output_transforms = True
        reg.inputs.fixed_image = fixed_image
        reg.inputs.moving_image = moving_image
        reg.inputs.fixed_image_mask = img.label_inv_filepath

        reg.inputs.num_threads = NUM_THREADS_ANTS
        reg.inputs.initial_moving_transform_com = True

        if reg_type == RIGID:
            reg.inputs.transforms = ['Rigid', 'Rigid']
        elif reg_type == COMPOSITEAFFINE:
            reg.inputs.transforms = ['Rigid', 'CompositeAffine']
        elif reg_type == SIMILARITY:
            reg.inputs.transforms = ['Rigid', 'Similarity']
        else:  # default to Affine, matching the be_method == 0 branch
            reg.inputs.transforms = ['Rigid', 'Affine']
        reg.inputs.metric = ['MI', 'MI']
        reg.inputs.radius_or_number_of_bins = [32, 32]
        reg.inputs.metric_weight = [1, 1]
        reg.inputs.convergence_window_size = [5, 5]
        reg.inputs.sampling_strategy = ['Regular'] * 2
        reg.inputs.sampling_percentage = [0.5] * 2
        reg.inputs.number_of_iterations = ([[10000, 10000, 5000, 5000],
                                            [10000, 10000, 5000, 5000]])
        reg.inputs.shrink_factors = [[9, 5, 3, 1], [9, 5, 3, 1]]
        reg.inputs.smoothing_sigmas = [[8, 4, 1, 0], [8, 4, 1, 0]]
        reg.inputs.transform_parameters = [(0.25, ), (0.25, )]
        reg.inputs.convergence_threshold = [1.e-6] * 2
        reg.inputs.sigma_units = ['vox'] * 2
        reg.inputs.use_estimate_learning_rate_once = [True, True]

        reg.inputs.write_composite_transform = True
        reg.inputs.output_transform_prefix = path + name
        reg.inputs.output_warped_image = path + name + '_TemplateReg.nii.gz'
        transform = path + name + 'InverseComposite.h5'
        util.LOGGER.info("starting be registration")
        util.LOGGER.info(reg.cmdline)
        start_time = datetime.datetime.now()
        if not os.path.exists(reg.inputs.output_warped_image):
            reg.run()
        util.LOGGER.info("Finished be registration: ")
        util.LOGGER.info(datetime.datetime.now() - start_time)

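        # Move the subject volume into template space and save the transform for reuse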
        reg_volume = util.transform_volume(resampled_file, transform)
        shutil.copy(transform, img.init_transform)

        # The subject-specific mask (img.reg_brainmask_filepath) is disabled
        # here; the template mask is always used.
        brain_mask = util.TEMPLATE_MASK
        util.LOGGER.info("Using brain mask " + brain_mask)

        mult = ants.MultiplyImages()
        mult.inputs.dimension = 3
        mult.inputs.first_input = reg_volume
        mult.inputs.second_input = brain_mask
        mult.inputs.output_product_image = img.pre_processed_filepath
        mult.run()

        util.generate_image(img.pre_processed_filepath, reg_volume)
    else:
        util.LOGGER.error("Invalid brain extraction method: %s", be_method)

    util.LOGGER.info("---BET " + img.pre_processed_filepath)
    return img
Code Example #7
get_gm = Node(name='Get_GM',
              interface=Function(input_names=['posteriors'],
                                 output_names=['GM'],
                                 function=Get_GM))

#-----------------------------------------------------------------------------------------------------
# Make a mask of the warped image, to use it with Atropos
binarize_warped_image = Node(fsl.UnaryMaths(), name='Binarize_Warped_Image')
binarize_warped_image.inputs.operation = 'bin'
binarize_warped_image.inputs.output_datatype = 'char'

#-----------------------------------------------------------------------------------------------------
# Multiply by the Jacobian determinant to get the modulated image
modulate_GM = Node(ants.MultiplyImages(), name='Modulate_GM')
modulate_GM.inputs.dimension = 3
modulate_GM.inputs.output_product_image = 'Modulated_GM.nii.gz'
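# first_input (the GM map) and second_input (the Jacobian determinant) are
# presumably supplied via workflow connections not shown in this excerpt.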

#-----------------------------------------------------------------------------------------------------
# Smooth the modulated images
smoothing = Node(fsl.Smooth(), name='Smoothing')
smoothing.iterables = ('fwhm', [1.5, 2, 2.3, 2.7, 3])

#-----------------------------------------------------------------------------------------------------
VBM_workflow.connect([
    (infosource, selectfiles, [('subject_id', 'subject_id')]),
    (selectfiles, bias_corr, [('3D', 'input_image')]),
    (bias_corr, brain_ext, [('output_image', 'anatomical_image')]),
    # ... (remaining connections omitted in this excerpt)
])
Code Example #8
def init_enhance_and_skullstrip_dwi_wf(name='enhance_and_skullstrip_dwi_wf',
                                       do_biascorrection=True,
                                       omp_nthreads=1):
    """
    https://community.mrtrix.org/t/dwibiascorrect-with-ants-high-intensity-in-cerebellum-brainstem/1338/3

    Truncates image intensities, runs N4, creates a rough initial mask

    .. workflow ::
        :graph2use: orig
        :simple_form: yes

        from qsiprep.workflows.dwi.util import init_enhance_and_skullstrip_dwi_wf
        wf = init_enhance_and_skullstrip_dwi_wf(omp_nthreads=1)

    **Parameters**
        name : str
            Name of workflow (default: ``enhance_and_skullstrip_dwi_wf``)
        do_biascorrection : Bool
            Do bias correction on ``in_file``?
        omp_nthreads : int
            number of threads available to parallel nodes

    **Inputs**

        in_file
            dwi image (single volume)


    **Outputs**

        bias_corrected_file
            the ``in_file`` after N4BiasFieldCorrection and sharpening
        skull_stripped_file
            the ``bias_corrected_file`` after soft skull-stripping
        mask_file
            mask of the skull-stripped input file
        out_report
            reportlet for the skull-stripping

    """
    workflow = Workflow(name=name)
    inputnode = pe.Node(niu.IdentityInterface(fields=['in_file']),
                        name='inputnode')
    outputnode = pe.Node(niu.IdentityInterface(
        fields=['mask_file', 'skull_stripped_file', 'bias_corrected_file']),
                         name='outputnode')

    # Truncate intensity values so they're OK for N4
    truncate_values = pe.Node(ImageMath(dimension=3,
                                        operation="TruncateImageIntensity",
                                        secondary_arg="0.0 0.98 512"),
                              name="truncate_values")

    # Truncate intensity values for creating a mask
    # (there are many high outliers in b=0 images)
    truncate_values_for_masking = pe.Node(ImageMath(
        dimension=3,
        operation="TruncateImageIntensity",
        secondary_arg="0.0 0.9 512"),
                                          name="truncate_values_for_masking")

    # N4 will break if any negative values are present.
    rescale_image = pe.Node(ImageMath(dimension=3,
                                      operation="RescaleImage",
                                      secondary_arg="0 1000"),
                            name="rescale_image")

    # Run N4 normally, force num_threads=1 for stability (images are small, no need for >1)
    n4_correct = pe.Node(ants.N4BiasFieldCorrection(
        dimension=3,
        n_iterations=[200, 200],
        convergence_threshold=1e-6,
        bspline_order=3,
        bspline_fitting_distance=150,
        copy_header=True,
        args='-v 1'),
                         name='n4_correct',
                         n_procs=1)

    # Sharpen the b0 ref
    sharpen_image = pe.Node(ImageMath(dimension=3, operation="Sharpen"),
                            name="sharpen_image")

    # Basic mask
    initial_mask = pe.Node(afni.Automask(outputtype="NIFTI_GZ"),
                           name="initial_mask")

    # Fill holes left by Automask
    fill_holes = pe.Node(ImageMath(dimension=3,
                                   operation='FillHoles',
                                   secondary_arg='2'),
                         name='fill_holes')

    # Dilate before smoothing
    dilate_mask = pe.Node(ImageMath(dimension=3,
                                    operation='MD',
                                    secondary_arg='1'),
                          name='dilate_mask')

    # Smooth the mask and use it as a weight for N4
    smooth_mask = pe.Node(ImageMath(dimension=3,
                                    operation='G',
                                    secondary_arg='4'),
                          name='smooth_mask')

    # Make a "soft" skull-stripped image
    apply_mask = pe.Node(ants.MultiplyImages(
        dimension=3, output_product_image="SkullStrippedRef.nii.gz"),
                         name="apply_mask")

    fix_mask_header = pe.Node(CopyHeader(), name='fix_mask_header')
    fix_smooth_mask_header = pe.Node(CopyHeader(),
                                     name='fix_smooth_mask_header')

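    # Wire the pipeline: truncate intensities -> rescale -> N4 (weighted by the
    # smoothed mask) -> sharpen -> soft skull-strip via MultiplyImages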
    workflow.connect([
        (inputnode, truncate_values, [('in_file', 'in_file')]),
        (truncate_values, rescale_image, [('out_file', 'in_file')]),
        (inputnode, truncate_values_for_masking, [('in_file', 'in_file')]),
        (truncate_values_for_masking, initial_mask, [('out_file', 'in_file')]),
        (initial_mask, fill_holes, [('out_file', 'in_file')]),
        (fill_holes, dilate_mask, [('out_file', 'in_file')]),
        (dilate_mask, smooth_mask, [('out_file', 'in_file')]),
        (rescale_image, n4_correct, [('out_file', 'input_image')]),
        (rescale_image, fix_smooth_mask_header, [('out_file', 'hdr_file')]),
        (smooth_mask, fix_smooth_mask_header, [('out_file', 'in_file')]),
        (fix_smooth_mask_header, n4_correct, [('out_file', 'weight_image')]),
        (n4_correct, sharpen_image, [('output_image', 'in_file')]),
        (sharpen_image, outputnode, [('out_file', 'bias_corrected_file')]),
        (sharpen_image, apply_mask, [('out_file', 'first_input')]),
        (smooth_mask, apply_mask, [('out_file', 'second_input')]),
        (apply_mask, outputnode, [('output_product_image',
                                   'skull_stripped_file')]),
        (fill_holes, fix_mask_header, [('out_file', 'in_file')]),
        (sharpen_image, fix_mask_header, [('out_file', 'hdr_file')]),
        (fix_mask_header, outputnode, [('out_file', 'mask_file')])
    ])

    return workflow