Example #1
File: dmri.py Project: amrka/banana
    def tensor_pipeline(self, **name_maps):  # @UnusedVariable
        """
        Fits the apparent diffusion tensor (DT) to each voxel of the image
        """

#             inputs=[FilesetSpec('bias_correct', nifti_gz_format),
#                     FilesetSpec('grad_dirs', fsl_bvecs_format),
#                     FilesetSpec('bvalues', fsl_bvals_format),
#                     FilesetSpec('brain_mask', nifti_gz_format)],
#             outputs=[FilesetSpec('tensor', nifti_gz_format)],

        pipeline = self.new_pipeline(
            name='tensor',
            desc=("Estimates the apparent diffusion tensor in each "
                  "voxel"),
            references=[],
            name_maps=name_maps)
        # Create tensor fit node
        dwi2tensor = pipeline.add(
            'dwi2tensor',
            FitTensor())
        dwi2tensor.inputs.out_file = 'dti.nii.gz'
        # Gradient merge node
        fsl_grads = pipeline.add("fsl_grads", MergeTuple(2))
        # Connect nodes
        pipeline.connect(fsl_grads, 'out', dwi2tensor, 'grad_fsl')
        # Connect to inputs
        pipeline.connect_input('grad_dirs', fsl_grads, 'in1')
        pipeline.connect_input('bvalues', fsl_grads, 'in2')
        pipeline.connect_input('bias_correct', dwi2tensor, 'in_file')
        pipeline.connect_input('brain_mask', dwi2tensor, 'in_mask')
        # Connect to outputs
        pipeline.connect_output('tensor', dwi2tensor, 'out_file')
        # Check inputs/output are connected
        return pipeline
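For comparison, the same tensor fit can be run outside of Arcana by driving the underlying interface directly. The sketch below is a minimal, hypothetical standalone use of nipype's bundled mrtrix3.FitTensor (assumed equivalent to the FitTensor used above; all file paths are placeholders):

# Minimal standalone sketch of the tensor fit; file paths are hypothetical.
from nipype.interfaces.mrtrix3 import FitTensor

fit = FitTensor()
fit.inputs.in_file = 'bias_correct.nii.gz'      # bias-corrected DWI
fit.inputs.grad_fsl = ('dwi.bvec', 'dwi.bval')  # FSL-style (bvecs, bvals) pair
fit.inputs.in_mask = 'brain_mask.nii.gz'        # restrict the fit to brain voxels
fit.inputs.out_file = 'dti.nii.gz'
# result = fit.run()  # wraps the MRtrix 'dwi2tensor' command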
Example #2
File: dmri.py Project: amrka/banana
    def fod_pipeline(self, **name_maps):  # @UnusedVariable
        """
        Estimates the fibre orientation distribution (FOD) using constrained
        spherical deconvolution

        Parameters
        ----------
        """

#             inputs=[FilesetSpec('bias_correct', nifti_gz_format),
#                     FilesetSpec('grad_dirs', fsl_bvecs_format),
#                     FilesetSpec('bvalues', fsl_bvals_format),
#                     FilesetSpec('wm_response', text_format),
#                     FilesetSpec('brain_mask', nifti_gz_format)],
#             outputs=[FilesetSpec('fod', nifti_gz_format)],
        pipeline = self.new_pipeline(
            name='fod',
            desc=("Estimates the fibre orientation distribution in each"
                  " voxel"),
            references=[mrtrix_cite],
            name_maps=name_maps)
        if self.branch('fod_algorithm', 'msmt_csd'):
            pipeline.add_input(FilesetSpec('gm_response', text_format))
            pipeline.add_input(FilesetSpec('csf_response', text_format))
        # Create fod fit node
        dwi2fod = pipeline.add(
            'dwi2fod',
            EstimateFOD(),
            requirements=[mrtrix_req.v('3.0rc3')])
        dwi2fod.inputs.algorithm = self.parameter('fod_algorithm')
        # Gradient merge node
        fsl_grads = pipeline.add("fsl_grads", MergeTuple(2))
        # Connect nodes
        pipeline.connect(fsl_grads, 'out', dwi2fod, 'grad_fsl')
        # Connect to inputs
        pipeline.connect_input('grad_dirs', fsl_grads, 'in1')
        pipeline.connect_input('bvalues', fsl_grads, 'in2')
        pipeline.connect_input('bias_correct', dwi2fod, 'in_file')
        pipeline.connect_input('wm_response', dwi2fod, 'wm_txt')
        pipeline.connect_input('brain_mask', dwi2fod, 'mask_file')
        # Connect to outputs
        pipeline.connect_output('wm_odf', dwi2fod, 'wm_odf')
        # If multi-tissue, also fit and output the GM and CSF ODFs
        if self.multi_tissue:
            pipeline.connect_input('gm_response', dwi2fod, 'gm_txt')
            pipeline.connect_input('csf_response', dwi2fod, 'csf_txt')
            dwi2fod.inputs.gm_odf = 'gm.mif'
            dwi2fod.inputs.csf_odf = 'csf.mif'
            pipeline.connect_output('gm_odf', dwi2fod, 'gm_odf')
            pipeline.connect_output('csf_odf', dwi2fod, 'csf_odf')
        # Check inputs/output are connected
        return pipeline
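The msmt_csd branch above simply adds the GM and CSF response inputs and ODF outputs. A minimal standalone sketch using nipype's mrtrix3.EstimateFOD (assumed equivalent to the EstimateFOD interface used above; paths and response files are placeholders):

# Standalone sketch of the multi-tissue FOD fit; all paths are hypothetical.
from nipype.interfaces.mrtrix3 import EstimateFOD

fod = EstimateFOD()
fod.inputs.algorithm = 'msmt_csd'               # or 'csd' for single-tissue
fod.inputs.in_file = 'bias_correct.nii.gz'
fod.inputs.grad_fsl = ('dwi.bvec', 'dwi.bval')
fod.inputs.mask_file = 'brain_mask.nii.gz'
fod.inputs.wm_txt = 'wm_response.txt'
fod.inputs.wm_odf = 'wm.mif'
fod.inputs.gm_txt = 'gm_response.txt'           # multi-tissue only
fod.inputs.gm_odf = 'gm.mif'
fod.inputs.csf_txt = 'csf_response.txt'         # multi-tissue only
fod.inputs.csf_odf = 'csf.mif'
# result = fod.run()  # wraps the MRtrix 'dwi2fod' command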
Example #3
File: dmri.py Project: amrka/banana
    def response_pipeline(self, **name_maps):  # @UnusedVariable
        """
        Estimates the fibre response function used for constrained spherical
        deconvolution

        Parameters
        ----------
        response_algorithm : str
            Algorithm used to estimate the response
        """
#         outputs = [FilesetSpec('wm_response', text_format)]
#         if self.branch('response_algorithm', ('dhollander', 'msmt_5tt')):
#             outputs.append(FilesetSpec('gm_response', text_format))
#             outputs.append(FilesetSpec('csf_response', text_format))

#             inputs=[FilesetSpec('bias_correct', nifti_gz_format),
#                     FilesetSpec('grad_dirs', fsl_bvecs_format),
#                     FilesetSpec('bvalues', fsl_bvals_format),
#                     FilesetSpec('brain_mask', nifti_gz_format)],
#             outputs=outputs,

        pipeline = self.new_pipeline(
            name='response',
            desc=("Estimates the fibre response function"),
            references=[mrtrix_cite],
            name_maps=name_maps)
        # Create fod fit node
        response = pipeline.add(
            'response',
            ResponseSD(),
            requirements=[mrtrix_req.v('3.0rc3')])
        response.inputs.algorithm = self.parameter('response_algorithm')
        # Gradient merge node
        fsl_grads = pipeline.add(
            "fsl_grads",
            MergeTuple(2))
        # Connect nodes
        pipeline.connect(fsl_grads, 'out', response, 'grad_fsl')
        # Connect to inputs
        pipeline.connect_input('grad_dirs', fsl_grads, 'in1')
        pipeline.connect_input('bvalues', fsl_grads, 'in2')
        pipeline.connect_input('bias_correct', response, 'in_file')
        pipeline.connect_input('brain_mask', response, 'in_mask')
        # Connect to outputs
        pipeline.connect_output('wm_response', response, 'wm_file')
        if self.multi_tissue:
            response.inputs.gm_file = 'gm.txt'
            response.inputs.csf_file = 'csf.txt'
            pipeline.connect_output('gm_response', response, 'gm_file')
            pipeline.connect_output('csf_response', response, 'csf_file')
        # Check inputs/output are connected
        return pipeline
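A minimal standalone sketch of the response estimation using nipype's mrtrix3.ResponseSD (assumed equivalent to the ResponseSD interface used above; the algorithm choice and paths are placeholders):

# Standalone sketch of the response-function estimation; paths are hypothetical.
from nipype.interfaces.mrtrix3 import ResponseSD

resp = ResponseSD()
resp.inputs.algorithm = 'dhollander'            # e.g. 'tournier' for single-tissue
resp.inputs.in_file = 'bias_correct.nii.gz'
resp.inputs.grad_fsl = ('dwi.bvec', 'dwi.bval')
resp.inputs.in_mask = 'brain_mask.nii.gz'
resp.inputs.wm_file = 'wm.txt'
resp.inputs.gm_file = 'gm.txt'                  # multi-tissue algorithms only
resp.inputs.csf_file = 'csf.txt'                # multi-tissue algorithms only
# result = resp.run()  # wraps the MRtrix 'dwi2response' command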
Example #4
File: dmri.py Project: amrka/banana
    def extract_b0_pipeline(self, **name_maps):  # @UnusedVariable
        """
        Extracts the b0 images from a DWI study and takes their mean
        """

#             inputs=[FilesetSpec('bias_correct', nifti_gz_format),
#                     FilesetSpec('grad_dirs', fsl_bvecs_format),
#                     FilesetSpec('bvalues', fsl_bvals_format)],
#             outputs=[FilesetSpec('b0', nifti_gz_format)],
        pipeline = self.new_pipeline(
            name='extract_b0',
            desc="Extract b0 image from a DWI study",
            references=[mrtrix_cite],
            name_maps=name_maps)
        # Gradient merge node
        fsl_grads = pipeline.add("fsl_grads", MergeTuple(2))
        # Extraction node
        extract_b0s = pipeline.add(
            'extract_b0s', ExtractDWIorB0(),
            requirements=[mrtrix_req.v('3.0rc3')])
        extract_b0s.inputs.bzero = True
        extract_b0s.inputs.quiet = True
        # FIXME: Need a registration step before the mean
        # Mean calculation node
        mean = pipeline.add(
            "mean",
            MRMath(),
            requirements=[mrtrix_req.v('3.0rc3')])
        mean.inputs.axis = 3
        mean.inputs.operation = 'mean'
        mean.inputs.quiet = True
        # Convert to Nifti
        mrconvert = pipeline.add(
            "output_conversion",
            MRConvert(),
            requirements=[mrtrix_req.v('3.0rc3')])
        mrconvert.inputs.out_ext = '.nii.gz'
        mrconvert.inputs.quiet = True
        # Connect inputs
        pipeline.connect_input('bias_correct', extract_b0s, 'in_file')
        pipeline.connect_input('grad_dirs', fsl_grads, 'in1')
        pipeline.connect_input('bvalues', fsl_grads, 'in2')
        # Connect between nodes
        pipeline.connect(extract_b0s, 'out_file', mean, 'in_files')
        pipeline.connect(fsl_grads, 'out', extract_b0s, 'grad_fsl')
        pipeline.connect(mean, 'out_file', mrconvert, 'in_file')
        # Connect outputs
        pipeline.connect_output('b0', mrconvert, 'out_file')
        # Check inputs/outputs are connected
        return pipeline
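The same extract-then-average chain can be sketched with nipype's bundled mrtrix3.DWIExtract and mrtrix3.MRMath, which appear to mirror the ExtractDWIorB0/MRMath wrappers used above (a hypothetical standalone sketch; all paths are placeholders):

# Standalone sketch of b0 extraction followed by averaging; paths are hypothetical.
from nipype.interfaces.mrtrix3 import DWIExtract, MRMath

extract = DWIExtract()
extract.inputs.in_file = 'bias_correct.nii.gz'
extract.inputs.grad_fsl = ('dwi.bvec', 'dwi.bval')
extract.inputs.bzero = True                     # keep only the b=0 volumes
extract.inputs.out_file = 'b0s.nii.gz'
# extract.run()

mean = MRMath()
mean.inputs.in_file = 'b0s.nii.gz'
mean.inputs.operation = 'mean'
mean.inputs.axis = 3                            # average over the volume axis
mean.inputs.out_file = 'b0.nii.gz'
# mean.run()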
Example #5
    def fsl_grads(self, pipeline, coregistered=True):
        """
        Adds and returns a node to the pipeline that merges the FSL bvecs and
        bvals into the tuple expected by 'grad_fsl' inputs
        """
        try:
            fslgrad = pipeline.node('fslgrad')
        except ArcanaNameError:
            if self.is_coregistered and coregistered:
                grad_dirs = 'grad_dirs_coreg'
            else:
                grad_dirs = 'grad_dirs'
            # Gradient merge node
            fslgrad = pipeline.add(
                "fslgrad",
                MergeTuple(2),
                inputs={
                    'in1': (grad_dirs, fsl_bvecs_format),
                    'in2': ('bvalues', fsl_bvals_format)})
        return (fslgrad, 'out')
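A hypothetical sketch of how this helper would be called from another pipeline-building method of the same class (names follow the surrounding examples; the returned (node, 'out') handle plugs straight into any 'grad_fsl' port via the connect argument shown in Example #6 below):

# Hypothetical usage inside another pipeline method of this study class.
fslgrad = self.fsl_grads(pipeline, coregistered=False)
pipeline.add(
    'dwi2mask',
    BrainMask(out_file='brain_mask.nii.gz'),
    inputs={
        'in_file': ('preproc', nifti_gz_format)},
    connect={
        'grad_fsl': fslgrad},
    requirements=[mrtrix_req.v('3.0rc3')])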
Example #6
File: dmri.py Project: amrka/banana
    def brain_extraction_pipeline(self, **name_maps):  # @UnusedVariable @IgnorePep8
        """
        Generates a whole brain mask using MRtrix's 'dwi2mask' command

        Parameters
        ----------
        mask_tool : str
            Can be either 'bet' or 'dwi2mask' depending on which mask tool you
            want to use
        """

        if self.branch('brain_extract_method', 'mrtrix'):
            pipeline = self.new_pipeline(
                'brain_extraction',
                desc="Generate brain mask from b0 images",
                references=[mrtrix_cite],
                name_maps=name_maps)

            # Gradient merge node
            grad_fsl = pipeline.add(
                "grad_fsl",
                MergeTuple(2),
                inputs={
                    'in1': ('grad_dirs', fsl_bvecs_format),
                    'in2': ('bvalues', fsl_bvals_format)})

            # Create mask node
            pipeline.add(
                'dwi2mask',
                BrainMask(
                    out_file='brain_mask.nii.gz'),
                inputs={
                    'in_file': ('preproc', nifti_gz_format)},
                connect={
                    'grad_fsl': (grad_fsl, 'out')},
                outputs={
                    'out_file': ('brain_mask', nifti_gz_format)},
                requirements=[mrtrix_req.v('3.0rc3')])

        else:
            pipeline = super(DmriStudy, self).brain_extraction_pipeline(
                **name_maps)
        return pipeline
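A minimal standalone sketch of the mask step using nipype's mrtrix3.BrainMask (assumed equivalent to the BrainMask interface used above, wrapping the same 'dwi2mask' command; paths are placeholders):

# Standalone sketch of the brain-mask step; paths are hypothetical.
from nipype.interfaces.mrtrix3 import BrainMask

mask = BrainMask()
mask.inputs.in_file = 'preproc.nii.gz'
mask.inputs.grad_fsl = ('dwi.bvec', 'dwi.bval')
mask.inputs.out_file = 'brain_mask.nii.gz'
# result = mask.run()  # wraps the MRtrix 'dwi2mask' command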
Example #7
File: dmri.py Project: amrka/banana
    def bias_correct_pipeline(self, **name_maps):  # @UnusedVariable @IgnorePep8
        """
        Corrects B1 field inhomogeneities
        """

#             inputs=[FilesetSpec('preproc', nifti_gz_format),
#                     FilesetSpec('brain_mask', nifti_gz_format),
#                     FilesetSpec('grad_dirs', fsl_bvecs_format),
#                     FilesetSpec('bvalues', fsl_bvals_format)],
#             outputs=[FilesetSpec('bias_correct', nifti_gz_format)],

        bias_method = self.parameter('bias_correct_method')
        pipeline = self.new_pipeline(
            name='bias_correct',
            desc="Corrects for B1 field inhomogeneity",
            references=[fast_cite,
                        (n4_cite if bias_method == 'ants' else fsl_cite)],
            name_maps=name_maps)
        # Create bias correct node
        bias_correct = pipeline.add(
            "bias_correct", DWIBiasCorrect(),
            requirements=(
                [mrtrix_req.v('3.0rc3')] +
                [ants_req.v('2.0')
                 if bias_method == 'ants' else fsl_req.v('5.0.9')]))
        bias_correct.inputs.method = bias_method
        # Gradient merge node
        fsl_grads = pipeline.add(
            "fsl_grads",
            MergeTuple(2))
        # Connect nodes
        pipeline.connect(fsl_grads, 'out', bias_correct, 'grad_fsl')
        # Connect to inputs
        pipeline.connect_input('grad_dirs', fsl_grads, 'in1')
        pipeline.connect_input('bvalues', fsl_grads, 'in2')
        pipeline.connect_input('preproc', bias_correct, 'in_file')
        pipeline.connect_input('brain_mask', bias_correct, 'mask')
        # Connect to outputs
        pipeline.connect_output('bias_correct', bias_correct, 'out_file')
        # Check inputs/output are connected
        return pipeline
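A minimal standalone sketch of the bias-field correction, assuming nipype's bundled mrtrix3.DWIBiasCorrect (which selects the algorithm with use_ants/use_fsl flags rather than the 'method' trait set above; paths are placeholders):

# Standalone sketch of the B1 bias correction; paths are hypothetical.
from nipype.interfaces.mrtrix3 import DWIBiasCorrect

bias = DWIBiasCorrect()
bias.inputs.in_file = 'preproc.nii.gz'
bias.inputs.in_mask = 'brain_mask.nii.gz'
bias.inputs.grad_fsl = ('dwi.bvec', 'dwi.bval')
bias.inputs.use_ants = True                     # N4 correction via ANTs
# result = bias.run()  # wraps the MRtrix 'dwibiascorrect' script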
Example #8
File: dmri.py Project: amrka/banana
    def global_tracking_pipeline(self, **name_maps):

#         inputs=[FilesetSpec('fod', mrtrix_format),
#                 FilesetSpec('bias_correct', nifti_gz_format),
#                 FilesetSpec('brain_mask', nifti_gz_format),
#                 FilesetSpec('wm_response', text_format),
#                 FilesetSpec('grad_dirs', fsl_bvecs_format),
#                 FilesetSpec('bvalues', fsl_bvals_format)],
#         outputs=[FilesetSpec('global_tracks', mrtrix_track_format)],

        pipeline = self.new_pipeline(
            name='global_tracking',
            desc="Performs whole-brain global tractography",
            references=[mrtrix_cite],
            name_maps=name_maps)
        tck = pipeline.add(
            'tracking',
            Tractography())
        tck.inputs.n_tracks = self.parameter('num_global_tracks')
        tck.inputs.cutoff = self.parameter(
            'global_tracks_cutoff')
        mask = pipeline.add(
            'mask',
            DWI2Mask())
        # Add gradients to input image
        fsl_grads = pipeline.add(
            "fsl_grads",
            MergeTuple(2))
        pipeline.connect(fsl_grads, 'out', mask, 'grad_fsl')
        pipeline.connect(mask, 'out_file', tck, 'seed_image')
        pipeline.connect_input('wm_odf', tck, 'in_file')
        pipeline.connect_input('bias_correct', mask, 'in_file')
        pipeline.connect_input('grad_dirs', fsl_grads, 'in1')
        pipeline.connect_input('bvalues', fsl_grads, 'in2')
        pipeline.connect_output('global_tracks', tck, 'out_file')
        return pipeline
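A minimal standalone sketch of the tracking step using nipype's mrtrix3.Tractography (assumed to expose the same in_file/seed_image/n_tracks/cutoff inputs connected above; paths and parameter values are placeholders):

# Standalone sketch of whole-brain tractography; paths and values are hypothetical.
from nipype.interfaces.mrtrix3 import Tractography

tck = Tractography()
tck.inputs.in_file = 'wm_odf.mif'               # FOD image to track through
tck.inputs.seed_image = 'brain_mask.nii.gz'     # seed everywhere inside the mask
tck.inputs.n_tracks = 100000                    # placeholder for 'num_global_tracks'
tck.inputs.cutoff = 0.1                         # placeholder for 'global_tracks_cutoff'
tck.inputs.out_file = 'global_tracks.tck'
# result = tck.run()  # wraps the MRtrix 'tckgen' command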
Example #9
File: dmri.py Project: amrka/banana
    def intensity_normalisation_pipeline(self, **name_maps):

#             inputs=[FilesetSpec('bias_correct', nifti_gz_format),
#                     FilesetSpec('brain_mask', nifti_gz_format),
#                     FilesetSpec('grad_dirs', fsl_bvecs_format),
#                     FilesetSpec('bvalues', fsl_bvals_format)],
#             outputs=[FilesetSpec('norm_intensity', mrtrix_format),
#                      FilesetSpec('norm_intens_fa_template', mrtrix_format,
#                                  frequency='per_study'),
#                      FilesetSpec('norm_intens_wm_mask', mrtrix_format,
#                                  frequency='per_study')],
        pipeline = self.new_pipeline(
            name='intensity_normalization',
            desc="Normalises the DWI intensity across the study",
            references=[mrtrix_cite],
            name_maps=name_maps)
        # Convert from nifti to mrtrix format
        grad_merge = pipeline.add("grad_merge", MergeTuple(2))
        mrconvert = pipeline.add('mrconvert', MRConvert())
        mrconvert.inputs.out_ext = '.mif'
        # Set up join nodes
        fields = ['dwis', 'masks', 'subject_ids', 'visit_ids']
        join_subjects = pipeline.add(
            'join_subjects',
            IdentityInterface(fields),
            joinsource=self.SUBJECT_ID,
            joinfield=fields)
        join_visits = pipeline.add(
            'join_visits',
            Chain(fields),
            joinsource=self.VISIT_ID,
            joinfield=fields)
        # Set up expand nodes
        select = pipeline.add(
            'expand', SelectSession())
        # Intensity normalization
        intensity_norm = pipeline.add(
            'dwiintensitynorm', DWIIntensityNorm())
        # Connect inputs
        pipeline.connect_input('bias_correct', mrconvert, 'in_file')
        pipeline.connect_input('grad_dirs', grad_merge, 'in1')
        pipeline.connect_input('bvalues', grad_merge, 'in2')
        pipeline.connect_subject_id(join_subjects, 'subject_ids')
        pipeline.connect_visit_id(join_subjects, 'visit_ids')
        pipeline.connect_subject_id(select, 'subject_id')
        pipeline.connect_visit_id(select, 'visit_id')
        pipeline.connect_input('brain_mask', join_subjects, 'masks')
        # Internal connections
        pipeline.connect(grad_merge, 'out', mrconvert, 'grad_fsl')
        pipeline.connect(mrconvert, 'out_file', join_subjects, 'dwis')
        pipeline.connect(join_subjects, 'dwis', join_visits, 'dwis')
        pipeline.connect(join_subjects, 'masks', join_visits, 'masks')
        pipeline.connect(join_subjects, 'subject_ids', join_visits,
                         'subject_ids')
        pipeline.connect(join_subjects, 'visit_ids', join_visits,
                         'visit_ids')
        pipeline.connect(join_visits, 'dwis', intensity_norm, 'in_files')
        pipeline.connect(join_visits, 'masks', intensity_norm, 'masks')
        pipeline.connect(join_visits, 'subject_ids', select, 'subject_ids')
        pipeline.connect(join_visits, 'visit_ids', select, 'visit_ids')
        pipeline.connect(intensity_norm, 'out_files', select, 'items')
        # Connect outputs
        pipeline.connect_output('norm_intensity', select, 'item')
        pipeline.connect_output('norm_intens_fa_template', intensity_norm,
                                'fa_template')
        pipeline.connect_output('norm_intens_wm_mask', intensity_norm,
                                'wm_mask')
        return pipeline
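The joinsource/joinfield arguments above mirror nipype JoinNodes, which collect the per-subject and per-visit images into lists before the study-wide dwiintensitynorm step. A minimal plain-nipype sketch of that fan-in pattern (an assumed illustration, not taken from banana):

# Plain-nipype sketch of the join pattern: iterate over subjects, then gather
# the iterated results back into a single list. Subject IDs are hypothetical.
import nipype.pipeline.engine as pe
from nipype.interfaces.utility import IdentityInterface

subjects = pe.Node(IdentityInterface(fields=['subject_id']), name='subjects')
subjects.iterables = ('subject_id', ['sub01', 'sub02'])

join = pe.JoinNode(IdentityInterface(fields=['subject_ids']),
                   joinsource='subjects', joinfield=['subject_ids'],
                   name='join_subjects')

wf = pe.Workflow(name='join_demo')
wf.connect(subjects, 'subject_id', join, 'subject_ids')
# wf.run()  # join_subjects receives subject_ids == ['sub01', 'sub02']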
Example #10
File: dmri.py Project: amrka/banana
    def preprocess_pipeline(self, **name_maps):  # @UnusedVariable @IgnorePep8
        """
        Performs a series of FSL preprocessing steps, including Eddy and Topup

        Parameters
        ----------
        phase_dir : str{AP|LR|IS}
            The phase encode direction
        """

        # Determine whether we can correct for distortion, i.e. if reference
        # scans are provided
        # Include all references
        references = [fsl_cite, eddy_cite, topup_cite,
                      distort_correct_cite]
        if self.branch('preproc_denoise'):
            references.extend(dwidenoise_cites)

        pipeline = self.new_pipeline(
            name='preprocess',
            name_maps=name_maps,
            desc=(
                "Preprocess dMRI studies using distortion correction"),
            references=references)

        # Create nodes to convert the gradients to FSL format
        if self.input('magnitude').format == dicom_format:
            extract_grad = pipeline.add(
                "extract_grad",
                ExtractFSLGradients(),
                inputs={
                    'in_file': ('magnitude', dicom_format)},
                outputs={
                    'bvecs_file': ('grad_dirs', fsl_bvecs_format),
                    'bvals_file': ('bvalues', fsl_bvals_format)},
                requirements=[mrtrix_req.v('3.0rc3')])
            grad_fsl_kwargs = {
                'connect': {'in1': (extract_grad, 'bvecs_file'),
                            'in2': (extract_grad, 'bvals_file')}}
        elif self.provided('grad_dirs') and self.provided('bvalues'):
            grad_fsl_kwargs = {
                'inputs': {'in1': ('grad_dirs', fsl_bvecs_format),
                           'in2': ('bvalues', fsl_bvals_format)}}
        else:
            raise ArcanaDesignError(
                "Either input 'magnitude' image needs to be in DICOM format "
                "or gradient directions and b-values need to be explicitly "
                "provided to {}".format(self))

        # Gradient merge node
        grad_fsl = pipeline.add(
            "grad_fsl",
            MergeTuple(2),
            **grad_fsl_kwargs)

        # Denoise the dwi-scan
        if self.branch('preproc_denoise'):
            # Run denoising
            denoise = pipeline.add(
                'denoise',
                DWIDenoise(),
                inputs={
                    'in_file': ('magnitude', nifti_gz_format)},
                requirements=[mrtrix_req.v('3.0rc3')])

            # Calculate residual noise
            subtract_operands = pipeline.add(
                'subtract_operands',
                Merge(2),
                inputs={
                    'in1': ('magnitude', nifti_gz_format)},
                connect={
                    'in2': (denoise, 'noise')})

            pipeline.add(
                'subtract',
                MRCalc(
                    operation='subtract'),
                connect={
                    'operands': (subtract_operands, 'out')},
                outputs={
                    'out_file': ('noise_residual', mrtrix_format)},
                requirements=[mrtrix_req.v('3.0rc3')])

        # Preproc kwargs
        preproc_kwargs = {}

        if (self.provided('dwi_reference') or
                self.provided('reverse_phase')):
            # Extract b=0 volumes
            dwiextract = pipeline.add(
                'dwiextract',
                ExtractDWIorB0(
                    bzero=True,
                    out_ext='.nii.gz'),
                inputs={
                    'in_file': ('magnitude', dicom_format)},
                requirements=[mrtrix_req.v('3.0rc3')])

            # Get first b=0 from dwi b=0 volumes
            mrconvert = pipeline.add(
                "mrconvert",
                MRConvert(
                    coord=(3, 0)),
                connect={
                    'in_file': (dwiextract, 'out_file')},
                requirements=[mrtrix_req.v('3.0rc3')])

            # Concatenate extracted forward rpe with reverse rpe
            mrcat = pipeline.add(
                'mrcat',
                MRCat(),
                inputs={
                    'second_scan': ((
                        'dwi_reference' if self.provided('dwi_reference')
                        else 'reverse_phase'), mrtrix_format)},
                connect={
                    'first_scan': (mrconvert, 'out_file')},
                requirements=[mrtrix_req.v('3.0rc3')])

            # Create node to assign the right PED to the diffusion
            prep_dwi = pipeline.add(
                'prepare_dwi',
                PrepareDWI(),
                inputs={
                    'pe_dir': ('ped', float),
                    'ped_polarity': ('pe_angle', float)})

            preproc_kwargs['rpe_pair'] = True

            distortion_correction = True
            preproc_conns = {'connect': {'se_epi': (mrcat, 'out_file')}}
        else:
            distortion_correction = False
            preproc_kwargs['rpe_none'] = True
            preproc_conns = {}

        if self.parameter('preproc_pe_dir') is not None:
            preproc_kwargs['pe_dir'] = self.parameter('preproc_pe_dir')

        preproc = pipeline.add(
            'dwipreproc',
            DWIPreproc(
                no_clean_up=True,
                out_file_ext='.nii.gz',
                # FIXME: Need to determine this programmatically
                # eddy_parameters = '--data_is_shelled '
                temp_dir='dwipreproc_tempdir',
                **preproc_kwargs),
            connect={
                'grad_fsl': (grad_fsl, 'out')},
            outputs={
                'eddy_parameters': ('eddy_par', eddy_par_format)},
            requirements=[mrtrix_req.v('3.0rc3'), fsl_req.v('5.0.10')],
            wall_time=60,
            **preproc_conns)
        if self.branch('preproc_denoise'):
            pipeline.connect(denoise, 'out_file', preproc, 'in_file')
        else:
            pipeline.connect_input('magnitude', preproc, 'in_file',
                                   nifti_gz_format)
        if distortion_correction:
            pipeline.connect(prep_dwi, 'pe', preproc, 'pe_dir')

        # Create node to reorient preproc out_file
        pipeline.add(
            'fslreorient2std',
            fsl.utils.Reorient2Std(),
            connect={
                'in_file': (preproc, 'out_file')},
            outputs={
                'out_file': ('preproc', nifti_gz_format)},
            requirements=[fsl_req.v('5.0.9')])

        return pipeline
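A minimal standalone sketch of the optional denoising branch, assuming nipype's mrtrix3.DWIDenoise (whose 'noise' output is the estimated noise map that the Merge/MRCalc nodes above subtract from the raw scan to form 'noise_residual'; the input path is a placeholder):

# Standalone sketch of the denoising step; the input path is hypothetical.
from nipype.interfaces.mrtrix3 import DWIDenoise

denoise = DWIDenoise()
denoise.inputs.in_file = 'magnitude.nii.gz'     # raw DWI series
# result = denoise.run()          # wraps the MRtrix 'dwidenoise' command
# result.outputs.out_file         # denoised image (fed into dwipreproc above)
# result.outputs.noise            # noise map (used to compute the residual)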
Example #11
    def preprocess_pipeline(self, **name_maps):
        """
        Performs a series of FSL preprocessing steps, including Eddy and Topup

        Parameters
        ----------
        phase_dir : str{AP|LR|IS}
            The phase encode direction
        """

        # Determine whether we can correct for distortion, i.e. if reference
        # scans are provided
        # Include all references
        references = [fsl_cite, eddy_cite, topup_cite,
                      distort_correct_cite, n4_cite]
        if self.branch('preproc_denoise'):
            references.extend(dwidenoise_cites)

        pipeline = self.new_pipeline(
            name='preprocess',
            name_maps=name_maps,
            desc=(
                "Preprocess dMRI studies using distortion correction"),
            citations=references)

        # Create nodes to convert the gradients to FSL format
        if self.input('series').format == dicom_format:
            extract_grad = pipeline.add(
                "extract_grad",
                ExtractFSLGradients(),
                inputs={
                    'in_file': ('series', dicom_format)},
                outputs={
                    'grad_dirs': ('bvecs_file', fsl_bvecs_format),
                    'bvalues': ('bvals_file', fsl_bvals_format)},
                requirements=[mrtrix_req.v('3.0rc3')])
            grad_fsl_inputs = {'in1': (extract_grad, 'bvecs_file'),
                               'in2': (extract_grad, 'bvals_file')}
        elif self.provided('grad_dirs') and self.provided('bvalues'):
            grad_fsl_inputs = {'in1': ('grad_dirs', fsl_bvecs_format),
                               'in2': ('bvalues', fsl_bvals_format)}
        else:
            raise BananaUsageError(
                "Either the input 'series' image needs to be in DICOM format "
                "or gradient directions and b-values need to be explicitly "
                "provided to {}".format(self))

        # Gradient merge node
        grad_fsl = pipeline.add(
            "grad_fsl",
            MergeTuple(2),
            inputs=grad_fsl_inputs)

        gradients = (grad_fsl, 'out')

        # Optionally reorient the input series to the standard orientation
        if self.branch('reorient2std'):
            reorient = pipeline.add(
                'fslreorient2std',
                fsl.utils.Reorient2Std(
                    output_type='NIFTI_GZ'),
                inputs={
                    'in_file': ('series', nifti_gz_format)},
                requirements=[fsl_req.v('5.0.9')])
            reoriented = (reorient, 'out_file')
        else:
            reoriented = ('series', nifti_gz_format)

        # Denoise the dwi-scan
        if self.branch('preproc_denoise'):
            # Run denoising
            denoise = pipeline.add(
                'denoise',
                DWIDenoise(),
                inputs={
                    'in_file': reoriented},
                requirements=[mrtrix_req.v('3.0rc3')])

            # Calculate residual noise
            subtract_operands = pipeline.add(
                'subtract_operands',
                Merge(2),
                inputs={
                    'in1': reoriented,
                    'in2': (denoise, 'noise')})

            pipeline.add(
                'subtract',
                MRCalc(
                    operation='subtract'),
                inputs={
                    'operands': (subtract_operands, 'out')},
                outputs={
                    'noise_residual': ('out_file', mrtrix_image_format)},
                requirements=[mrtrix_req.v('3.0rc3')])
            denoised = (denoise, 'out_file')
        else:
            denoised = reoriented

        # Preproc kwargs
        preproc_kwargs = {}
        preproc_inputs = {'in_file': denoised,
                          'grad_fsl': gradients}

        if self.provided('reverse_phase'):

            if self.provided('magnitude', default_okay=False):
                dwi_reference = ('magnitude', mrtrix_image_format)
            else:
                # Extract b=0 volumes
                dwiextract = pipeline.add(
                    'dwiextract',
                    ExtractDWIorB0(
                        bzero=True,
                        out_ext='.nii.gz'),
                    inputs={
                        'in_file': denoised,
                        'fslgrad': gradients},
                    requirements=[mrtrix_req.v('3.0rc3')])

                # Get first b=0 from dwi b=0 volumes
                extract_first_b0 = pipeline.add(
                    "extract_first_vol",
                    MRConvert(
                        coord=(3, 0)),
                    inputs={
                        'in_file': (dwiextract, 'out_file')},
                    requirements=[mrtrix_req.v('3.0rc3')])

                dwi_reference = (extract_first_b0, 'out_file')

            # Concatenate extracted forward rpe with reverse rpe
            combined_images = pipeline.add(
                'combined_images',
                MRCat(),
                inputs={
                    'first_scan': dwi_reference,
                    'second_scan': ('reverse_phase', mrtrix_image_format)},
                requirements=[mrtrix_req.v('3.0rc3')])

            # Create node to assign the right PED to the diffusion
            prep_dwi = pipeline.add(
                'prepare_dwi',
                PrepareDWI(),
                inputs={
                    'pe_dir': ('ped', float),
                    'ped_polarity': ('pe_angle', float)})

            preproc_kwargs['rpe_pair'] = True

            distortion_correction = True
            preproc_inputs['se_epi'] = (combined_images, 'out_file')
        else:
            distortion_correction = False
            preproc_kwargs['rpe_none'] = True

        if self.parameter('preproc_pe_dir') is not None:
            preproc_kwargs['pe_dir'] = self.parameter('preproc_pe_dir')

        preproc = pipeline.add(
            'dwipreproc',
            DWIPreproc(
                no_clean_up=True,
                out_file_ext='.nii.gz',
                # FIXME: Need to determine this programmatically
                # eddy_parameters = '--data_is_shelled '
                temp_dir='dwipreproc_tempdir',
                **preproc_kwargs),
            inputs=preproc_inputs,
            outputs={
                'eddy_par': ('eddy_parameters', eddy_par_format)},
            requirements=[mrtrix_req.v('3.0rc3'), fsl_req.v('5.0.10')],
            wall_time=60)

        if distortion_correction:
            pipeline.connect(prep_dwi, 'pe', preproc, 'pe_dir')

        mask = pipeline.add(
            'dwi2mask',
            BrainMask(
                out_file='brainmask.nii.gz'),
            inputs={
                'in_file': (preproc, 'out_file'),
                'grad_fsl': gradients},
            requirements=[mrtrix_req.v('3.0rc3')])

        # Create bias correct node
        pipeline.add(
            "bias_correct",
            DWIBiasCorrect(
                method='ants'),
            inputs={
                'grad_fsl': gradients,  # internal
                'in_file': (preproc, 'out_file'),
                'mask': (mask, 'out_file')},
            outputs={
                'series_preproc': ('out_file', nifti_gz_format)},
            requirements=[mrtrix_req.v('3.0rc3'), ants_req.v('2.0')])

        return pipeline
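A minimal standalone sketch of the 'reorient2std' branch, using nipype's fsl.utils.Reorient2Std as in the pipeline above (the input path is a placeholder):

# Standalone sketch of the reorientation step; the input path is hypothetical.
from nipype.interfaces import fsl

reorient = fsl.utils.Reorient2Std()
reorient.inputs.in_file = 'series.nii.gz'
reorient.inputs.output_type = 'NIFTI_GZ'
# result = reorient.run()  # wraps FSL's 'fslreorient2std'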