예제 #1
0
def register_ants(moving_image, atlas, output_image):
    """Register *moving_image* onto *atlas* with a single-stage ANTs
    Translation transform and write the warped result to *output_image*.

    Parameters
    ----------
    moving_image : str
        Path of the image to be registered.
    atlas : str
        Path of the fixed/reference image.
    output_image : str
        Path where the warped moving image is written.
    """
    reg = Registration()

    reg.inputs.fixed_image = atlas
    reg.inputs.moving_image = moving_image
    # Bug fix: the prefix used to be assigned twice ('transform', then
    # "stx-152"); only the last assignment took effect, so the dead first
    # assignment was removed.
    reg.inputs.output_transform_prefix = "stx-152"
    reg.inputs.output_warped_image = output_image
    reg.inputs.transforms = ['Translation']
    reg.inputs.transform_parameters = [(0.1, )]
    # One stage with three resolution levels -> one inner list of three
    # iteration counts (redundant outer parentheses removed).
    reg.inputs.number_of_iterations = [[10000, 111110, 11110]]
    reg.inputs.dimension = 3
    reg.inputs.write_composite_transform = True
    reg.inputs.collapse_output_transforms = False
    reg.inputs.metric = ['Mattes']
    reg.inputs.metric_weight = [1]
    reg.inputs.radius_or_number_of_bins = [32]
    reg.inputs.sampling_strategy = ['Regular']
    reg.inputs.sampling_percentage = [0.3]
    reg.inputs.convergence_threshold = [1.e-6]
    reg.inputs.convergence_window_size = [20]
    reg.inputs.smoothing_sigmas = [[4, 2, 1]]
    reg.inputs.sigma_units = ['vox']
    reg.inputs.shrink_factors = [[32, 16, 4]]
    reg.inputs.use_estimate_learning_rate_once = [True]
    reg.inputs.use_histogram_matching = [False]
    # Initialize by aligning the centers of mass of the two images.
    reg.inputs.initial_moving_transform_com = True

    reg.run()
예제 #2
0
파일: test_base.py 프로젝트: dalejn/nipype
def test_BaseInterface_load_save_inputs():
    """Round-trip interface inputs through JSON (nose-style generator test).

    Exercises ``save_inputs_to_json``/``load_inputs_from_json`` and the
    ``from_file`` constructor argument, first on a minimal BaseInterface
    subclass and then on the full ANTs Registration interface.
    """
    tmp_dir = tempfile.mkdtemp()
    tmp_json = os.path.join(tmp_dir, 'settings.json')

    # Minimal spec with four traits; 'input2' is deliberately never set,
    # so it must not appear in the serialized dict.
    class InputSpec(nib.TraitedSpec):
        input1 = nib.traits.Int()
        input2 = nib.traits.Float()
        input3 = nib.traits.Bool()
        input4 = nib.traits.Str()

    class DerivedInterface(nib.BaseInterface):
        input_spec = InputSpec

        def __init__(self, **inputs):
            super(DerivedInterface, self).__init__(**inputs)

    inputs_dict = {'input1': 12, 'input3': True, 'input4': 'some string'}
    bif = DerivedInterface(**inputs_dict)
    bif.save_inputs_to_json(tmp_json)
    # Loading into a fresh instance restores exactly the saved values.
    bif2 = DerivedInterface()
    bif2.load_inputs_from_json(tmp_json)
    yield assert_equal, bif2.inputs.get_traitsfree(), inputs_dict

    # The same JSON file can be consumed via the from_file constructor arg.
    bif3 = DerivedInterface(from_file=tmp_json)
    yield assert_equal, bif3.inputs.get_traitsfree(), inputs_dict

    inputs_dict2 = inputs_dict.copy()
    inputs_dict2.update({'input4': 'some other string'})
    # Explicit keyword arguments take precedence over file-loaded values.
    bif4 = DerivedInterface(from_file=tmp_json, input4=inputs_dict2['input4'])
    yield assert_equal, bif4.inputs.get_traitsfree(), inputs_dict2

    # overwrite=False keeps inputs that were already set ...
    bif5 = DerivedInterface(input4=inputs_dict2['input4'])
    bif5.load_inputs_from_json(tmp_json, overwrite=False)
    yield assert_equal, bif5.inputs.get_traitsfree(), inputs_dict2

    # ... while the default (overwrite) replaces them with the file values.
    bif6 = DerivedInterface(input4=inputs_dict2['input4'])
    bif6.load_inputs_from_json(tmp_json)
    yield assert_equal, bif6.inputs.get_traitsfree(), inputs_dict

    # test get hashval in a complex interface
    from nipype.interfaces.ants import Registration
    settings = example_data(
        example_data('smri_ants_registration_settings.json'))
    with open(settings) as setf:
        data_dict = json.load(setf)

    # Loading the packaged settings must reproduce the JSON dict exactly
    # (check_dict returns the differing entries; expect none).
    tsthash = Registration()
    tsthash.load_inputs_from_json(settings)
    yield assert_equal, {}, check_dict(data_dict,
                                       tsthash.inputs.get_traitsfree())

    tsthash2 = Registration(from_file=settings)
    yield assert_equal, {}, check_dict(data_dict,
                                       tsthash2.inputs.get_traitsfree())

    # The timestamp-based hash of the loaded inputs is expected to be stable.
    _, hashvalue = tsthash.inputs.get_hashval(hash_method='timestamp')
    yield assert_equal, 'ec5755e07287e04a4b409e03b77a517c', hashvalue
예제 #3
0
파일: test_base.py 프로젝트: Conxz/nipype
def test_BaseInterface_load_save_inputs():
    """Round-trip interface inputs through JSON (nose-style generator test).

    Near-verbatim variant of the same test from another project fork; only
    line wrapping differs from the previous snippet.
    """
    tmp_dir = tempfile.mkdtemp()
    tmp_json = os.path.join(tmp_dir, 'settings.json')

    # 'input2' is never assigned, so it must be absent from the saved dict.
    class InputSpec(nib.TraitedSpec):
        input1 = nib.traits.Int()
        input2 = nib.traits.Float()
        input3 = nib.traits.Bool()
        input4 = nib.traits.Str()

    class DerivedInterface(nib.BaseInterface):
        input_spec = InputSpec

        def __init__(self, **inputs):
            super(DerivedInterface, self).__init__(**inputs)

    inputs_dict = {'input1': 12, 'input3': True,
                   'input4': 'some string'}
    bif = DerivedInterface(**inputs_dict)
    bif.save_inputs_to_json(tmp_json)
    # load_inputs_from_json on a fresh instance restores the saved values.
    bif2 = DerivedInterface()
    bif2.load_inputs_from_json(tmp_json)
    yield assert_equal, bif2.inputs.get_traitsfree(), inputs_dict

    # The from_file constructor argument reads the same JSON file.
    bif3 = DerivedInterface(from_file=tmp_json)
    yield assert_equal, bif3.inputs.get_traitsfree(), inputs_dict

    inputs_dict2 = inputs_dict.copy()
    inputs_dict2.update({'input4': 'some other string'})
    # Keyword arguments override file-loaded values.
    bif4 = DerivedInterface(from_file=tmp_json, input4=inputs_dict2['input4'])
    yield assert_equal, bif4.inputs.get_traitsfree(), inputs_dict2

    # overwrite=False preserves already-set inputs.
    bif5 = DerivedInterface(input4=inputs_dict2['input4'])
    bif5.load_inputs_from_json(tmp_json, overwrite=False)
    yield assert_equal, bif5.inputs.get_traitsfree(), inputs_dict2

    # Default behavior overwrites already-set inputs with file values.
    bif6 = DerivedInterface(input4=inputs_dict2['input4'])
    bif6.load_inputs_from_json(tmp_json)
    yield assert_equal, bif6.inputs.get_traitsfree(), inputs_dict

    # test get hashval in a complex interface
    from nipype.interfaces.ants import Registration
    settings = example_data(example_data('smri_ants_registration_settings.json'))
    with open(settings) as setf:
        data_dict = json.load(setf)

    # Loaded Registration inputs must match the JSON dict exactly
    # (check_dict reports differing entries; expect none).
    tsthash = Registration()
    tsthash.load_inputs_from_json(settings)
    yield assert_equal, {}, check_dict(data_dict, tsthash.inputs.get_traitsfree())

    tsthash2 = Registration(from_file=settings)
    yield assert_equal, {}, check_dict(data_dict, tsthash2.inputs.get_traitsfree())

    # Timestamp-based hash of the loaded inputs is expected to be stable.
    _, hashvalue = tsthash.inputs.get_hashval(hash_method='timestamp')
    yield assert_equal, 'ec5755e07287e04a4b409e03b77a517c', hashvalue
예제 #4
0
def test_BaseInterface_load_save_inputs(tmpdir):
    """Round-trip interface inputs through JSON (pytest version).

    Same scenario as the yield-based variants above, rewritten with plain
    asserts and pytest's ``tmpdir`` fixture.
    """
    tmp_json = tmpdir.join("settings.json").strpath

    # 'input2' is never assigned, so it must be absent from the saved dict.
    class InputSpec(nib.TraitedSpec):
        input1 = nib.traits.Int()
        input2 = nib.traits.Float()
        input3 = nib.traits.Bool()
        input4 = nib.traits.Str()

    class DerivedInterface(nib.BaseInterface):
        input_spec = InputSpec

        def __init__(self, **inputs):
            super(DerivedInterface, self).__init__(**inputs)

    inputs_dict = {"input1": 12, "input3": True, "input4": "some string"}
    bif = DerivedInterface(**inputs_dict)
    bif.save_inputs_to_json(tmp_json)
    # Loading into a fresh instance restores exactly the saved values.
    bif2 = DerivedInterface()
    bif2.load_inputs_from_json(tmp_json)
    assert bif2.inputs.get_traitsfree() == inputs_dict

    # The from_file constructor argument reads the same JSON file.
    bif3 = DerivedInterface(from_file=tmp_json)
    assert bif3.inputs.get_traitsfree() == inputs_dict

    inputs_dict2 = inputs_dict.copy()
    inputs_dict2.update({"input4": "some other string"})
    # Keyword arguments override file-loaded values.
    bif4 = DerivedInterface(from_file=tmp_json, input4=inputs_dict2["input4"])
    assert bif4.inputs.get_traitsfree() == inputs_dict2

    # overwrite=False preserves already-set inputs.
    bif5 = DerivedInterface(input4=inputs_dict2["input4"])
    bif5.load_inputs_from_json(tmp_json, overwrite=False)
    assert bif5.inputs.get_traitsfree() == inputs_dict2

    # Default behavior overwrites already-set inputs with file values.
    bif6 = DerivedInterface(input4=inputs_dict2["input4"])
    bif6.load_inputs_from_json(tmp_json)
    assert bif6.inputs.get_traitsfree() == inputs_dict

    # test get hashval in a complex interface
    from nipype.interfaces.ants import Registration

    settings = example_data(
        example_data("smri_ants_registration_settings.json"))
    with open(settings) as setf:
        data_dict = json.load(setf)

    # Loaded Registration inputs must match the JSON dict exactly
    # (check_dict reports differing entries; expect none).
    tsthash = Registration()
    tsthash.load_inputs_from_json(settings)
    assert {} == check_dict(data_dict, tsthash.inputs.get_traitsfree())

    tsthash2 = Registration(from_file=settings)
    assert {} == check_dict(data_dict, tsthash2.inputs.get_traitsfree())

    # Timestamp-based hash of the loaded inputs; the expected value differs
    # between forks because the packaged settings file changed.
    _, hashvalue = tsthash.inputs.get_hashval(hash_method="timestamp")
    assert hashvalue == "e35bf07fea8049cc02de9235f85e8903"
예제 #5
0
def test_BaseInterface_load_save_inputs(tmpdir):
    """Inputs saved to JSON can be restored via ``load_inputs_from_json``,
    via the ``from_file`` constructor argument, and merged with keyword
    overrides; a complex interface hashes to a stable value."""
    settings_file = tmpdir.join('settings.json').strpath

    class InputSpec(nib.TraitedSpec):
        input1 = nib.traits.Int()
        input2 = nib.traits.Float()
        input3 = nib.traits.Bool()
        input4 = nib.traits.Str()

    class DerivedInterface(nib.BaseInterface):
        input_spec = InputSpec

        def __init__(self, **inputs):
            super(DerivedInterface, self).__init__(**inputs)

    # Save a known set of inputs ('input2' intentionally left unset).
    original = {'input1': 12, 'input3': True, 'input4': 'some string'}
    DerivedInterface(**original).save_inputs_to_json(settings_file)

    # Explicit load into a fresh instance restores the saved values.
    loaded = DerivedInterface()
    loaded.load_inputs_from_json(settings_file)
    assert loaded.inputs.get_traitsfree() == original

    # The from_file constructor argument reads the same JSON file.
    from_file_iface = DerivedInterface(from_file=settings_file)
    assert from_file_iface.inputs.get_traitsfree() == original

    # Keyword arguments override values loaded from file.
    overridden = dict(original, input4='some other string')
    merged = DerivedInterface(from_file=settings_file,
                              input4=overridden['input4'])
    assert merged.inputs.get_traitsfree() == overridden

    # overwrite=False keeps inputs that were already set ...
    keep_existing = DerivedInterface(input4=overridden['input4'])
    keep_existing.load_inputs_from_json(settings_file, overwrite=False)
    assert keep_existing.inputs.get_traitsfree() == overridden

    # ... while the default behavior replaces them with the file values.
    replaced = DerivedInterface(input4=overridden['input4'])
    replaced.load_inputs_from_json(settings_file)
    assert replaced.inputs.get_traitsfree() == original

    # test get hashval in a complex interface
    from nipype.interfaces.ants import Registration
    settings = example_data(
        example_data('smri_ants_registration_settings.json'))
    with open(settings) as setf:
        data_dict = json.load(setf)

    tsthash = Registration()
    tsthash.load_inputs_from_json(settings)
    assert check_dict(data_dict, tsthash.inputs.get_traitsfree()) == {}

    tsthash2 = Registration(from_file=settings)
    assert check_dict(data_dict, tsthash2.inputs.get_traitsfree()) == {}

    _, hashvalue = tsthash.inputs.get_hashval(hash_method='timestamp')
    assert '8562a5623562a871115eb14822ee8d02' == hashvalue
예제 #6
0
def test_BaseInterface_load_save_inputs(tmpdir):
    """Round-trip interface inputs through JSON (pytest version; verbatim
    duplicate of the preceding snippet from another fork)."""
    tmp_json = tmpdir.join('settings.json').strpath

    # 'input2' is never assigned, so it must be absent from the saved dict.
    class InputSpec(nib.TraitedSpec):
        input1 = nib.traits.Int()
        input2 = nib.traits.Float()
        input3 = nib.traits.Bool()
        input4 = nib.traits.Str()

    class DerivedInterface(nib.BaseInterface):
        input_spec = InputSpec

        def __init__(self, **inputs):
            super(DerivedInterface, self).__init__(**inputs)

    inputs_dict = {'input1': 12, 'input3': True, 'input4': 'some string'}
    bif = DerivedInterface(**inputs_dict)
    bif.save_inputs_to_json(tmp_json)
    # Loading into a fresh instance restores exactly the saved values.
    bif2 = DerivedInterface()
    bif2.load_inputs_from_json(tmp_json)
    assert bif2.inputs.get_traitsfree() == inputs_dict

    # The from_file constructor argument reads the same JSON file.
    bif3 = DerivedInterface(from_file=tmp_json)
    assert bif3.inputs.get_traitsfree() == inputs_dict

    inputs_dict2 = inputs_dict.copy()
    inputs_dict2.update({'input4': 'some other string'})
    # Keyword arguments override file-loaded values.
    bif4 = DerivedInterface(from_file=tmp_json, input4=inputs_dict2['input4'])
    assert bif4.inputs.get_traitsfree() == inputs_dict2

    # overwrite=False preserves already-set inputs.
    bif5 = DerivedInterface(input4=inputs_dict2['input4'])
    bif5.load_inputs_from_json(tmp_json, overwrite=False)
    assert bif5.inputs.get_traitsfree() == inputs_dict2

    # Default behavior overwrites already-set inputs with file values.
    bif6 = DerivedInterface(input4=inputs_dict2['input4'])
    bif6.load_inputs_from_json(tmp_json)
    assert bif6.inputs.get_traitsfree() == inputs_dict

    # test get hashval in a complex interface
    from nipype.interfaces.ants import Registration
    settings = example_data(
        example_data('smri_ants_registration_settings.json'))
    with open(settings) as setf:
        data_dict = json.load(setf)

    # Loaded Registration inputs must match the JSON dict exactly.
    tsthash = Registration()
    tsthash.load_inputs_from_json(settings)
    assert {} == check_dict(data_dict, tsthash.inputs.get_traitsfree())

    tsthash2 = Registration(from_file=settings)
    assert {} == check_dict(data_dict, tsthash2.inputs.get_traitsfree())

    # Timestamp-based hash of the loaded inputs is expected to be stable.
    _, hashvalue = tsthash.inputs.get_hashval(hash_method='timestamp')
    assert '8562a5623562a871115eb14822ee8d02' == hashvalue
 def reg_run(fixed_image, moving_image, output_transform_prefix,
             output_warped_image, ants_thread_count):
     """Run a single-stage 2-D SyN registration with ANTs.

     Parameters
     ----------
     fixed_image, moving_image : str
         Paths of the images to align.
     output_transform_prefix : str
         Prefix for the transform output files.
     output_warped_image : str
         Path for the warped moving image.
     ants_thread_count : int
         Number of threads ANTs may use.
     """
     # os.environ['PATH']+=':/path_to_antsbin'
     reg = Registration()
     reg.inputs.fixed_image = fixed_image
     reg.inputs.moving_image = moving_image
     reg.inputs.output_transform_prefix = output_transform_prefix
     reg.inputs.transforms = ['SyN']
     reg.inputs.transform_parameters = [(0.01, )]
     # Six resolution levels for the single SyN stage.
     reg.inputs.number_of_iterations = [[200, 200, 200, 200, 150, 50]]
     # reg.inputs.number_of_iterations = [[50,50,50,40,30,20]]
     reg.inputs.dimension = 2
     reg.inputs.num_threads = ants_thread_count
     reg.inputs.metric = ['Mattes']
     # Default (value ignored currently by ANTs)
     reg.inputs.metric_weight = [1]
     reg.inputs.radius_or_number_of_bins = [32]
     reg.inputs.sampling_strategy = ['Regular']
     reg.inputs.sampling_percentage = [1.0]  # 0.3]
     reg.inputs.convergence_threshold = [1.e-8]
     reg.inputs.convergence_window_size = [10]
     reg.inputs.smoothing_sigmas = [[6, 5, 4, 3, 2, 1]]
     reg.inputs.sigma_units = ['vox']
     reg.inputs.shrink_factors = [[6, 5, 4, 3, 2, 1]]
     reg.inputs.use_estimate_learning_rate_once = [True]
     reg.inputs.use_histogram_matching = [True]  # This is the default
     reg.inputs.output_warped_image = output_warped_image
     # NOTE(review): the interface is deep-copied and the copy is run;
     # accessing .cmdline discards its value -- presumably kept for its
     # input-validation side effect. Confirm the copy/run dance is needed.
     reg1 = copy.deepcopy(reg)
     reg1.cmdline
     reg1.run()
def calculateRigidTransforms(frame1, frame2, saveFn):
    """
    Given the pair of images, calculate the rigid transformation from frame2
    to frame1 and save it using the saveFn prefix.

    Inputs:
    - frame1: image at timepoint n
    - frame2: image at timepoint n+1
    - saveFn: the prefix filename where the transform will be saved
    """
    reg = Registration()

    # Apply every registration setting from one ordered mapping so the
    # full parameter set is visible at a glance.
    settings = [
        ('fixed_image', frame1),
        ('moving_image', frame2),
        ('output_transform_prefix', saveFn),
        ('interpolation', 'NearestNeighbor'),
        ('transforms', ['Rigid']),
        ('transform_parameters', [(0.1, )]),
        ('number_of_iterations', [[100, 20]]),
        ('dimension', 3),
        ('write_composite_transform', False),
        ('collapse_output_transforms', True),
        ('initialize_transforms_per_stage', False),
        ('metric', ['CC']),
        ('metric_weight', [1]),
        ('radius_or_number_of_bins', [5]),
        ('sampling_strategy', ['Random']),
        ('sampling_percentage', [0.05]),
        ('convergence_threshold', [1.e-2]),
        ('convergence_window_size', [20]),
        ('smoothing_sigmas', [[2, 1]]),
        ('sigma_units', ['vox']),
        ('shrink_factors', [[2, 1]]),
        ('use_estimate_learning_rate_once', [True]),
        ('use_histogram_matching', [True]),
        ('output_warped_image', False),
        ('num_threads', 50),
    ]
    for trait_name, trait_value in settings:
        setattr(reg.inputs, trait_name, trait_value)

    # run the registration
    reg.run()
예제 #9
0
def rigidRegFunc(path, fileName, input_fixed, input_moving):
    """Rigidly register *input_moving* to *input_fixed* with ANTs.

    The warped image is written to ``path + 'rigid_reg_' + fileName`` and
    the transform files are prefixed with *path*.

    Parameters
    ----------
    path : str
        Working directory; also used as the transform output prefix.
    fileName : str
        Base name appended to ``'rigid_reg_'`` for the warped image.
    input_fixed, input_moving : str
        Paths of the fixed and moving images.
    """
    os.chdir(path)
    reg = Registration()
    # ants-registration parameters:
    reg.inputs.fixed_image = input_fixed  # fixed image
    reg.inputs.moving_image = input_moving  # moving image
    reg.inputs.output_transform_prefix = path  # file path
    reg.inputs.transforms = ['Rigid']  # list of transformations
    reg.inputs.transform_parameters = [(.5,)]
    reg.inputs.number_of_iterations = [[40, 20, 10]]
    reg.inputs.dimension = 3
    reg.inputs.initial_moving_transform_com = True
    reg.inputs.output_inverse_warped_image = True
    reg.inputs.write_composite_transform = True
    reg.inputs.collapse_output_transforms = True
    reg.inputs.metric = ['MI']  # mutual information
    reg.inputs.metric_weight = [1]
    reg.inputs.radius_or_number_of_bins = [64]
    reg.inputs.sampling_strategy = ['Regular']
    reg.inputs.sampling_percentage = [0.5]
    reg.inputs.terminal_output = 'allatonce'
    reg.inputs.convergence_threshold = [1.e-6]
    reg.inputs.convergence_window_size = [10]
    reg.inputs.smoothing_sigmas = [[3, 1, 0]]
    reg.inputs.sigma_units = ['vox']
    reg.inputs.shrink_factors = [[2, 1, 0]]
    reg.inputs.use_estimate_learning_rate_once = [True]
    reg.inputs.use_histogram_matching = [True]
    # NOTE(review): this interface-level setting conflicts with the
    # 'allatonce' value assigned to reg.inputs.terminal_output above --
    # confirm which of the two is intended.
    reg.terminal_output = 'none'
    reg.inputs.num_threads = 4
    reg.inputs.winsorize_lower_quantile = 0.025
    reg.inputs.winsorize_upper_quantile = 0.95
    # Bug fix: output_warped_image was assigned True twice before this
    # final path assignment; only the last value takes effect, so the two
    # dead assignments (and a no-op `reg.cmdline` access) were removed.
    reg.inputs.output_warped_image = path + 'rigid_reg_' + fileName
    reg.run()
    return
예제 #10
0
    def __init__(self,
                 moving_image=['path'],
                 fixed_image=['path'],
                 metric=['CC', 'MeanSquares', 'Demons'],
                 transforms=['Affine', 'BSplineSyN'],
                 shrink_factors=[[2, 1], [3, 2, 1]],
                 smoothing_sigmas=[[1, 0], [2, 1, 0]],
                 metric_weight=[1.0], **options):
        """Configure and immediately run an ANTs Registration.

        The run result is stored on ``self.res``.  Any extra keyword
        arguments are forwarded verbatim onto ``reg.inputs``.
        """
        from nipype.interfaces.ants import Registration

        reg = Registration()
        reg.inputs.moving_image = moving_image
        reg.inputs.fixed_image = fixed_image
        reg.inputs.metric = metric
        reg.inputs.metric_weight = metric_weight
        reg.inputs.transforms = transforms
        reg.inputs.shrink_factors = shrink_factors
        reg.inputs.smoothing_sigmas = smoothing_sigmas
        # Pass any remaining options straight through to the interface.
        for trait_name, trait_value in options.items():
            setattr(reg.inputs, trait_name, trait_value)
        self.res = reg.run()
예제 #11
0
def make_w_coreg_3T_ants():
    """Build a workflow coregistering a mean functional image (moving) to a
    T1w anatomical (fixed) with a two-stage Rigid+Affine ANTs registration,
    exposing the forward transforms as 'mat_func2struct'.

    NOTE(review): the function name says 3T but the workflow is named
    'coreg_7T' -- confirm which is intended.
    """
    w = Workflow('coreg_7T')

    # Workflow inputs: anatomical (fixed) and mean functional (moving).
    n_in = Node(IdentityInterface(fields=[
        'T1w',
        'mean',
    ]), name='input')

    n_out = Node(IdentityInterface(fields=[
        'mat_func2struct',
    ]),
                 name='output')

    # Two-stage MI-driven registration: Rigid first, then Affine.
    # Each per-stage input is a two-element list (one entry per stage).
    n_coreg = Node(Registration(), name='antsReg')
    n_coreg.inputs.winsorize_lower_quantile = 0.005
    n_coreg.inputs.winsorize_upper_quantile = 0.995
    n_coreg.inputs.dimension = 3
    n_coreg.inputs.float = True
    n_coreg.inputs.transforms = ['Rigid', 'Affine']
    n_coreg.inputs.transform_parameters = [[
        0.1,
    ], [
        0.1,
    ]]
    n_coreg.inputs.metric = ['MI', 'MI']
    n_coreg.inputs.metric_weight = [1, 1]
    n_coreg.inputs.radius_or_number_of_bins = [32, 32]
    n_coreg.inputs.sampling_strategy = ['Regular', 'Regular']
    n_coreg.inputs.sampling_percentage = [0.25, 0.25]
    n_coreg.inputs.sigma_units = ['vox', 'vox']
    n_coreg.inputs.convergence_threshold = [1e-6, 1e-6]
    # Rigid stage uses 3 resolution levels; Affine uses 2.
    n_coreg.inputs.smoothing_sigmas = [[4, 2, 0], [1, 0]]
    n_coreg.inputs.shrink_factors = [[5, 3, 1], [3, 1]]
    n_coreg.inputs.convergence_window_size = [20, 10]
    n_coreg.inputs.number_of_iterations = [[1000, 500, 200], [500, 200]]
    # n_coreg.inputs.restrict_deformation = [[], [1, 1, 0]]
    n_coreg.inputs.use_histogram_matching = [True, True]
    n_coreg.inputs.output_warped_image = True
    n_coreg.inputs.output_inverse_warped_image = True
    n_coreg.inputs.output_transform_prefix = 'ants_func_to_struct'
    n_coreg.inputs.interpolation = 'Linear'

    # Wire T1w as fixed, mean functional as moving; expose the transforms.
    w.connect(n_in, 'T1w', n_coreg, 'fixed_image')
    w.connect(n_in, 'mean', n_coreg, 'moving_image')
    w.connect(n_coreg, 'forward_transforms', n_out, 'mat_func2struct')

    return w
예제 #12
0
파일: mincants.py 프로젝트: ekunnii/APPIAN
    def _run_interface(self, runtime):
        """Convert MINC inputs to NIfTI, run ANTs Registration, and convert
        the warped output back to MINC.

        NOTE(review): several apparent defects are flagged inline below;
        verify against a working checkout before relying on this method.
        """

        mnc2nii_sh = pe.Node(interface=mnc2nii_shCommand(), name="mnc2nii_sh")
        nii2mnc_sh = pe.Node(interface=nii2mnc_shCommand(), name="nii2mnc_sh")
        reg = pe.Node(interface=Registration(), name="registration")

        # Convert the moving image up front.
        mnc2nii_sh.inputs.in_file = self.inputs.moving_image
        mnc2nii_sh.run()

        mnc2nii_sh_nodes = []
        # NOTE(review): "fixed_image_mask" appears three times and
        # "moving_image_masks" looks like a typo -- confirm the intended
        # input names.
        inputs = [
            "fixed_image", "fixed_image_mask", "fixed_image_mask",
            "fixed_image_mask", "moving_image", "moving_image_mask",
            "moving_image_masks"
        ]
        self_inputs = [
            self.inputs.fixed_image, self.inputs.fixed_image_mask,
            self.inputs.fixed_image_mask, self.inputs.fixed_image_mask,
            self.inputs.moving_image, self.inputs.moving_image_mask,
            self.inputs.moving_image_masks
        ]
        reg_inputs = [
            reg.inputs.fixed_image, reg.inputs.fixed_image_mask,
            reg.inputs.fixed_image_mask, reg.inputs.fixed_image_mask,
            reg.inputs.moving_image, reg.inputs.moving_image_mask,
            reg.inputs.moving_image_masks
        ]
        # Convert each defined MINC input to NIfTI.
        # NOTE(review): in_file is always self.inputs.fixed_image (not s),
        # and `r = mnc2nii_sh.out_file` only rebinds the loop-local name --
        # the converted path never reaches reg.inputs. This loop appears to
        # have no effect on the registration; confirm.
        for s, r, i in zip(self_inputs, reg_inputs, inputs):
            if isdefined(s):
                mnc2nii_sh = pe.Node(interface=mnc2nii_shCommand(),
                                     name="mnc2nii_sh_" + i,
                                     in_file=self.inputs.fixed_image)
                mnc2nii_sh.run()
                r = mnc2nii_sh.out_file

        # Forward every defined optional trait to the Registration node.
        if isdefined(self.inputs.dimension):
            reg.inputs.dimension = self.inputs.dimension
        if isdefined(self.inputs.save_state):
            reg.inputs.save_state = self.inputs.save_state
        if isdefined(self.inputs.restore_state):
            reg.inputs.restore_state = self.inputs.restore_state
        if isdefined(self.inputs.initial_moving_transform):
            # NOTE(review): guards initial_moving_transform but copies
            # initial_moving_tr -- attribute names look inconsistent.
            reg.inputs.initial_moving_tr = self.inputs.initial_moving_tr
        if isdefined(self.inputs.invert_initial_moving_transform):
            reg.inputs.invert_initial_moving_tr = self.inputs.invert_initial_moving_tr
        if isdefined(self.inputs.initial_moving_transform_com):
            reg.inputs.initial_moving_transform_com = self.inputs.initial_moving_transform_com
        if isdefined(self.inputs.metric_item_trait):
            reg.inputs.metric_item_trait = self.inputs.metric_item_trait
        if isdefined(self.inputs.metric_stage_trait):
            reg.inputs.metric_stage_trait = self.inputs.metric_stage_trait
        if isdefined(self.inputs.metric):
            reg.inputs.metric = self.inputs.metric
        if isdefined(self.inputs.metric_weight_item_trait):
            reg.inputs.metric_weight_item_trait = self.inputs.metric_weight_item_trait
        if isdefined(self.inputs.metric_weight_stage_trait):
            reg.inputs.metric_weight_stage_trait = self.inputs.metric_weight_stage_trait
        if isdefined(self.inputs.metric_weight):
            reg.inputs.metric_weight = self.inputs.metric_weight
        if isdefined(self.inputs.radius_bins_item_trait):
            reg.inputs.radius_bins_item_trait = self.inputs.radius_bins_item_trait
        if isdefined(self.inputs.radius_bins_stage_trait):
            reg.inputs.radius_bins_stage_trait = self.inputs.radius_bins_stage_trait
        if isdefined(self.inputs.radius_or_number_of_bins):
            reg.inputs.radius_or_number_of_bins = self.inputs.radius_or_number_of_bins
        if isdefined(self.inputs.sampling_strategy_item_trait):
            reg.inputs.sampling_strategy_item_trait = self.inputs.sampling_strategy_item_trait
        if isdefined(self.inputs.sampling_strategy_stage_trait):
            reg.inputs.sampling_strategy_stage_trait = self.inputs.sampling_strategy_stage_trait
        if isdefined(self.inputs.sampling_strategy):
            reg.inputs.sampling_strategy = self.inputs.sampling_strategy
        if isdefined(self.inputs.sampling_percentage_item_trait):
            reg.inputs.sampling_percentage_item_trait = self.inputs.sampling_percentage_item_trait
        if isdefined(self.inputs.sampling_percentage_stage_trait):
            reg.inputs.sampling_percentage_stage_trait = self.inputs.sampling_percentage_stage_trait
        if isdefined(self.inputs.sampling_percentage):
            reg.inputs.sampling_percentage = self.inputs.sampling_percentage
        if isdefined(self.inputs.use_estimate_learning_rate_once):
            reg.inputs.use_estimate_learning_rate_once = self.inputs.use_estimate_learning_rate_once
        if isdefined(self.inputs.use_histogram_matching):
            reg.inputs.use_histogram_matching = self.inputs.use_histogram_matching
        if isdefined(self.inputs.interpolation):
            reg.inputs.interpolation = self.inputs.interpolation
        if isdefined(self.inputs.interpolation_parameters):
            reg.inputs.interpolation_parameters = self.inputs.interpolation_parameters
        if isdefined(self.inputs.write_composite_transform):
            reg.inputs.write_composite_transform = self.inputs.write_composite_transform
        if isdefined(self.inputs.collapse_output_transforms):
            reg.inputs.collapse_output_transforms = self.inputs.collapse_output_transforms
        if isdefined(self.inputs.initialize_transforms_per_stage):
            reg.inputs.initialize_transforms_per_stage = self.inputs.initialize_transforms_per_stage
        if isdefined(self.inputs.float): reg.inputs.float = self.inputs.float
        if isdefined(self.inputs.transform_parameters):
            reg.inputs.transform_parameters = self.inputs.transform_parameters
        if isdefined(self.inputs.restrict_deformation):
            reg.inputs.restrict_deformation = self.inputs.restrict_deformation
        if isdefined(self.inputs.number_of_iterations):
            reg.inputs.number_of_iterations = self.inputs.number_of_iterations
        if isdefined(self.inputs.smoothing_sigmas):
            reg.inputs.smoothing_sigmas = self.inputs.smoothing_sigmas
        if isdefined(self.inputs.sigma_units):
            reg.inputs.sigma_units = self.inputs.sigma_units
        if isdefined(self.inputs.shrink_factors):
            reg.inputs.shrink_factors = self.inputs.shrink_factors
        if isdefined(self.inputs.convergence_threshold):
            reg.inputs.convergence_threshold = self.inputs.convergence_threshold
        if isdefined(self.inputs.convergence_window_size):
            reg.inputs.convergence_window_size = self.inputs.convergence_window_size
        if isdefined(self.inputs.output_transform_prefix):
            reg.inputs.output_transform_prefix = self.inputs.output_transform_prefix
        if isdefined(self.inputs.output_warped_image):
            reg.inputs.output_warped_image = self.inputs.output_warped_image
        if isdefined(self.inputs.output_inverse_warped_image):
            reg.inputs.output_inverse_warped_image = self.inputs.output_inverse_warped_image
        if isdefined(self.inputs.winsorize_upper_quantile):
            reg.inputs.winsorize_upper_quantile = self.inputs.winsorize_upper_quantile
        if isdefined(self.inputs.winsorize_lower_quantile):
            reg.inputs.winsorize_lower_quantile = self.inputs.winsorize_lower_quantile
        if isdefined(self.inputs.verbose):
            reg.inputs.verbose = self.inputs.verbose

        reg.run()

        # NOTE(review): warped_image is read from reg.inputs (it is never
        # set above) and the final value is taken from nii2mnc_sh.inputs --
        # both look like they should reference node *outputs*; verify.
        nii2mnc_sh.inputs.in_file = reg.inputs.warped_image
        nii2mnc_sh.run()
        self.outputs.warped_image = nii2mnc_sh.inputs.warped_image
        return (runtime)
예제 #13
0
"""Test ANTs registration interface
    Parameters values are taken from: https://github.com/ANTsX/ANTs/wiki/Anatomy-of-an-antsRegistration-call
    Note: Runtime is quite long with current parameters
"""
from nipype.interfaces.ants import Registration

#Get template path in your system
from nipype.interfaces.fsl import Info

template_path = Info.standard_image('MNI152_T1_1mm_brain.nii.gz')

reg = Registration()
reg.inputs.fixed_image = template_path
reg.inputs.moving_image = 'test-data/haxby2001/subj2/anat.nii.gz'

# Choose Mutual Information as the metric and relevant metric inputs
reg.inputs.metric = ['MI', 'MI', 'CC']
reg.inputs.metric_weight = [1] * 3
reg.inputs.radius_or_number_of_bins = [32] * 3
reg.inputs.sampling_strategy = ['Regular', 'Regular', None]
reg.inputs.sampling_percentage = [0.25, 0.25, None]

# Choose the type of transforms and in what order to implement
reg.inputs.transforms = ['Rigid', 'Affine', 'SyN']

# Parameters are (GradientStep, updateFieldVarianceInVoxelSpace, totalFieldVarianceInVoxelSpace)
reg.inputs.transform_parameters = [(0.1, ), (0.1, ), (0.1, 3.0, 0.0)]

# Specify where to save results
reg.output_warped_image = 'out-ants-reg/anat_norm.nii.gz'
예제 #14
0
def make_w_coreg_7T_7T():
    """Build a workflow coregistering the 7T SE T1w (moving) to the 7T GE
    T1w (fixed) with a two-stage Rigid+Affine ANTs registration, and apply
    the resulting transforms in both directions with ApplyTransforms.
    """

    w = Workflow('coreg_7T_7T')

    # Workflow inputs: the two T1w acquisitions to align.
    n_in = Node(IdentityInterface(fields=[
        'T1w_SE',
        'T1w_GE',
    ]), name='input')

    n_out = Node(IdentityInterface(fields=[
        'mat_ants',
        'func',
    ]),
                 name='output')

    # Two-stage MI-driven registration: Rigid first, then Affine.
    # Each per-stage input is a two-element list (one entry per stage).
    n_coreg = Node(Registration(), name='antsReg')
    n_coreg.inputs.dimension = 3
    n_coreg.inputs.winsorize_lower_quantile = 0.005
    n_coreg.inputs.winsorize_upper_quantile = 0.995
    n_coreg.inputs.float = True
    n_coreg.inputs.interpolation = 'Linear'
    n_coreg.inputs.transforms = ['Rigid', 'Affine']
    n_coreg.inputs.transform_parameters = [[
        0.2,
    ], [
        0.1,
    ]]
    n_coreg.inputs.metric = ['MI', 'MI']
    n_coreg.inputs.metric_weight = [1, 1]
    n_coreg.inputs.radius_or_number_of_bins = [32, 32]
    n_coreg.inputs.sampling_strategy = ['Regular', 'Regular']
    n_coreg.inputs.sampling_percentage = [0.25, 0.25]
    n_coreg.inputs.sigma_units = ['vox', 'vox']
    n_coreg.inputs.convergence_threshold = [1e-6, 1e-6]
    # Rigid stage uses 3 resolution levels; Affine uses 2.
    n_coreg.inputs.smoothing_sigmas = [[4, 2, 0], [1, 0]]
    n_coreg.inputs.shrink_factors = [[3, 2, 1], [2, 1]]
    n_coreg.inputs.convergence_window_size = [20, 10]
    n_coreg.inputs.number_of_iterations = [[1000, 500, 200], [500, 200]]
    # Affine stage is restricted to in-plane deformation (x, y only).
    n_coreg.inputs.restrict_deformation = [[], [1, 1, 0]]
    n_coreg.inputs.output_warped_image = True
    n_coreg.inputs.output_inverse_warped_image = True

    # GE -> SE resampling: applies the forward transforms inverted.
    n_ge2se = Node(ApplyTransforms(), name='ants_ge2se')
    n_ge2se.inputs.dimension = 3
    n_ge2se.inputs.invert_transform_flags = True
    n_ge2se.inputs.interpolation = 'Linear'

    # SE -> GE resampling: applies the forward transforms directly.
    n_se2ge = Node(ApplyTransforms(), name='ants_se2ge')
    n_se2ge.inputs.dimension = 3
    n_se2ge.inputs.interpolation = 'Linear'
    n_se2ge.inputs.default_value = 0

    # SE is the moving image, GE the fixed one.
    w.connect(n_in, 'T1w_SE', n_coreg, 'moving_image')
    w.connect(n_in, 'T1w_GE', n_coreg, 'fixed_image')
    w.connect(n_coreg, 'forward_transforms', n_out, 'mat_ants')

    w.connect(n_coreg, 'forward_transforms', n_ge2se, 'transforms')
    w.connect(n_in, 'T1w_GE', n_ge2se, 'input_image')
    w.connect(n_in, 'T1w_SE', n_ge2se, 'reference_image')

    w.connect(n_coreg, 'forward_transforms', n_se2ge, 'transforms')
    w.connect(n_in, 'T1w_SE', n_se2ge, 'input_image')
    w.connect(n_in, 'T1w_GE', n_se2ge, 'reference_image')

    return w
def antsRegistrationTemplateBuildSingleIterationWF(iterationPhasePrefix=''):
    """
    Build one iteration of an ANTs template-construction workflow.

    Registers every input image to the current fixed template, averages the
    deformed images and their transforms, and reshapes the average into an
    updated template.  Passive images (not used to drive registration) are
    warped and averaged alongside.

    :param iterationPhasePrefix: string used to tag node/file names so that
        successive iterations do not collide.

    Inputs::

           inputspec.images :
           inputspec.fixed_image :
           inputspec.ListOfPassiveImagesDictionaries :
           inputspec.interpolationMapping :

    Outputs::

           outputspec.template :
           outputspec.transforms_list :
           outputspec.passive_deformed_templates :
    """
    TemplateBuildSingleIterationWF = pe.Workflow(
        name='antsRegistrationTemplateBuildSingleIterationWF_' +
        str(iterationPhasePrefix))

    inputSpec = pe.Node(interface=util.IdentityInterface(fields=[
        'ListOfImagesDictionaries', 'registrationImageTypes',
        'interpolationMapping', 'fixed_image'
    ]),
                        run_without_submitting=True,
                        name='inputspec')
    ## HACK: TODO: Need to move all local functions to a common untility file, or at the top of the file so that
    ##             they do not change due to re-indenting.  Otherwise re-indenting for flow control will trigger
    ##             their hash to change.
    ## HACK: TODO: REMOVE 'transforms_list' it is not used.  That will change all the hashes
    ## HACK: TODO: Need to run all python files through the code beutifiers.  It has gotten pretty ugly.
    outputSpec = pe.Node(interface=util.IdentityInterface(
        fields=['template', 'transforms_list', 'passive_deformed_templates']),
                         run_without_submitting=True,
                         name='outputspec')

    ### NOTE MAP NODE! warp each of the original images to the provided fixed_image as the template
    BeginANTS = pe.MapNode(interface=Registration(),
                           name='BeginANTS',
                           iterfield=['moving_image'])
    BeginANTS.inputs.dimension = 3
    BeginANTS.inputs.output_transform_prefix = str(
        iterationPhasePrefix) + '_tfm'
    # Affine stage followed by a SyN (diffeomorphic) stage.
    BeginANTS.inputs.transforms = ["Affine", "SyN"]
    BeginANTS.inputs.transform_parameters = [[0.9], [0.25, 3.0, 0.0]]
    BeginANTS.inputs.metric = ['Mattes', 'CC']
    BeginANTS.inputs.metric_weight = [1.0, 1.0]
    BeginANTS.inputs.radius_or_number_of_bins = [32, 5]
    BeginANTS.inputs.number_of_iterations = [[1000, 1000, 1000], [50, 35, 15]]
    BeginANTS.inputs.use_histogram_matching = [True, True]
    BeginANTS.inputs.use_estimate_learning_rate_once = [False, False]
    BeginANTS.inputs.shrink_factors = [[3, 2, 1], [3, 2, 1]]
    BeginANTS.inputs.smoothing_sigmas = [[3, 2, 0], [3, 2, 0]]

    # Select the images (and their interpolation types) that drive registration.
    GetMovingImagesNode = pe.Node(interface=util.Function(
        function=GetMovingImages,
        input_names=[
            'ListOfImagesDictionaries', 'registrationImageTypes',
            'interpolationMapping'
        ],
        output_names=['moving_images', 'moving_interpolation_type']),
                                  run_without_submitting=True,
                                  name='99_GetMovingImagesNode')
    TemplateBuildSingleIterationWF.connect(inputSpec,
                                           'ListOfImagesDictionaries',
                                           GetMovingImagesNode,
                                           'ListOfImagesDictionaries')
    TemplateBuildSingleIterationWF.connect(inputSpec, 'registrationImageTypes',
                                           GetMovingImagesNode,
                                           'registrationImageTypes')
    TemplateBuildSingleIterationWF.connect(inputSpec, 'interpolationMapping',
                                           GetMovingImagesNode,
                                           'interpolationMapping')

    TemplateBuildSingleIterationWF.connect(GetMovingImagesNode,
                                           'moving_images', BeginANTS,
                                           'moving_image')
    TemplateBuildSingleIterationWF.connect(GetMovingImagesNode,
                                           'moving_interpolation_type',
                                           BeginANTS, 'interpolation')
    TemplateBuildSingleIterationWF.connect(inputSpec, 'fixed_image', BeginANTS,
                                           'fixed_image')

    ## Now warp all the input_images images
    wimtdeformed = pe.MapNode(
        interface=ApplyTransforms(),
        iterfield=['transforms', 'invert_transform_flags', 'input_image'],
        name='wimtdeformed')
    wimtdeformed.inputs.interpolation = 'Linear'
    # BUGFIX: was `wimtdeformed.default_value = 0`, which sets an attribute on
    # the Node object itself; nipype only honours values set via `.inputs`.
    wimtdeformed.inputs.default_value = 0
    TemplateBuildSingleIterationWF.connect(BeginANTS, 'forward_transforms',
                                           wimtdeformed, 'transforms')
    TemplateBuildSingleIterationWF.connect(BeginANTS, 'forward_invert_flags',
                                           wimtdeformed,
                                           'invert_transform_flags')
    TemplateBuildSingleIterationWF.connect(GetMovingImagesNode,
                                           'moving_images', wimtdeformed,
                                           'input_image')
    TemplateBuildSingleIterationWF.connect(inputSpec, 'fixed_image',
                                           wimtdeformed, 'reference_image')

    ##  Shape Update Next =====
    ## Now  Average All input_images deformed images together to create an updated template average
    AvgDeformedImages = pe.Node(interface=AverageImages(),
                                name='AvgDeformedImages')
    AvgDeformedImages.inputs.dimension = 3
    AvgDeformedImages.inputs.output_average_image = str(
        iterationPhasePrefix) + '.nii.gz'
    AvgDeformedImages.inputs.normalize = True
    TemplateBuildSingleIterationWF.connect(wimtdeformed, "output_image",
                                           AvgDeformedImages, 'images')

    ## Now average all affine transforms together
    AvgAffineTransform = pe.Node(interface=AverageAffineTransform(),
                                 name='AvgAffineTransform')
    AvgAffineTransform.inputs.dimension = 3
    # NOTE(review): 'Avererage_' typo kept intentionally — renaming the output
    # file would change cached results/hashes for existing pipelines.
    AvgAffineTransform.inputs.output_affine_transform = 'Avererage_' + str(
        iterationPhasePrefix) + '_Affine.mat'

    # Split each per-image composite transform into its affine and warp parts
    # so they can be averaged separately.
    SplitAffineAndWarpsNode = pe.Node(interface=util.Function(
        function=SplitAffineAndWarpComponents,
        input_names=['list_of_transforms_lists'],
        output_names=['affine_component_list', 'warp_component_list']),
                                      run_without_submitting=True,
                                      name='99_SplitAffineAndWarpsNode')
    TemplateBuildSingleIterationWF.connect(BeginANTS, 'forward_transforms',
                                           SplitAffineAndWarpsNode,
                                           'list_of_transforms_lists')
    TemplateBuildSingleIterationWF.connect(SplitAffineAndWarpsNode,
                                           'affine_component_list',
                                           AvgAffineTransform, 'transforms')

    ## Now average the warp fields togther
    AvgWarpImages = pe.Node(interface=AverageImages(), name='AvgWarpImages')
    AvgWarpImages.inputs.dimension = 3
    AvgWarpImages.inputs.output_average_image = str(
        iterationPhasePrefix) + 'warp.nii.gz'
    AvgWarpImages.inputs.normalize = True
    TemplateBuildSingleIterationWF.connect(SplitAffineAndWarpsNode,
                                           'warp_component_list',
                                           AvgWarpImages, 'images')

    ## Now average the images together
    ## TODO:  For now GradientStep is set to 0.25 as a hard coded default value.
    GradientStep = 0.25
    GradientStepWarpImage = pe.Node(interface=MultiplyImages(),
                                    name='GradientStepWarpImage')
    GradientStepWarpImage.inputs.dimension = 3
    # Negative step: the average warp is applied in the inverse direction to
    # pull the template toward the mean shape.
    GradientStepWarpImage.inputs.second_input = -1.0 * GradientStep
    GradientStepWarpImage.inputs.output_product_image = 'GradientStep0.25_' + str(
        iterationPhasePrefix) + '_warp.nii.gz'
    TemplateBuildSingleIterationWF.connect(AvgWarpImages,
                                           'output_average_image',
                                           GradientStepWarpImage,
                                           'first_input')

    ## Now create the new template shape based on the average of all deformed images
    UpdateTemplateShape = pe.Node(interface=ApplyTransforms(),
                                  name='UpdateTemplateShape')
    UpdateTemplateShape.inputs.invert_transform_flags = [True]
    UpdateTemplateShape.inputs.interpolation = 'Linear'
    # BUGFIX: was set on the Node object (`UpdateTemplateShape.default_value`);
    # must go through `.inputs` for nipype to pass it to antsApplyTransforms.
    UpdateTemplateShape.inputs.default_value = 0

    TemplateBuildSingleIterationWF.connect(AvgDeformedImages,
                                           'output_average_image',
                                           UpdateTemplateShape,
                                           'reference_image')
    TemplateBuildSingleIterationWF.connect([
        (AvgAffineTransform, UpdateTemplateShape,
         [(('affine_transform', makeListOfOneElement), 'transforms')]),
    ])
    TemplateBuildSingleIterationWF.connect(GradientStepWarpImage,
                                           'output_product_image',
                                           UpdateTemplateShape, 'input_image')

    ApplyInvAverageAndFourTimesGradientStepWarpImage = pe.Node(
        interface=util.Function(
            function=MakeTransformListWithGradientWarps,
            input_names=['averageAffineTranform', 'gradientStepWarp'],
            output_names=['TransformListWithGradientWarps']),
        run_without_submitting=True,
        name='99_MakeTransformListWithGradientWarps')
    ApplyInvAverageAndFourTimesGradientStepWarpImage.inputs.ignore_exception = True

    TemplateBuildSingleIterationWF.connect(
        AvgAffineTransform, 'affine_transform',
        ApplyInvAverageAndFourTimesGradientStepWarpImage,
        'averageAffineTranform')
    TemplateBuildSingleIterationWF.connect(
        UpdateTemplateShape, 'output_image',
        ApplyInvAverageAndFourTimesGradientStepWarpImage, 'gradientStepWarp')

    ReshapeAverageImageWithShapeUpdate = pe.Node(
        interface=ApplyTransforms(), name='ReshapeAverageImageWithShapeUpdate')
    ReshapeAverageImageWithShapeUpdate.inputs.invert_transform_flags = [
        True, False, False, False, False
    ]
    ReshapeAverageImageWithShapeUpdate.inputs.interpolation = 'Linear'
    # BUGFIX: routed through `.inputs` (was a no-op Node attribute).
    ReshapeAverageImageWithShapeUpdate.inputs.default_value = 0
    ReshapeAverageImageWithShapeUpdate.inputs.output_image = 'ReshapeAverageImageWithShapeUpdate.nii.gz'
    TemplateBuildSingleIterationWF.connect(AvgDeformedImages,
                                           'output_average_image',
                                           ReshapeAverageImageWithShapeUpdate,
                                           'input_image')
    TemplateBuildSingleIterationWF.connect(AvgDeformedImages,
                                           'output_average_image',
                                           ReshapeAverageImageWithShapeUpdate,
                                           'reference_image')
    TemplateBuildSingleIterationWF.connect(
        ApplyInvAverageAndFourTimesGradientStepWarpImage,
        'TransformListWithGradientWarps', ReshapeAverageImageWithShapeUpdate,
        'transforms')
    TemplateBuildSingleIterationWF.connect(ReshapeAverageImageWithShapeUpdate,
                                           'output_image', outputSpec,
                                           'template')

    ######
    ######
    ######  Process all the passive deformed images in a way similar to the main image used for registration
    ######
    ######
    ######
    ##############################################
    ## Now warp all the ListOfPassiveImagesDictionaries images
    FlattenTransformAndImagesListNode = pe.Node(
        Function(function=FlattenTransformAndImagesList,
                 input_names=[
                     'ListOfPassiveImagesDictionaries', 'transforms',
                     'invert_transform_flags', 'interpolationMapping'
                 ],
                 output_names=[
                     'flattened_images', 'flattened_transforms',
                     'flattened_invert_transform_flags',
                     'flattened_image_nametypes',
                     'flattened_interpolation_type'
                 ]),
        run_without_submitting=True,
        name="99_FlattenTransformAndImagesList")

    GetPassiveImagesNode = pe.Node(interface=util.Function(
        function=GetPassiveImages,
        input_names=['ListOfImagesDictionaries', 'registrationImageTypes'],
        output_names=['ListOfPassiveImagesDictionaries']),
                                   run_without_submitting=True,
                                   name='99_GetPassiveImagesNode')
    TemplateBuildSingleIterationWF.connect(inputSpec,
                                           'ListOfImagesDictionaries',
                                           GetPassiveImagesNode,
                                           'ListOfImagesDictionaries')
    TemplateBuildSingleIterationWF.connect(inputSpec, 'registrationImageTypes',
                                           GetPassiveImagesNode,
                                           'registrationImageTypes')

    TemplateBuildSingleIterationWF.connect(GetPassiveImagesNode,
                                           'ListOfPassiveImagesDictionaries',
                                           FlattenTransformAndImagesListNode,
                                           'ListOfPassiveImagesDictionaries')
    TemplateBuildSingleIterationWF.connect(inputSpec, 'interpolationMapping',
                                           FlattenTransformAndImagesListNode,
                                           'interpolationMapping')
    TemplateBuildSingleIterationWF.connect(BeginANTS, 'forward_transforms',
                                           FlattenTransformAndImagesListNode,
                                           'transforms')
    TemplateBuildSingleIterationWF.connect(BeginANTS, 'forward_invert_flags',
                                           FlattenTransformAndImagesListNode,
                                           'invert_transform_flags')
    wimtPassivedeformed = pe.MapNode(interface=ApplyTransforms(),
                                     iterfield=[
                                         'transforms',
                                         'invert_transform_flags',
                                         'input_image', 'interpolation'
                                     ],
                                     name='wimtPassivedeformed')
    # BUGFIX: routed through `.inputs` (was a no-op Node attribute).
    wimtPassivedeformed.inputs.default_value = 0
    TemplateBuildSingleIterationWF.connect(AvgDeformedImages,
                                           'output_average_image',
                                           wimtPassivedeformed,
                                           'reference_image')
    TemplateBuildSingleIterationWF.connect(FlattenTransformAndImagesListNode,
                                           'flattened_interpolation_type',
                                           wimtPassivedeformed,
                                           'interpolation')
    TemplateBuildSingleIterationWF.connect(FlattenTransformAndImagesListNode,
                                           'flattened_images',
                                           wimtPassivedeformed, 'input_image')
    TemplateBuildSingleIterationWF.connect(FlattenTransformAndImagesListNode,
                                           'flattened_transforms',
                                           wimtPassivedeformed, 'transforms')
    TemplateBuildSingleIterationWF.connect(FlattenTransformAndImagesListNode,
                                           'flattened_invert_transform_flags',
                                           wimtPassivedeformed,
                                           'invert_transform_flags')

    RenestDeformedPassiveImagesNode = pe.Node(
        Function(function=RenestDeformedPassiveImages,
                 input_names=[
                     'deformedPassiveImages', 'flattened_image_nametypes',
                     'interpolationMapping'
                 ],
                 output_names=[
                     'nested_imagetype_list', 'outputAverageImageName_list',
                     'image_type_list', 'nested_interpolation_type'
                 ]),
        run_without_submitting=True,
        name="99_RenestDeformedPassiveImages")
    TemplateBuildSingleIterationWF.connect(inputSpec, 'interpolationMapping',
                                           RenestDeformedPassiveImagesNode,
                                           'interpolationMapping')
    TemplateBuildSingleIterationWF.connect(wimtPassivedeformed, 'output_image',
                                           RenestDeformedPassiveImagesNode,
                                           'deformedPassiveImages')
    TemplateBuildSingleIterationWF.connect(FlattenTransformAndImagesListNode,
                                           'flattened_image_nametypes',
                                           RenestDeformedPassiveImagesNode,
                                           'flattened_image_nametypes')
    ## Now  Average All passive input_images deformed images together to create an updated template average
    AvgDeformedPassiveImages = pe.MapNode(
        interface=AverageImages(),
        iterfield=['images', 'output_average_image'],
        name='AvgDeformedPassiveImages')
    AvgDeformedPassiveImages.inputs.dimension = 3
    AvgDeformedPassiveImages.inputs.normalize = False
    TemplateBuildSingleIterationWF.connect(RenestDeformedPassiveImagesNode,
                                           "nested_imagetype_list",
                                           AvgDeformedPassiveImages, 'images')
    TemplateBuildSingleIterationWF.connect(RenestDeformedPassiveImagesNode,
                                           "outputAverageImageName_list",
                                           AvgDeformedPassiveImages,
                                           'output_average_image')

    ## -- TODO:  Now neeed to reshape all the passive images as well
    ReshapeAveragePassiveImageWithShapeUpdate = pe.MapNode(
        interface=ApplyTransforms(),
        iterfield=[
            'input_image', 'reference_image', 'output_image', 'interpolation'
        ],
        name='ReshapeAveragePassiveImageWithShapeUpdate')
    ReshapeAveragePassiveImageWithShapeUpdate.inputs.invert_transform_flags = [
        True, False, False, False, False
    ]
    # BUGFIX: routed through `.inputs` (was a no-op Node attribute).
    ReshapeAveragePassiveImageWithShapeUpdate.inputs.default_value = 0
    TemplateBuildSingleIterationWF.connect(
        RenestDeformedPassiveImagesNode, 'nested_interpolation_type',
        ReshapeAveragePassiveImageWithShapeUpdate, 'interpolation')
    TemplateBuildSingleIterationWF.connect(
        RenestDeformedPassiveImagesNode, 'outputAverageImageName_list',
        ReshapeAveragePassiveImageWithShapeUpdate, 'output_image')
    TemplateBuildSingleIterationWF.connect(
        AvgDeformedPassiveImages, 'output_average_image',
        ReshapeAveragePassiveImageWithShapeUpdate, 'input_image')
    TemplateBuildSingleIterationWF.connect(
        AvgDeformedPassiveImages, 'output_average_image',
        ReshapeAveragePassiveImageWithShapeUpdate, 'reference_image')
    TemplateBuildSingleIterationWF.connect(
        ApplyInvAverageAndFourTimesGradientStepWarpImage,
        'TransformListWithGradientWarps',
        ReshapeAveragePassiveImageWithShapeUpdate, 'transforms')
    TemplateBuildSingleIterationWF.connect(
        ReshapeAveragePassiveImageWithShapeUpdate, 'output_image', outputSpec,
        'passive_deformed_templates')

    return TemplateBuildSingleIterationWF
예제 #16
0
def anat2mni_ants_workflow_nipype(SinkTag="anat_preproc",
                                  wf_name="anat2mni_ants"):
    """
    Register skull and brain extracted image to MNI space and return the
    transformation matrices.  Uses ANTS, doing it in the nipype way.

    Workflow inputs:
        :param skull: The reoriented anatomical file.
        :param brain: The brain extracted anat.
        :param ref_skull: MNI152 skull file.
        :param ref_brain: MNI152 brain file.
        :param SinkDir:
        :param SinkTag: The output directory in which the returned images (see workflow outputs) can be found.

    Workflow outputs:

        :return: anat2mni_workflow - workflow

        anat="/home/balint/Dokumentumok/phd/essen/PAINTER/probe/MS001/highres.nii.gz",
                      brain="/home/balint/Dokumentumok/phd/essen/PAINTER/probe/MS001/highres_brain.nii.gz",

    Tamas Spisak
    [email protected]
    2018
    """
    SinkDir = os.path.abspath(globals._SinkDir_ + "/" + SinkTag)
    if not os.path.exists(SinkDir):
        os.makedirs(SinkDir)

    # Define inputs of workflow
    inputspec = pe.Node(utility.IdentityInterface(
        fields=['brain', 'skull', 'reference_brain', 'reference_skull']),
                        name='inputspec')

    inputspec.inputs.reference_brain = globals._FSLDIR_ + globals._brainref  #TODO_ready: 1 or 2mm???
    inputspec.inputs.reference_skull = globals._FSLDIR_ + globals._headref

    # Multi-stage registration node with ANTS
    reg = pe.MapNode(
        interface=Registration(),
        iterfield=['moving_image'],  # 'moving_image_mask'],
        name="ANTS")
    """
    reg.inputs.transforms = ['Affine', 'SyN']
    reg.inputs.transform_parameters = [(2.0,), (0.1, 3.0, 0.0)]
    reg.inputs.number_of_iterations = [[1500, 200], [100, 50, 30]]
    reg.inputs.dimension = 3
    reg.inputs.write_composite_transform = True
    reg.inputs.collapse_output_transforms = False
    reg.inputs.initialize_transforms_per_stage = False
    reg.inputs.metric = ['Mattes', 'Mattes']
    reg.inputs.metric_weight = [1] * 2  # Default (value ignored currently by ANTs)
    reg.inputs.radius_or_number_of_bins = [32] * 2
    reg.inputs.sampling_strategy = ['Random', None]
    reg.inputs.sampling_percentage = [0.05, None]
    reg.inputs.convergence_threshold = [1.e-8, 1.e-9]
    reg.inputs.convergence_window_size = [20] * 2
    reg.inputs.smoothing_sigmas = [[1, 0], [2, 1, 0]]
    reg.inputs.sigma_units = ['vox'] * 2
    reg.inputs.shrink_factors = [[2, 1], [4, 2, 1]]
    reg.inputs.use_estimate_learning_rate_once = [True, True]
    reg.inputs.use_histogram_matching = [True, True]  # This is the default
    reg.inputs.output_warped_image = 'output_warped_image.nii.gz'
    reg.inputs.winsorize_lower_quantile = 0.01
    reg.inputs.winsorize_upper_quantile = 0.99
    """

    # Three-stage registration (Rigid -> Affine -> SyN), parameters as
    # recommended by S. Ghosh ("satra says").
    reg.inputs.transforms = ['Rigid', 'Affine', 'SyN']
    reg.inputs.transform_parameters = [(0.1, ), (0.1, ), (0.2, 3.0, 0.0)]
    reg.inputs.number_of_iterations = ([[10000, 111110, 11110]] * 2 +
                                       [[100, 50, 30]])
    reg.inputs.dimension = 3
    reg.inputs.write_composite_transform = True
    reg.inputs.collapse_output_transforms = True
    reg.inputs.initial_moving_transform_com = True
    reg.inputs.metric = ['Mattes'] * 2 + [['Mattes', 'CC']]
    reg.inputs.metric_weight = [1] * 2 + [[0.5, 0.5]]
    reg.inputs.radius_or_number_of_bins = [32] * 2 + [[32, 4]]
    reg.inputs.sampling_strategy = ['Regular'] * 2 + [[None, None]]
    reg.inputs.sampling_percentage = [0.3] * 2 + [[None, None]]
    reg.inputs.convergence_threshold = [1.e-8] * 2 + [-0.01]
    reg.inputs.convergence_window_size = [20] * 2 + [5]
    reg.inputs.smoothing_sigmas = [[4, 2, 1]] * 2 + [[1, 0.5, 0]]
    reg.inputs.sigma_units = ['vox'] * 3
    reg.inputs.shrink_factors = [[3, 2, 1]] * 2 + [[4, 2, 1]]
    reg.inputs.use_estimate_learning_rate_once = [True] * 3
    reg.inputs.use_histogram_matching = [False] * 2 + [True]
    reg.inputs.winsorize_lower_quantile = 0.005
    reg.inputs.winsorize_upper_quantile = 0.995
    reg.inputs.args = '--float'

    # Create png images for quality check
    myqc = qc.vol2png("anat2mni", "ANTS3", overlayiterated=False)
    myqc.inputs.inputspec.overlay_image = globals._FSLDIR_ + globals._brainref  #TODO_ready: 1 or 2mm???
    myqc.inputs.slicer.image_width = 500  # 5000 # for the 1mm template
    myqc.inputs.slicer.threshold_edges = 0.1  # 0.1  # for the 1mm template

    # Save outputs which are important
    ds = pe.Node(interface=io.DataSink(), name='ds_nii')
    ds.inputs.base_directory = SinkDir
    # BUGFIX: raw string — the non-raw "(\/)" contained an invalid escape
    # sequence (DeprecationWarning, SyntaxWarning on Python >= 3.12).
    ds.inputs.regexp_substitutions = [(r"(\/)[^\/]*$", ".nii.gz")]

    # Define outputs of the workflow
    outputspec = pe.Node(utility.IdentityInterface(fields=[
        'output_brain', 'linear_xfm', 'invlinear_xfm', 'nonlinear_xfm',
        'invnonlinear_xfm', 'std_template'
    ]),
                         name='outputspec')

    outputspec.inputs.std_template = inputspec.inputs.reference_brain

    # Create workflow and connect nodes
    analysisflow = pe.Workflow(name=wf_name)

    analysisflow.connect(inputspec, 'reference_skull', reg, 'fixed_image')
    #analysisflow.connect(inputspec, 'reference_brain', reg, 'fixed_image_mask')
    analysisflow.connect(inputspec, 'skull', reg, 'moving_image')
    #analysisflow.connect(inputspec, 'brain', reg, 'moving_image_mask')

    analysisflow.connect(reg, 'composite_transform', outputspec,
                         'nonlinear_xfm')
    analysisflow.connect(reg, 'inverse_composite_transform', outputspec,
                         'invnonlinear_xfm')
    analysisflow.connect(reg, 'warped_image', outputspec, 'output_brain')
    analysisflow.connect(reg, 'warped_image', ds, 'anat2mni_std')
    analysisflow.connect(reg, 'composite_transform', ds, 'anat2mni_warpfield')
    analysisflow.connect(reg, 'warped_image', myqc, 'inputspec.bg_image')

    return analysisflow
예제 #17
0
def BAWantsRegistrationTemplateBuildSingleIterationWF(iterationPhasePrefix=''):
    """
    Build one iteration of the ANTS template-construction workflow.

    Each input image is registered to the current template (``fixed_image``),
    the deformed images are averaged into an updated template, and the
    template shape is corrected using the average affine transform and a
    scaled average warp field.  Passive images (those not used to drive the
    registration) are carried through the same deformations.

    :param iterationPhasePrefix: string used to disambiguate node/file names
        between iterations (e.g. ``'iteration01'``).
    :return: the configured :class:`nipype.pipeline.engine.Workflow`.

    Inputs::

           inputspec.images :
           inputspec.fixed_image :
           inputspec.ListOfPassiveImagesDictionaries :
           inputspec.interpolationMapping :

    Outputs::

           outputspec.template :
           outputspec.transforms_list :
           outputspec.passive_deformed_templates :
    """
    TemplateBuildSingleIterationWF = pe.Workflow(
        name='antsRegistrationTemplateBuildSingleIterationWF_' +
        str(iterationPhasePrefix))

    inputSpec = pe.Node(
        interface=util.IdentityInterface(fields=[
            'ListOfImagesDictionaries',
            'registrationImageTypes',
            #'maskRegistrationImageType',
            'interpolationMapping',
            'fixed_image'
        ]),
        run_without_submitting=True,
        name='inputspec')
    ## HACK: TODO: We need to have the AVG_AIR.nii.gz be warped with a default voxel value of 1.0
    ## HACK: TODO: Need to move all local functions to a common utility file, or at the top of the file so that
    ##             they do not change due to re-indenting.  Otherwise re-indenting for flow control will trigger
    ##             their hash to change.
    ## HACK: TODO: REMOVE 'transforms_list' it is not used.  That will change all the hashes
    ## HACK: TODO: Need to run all python files through the code beautifiers.  It has gotten pretty ugly.
    outputSpec = pe.Node(interface=util.IdentityInterface(
        fields=['template', 'transforms_list', 'passive_deformed_templates']),
                         run_without_submitting=True,
                         name='outputspec')

    ### NOTE MAP NODE! warp each of the original images to the provided fixed_image as the template
    BeginANTS = pe.MapNode(interface=Registration(),
                           name='BeginANTS',
                           iterfield=['moving_image'])
    BeginANTS.inputs.dimension = 3
    """ This is the recommended set of parameters from the ANTS developers """
    BeginANTS.inputs.output_transform_prefix = str(
        iterationPhasePrefix) + '_tfm'
    BeginANTS.inputs.transforms = ["Rigid", "Affine", "SyN", "SyN", "SyN"]
    BeginANTS.inputs.transform_parameters = [[0.1], [0.1], [0.1, 3.0, 0.0],
                                             [0.1, 3.0, 0.0], [0.1, 3.0, 0.0]]
    BeginANTS.inputs.metric = ['MI', 'MI', 'CC', 'CC', 'CC']
    BeginANTS.inputs.sampling_strategy = [
        'Regular', 'Regular', None, None, None
    ]
    BeginANTS.inputs.sampling_percentage = [0.27, 0.27, 1.0, 1.0, 1.0]
    BeginANTS.inputs.metric_weight = [1.0, 1.0, 1.0, 1.0, 1.0]
    BeginANTS.inputs.radius_or_number_of_bins = [32, 32, 4, 4, 4]
    BeginANTS.inputs.number_of_iterations = [[1000, 1000, 1000, 1000],
                                             [1000, 1000, 1000, 1000],
                                             [1000, 250], [140], [25]]
    BeginANTS.inputs.convergence_threshold = [5e-8, 5e-8, 5e-7, 5e-6, 5e-5]
    BeginANTS.inputs.convergence_window_size = [10, 10, 10, 10, 10]
    BeginANTS.inputs.use_histogram_matching = [True, True, True, True, True]
    BeginANTS.inputs.shrink_factors = [[8, 4, 2, 1], [8, 4, 2, 1], [8, 4], [2],
                                       [1]]
    BeginANTS.inputs.smoothing_sigmas = [[3, 2, 1, 0], [3, 2, 1, 0], [3, 2],
                                         [1], [0]]
    BeginANTS.inputs.sigma_units = ["vox", "vox", "vox", "vox", "vox"]
    BeginANTS.inputs.use_estimate_learning_rate_once = [
        False, False, False, False, False
    ]
    BeginANTS.inputs.write_composite_transform = True
    BeginANTS.inputs.collapse_output_transforms = False
    BeginANTS.inputs.initialize_transforms_per_stage = True
    BeginANTS.inputs.winsorize_lower_quantile = 0.01
    BeginANTS.inputs.winsorize_upper_quantile = 0.99
    BeginANTS.inputs.output_warped_image = 'atlas2subject.nii.gz'
    BeginANTS.inputs.output_inverse_warped_image = 'subject2atlas.nii.gz'
    BeginANTS.inputs.save_state = 'SavedBeginANTSSyNState.h5'
    BeginANTS.inputs.float = True

    GetMovingImagesNode = pe.Node(interface=util.Function(
        function=GetMovingImages,
        input_names=[
            'ListOfImagesDictionaries', 'registrationImageTypes',
            'interpolationMapping'
        ],
        output_names=['moving_images', 'moving_interpolation_type']),
                                  run_without_submitting=True,
                                  name='99_GetMovingImagesNode')
    TemplateBuildSingleIterationWF.connect(inputSpec,
                                           'ListOfImagesDictionaries',
                                           GetMovingImagesNode,
                                           'ListOfImagesDictionaries')
    TemplateBuildSingleIterationWF.connect(inputSpec, 'registrationImageTypes',
                                           GetMovingImagesNode,
                                           'registrationImageTypes')
    TemplateBuildSingleIterationWF.connect(inputSpec, 'interpolationMapping',
                                           GetMovingImagesNode,
                                           'interpolationMapping')

    TemplateBuildSingleIterationWF.connect(GetMovingImagesNode,
                                           'moving_images', BeginANTS,
                                           'moving_image')
    TemplateBuildSingleIterationWF.connect(GetMovingImagesNode,
                                           'moving_interpolation_type',
                                           BeginANTS, 'interpolation')
    TemplateBuildSingleIterationWF.connect(inputSpec, 'fixed_image', BeginANTS,
                                           'fixed_image')

    ## Now warp all the input_images images
    wimtdeformed = pe.MapNode(
        interface=ApplyTransforms(),
        iterfield=['transforms', 'input_image'],
        #iterfield=['transforms', 'invert_transform_flags', 'input_image'],
        name='wimtdeformed')
    wimtdeformed.inputs.interpolation = 'Linear'
    # BUGFIX: default_value must be set on .inputs; assigning it on the Node
    # object itself is silently ignored by the interface.
    wimtdeformed.inputs.default_value = 0
    # HACK: Should try using forward_composite_transform
    ##PREVIOUS TemplateBuildSingleIterationWF.connect(BeginANTS, 'forward_transform', wimtdeformed, 'transforms')
    TemplateBuildSingleIterationWF.connect(BeginANTS, 'composite_transform',
                                           wimtdeformed, 'transforms')
    ##PREVIOUS TemplateBuildSingleIterationWF.connect(BeginANTS, 'forward_invert_flags', wimtdeformed, 'invert_transform_flags')
    ## NOTE: forward_invert_flags:: List of flags corresponding to the forward transforms
    #wimtdeformed.inputs.invert_transform_flags = [False,False,False,False,False]
    TemplateBuildSingleIterationWF.connect(GetMovingImagesNode,
                                           'moving_images', wimtdeformed,
                                           'input_image')
    TemplateBuildSingleIterationWF.connect(inputSpec, 'fixed_image',
                                           wimtdeformed, 'reference_image')

    ##  Shape Update Next =====
    ## Now  Average All input_images deformed images together to create an updated template average
    AvgDeformedImages = pe.Node(interface=AverageImages(),
                                name='AvgDeformedImages')
    AvgDeformedImages.inputs.dimension = 3
    AvgDeformedImages.inputs.output_average_image = str(
        iterationPhasePrefix) + '.nii.gz'
    AvgDeformedImages.inputs.normalize = True
    TemplateBuildSingleIterationWF.connect(wimtdeformed, "output_image",
                                           AvgDeformedImages, 'images')

    ## Now average all affine transforms together
    AvgAffineTransform = pe.Node(interface=AverageAffineTransform(),
                                 name='AvgAffineTransform')
    AvgAffineTransform.inputs.dimension = 3
    # NOTE(review): 'Avererage_' is a long-standing typo in the output file
    # name; preserved to keep node hashes and downstream paths stable.
    AvgAffineTransform.inputs.output_affine_transform = 'Avererage_' + str(
        iterationPhasePrefix) + '_Affine.h5'

    SplitCompositeTransform = pe.MapNode(
        interface=util.Function(
            function=SplitCompositeToComponentTransforms,
            input_names=['composite_transform_as_list'],
            output_names=['affine_component_list', 'warp_component_list']),
        iterfield=['composite_transform_as_list'],
        run_without_submitting=True,
        name='99_SplitCompositeTransform')
    TemplateBuildSingleIterationWF.connect(BeginANTS, 'composite_transform',
                                           SplitCompositeTransform,
                                           'composite_transform_as_list')
    ## PREVIOUS TemplateBuildSingleIterationWF.connect(BeginANTS, 'forward_transforms', SplitCompositeTransform, 'composite_transform_as_list')
    TemplateBuildSingleIterationWF.connect(SplitCompositeTransform,
                                           'affine_component_list',
                                           AvgAffineTransform, 'transforms')

    ## Now average the warp fields together
    AvgWarpImages = pe.Node(interface=AverageImages(), name='AvgWarpImages')
    AvgWarpImages.inputs.dimension = 3
    AvgWarpImages.inputs.output_average_image = str(
        iterationPhasePrefix) + 'warp.nii.gz'
    AvgWarpImages.inputs.normalize = True
    TemplateBuildSingleIterationWF.connect(SplitCompositeTransform,
                                           'warp_component_list',
                                           AvgWarpImages, 'images')

    ## Now average the images together
    ## TODO:  For now GradientStep is set to 0.25 as a hard coded default value.
    GradientStep = 0.25
    GradientStepWarpImage = pe.Node(interface=MultiplyImages(),
                                    name='GradientStepWarpImage')
    GradientStepWarpImage.inputs.dimension = 3
    # Negative scale: the shape update moves the template *against* the
    # average residual warp.
    GradientStepWarpImage.inputs.second_input = -1.0 * GradientStep
    GradientStepWarpImage.inputs.output_product_image = 'GradientStep0.25_' + str(
        iterationPhasePrefix) + '_warp.nii.gz'
    TemplateBuildSingleIterationWF.connect(AvgWarpImages,
                                           'output_average_image',
                                           GradientStepWarpImage,
                                           'first_input')

    ## Now create the new template shape based on the average of all deformed images
    UpdateTemplateShape = pe.Node(interface=ApplyTransforms(),
                                  name='UpdateTemplateShape')
    UpdateTemplateShape.inputs.invert_transform_flags = [True]
    UpdateTemplateShape.inputs.interpolation = 'Linear'
    # BUGFIX: was set on the Node object (silently ignored); must be .inputs.
    UpdateTemplateShape.inputs.default_value = 0

    TemplateBuildSingleIterationWF.connect(AvgDeformedImages,
                                           'output_average_image',
                                           UpdateTemplateShape,
                                           'reference_image')
    TemplateBuildSingleIterationWF.connect([
        (AvgAffineTransform, UpdateTemplateShape,
         [(('affine_transform', makeListOfOneElement), 'transforms')]),
    ])
    TemplateBuildSingleIterationWF.connect(GradientStepWarpImage,
                                           'output_product_image',
                                           UpdateTemplateShape, 'input_image')

    ApplyInvAverageAndFourTimesGradientStepWarpImage = pe.Node(
        interface=util.Function(
            function=MakeTransformListWithGradientWarps,
            input_names=['averageAffineTranform', 'gradientStepWarp'],
            output_names=['TransformListWithGradientWarps']),
        run_without_submitting=True,
        name='99_MakeTransformListWithGradientWarps')
    ApplyInvAverageAndFourTimesGradientStepWarpImage.inputs.ignore_exception = True

    TemplateBuildSingleIterationWF.connect(
        AvgAffineTransform, 'affine_transform',
        ApplyInvAverageAndFourTimesGradientStepWarpImage,
        'averageAffineTranform')
    TemplateBuildSingleIterationWF.connect(
        UpdateTemplateShape, 'output_image',
        ApplyInvAverageAndFourTimesGradientStepWarpImage, 'gradientStepWarp')

    ReshapeAverageImageWithShapeUpdate = pe.Node(
        interface=ApplyTransforms(), name='ReshapeAverageImageWithShapeUpdate')
    ReshapeAverageImageWithShapeUpdate.inputs.invert_transform_flags = [
        True, False, False, False, False
    ]
    ReshapeAverageImageWithShapeUpdate.inputs.interpolation = 'Linear'
    # BUGFIX: was set on the Node object (silently ignored); must be .inputs.
    ReshapeAverageImageWithShapeUpdate.inputs.default_value = 0
    ReshapeAverageImageWithShapeUpdate.inputs.output_image = 'ReshapeAverageImageWithShapeUpdate.nii.gz'
    TemplateBuildSingleIterationWF.connect(AvgDeformedImages,
                                           'output_average_image',
                                           ReshapeAverageImageWithShapeUpdate,
                                           'input_image')
    TemplateBuildSingleIterationWF.connect(AvgDeformedImages,
                                           'output_average_image',
                                           ReshapeAverageImageWithShapeUpdate,
                                           'reference_image')
    TemplateBuildSingleIterationWF.connect(
        ApplyInvAverageAndFourTimesGradientStepWarpImage,
        'TransformListWithGradientWarps', ReshapeAverageImageWithShapeUpdate,
        'transforms')
    TemplateBuildSingleIterationWF.connect(ReshapeAverageImageWithShapeUpdate,
                                           'output_image', outputSpec,
                                           'template')

    ######
    ######
    ######  Process all the passive deformed images in a way similar to the main image used for registration
    ######
    ######
    ######
    ##############################################
    ## Now warp all the ListOfPassiveImagesDictionaries images
    FlattenTransformAndImagesListNode = pe.Node(
        Function(function=FlattenTransformAndImagesList,
                 input_names=[
                     'ListOfPassiveImagesDictionaries', 'transforms',
                     'interpolationMapping', 'invert_transform_flags'
                 ],
                 output_names=[
                     'flattened_images', 'flattened_transforms',
                     'flattened_invert_transform_flags',
                     'flattened_image_nametypes',
                     'flattened_interpolation_type'
                 ]),
        run_without_submitting=True,
        name="99_FlattenTransformAndImagesList")

    GetPassiveImagesNode = pe.Node(interface=util.Function(
        function=GetPassiveImages,
        input_names=['ListOfImagesDictionaries', 'registrationImageTypes'],
        output_names=['ListOfPassiveImagesDictionaries']),
                                   run_without_submitting=True,
                                   name='99_GetPassiveImagesNode')
    TemplateBuildSingleIterationWF.connect(inputSpec,
                                           'ListOfImagesDictionaries',
                                           GetPassiveImagesNode,
                                           'ListOfImagesDictionaries')
    TemplateBuildSingleIterationWF.connect(inputSpec, 'registrationImageTypes',
                                           GetPassiveImagesNode,
                                           'registrationImageTypes')

    TemplateBuildSingleIterationWF.connect(GetPassiveImagesNode,
                                           'ListOfPassiveImagesDictionaries',
                                           FlattenTransformAndImagesListNode,
                                           'ListOfPassiveImagesDictionaries')
    TemplateBuildSingleIterationWF.connect(inputSpec, 'interpolationMapping',
                                           FlattenTransformAndImagesListNode,
                                           'interpolationMapping')
    TemplateBuildSingleIterationWF.connect(BeginANTS, 'composite_transform',
                                           FlattenTransformAndImagesListNode,
                                           'transforms')
    ## FlattenTransformAndImagesListNode.inputs.invert_transform_flags = [False,False,False,False,False,False]
    ## TODO: Please check of invert_transform_flags has a fixed number.
    ## PREVIOUS TemplateBuildSingleIterationWF.connect(BeginANTS, 'forward_invert_flags', FlattenTransformAndImagesListNode, 'invert_transform_flags')
    wimtPassivedeformed = pe.MapNode(interface=ApplyTransforms(),
                                     iterfield=[
                                         'transforms',
                                         'invert_transform_flags',
                                         'input_image', 'interpolation'
                                     ],
                                     name='wimtPassivedeformed')
    # BUGFIX: was set on the Node object (silently ignored); must be .inputs.
    wimtPassivedeformed.inputs.default_value = 0
    TemplateBuildSingleIterationWF.connect(AvgDeformedImages,
                                           'output_average_image',
                                           wimtPassivedeformed,
                                           'reference_image')
    TemplateBuildSingleIterationWF.connect(FlattenTransformAndImagesListNode,
                                           'flattened_interpolation_type',
                                           wimtPassivedeformed,
                                           'interpolation')
    TemplateBuildSingleIterationWF.connect(FlattenTransformAndImagesListNode,
                                           'flattened_images',
                                           wimtPassivedeformed, 'input_image')
    TemplateBuildSingleIterationWF.connect(FlattenTransformAndImagesListNode,
                                           'flattened_transforms',
                                           wimtPassivedeformed, 'transforms')
    TemplateBuildSingleIterationWF.connect(FlattenTransformAndImagesListNode,
                                           'flattened_invert_transform_flags',
                                           wimtPassivedeformed,
                                           'invert_transform_flags')

    RenestDeformedPassiveImagesNode = pe.Node(
        Function(function=RenestDeformedPassiveImages,
                 input_names=[
                     'deformedPassiveImages', 'flattened_image_nametypes',
                     'interpolationMapping'
                 ],
                 output_names=[
                     'nested_imagetype_list', 'outputAverageImageName_list',
                     'image_type_list', 'nested_interpolation_type'
                 ]),
        run_without_submitting=True,
        name="99_RenestDeformedPassiveImages")
    TemplateBuildSingleIterationWF.connect(inputSpec, 'interpolationMapping',
                                           RenestDeformedPassiveImagesNode,
                                           'interpolationMapping')
    TemplateBuildSingleIterationWF.connect(wimtPassivedeformed, 'output_image',
                                           RenestDeformedPassiveImagesNode,
                                           'deformedPassiveImages')
    TemplateBuildSingleIterationWF.connect(FlattenTransformAndImagesListNode,
                                           'flattened_image_nametypes',
                                           RenestDeformedPassiveImagesNode,
                                           'flattened_image_nametypes')
    ## Now  Average All passive input_images deformed images together to create an updated template average
    AvgDeformedPassiveImages = pe.MapNode(
        interface=AverageImages(),
        iterfield=['images', 'output_average_image'],
        name='AvgDeformedPassiveImages')
    AvgDeformedPassiveImages.inputs.dimension = 3
    AvgDeformedPassiveImages.inputs.normalize = False
    TemplateBuildSingleIterationWF.connect(RenestDeformedPassiveImagesNode,
                                           "nested_imagetype_list",
                                           AvgDeformedPassiveImages, 'images')
    TemplateBuildSingleIterationWF.connect(RenestDeformedPassiveImagesNode,
                                           "outputAverageImageName_list",
                                           AvgDeformedPassiveImages,
                                           'output_average_image')

    ## -- TODO:  Now need to reshape all the passive images as well
    ReshapeAveragePassiveImageWithShapeUpdate = pe.MapNode(
        interface=ApplyTransforms(),
        iterfield=[
            'input_image', 'reference_image', 'output_image', 'interpolation'
        ],
        name='ReshapeAveragePassiveImageWithShapeUpdate')
    ReshapeAveragePassiveImageWithShapeUpdate.inputs.invert_transform_flags = [
        True, False, False, False, False
    ]
    # BUGFIX: was set on the Node object (silently ignored); must be .inputs.
    ReshapeAveragePassiveImageWithShapeUpdate.inputs.default_value = 0
    TemplateBuildSingleIterationWF.connect(
        RenestDeformedPassiveImagesNode, 'nested_interpolation_type',
        ReshapeAveragePassiveImageWithShapeUpdate, 'interpolation')
    TemplateBuildSingleIterationWF.connect(
        RenestDeformedPassiveImagesNode, 'outputAverageImageName_list',
        ReshapeAveragePassiveImageWithShapeUpdate, 'output_image')
    TemplateBuildSingleIterationWF.connect(
        AvgDeformedPassiveImages, 'output_average_image',
        ReshapeAveragePassiveImageWithShapeUpdate, 'input_image')
    TemplateBuildSingleIterationWF.connect(
        AvgDeformedPassiveImages, 'output_average_image',
        ReshapeAveragePassiveImageWithShapeUpdate, 'reference_image')
    TemplateBuildSingleIterationWF.connect(
        ApplyInvAverageAndFourTimesGradientStepWarpImage,
        'TransformListWithGradientWarps',
        ReshapeAveragePassiveImageWithShapeUpdate, 'transforms')
    TemplateBuildSingleIterationWF.connect(
        ReshapeAveragePassiveImageWithShapeUpdate, 'output_image', outputSpec,
        'passive_deformed_templates')

    return TemplateBuildSingleIterationWF
예제 #18
0
    def distortion_correction_workflow(self):
        """Register the eddy-corrected dwi b0 to the T1w image and set up
        the transform application, appending both nodes to ``self.interfaces``.

        Replicates the first half of ANTs' ``antsIntermodalityIntrasubject.sh``
        (the template-mapping second half is not needed here):

            antsIntermodalityIntrasubject.sh -d 3 -i eddy_corr_brain_b0.nii.gz
              -r T1-nonGdE_brain_N4bfc_masked.nii.gz -x T1-nonGdE_brain_mask.nii.gz
              -w template -o B0toT1SmallWarp -t 2

        which internally runs:

            antsRegistration -d 3
              -m MI[anat(-r), scalar(-i),1,32,Regular,0.25]
              -c [1000x500x250x0,1e-7,5] -t Rigid[0.1] -f 8x4x2x1 -s 4x2x1x0 -u 1
              -m mattes[anat(-r), scalar(-i),1,32]
              -c [50x50x0,1e-7,5] -t SyN[0.1,3,0] -f 4x2x1 -s 2x1x0mm -u 1
              -z 1 --winsorize-image-intensities [0.005, 0.995] -o B0toT1Warp

        Flag key: -d dimension, -m metric, -c convergence, -t transform,
        -f shrink-factors, -s smoothing-sigmas, -u histogram matching,
        -z collapse output transforms, -o output prefix.
        The -r/-i/-x inputs are wired in by the workflow implementation.
        """
        registration = Node(Registration(), name="b0_T1w_Reg")
        registration.btn_string = 'dwi b0 to T1w Registration'
        # One entry (or sublist) per antsRegistration stage: [Rigid, SyN].
        registration_settings = {
            'dimension': 3,                                     # -d
            'metric': ['MI', 'Mattes'],                         # -m
            'metric_weight': [1, 1],
            'radius_or_number_of_bins': [32, 32],
            'sampling_strategy': ['Regular', None],
            'sampling_percentage': [0.25, None],
            'number_of_iterations': [[1000, 500, 250, 0],       # -c
                                     [50, 50, 0]],
            'convergence_threshold': [1e-7, 1e-7],
            'convergence_window_size': [5, 5],
            'transforms': ['Rigid', 'SyN'],                     # -t
            'transform_parameters': [(0.1, ), (0.1, 3, 0.0)],
            'shrink_factors': [[8, 4, 2, 1], [4, 2, 1]],        # -f
            'smoothing_sigmas': [[4, 2, 1, 0], [2, 1, 0]],      # -s
            'sigma_units': ['vox', 'mm'],
            'use_histogram_matching': [True, True],             # -u
            'collapse_output_transforms': True,                 # -z
            'winsorize_lower_quantile': 0.005,
            'winsorize_upper_quantile': 0.995,
            'output_transform_prefix': 'dwiToT1Warp',           # -o
            # nipype's antsApplyTransforms wrapper expects the transform list
            # reversed relative to antsRegistration's output, so emit a single
            # composite transform file instead.
            'write_composite_transform': True,
        }
        for trait_name, trait_value in registration_settings.items():
            setattr(registration.inputs, trait_name, trait_value)
        self.interfaces.append(registration)

        # The Registration node could emit the warped image directly, but we
        # keep a separate antsApplyTransforms node to mirror the original
        # (non-nipype) pipeline and to expose input_image_type:
        #   antsApplyTransforms -d 3 -e 3 -i data.nii.gz -o data_distcorr.nii.gz
        #     -r eddy_corr_brain_b0.nii.gz -t B0toT1SmallWarp1Warp.nii.gz
        #     -t B0toT1SmallWarp0GenericAffine.mat -v
        transform_node = Node(ApplyTransforms(), name="dwi_T1w_Tran")
        transform_node.btn_string = 'dwi to T1w Transformation'
        transform_node.inputs.dimension = 3         # -d
        transform_node.inputs.input_image_type = 3  # -e: time-series image
        # -i, -o, -r, -t are wired in by the workflow implementation.
        self.interfaces.append(transform_node)
def prep_for_fmriprep(bidsdir, rawdir, substr):
    """Convert one UKB subject's pipeline outputs into an fmriprep-style layout.

    Registers the T1 brain to the MNI152NLin2009cAsym template (writing the
    composite forward/inverse transforms under fmriprep naming), warps the
    unsmoothed BOLD series, its SBREF reference and its brain mask to standard
    space with the UKB-precomputed warp field, extracts mean WM/CSF/global
    signals plus motion parameters as confound regressors, and writes
    everything under BIDS-derivative file names in ``bidsdir``.

    Args:
        bidsdir: root of the output (fmriprep-style) directory tree.
        rawdir: directory containing the ``UKB_Pipeline/<subject>`` folders.
        substr: subject identifier as used in the UKB pipeline folder names.
    """
    # make subject dir, anat and func
    subid = substr.replace('-', '_').replace('_', '')
    anatdir = bidsdir + '/sub-' + subid + '/anat/'
    funcdir = bidsdir + '/sub-' + subid + '/func/'
    os.makedirs(anatdir, exist_ok=True)
    os.makedirs(funcdir, exist_ok=True)

    # get t1brain and MNI template
    t1brain = rawdir + '/UKB_Pipeline/%s/fMRI_nosmooth/rfMRI.ica/reg/highres.nii.gz' % substr
    template = str(
        get_template('MNI152NLin2009cAsym',
                     resolution=2,
                     desc='brain',
                     suffix='T1w',
                     extension=['.nii', '.nii.gz']))

    ## registered T1w to template for fmriprep standard
    ### this reg files may not be used
    tranformfile = tempfile.mkdtemp()
    reg = Registration()
    reg.inputs.fixed_image = template
    reg.inputs.moving_image = t1brain
    reg.inputs.output_transform_prefix = tranformfile + '/t12mni_'
    reg.inputs.transforms = ['Affine', 'SyN']
    reg.inputs.transform_parameters = [(2.0, ), (0.25, 3.0, 0.0)]
    reg.inputs.number_of_iterations = [[1500, 200], [100, 50, 30]]
    reg.inputs.dimension = 3
    reg.inputs.num_threads = 3
    # Composite .h5 transforms are what fmriprep ships, hence the flag below.
    reg.inputs.write_composite_transform = True
    reg.inputs.collapse_output_transforms = False
    reg.inputs.initialize_transforms_per_stage = True
    reg.inputs.metric = ['Mattes'] * 2
    reg.inputs.metric_weight = [
        1
    ] * 2  # Default (value ignored currently by ANTs)
    reg.inputs.radius_or_number_of_bins = [32] * 2
    reg.inputs.sampling_strategy = ['Random', None]
    reg.inputs.sampling_percentage = [0.05, None]
    reg.inputs.convergence_threshold = [1.e-8, 1.e-9]
    reg.inputs.convergence_window_size = [20] * 2
    reg.inputs.smoothing_sigmas = [[1, 0], [2, 1, 0]]
    reg.inputs.sigma_units = ['vox'] * 2
    reg.inputs.shrink_factors = [[2, 1], [3, 2, 1]]
    reg.inputs.use_estimate_learning_rate_once = [True, True]
    reg.inputs.use_histogram_matching = [True, True]  # This is the default
    reg.run()

    ## copy transform file to fmriprep directory
    mni2twtransform = anatdir + '/sub-' + subid + '_from-MNI152NLin2009cAsym_to-T1w_mode-image_xfm.h5'
    t1w2mnitransform = anatdir + '/sub-' + subid + '_from-T1w_to-MNI152NLin2009cAsym_mode-image_xfm.h5'
    copyfile(tranformfile + '/t12mni_Composite.h5', t1w2mnitransform)
    copyfile(tranformfile + '/t12mni_InverseComposite.h5', mni2twtransform)

    ### warp the non-processed/filtered/smooth bold to fmriprep

    ### now functional
    boldmask = rawdir + '/UKB_Pipeline/%s/fMRI_nosmooth/rfMRI.ica/mask.nii.gz' % substr
    boldref = rawdir + '/UKB_Pipeline/%s/fMRI_nosmooth/rfMRI_SBREF.nii.gz' % substr
    boldprep = rawdir + '/UKB_Pipeline/%s/fMRI_nosmooth/rfMRI.nii.gz' % substr

    # BUG FIX: the original chained assignments ('boldstd = reffile = ...',
    # 'maskstd = reffile = ...') rebound `reffile` to the boldstd and then the
    # maskstd path, so the SBREF warp wrote to the mask's path and was later
    # overwritten by the mask warp. Each output now gets its own path.
    reffile = tempfile.mkdtemp() + '/reffile.nii.gz'
    boldstd = tempfile.mkdtemp() + '/boldstd.nii.gz'
    maskstd = tempfile.mkdtemp() + '/maskstd.nii.gz'

    # UKB-precomputed func->standard warp, reused for all three images below.
    warpfield = rawdir + '/UKB_Pipeline/%s/fMRI_nosmooth/rfMRI.ica/reg/example_func2standard_warp.nii.gz' % substr

    # Warp the BOLD reference (SBREF) to standard space.
    aw = fsl.ApplyWarp()
    aw.inputs.in_file = boldref
    aw.inputs.ref_file = template
    aw.inputs.field_file = warpfield
    aw.inputs.out_file = reffile
    aw.inputs.output_type = 'NIFTI_GZ'
    aw.run()

    # Warp the 4D BOLD series; spline interpolation for the data itself.
    aw1 = fsl.ApplyWarp()
    aw1.inputs.interp = 'spline'
    aw1.inputs.ref_file = template
    aw1.inputs.field_file = warpfield
    aw1.inputs.in_file = boldprep
    aw1.inputs.out_file = boldstd
    aw1.inputs.output_type = 'NIFTI_GZ'
    aw1.run()

    # Warp the brain mask (default trilinear interpolation, as before).
    aw2 = fsl.ApplyWarp()
    aw2.inputs.in_file = boldmask
    aw2.inputs.ref_file = template
    aw2.inputs.field_file = warpfield
    aw2.inputs.out_file = maskstd
    aw2.inputs.output_type = 'NIFTI_GZ'
    aw2.run()

    # TR is the last zoom of the 4D header.
    tr = nb.load(boldprep).header.get_zooms()[-1]

    jsontis = {
        "RepetitionTime": np.float64(tr),
        "TaskName": 'rest',
        "SkullStripped": False,
    }

    jsmaks = {"mask": True}

    # new BIDS-derivative names
    preprocbold = funcdir + '/sub-' + subid + '_task-rest_space-MNI152NLin2009cAsym_desc-preproc_bold.nii.gz'
    preprocboldjson = funcdir + '/sub-' + subid + '_task-rest_space-MNI152NLin2009cAsym_desc-preproc_bold.json'
    preprocboldref = funcdir + '/sub-' + subid + '_task-rest_space-MNI152NLin2009cAsym_boldref.nii.gz'
    preprocmask = funcdir + '/sub-' + subid + '_task-rest_space-MNI152NLin2009cAsym_desc-brain_mask.nii.gz'
    preprocmaskjson = funcdir + '/sub-' + subid + '_task-rest_space-MNI152NLin2009cAsym_desc-brain_mask.json'

    copyfile(maskstd, preprocmask)
    copyfile(reffile, preprocboldref)
    copyfile(boldstd, preprocbold)
    writejson(jsontis, preprocboldjson)
    writejson(jsmaks, preprocmaskjson)

    # get wm and csf mask to extract mean signals for regressors
    ### first warp the anatomical to bold space
    wmask = rawdir + '/UKB_Pipeline/%s/T1/T1_fast/T1_brain_pve_2.nii.gz' % substr
    csfmask = rawdir + '/UKB_Pipeline/%s/T1/T1_fast/T1_brain_pve_0.nii.gz' % substr

    t2funcwmask = tempfile.mkdtemp() + '/wmask.nii.gz'
    t2funcwcsf = tempfile.mkdtemp() + '/csf.nii.gz'

    # UKB-precomputed structural->func affine, reused for WM and CSF below.
    anat2funcmat = rawdir + '/UKB_Pipeline/%s/fMRI_nosmooth/rfMRI.ica/reg/highres2example_func.mat' % substr

    aw = fsl.preprocess.ApplyXFM()
    aw.inputs.in_file = wmask
    aw.inputs.reference = boldref
    aw.inputs.in_matrix_file = anat2funcmat
    aw.inputs.out_file = t2funcwmask
    aw.inputs.apply_xfm = True
    aw.inputs.interp = 'nearestneighbour'
    aw.inputs.output_type = 'NIFTI_GZ'
    aw.run()

    aw2 = fsl.preprocess.ApplyXFM()
    aw2.inputs.in_file = csfmask
    aw2.inputs.reference = boldref
    aw2.inputs.in_matrix_file = anat2funcmat
    aw2.inputs.out_file = t2funcwcsf
    aw2.inputs.apply_xfm = True
    aw2.inputs.interp = 'nearestneighbour'
    aw2.inputs.output_type = 'NIFTI_GZ'
    aw2.run()

    # binarize partial-volume estimates (keep only near-pure voxels)
    # and extract mean signals in native func space
    wmbin = nb.load(t2funcwmask).get_fdata()
    wmbin[wmbin < 0.99999] = 0

    csfbin = nb.load(t2funcwcsf).get_fdata()
    csfbin[csfbin < 0.99999] = 0

    maskbin = nb.load(boldmask).get_fdata()

    bolddata = nb.load(boldprep).get_fdata()
    wm_mean = bolddata[wmbin > 0, :].mean(axis=0)
    csf_mean = bolddata[csfbin > 0, :].mean(axis=0)
    global_mean = bolddata[maskbin > 0, :].mean(axis=0)

    #### combine all the regressors
    mcfile = rawdir + '/UKB_Pipeline/%s/fMRI_nosmooth/rfMRI.ica/mc/prefiltered_func_data_mcf.par' % substr
    rsmdfile = rawdir + '/UKB_Pipeline/%s/fMRI_nosmooth/rfMRI.ica/mc/prefiltered_func_data_mcf_abs.rms' % substr
    motionfile = np.loadtxt(mcfile)

    rsmd = np.loadtxt(rsmdfile)
    # MCFLIRT .par column order is rotations then translations.
    motionparam = pd.DataFrame(
        motionfile,
        columns=['rot_x', 'rot_y', 'rot_z', 'trans_x', 'trans_y', 'trans_z'])

    otherparam = pd.DataFrame({
        'global_signal': global_mean,
        'white_matter': wm_mean,
        'csf': csf_mean,
        'rmsd': rsmd
    })

    regressors = pd.concat([motionparam, otherparam], axis=1)
    jsonreg = {'regressor': 'not'}
    regcsv = funcdir + '/sub-' + subid + '_task-rest_desc-confounds_timeseries.tsv'
    regjson = funcdir + '/sub-' + subid + '_task-rest_desc-confounds_timeseries.json'

    regressors.to_csv(regcsv, index=False, sep='\t')
    writejson(jsonreg, regjson)
예제 #20
0
def mk_w_angio(freesurfer_dir, angiogram, out_dir):
    """Assemble the angiogram-to-structural registration workflow.

    Volume 0 of the (4D) angiogram is rigidly registered with ANTs to the
    FreeSurfer T1; volume 1 is resampled with the resulting transform,
    masked by the binarized FreeSurfer brain, renamed to ``angiogram.nii.gz``
    and sunk (together with the converted brain) into ``out_dir``.
    Returns the constructed nipype ``Workflow``.
    """
    n_input = Node(
        IdentityInterface(fields=['fs_dir', 'fs_subj', 'angiogram', 'out_dir']),
        name='input')
    n_input.inputs.fs_dir = str(freesurfer_dir.parent)
    n_input.inputs.fs_subj = freesurfer_dir.name
    n_input.inputs.angiogram = str(angiogram)
    n_input.inputs.out_dir = str(out_dir)

    # Rigid ANTs registration: angiogram volume 0 -> FreeSurfer T1.
    n_coreg = Node(Registration(), name='antsReg')
    coreg = n_coreg.inputs
    coreg.num_threads = 40
    coreg.use_histogram_matching = False
    coreg.dimension = 3
    coreg.winsorize_lower_quantile = 0.001
    coreg.winsorize_upper_quantile = 0.999
    coreg.float = True
    coreg.interpolation = 'Linear'
    coreg.transforms = ['Rigid']
    coreg.transform_parameters = [[0.1]]
    coreg.metric = ['MI']
    coreg.metric_weight = [1]
    coreg.radius_or_number_of_bins = [32]
    coreg.sampling_strategy = ['Regular']
    coreg.sampling_percentage = [0.5]
    coreg.sigma_units = ['mm']
    coreg.convergence_threshold = [1e-6]
    coreg.smoothing_sigmas = [[1, 0]]
    coreg.shrink_factors = [[1, 1]]
    coreg.convergence_window_size = [10]
    coreg.number_of_iterations = [[250, 100]]
    coreg.output_warped_image = True
    coreg.output_inverse_warped_image = True
    coreg.output_transform_prefix = 'angio_to_struct'

    # Apply the rigid transform to angiogram volume 1.
    n_apply = Node(ApplyTransforms(), name='ants_apply')
    n_apply.inputs.dimension = 3
    n_apply.inputs.interpolation = 'Linear'
    n_apply.inputs.default_value = 0

    # FreeSurfer brain (mgz) -> nii.gz, then binarize to a mask.
    n_convert = Node(MRIConvert(), 'convert')
    n_convert.inputs.out_type = 'niigz'

    n_binarize = Node(Threshold(), 'make_mask')
    n_binarize.inputs.thresh = 0.1
    n_binarize.inputs.args = '-bin'

    n_mask = Node(BinaryMaths(), 'mask')
    n_mask.inputs.operation = 'mul'

    n_veins = Node(Rename(), 'rename_veins')
    n_veins.inputs.format_string = 'angiogram.nii.gz'

    # NOTE: this static base_directory is superseded at run time by the
    # out_dir connection below.
    n_sink = Node(DataSink(), 'sink')
    n_sink.inputs.base_directory = '/Fridge/users/giovanni/projects/intraop/loenen/angiogram'
    n_sink.inputs.remove_dest_dir = True

    fs = Node(FreeSurferSource(), 'freesurfer')

    # Split the 4D angiogram along time so individual volumes can be picked.
    n_split = Node(Split(), 'split_pca')
    n_split.inputs.dimension = 't'

    w = Workflow('tmp_angiogram')
    w.base_dir = str(out_dir)

    w.connect([
        (n_input, fs, [('fs_dir', 'subjects_dir'),
                       ('fs_subj', 'subject_id')]),
        (n_input, n_split, [('angiogram', 'in_file')]),
        (n_split, n_coreg, [(('out_files', select_file, 0), 'moving_image')]),
        (fs, n_coreg, [('T1', 'fixed_image')]),
        (n_coreg, n_apply, [('forward_transforms', 'transforms')]),
        (n_split, n_apply, [(('out_files', select_file, 1), 'input_image')]),
        (fs, n_apply, [('T1', 'reference_image')]),
        (fs, n_convert, [('brain', 'in_file')]),
        (n_convert, n_binarize, [('out_file', 'in_file')]),
        (n_apply, n_mask, [('output_image', 'in_file')]),
        (n_binarize, n_mask, [('out_file', 'operand_file')]),
        (n_mask, n_veins, [('out_file', 'in_file')]),
        (n_input, n_sink, [('out_dir', 'base_directory')]),
        (n_veins, n_sink, [('out_file', '@angiogram')]),
        (n_convert, n_sink, [('out_file', '@brain')]),
    ])

    return w
예제 #21
0
def builder(subject_id,
            subId,
            project_dir,
            data_dir,
            output_dir,
            output_final_dir,
            output_interm_dir,
            layout,
            anat=None,
            funcs=None,
            fmaps=None,
            task_name='',
            session=None,
            apply_trim=False,
            apply_dist_corr=False,
            apply_smooth=False,
            apply_filter=False,
            mni_template='2mm',
            apply_n4=True,
            ants_threads=8,
            readable_crash_files=False,
            write_logs=True):
    """
    Core function that returns a workflow. See wfmaker for more details.

    Args:
        subject_id: name of subject folder for final outputted sub-folder name
        subId: abbreviate name of subject for intermediate outputted sub-folder name
        project_dir: full path to root of project
        data_dir: full path to raw data files
        output_dir: upper level output dir (others will be nested within this)
        output_final_dir: final preprocessed sub-dir name
        output_interm_dir: intermediate preprcess sub-dir name
        layout: BIDS layout instance
    """

    ##################
    ### PATH SETUP ###
    ##################
    if session is not None:
        session = int(session)
        if session < 10:
            session = '0' + str(session)
        else:
            session = str(session)

    # Set MNI template
    MNItemplate = os.path.join(get_resource_path(),
                               'MNI152_T1_' + mni_template + '_brain.nii.gz')
    MNImask = os.path.join(get_resource_path(),
                           'MNI152_T1_' + mni_template + '_brain_mask.nii.gz')
    MNItemplatehasskull = os.path.join(get_resource_path(),
                                       'MNI152_T1_' + mni_template + '.nii.gz')

    # Set ANTs files
    bet_ants_template = os.path.join(get_resource_path(),
                                     'OASIS_template.nii.gz')
    bet_ants_prob_mask = os.path.join(
        get_resource_path(), 'OASIS_BrainCerebellumProbabilityMask.nii.gz')
    bet_ants_registration_mask = os.path.join(
        get_resource_path(), 'OASIS_BrainCerebellumRegistrationMask.nii.gz')

    #################################
    ### NIPYPE IMPORTS AND CONFIG ###
    #################################
    # Update nipype global config because workflow.config[] = ..., doesn't seem to work
    # Can't store nipype config/rc file in container anyway so set them globaly before importing and setting up workflow as suggested here: http://nipype.readthedocs.io/en/latest/users/config_file.html#config-file

    # Create subject's intermediate directory before configuring nipype and the workflow because that's where we'll save log files in addition to intermediate files
    if not os.path.exists(os.path.join(output_interm_dir, subId, 'logs')):
        os.makedirs(os.path.join(output_interm_dir, subId, 'logs'))
    log_dir = os.path.join(output_interm_dir, subId, 'logs')
    from nipype import config
    if readable_crash_files:
        cfg = dict(execution={'crashfile_format': 'txt'})
        config.update_config(cfg)
    config.update_config({
        'logging': {
            'log_directory': log_dir,
            'log_to_file': write_logs
        },
        'execution': {
            'crashdump_dir': log_dir
        }
    })
    from nipype import logging
    logging.update_logging(config)

    # Now import everything else
    from nipype.interfaces.io import DataSink
    from nipype.interfaces.utility import Merge, IdentityInterface
    from nipype.pipeline.engine import Node, Workflow
    from nipype.interfaces.nipy.preprocess import ComputeMask
    from nipype.algorithms.rapidart import ArtifactDetect
    from nipype.interfaces.ants.segmentation import BrainExtraction, N4BiasFieldCorrection
    from nipype.interfaces.ants import Registration, ApplyTransforms
    from nipype.interfaces.fsl import MCFLIRT, TOPUP, ApplyTOPUP
    from nipype.interfaces.fsl.maths import MeanImage
    from nipype.interfaces.fsl import Merge as MERGE
    from nipype.interfaces.fsl.utils import Smooth
    from nipype.interfaces.nipy.preprocess import Trim
    from .interfaces import Plot_Coregistration_Montage, Plot_Quality_Control, Plot_Realignment_Parameters, Create_Covariates, Down_Sample_Precision, Create_Encoding_File, Filter_In_Mask

    ##################
    ### INPUT NODE ###
    ##################

    # Turn functional file list into interable Node
    func_scans = Node(IdentityInterface(fields=['scan']), name='func_scans')
    func_scans.iterables = ('scan', funcs)

    # Get TR for use in filtering below; we're assuming all BOLD runs have the same TR
    tr_length = layout.get_metadata(funcs[0])['RepetitionTime']

    #####################################
    ## TRIM ##
    #####################################
    if apply_trim:
        trim = Node(Trim(), name='trim')
        trim.inputs.begin_index = apply_trim

    #####################################
    ## DISTORTION CORRECTION ##
    #####################################

    if apply_dist_corr:
        # Get fmap file locations
        fmaps = [
            f.filename for f in layout.get(
                subject=subId, modality='fmap', extensions='.nii.gz')
        ]
        if not fmaps:
            raise IOError(
                "Distortion Correction requested but field map scans not found..."
            )

        # Get fmap metadata
        totalReadoutTimes, measurements, fmap_pes = [], [], []

        for i, fmap in enumerate(fmaps):
            # Grab total readout time for each fmap
            totalReadoutTimes.append(
                layout.get_metadata(fmap)['TotalReadoutTime'])

            # Grab measurements (for some reason pyBIDS doesn't grab dcm_meta... fields from side-car json file and json.load, doesn't either; so instead just read the header using nibabel to determine number of scans)
            measurements.append(nib.load(fmap).header['dim'][4])

            # Get phase encoding direction
            fmap_pe = layout.get_metadata(fmap)["PhaseEncodingDirection"]
            fmap_pes.append(fmap_pe)

        encoding_file_writer = Node(interface=Create_Encoding_File(),
                                    name='create_encoding')
        encoding_file_writer.inputs.totalReadoutTimes = totalReadoutTimes
        encoding_file_writer.inputs.fmaps = fmaps
        encoding_file_writer.inputs.fmap_pes = fmap_pes
        encoding_file_writer.inputs.measurements = measurements
        encoding_file_writer.inputs.file_name = 'encoding_file.txt'

        merge_to_file_list = Node(interface=Merge(2),
                                  infields=['in1', 'in2'],
                                  name='merge_to_file_list')
        merge_to_file_list.inputs.in1 = fmaps[0]
        merge_to_file_list.inputs.in1 = fmaps[1]

        # Merge AP and PA distortion correction scans
        merger = Node(interface=MERGE(dimension='t'), name='merger')
        merger.inputs.output_type = 'NIFTI_GZ'
        merger.inputs.in_files = fmaps
        merger.inputs.merged_file = 'merged_epi.nii.gz'

        # Create distortion correction map
        topup = Node(interface=TOPUP(), name='topup')
        topup.inputs.output_type = 'NIFTI_GZ'

        # Apply distortion correction to other scans
        apply_topup = Node(interface=ApplyTOPUP(), name='apply_topup')
        apply_topup.inputs.output_type = 'NIFTI_GZ'
        apply_topup.inputs.method = 'jac'
        apply_topup.inputs.interp = 'spline'

    ###################################
    ### REALIGN ###
    ###################################
    realign_fsl = Node(MCFLIRT(), name="realign")
    realign_fsl.inputs.cost = 'mutualinfo'
    realign_fsl.inputs.mean_vol = True
    realign_fsl.inputs.output_type = 'NIFTI_GZ'
    realign_fsl.inputs.save_mats = True
    realign_fsl.inputs.save_rms = True
    realign_fsl.inputs.save_plots = True

    ###################################
    ### MEAN EPIs ###
    ###################################
    # For coregistration after realignment
    mean_epi = Node(MeanImage(), name='mean_epi')
    mean_epi.inputs.dimension = 'T'

    # For after normalization is done to plot checks
    mean_norm_epi = Node(MeanImage(), name='mean_norm_epi')
    mean_norm_epi.inputs.dimension = 'T'

    ###################################
    ### MASK, ART, COV CREATION ###
    ###################################
    compute_mask = Node(ComputeMask(), name='compute_mask')
    compute_mask.inputs.m = .05

    art = Node(ArtifactDetect(), name='art')
    art.inputs.use_differences = [True, False]
    art.inputs.use_norm = True
    art.inputs.norm_threshold = 1
    art.inputs.zintensity_threshold = 3
    art.inputs.mask_type = 'file'
    art.inputs.parameter_source = 'FSL'

    make_cov = Node(Create_Covariates(), name='make_cov')

    ################################
    ### N4 BIAS FIELD CORRECTION ###
    ################################
    if apply_n4:
        n4_correction = Node(N4BiasFieldCorrection(), name='n4_correction')
        n4_correction.inputs.copy_header = True
        n4_correction.inputs.save_bias = False
        n4_correction.inputs.num_threads = ants_threads
        n4_correction.inputs.input_image = anat

    ###################################
    ### BRAIN EXTRACTION ###
    ###################################
    brain_extraction_ants = Node(BrainExtraction(), name='brain_extraction')
    brain_extraction_ants.inputs.dimension = 3
    brain_extraction_ants.inputs.use_floatingpoint_precision = 1
    brain_extraction_ants.inputs.num_threads = ants_threads
    brain_extraction_ants.inputs.brain_probability_mask = bet_ants_prob_mask
    brain_extraction_ants.inputs.keep_temporary_files = 1
    brain_extraction_ants.inputs.brain_template = bet_ants_template
    brain_extraction_ants.inputs.extraction_registration_mask = bet_ants_registration_mask
    brain_extraction_ants.inputs.out_prefix = 'bet'

    ###################################
    ### COREGISTRATION ###
    ###################################
    coregistration = Node(Registration(), name='coregistration')
    coregistration.inputs.float = False
    coregistration.inputs.output_transform_prefix = "meanEpi2highres"
    coregistration.inputs.transforms = ['Rigid']
    coregistration.inputs.transform_parameters = [(0.1, ), (0.1, )]
    coregistration.inputs.number_of_iterations = [[1000, 500, 250, 100]]
    coregistration.inputs.dimension = 3
    coregistration.inputs.num_threads = ants_threads
    coregistration.inputs.write_composite_transform = True
    coregistration.inputs.collapse_output_transforms = True
    coregistration.inputs.metric = ['MI']
    coregistration.inputs.metric_weight = [1]
    coregistration.inputs.radius_or_number_of_bins = [32]
    coregistration.inputs.sampling_strategy = ['Regular']
    coregistration.inputs.sampling_percentage = [0.25]
    coregistration.inputs.convergence_threshold = [1e-08]
    coregistration.inputs.convergence_window_size = [10]
    coregistration.inputs.smoothing_sigmas = [[3, 2, 1, 0]]
    coregistration.inputs.sigma_units = ['mm']
    coregistration.inputs.shrink_factors = [[4, 3, 2, 1]]
    coregistration.inputs.use_estimate_learning_rate_once = [True]
    coregistration.inputs.use_histogram_matching = [False]
    coregistration.inputs.initial_moving_transform_com = True
    coregistration.inputs.output_warped_image = True
    coregistration.inputs.winsorize_lower_quantile = 0.01
    coregistration.inputs.winsorize_upper_quantile = 0.99

    ###################################
    ### NORMALIZATION ###
    ###################################
    # Settings Explanations
    # Only a few key settings are worth adjusting and most others relate to how ANTs optimizer starts or iterates and won't make a ton of difference
    # Brian Avants referred to these settings as the last "best tested" when he was aligning fMRI data: https://github.com/ANTsX/ANTsRCore/blob/master/R/antsRegistration.R#L275
    # Things that matter the most:
    # smoothing_sigmas:
    # how much gaussian smoothing to apply when performing registration, probably want the upper limit of this to match the resolution that the data is collected at e.g. 3mm
    # Old settings [[3,2,1,0]]*3
    # shrink_factors
    # The coarseness with which to do registration
    # Old settings [[8,4,2,1]] * 3
    # >= 8 may result is some problems causing big chunks of cortex with little fine grain spatial structure to be moved to other parts of cortex
    # Other settings
    # transform_parameters:
    # how much regularization to do for fitting that transformation
    # for syn this pertains to both the gradient regularization term, and the flow, and elastic terms. Leave the syn settings alone as they seem to be the most well tested across published data sets
    # radius_or_number_of_bins
    # This is the bin size for MI metrics and 32 is probably adequate for most use cases. Increasing this might increase precision (e.g. to 64) but takes exponentially longer
    # use_histogram_matching
    # Use image intensity distribution to guide registration
    # Leave it on for within modality registration (e.g. T1 -> MNI), but off for between modality registration (e.g. EPI -> T1)
    # convergence_threshold
    # threshold for optimizer
    # convergence_window_size
    # how many samples should optimizer average to compute threshold?
    # sampling_strategy
    # what strategy should ANTs use to initialize the transform. Regular here refers to approximately random sampling around the center of the image mass

    normalization = Node(Registration(), name='normalization')
    normalization.inputs.float = False
    normalization.inputs.collapse_output_transforms = True
    normalization.inputs.convergence_threshold = [1e-06]
    normalization.inputs.convergence_window_size = [10]
    normalization.inputs.dimension = 3
    normalization.inputs.fixed_image = MNItemplate
    normalization.inputs.initial_moving_transform_com = True
    normalization.inputs.metric = ['MI', 'MI', 'CC']
    normalization.inputs.metric_weight = [1.0] * 3
    normalization.inputs.number_of_iterations = [[1000, 500, 250, 100],
                                                 [1000, 500, 250, 100],
                                                 [100, 70, 50, 20]]
    normalization.inputs.num_threads = ants_threads
    normalization.inputs.output_transform_prefix = 'anat2template'
    normalization.inputs.output_inverse_warped_image = True
    normalization.inputs.output_warped_image = True
    normalization.inputs.radius_or_number_of_bins = [32, 32, 4]
    normalization.inputs.sampling_percentage = [0.25, 0.25, 1]
    normalization.inputs.sampling_strategy = ['Regular', 'Regular', 'None']
    normalization.inputs.shrink_factors = [[8, 4, 2, 1]] * 3
    normalization.inputs.sigma_units = ['vox'] * 3
    normalization.inputs.smoothing_sigmas = [[3, 2, 1, 0]] * 3
    normalization.inputs.transforms = ['Rigid', 'Affine', 'SyN']
    normalization.inputs.transform_parameters = [(0.1, ), (0.1, ),
                                                 (0.1, 3.0, 0.0)]
    normalization.inputs.use_histogram_matching = True
    normalization.inputs.winsorize_lower_quantile = 0.005
    normalization.inputs.winsorize_upper_quantile = 0.995
    normalization.inputs.write_composite_transform = True

    # NEW SETTINGS (need to be adjusted; specifically shink_factors and smoothing_sigmas need to be the same length)
    # normalization = Node(Registration(), name='normalization')
    # normalization.inputs.float = False
    # normalization.inputs.collapse_output_transforms = True
    # normalization.inputs.convergence_threshold = [1e-06, 1e-06, 1e-07]
    # normalization.inputs.convergence_window_size = [10]
    # normalization.inputs.dimension = 3
    # normalization.inputs.fixed_image = MNItemplate
    # normalization.inputs.initial_moving_transform_com = True
    # normalization.inputs.metric = ['MI', 'MI', 'CC']
    # normalization.inputs.metric_weight = [1.0]*3
    # normalization.inputs.number_of_iterations = [[1000, 500, 250, 100],
    #                                              [1000, 500, 250, 100],
    #                                              [100, 70, 50, 20]]
    # normalization.inputs.num_threads = ants_threads
    # normalization.inputs.output_transform_prefix = 'anat2template'
    # normalization.inputs.output_inverse_warped_image = True
    # normalization.inputs.output_warped_image = True
    # normalization.inputs.radius_or_number_of_bins = [32, 32, 4]
    # normalization.inputs.sampling_percentage = [0.25, 0.25, 1]
    # normalization.inputs.sampling_strategy = ['Regular',
    #                                           'Regular',
    #                                           'None']
    # normalization.inputs.shrink_factors = [[4, 3, 2, 1]]*3
    # normalization.inputs.sigma_units = ['vox']*3
    # normalization.inputs.smoothing_sigmas = [[2, 1], [2, 1], [3, 2, 1, 0]]
    # normalization.inputs.transforms = ['Rigid', 'Affine', 'SyN']
    # normalization.inputs.transform_parameters = [(0.1,),
    #                                              (0.1,),
    #                                              (0.1, 3.0, 0.0)]
    # normalization.inputs.use_histogram_matching = True
    # normalization.inputs.winsorize_lower_quantile = 0.005
    # normalization.inputs.winsorize_upper_quantile = 0.995
    # normalization.inputs.write_composite_transform = True

    ###################################
    ### APPLY TRANSFORMS AND SMOOTH ###
    ###################################
    merge_transforms = Node(Merge(2),
                            iterfield=['in2'],
                            name='merge_transforms')

    # Used for epi -> mni, via (coreg + norm)
    apply_transforms = Node(ApplyTransforms(),
                            iterfield=['input_image'],
                            name='apply_transforms')
    apply_transforms.inputs.input_image_type = 3
    apply_transforms.inputs.float = False
    apply_transforms.inputs.num_threads = 12
    apply_transforms.inputs.environ = {}
    apply_transforms.inputs.interpolation = 'BSpline'
    apply_transforms.inputs.invert_transform_flags = [False, False]
    apply_transforms.inputs.reference_image = MNItemplate

    # Used for t1 segmented -> mni, via (norm)
    apply_transform_seg = Node(ApplyTransforms(), name='apply_transform_seg')
    apply_transform_seg.inputs.input_image_type = 3
    apply_transform_seg.inputs.float = False
    apply_transform_seg.inputs.num_threads = 12
    apply_transform_seg.inputs.environ = {}
    apply_transform_seg.inputs.interpolation = 'MultiLabel'
    apply_transform_seg.inputs.invert_transform_flags = [False]
    apply_transform_seg.inputs.reference_image = MNItemplate

    ###################################
    ### PLOTS ###
    ###################################
    plot_realign = Node(Plot_Realignment_Parameters(), name="plot_realign")
    plot_qa = Node(Plot_Quality_Control(), name="plot_qa")
    plot_normalization_check = Node(Plot_Coregistration_Montage(),
                                    name="plot_normalization_check")
    plot_normalization_check.inputs.canonical_img = MNItemplatehasskull

    ############################################
    ### FILTER, SMOOTH, DOWNSAMPLE PRECISION ###
    ############################################
    # Use cosanlab_preproc for down sampling
    down_samp = Node(Down_Sample_Precision(), name="down_samp")

    # Use FSL for smoothing
    if apply_smooth:
        smooth = Node(Smooth(), name='smooth')
        if isinstance(apply_smooth, list):
            smooth.iterables = ("fwhm", apply_smooth)
        elif isinstance(apply_smooth, int) or isinstance(apply_smooth, float):
            smooth.inputs.fwhm = apply_smooth
        else:
            raise ValueError("apply_smooth must be a list or int/float")

    # Use cosanlab_preproc for low-pass filtering
    if apply_filter:
        lp_filter = Node(Filter_In_Mask(), name='lp_filter')
        lp_filter.inputs.mask = MNImask
        lp_filter.inputs.sampling_rate = tr_length
        lp_filter.inputs.high_pass_cutoff = 0
        if isinstance(apply_filter, list):
            lp_filter.iterables = ("low_pass_cutoff", apply_filter)
        elif isinstance(apply_filter, int) or isinstance(apply_filter, float):
            lp_filter.inputs.low_pass_cutoff = apply_filter
        else:
            raise ValueError("apply_filter must be a list or int/float")

    ###################
    ### OUTPUT NODE ###
    ###################
    # Collect all final outputs in the output dir and get rid of file name additions
    datasink = Node(DataSink(), name='datasink')
    if session:
        datasink.inputs.base_directory = os.path.join(output_final_dir,
                                                      subject_id)
        datasink.inputs.container = 'ses-' + session
    else:
        datasink.inputs.base_directory = output_final_dir
        datasink.inputs.container = subject_id

    # Remove substitutions
    data_dir_parts = data_dir.split('/')[1:]
    if session:
        prefix = ['_scan_'] + data_dir_parts + [subject_id] + [
            'ses-' + session
        ] + ['func']
    else:
        prefix = ['_scan_'] + data_dir_parts + [subject_id] + ['func']
    func_scan_names = [os.path.split(elem)[-1] for elem in funcs]
    to_replace = []
    for elem in func_scan_names:
        bold_name = elem.split(subject_id + '_')[-1]
        bold_name = bold_name.split('.nii.gz')[0]
        to_replace.append(('..'.join(prefix + [elem]), bold_name))
    datasink.inputs.substitutions = to_replace

    #####################
    ### INIT WORKFLOW ###
    #####################
    # If we have sessions provide the full path to the subject's intermediate directory
    # and only rely on workflow init to create the session container *within* that directory
    # Otherwise just point to the intermediate directory and let the workflow init create the subject container within the intermediate directory
    if session:
        workflow = Workflow(name='ses_' + session)
        workflow.base_dir = os.path.join(output_interm_dir, subId)
    else:
        workflow = Workflow(name=subId)
        workflow.base_dir = output_interm_dir

    ############################
    ######### PART (1a) #########
    # func -> discorr -> trim -> realign
    # OR
    # func -> trim -> realign
    # OR
    # func -> discorr -> realign
    # OR
    # func -> realign
    ############################
    if apply_dist_corr:
        workflow.connect([(encoding_file_writer, topup, [('encoding_file',
                                                          'encoding_file')]),
                          (encoding_file_writer, apply_topup,
                           [('encoding_file', 'encoding_file')]),
                          (merger, topup, [('merged_file', 'in_file')]),
                          (func_scans, apply_topup, [('scan', 'in_files')]),
                          (topup, apply_topup,
                           [('out_fieldcoef', 'in_topup_fieldcoef'),
                            ('out_movpar', 'in_topup_movpar')])])
        if apply_trim:
            # Dist Corr + Trim
            workflow.connect([(apply_topup, trim, [('out_corrected', 'in_file')
                                                   ]),
                              (trim, realign_fsl, [('out_file', 'in_file')])])
        else:
            # Dist Corr + No Trim
            workflow.connect([(apply_topup, realign_fsl, [('out_corrected',
                                                           'in_file')])])
    else:
        if apply_trim:
            # No Dist Corr + Trim
            workflow.connect([(func_scans, trim, [('scan', 'in_file')]),
                              (trim, realign_fsl, [('out_file', 'in_file')])])
        else:
            # No Dist Corr + No Trim
            workflow.connect([
                (func_scans, realign_fsl, [('scan', 'in_file')]),
            ])

    ############################
    ######### PART (1n) #########
    # anat -> N4 -> bet
    # OR
    # anat -> bet
    ############################
    if apply_n4:
        workflow.connect([(n4_correction, brain_extraction_ants,
                           [('output_image', 'anatomical_image')])])
    else:
        brain_extraction_ants.inputs.anatomical_image = anat

    ##########################################
    ############### PART (2) #################
    # realign -> coreg -> mni (via t1)
    # t1 -> mni
    # covariate creation
    # plot creation
    ###########################################

    workflow.connect([
        (realign_fsl, plot_realign, [('par_file', 'realignment_parameters')]),
        (realign_fsl, plot_qa, [('out_file', 'dat_img')]),
        (realign_fsl, art, [('out_file', 'realigned_files'),
                            ('par_file', 'realignment_parameters')]),
        (realign_fsl, mean_epi, [('out_file', 'in_file')]),
        (realign_fsl, make_cov, [('par_file', 'realignment_parameters')]),
        (mean_epi, compute_mask, [('out_file', 'mean_volume')]),
        (compute_mask, art, [('brain_mask', 'mask_file')]),
        (art, make_cov, [('outlier_files', 'spike_id')]),
        (art, plot_realign, [('outlier_files', 'outliers')]),
        (plot_qa, make_cov, [('fd_outliers', 'fd_outliers')]),
        (brain_extraction_ants, coregistration, [('BrainExtractionBrain',
                                                  'fixed_image')]),
        (mean_epi, coregistration, [('out_file', 'moving_image')]),
        (brain_extraction_ants, normalization, [('BrainExtractionBrain',
                                                 'moving_image')]),
        (coregistration, merge_transforms, [('composite_transform', 'in2')]),
        (normalization, merge_transforms, [('composite_transform', 'in1')]),
        (merge_transforms, apply_transforms, [('out', 'transforms')]),
        (realign_fsl, apply_transforms, [('out_file', 'input_image')]),
        (apply_transforms, mean_norm_epi, [('output_image', 'in_file')]),
        (normalization, apply_transform_seg, [('composite_transform',
                                               'transforms')]),
        (brain_extraction_ants, apply_transform_seg,
         [('BrainExtractionSegmentation', 'input_image')]),
        (mean_norm_epi, plot_normalization_check, [('out_file', 'wra_img')])
    ])

    ##################################################
    ################### PART (3) #####################
    # epi (in mni) -> filter -> smooth -> down sample
    # OR
    # epi (in mni) -> filter -> down sample
    # OR
    # epi (in mni) -> smooth -> down sample
    # OR
    # epi (in mni) -> down sample
    ###################################################

    if apply_filter:
        workflow.connect([(apply_transforms, lp_filter, [('output_image',
                                                          'in_file')])])

        if apply_smooth:
            # Filtering + Smoothing
            workflow.connect([(lp_filter, smooth, [('out_file', 'in_file')]),
                              (smooth, down_samp, [('smoothed_file', 'in_file')
                                                   ])])
        else:
            # Filtering + No Smoothing
            workflow.connect([(lp_filter, down_samp, [('out_file', 'in_file')])
                              ])
    else:
        if apply_smooth:
            # No Filtering + Smoothing
            workflow.connect([
                (apply_transforms, smooth, [('output_image', 'in_file')]),
                (smooth, down_samp, [('smoothed_file', 'in_file')])
            ])
        else:
            # No Filtering + No Smoothing
            workflow.connect([(apply_transforms, down_samp, [('output_image',
                                                              'in_file')])])

    ##########################################
    ############### PART (4) #################
    # down sample -> save
    # plots -> save
    # covs -> save
    # t1 (in mni) -> save
    # t1 segmented masks (in mni) -> save
    # realignment parms -> save
    ##########################################

    workflow.connect([
        (down_samp, datasink, [('out_file', 'functional.@down_samp')]),
        (plot_realign, datasink, [('plot', 'functional.@plot_realign')]),
        (plot_qa, datasink, [('plot', 'functional.@plot_qa')]),
        (plot_normalization_check, datasink,
         [('plot', 'functional.@plot_normalization')]),
        (make_cov, datasink, [('covariates', 'functional.@covariates')]),
        (normalization, datasink, [('warped_image', 'structural.@normanat')]),
        (apply_transform_seg, datasink, [('output_image',
                                          'structural.@normanatseg')]),
        (realign_fsl, datasink, [('par_file', 'functional.@motionparams')])
    ])

    if not os.path.exists(os.path.join(output_dir, 'pipeline.png')):
        workflow.write_graph(dotfilename=os.path.join(output_dir, 'pipeline'),
                             format='png')

    print(f"Creating workflow for subject: {subject_id}")
    if ants_threads != 8:
        print(
            f"ANTs will utilize the user-requested {ants_threads} threads for parallel processing."
        )
    return workflow
예제 #22
0
# Registration - computes registration between subject's structural and MNI template.
# Three-stage ANTs registration: Rigid -> Affine -> deformable SyN.
antsreg = Node(
    Registration(
        # Stage setup: transform types and their step-size parameters
        transforms=['Rigid', 'Affine', 'SyN'],
        transform_parameters=[(0.1, ), (0.1, ), (0.1, 3.0, 0.0)],
        number_of_iterations=[[1000, 500, 250, 100],
                              [1000, 500, 250, 100],
                              [100, 70, 50, 20]],
        # Similarity metric per stage: MI for the linear stages, CC for SyN
        metric=['MI', 'MI', 'CC'],
        metric_weight=[1.0, 1.0, 1.0],
        radius_or_number_of_bins=[32, 32, 4],
        sampling_strategy=['Regular', 'Regular', 'None'],
        sampling_percentage=[0.25, 0.25, 1],
        # Convergence and multi-resolution schedule (identical for all stages)
        convergence_threshold=[1e-06],
        convergence_window_size=[10],
        shrink_factors=[[8, 4, 2, 1], [8, 4, 2, 1], [8, 4, 2, 1]],
        smoothing_sigmas=[[3, 2, 1, 0], [3, 2, 1, 0], [3, 2, 1, 0]],
        sigma_units=['vox', 'vox', 'vox'],
        # Inputs / outputs
        fixed_image=template,
        initial_moving_transform_com=True,  # initialize by aligning centers of mass
        output_warped_image=True,
        output_inverse_warped_image=True,
        write_composite_transform=True,
        collapse_output_transforms=True,
        # Intensity robustness and runtime options
        winsorize_lower_quantile=0.005,
        winsorize_upper_quantile=0.995,
        use_histogram_matching=True,
        args='--float',  # single precision to reduce memory use
        num_threads=1,
        terminal_output='file'),
    name='antsreg')
예제 #23
0
def registerToTemplate(fixedImgFn,
                       movingImgFn,
                       outFn,
                       outDir,
                       transformPrefix,
                       initialize=False,
                       initialRegFile=0,
                       regType='nonlinear',
                       numThreads=50):
    """
    Register 2 images taken at different timepoints.

    Inputs:
    - fixedImgFn: filename of the fixed image (should be the template image)
    - movingImgFn: filename of the moving image (should be the Jn image)
    - outFn: name of the file to write the transformed image to.
    - outDir: path to the tmp directory
    - transformPrefix: prefix for the transform function
    - initialize: optional boolean; when True, initialize the registration
        with the transformation matrix saved by a previous registration
    - initialRegFile: optional parameter to be used with the initialize parameter;
        specifies which output_#Affine.mat file to use
    - regType: optional parameter to specify the type of registration to use
        (affine ['Affine'] or nonlinear ['SyN'])
    - numThreads: number of threads ANTs may use (default 50, the value
        previously hard-coded here)

    Outputs:
    - None

    Raises:
    - ValueError: if regType is neither 'nonlinear' nor 'affine'

    Effects:
    - Saves the registered image and the registration files
    """
    # Set up the registration
    # For both Affine and SyN transforms
    reg = Registration()
    reg.inputs.fixed_image = fixedImgFn
    reg.inputs.moving_image = movingImgFn
    reg.inputs.output_transform_prefix = transformPrefix
    reg.inputs.interpolation = 'NearestNeighbor'
    reg.inputs.dimension = 3
    reg.inputs.write_composite_transform = False
    reg.inputs.collapse_output_transforms = False
    reg.inputs.initialize_transforms_per_stage = False

    # Specify certain parameters for the nonlinear/['SyN'] registration
    if regType == 'nonlinear':
        reg.inputs.transforms = ['Affine', 'SyN']
        reg.inputs.transform_parameters = [(2.0, ), (0.25, 3.0, 0.0)]
        reg.inputs.number_of_iterations = [[1500, 200], [100, 50, 30]]
        reg.inputs.metric = ['CC'] * 2
        reg.inputs.metric_weight = [1] * 2
        reg.inputs.radius_or_number_of_bins = [5] * 2
        reg.inputs.convergence_threshold = [1.e-8, 1.e-9]
        reg.inputs.convergence_window_size = [20] * 2
        reg.inputs.smoothing_sigmas = [[1, 0], [2, 1, 0]]
        reg.inputs.sigma_units = ['vox'] * 2
        reg.inputs.shrink_factors = [[2, 1], [3, 2, 1]]
        reg.inputs.use_estimate_learning_rate_once = [True, True]
        reg.inputs.use_histogram_matching = [
            True, True
        ]  # This is the default value, but specify it anyway

    # Specify certain parameters for the affine/['Affine'] registration
    elif regType == 'affine':
        reg.inputs.transforms = ['Affine']
        reg.inputs.transform_parameters = [(2.0, )]
        reg.inputs.number_of_iterations = [[1500, 200]]
        reg.inputs.metric = ['CC']
        reg.inputs.metric_weight = [1]
        reg.inputs.radius_or_number_of_bins = [5]
        reg.inputs.convergence_threshold = [1.e-8]
        reg.inputs.convergence_window_size = [20]
        reg.inputs.smoothing_sigmas = [[1, 0]]
        reg.inputs.sigma_units = ['vox']
        reg.inputs.shrink_factors = [[2, 1]]
        reg.inputs.use_estimate_learning_rate_once = [True]
        reg.inputs.use_histogram_matching = [
            True
        ]  # This is the default, but specify it anyway

    else:
        # Previously an unknown regType silently left the transform stages
        # unconfigured and the registration ran misconfigured; fail fast.
        raise ValueError(
            "regType must be 'nonlinear' or 'affine', got %r" % (regType, ))

    reg.inputs.output_warped_image = outFn
    reg.inputs.num_threads = numThreads

    # If the registration is initialized, set a few more parameters
    if initialize is True:
        reg.inputs.initial_moving_transform = transformPrefix + str(
            initialRegFile) + 'Affine.mat'
        reg.inputs.invert_initial_moving_transform = False

    # Keep the user updated with the status of the registration
    print("Starting", regType, "registration for", outFn)
    # Run the registration
    reg.run()
    # Keep the user updated with the status of the registration
    print("Finished", regType, "registration for", outFn)
예제 #24
0
def preproc_workflow(input_dir,
                     output_dir,
                     subject_list,
                     ses_list,
                     anat_file,
                     func_file,
                     scan_size=477,
                     bet_frac=0.37):
    """
    The preprocessing workflow used in the preparation of the psilocybin vs escitalopram rsFMRI scans.
    Workflows and notes are defined throughout. Inputs are designed to be general and masks/default MNI space is provided

    :param input_dir: The input file directory containing all scans in BIDS format
    :param output_dir: The output file directory
    :param subject_list: a list of subject numbers
    :param ses_list: a list of scan numbers (session numbers)
    :param anat_file: The format of the anatomical scan within the input directory
    :param func_file: The format of the functional scan within the input directory
    :param scan_size: The length of the scan by number of images, most 10 minutes scans are around 400-500 depending
    upon scanner defaults and parameters - confirm by looking at your data
    :param bet_frac: brain extraction fractional intensity threshold
    :return: the preprocessing workflow
    """
    preproc = Workflow(name='preproc')
    preproc.base_dir = output_dir

    # Infosource - a function free node to iterate over the list of subject names
    infosource = Node(IdentityInterface(fields=['subject_id', 'ses']),
                      name="infosource")

    infosource.iterables = [('subject_id', subject_list), ('ses', ses_list)]

    # SelectFiles - to grab the data (alternative to DataGrabber)
    templates = {
        'anat': anat_file,
        'func': func_file
    }  # define the template of each file input

    selectfiles = Node(SelectFiles(templates, base_directory=input_dir),
                       name="selectfiles")

    # Datasink - creates output folder for important outputs
    datasink = Node(DataSink(base_directory=output_dir, container=output_dir),
                    name="datasink")

    preproc.connect([(infosource, selectfiles, [('subject_id', 'subject_id'),
                                                ('ses', 'ses')])])
    ''' 
    This is your functional processing workflow, used to trim scans, despike the signal, slice-time correct, 
    and motion correct your data 
    '''

    fproc = Workflow(name='fproc')  # the functional processing workflow

    # ExtractROI - skip dummy scans at the beginning of the recording by removing the first three
    trim = Node(ExtractROI(t_min=3, t_size=scan_size, output_type='NIFTI_GZ'),
                name="trim")

    # 3dDespike - despike
    despike = Node(Despike(outputtype='NIFTI_GZ', args='-NEW'), name="despike")
    fproc.connect([(trim, despike, [('roi_file', 'in_file')])])
    preproc.connect([(selectfiles, fproc, [('func', 'trim.in_file')])])

    # 3dTshift - slice time correction
    # NOTE(review): tpattern 'alt+z2' assumes an interleaved acquisition —
    # confirm against the scanner protocol before reuse.
    slicetime = Node(TShift(outputtype='NIFTI_GZ', tpattern='alt+z2'),
                     name="slicetime")
    fproc.connect([(despike, slicetime, [('out_file', 'in_file')])])

    # 3dVolreg - correct motion and output 1d matrix
    moco = Node(Volreg(outputtype='NIFTI_GZ',
                       interp='Fourier',
                       zpad=4,
                       args='-twopass'),
                name="moco")
    fproc.connect([(slicetime, moco, [('out_file', 'in_file')])])

    moco_bpfdt = Node(
        MOCObpfdt(), name='moco_bpfdt'
    )  # use the matlab function to correct the motion regressor
    fproc.connect([(moco, moco_bpfdt, [('oned_file', 'in_file')])])
    '''
    This is the co-registration workflow using FSL and ANTs
    '''

    coreg = Workflow(name='coreg')

    # BET - structural data brain extraction
    bet_anat = Node(BET(output_type='NIFTI_GZ', frac=bet_frac, robust=True),
                    name="bet_anat")

    # FSL segmentation process to get WM map
    seg = Node(FAST(bias_iters=6,
                    img_type=1,
                    output_biascorrected=True,
                    output_type='NIFTI_GZ'),
               name="seg")
    coreg.connect([(bet_anat, seg, [('out_file', 'in_files')])])

    # functional to structural registration
    mean = Node(MCFLIRT(mean_vol=True, output_type='NIFTI_GZ'), name="mean")

    # BBR using linear methods for initial transform fit
    func2struc = Node(FLIRT(cost='bbr', dof=6, output_type='NIFTI_GZ'),
                      name='func2struc')
    coreg.connect([(seg, func2struc, [('restored_image', 'reference')])])
    coreg.connect([(mean, func2struc, [('mean_img', 'in_file')])])
    # index 2 — presumably the WM class from FAST's tissue ordering; verify
    coreg.connect([(seg, func2struc, [(('tissue_class_files', pickindex, 2),
                                       'wm_seg')])])

    # convert the FSL linear transform into a C3d format for AFNI
    f2s_c3d = Node(C3dAffineTool(itk_transform=True, fsl2ras=True),
                   name='f2s_c3d')
    coreg.connect([(func2struc, f2s_c3d, [('out_matrix_file', 'transform_file')
                                          ])])
    coreg.connect([(mean, f2s_c3d, [('mean_img', 'source_file')])])
    coreg.connect([(seg, f2s_c3d, [('restored_image', 'reference_file')])])

    # Functional to structural registration via ANTs non-linear registration
    reg = Node(Registration(
        fixed_image='default_images/MNI152_T1_2mm_brain.nii.gz',
        transforms=['Affine', 'SyN'],
        transform_parameters=[(0.1, ), (0.1, 3.0, 0.0)],
        number_of_iterations=[[1500, 1000, 1000], [100, 70, 50, 20]],
        dimension=3,
        write_composite_transform=True,
        collapse_output_transforms=True,
        metric=['MI'] + ['CC'],
        metric_weight=[1] * 2,
        radius_or_number_of_bins=[32] + [4],
        convergence_threshold=[1.e-8, 1.e-9],
        convergence_window_size=[20] + [10],
        smoothing_sigmas=[[2, 1, 0], [4, 2, 1, 0]],
        sigma_units=['vox'] * 2,
        shrink_factors=[[4, 2, 1], [6, 4, 2, 1]],
        use_histogram_matching=[False] + [True],
        use_estimate_learning_rate_once=[True, True],
        output_warped_image=True),
               name='reg')

    coreg.connect([(seg, reg, [('restored_image', 'moving_image')])
                   ])  # connect segmentation node to registration node

    merge1 = Node(niu.Merge(2), iterfield=['in2'],
                  name='merge1')  # merge the linear and nonlinear transforms
    coreg.connect([(f2s_c3d, merge1, [('itk_transform', 'in2')])])
    coreg.connect([(reg, merge1, [('composite_transform', 'in1')])])

    # warp the functional images into MNI space using the transforms from FLIRT and SYN
    warp = Node(ApplyTransforms(
        reference_image='default_images/MNI152_T1_2mm_brain.nii.gz',
        input_image_type=3),
                name='warp')
    coreg.connect([(moco, warp, [('out_file', 'input_image')])])
    coreg.connect([(merge1, warp, [('out', 'transforms')])])

    preproc.connect([(selectfiles, coreg, [('anat', 'bet_anat.in_file')])])
    preproc.connect([(fproc, coreg, [('moco.out_file', 'mean.in_file')])])
    '''
    Scrubbing workflow - find the motion outliers, bandpass filter, re-mean the data after bpf
    '''

    scrub = Workflow(name='scrub')

    # Generate the Scrubbing Regressor (framewise displacement, threshold 0.4)
    scrub_metrics = Node(MotionOutliers(dummy=4,
                                        out_file='FD_outliers.1D',
                                        metric='fd',
                                        threshold=0.4),
                         name="scrub_metrics")

    # regress out timepoints
    scrub_frames = Node(Bandpass(highpass=0,
                                 lowpass=99999,
                                 outputtype='NIFTI_GZ'),
                        name='scrub_frames')
    scrub.connect([(scrub_metrics, scrub_frames, [('out_file',
                                                   'orthogonalize_file')])])
    preproc.connect([(coreg, scrub, [('warp.output_image',
                                      'scrub_frames.in_file')])])
    preproc.connect([(selectfiles, scrub, [('func', 'scrub_metrics.in_file')])
                     ])

    # mean image for remeaning after bandpass
    premean = Node(TStat(args='-mean', outputtype='NIFTI_GZ'), name='premean')
    # remean the image
    remean2 = Node(Calc(expr='a+b', outputtype='NIFTI_GZ'), name='remean2')
    scrub.connect([(scrub_frames, remean2, [('out_file', 'in_file_a')])])
    scrub.connect([(premean, remean2, [('out_file', 'in_file_b')])])
    preproc.connect([(coreg, scrub, [('warp.output_image', 'premean.in_file')])
                     ])
    '''
    Regressors for final cleaning steps
    '''

    regressors = Workflow(name='regressors')

    # Using registered structural image to create the masks for both WM and CSF
    regbet = Node(BET(robust=True, frac=0.37, output_type='NIFTI_GZ'),
                  name='regbet')

    regseg = Node(FAST(img_type=1,
                       output_type='NIFTI_GZ',
                       no_pve=True,
                       no_bias=True,
                       segments=True),
                  name='regseg')
    regressors.connect([(regbet, regseg, [('out_file', 'in_files')])])
    preproc.connect([(coreg, regressors, [('reg.warped_image',
                                           'regbet.in_file')])])
    '''
    Create a cerebrospinal fluid (CSF) regressor 
    '''

    # subtract subcortical GM from the CSF mask
    subcortgm = Node(BinaryMaths(
        operation='sub',
        operand_file='default_images/subcortical_gm_mask_bin.nii.gz',
        output_type='NIFTI_GZ',
        args='-bin'),
                     name='subcortgm')
    # index 0 — presumably the CSF class from FAST's tissue ordering; verify
    regressors.connect([(regseg, subcortgm, [(('tissue_class_files', pickindex,
                                               0), 'in_file')])])

    # Fill the mask holes

    fillcsf = Node(MaskTool(fill_holes=True, outputtype='NIFTI_GZ'),
                   name='fillcsf')
    regressors.connect([(subcortgm, fillcsf, [('out_file', 'in_file')])])

    # Erode the mask

    erocsf = Node(MaskTool(outputtype='NIFTI_GZ', dilate_inputs='-1'),
                  name='erocsf')
    regressors.connect([(fillcsf, erocsf, [('out_file', 'in_file')])])

    # Take mean csf signal from functional image
    meancsf = Node(ImageMeants(output_type='NIFTI_GZ'), name='meancsf')
    regressors.connect([(erocsf, meancsf, [('out_file', 'mask')])])
    preproc.connect([(coreg, regressors, [('warp.output_image',
                                           'meancsf.in_file')])])

    bpf_dt_csf = Node(CSFbpfdt(), name='bpf_dt_csf')
    regressors.connect([(meancsf, bpf_dt_csf, [('out_file', 'in_file')])])
    '''
    Creates a local white matter regressor
    '''

    # subtract subcortical gm
    subcortgm2 = Node(BinaryMaths(
        operation='sub',
        operand_file='default_images/subcortical_gm_mask_bin.nii.gz',
        output_type='NIFTI_GZ',
        args='-bin'),
                      name='subcortgm2')
    regressors.connect([(regseg, subcortgm2, [(('tissue_class_files',
                                                pickindex, 2), 'in_file')])])

    # fill mask
    fillwm = Node(MaskTool(fill_holes=True, outputtype='NIFTI_GZ'),
                  name='fillwm')
    regressors.connect([(subcortgm2, fillwm, [('out_file', 'in_file')])])

    # erode mask
    erowm = Node(MaskTool(outputtype='NIFTI_GZ', dilate_inputs='-1'),
                 name='erowm')
    regressors.connect([(fillwm, erowm, [('out_file', 'in_file')])])

    # generate local wm
    localwm = Node(Localstat(neighborhood=('SPHERE', 25),
                             stat='mean',
                             nonmask=True,
                             outputtype='NIFTI_GZ'),
                   name='localwm')
    regressors.connect([(erowm, localwm, [('out_file', 'mask_file')])])
    preproc.connect([(coreg, regressors, [('warp.output_image',
                                           'localwm.in_file')])])

    # bandpass filter the local wm regressor
    # (node name fixed: was 'loacwm_bpf', a typo inconsistent with the variable)
    localwm_bpf = Node(Fourier(highpass=0.01,
                               lowpass=0.08,
                               args='-retrend',
                               outputtype='NIFTI_GZ'),
                       name='localwm_bpf')
    regressors.connect([(localwm, localwm_bpf, [('out_file', 'in_file')])])

    # detrend the local wm regressor

    localwm_bpf_dt = Node(Detrend(args='-polort 2', outputtype='NIFTI_GZ'),
                          name='localwm_bpf_dt')
    regressors.connect([(localwm_bpf, localwm_bpf_dt, [('out_file', 'in_file')
                                                       ])])
    '''
    Clean up your functional image with the regressors you have created above
    '''

    # create a mask for blurring filtering, and detrending

    clean = Workflow(name='clean')

    mask = Node(BET(mask=True, functional=True), name='mask')

    mean_mask = Node(MCFLIRT(mean_vol=True, output_type='NIFTI_GZ'),
                     name="mean_mask")

    dilf = Node(DilateImage(operation='max', output_type='NIFTI_GZ'),
                name='dilf')
    clean.connect([(mask, dilf, [('mask_file', 'in_file')])])
    preproc.connect([(scrub, clean, [('remean2.out_file', 'mask.in_file')])])

    fill = Node(MaskTool(in_file='default_images/MNI152_T1_2mm_brain.nii.gz',
                         fill_holes=True,
                         outputtype='NIFTI_GZ'),
                name='fill')

    axb = Node(Calc(expr='a*b', outputtype='NIFTI_GZ'), name='axb')
    clean.connect([(dilf, axb, [('out_file', 'in_file_a')])])
    clean.connect([(fill, axb, [('out_file', 'in_file_b')])])

    bxc = Node(Calc(expr='ispositive(a)*b', outputtype='NIFTI_GZ'), name='bxc')
    clean.connect([(mean_mask, bxc, [('mean_img', 'in_file_a')])])
    clean.connect([(axb, bxc, [('out_file', 'in_file_b')])])
    preproc.connect([(scrub, clean, [('remean2.out_file', 'mean_mask.in_file')
                                     ])])

    #### BLUR, FOURIER BPF, and DETREND

    blurinmask = Node(BlurInMask(fwhm=6, outputtype='NIFTI_GZ'),
                      name='blurinmask')
    clean.connect([(bxc, blurinmask, [('out_file', 'mask')])])
    preproc.connect([(scrub, clean, [('remean2.out_file', 'blurinmask.in_file')
                                     ])])

    fourier = Node(Fourier(highpass=0.01,
                           lowpass=0.08,
                           retrend=True,
                           outputtype='NIFTI_GZ'),
                   name='fourier')
    clean.connect([(blurinmask, fourier, [('out_file', 'in_file')])])

    tstat = Node(TStat(args='-mean', outputtype='NIFTI_GZ'), name='tstat')
    clean.connect([(fourier, tstat, [('out_file', 'in_file')])])

    detrend = Node(Detrend(args='-polort 2', outputtype='NIFTI_GZ'),
                   name='detrend')
    clean.connect([(fourier, detrend, [('out_file', 'in_file')])])

    remean = Node(Calc(expr='a+b', outputtype='NIFTI_GZ'), name='remean')
    clean.connect([(detrend, remean, [('out_file', 'in_file_a')])])
    clean.connect([(tstat, remean, [('out_file', 'in_file_b')])])

    concat = Node(ConcatModel(), name='concat')

    # Removes nuisance regressors via regression function
    clean_rs = Node(Bandpass(highpass=0, lowpass=99999, outputtype='NIFTI_GZ'),
                    name='clean_rs')

    clean.connect([(concat, clean_rs, [('out_file', 'orthogonalize_file')])])

    remean1 = Node(Calc(expr='a+b', outputtype='NIFTI_GZ'), name='remean1')
    clean.connect([(clean_rs, remean1, [('out_file', 'in_file_a')])])
    clean.connect([(tstat, remean1, [('out_file', 'in_file_b')])])

    preproc.connect([(regressors, clean, [('bpf_dt_csf.out_file',
                                           'concat.in_file_a')])])
    preproc.connect([(fproc, clean, [('moco_bpfdt.out_file',
                                      'concat.in_file_b')])])

    preproc.connect([(regressors, clean, [('localwm_bpf_dt.out_file',
                                           'clean_rs.orthogonalize_dset')])])
    clean.connect([(remean, clean_rs, [('out_file', 'in_file')])])
    '''
    Write graphical output detailing the workflows and nodes 
    '''

    fproc.write_graph(graph2use='flat',
                      format='png',
                      simple_form=True,
                      dotfilename='./fproc.dot')
    fproc.write_graph(graph2use='colored',
                      format='png',
                      simple_form=True,
                      dotfilename='./fproc_color.dot')

    coreg.write_graph(graph2use='flat',
                      format='png',
                      simple_form=True,
                      dotfilename='./coreg.dot')
    coreg.write_graph(graph2use='colored',
                      format='png',
                      simple_form=True,
                      dotfilename='./coreg_color.dot')

    scrub.write_graph(graph2use='flat',
                      format='png',
                      simple_form=True,
                      dotfilename='./scrub.dot')
    scrub.write_graph(graph2use='colored',
                      format='png',
                      simple_form=True,
                      dotfilename='./scrub_color.dot')

    regressors.write_graph(graph2use='flat',
                           format='png',
                           simple_form=True,
                           dotfilename='./reg.dot')
    regressors.write_graph(graph2use='colored',
                           format='png',
                           simple_form=True,
                           dotfilename='./reg_color.dot')

    preproc.write_graph(graph2use='flat',
                        format='png',
                        simple_form=True,
                        dotfilename='./preproc.dot')
    preproc.write_graph(graph2use='colored',
                        format='png',
                        simple_form=True,
                        dotfilename='./preproc_color.dot')

    return preproc
예제 #25
0
def COMPOSER(verbose=False, is_bruker=False):
    """Build the COMPOSER coil-combination workflow.

    The workflow extracts magnitude images from the (complex or
    real/imaginary) input and reference volumes, registers their temporal
    means rigidly with ANTs, and — for Bruker data — resamples the reference
    into the input space and performs COMPOSER coil combination.

    Parameters
    ----------
    verbose : bool
        Forwarded to the ``Complex`` and ``CoilCombine`` interfaces.
    is_bruker : bool
        If True, inputs are real/imaginary Bruker volumes and the full
        coil-combination path is built; otherwise inputs are complex volumes
        and only the registration stage is wired (the combination path is
        not yet implemented and the function raises).

    Returns
    -------
    Workflow
        The assembled ``COMPOSER`` workflow (Bruker path only).

    Raises
    ------
    NotImplementedError
        If ``is_bruker`` is False — the non-Bruker combination path is not
        yet supported.
    """
    inputnode = Node(IdentityInterface(fields=['in_file', 'ref_file']),
                     name='input')
    outputnode = Node(IdentityInterface(fields=['out_file']), name='output')
    wf = Workflow(name='COMPOSER')

    # Magnitude extraction for both the input and the reference series.
    in_mag = Node(Complex(magnitude_out_file='in_mag.nii.gz', verbose=verbose),
                  name='in_magnitude')
    ref_mag = Node(Complex(magnitude_out_file='ref_mag.nii.gz',
                           verbose=verbose),
                   name='ref_magnitude')
    if is_bruker:
        # Bruker data arrives as separate real/imaginary volumes.
        wf.connect([(inputnode, in_mag, [('in_file', 'realimag')])])
        wf.connect([(inputnode, ref_mag, [('ref_file', 'realimag')])])
    else:
        wf.connect([(inputnode, in_mag, [('in_file', 'complex')])])
        wf.connect([(inputnode, ref_mag, [('ref_file', 'complex')])])

    # Temporal means are used as registration targets to improve SNR.
    in_mean = Node(maths.MeanImage(), name='in_mean')
    ref_mean = Node(maths.MeanImage(), name='ref_mean')
    wf.connect([(in_mag, in_mean, [('magnitude_out_file', 'in_file')]),
                (ref_mag, ref_mean, [('magnitude_out_file', 'in_file')])])

    # Rigid registration of the mean input magnitude to the mean reference.
    # NOTE(review): sampling_strategy/sampling_percentage carry two entries
    # for a single 'Rigid' stage — confirm the trailing None entries are
    # intentional against the nipype Registration interface.
    register = Node(Registration(dimension=3,
                                 initial_moving_transform_com=1,
                                 transforms=['Rigid'],
                                 metric=['Mattes'],
                                 metric_weight=[1],
                                 transform_parameters=[(0.1, )],
                                 number_of_iterations=[[1000, 500, 250]],
                                 collapse_output_transforms=False,
                                 initialize_transforms_per_stage=False,
                                 radius_or_number_of_bins=[32],
                                 sampling_strategy=['Regular', None],
                                 sampling_percentage=[0.25, None],
                                 convergence_threshold=[1.e-6],
                                 smoothing_sigmas=[[4, 2, 1]],
                                 shrink_factors=[[8, 4, 2]],
                                 sigma_units=['vox'],
                                 output_warped_image=True,
                                 verbose=True),
                    name='register')
    wf.connect([(in_mean, register, [('out_file', 'moving_image')]),
                (ref_mean, register, [('out_file', 'fixed_image')])])

    if is_bruker:
        # Pull the reference into input space (reverse transform), rebuild
        # complex volumes, and combine coils with COMPOSER.
        resample = Node(ApplyTransforms(dimension=3, input_image_type=3),
                        name='resample_reference')
        in_x = Node(Complex(complex_out_file='in_x.nii.gz', verbose=verbose),
                    name='in_x')
        ref_x = Node(Complex(complex_out_file='ref_x.nii.gz', verbose=verbose),
                     name='ref_x')
        cc = Node(CoilCombine(), name='cc')
        wf.connect([(inputnode, resample, [('ref_file', 'input_image')]),
                    (in_mean, resample, [('out_file', 'reference_image')]),
                    (register, resample, [('reverse_transforms', 'transforms')
                                          ]),
                    (inputnode, in_x, [('in_file', 'realimag')]),
                    (resample, ref_x, [('output_image', 'realimag')]),
                    (in_x, cc, [('complex_out_file', 'in_file')]),
                    (ref_x, cc, [('complex_out_file', 'composer_file')]),
                    (cc, outputnode, [('out_file', 'out_file')])])
    else:
        # BUG FIX: raising a bare string is a TypeError in Python 3; raise a
        # proper exception type instead so callers see the intended message.
        raise NotImplementedError('Not Yet Supported')

    return wf
예제 #26
0
    def __init__(self, parent, dir_dic, bids):
        """Build the GUI tab and nipype workflow for CT/T1/MNI co-registration.

        Creates BET, ANTs Registration, and ApplyTransforms nodes, wires them
        into a ``co_registration`` workflow, writes its graph images, and then
        initialises the settings and UI of this tab.

        Parameters are forwarded to the superclass:
        ``parent`` — presumably the parent GUI widget (TODO confirm);
        ``dir_dic`` — dict of directories; this method reads the
        ``'data_dir'`` and ``'temp_dir'`` keys;
        ``bids`` — BIDS-related configuration, used only by the base class
        as far as this method shows.
        """
        super().__init__(parent, dir_dic, bids)

        # Create interfaces ============================================================================================
        # BET
        # Skull-strip the MNI template so registration targets brain only.
        MNI_BET = Node(BET(), name="MNI_BET")
        MNI_BET.btn_string = 'MNI Template Brain Extraction'
        self.interfaces.append(MNI_BET)

        # Registration
        # Three pairwise registrations: both CTs to T1, and T1 to MNI.
        postopCT_T1_Reg = Node(Registration(), name="postopCT_T1_Reg")
        postopCT_T1_Reg.btn_string = 'post-op CT to T1w Registration'
        self.interfaces.append(postopCT_T1_Reg)

        preopCT_T1_Reg = Node(Registration(), name="preopCT_T1_Reg")
        preopCT_T1_Reg.btn_string = 'pre-op CT to T1w Registration'
        self.interfaces.append(preopCT_T1_Reg)

        T1_MNI_Reg = Node(Registration(), name="T1_MNI_Reg")
        T1_MNI_Reg.btn_string = 'T1w to MNI template Registration'
        self.interfaces.append(T1_MNI_Reg)

        # Transformations
        # Matching resampling nodes that apply each registration's transform.
        postopCT_T1_Tran = Node(ApplyTransforms(), name="postopCT_T1_Tran")
        postopCT_T1_Tran.btn_string = 'post-op CT to T1w Transformation'
        self.interfaces.append(postopCT_T1_Tran)

        preopCT_T1_Tran = Node(ApplyTransforms(), name="preopCT_T1_Tran")
        preopCT_T1_Tran.btn_string = 'pre-op CT to T1w Transformation'
        self.interfaces.append(preopCT_T1_Tran)

        T1_MNI_Tran = Node(ApplyTransforms(), name="T1_MNI_Tran")
        T1_MNI_Tran.btn_string = 'T1w to MNI template Transformation'
        self.interfaces.append(T1_MNI_Tran)

        # Data output (i.e. sink) ======================================================================================
        self.sink = Node(DataSink(), name="sink")
        self.sink.btn_string = 'data sink'
        self.sink.inputs.base_directory = self.dir_dic['data_dir']

        self.jsink = Node(JSONFileSink(), name="jsink")
        self.jsink.btn_string = 'json sink'
        self.jsink.inputs.base_directory = self.dir_dic['data_dir']

        # Initialize workflow ==========================================================================================
        self.wf = Workflow(name='co_registration')

        # Brain extracted MNI template to antsRegistration
        # MI[mni_t1_brain.nii.gz,t1_nonGdE_brain_N4bfc_masked.nii.gz,1,32,Regular,0.25]
        # MI[fixedImage,movingImage,metricWeight,numberOfBins,<samplingStrategy={None,Regular,Random}>,<samplingPercentage=[0,1]>]
        self.wf.connect([(self.return_interface("MNI_BET"),
                          self.return_interface("T1_MNI_Reg"),
                          [("out_file", "fixed_image")])])

        self.wf.connect([(self.return_interface("MNI_BET"),
                          self.return_interface("T1_MNI_Tran"),
                          [("out_file", "reference_image")])])

        # T1 -> MNI Reg to Tran
        self.wf.connect([(self.return_interface("T1_MNI_Reg"),
                          self.return_interface("T1_MNI_Tran"),
                          [("composite_transform", "transforms")])])

        # postop CT -> T1 Reg to Tran
        self.wf.connect([(self.return_interface("postopCT_T1_Reg"),
                          self.return_interface("postopCT_T1_Tran"),
                          [("composite_transform", "transforms")])])

        # preop CT -> T1 Reg to Tran
        self.wf.connect([(self.return_interface("preopCT_T1_Reg"),
                          self.return_interface("preopCT_T1_Tran"),
                          [("composite_transform", "transforms")])])

        # BaseInterface generates a dict mapping button strings to the workflow nodes
        self.wf.base_dir = self.dir_dic['temp_dir']

        # Write the flat graph, then point the GUI at the detailed variant
        # that write_graph produces alongside it.
        graph_file = self.wf.write_graph("co_registration", graph2use='flat')
        self.graph_file = graph_file.replace("co_registration.png",
                                             "co_registration_detailed.png")

        self.init_settings()
        self.init_ui()
예제 #27
0
def BAWantsRegistrationTemplateBuildSingleIterationWF(iterationPhasePrefix,
                                                      CLUSTER_QUEUE,
                                                      CLUSTER_QUEUE_LONG):
    """
    Build one iteration of the ANTs template-construction workflow: register
    every input image to the current fixed_image, average the deformed images
    and their transforms, and apply a gradient-step shape update to produce
    the next template estimate.  Passive (non-registration) images are warped
    and averaged with the same transforms.

    ``CLUSTER_QUEUE`` and ``CLUSTER_QUEUE_LONG`` are not referenced in this
    body — presumably consumed via plugin args set in template.py (see the
    comments at BeginANTS); TODO confirm before removing.

    Inputs::

           inputspec.images :
           inputspec.fixed_image :
           inputspec.ListOfPassiveImagesDictionaries :
           inputspec.interpolationMapping :

    Outputs::

           outputspec.template :
           outputspec.transforms_list :
           outputspec.passive_deformed_templates :
    """
    TemplateBuildSingleIterationWF = pe.Workflow(
        name='antsRegistrationTemplateBuildSingleIterationWF_' +
        str(iterationPhasePrefix))

    inputSpec = pe.Node(
        interface=util.IdentityInterface(fields=[
            'ListOfImagesDictionaries',
            'registrationImageTypes',
            # 'maskRegistrationImageType',
            'interpolationMapping',
            'fixed_image'
        ]),
        run_without_submitting=True,
        name='inputspec')
    ## HACK: TODO: We need to have the AVG_AIR.nii.gz be warped with a default voxel value of 1.0
    ## HACK: TODO: Need to move all local functions to a common untility file, or at the top of the file so that
    ##             they do not change due to re-indenting.  Otherwise re-indenting for flow control will trigger
    ##             their hash to change.
    ## HACK: TODO: REMOVE 'transforms_list' it is not used.  That will change all the hashes
    ## HACK: TODO: Need to run all python files through the code beutifiers.  It has gotten pretty ugly.
    outputSpec = pe.Node(interface=util.IdentityInterface(
        fields=['template', 'transforms_list', 'passive_deformed_templates']),
                         run_without_submitting=True,
                         name='outputspec')

    ### NOTE MAP NODE! warp each of the original images to the provided fixed_image as the template
    BeginANTS = pe.MapNode(interface=Registration(),
                           name='BeginANTS',
                           iterfield=['moving_image'])
    # SEE template.py many_cpu_BeginANTS_options_dictionary = {'qsub_args': modify_qsub_args(CLUSTER_QUEUE,4,2,8), 'overwrite': True}
    ## This is set in the template.py file BeginANTS.plugin_args = BeginANTS_cpu_sge_options_dictionary
    CommonANTsRegistrationSettings(
        antsRegistrationNode=BeginANTS,
        registrationTypeDescription="SixStageAntsRegistrationT1Only",
        output_transform_prefix=str(iterationPhasePrefix) + '_tfm',
        output_warped_image='atlas2subject.nii.gz',
        output_inverse_warped_image='subject2atlas.nii.gz',
        save_state='SavedantsRegistrationNodeSyNState.h5',
        invert_initial_moving_transform=False,
        initial_moving_transform=None)

    # Extract the registration-driving images (and their interpolation
    # types) from the per-subject image dictionaries.
    GetMovingImagesNode = pe.Node(interface=util.Function(
        function=GetMovingImages,
        input_names=[
            'ListOfImagesDictionaries', 'registrationImageTypes',
            'interpolationMapping'
        ],
        output_names=['moving_images', 'moving_interpolation_type']),
                                  run_without_submitting=True,
                                  name='99_GetMovingImagesNode')
    TemplateBuildSingleIterationWF.connect(inputSpec,
                                           'ListOfImagesDictionaries',
                                           GetMovingImagesNode,
                                           'ListOfImagesDictionaries')
    TemplateBuildSingleIterationWF.connect(inputSpec, 'registrationImageTypes',
                                           GetMovingImagesNode,
                                           'registrationImageTypes')
    TemplateBuildSingleIterationWF.connect(inputSpec, 'interpolationMapping',
                                           GetMovingImagesNode,
                                           'interpolationMapping')

    TemplateBuildSingleIterationWF.connect(GetMovingImagesNode,
                                           'moving_images', BeginANTS,
                                           'moving_image')
    TemplateBuildSingleIterationWF.connect(GetMovingImagesNode,
                                           'moving_interpolation_type',
                                           BeginANTS, 'interpolation')
    TemplateBuildSingleIterationWF.connect(inputSpec, 'fixed_image', BeginANTS,
                                           'fixed_image')

    ## Now warp all the input_images images
    wimtdeformed = pe.MapNode(
        interface=ApplyTransforms(),
        iterfield=['transforms', 'input_image'],
        # iterfield=['transforms', 'invert_transform_flags', 'input_image'],
        name='wimtdeformed')
    wimtdeformed.inputs.interpolation = 'Linear'
    wimtdeformed.default_value = 0
    # HACK: Should try using forward_composite_transform
    ##PREVIOUS TemplateBuildSingleIterationWF.connect(BeginANTS, 'forward_transform', wimtdeformed, 'transforms')
    TemplateBuildSingleIterationWF.connect(BeginANTS, 'composite_transform',
                                           wimtdeformed, 'transforms')
    ##PREVIOUS TemplateBuildSingleIterationWF.connect(BeginANTS, 'forward_invert_flags', wimtdeformed, 'invert_transform_flags')
    ## NOTE: forward_invert_flags:: List of flags corresponding to the forward transforms
    # wimtdeformed.inputs.invert_transform_flags = [False,False,False,False,False]
    TemplateBuildSingleIterationWF.connect(GetMovingImagesNode,
                                           'moving_images', wimtdeformed,
                                           'input_image')
    TemplateBuildSingleIterationWF.connect(inputSpec, 'fixed_image',
                                           wimtdeformed, 'reference_image')

    ##  Shape Update Next =====
    ## Now  Average All input_images deformed images together to create an updated template average
    AvgDeformedImages = pe.Node(interface=AverageImages(),
                                name='AvgDeformedImages')
    AvgDeformedImages.inputs.dimension = 3
    AvgDeformedImages.inputs.output_average_image = str(
        iterationPhasePrefix) + '.nii.gz'
    AvgDeformedImages.inputs.normalize = True
    TemplateBuildSingleIterationWF.connect(wimtdeformed, "output_image",
                                           AvgDeformedImages, 'images')

    ## Now average all affine transforms together
    AvgAffineTransform = pe.Node(interface=AverageAffineTransform(),
                                 name='AvgAffineTransform')
    AvgAffineTransform.inputs.dimension = 3
    AvgAffineTransform.inputs.output_affine_transform = 'Avererage_' + str(
        iterationPhasePrefix) + '_Affine.h5'

    # Composite transforms must be split so affine and warp components can
    # be averaged separately.
    SplitCompositeTransform = pe.MapNode(interface=util.Function(
        function=SplitCompositeToComponentTransforms,
        input_names=['transformFilename'],
        output_names=['affine_component_list', 'warp_component_list']),
                                         iterfield=['transformFilename'],
                                         run_without_submitting=True,
                                         name='99_SplitCompositeTransform')
    TemplateBuildSingleIterationWF.connect(BeginANTS, 'composite_transform',
                                           SplitCompositeTransform,
                                           'transformFilename')
    ## PREVIOUS TemplateBuildSingleIterationWF.connect(BeginANTS, 'forward_transforms', SplitCompositeTransform, 'transformFilename')
    TemplateBuildSingleIterationWF.connect(SplitCompositeTransform,
                                           'affine_component_list',
                                           AvgAffineTransform, 'transforms')

    ## Now average the warp fields togther
    AvgWarpImages = pe.Node(interface=AverageImages(), name='AvgWarpImages')
    AvgWarpImages.inputs.dimension = 3
    AvgWarpImages.inputs.output_average_image = str(
        iterationPhasePrefix) + 'warp.nii.gz'
    AvgWarpImages.inputs.normalize = True
    TemplateBuildSingleIterationWF.connect(SplitCompositeTransform,
                                           'warp_component_list',
                                           AvgWarpImages, 'images')

    ## Now average the images together
    ## TODO:  For now GradientStep is set to 0.25 as a hard coded default value.
    GradientStep = 0.25
    # Scale the average warp by -GradientStep: moving the template against
    # the mean deformation nudges it toward the population mean shape.
    GradientStepWarpImage = pe.Node(interface=MultiplyImages(),
                                    name='GradientStepWarpImage')
    GradientStepWarpImage.inputs.dimension = 3
    GradientStepWarpImage.inputs.second_input = -1.0 * GradientStep
    GradientStepWarpImage.inputs.output_product_image = 'GradientStep0.25_' + str(
        iterationPhasePrefix) + '_warp.nii.gz'
    TemplateBuildSingleIterationWF.connect(AvgWarpImages,
                                           'output_average_image',
                                           GradientStepWarpImage,
                                           'first_input')

    ## Now create the new template shape based on the average of all deformed images
    UpdateTemplateShape = pe.Node(interface=ApplyTransforms(),
                                  name='UpdateTemplateShape')
    UpdateTemplateShape.inputs.invert_transform_flags = [True]
    UpdateTemplateShape.inputs.interpolation = 'Linear'
    UpdateTemplateShape.default_value = 0

    TemplateBuildSingleIterationWF.connect(AvgDeformedImages,
                                           'output_average_image',
                                           UpdateTemplateShape,
                                           'reference_image')
    TemplateBuildSingleIterationWF.connect([
        (AvgAffineTransform, UpdateTemplateShape,
         [(('affine_transform', makeListOfOneElement), 'transforms')]),
    ])
    TemplateBuildSingleIterationWF.connect(GradientStepWarpImage,
                                           'output_product_image',
                                           UpdateTemplateShape, 'input_image')

    ApplyInvAverageAndFourTimesGradientStepWarpImage = pe.Node(
        interface=util.Function(
            function=MakeTransformListWithGradientWarps,
            input_names=['averageAffineTranform', 'gradientStepWarp'],
            output_names=['TransformListWithGradientWarps']),
        run_without_submitting=True,
        name='99_MakeTransformListWithGradientWarps')
    ApplyInvAverageAndFourTimesGradientStepWarpImage.inputs.ignore_exception = True

    TemplateBuildSingleIterationWF.connect(
        AvgAffineTransform, 'affine_transform',
        ApplyInvAverageAndFourTimesGradientStepWarpImage,
        'averageAffineTranform')
    TemplateBuildSingleIterationWF.connect(
        UpdateTemplateShape, 'output_image',
        ApplyInvAverageAndFourTimesGradientStepWarpImage, 'gradientStepWarp')

    ReshapeAverageImageWithShapeUpdate = pe.Node(
        interface=ApplyTransforms(), name='ReshapeAverageImageWithShapeUpdate')
    # NOTE(review): this flag list assumes MakeTransformListWithGradientWarps
    # yields exactly five transforms (inverted affine first) — confirm there.
    ReshapeAverageImageWithShapeUpdate.inputs.invert_transform_flags = [
        True, False, False, False, False
    ]
    ReshapeAverageImageWithShapeUpdate.inputs.interpolation = 'Linear'
    ReshapeAverageImageWithShapeUpdate.default_value = 0
    ReshapeAverageImageWithShapeUpdate.inputs.output_image = 'ReshapeAverageImageWithShapeUpdate.nii.gz'
    TemplateBuildSingleIterationWF.connect(AvgDeformedImages,
                                           'output_average_image',
                                           ReshapeAverageImageWithShapeUpdate,
                                           'input_image')
    TemplateBuildSingleIterationWF.connect(AvgDeformedImages,
                                           'output_average_image',
                                           ReshapeAverageImageWithShapeUpdate,
                                           'reference_image')
    TemplateBuildSingleIterationWF.connect(
        ApplyInvAverageAndFourTimesGradientStepWarpImage,
        'TransformListWithGradientWarps', ReshapeAverageImageWithShapeUpdate,
        'transforms')
    TemplateBuildSingleIterationWF.connect(ReshapeAverageImageWithShapeUpdate,
                                           'output_image', outputSpec,
                                           'template')

    ######
    ######
    ######  Process all the passive deformed images in a way similar to the main image used for registration
    ######
    ######
    ######
    ##############################################
    ## Now warp all the ListOfPassiveImagesDictionaries images
    FlattenTransformAndImagesListNode = pe.Node(
        Function(function=FlattenTransformAndImagesList,
                 input_names=[
                     'ListOfPassiveImagesDictionaries', 'transforms',
                     'interpolationMapping', 'invert_transform_flags'
                 ],
                 output_names=[
                     'flattened_images', 'flattened_transforms',
                     'flattened_invert_transform_flags',
                     'flattened_image_nametypes',
                     'flattened_interpolation_type'
                 ]),
        run_without_submitting=True,
        name="99_FlattenTransformAndImagesList")

    GetPassiveImagesNode = pe.Node(interface=util.Function(
        function=GetPassiveImages,
        input_names=['ListOfImagesDictionaries', 'registrationImageTypes'],
        output_names=['ListOfPassiveImagesDictionaries']),
                                   run_without_submitting=True,
                                   name='99_GetPassiveImagesNode')
    TemplateBuildSingleIterationWF.connect(inputSpec,
                                           'ListOfImagesDictionaries',
                                           GetPassiveImagesNode,
                                           'ListOfImagesDictionaries')
    TemplateBuildSingleIterationWF.connect(inputSpec, 'registrationImageTypes',
                                           GetPassiveImagesNode,
                                           'registrationImageTypes')

    TemplateBuildSingleIterationWF.connect(GetPassiveImagesNode,
                                           'ListOfPassiveImagesDictionaries',
                                           FlattenTransformAndImagesListNode,
                                           'ListOfPassiveImagesDictionaries')
    TemplateBuildSingleIterationWF.connect(inputSpec, 'interpolationMapping',
                                           FlattenTransformAndImagesListNode,
                                           'interpolationMapping')
    TemplateBuildSingleIterationWF.connect(BeginANTS, 'composite_transform',
                                           FlattenTransformAndImagesListNode,
                                           'transforms')
    ## FlattenTransformAndImagesListNode.inputs.invert_transform_flags = [False,False,False,False,False,False]
    ## TODO: Please check of invert_transform_flags has a fixed number.
    ## PREVIOUS TemplateBuildSingleIterationWF.connect(BeginANTS, 'forward_invert_flags', FlattenTransformAndImagesListNode, 'invert_transform_flags')
    wimtPassivedeformed = pe.MapNode(interface=ApplyTransforms(),
                                     iterfield=[
                                         'transforms',
                                         'invert_transform_flags',
                                         'input_image', 'interpolation'
                                     ],
                                     name='wimtPassivedeformed')
    wimtPassivedeformed.default_value = 0
    TemplateBuildSingleIterationWF.connect(AvgDeformedImages,
                                           'output_average_image',
                                           wimtPassivedeformed,
                                           'reference_image')
    TemplateBuildSingleIterationWF.connect(FlattenTransformAndImagesListNode,
                                           'flattened_interpolation_type',
                                           wimtPassivedeformed,
                                           'interpolation')
    TemplateBuildSingleIterationWF.connect(FlattenTransformAndImagesListNode,
                                           'flattened_images',
                                           wimtPassivedeformed, 'input_image')
    TemplateBuildSingleIterationWF.connect(FlattenTransformAndImagesListNode,
                                           'flattened_transforms',
                                           wimtPassivedeformed, 'transforms')
    TemplateBuildSingleIterationWF.connect(FlattenTransformAndImagesListNode,
                                           'flattened_invert_transform_flags',
                                           wimtPassivedeformed,
                                           'invert_transform_flags')

    # Regroup the flat list of warped passive images back by image type so
    # each type can be averaged on its own.
    RenestDeformedPassiveImagesNode = pe.Node(
        Function(function=RenestDeformedPassiveImages,
                 input_names=[
                     'deformedPassiveImages', 'flattened_image_nametypes',
                     'interpolationMapping'
                 ],
                 output_names=[
                     'nested_imagetype_list', 'outputAverageImageName_list',
                     'image_type_list', 'nested_interpolation_type'
                 ]),
        run_without_submitting=True,
        name="99_RenestDeformedPassiveImages")
    TemplateBuildSingleIterationWF.connect(inputSpec, 'interpolationMapping',
                                           RenestDeformedPassiveImagesNode,
                                           'interpolationMapping')
    TemplateBuildSingleIterationWF.connect(wimtPassivedeformed, 'output_image',
                                           RenestDeformedPassiveImagesNode,
                                           'deformedPassiveImages')
    TemplateBuildSingleIterationWF.connect(FlattenTransformAndImagesListNode,
                                           'flattened_image_nametypes',
                                           RenestDeformedPassiveImagesNode,
                                           'flattened_image_nametypes')
    ## Now  Average All passive input_images deformed images together to create an updated template average
    AvgDeformedPassiveImages = pe.MapNode(
        interface=AverageImages(),
        iterfield=['images', 'output_average_image'],
        name='AvgDeformedPassiveImages')
    AvgDeformedPassiveImages.inputs.dimension = 3
    AvgDeformedPassiveImages.inputs.normalize = False
    TemplateBuildSingleIterationWF.connect(RenestDeformedPassiveImagesNode,
                                           "nested_imagetype_list",
                                           AvgDeformedPassiveImages, 'images')
    TemplateBuildSingleIterationWF.connect(RenestDeformedPassiveImagesNode,
                                           "outputAverageImageName_list",
                                           AvgDeformedPassiveImages,
                                           'output_average_image')

    ## -- TODO:  Now neeed to reshape all the passive images as well
    ReshapeAveragePassiveImageWithShapeUpdate = pe.MapNode(
        interface=ApplyTransforms(),
        iterfield=[
            'input_image', 'reference_image', 'output_image', 'interpolation'
        ],
        name='ReshapeAveragePassiveImageWithShapeUpdate')
    ReshapeAveragePassiveImageWithShapeUpdate.inputs.invert_transform_flags = [
        True, False, False, False, False
    ]
    ReshapeAveragePassiveImageWithShapeUpdate.default_value = 0
    TemplateBuildSingleIterationWF.connect(
        RenestDeformedPassiveImagesNode, 'nested_interpolation_type',
        ReshapeAveragePassiveImageWithShapeUpdate, 'interpolation')
    TemplateBuildSingleIterationWF.connect(
        RenestDeformedPassiveImagesNode, 'outputAverageImageName_list',
        ReshapeAveragePassiveImageWithShapeUpdate, 'output_image')
    TemplateBuildSingleIterationWF.connect(
        AvgDeformedPassiveImages, 'output_average_image',
        ReshapeAveragePassiveImageWithShapeUpdate, 'input_image')
    TemplateBuildSingleIterationWF.connect(
        AvgDeformedPassiveImages, 'output_average_image',
        ReshapeAveragePassiveImageWithShapeUpdate, 'reference_image')
    TemplateBuildSingleIterationWF.connect(
        ApplyInvAverageAndFourTimesGradientStepWarpImage,
        'TransformListWithGradientWarps',
        ReshapeAveragePassiveImageWithShapeUpdate, 'transforms')
    TemplateBuildSingleIterationWF.connect(
        ReshapeAveragePassiveImageWithShapeUpdate, 'output_image', outputSpec,
        'passive_deformed_templates')

    return TemplateBuildSingleIterationWF
예제 #28
0
# Batch-register a stripped atlas to each subject's T2 volume.
# NOTE(review): `folderNames` and `homedir` are presumably defined earlier in
# the full script — confirm; this snippet also ends without calling reg.run().
path = '/Users/m131199/Documents/segData/Brats2014/'
fileName = 'T2.nii'


for i in range(len(folderNames)):
    folder = path+folderNames[i] + '/'
    input_fixed = folder + fileName
    os.chdir(folder)
    # Per-subject output directory, e.g. 'atlasTransformations_T2'.
    folderName = 'atlasTransformations_'+fileName[:len(fileName)-4]
    os.mkdir(folderName)    
    newPath = folder + folderName + '/'
    os.chdir(newPath)
#    atlasPath = '/Users/m131199/Documents/LGG_GUI/LGG/'
    atlasPath = homedir 
    input_moving = atlasPath + 'atlas_stripped.nii'
    reg = Registration()
    # ants-registration parameters:
    reg.inputs.fixed_image = input_fixed  # fixed image
    reg.inputs.moving_image = input_moving  # moving image
    reg.inputs.output_transform_prefix = newPath  # file path
    reg.inputs.transforms = ['Affine','SyN']  # list of transformations
    # Per-stage parameters: (gradient step,) for Affine;
    # (step, update field sigma, total field sigma) for SyN.
    reg.inputs.transform_parameters = [(.3,),(0.1,3.0,0.0)]
    reg.inputs.number_of_iterations = [[40, 20, 10],[40, 20, 10]]
#    reg.inputs.number_of_iterations = [[1, 1, 1],[1, 1, 1]]
    reg.inputs.dimension = 3
    reg.inputs.initial_moving_transform_com = True
    #reg.inputs.invert_initial_moving_transform = True
    reg.inputs.output_warped_image = True
    reg.inputs.output_inverse_warped_image = True
    reg.inputs.write_composite_transform = True
    reg.inputs.collapse_output_transforms = True
예제 #29
0
AT_to_T1_Fit.inputs.numberOfMatchPoints = 20
AT_to_T1_Fit.inputs.numberOfHistogramBins = 50
AT_to_T1_Fit.inputs.minimumStepLength = [0.001, 0.0001, 0.0001]
AT_to_T1_Fit.inputs.outputVolume = 'AT_to_T1.nii.gz'
AT_to_T1_Fit.inputs.outputVolumePixelType = 'int'
### AT_to_T1_Fit.inputs.interpolationMode='BSpline'
AT_to_T1_Fit.inputs.initializeTransformMode = 'useMomentsAlign'  # 'useGeometryAlign'
### AT_to_T1_Fit.inputs.maskProcessingMode="ROIAUTO"  ## Images are choppped already, so ROIAUTO should work
### AT_to_T1_Fit.inputs.ROIAutoClosingSize=2  ## Mini pig brains are much smalle than human brains
### AT_to_T1_Fit.inputs.ROIAutoDilateSize=.5  ## Auto dilate a very small amount

minipigWF.connect(chopT1, 'outFN', AT_to_T1_Fit, 'fixedVolume')
minipigWF.connect(fixAtlas, 'outFN', AT_to_T1_Fit, 'movingVolume')

######===========================
BeginANTS = pe.Node(interface=Registration(), name="antsA2S")
##many_cpu_sge_options_dictionary = {'qsub_args': modify_qsub_args(CLUSTER_QUEUE,8,8,24), 'overwrite': True}
##ComputeAtlasToSubjectTransform.plugin_args = many_cpu_sge_options_dictionary

BeginANTS.inputs.dimension = 3
""" This is the recommended set of parameters from the ANTS developers """
BeginANTS.inputs.output_transform_prefix = 'A2S_output_tfm'
BeginANTS.inputs.transforms = ["Affine", "SyN", "SyN", "SyN"]
BeginANTS.inputs.transform_parameters = [[0.1], [0.1, 3.0, 0.0],
                                         [0.1, 3.0, 0.0], [0.1, 3.0, 0.0]]
BeginANTS.inputs.metric = ['MI', 'CC', 'CC', 'CC']
BeginANTS.inputs.sampling_strategy = ['Regular', None, None, None]
BeginANTS.inputs.sampling_percentage = [0.27, 1.0, 1.0, 1.0]
BeginANTS.inputs.metric_weight = [1.0, 1.0, 1.0, 1.0]
BeginANTS.inputs.radius_or_number_of_bins = [32, 3, 3, 3]
BeginANTS.inputs.number_of_iterations = [[1000, 1000, 1000, 1000], [1000, 250],
예제 #30
0
        print("Downloaded file: {0}".format(localFilename))
    else:
        print("File previously downloaded {0}".format(localFilename))

# The two half-resolution T1 volumes downloaded above; `mydatadir` is
# presumably defined earlier in the script — confirm.
input_images = [
    os.path.join(mydatadir, '01_T1_half.nii.gz'),
    os.path.join(mydatadir, '02_T1_half.nii.gz'),
]
"""
3. Define the parameters of the registration. Settings are
found in the file ``smri_ants_registration_settings.json``
distributed with the ``example_data`` of `nipype`.

"""

# Load all ANTs registration settings from the packaged JSON file instead of
# setting each trait by hand; only the image pair is assigned explicitly.
reg = Registration(
    from_file=example_data('smri_ants_registration_settings.json'))
reg.inputs.fixed_image = input_images[0]
reg.inputs.moving_image = input_images[1]
"""
Alternatively to the use of the ``from_file`` feature to load ANTs settings,
the user can manually set all those inputs instead::

    reg.inputs.output_transform_prefix = 'thisTransform'
    reg.inputs.output_warped_image = 'INTERNAL_WARPED.nii.gz'
    reg.inputs.output_transform_prefix = "output_"
    reg.inputs.transforms = ['Translation', 'Rigid', 'Affine', 'SyN']
    reg.inputs.transform_parameters = [(0.1,), (0.1,), (0.1,), (0.2, 3.0, 0.0)]
    reg.inputs.number_of_iterations = ([[10000, 111110, 11110]] * 3 +
                                       [[100, 50, 30]])
    reg.inputs.dimension = 3
    reg.inputs.write_composite_transform = True
예제 #31
0
def preprocess(data_dir, subject, atlas_dir, output_dir):
    """Preprocess one subject's FLAIR volume and register it to a template.

    Pipeline: reorient to MNI standard orientation, crop the field of view,
    BET pre-stripping, N4 bias-field correction, ANTs Rigid+Affine+SyN
    registration to ``<atlas_dir>/flair_test.nii.gz``, a second BET pass
    (producing a brain mask), and z-score normalization of the registered
    image. Intermediates live in a temporary directory; the final image is
    written to ``<output_dir>/<subject>/ANTS_FLAIR_r.nii.gz``.

    Parameters
    ----------
    data_dir : str
        Root directory containing ``<subject>/FLAIR.nii.gz``.
    subject : str
        Subject identifier (sub-directory name under ``data_dir``).
    atlas_dir : str
        Directory holding the fixed FLAIR template ``flair_test.nii.gz``.
    output_dir : str
        Root directory for per-subject outputs.

    The function is a no-op when the final output already exists or when
    the subject has no FLAIR input.
    """
    with tempfile.TemporaryDirectory() as temp_dir:
        # Guard clauses replace the original nested if/else-pass pyramid.
        # Already processed -> nothing to do.
        if os.path.exists(os.path.join(output_dir, subject, 'ANTS_FLAIR_r.nii.gz')):
            return
        # No FLAIR input for this subject -> nothing to do.
        if not os.path.exists(os.path.join(data_dir, subject, 'FLAIR.nii.gz')):
            return

        # reorient to MNI standard direction
        reorient = fsl.utils.Reorient2Std()
        reorient.inputs.in_file = os.path.join(data_dir, subject, 'FLAIR.nii.gz')
        reorient.inputs.out_file = os.path.join(temp_dir, 'FLAIR_reorient.nii.gz')
        reorient.run()

        # robust fov to remove neck and lower head automatically
        rf = fsl.utils.RobustFOV()
        rf.inputs.in_file = os.path.join(temp_dir, 'FLAIR_reorient.nii.gz')
        rf.inputs.out_roi = os.path.join(temp_dir, 'FLAIR_RF.nii.gz')
        rf.run()

        # skull stripping first run (in-place: out_file overwrites in_file)
        print('BET pre-stripping...')
        btr1 = fsl.BET()
        btr1.inputs.in_file = os.path.join(temp_dir, 'FLAIR_RF.nii.gz')
        btr1.inputs.robust = True
        btr1.inputs.frac = 0.2
        btr1.inputs.out_file = os.path.join(temp_dir, 'FLAIR_RF.nii.gz')
        btr1.run()

        # N4 bias field correction
        print('N4 Bias Field Correction running...')
        input_image = os.path.join(temp_dir, 'FLAIR_RF.nii.gz')
        output_image = os.path.join(temp_dir, 'FLAIR_RF.nii.gz')
        # Argument list instead of a shell=True format string: immune to
        # shell injection and to paths containing spaces. The token split
        # matches exactly what the shell produced for the original command
        # (the bracketed values were whitespace-split by the shell).
        subprocess.call(['N4BiasFieldCorrection',
                         '--bspline-fitting', '[', '300', ']',
                         '-d', '3',
                         '--input-image', input_image,
                         '--convergence', '[', '50x50x30x20', ']',
                         '--output', output_image,
                         '--shrink-factor', '3'])

        # registration of FLAIR to MNI152 FLAIR template
        print('ANTs registration...')
        reg = Registration()
        reg.inputs.fixed_image = atlas_dir + '/flair_test.nii.gz'
        reg.inputs.moving_image = os.path.join(temp_dir, 'FLAIR_RF.nii.gz')
        reg.inputs.output_transform_prefix = os.path.join(output_dir, subject, 'FLAIR_r_transform.mat')
        # Clip the intensity tails before matching.
        reg.inputs.winsorize_upper_quantile = 0.995
        reg.inputs.winsorize_lower_quantile = 0.005
        reg.inputs.transforms = ['Rigid', 'Affine', 'SyN']
        reg.inputs.transform_parameters = [(0.1,), (0.1,), (0.1, 3.0, 0.0)]
        reg.inputs.number_of_iterations = [[1000, 500, 250, 100], [1000, 500, 250, 100], [100, 70, 50, 20]]
        reg.inputs.dimension = 3
        # 0 selects the center-alignment mode of ANTs'
        # --initial-moving-transform; see the ANTs docs for the exact mode.
        reg.inputs.initial_moving_transform_com = 0
        reg.inputs.write_composite_transform = True
        reg.inputs.collapse_output_transforms = False
        reg.inputs.initialize_transforms_per_stage = False
        # The SyN stage combines two metrics (Mattes + CC).
        reg.inputs.metric = ['Mattes', 'Mattes', ['Mattes', 'CC']]
        reg.inputs.metric_weight = [1, 1, [.5, .5]]  # Default (value ignored currently by ANTs)
        reg.inputs.radius_or_number_of_bins = [32, 32, [32, 4]]
        reg.inputs.sampling_strategy = ['Random', 'Random', None]
        reg.inputs.sampling_percentage = [0.25, 0.25, [0.05, 0.10]]
        reg.inputs.convergence_threshold = [1e-6, 1.e-6, 1.e-6]
        reg.inputs.convergence_window_size = [10] * 3
        reg.inputs.smoothing_sigmas = [[3, 2, 1, 0], [3, 2, 1, 0], [3, 2, 1, 0]]
        reg.inputs.sigma_units = ['vox'] * 3
        reg.inputs.shrink_factors = [[8, 4, 2, 1], [8, 4, 2, 1], [8, 4, 2, 1]]
        reg.inputs.use_estimate_learning_rate_once = [True, True, True]
        reg.inputs.use_histogram_matching = [True, True, True]  # This is the default
        reg.inputs.output_warped_image = os.path.join(output_dir, subject, 'ANTS_FLAIR_r.nii.gz')
        reg.inputs.verbose = True
        reg.run()

        # second pass of BET skull stripping (in-place, also writes the mask)
        print('BET skull stripping...')
        btr2 = fsl.BET()
        btr2.inputs.in_file = os.path.join(output_dir, subject, 'ANTS_FLAIR_r.nii.gz')
        btr2.inputs.robust = True
        btr2.inputs.frac = 0.1
        btr2.inputs.mask = True
        btr2.inputs.out_file = os.path.join(output_dir, subject, 'ANTS_FLAIR_r.nii.gz')
        btr2.run()

        # copy mask file to the temp folder for the normalization step
        shutil.copy2(os.path.join(output_dir, subject, 'ANTS_FLAIR_r_mask.nii.gz'),
                     os.path.join(temp_dir, 'ANTS_FLAIR_r_mask.nii.gz'))

        # z score normalization within the brain mask, written back in place
        FLAIR_path = os.path.join(output_dir, subject, 'ANTS_FLAIR_r.nii.gz')
        FLAIR_final = nib.load(FLAIR_path)
        FLAIR_mask_path = os.path.join(temp_dir, 'ANTS_FLAIR_r_mask.nii.gz')
        mask = nib.load(FLAIR_mask_path)
        FLAIR_norm = zscore_normalize(FLAIR_final, mask)
        nib.save(FLAIR_norm, FLAIR_path)

        print('.........................')
        print('patient %s registration done' % subject)
예제 #32
0
        localFile.write(remotefile.read())
        localFile.close()
        print("Downloaded file: {0}".format(localFilename))
    else:
        print("File previously downloaded {0}".format(localFilename))

# The two half-resolution T1 volumes from the example data directory:
# element 0 is the fixed image, element 1 the moving image.
input_images=[
os.path.join(mydatadir,'01_T1_half.nii.gz'),
os.path.join(mydatadir,'02_T1_half.nii.gz'),
]

"""
3. Define the parameters of the registration
"""

reg = Registration()
reg.inputs.fixed_image =  input_images[0]
reg.inputs.moving_image = input_images[1]
reg.inputs.output_transform_prefix = 'thisTransform'
reg.inputs.output_warped_image = 'INTERNAL_WARPED.nii.gz'

# Four stages of increasing complexity; the step size is the first
# element of each transform_parameters tuple (SyN adds update-field and
# total-field smoothing parameters).
reg.inputs.transforms = ['Translation', 'Rigid', 'Affine', 'SyN']
reg.inputs.transform_parameters = [(0.1,), (0.1,), (0.1,), (0.3, 3.0, 0.0)]
# One iteration list per stage, one entry per resolution level.
reg.inputs.number_of_iterations = [[10000, 0, 0], [10000, 0, 0], [10000, 0, 0], [10, 0, 0]]
reg.inputs.dimension = 3
reg.inputs.write_composite_transform = True
reg.inputs.collapse_output_transforms = True
# Same Mattes MI metric for every stage.
reg.inputs.metric = ['Mattes']*4
reg.inputs.metric_weight = [1]*4 # Default (value ignored currently by ANTs)
reg.inputs.radius_or_number_of_bins = [32]*4
# Dense sampling (None) for the final SyN stage only.
reg.inputs.sampling_strategy = ['Regular']*3 + [None]
def main(args=None):
    """Rigidly coregister an MPRAGE (T1) volume to a FLAIR volume with ANTs.

    Steps
    -----
    1. When ``--mask`` is not supplied, run ANTs ``BrainExtraction`` on the
       T1 to obtain a brain mask.
    2. First-pass rigid registration of the raw T1 to the FLAIR.
    3. Propagate the T1 brain mask into FLAIR space (MultiLabel interp).
    4. N4 bias-correct both images and mask them.
    5. Second-pass rigid registration on the corrected/masked images,
       initialized from the first-pass transform.
    6. Resample the bias-corrected T1 into FLAIR space with BSpline
       interpolation, then delete intermediates.

    Parameters
    ----------
    args : list of str, optional
        Argument vector handed to ``arg_parser().parse_args``; ``None``
        falls back to ``sys.argv[1:]``.
    """

    args = arg_parser().parse_args(args)
    FLAIR = args.FLAIR
    MPRAGE = args.T1

    prefix = args.prefix + '.'

    if args.mask is None:
        # No precomputed T1 mask: the brain-extraction template files must
        # all exist before we start.
        args.temp_mask = os.path.abspath(args.temp_mask)
        args.brain_template = os.path.abspath(args.brain_template)
        args.temp_prob = os.path.abspath(args.temp_prob)
        if not os.path.isfile(args.temp_mask):
            raise Exception("template mask not found")
        if not os.path.isfile(args.brain_template):
            raise Exception("brain template mask not found")
        if not os.path.isfile(args.temp_prob):
            raise Exception("template probability mask not found")
    elif not os.path.isfile(args.mask):
        raise Exception("T1 mask file not found")

    if not os.path.isfile(MPRAGE):
        raise Exception("Input T1 file not found")
    if not os.path.isfile(FLAIR):
        raise Exception("Input FLAIR file not found")

    if args.outfolder is not None:
        abs_out = os.path.abspath(args.outfolder)
        if not os.path.exists(abs_out):
            # The nipype/ANTs wrappers write into the current working
            # directory, so copy the inputs into the output folder and work
            # from there.
            os.mkdir(abs_out)
        copyfile(os.path.abspath(MPRAGE), os.path.join(abs_out, os.path.basename(MPRAGE)))
        copyfile(os.path.abspath(FLAIR), os.path.join(abs_out, os.path.basename(FLAIR)))
        if args.mask is not None:
            if os.path.isfile(args.mask):
                copyfile(os.path.abspath(args.mask), os.path.join(abs_out, prefix + 'MPRAGE.mask.nii.gz'))
        os.chdir(args.outfolder)
    elif args.mask is not None:
        # NOTE(review): this joins the new name onto the mask *file* path
        # rather than its parent directory -- most likely should be
        # os.path.dirname(os.path.abspath(args.mask)); confirm intent
        # before changing.
        copyfile(os.path.abspath(args.mask), os.path.join(os.path.abspath(args.mask), prefix + 'MPRAGE.mask.nii.gz'))

    if args.mask is None:
        # T1 brain extraction
        brainextraction = BrainExtraction()
        brainextraction.inputs.dimension = 3
        brainextraction.inputs.anatomical_image = MPRAGE
        brainextraction.inputs.brain_template = args.brain_template
        brainextraction.inputs.brain_probability_mask = args.temp_prob
        brainextraction.inputs.extraction_registration_mask = args.temp_mask
        brainextraction.inputs.debug = True
        print("brain extraction")
        print(' ')
        print(brainextraction.cmdline)
        print('-'*30)
        brainextraction.run()
        # Rename the fixed antsBrainExtraction output names to the run
        # prefix and drop the intermediates it leaves behind.
        os.rename('highres001_BrainExtractionMask.nii.gz', prefix + 'MPRAGE.mask.nii.gz')
        os.rename('highres001_BrainExtractionBrain.nii.gz', prefix + 'MPRAGE.brain.nii.gz')
        os.remove('highres001_BrainExtractionPrior0GenericAffine.mat')
        os.rmdir('highres001_')

    # two step registration with ants (step 1): rigid, MI metric,
    # 4-level multiresolution pyramid
    reg = Registration()
    reg.inputs.fixed_image = FLAIR
    reg.inputs.moving_image = MPRAGE
    reg.inputs.output_transform_prefix = "output_"
    reg.inputs.output_warped_image = prefix + 'output_warped_image.nii.gz'
    reg.inputs.dimension = 3
    reg.inputs.transforms = ['Rigid']
    reg.inputs.transform_parameters = [[0.1]]
    reg.inputs.radius_or_number_of_bins = [32]
    reg.inputs.metric = ['MI']
    reg.inputs.sampling_percentage = [0.1]
    reg.inputs.sampling_strategy = ['Regular']
    reg.inputs.shrink_factors = [[4, 3, 2, 1]]
    reg.inputs.smoothing_sigmas = [[3, 2, 1, 0]]
    reg.inputs.sigma_units = ['vox']
    reg.inputs.use_histogram_matching = [False]
    reg.inputs.number_of_iterations = [[1000, 500, 250, 100]]
    reg.inputs.winsorize_lower_quantile = 0.025
    reg.inputs.winsorize_upper_quantile = 0.975
    print("first pass registration")
    print(' ')
    print(reg.cmdline)
    print('-'*30)
    reg.run()

    os.rename('output_0GenericAffine.mat', prefix + 'MPRAGE_to_FLAIR.firstpass.mat')

    # apply transform: propagate the T1 mask into FLAIR space
    at = ApplyTransforms()
    at.inputs.dimension = 3
    at.inputs.input_image = prefix + 'MPRAGE.mask.nii.gz'
    at.inputs.reference_image = FLAIR
    at.inputs.output_image = prefix + 'FLAIR.mask.nii.gz'
    # MultiLabel keeps the mask labels crisp (no interpolation blur).
    at.inputs.interpolation = 'MultiLabel'
    at.inputs.default_value = 0
    at.inputs.transforms = [prefix + 'MPRAGE_to_FLAIR.firstpass.mat']
    at.inputs.invert_transform_flags = [False]
    print("apply transform to T1 mask")
    print(' ')
    print(at.cmdline)
    print('-'*30)
    at.run()

    # bias correct FLAIR and MPRAGE
    n4m = N4BiasFieldCorrection()
    n4m.inputs.dimension = 3
    n4m.inputs.input_image = MPRAGE
    n4m.inputs.mask_image = prefix + 'MPRAGE.mask.nii.gz'
    n4m.inputs.bspline_fitting_distance = 300
    n4m.inputs.shrink_factor = 3
    n4m.inputs.n_iterations = [50, 50, 30, 20]
    n4m.inputs.output_image = prefix + 'MPRAGE.N4.nii.gz'
    print("bias correcting T1")
    print(' ')
    print(n4m.cmdline)
    print('-'*30)
    n4m.run()

    # Same N4 settings, applied to the FLAIR with its own mask.
    n4f = copy.deepcopy(n4m)
    n4f.inputs.input_image = FLAIR
    n4f.inputs.mask_image = prefix + 'FLAIR.mask.nii.gz'
    n4f.inputs.output_image = prefix + 'FLAIR.N4.nii.gz'
    print("bias correcting FLAIR")
    print(' ')
    print(n4f.cmdline)
    print('-'*30)
    n4f.run()

    # mask the bias corrected FLAIR and MPRAGE (voxelwise a*b)
    calc = afni.Calc()
    calc.inputs.in_file_a = prefix + 'FLAIR.N4.nii.gz'
    calc.inputs.in_file_b = prefix + 'FLAIR.mask.nii.gz'
    calc.inputs.expr = 'a*b'
    calc.inputs.out_file = prefix + 'FLAIR.N4.masked.nii.gz'
    calc.inputs.outputtype = 'NIFTI'
    calc.inputs.overwrite = True
    calc.run()

    calc1 = copy.deepcopy(calc)
    calc1.inputs.in_file_a = prefix + 'MPRAGE.N4.nii.gz'
    calc1.inputs.in_file_b = prefix + 'MPRAGE.mask.nii.gz'
    calc1.inputs.out_file = prefix + 'MPRAGE.N4.masked.nii.gz'
    calc1.inputs.overwrite = True
    calc1.run()

    # register bias corrected images (step 2), warm-started from the
    # first-pass transform
    reg1 = copy.deepcopy(reg)
    reg1.inputs.output_transform_prefix = "output_"
    reg1.inputs.output_warped_image = prefix + 'output_warped_image.nii.gz'
    reg1.inputs.initial_moving_transform = prefix + 'MPRAGE_to_FLAIR.firstpass.mat'
    print("second pass registration")
    print(' ')
    print(reg1.cmdline)
    print('-'*30)
    reg1.run()
    os.rename('output_0GenericAffine.mat', prefix + 'MPRAGE_to_FLAIR.secondpass.mat')

    # generate the final bias-corrected T1 in FLAIR space
    atf = ApplyTransforms()
    atf.inputs.dimension = 3
    atf.inputs.input_image = prefix + 'MPRAGE.N4.nii.gz'
    atf.inputs.reference_image = FLAIR
    atf.inputs.output_image = prefix + 'MPRAGE.N4.toFLAIR.nii.gz'
    # Cubic BSpline for the continuous-valued image.
    atf.inputs.interpolation = 'BSpline'
    atf.inputs.interpolation_parameters = (3,)
    atf.inputs.default_value = 0
    atf.inputs.transforms = [prefix + 'MPRAGE_to_FLAIR.secondpass.mat']
    atf.inputs.invert_transform_flags = [False]
    print("final apply transform")
    print(' ')
    print(atf.cmdline)
    print('-'*30)
    atf.run()

    # cleanup of intermediates
    os.remove(prefix + 'output_warped_image.nii.gz')

    if args.outfolder is not None:
        os.remove(os.path.join(abs_out, os.path.basename(MPRAGE)))
        os.remove(os.path.join(abs_out, os.path.basename(FLAIR)))

    if args.mask is None:
        os.remove(prefix + 'MPRAGE.brain.nii.gz')

    if not args.storetemp:
        os.remove(prefix + 'MPRAGE.mask.nii.gz')
        os.remove(prefix + 'MPRAGE_to_FLAIR.firstpass.mat')
        os.remove(prefix + 'FLAIR.N4.masked.nii.gz')
        os.remove(prefix + 'MPRAGE.N4.masked.nii.gz')
        os.remove(prefix + 'MPRAGE.N4.nii.gz')

    return
예제 #34
0
def embedded_antsreg_2d(source_image,
                        target_image,
                        run_rigid=False,
                        rigid_iterations=1000,
                        run_affine=False,
                        affine_iterations=1000,
                        run_syn=True,
                        coarse_iterations=40,
                        medium_iterations=50,
                        fine_iterations=40,
                        cost_function='MutualInformation',
                        interpolation='NearestNeighbor',
                        convergence=1e-6,
                        ignore_affine=False,
                        ignore_header=False,
                        save_data=False,
                        overwrite=False,
                        output_dir=None,
                        file_name=None):
    """ Embedded ANTS Registration 2D

    Runs the rigid and/or Symmetric Normalization (SyN) algorithm of ANTs and
    formats the output deformations into voxel coordinate mappings as used in
    CBSTools registration and transformation routines.

    Parameters
    ----------
    source_image: niimg
        Image to register
    target_image: niimg
        Reference image to match
    run_rigid: bool
        Whether or not to run a rigid registration first (default is False)
    rigid_iterations: float
        Number of iterations in the rigid step (default is 1000)
    run_affine: bool
        Whether or not to run a affine registration first (default is False)
    affine_iterations: float
        Number of iterations in the affine step (default is 1000)
    run_syn: bool
        Whether or not to run a SyN registration (default is True)
    coarse_iterations: float
        Number of iterations at the coarse level (default is 40)
    medium_iterations: float
        Number of iterations at the medium level (default is 50)
    fine_iterations: float
        Number of iterations at the fine level (default is 40)
    cost_function: {'CrossCorrelation', 'MutualInformation'}
        Cost function for the registration (default is 'MutualInformation')
    interpolation: {'NearestNeighbor', 'Linear'}
        Interpolation for the registration result (default is 'NearestNeighbor')
    convergence: float
        Threshold for convergence, can make the algorithm very slow
        (default is 1e-6)
    ignore_affine: bool
        Ignore the affine matrix information extracted from the image header
        (default is False)
    ignore_header: bool
        Ignore the orientation information and affine matrix information
        extracted from the image header (default is False)
    save_data: bool
        Save output data to file (default is False)
    overwrite: bool
        Overwrite existing results (default is False)
    output_dir: str, optional
        Path to desired output directory, will be created if it doesn't exist
    file_name: str, optional
        Desired base name for output files with file extension
        (suffixes will be added)

    Returns
    ----------
    dict
        Dictionary collecting outputs under the following keys
        (suffix of output files in brackets)

        * transformed_source (niimg): Deformed source image (_ants-def)
        * mapping (niimg): Coordinate mapping from source to target (_ants-map)
        * inverse (niimg): Inverse coordinate mapping from target to source
          (_ants-invmap)

    Notes
    ----------
    Port of the CBSTools Java module by Pierre-Louis Bazin. The main algorithm
    is part of the ANTs software by Brian Avants and colleagues [1]_. The
    interfacing with ANTs is performed through Nipype [2]_. Parameters have been
    set to values commonly found in neuroimaging scripts online, but not
    necessarily optimal.

    References
    ----------
    .. [1] Avants et al (2008), Symmetric diffeomorphic
       image registration with cross-correlation: evaluating automated labeling
       of elderly and neurodegenerative brain, Med Image Anal. 12(1):26-41
    .. [2] Gorgolewski et al (2011) Nipype: a flexible, lightweight and
       extensible neuroimaging data processing framework in python.
       Front Neuroinform 5. doi:10.3389/fninf.2011.00013
    """

    print('\nEmbedded ANTs Registration')

    # for external tools: nipype
    try:
        from nipype.interfaces.ants import Registration
        from nipype.interfaces.ants import ApplyTransforms
    except ImportError:
        print(
            'Error: Nipype and/or ANTS could not be imported, they are required'
            + ' in order to run this module. \n (aborting)')
        return None

    # make sure that saving related parameters are correct
    output_dir = _output_dir_4saving(
        output_dir, source_image)  # needed for intermediate results
    if save_data:
        transformed_source_file = os.path.join(
            output_dir,
            _fname_4saving(file_name=file_name,
                           rootfile=source_image,
                           suffix='ants-def'))

        mapping_file = os.path.join(
            output_dir,
            _fname_4saving(file_name=file_name,
                           rootfile=source_image,
                           suffix='ants-map'))

        inverse_mapping_file = os.path.join(
            output_dir,
            _fname_4saving(file_name=file_name,
                           rootfile=source_image,
                           suffix='ants-invmap'))
        if overwrite is False \
            and os.path.isfile(transformed_source_file) \
            and os.path.isfile(mapping_file) \
            and os.path.isfile(inverse_mapping_file) :

            print("skip computation (use existing results)")
            output = {
                'transformed_source': load_volume(transformed_source_file),
                'mapping': load_volume(mapping_file),
                'inverse': load_volume(inverse_mapping_file)
            }
            return output

    # load and get dimensions and resolution from input images
    source = load_volume(source_image)
    src_affine = source.affine
    src_header = source.header
    nsx = source.header.get_data_shape()[X]
    nsy = source.header.get_data_shape()[Y]
    nsz = 1
    rsx = source.header.get_zooms()[X]
    rsy = source.header.get_zooms()[Y]
    rsz = 1

    target = load_volume(target_image)
    trg_affine = target.affine
    trg_header = target.header
    ntx = target.header.get_data_shape()[X]
    nty = target.header.get_data_shape()[Y]
    ntz = 1
    rtx = target.header.get_zooms()[X]
    rty = target.header.get_zooms()[Y]
    rtz = 1

    # in case the affine transformations are not to be trusted: make them equal
    if ignore_affine or ignore_header:
        mx = np.argmax(np.abs(src_affine[0][0:3]))
        my = np.argmax(np.abs(src_affine[1][0:3]))
        mz = np.argmax(np.abs(src_affine[2][0:3]))
        new_affine = np.zeros((4, 4))
        if ignore_header:
            new_affine[0][0] = rsx
            new_affine[1][1] = rsy
            new_affine[2][2] = rsz
            new_affine[0][3] = -rsx * nsx / 2.0
            new_affine[1][3] = -rsy * nsy / 2.0
            new_affine[2][3] = -rsz * nsz / 2.0
        else:
            new_affine[0][mx] = rsx * np.sign(src_affine[0][mx])
            new_affine[1][my] = rsy * np.sign(src_affine[1][my])
            new_affine[2][mz] = rsz * np.sign(src_affine[2][mz])
            if (np.sign(src_affine[0][mx]) < 0):
                new_affine[0][3] = rsx * nsx / 2.0
            else:
                new_affine[0][3] = -rsx * nsx / 2.0

            if (np.sign(src_affine[1][my]) < 0):
                new_affine[1][3] = rsy * nsy / 2.0
            else:
                new_affine[1][3] = -rsy * nsy / 2.0

            if (np.sign(src_affine[2][mz]) < 0):
                new_affine[2][3] = rsz * nsz / 2.0
            else:
                new_affine[2][3] = -rsz * nsz / 2.0
        #if (np.sign(src_affine[0][mx])<0): new_affine[mx][3] = rsx*nsx
        #if (np.sign(src_affine[1][my])<0): new_affine[my][3] = rsy*nsy
        #if (np.sign(src_affine[2][mz])<0): new_affine[mz][3] = rsz*nsz
        #new_affine[0][3] = nsx/2.0
        #new_affine[1][3] = nsy/2.0
        #new_affine[2][3] = nsz/2.0
        new_affine[3][3] = 1.0

        src_img = nb.Nifti1Image(source.get_data(), new_affine, source.header)
        src_img.update_header()
        src_img_file = os.path.join(
            output_dir,
            _fname_4saving(file_name=file_name,
                           rootfile=source_image,
                           suffix='tmp_srcimg'))
        save_volume(src_img_file, src_img)
        source = load_volume(src_img_file)
        src_affine = source.affine
        src_header = source.header

        # create generic affine aligned with the orientation for the target
        mx = np.argmax(np.abs(trg_affine[0][0:3]))
        my = np.argmax(np.abs(trg_affine[1][0:3]))
        mz = np.argmax(np.abs(trg_affine[2][0:3]))
        new_affine = np.zeros((4, 4))
        if ignore_header:
            new_affine[0][0] = rtx
            new_affine[1][1] = rty
            new_affine[2][2] = rtz
            new_affine[0][3] = -rtx * ntx / 2.0
            new_affine[1][3] = -rty * nty / 2.0
            new_affine[2][3] = -rtz * ntz / 2.0
        else:
            new_affine[0][mx] = rtx * np.sign(trg_affine[0][mx])
            new_affine[1][my] = rty * np.sign(trg_affine[1][my])
            new_affine[2][mz] = rtz * np.sign(trg_affine[2][mz])
            if (np.sign(trg_affine[0][mx]) < 0):
                new_affine[0][3] = rtx * ntx / 2.0
            else:
                new_affine[0][3] = -rtx * ntx / 2.0

            if (np.sign(trg_affine[1][my]) < 0):
                new_affine[1][3] = rty * nty / 2.0
            else:
                new_affine[1][3] = -rty * nty / 2.0

            if (np.sign(trg_affine[2][mz]) < 0):
                new_affine[2][3] = rtz * ntz / 2.0
            else:
                new_affine[2][3] = -rtz * ntz / 2.0
        #if (np.sign(trg_affine[0][mx])<0): new_affine[mx][3] = rtx*ntx
        #if (np.sign(trg_affine[1][my])<0): new_affine[my][3] = rty*nty
        #if (np.sign(trg_affine[2][mz])<0): new_affine[mz][3] = rtz*ntz
        #new_affine[0][3] = ntx/2.0
        #new_affine[1][3] = nty/2.0
        #new_affine[2][3] = ntz/2.0
        new_affine[3][3] = 1.0

        trg_img = nb.Nifti1Image(target.get_data(), new_affine, target.header)
        trg_img.update_header()
        trg_img_file = os.path.join(
            output_dir,
            _fname_4saving(file_name=file_name,
                           rootfile=source_image,
                           suffix='tmp_trgimg'))
        save_volume(trg_img_file, trg_img)
        target = load_volume(trg_img_file)
        trg_affine = target.affine
        trg_header = target.header

    # build coordinate mapping matrices and save them to disk
    src_coord = np.zeros((nsx, nsy, 2))
    trg_coord = np.zeros((ntx, nty, 2))
    for x in range(nsx):
        for y in range(nsy):
            src_coord[x, y, X] = x
            src_coord[x, y, Y] = y
    src_map = nb.Nifti1Image(src_coord, source.affine, source.header)
    src_map_file = os.path.join(
        output_dir,
        _fname_4saving(file_name=file_name,
                       rootfile=source_image,
                       suffix='tmp_srccoord'))
    save_volume(src_map_file, src_map)
    for x in range(ntx):
        for y in range(nty):
            trg_coord[x, y, X] = x
            trg_coord[x, y, Y] = y
    trg_map = nb.Nifti1Image(trg_coord, target.affine, target.header)
    trg_map_file = os.path.join(
        output_dir,
        _fname_4saving(file_name=file_name,
                       rootfile=source_image,
                       suffix='tmp_trgcoord'))
    save_volume(trg_map_file, trg_map)

    # run the main ANTS software
    reg = Registration()
    reg.inputs.dimension = 2

    # add a prefix to avoid multiple names?
    prefix = _fname_4saving(file_name=file_name,
                            rootfile=source_image,
                            suffix='tmp_syn')
    prefix = os.path.basename(prefix)
    prefix = prefix.split(".")[0]
    reg.inputs.output_transform_prefix = prefix
    reg.inputs.fixed_image = [target.get_filename()]
    reg.inputs.moving_image = [source.get_filename()]

    print("registering " + source.get_filename() + "\n to " +
          target.get_filename())

    if run_rigid is True and run_affine is True and run_syn is True:
        reg.inputs.transforms = ['Rigid', 'Affine', 'SyN']
        reg.inputs.transform_parameters = [(0.1, ), (0.1, ), (0.2, 3.0, 0.0)]
        reg.inputs.number_of_iterations = [
            [rigid_iterations, rigid_iterations, rigid_iterations],
            [affine_iterations, affine_iterations, affine_iterations],
            [
                coarse_iterations, coarse_iterations, medium_iterations,
                fine_iterations
            ]
        ]
        if (cost_function == 'CrossCorrelation'):
            reg.inputs.metric = ['CC', 'CC', 'CC']
            reg.inputs.metric_weight = [1.0, 1.0, 1.0]
            reg.inputs.radius_or_number_of_bins = [5, 5, 5]
        else:
            reg.inputs.metric = ['MI', 'MI', 'MI']
            reg.inputs.metric_weight = [1.0, 1.0, 1.0]
            reg.inputs.radius_or_number_of_bins = [32, 32, 32]
        reg.inputs.shrink_factors = [[4, 2, 1]] + [[4, 2, 1]] + [[8, 4, 2, 1]]
        reg.inputs.smoothing_sigmas = [[3, 2, 1]] + [[3, 2, 1]
                                                     ] + [[2, 1, 0.5, 0]]
        reg.inputs.sampling_strategy = ['Random'] + ['Random'] + ['Random']
        reg.inputs.sampling_percentage = [0.3] + [0.3] + [0.3]
        reg.inputs.convergence_threshold = [convergence] + [convergence
                                                            ] + [convergence]
        reg.inputs.convergence_window_size = [10] + [10] + [5]
        reg.inputs.use_histogram_matching = [False] + [False] + [False]
        reg.inputs.winsorize_lower_quantile = 0.001
        reg.inputs.winsorize_upper_quantile = 0.999

    elif run_rigid is True and run_affine is False and run_syn is True:
        reg.inputs.transforms = ['Rigid', 'SyN']
        reg.inputs.transform_parameters = [(0.1, ), (0.2, 3.0, 0.0)]
        reg.inputs.number_of_iterations = [
            [rigid_iterations, rigid_iterations, rigid_iterations],
            [
                coarse_iterations, coarse_iterations, medium_iterations,
                fine_iterations
            ]
        ]
        if (cost_function == 'CrossCorrelation'):
            reg.inputs.metric = ['CC', 'CC']
            reg.inputs.metric_weight = [1.0, 1.0]
            reg.inputs.radius_or_number_of_bins = [5, 5]
        else:
            reg.inputs.metric = ['MI', 'MI']
            reg.inputs.metric_weight = [1.0, 1.0]
            reg.inputs.radius_or_number_of_bins = [32, 32]
        reg.inputs.shrink_factors = [[4, 2, 1]] + [[8, 4, 2, 1]]
        reg.inputs.smoothing_sigmas = [[3, 2, 1]] + [[2, 1, 0.5, 0]]
        reg.inputs.sampling_strategy = ['Random'] + ['Random']
        reg.inputs.sampling_percentage = [0.3] + [0.3]
        reg.inputs.convergence_threshold = [convergence] + [convergence]
        reg.inputs.convergence_window_size = [10] + [5]
        reg.inputs.use_histogram_matching = [False] + [False]
        reg.inputs.winsorize_lower_quantile = 0.001
        reg.inputs.winsorize_upper_quantile = 0.999

    elif run_rigid is False and run_affine is True and run_syn is True:
        reg.inputs.transforms = ['Affine', 'SyN']
        reg.inputs.transform_parameters = [(0.1, ), (0.2, 3.0, 0.0)]
        reg.inputs.number_of_iterations = [
            [affine_iterations, affine_iterations, affine_iterations],
            [
                coarse_iterations, coarse_iterations, medium_iterations,
                fine_iterations
            ]
        ]
        if (cost_function == 'CrossCorrelation'):
            reg.inputs.metric = ['CC', 'CC']
            reg.inputs.metric_weight = [1.0, 1.0]
            # NOTE(review): this branch is continued from above this view;
            # CC uses a small patch radius, MI uses a histogram bin count.
            reg.inputs.radius_or_number_of_bins = [5, 5]
        else:
            reg.inputs.metric = ['MI', 'MI']
            reg.inputs.metric_weight = [1.0, 1.0]
            reg.inputs.radius_or_number_of_bins = [64, 64]
        # Two stages: the first uses a 3-level pyramid, the second a 4-level
        # pyramid (the stage-wise lists are concatenated with `+`).
        reg.inputs.shrink_factors = [[4, 2, 1]] + [[8, 4, 2, 1]]
        reg.inputs.smoothing_sigmas = [[3, 2, 1]] + [[2, 1, 0.5, 0]]
        reg.inputs.sampling_strategy = ['Random'] + ['Random']
        reg.inputs.sampling_percentage = [0.3] + [0.3]
        reg.inputs.convergence_threshold = [convergence] + [convergence]
        reg.inputs.convergence_window_size = [10] + [5]
        reg.inputs.use_histogram_matching = [False] + [False]
        # Clip intensity outliers at the 0.1% tails before metric evaluation.
        reg.inputs.winsorize_lower_quantile = 0.001
        reg.inputs.winsorize_upper_quantile = 0.999

    # Each branch below configures the ANTs stages for one combination of the
    # run_rigid / run_affine / run_syn flags.  All stage parameters are
    # parallel lists with one entry per transform in `transforms`.
    if run_rigid is True and run_affine is True and run_syn is False:
        reg.inputs.transforms = ['Rigid', 'Affine']
        reg.inputs.transform_parameters = [(0.1, ), (0.1, )]
        reg.inputs.number_of_iterations = [[
            rigid_iterations, rigid_iterations, rigid_iterations
        ], [affine_iterations, affine_iterations, affine_iterations]]
        if (cost_function == 'CrossCorrelation'):
            reg.inputs.metric = ['CC', 'CC']
            reg.inputs.metric_weight = [1.0, 1.0]
            reg.inputs.radius_or_number_of_bins = [5, 5]
        else:
            reg.inputs.metric = ['MI', 'MI']
            reg.inputs.metric_weight = [1.0, 1.0]
            reg.inputs.radius_or_number_of_bins = [32, 32]
        reg.inputs.shrink_factors = [[4, 2, 1]] + [[4, 2, 1]]
        reg.inputs.smoothing_sigmas = [[3, 2, 1]] + [[3, 2, 1]]
        reg.inputs.sampling_strategy = ['Random'] + ['Random']
        reg.inputs.sampling_percentage = [0.3] + [0.3]
        reg.inputs.convergence_threshold = [convergence] + [convergence]
        reg.inputs.convergence_window_size = [10] + [10]
        reg.inputs.use_histogram_matching = [False] + [False]
        reg.inputs.winsorize_lower_quantile = 0.001
        reg.inputs.winsorize_upper_quantile = 0.999

    elif run_rigid is True and run_affine is False and run_syn is False:
        reg.inputs.transforms = ['Rigid']
        reg.inputs.transform_parameters = [(0.1, )]
        reg.inputs.number_of_iterations = [[
            rigid_iterations, rigid_iterations, rigid_iterations
        ]]
        if (cost_function == 'CrossCorrelation'):
            reg.inputs.metric = ['CC']
            reg.inputs.metric_weight = [1.0]
            reg.inputs.radius_or_number_of_bins = [5]
        else:
            reg.inputs.metric = ['MI']
            reg.inputs.metric_weight = [1.0]
            reg.inputs.radius_or_number_of_bins = [32]
        reg.inputs.shrink_factors = [[4, 2, 1]]
        reg.inputs.smoothing_sigmas = [[3, 2, 1]]
        reg.inputs.sampling_strategy = ['Random']
        reg.inputs.sampling_percentage = [0.3]
        reg.inputs.convergence_threshold = [convergence]
        reg.inputs.convergence_window_size = [10]
        reg.inputs.use_histogram_matching = [False]
        reg.inputs.winsorize_lower_quantile = 0.001
        reg.inputs.winsorize_upper_quantile = 0.999

    elif run_rigid is False and run_affine is True and run_syn is False:
        reg.inputs.transforms = ['Affine']
        reg.inputs.transform_parameters = [(0.1, )]
        reg.inputs.number_of_iterations = [[
            affine_iterations, affine_iterations, affine_iterations
        ]]
        if (cost_function == 'CrossCorrelation'):
            reg.inputs.metric = ['CC']
            reg.inputs.metric_weight = [1.0]
            reg.inputs.radius_or_number_of_bins = [5]
        else:
            reg.inputs.metric = ['MI']
            reg.inputs.metric_weight = [1.0]
            reg.inputs.radius_or_number_of_bins = [32]
        reg.inputs.shrink_factors = [[4, 2, 1]]
        reg.inputs.smoothing_sigmas = [[3, 2, 1]]
        reg.inputs.sampling_strategy = ['Random']
        reg.inputs.sampling_percentage = [0.3]
        reg.inputs.convergence_threshold = [convergence]
        reg.inputs.convergence_window_size = [10]
        reg.inputs.use_histogram_matching = [False]
        reg.inputs.winsorize_lower_quantile = 0.001
        reg.inputs.winsorize_upper_quantile = 0.999

    elif run_rigid is False and run_affine is False and run_syn is True:
        # Deformable-only registration; SyN parameters are
        # (gradientStep, updateFieldVariance, totalFieldVariance).
        reg.inputs.transforms = ['SyN']
        reg.inputs.transform_parameters = [(0.2, 3.0, 0.0)]
        reg.inputs.number_of_iterations = [[
            coarse_iterations, coarse_iterations, medium_iterations,
            fine_iterations
        ]]
        if (cost_function == 'CrossCorrelation'):
            reg.inputs.metric = ['CC']
            reg.inputs.metric_weight = [1.0]
            reg.inputs.radius_or_number_of_bins = [5]
        else:
            reg.inputs.metric = ['MI']
            reg.inputs.metric_weight = [1.0]
            reg.inputs.radius_or_number_of_bins = [32]
        reg.inputs.shrink_factors = [[8, 4, 2, 1]]
        reg.inputs.smoothing_sigmas = [[2, 1, 0.5, 0]]
        reg.inputs.sampling_strategy = ['Random']
        reg.inputs.sampling_percentage = [0.3]
        reg.inputs.convergence_threshold = [convergence]
        reg.inputs.convergence_window_size = [10]
        reg.inputs.use_histogram_matching = [False]
        reg.inputs.winsorize_lower_quantile = 0.001
        reg.inputs.winsorize_upper_quantile = 0.999

    elif run_rigid is False and run_affine is False and run_syn is False:
        # No registration requested: run a zero-iteration Rigid stage,
        # presumably so ANTs still emits (identity) transform files that the
        # code below can apply and clean up -- TODO confirm.
        reg.inputs.transforms = ['Rigid']
        reg.inputs.transform_parameters = [(0.1, )]
        reg.inputs.number_of_iterations = [[0]]
        reg.inputs.metric = ['CC']
        reg.inputs.metric_weight = [1.0]
        reg.inputs.radius_or_number_of_bins = [5]
        reg.inputs.shrink_factors = [[1]]
        reg.inputs.smoothing_sigmas = [[1]]

    # Echo the assembled antsRegistration command line, then execute it.
    print(reg.cmdline)
    result = reg.run()

    # Transforms the moving image
    at = ApplyTransforms()
    # 2D transform application (matches the 2D use of this routine).
    at.inputs.dimension = 2
    at.inputs.input_image = source.get_filename()
    at.inputs.reference_image = target.get_filename()
    at.inputs.interpolation = interpolation
    at.inputs.transforms = result.outputs.forward_transforms
    at.inputs.invert_transform_flags = result.outputs.forward_invert_flags
    print(at.cmdline)
    transformed = at.run()

    # Create coordinate mappings
    # NOTE(review): input_image_type=3 marks the coordinate maps as
    # multi-component (time-series) images -- confirm against nipype's
    # ApplyTransforms documentation.
    src_at = ApplyTransforms()
    src_at.inputs.dimension = 2
    src_at.inputs.input_image_type = 3
    src_at.inputs.input_image = src_map.get_filename()
    src_at.inputs.reference_image = target.get_filename()
    src_at.inputs.interpolation = 'Linear'
    src_at.inputs.transforms = result.outputs.forward_transforms
    src_at.inputs.invert_transform_flags = result.outputs.forward_invert_flags
    mapping = src_at.run()

    # Inverse mapping: target space back to source space, using the
    # reverse transforms produced by the registration.
    trg_at = ApplyTransforms()
    trg_at.inputs.dimension = 2
    trg_at.inputs.input_image_type = 3
    trg_at.inputs.input_image = trg_map.get_filename()
    trg_at.inputs.reference_image = source.get_filename()
    trg_at.inputs.interpolation = 'Linear'
    trg_at.inputs.transforms = result.outputs.reverse_transforms
    trg_at.inputs.invert_transform_flags = result.outputs.reverse_invert_flags
    inverse = trg_at.run()

    # pad coordinate mapping outside the image? hopefully not needed...

    # collect outputs and potentially save
    # NOTE(review): get_data() is deprecated in newer nibabel releases;
    # get_fdata() is the recommended replacement.
    transformed_img = nb.Nifti1Image(
        nb.load(transformed.outputs.output_image).get_data(), target.affine,
        target.header)
    mapping_img = nb.Nifti1Image(
        nb.load(mapping.outputs.output_image).get_data(), target.affine,
        target.header)
    inverse_img = nb.Nifti1Image(
        nb.load(inverse.outputs.output_image).get_data(), source.affine,
        source.header)

    outputs = {
        'transformed_source': transformed_img,
        'mapping': mapping_img,
        'inverse': inverse_img
    }

    # clean-up intermediate files
    # (the *_file paths are created earlier in this function, above this view)
    os.remove(src_map_file)
    os.remove(trg_map_file)
    if ignore_affine or ignore_header:
        os.remove(src_img_file)
        os.remove(trg_img_file)

    # Remove the transform files and warped images written by ANTs; the
    # results are already held in memory as Nifti1Image objects.
    for name in result.outputs.forward_transforms:
        if os.path.exists(name): os.remove(name)
    for name in result.outputs.reverse_transforms:
        if os.path.exists(name): os.remove(name)
    os.remove(transformed.outputs.output_image)
    os.remove(mapping.outputs.output_image)
    os.remove(inverse.outputs.output_image)

    if save_data:
        save_volume(transformed_source_file, transformed_img)
        save_volume(mapping_file, mapping_img)
        save_volume(inverse_mapping_file, inverse_img)

    return outputs
Example #35
"""Test low-iteration two-stage (Affine, SyN) registration to save computation.

Parameters are chosen from John Muschelli's ``extrantsr::reg_write`` default
parameters; they may be upgraded when more computational resources are
available.

NOTE: these parameters were used in the preproc workflow.
"""
from nipype.interfaces.ants import Registration

reg = Registration()  # NOTE(review): redundant -- re-created below

from nipype.interfaces.fsl import Info
# Resolve the MNI152 1mm brain template shipped with FSL.
template_path = Info.standard_image('MNI152_T1_1mm_brain.nii.gz')

reg = Registration()
reg.inputs.fixed_image = template_path
reg.inputs.moving_image = '../../data/ds000171/sub-control01/anat/sub-control01_T1w.nii.gz'

# Choose the type of transforms and in what order to implement
reg.inputs.transforms = ['Affine', 'SyN']
reg.inputs.metric = ['Mattes', 'Mattes']
reg.inputs.metric_weight = [1] * 2
reg.inputs.radius_or_number_of_bins = [32] * 2
# None for the SyN stage presumably means dense (full) sampling -- confirm
# against the nipype Registration interface docs.
reg.inputs.sampling_strategy = ['Regular', None]
reg.inputs.sampling_percentage = [0.2, 1]

# Affine uses (GradientStep,); SyN uses (GradientStep,
# updateFieldVarianceInVoxelSpace, totalFieldVarianceInVoxelSpace)
reg.inputs.transform_parameters = [(0.25, ), (0.2, 3.0, 0.0)]

# Choose shrinking factors and smoothing kernel size per resolution level
reg.inputs.number_of_iterations = [[2100, 1200, 1200, 0], [40, 20, 0]]