def _reset_affines(in_file, out_file, overwrite=False, axes_to_permute=None,
                   axes_to_flip=None, xyzscale=None, center_mass=None,
                   verbose=1):
    """Sets the qform equal to the sform in the header, optionally rescaling,
    setting the image center to 0, and permuting and/or flipping axes.
    """
    if not os.path.isfile(out_file) or overwrite:
        shutil.copy(in_file, out_file)
    else:
        return

    if verbose:
        terminal_output = 'stream'
    else:
        terminal_output = 'none'

    in_file = out_file
    if xyzscale is not None:
        refit = afni.Refit()
        refit.inputs.in_file = in_file
        refit.inputs.xyzscale = xyzscale
        refit.set_default_terminal_output(terminal_output)
        result = refit.run()
        in_file = result.outputs.out_file

    if center_mass is not None:
        set_center_mass = afni.CenterMass()
        set_center_mass.inputs.in_file = in_file
        set_center_mass.inputs.cm_file = fname_presuffix(out_file,
                                                         suffix='.txt',
                                                         use_ext=False)
        set_center_mass.inputs.set_cm = center_mass
        # set_center_mass.set_default_terminal_output(terminal_output)  # XXX BUG
        result = set_center_mass.run()
        in_file = result.outputs.out_file

    img = nibabel.load(in_file)
    header = img.header.copy()
    sform, code = header.get_sform(coded=True)

    if axes_to_flip:
        for axis in axes_to_flip:
            sform[axis] *= -1

    if axes_to_permute:
        for (axis1, axis2) in axes_to_permute:
            sform[[axis1, axis2]] = sform[[axis2, axis1]]

    header.set_sform(sform)
    header.set_qform(sform, int(code))
    nibabel.Nifti1Image(img.get_data(), sform, header).to_filename(out_file)
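# Illustrative doctest-style sketch of a ``_reset_affines`` call; the file
# names are hypothetical placeholders, not files shipped with sammba:
#
# >>> _reset_affines('atlas.nii.gz', 'atlas_fixed.nii.gz',  # doctest: +SKIP
# ...                xyzscale=0.1, center_mass=(0, 0, 0),
# ...                axes_to_flip=[1], overwrite=True)
#
# This copies ``atlas.nii.gz`` to ``atlas_fixed.nii.gz``, rescales the voxel
# sizes in the header, shifts the origin so the center of mass sits at
# (0, 0, 0), flips the second axis and writes the result with matching
# qform/sform.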
def fix_obliquity(to_fix_filename, reference_filename, caching=False,
                  caching_dir=None, overwrite=False, verbose=True,
                  environ=None):
    if caching_dir is None:
        caching_dir = os.getcwd()
    if caching:
        memory = Memory(caching_dir)

    if verbose:
        terminal_output = 'allatonce'
    else:
        terminal_output = 'none'

    if environ is None:
        if caching:
            environ = {}
        else:
            environ = {'AFNI_DECONFLICT': 'OVERWRITE'}

    if caching:
        copy = memory.cache(afni.Copy)
        refit = memory.cache(afni.Refit)
        copy.interface().set_default_terminal_output(terminal_output)
        refit.interface().set_default_terminal_output(terminal_output)
    else:
        copy = afni.Copy(terminal_output=terminal_output).run
        refit = afni.Refit(terminal_output=terminal_output).run

    tmp_folder = os.path.join(caching_dir, 'tmp')
    if not os.path.isdir(tmp_folder):
        os.makedirs(tmp_folder)

    reference_basename = os.path.basename(reference_filename)
    orig_reference_filename = fname_presuffix(
        os.path.join(tmp_folder, reference_basename), suffix='+orig.BRIK',
        use_ext=False)
    out_copy_oblique = copy(in_file=reference_filename,
                            out_file=orig_reference_filename,
                            environ=environ)
    orig_reference_filename = out_copy_oblique.outputs.out_file

    to_fix_basename = os.path.basename(to_fix_filename)
    orig_to_fix_filename = fname_presuffix(
        os.path.join(tmp_folder, to_fix_basename), suffix='+orig.BRIK',
        use_ext=False)
    out_copy = copy(in_file=to_fix_filename,
                    out_file=orig_to_fix_filename,
                    environ=environ)
    orig_to_fix_filename = out_copy.outputs.out_file

    out_refit = refit(in_file=orig_to_fix_filename,
                      atrcopy=(orig_reference_filename, 'IJK_TO_DICOM_REAL'))
    out_copy = copy(in_file=out_refit.outputs.out_file,
                    out_file=fname_presuffix(to_fix_filename,
                                             suffix='_oblique'),
                    environ=environ)

    if not caching:
        shutil.rmtree(tmp_folder)

    return out_copy.outputs.out_file
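# Hypothetical usage sketch for ``fix_obliquity`` (paths are placeholders):
# copy the obliquity information of a reference scan into another image,
# writing the result next to the input with an ``_oblique`` suffix.
#
# >>> fixed = fix_obliquity('func_resliced.nii.gz',  # doctest: +SKIP
# ...                       'anat_reference.nii.gz',
# ...                       caching=False, verbose=True)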
def create_pipeline_graph(pipeline_name, graph_file,
                          graph_kind='hierarchical'):
    """Creates pipeline graph for a given pipeline.

    Parameters
    ----------
    pipeline_name : one of {'anats_to_common_rigid',
        'anats_to_common_affine', 'anats_to_common_nonlinear'}
        Pipeline name.

    graph_file : str
        Path to save the graph image to.

    graph_kind : one of {'orig', 'hierarchical', 'flat', 'exec', 'colored'},
        optional
        The kind of the graph, passed to
        sammba.externals.nipype.pipeline.workflows.Workflow().write_graph
    """
    pipeline_names = ['anats_to_common_rigid',
                      'anats_to_common_affine',
                      'anats_to_common_nonlinear']
    if pipeline_name not in pipeline_names:
        raise NotImplementedError(
            'Pipeline name must be one of {0}, you entered {1}'.format(
                pipeline_names, pipeline_name))
    graph_kinds = ['orig', 'hierarchical', 'flat', 'exec', 'colored']
    if graph_kind not in graph_kinds:
        raise ValueError(
            'Graph kind must be one of {0}, you entered {1}'.format(
                graph_kinds, graph_kind))

    workflow = pe.Workflow(name=pipeline_name)

    #######################################################################
    # Specify rigid body registration pipeline steps
    unifize = pe.Node(interface=afni.Unifize(), name='bias_correct')
    clip_level = pe.Node(interface=afni.ClipLevel(),
                         name='compute_mask_threshold')
    compute_mask = pe.Node(interface=interfaces.MathMorphoMask(),
                           name='compute_brain_mask')
    apply_mask = pe.Node(interface=afni.Calc(), name='apply_brain_mask')
    center_mass = pe.Node(interface=afni.CenterMass(),
                          name='compute_and_set_cm_in_header')
    refit_copy = pe.Node(afni.Refit(), name='copy_cm_in_header')
    tcat1 = pe.Node(afni.TCat(), name='concatenate_across_individuals1')
    tstat1 = pe.Node(afni.TStat(), name='compute_average1')
    undump = pe.Node(afni.Undump(), name='create_empty_template')
    refit_set = pe.Node(afni.Refit(), name='set_cm_in_header')
    resample1 = pe.Node(afni.Resample(), name='resample1')
    resample2 = pe.Node(afni.Resample(), name='resample2')
    shift_rotate = pe.Node(afni.Allineate(), name='shift_rotate')
    apply_allineate1 = pe.Node(afni.Allineate(), name='apply_transform1')
    tcat2 = pe.Node(afni.TCat(), name='concatenate_across_individuals2')
    tstat2 = pe.Node(afni.TStat(), name='compute_average2')
    tcat3 = pe.Node(afni.TCat(), name='concatenate_across_individuals3')
    tstat3 = pe.Node(afni.TStat(), name='compute_average3')

    workflow.add_nodes([unifize, clip_level, compute_mask, apply_mask,
                        center_mass, refit_copy, tcat1, tstat1, undump,
                        refit_set, resample1, resample2, shift_rotate,
                        apply_allineate1, tcat2, tstat2, tcat3, tstat3])

    #######################################################################
    # and connections
    workflow.connect(unifize, 'out_file', clip_level, 'in_file')
    workflow.connect(clip_level, 'clip_val',
                     compute_mask, 'intensity_threshold')
    workflow.connect(unifize, 'out_file', compute_mask, 'in_file')
    workflow.connect(compute_mask, 'out_file', apply_mask, 'in_file_a')
    workflow.connect(unifize, 'out_file', apply_mask, 'in_file_b')
    workflow.connect(apply_mask, 'out_file', center_mass, 'in_file')
    workflow.connect(unifize, 'out_file', refit_copy, 'in_file')
    workflow.connect(center_mass, 'out_file', refit_copy, 'duporigin_file')
    workflow.connect(center_mass, 'out_file', tcat1, 'in_files')
    workflow.connect(tcat1, 'out_file', tstat1, 'in_file')
    workflow.connect(tstat1, 'out_file', undump, 'in_file')
    workflow.connect(undump, 'out_file', refit_set, 'in_file')
    workflow.connect(refit_set, 'out_file', resample1, 'master')
    workflow.connect(refit_copy, 'out_file', resample1, 'in_file')
    workflow.connect(refit_set, 'out_file', resample2, 'master')
    workflow.connect(center_mass, 'out_file', resample2, 'in_file')
    workflow.connect(resample2, 'out_file', tcat2, 'in_files')
    workflow.connect(tcat2, 'out_file', tstat2, 'in_file')
    workflow.connect(tstat2, 'out_file', shift_rotate, 'reference')
    workflow.connect(resample2, 'out_file', shift_rotate, 'in_file')
    workflow.connect(tstat2, 'out_file', apply_allineate1, 'master')
    workflow.connect(resample1, 'out_file', apply_allineate1, 'in_file')
    workflow.connect(shift_rotate, 'out_matrix',
                     apply_allineate1, 'in_matrix')
    workflow.connect(apply_allineate1, 'out_file', tcat3, 'in_files')
    workflow.connect(tcat3, 'out_file', tstat3, 'in_file')

    if pipeline_name in ['anats_to_common_affine',
                         'anats_to_common_nonlinear']:
        mask = pe.Node(afni.MaskTool(), name='generate_count_mask')
        allineate = pe.Node(afni.Allineate(), name='allineate')
        catmatvec = pe.Node(afni.CatMatvec(), name='concatenate_transforms')
        apply_allineate2 = pe.Node(afni.Allineate(), name='apply_transform2')
        tcat3 = pe.Node(afni.TCat(), name='concatenate_across_individuals4')
        tstat3 = pe.Node(afni.TStat(), name='compute_average4')

        workflow.add_nodes([mask, allineate, catmatvec, apply_allineate2,
                            tcat3, tstat3])

        workflow.connect(tcat2, 'out_file', mask, 'in_file')
        workflow.connect(mask, 'out_file', allineate, 'weight')
        workflow.connect(apply_allineate1, 'out_file', allineate, 'in_file')
        # XXX how can we enter multiple files ?
        workflow.connect(allineate, 'out_matrix', catmatvec, 'in_file')
        workflow.connect(catmatvec, 'out_file', apply_allineate2, 'in_matrix')
        workflow.connect(resample1, 'out_file', apply_allineate2, 'in_file')
        workflow.connect(apply_allineate2, 'out_file', tcat3, 'in_files')
        workflow.connect(tcat3, 'out_file', tstat3, 'in_file')

    if pipeline_name == 'anats_to_common_nonlinear':
        pass

    graph_file_root, graph_file_ext = os.path.splitext(graph_file)
    if graph_file_ext:
        _ = workflow.write_graph(graph2use=graph_kind,
                                 format=graph_file_ext[1:],
                                 dotfilename=graph_file_root)
    else:
        _ = workflow.write_graph(graph2use=graph_kind,
                                 dotfilename=graph_file_root)
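# Hypothetical usage sketch: write the affine pipeline graph to a PNG file
# (the output path is a placeholder). The extension selects the image format
# passed to ``write_graph``; without an extension only the dot file is kept.
#
# >>> create_pipeline_graph('anats_to_common_affine',  # doctest: +SKIP
# ...                       '/tmp/anats_to_common_affine_graph.png',
# ...                       graph_kind='hierarchical')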
def fix_obliquity(to_fix_filename, reference_filename, caching=False,
                  caching_dir=None, clear_memory=False, overwrite=False):
    if caching_dir is None:
        caching_dir = os.getcwd()
    if caching:
        memory = Memory(caching_dir)

    if overwrite:
        environ = {'AFNI_DECONFLICT': 'OVERWRITE'}
    else:
        environ = {}

    if caching:
        copy = memory.cache(afni.Copy)
    else:
        copy = afni.Copy().run

    tmp_folder = os.path.join(caching_dir, 'tmp')
    if not os.path.isdir(tmp_folder):
        os.makedirs(tmp_folder)

    reference_basename = os.path.basename(reference_filename)
    orig_reference_filename = fname_presuffix(
        os.path.join(tmp_folder, reference_basename), suffix='+orig.BRIK',
        use_ext=False)
    if not os.path.isfile(orig_reference_filename) or overwrite:
        out_copy_oblique = copy(in_file=reference_filename,
                                out_file=orig_reference_filename,
                                environ=environ)
        orig_reference_filename = out_copy_oblique.outputs.out_file

    to_fix_basename = os.path.basename(to_fix_filename)
    orig_to_fix_filename = fname_presuffix(
        os.path.join(tmp_folder, to_fix_basename), suffix='+orig.BRIK',
        use_ext=False)
    if not os.path.isfile(orig_to_fix_filename) or overwrite:
        out_copy = copy(in_file=to_fix_filename,
                        out_file=orig_to_fix_filename,
                        environ=environ)
        orig_to_fix_filename = out_copy.outputs.out_file

    if caching:
        refit = memory.cache(afni.Refit)
    else:
        refit = afni.Refit().run

    out_refit = refit(in_file=orig_to_fix_filename,
                      atrcopy=(orig_reference_filename, 'IJK_TO_DICOM_REAL'))
    out_copy = copy(in_file=out_refit.outputs.out_file,
                    environ={'AFNI_DECONFLICT': 'OVERWRITE'},
                    out_file=to_fix_filename)

    if clear_memory:
        shutil.rmtree(tmp_folder)
        if caching:
            memory.clear_previous_run()

    return out_copy.outputs.out_file
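# Hypothetical sketch of the caching variant (paths are placeholders):
# intermediate +orig copies go to ``caching_dir/tmp`` and are reused across
# runs; ``clear_memory=True`` removes that tmp folder and, when caching is
# enabled, clears the previous cached run.
#
# >>> fixed = fix_obliquity('func_resliced.nii.gz',  # doctest: +SKIP
# ...                       'anat_reference.nii.gz', caching=True,
# ...                       caching_dir='/tmp/sammba_cache',
# ...                       clear_memory=False)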
def anats_to_common(t1_filenames, write_dir, brain_volume=400, registration_kind='affine', nonlinear_levels=[1, 2, 3], nonlinear_minimal_patch=75, convergence=0.005, caching=False, verbose=0): """ Create common template from native T1 weighted images and achieve their registration to it. Parameters ---------- t1_filenames : list of str Paths to the T1 weighted images. write_dir : str Path to an existant directory to save output files to. brain_volume : float, optional Volumes of the brain as passed to Rats_MM brain extraction tool. Default to 400 for the mouse brain. registration_kind : one of {'rigid', 'affine', 'nonlinear'}, optional The allowed transform kind. nonlinear_levels : list of int, optional Maximal levels for each nonlinear warping iteration. Passed iteratively to sammba.externals.nipype.interfaces.afni.Qwarp nonlinear_minimal_patch : int, optional Minimal patch for the final nonlinear warp, passed to sammba.externals.nipype.interfaces.afni.Qwarp caching : bool, optional If True, caching is used for all the registration steps. convergence : float, optional Convergence limit, passed to verbose : bool, optional If True, all steps are verbose. Returns ------- data : sklearn.datasets.base.Bunch Dictionary-like object, the interest attributes are : - 'registered' : list of str. Paths to registered images. Note that they have undergone a bias correction step before. - 'transforms' : list of str. Paths to the transforms from the raw images to the registered images. """ registration_kinds = ['rigid', 'affine', 'nonlinear'] if registration_kind not in registration_kinds: raise ValueError( 'Registration kind must be one of {0}, you entered {1}'.format( registration_kinds, registration_kind)) if verbose: terminal_output = 'allatonce' else: terminal_output = 'none' if caching: memory = Memory(write_dir) copy = memory.cache(afni.Copy) unifize = memory.cache(afni.Unifize) clip_level = memory.cache(afni.ClipLevel) rats = memory.cache(RatsMM) apply_mask = memory.cache(fsl.ApplyMask) center_mass = memory.cache(afni.CenterMass) refit = memory.cache(afni.Refit) tcat = memory.cache(afni.TCat) tstat = memory.cache(afni.TStat) undump = memory.cache(afni.Undump) resample = memory.cache(afni.Resample) allineate = memory.cache(afni.Allineate) allineate2 = memory.cache(afni.Allineate) mask_tool = memory.cache(afni.MaskTool) catmatvec = memory.cache(afni.CatMatvec) qwarp = memory.cache(afni.Qwarp) nwarp_cat = memory.cache(afni.NwarpCat) warp_apply = memory.cache(afni.NwarpApply) for func in [copy, unifize, rats, apply_mask, refit, tcat, tstat, undump, resample, allineate, allineate2, mask_tool, catmatvec, qwarp, nwarp_cat, warp_apply]: func.interface().set_default_terminal_output(terminal_output) else: copy = afni.Copy(terminal_output=terminal_output).run unifize = afni.Unifize(terminal_output=terminal_output).run clip_level = afni.ClipLevel().run # XXX fix nipype bug with 'none' rats = RatsMM(terminal_output=terminal_output).run apply_mask = fsl.ApplyMask(terminal_output=terminal_output).run center_mass = afni.CenterMass().run # XXX fix nipype bug with 'none' refit = afni.Refit(terminal_output=terminal_output).run tcat = afni.TCat(terminal_output=terminal_output).run tstat = afni.TStat(terminal_output=terminal_output).run undump = afni.Undump(terminal_output=terminal_output).run resample = afni.Resample(terminal_output=terminal_output).run allineate = afni.Allineate(terminal_output=terminal_output).run allineate2 = afni.Allineate(terminal_output=terminal_output).run mask_tool = 
afni.MaskTool(terminal_output=terminal_output).run catmatvec = afni.CatMatvec(terminal_output=terminal_output).run qwarp = afni.Qwarp(terminal_output=terminal_output).run nwarp_cat = afni.NwarpCat(terminal_output=terminal_output).run warp_apply = afni.NwarpApply(terminal_output=terminal_output).run current_dir = os.getcwd() os.chdir(write_dir) ########################################################################### # First copy anatomical files, to make sure they are never changed # and they have different names across individuals copied_t1_filenames = [] for n, anat_file in enumerate(t1_filenames): suffixed_file = fname_presuffix(anat_file, suffix='_{}'.format(n)) out_file = os.path.join(write_dir, os.path.basename(suffixed_file)) out_copy = copy(in_file=anat_file, out_file=out_file) copied_t1_filenames.append(out_copy.outputs.out_file) ########################################################################### # Register using center of mass # ----------------------------- # An initial coarse registration is done using brain centre of mass (CoM). # # First we loop through anatomical scans and correct intensities for bias. unifized_files = [] for n, anat_file in enumerate(copied_t1_filenames): out_unifize = unifize(in_file=anat_file, outputtype='NIFTI_GZ') unifized_files.append(out_unifize.outputs.out_file) ########################################################################### # Second extract brains, aided by an approximate guessed brain volume, # and set the NIfTI image centre (as defined in the header) to the CoM # of the extracted brain. brain_files = [] for unifized_file in unifized_files: out_clip_level = clip_level(in_file=unifized_file) out_rats = rats( in_file=unifized_file, volume_threshold=brain_volume, intensity_threshold=int(out_clip_level.outputs.clip_val), terminal_output=terminal_output) out_apply_mask = apply_mask(in_file=unifized_file, mask_file=out_rats.outputs.out_file) out_center_mass = center_mass( in_file=out_apply_mask.outputs.out_file, cm_file=fname_presuffix(unifized_file, suffix='_cm.txt', use_ext=False), set_cm=(0, 0, 0)) brain_files.append(out_center_mass.outputs.out_file) ########################################################################### # Same header change, for head files. head_files = [] for unifized_file, brain_file in zip(unifized_files, brain_files): out_refit = refit(in_file=unifized_file, duporigin_file=brain_file) head_files.append(out_refit.outputs.out_file) ########################################################################### # The brain files with new image center are concatenated to produce # a quality check video out_tcat = tcat(in_files=brain_files, outputtype='NIFTI_GZ', terminal_output=terminal_output) ########################################################################### # and averaged out_tstat = tstat(in_file=out_tcat.outputs.out_file, outputtype='NIFTI_GZ') ########################################################################### # to create an empty template, with origin placed at CoM out_undump = undump(in_file=out_tstat.outputs.out_file, outputtype='NIFTI_GZ') out_refit = refit(in_file=out_undump.outputs.out_file, xorigin='cen', yorigin='cen', zorigin='cen') ########################################################################### # Finally, we shift heads and brains within the images to place the CoM at # the image center. 
centered_head_files = [] for head_file in head_files: out_resample = resample(in_file=head_file, master=out_refit.outputs.out_file, outputtype='NIFTI_GZ') centered_head_files.append(out_resample.outputs.out_file) centered_brain_files = [] for brain_file in brain_files: out_resample = resample(in_file=brain_file, master=out_refit.outputs.out_file, outputtype='NIFTI_GZ') centered_brain_files.append(out_resample.outputs.out_file) ########################################################################### # Quality check videos and average brain out_tcat = tcat(in_files=centered_brain_files, out_file=os.path.join(write_dir, 'centered_brains.nii.gz')) out_tstat_centered_brain = tstat(in_file=out_tcat.outputs.out_file, outputtype='NIFTI_GZ') ########################################################################### # At this point, we achieved a translation-only registration of the raw # anatomical images to each other's brain's (as defined by the brain # extractor) CoMs. ########################################################################### # Shift rotate # ------------ # Now we move to rigid-body registration of CoM brains, and application of # this registration to CoM heads. This registration requires a target # template. Here we use mean of all bias-corrected, brain-extracted, # mass-centered images. Other possibilities include an externally-sourced # image or, more biased, a nicely-aligned individual. shift_rotated_brain_files = [] rigid_transform_files = [] for centered_brain_file in centered_brain_files: suffixed_matrix = fname_presuffix(centered_brain_file, suffix='_shr.aff12.1D', use_ext=False) out_matrix = os.path.join(write_dir, os.path.basename(suffixed_matrix)) out_allineate = allineate( in_file=centered_brain_file, reference=out_tstat_centered_brain.outputs.out_file, out_matrix=out_matrix, convergence=convergence, two_blur=1, warp_type='shift_rotate', out_file=fname_presuffix(centered_brain_file, suffix='_shr')) rigid_transform_files.append(out_allineate.outputs.out_matrix) shift_rotated_brain_files.append(out_allineate.outputs.out_file) ########################################################################### # Application to the whole head image. can also be used for a good # demonstration of linear vs. non-linear registration quality shift_rotated_head_files = [] for centered_head_file, rigid_transform_file in zip(centered_head_files, rigid_transform_files): suffixed_file = fname_presuffix(centered_head_file, suffix='_shr') out_file = os.path.join(write_dir, os.path.basename(suffixed_file)) out_allineate = allineate2( in_file=centered_head_file, master=out_tstat_centered_brain.outputs.out_file, in_matrix=rigid_transform_file, out_file=out_file) shift_rotated_head_files.append(out_allineate.outputs.out_file) ########################################################################### # Note that this rigid body registration may need to be run more than once. # Now we produce an average of rigid body registered heads out_tcat = tcat( in_files=shift_rotated_head_files, out_file=os.path.join(write_dir, 'rigid_body_registered_heads.nii.gz')) out_tstat_shr = tstat(in_file=out_tcat.outputs.out_file, outputtype='NIFTI_GZ') if registration_kind == 'rigid': os.chdir(current_dir) return Bunch(registered=shift_rotated_head_files, transforms=rigid_transform_files) ########################################################################### # Affine transform # ---------------- # We begin by achieving an affine registration on aligned heads. # A weighting mask is used to ... 
out_mask_tool = mask_tool(in_file=out_tcat.outputs.out_file, count=True, outputtype='NIFTI_GZ') ########################################################################### # The count mask is also useful for looking at brain extraction efficiency # and differences in brain size. affine_transform_files = [] for shift_rotated_head_file, rigid_transform_file in zip( shift_rotated_head_files, rigid_transform_files): out_allineate = allineate( in_file=shift_rotated_head_file, reference=out_tstat_shr.outputs.out_file, out_matrix=fname_presuffix(shift_rotated_head_file, suffix='_affine.aff12.1D', use_ext=False), convergence=convergence, two_blur=1, one_pass=True, weight=out_mask_tool.outputs.out_file, out_file=fname_presuffix(shift_rotated_head_file, suffix='_affine')) suffixed_matrix = fname_presuffix(shift_rotated_head_file, suffix='_affine_catenated.aff12.1D', use_ext=False) catmatvec_out_file = os.path.join(write_dir, os.path.basename(suffixed_matrix)) out_catmatvec = catmatvec(in_file=[(rigid_transform_file, 'ONELINE'), (out_allineate.outputs.out_matrix, 'ONELINE')], out_file=catmatvec_out_file) affine_transform_files.append(catmatvec_out_file) ########################################################################### # Each resulting registration matrix is concatenated to the corresponding # rigid bory registration matrix then directly applied to the CoM brain # and head, reducing reslice errors in the final result. allineated_brain_files = [] for centered_brain_file, affine_transform_file in zip( centered_brain_files, affine_transform_files): out_allineate = allineate2( in_file=centered_brain_file, master=out_tstat_shr.outputs.out_file, in_matrix=affine_transform_file, out_file=fname_presuffix(centered_brain_file, suffix='_shr_affine_catenated')) allineated_brain_files.append(out_allineate.outputs.out_file) ########################################################################### # The application to the whole head image can also be used for a good # demonstration of linear vs. non-linear registration quality. allineated_head_files = [] for centered_head_file, affine_transform_file in zip( centered_head_files, affine_transform_files): suffixed_file = fname_presuffix(centered_head_file, suffix='_shr_affine_catenated') out_file = os.path.join(write_dir, os.path.basename(suffixed_file)) out_allineate = allineate2( in_file=centered_head_file, master=out_tstat_shr.outputs.out_file, in_matrix=affine_transform_file, out_file=out_file) allineated_head_files.append(out_allineate.outputs.out_file) ########################################################################### # Quality check videos and template out_tcat_head = tcat( in_files=allineated_head_files, out_file=os.path.join(write_dir, 'affine_registered_heads.nii.gz')) out_tstat_allineated_head = tstat(in_file=out_tcat_head.outputs.out_file, outputtype='NIFTI_GZ') if registration_kind == 'affine': os.chdir(current_dir) return Bunch(registered=allineated_head_files, transforms=affine_transform_files) ########################################################################### # Non-linear registration # ----------------------- # A weight mask that extends beyond the brain, incorporating some # surrounding tissue, is needed to help better define the brain head # boundary. 
out_mask_tool = mask_tool(in_file=out_tcat.outputs.out_file, count=True, outputtype='NIFTI_GZ') out_mask_tool = mask_tool(in_file=out_tcat.outputs.out_file, union=True, outputtype='NIFTI_GZ') out_mask_tool = mask_tool(in_file=out_mask_tool.outputs.out_file, dilate_inputs='4', outputtype='NIFTI_GZ') ########################################################################### # The input source images are initially transformed prior to registration, # to ensure that they are already quite well-aligned to the template. # To save time, we only achieve one refinement level per step if nonlinear_levels is None: nonlinear_levels = [1, 2, 3] warped_files = [] warp_files = [] for affine_transform_file, centered_head_file in zip( affine_transform_files, centered_head_files): out_qwarp = qwarp( in_file=centered_head_file, base_file=out_tstat_allineated_head.outputs.out_file, nmi=True, noneg=True, iwarp=True, weight=out_mask_tool.outputs.out_file, iniwarp=[affine_transform_file], inilev=0, maxlev=nonlinear_levels[0], out_file=fname_presuffix(centered_head_file, suffix='_warped1')) warp_files.append(out_qwarp.outputs.source_warp) warped_files.append(out_qwarp.outputs.warped_source) out_tcat = tcat( in_files=warped_files, out_file=os.path.join(write_dir, 'warped_1iter_heads.nii.gz')) out_tstat_warp_head = tstat(in_file=out_tcat.outputs.out_file, outputtype='NIFTI_GZ') ########################################################################### # Then iterative registration from a given level to another is achieved. # Note that any level below a patch size of 25 will not be done (see # 3dQwarp help for further detail). # The input transform is the former warp and needs to be concatenated to # IDENT initially; I forget why, I think it is to avoid some weird bug. if len(nonlinear_levels) > 1: previous_warp_files = warp_files warped_files = [] warp_files = [] for warp_file, centered_head_file in zip(previous_warp_files, centered_head_files): out_nwarp_cat = nwarp_cat( in_files=[('IDENT', out_tstat_warp_head.outputs.out_file), warp_file], out_file='iniwarp.nii.gz') out_qwarp = qwarp( in_file=centered_head_file, base_file=out_tstat_warp_head.outputs.out_file, nmi=True, noneg=True, iwarp=True, weight=out_mask_tool.outputs.out_file, iniwarp=[out_nwarp_cat.outputs.out_file], inilev=nonlinear_levels[0], maxlev=nonlinear_levels[1], out_file=fname_presuffix(centered_head_file, suffix='_warped2')) warp_files.append(out_qwarp.outputs.source_warp) warped_files.append(out_qwarp.outputs.warped_source) out_tcat = tcat(in_files=warped_files, out_file='warped_2iters_heads.nii.gz') out_tstat_warp_head = tstat(in_file=out_tcat.outputs.out_file, outputtype='NIFTI_GZ') ########################################################################### # Using previous files and concatenated transforms can be exploited to # avoid building up reslice errors. 
# Warp with mini-patch # In this particular case, minpatch=75 corresponds to a level of 4 if len(nonlinear_levels) > 2: if nonlinear_minimal_patch is None: nonlinear_minimal_patch = 75 for n_iter, inilev in enumerate(nonlinear_levels[2:]): previous_warp_files = warp_files warped_files = [] warp_files = [] for warp_file, centered_head_file in zip(previous_warp_files, centered_head_files): suffixed_file = fname_presuffix( centered_head_file, suffix='_warped{}'.format(n_iter + 3)) if n_iter == len(nonlinear_levels): out_file = os.path.join(write_dir, os.path.basename(suffixed_file)) else: out_file = suffixed_file out_qwarp = qwarp( in_file=centered_head_file, base_file=out_tstat_warp_head.outputs.out_file, nmi=True, noneg=True, iwarp=True, weight=out_mask_tool.outputs.out_file, iniwarp=[warp_file], inilev=inilev, minpatch=nonlinear_minimal_patch, out_file=out_file) warped_files.append(out_qwarp.outputs.warped_source) warp_files.append(out_qwarp.outputs.source_warp) out_tcat = tcat( in_files=warped_files, out_file=os.path.join( write_dir, 'warped_{0}iters_heads.nii.gz'.format(n_iter + 3))) out_tstat_warp_head = tstat(in_file=out_tcat.outputs.out_file, outputtype='NIFTI_GZ') ########################################################################### # We can repeat this very last warp while using the last average until we # are satisfied with the template quality ########################################################################### # Register to template # -------------------- # Apply non-linear registration results to uncorrected images warped_files = [] for centered_head_file, warp_file in zip(centered_head_files, warp_files): suffixed_file = fname_presuffix( centered_head_file, suffix='affine_warp{}_catenated'.format(len(nonlinear_levels))) out_file = os.path.join(write_dir, os.path.basename(suffixed_file)) out_warp_apply = warp_apply( in_file=centered_head_file, warp=warp_file, master=out_tstat_warp_head.outputs.out_file, out_file=out_file) warped_files.append(out_warp_apply.outputs.out_file) os.chdir(current_dir) return Bunch(registered=warped_files, transforms=warp_files)
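# Hypothetical usage sketch for this version of ``anats_to_common`` (file
# names are placeholders): build an affine mouse template from a few T1
# scans and collect the per-subject transforms.
#
# >>> result = anats_to_common(  # doctest: +SKIP
# ...     ['anat1.nii.gz', 'anat2.nii.gz', 'anat3.nii.gz'],
# ...     write_dir='/tmp/template', brain_volume=400,
# ...     registration_kind='affine', caching=False, verbose=0)
#
# The returned Bunch exposes ``registered`` (bias-corrected, registered
# heads) and ``transforms`` (concatenated registration matrices).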
import os
import glob

from sammba.registration import template_registrator
from sammba.registration.utils import _reset_affines
from sammba.externals.nipype.utils.filemanip import fname_presuffix
from sammba.externals.nipype.interfaces import afni

refit = afni.Refit().run

anat_files = glob.glob(
    os.path.expanduser('~/mrm_bil2_transformed/bil2_transfo_C57*.nii.gz'))
anat_files.remove(
    os.path.expanduser(
        '~/mrm_bil2_transformed/bil2_transfo_C57_Az1_invivo.nii.gz'))
output_dir = os.path.join(
    os.path.expanduser('~/mrm_bil2_transformed_preprocessed'))
template_file = os.path.expanduser(
    '~/nilearn_data/mrm_2010/Average_template_invivo.nii.gz')

# Correct the header
template_atlas_file = os.path.expanduser(
    '~/nilearn_data/mrm_2010/Average_atlas_invivo.nii.gz')
correct_template_atlas_file = os.path.expanduser(
    '~/nilearn_data/mrm_2010/correct_headers/'
    'Average_atlas_invivo_corrected.nii.gz')
_reset_affines(template_atlas_file, correct_template_atlas_file, xyzscale=1,
               overwrite=True, center_mass=(0, 0, 0))
def anats_to_common(anat_filenames, write_dir, brain_volume,
                    registration_kind='affine',
                    use_rats_tool=True,
                    nonlinear_levels=[1, 2, 3],
                    nonlinear_minimal_patches=[75],
                    nonlinear_weight_file=None,
                    convergence=0.005,
                    blur_radius_coarse=1.1,
                    caching=False, verbose=1, unifize_kwargs=None,
                    brain_masking_unifize_kwargs=None):
    """ Create common template from native anatomical images and achieve
    their registration to it.

    Parameters
    ----------
    anat_filenames : list of str
        Paths to the anatomical images.

    write_dir : str
        Path to an existing directory to save output files to.

    brain_volume : int
        Volume of the brain used for brain extraction.
        Typically 400 for mouse and 1800 for rat.

    registration_kind : one of {'rigid', 'affine', 'nonlinear'}, optional
        The allowed transform kind.

    use_rats_tool : bool, optional
        If True, brain mask is computed using RATS Mathematical Morphology.
        Otherwise, a histogram-based brain segmentation is used.

    nonlinear_levels : list of int, optional
        Maximal levels for each nonlinear warping iteration. Passed
        iteratively to sammba.externals.nipype.interfaces.afni.Qwarp

    nonlinear_minimal_patches : list of int, optional
        Minimal patches for the final nonlinear warps, passed to
        sammba.externals.nipype.interfaces.afni.Qwarp

    nonlinear_weight_file : str, optional
        Path to a mask used to weight non-linear registration. Ideally should
        include not just the whole brain but also extend in all directions to
        include some amount of surrounding head tissue.

    convergence : float, optional
        Convergence limit, passed to
        sammba.externals.nipype.interfaces.afni.Allineate

    blur_radius_coarse : float, optional
        Radius passed to sammba.externals.nipype.interfaces.afni.Allineate
        for the "-twoblur" option.

    caching : bool, optional
        If True, caching is used for all the registration steps.

    verbose : int, optional
        Verbosity level. Note that caching implies some verbosity in any
        case.

    unifize_kwargs : dict, optional
        Is passed to sammba.externals.nipype.interfaces.afni.Unifize, to
        control bias correction of the template.

    brain_masking_unifize_kwargs : dict, optional
        Is passed to sammba.externals.nipype.interfaces.afni.Unifize, to tune
        the separate bias correction step done prior to brain masking.

    Returns
    -------
    data : sklearn.datasets.base.Bunch
        Dictionary-like object, the interesting attributes are:

        - `registered` : list of str.
          Paths to registered images. Note that they have undergone a bias
          correction step before.
        - `transforms` : list of str.
          Paths to the transforms from the raw images to the registered
          images.

    Notes
    -----
    nonlinear_weight_file:
    Without this weight mask, for non-linear registration a mask is generated
    by binarizing the mean of images that have been brain and
    affine-registered, then evenly dilating it to include some surrounding
    head tissue. The non-linear registration is then weighted to work only
    within this mask. This substantially improves performance by 1) reducing
    the number of voxels to analyse, and 2) avoiding other parts of the head
    where structures are highly variable and signal often poor. However, this
    automatically-generated mask is frequently sub-optimal, usually due to
    missing paraflocculi and inappropriate dilation. Of course, it is
    impossible to know ahead of time where to weight the image before the
    brains/heads have been registered to each other. So in practice a first
    run of this script is done up until and including the affine stage (the
    default).
    The user should then manually use 3dmask_tool or some other software tool
    to create an intersect/frac/union mask of the
    _Unifized_for_brain_extraction_masked_resample_shr_affine_catenated
    files. These can then be dilated as needed to include some surrounding
    head tissue (which helps to better distinguish the brain-non-brain
    boundary). Missing regions can be added manually. This does not have to
    be done precisely, only roughly, and it is better to include too much
    tissue than too little. The procedure should then be rerun as non-linear,
    but using this weight mask.

    use_rats_tool:
    If `use_rats_tool` is turned on, RATS tool is used for brain extraction
    and has to be cited. For more information, see
    `RATS <http://www.iibi.uiowa.edu/content/rats-overview/>`_
    """
    registration_kinds = ['rigid', 'affine', 'nonlinear']
    if registration_kind not in registration_kinds:
        raise ValueError(
            'Registration kind must be one of {0}, you entered {1}'.format(
                registration_kinds, registration_kind))

    if registration_kind == 'nonlinear' and len(anat_filenames) < 5:
        raise ValueError('At least 5 input files are required to make a '
                         'template by non-linear registration. Only '
                         '{0} have been provided.'.format(
                             len(anat_filenames)))

    if use_rats_tool:
        if segmentation.Info().version() is None:
            raise ValueError('Can not locate RATS')
        else:
            ComputeMask = segmentation.MathMorphoMask
    else:
        ComputeMask = segmentation.HistogramMask

    if verbose:
        terminal_output = 'stream'
        verbosity_kwargs = {'verb': verbose > 1}
        quietness_kwargs = {}
        verbosity_quietness_kwargs = {'verb': verbose > 2}
    else:
        terminal_output = 'none'
        verbosity_kwargs = {}
        quietness_kwargs = {'quiet': True}
        verbosity_quietness_kwargs = {'quiet': True}

    if caching:
        memory = Memory(write_dir)
        copy = memory.cache(afni.Copy)
        unifize = memory.cache(afni.Unifize)
        clip_level = memory.cache(afni.ClipLevel)
        compute_mask = memory.cache(ComputeMask)
        calc = memory.cache(afni.Calc)
        center_mass = memory.cache(afni.CenterMass)
        refit = memory.cache(afni.Refit)
        tcat = memory.cache(afni.TCat)
        tstat = memory.cache(afni.TStat)
        undump = memory.cache(afni.Undump)
        resample = memory.cache(afni.Resample)
        allineate = memory.cache(afni.Allineate)
        allineate2 = memory.cache(afni.Allineate)
        mask_tool = memory.cache(afni.MaskTool)
        catmatvec = memory.cache(afni.CatMatvec)
        qwarp = memory.cache(afni.Qwarp)
        qwarp2 = memory.cache(afni.Qwarp)  # workaround to initialize inputs
        nwarp_cat = memory.cache(afni.NwarpCat)
        warp_apply = memory.cache(afni.NwarpApply)
        nwarp_adjust = memory.cache(afni.NwarpAdjust)
        for step in [copy, unifize, compute_mask, calc, refit, tcat, tstat,
                     undump, resample, allineate, allineate2, mask_tool,
                     catmatvec, qwarp, nwarp_cat, warp_apply, nwarp_adjust]:
            step.interface().set_default_terminal_output(terminal_output)
    else:
        copy = afni.Copy(terminal_output=terminal_output).run
        unifize = afni.Unifize(terminal_output=terminal_output).run
        clip_level = afni.ClipLevel().run  # XXX fix nipype bug with 'none'
        compute_mask = ComputeMask(terminal_output=terminal_output).run
        calc = afni.Calc(terminal_output=terminal_output).run
        center_mass = afni.CenterMass().run  # XXX fix nipype bug with 'none'
        refit = afni.Refit(terminal_output=terminal_output).run
        tcat = afni.TCat(terminal_output=terminal_output).run
        tstat = afni.TStat(terminal_output=terminal_output).run
        undump = afni.Undump(terminal_output=terminal_output).run
        resample = afni.Resample(terminal_output=terminal_output).run
        allineate = afni.Allineate(terminal_output=terminal_output).run
        allineate2 = 
afni.Allineate(terminal_output=terminal_output).run mask_tool = afni.MaskTool(terminal_output=terminal_output).run catmatvec = afni.CatMatvec(terminal_output=terminal_output).run qwarp = afni.Qwarp(terminal_output=terminal_output).run qwarp2 = afni.Qwarp(terminal_output=terminal_output).run nwarp_cat = afni.NwarpCat(terminal_output=terminal_output).run warp_apply = afni.NwarpApply(terminal_output=terminal_output).run nwarp_adjust = afni.NwarpAdjust(terminal_output=terminal_output).run current_dir = os.getcwd() os.chdir(write_dir) ########################################################################### # First copy anatomical files to make sure the originals are never changed # and they have different names across individuals. Then produce a video of # this raw data and a mean copied_anat_filenames = [] for n, anat_file in enumerate(anat_filenames): suffixed_file = fname_presuffix(anat_file, suffix='_{}'.format(n)) out_file = os.path.join(write_dir, os.path.basename(suffixed_file)) out_copy = copy(in_file=anat_file, out_file=out_file, **verbosity_kwargs) copied_anat_filenames.append(out_copy.outputs.out_file) out_tcat = tcat(in_files=copied_anat_filenames, out_file=os.path.join(write_dir, 'raw_heads.nii.gz'), outputtype='NIFTI_GZ', **verbosity_kwargs) out_tstat = tstat(in_file=out_tcat.outputs.out_file, outputtype='NIFTI_GZ') ########################################################################### # Bias correct and register using center of mass # ----------------------------- # An initial coarse registration is done using brain centre of mass (CoM). # # First we loop through anatomical scans and correct intensities for bias. # This is done twice with parameters that can be set differently: once to # create an image for automatic brain mask generation, and a another time # for the image that will actually have its brain extracted by this mask and # also be passed on to the rest of the function. This separation is useful # because in some circumstances the ideal bias correction can create zones # of signal and noise that confuse the brain masker, so it is best if that # calculation is performed on a differently-corrected image. In a lot(most?) # cases, the same parameters can be used for both bias correctors (the # default) as though the correction was only ever done once. # # Second, image centers are redefined based on the CoM of brains extracted # by the brain masks. The images are then translated to force the new # centers to all be at the same position: the center of the image matrix. # This is a crude form of translation-only registration amongst images that # simultaneously shifts the position of all brains to being in the centre of # the image if this was not already the case (which it often is not in small # mammal head imaging where the brain is usually in the upper half). # # Note that the heads created at the end will be the start point for all # subsequent transformations (meaning any transformation generated from now # on will be a concatenation of itself and previous ones for direct # application to CoM-registered heads). This avoids the accumulation of # reslice error from one registration to the next. Ideally, the start point # should be the bias-corrected images prior to center correction (which # itself involes reslicing). However, I have not yet figured out the best # way to convert CoM change into an affine transform and then use it. The # conversion should be relatively easy, using nibabel to extract the two # affines then numpy to calculate the difference. 
Using it is not so simple. # Unlike 3dQwarp, 3dAllineate does not have a simple initialization flag. # Instead, it is necessary to use -parini to initialize any given affine # parameter individually. However, -parini can be overidden by other flags, # so careful checks need to be made to ensure that this will never happen # with the particular command or set of commands used here. # bias correction for images to be used for brain mask creation if brain_masking_unifize_kwargs is None: brain_masking_unifize_kwargs = {} brain_masking_unifize_kwargs.update(quietness_kwargs) brain_masking_in_files = [] for n, anat_file in enumerate(copied_anat_filenames): out_unifize = unifize(in_file=anat_file, out_file='%s_Unifized_for_brain_masking', outputtype='NIFTI_GZ', **brain_masking_unifize_kwargs) brain_masking_in_files.append(out_unifize.outputs.out_file) # brain mask creation brain_mask_files = [] for n, brain_masking_in_file in enumerate(brain_masking_in_files): out_clip_level = clip_level(in_file=brain_masking_in_file) out_compute_mask = compute_mask( in_file=brain_masking_in_file, out_file=fname_presuffix(brain_masking_in_file, suffix='_mask'), volume_threshold=brain_volume, intensity_threshold=int(out_clip_level.outputs.clip_val), terminal_output=terminal_output) brain_mask_files.append(out_compute_mask.outputs.out_file) # bias correction for images to be both brain-extracted with the mask # generated above and then passed on to the rest of the function if unifize_kwargs is None: unifize_kwargs = {} unifize_kwargs.update(quietness_kwargs) unifized_files = [] for n, anat_file in enumerate(copied_anat_filenames): out_unifize = unifize(in_file=anat_file, out_file='%s_Unifized_for_brain_extraction', outputtype='NIFTI_GZ', **unifize_kwargs) unifized_files.append(out_unifize.outputs.out_file) # extrcat brains and set NIfTI image center (as defined in the header) to # the brain CoM brain_files = [] for (brain_mask_file, unifized_file) in zip(brain_mask_files, unifized_files): out_calc_mask = calc(in_file_a=unifized_file, in_file_b=brain_mask_file, expr='a*b', outputtype='NIFTI_GZ') out_center_mass = center_mass(in_file=out_calc_mask.outputs.out_file, cm_file=fname_presuffix(unifized_file, suffix='_cm.txt', use_ext=False), set_cm=(0, 0, 0)) brain_files.append(out_center_mass.outputs.out_file) # apply center change to head files too head_files = [] for unifized_file, brain_file in zip(unifized_files, brain_files): out_refit = refit(in_file=unifized_file, duporigin_file=brain_file) head_files.append(out_refit.outputs.out_file) # create an empty template with a center at the image matrix center out_undump = undump(in_file=out_tstat.outputs.out_file, out_file=os.path.join(write_dir, 'undump.nii.gz'), outputtype='NIFTI_GZ') out_refit = refit(in_file=out_undump.outputs.out_file, xorigin='cen', yorigin='cen', zorigin='cen') # shift brains to place their new centers at the same central position. # make a quality check video and mean centered_brain_files = [] for brain_file in brain_files: out_resample = resample(in_file=brain_file, resample_mode='Cu', master=out_refit.outputs.out_file, outputtype='NIFTI_GZ') centered_brain_files.append(out_resample.outputs.out_file) out_tcat = tcat(in_files=centered_brain_files, out_file=os.path.join(write_dir, 'centered_brains.nii.gz'), **verbosity_kwargs) out_tstat_centered_brain = tstat(in_file=out_tcat.outputs.out_file, outputtype='NIFTI_GZ') # do the same for heads. 
is also a better quality check than the brain centered_head_files = [] for head_file in head_files: out_resample = resample(in_file=head_file, resample_mode='Cu', master=out_refit.outputs.out_file, outputtype='NIFTI_GZ') centered_head_files.append(out_resample.outputs.out_file) out_tcat = tcat(in_files=centered_head_files, out_file=os.path.join(write_dir, 'centered_heads.nii.gz'), **verbosity_kwargs) out_tstat_centered_brain = tstat(in_file=out_tcat.outputs.out_file, outputtype='NIFTI_GZ') ########################################################################### # At this point, we have achieved a translation-only registration of the # anatomical images to each other's brain's (as defined by the brain # masker) CoMs. ########################################################################### # Rigid-body registration (shift rotate in AFNI parlance) # ------------------------------------------------------- # Now we move on to the rigid-body registration of CoM brains, and # application of this registration to CoM heads. This requires a target # template. Here we use the mean of all bias-corrected, brain-extracted, # mass-centered images. Other possibilities include an externally-sourced # image or, more biased, a nicely-aligned individual. # # In extreme cases where acquisitions were done at highly variable head # angles, it may be worth running this twice or even more (for which there # is no current functionality), but we have never found a case that extreme # so it is not implemented. # rigid-body registration shift_rotated_brain_files = [] rigid_transform_files = [] for centered_brain_file in centered_brain_files: suffixed_matrix = fname_presuffix(centered_brain_file, suffix='_shr.aff12.1D', use_ext=False) out_matrix = os.path.join(write_dir, os.path.basename(suffixed_matrix)) out_allineate = allineate( in_file=centered_brain_file, reference=out_tstat_centered_brain.outputs.out_file, out_matrix=out_matrix, convergence=convergence, two_blur=blur_radius_coarse, warp_type='shift_rotate', out_file=fname_presuffix(centered_brain_file, suffix='_shr'), **verbosity_quietness_kwargs) rigid_transform_files.append(out_allineate.outputs.out_matrix) shift_rotated_brain_files.append(out_allineate.outputs.out_file) # application to the head images shift_rotated_head_files = [] for centered_head_file, rigid_transform_file in zip( centered_head_files, rigid_transform_files): suffixed_file = fname_presuffix(centered_head_file, suffix='_shr') out_file = os.path.join(write_dir, os.path.basename(suffixed_file)) out_allineate = allineate2( in_file=centered_head_file, master=out_tstat_centered_brain.outputs.out_file, in_matrix=rigid_transform_file, out_file=out_file, **verbosity_quietness_kwargs) shift_rotated_head_files.append(out_allineate.outputs.out_file) # quality check video and mean for head and brain out_tcat = tcat(in_files=shift_rotated_head_files, out_file=os.path.join( write_dir, 'rigid_body_registered_heads.nii.gz'), **verbosity_kwargs) out_tstat_shr = tstat(in_file=out_tcat.outputs.out_file, outputtype='NIFTI_GZ') out_tcat = tcat(in_files=shift_rotated_brain_files, out_file=os.path.join( write_dir, 'rigid_body_registered_brains.nii.gz'), **verbosity_kwargs) out_tstat_shr = tstat(in_file=out_tcat.outputs.out_file, outputtype='NIFTI_GZ') if registration_kind == 'rigid': os.chdir(current_dir) return Bunch(registered=shift_rotated_head_files, transforms=rigid_transform_files) ########################################################################### # Affine transform # ---------------- # Similar 
to the previous rigid-body registration but with the following # differences: # 1) The registration target is now the product of rigid-body rather than # CoM registration. # 2) Rather than using the mean brain as a target, the mean head is used, # weighted by a mask made by binarizing the brains and making a count # mask out of them. This should mathematically be exactly the same thing # and was done this way partially for fun, partially to make more use of # the count mask, whose main purpose is to demonstrate variability in # brain size and extraction quality. # 3) There is an extra step for concatenation of transform results. # make the count mask out_mask_tool = mask_tool(in_file=out_tcat.outputs.out_file, count=True, verbose=verbose, outputtype='NIFTI_GZ') #affine transform affine_transform_files = [] for shift_rotated_head_file, rigid_transform_file in zip( shift_rotated_head_files, rigid_transform_files): out_allineate = allineate( in_file=shift_rotated_head_file, reference=out_tstat_shr.outputs.out_file, out_matrix=fname_presuffix(shift_rotated_head_file, suffix='_affine.aff12.1D', use_ext=False), convergence=convergence, two_blur=blur_radius_coarse, one_pass=True, weight=out_mask_tool.outputs.out_file, out_file=fname_presuffix(shift_rotated_head_file, suffix='_affine'), **verbosity_quietness_kwargs) # matrix concatenation suffixed_matrix = fname_presuffix(shift_rotated_head_file, suffix='_affine_catenated.aff12.1D', use_ext=False) catmatvec_out_file = os.path.join(write_dir, os.path.basename(suffixed_matrix)) out_catmatvec = catmatvec(in_file=[(rigid_transform_file, 'ONELINE'), (out_allineate.outputs.out_matrix, 'ONELINE')], out_file=catmatvec_out_file) affine_transform_files.append(catmatvec_out_file) # application to brains allineated_brain_files = [] for centered_brain_file, affine_transform_file in zip( centered_brain_files, affine_transform_files): out_allineate = allineate2(in_file=centered_brain_file, master=out_tstat_shr.outputs.out_file, in_matrix=affine_transform_file, out_file=fname_presuffix( centered_brain_file, suffix='_shr_affine_catenated'), **verbosity_quietness_kwargs) allineated_brain_files.append(out_allineate.outputs.out_file) # application to heads allineated_head_files = [] for centered_head_file, affine_transform_file in zip( centered_head_files, affine_transform_files): suffixed_file = fname_presuffix(centered_head_file, suffix='_shr_affine_catenated') out_file = os.path.join(write_dir, os.path.basename(suffixed_file)) out_allineate = allineate2(in_file=centered_head_file, master=out_tstat_shr.outputs.out_file, in_matrix=affine_transform_file, out_file=out_file, **verbosity_quietness_kwargs) allineated_head_files.append(out_allineate.outputs.out_file) #quality check videos and template for head and brain out_tcat_head = tcat(in_files=allineated_head_files, out_file=os.path.join( write_dir, 'affine_registered_heads.nii.gz'), **verbosity_kwargs) out_tstat_allineated_head = tstat(in_file=out_tcat_head.outputs.out_file, outputtype='NIFTI_GZ') out_tcat_brain = tcat(in_files=allineated_brain_files, out_file=os.path.join( write_dir, 'affine_registered_brains.nii.gz'), **verbosity_kwargs) out_tstat_allineated_brain = tstat(in_file=out_tcat_brain.outputs.out_file, outputtype='NIFTI_GZ') if registration_kind == 'affine': os.chdir(current_dir) return Bunch(registered=allineated_head_files, transforms=affine_transform_files) ########################################################################### # Non-linear registration # ----------------------- # A weight mask 
    # that extends beyond the brain, incorporating some surrounding tissue,
    # is needed to help better define the brain/head boundary.
    if nonlinear_weight_file is None:
        out_mask_tool = mask_tool(
            in_file=out_tcat.outputs.out_file,
            union=True,
            out_file=os.path.join(
                write_dir, 'affine_registered_brains_unionmask.nii.gz'),
            outputtype='NIFTI_GZ',
            verbose=verbose)
        out_mask_tool = mask_tool(
            in_file=out_mask_tool.outputs.out_file,
            out_file=os.path.join(
                write_dir, 'affine_registered_brains_unionmask_dil4.nii.gz'),
            dilate_inputs='4',
            outputtype='NIFTI_GZ',
            verbose=verbose)
        nonlinear_weight_file = out_mask_tool.outputs.out_file

    ###########################################################################
    # Description to fill
    #
    #
    if nonlinear_levels is None:
        nonlinear_levels = [1, 2, 3]
    if nonlinear_minimal_patches is None:
        nonlinear_minimal_patches = []

    levels_minpatches = nonlinear_levels + nonlinear_minimal_patches
    for n_iter, level_or_minpatch in enumerate(levels_minpatches):
        if n_iter == 0:
            previous_warp_files = affine_transform_files

        warped_files = []
        warp_files = []
        for warp_file, centered_head_file in zip(previous_warp_files,
                                                 centered_head_files):
            out_file = fname_presuffix(centered_head_file,
                                       suffix='_warped{}'.format(n_iter))
            if n_iter == 0:
                out_nwarp_cat = nwarp_cat(
                    in_files=[('IDENT', centered_head_file), warp_file],
                    out_file=fname_presuffix(centered_head_file,
                                             suffix='_iniwarp'))
                out_qwarp = qwarp(
                    in_file=centered_head_file,
                    base_file=out_tstat_allineated_head.outputs.out_file,
                    noneg=True,
                    iwarp=True,
                    weight=nonlinear_weight_file,
                    iniwarp=[out_nwarp_cat.outputs.out_file],
                    inilev=0,
                    maxlev=level_or_minpatch,
                    out_file=out_file,
                    **verbosity_quietness_kwargs)
            elif n_iter < len(nonlinear_levels):
                out_qwarp = qwarp(
                    in_file=centered_head_file,
                    base_file=nwarp_adjusted_mean,
                    noneg=True,
                    iwarp=True,
                    weight=nonlinear_weight_file,
                    iniwarp=[warp_file],
                    inilev=levels_minpatches[n_iter - 1] + 1,
                    maxlev=level_or_minpatch,
                    out_file=out_file,
                    **verbosity_quietness_kwargs)
            else:
                out_qwarp = qwarp2(
                    in_file=centered_head_file,
                    base_file=nwarp_adjusted_mean,
                    noneg=True,
                    iwarp=True,
                    weight=nonlinear_weight_file,
                    iniwarp=[warp_file],
                    inilev=nonlinear_levels[-1] + 1,  # not ideal
                    minpatch=level_or_minpatch,
                    out_file=out_file)

            warped_files.append(out_qwarp.outputs.warped_source)
            warp_files.append(out_qwarp.outputs.source_warp)

        previous_warp_files = warp_files

        out_tcat = tcat(
            in_files=warped_files,
            out_file=os.path.join(
                write_dir, 'warped_{0}iters_heads.nii.gz'.format(n_iter)),
            **verbosity_kwargs)
        out_tstat_warp_head = tstat(in_file=out_tcat.outputs.out_file,
                                    outputtype='NIFTI_GZ')
        nwarp_adjusted_mean = 'warped_{0}_adjusted_mean.nii.gz'.format(n_iter)
        out_nwarp_adjust = nwarp_adjust(warps=warp_files,
                                        in_files=centered_head_files,
                                        out_file=nwarp_adjusted_mean)

    ###########################################################################
    # Register to template
    # --------------------
    # Apply non-linear registration results to uncorrected images
    warped_files = []
    for centered_head_file, warp_file in zip(centered_head_files, warp_files):
        suffixed_file = fname_presuffix(
            centered_head_file,
            suffix='affine_warp{}_catenated'.format(len(nonlinear_levels)))
        out_file = os.path.join(write_dir, os.path.basename(suffixed_file))
        out_warp_apply = warp_apply(
            in_file=centered_head_file,
            warp=warp_file,
            master=out_tstat_warp_head.outputs.out_file,
            out_file=out_file,
            **verbosity_quietness_kwargs)
        warped_files.append(out_warp_apply.outputs.out_file)

    os.chdir(current_dir)
    return Bunch(registered=warped_files, transforms=warp_files)
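# Hypothetical sketch of the two-pass workflow described in the Notes above
# (paths are placeholders): run up to the affine stage first, build a weight
# mask from the affine-registered brains by hand, then rerun non-linearly
# with that mask.
#
# >>> affine = anats_to_common(anat_filenames, write_dir,  # doctest: +SKIP
# ...                          brain_volume=400,
# ...                          registration_kind='affine')
# >>> # ... create and edit a dilated union mask of the affine-registered
# >>> # brains (e.g. with 3dmask_tool), saved as 'nonlinear_weight.nii.gz' ...
# >>> nonlinear = anats_to_common(  # doctest: +SKIP
# ...     anat_filenames, write_dir, brain_volume=400,
# ...     registration_kind='nonlinear',
# ...     nonlinear_weight_file='nonlinear_weight.nii.gz')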