def freesurfer_pipeline(self, **kwargs):
    """
    Reconstructs cortical surfaces and segments grey matter, white
    matter and CSF from T1 images using FreeSurfer's recon-all.
    """
    pipeline = self.create_pipeline(
        name='segmentation',
        inputs=[DatasetSpec('primary', nifti_gz_format)],
        outputs=[DatasetSpec('fs_recon_all', freesurfer_recon_all_format)],
        desc="Segment white/grey matter and csf",
        version=1,
        citations=copy(freesurfer_cites),
        **kwargs)
    # FS ReconAll node
    recon_all = pipeline.create_node(
        interface=ReconAll(), name='recon_all',
        requirements=[freesurfer_req], wall_time=2000)
    recon_all.inputs.directive = 'all'
    recon_all.inputs.openmp = self.runner.num_processes
    # Wrapper around os.path.join
    join = pipeline.create_node(interface=JoinPath(), name='join')
    pipeline.connect(recon_all, 'subjects_dir', join, 'dirname')
    pipeline.connect(recon_all, 'subject_id', join, 'filename')
    # Connect inputs/outputs
    pipeline.connect_input('primary', recon_all, 'T1_files')
    pipeline.connect_output('fs_recon_all', join, 'path')
    return pipeline
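# `JoinPath` above is a small utility interface from the same codebase; per
# the comment it simply wraps os.path.join. A minimal sketch of an
# equivalent built on nipype's generic Function interface, in case that
# utility is not available (names here are illustrative, not from the
# original):
import nipype.interfaces.utility as nutil


def _join_path(dirname, filename):
    import os
    return os.path.join(dirname, filename)


join_path_iface = nutil.Function(input_names=['dirname', 'filename'],
                                 output_names=['path'],
                                 function=_join_path)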
def build_core_nodes(self):
    """Build and connect the core nodes of the pipeline."""
    import os

    import nipype.interfaces.utility as nutil
    import nipype.pipeline.engine as npe
    from nipype.interfaces.freesurfer.preprocess import ReconAll

    from .t1_freesurfer_utils import (init_input_node, write_tsv_files)

    # Nodes declaration
    # =================
    # Initialize the pipeline
    # - Extract <image_id> (e.g. sub-CLNC01_ses-M00) from the T1w filename;
    # - Check FOV of T1w;
    # - Create <subjects_dir> folder in <WD>/<Pipeline.name>/ReconAll/<image_id>/;
    # - Print begin execution message.
    init_input = npe.Node(
        interface=nutil.Function(
            input_names=['t1w', 'recon_all_args', 'output_dir'],
            output_names=['image_id', 't1w', 'flags', 'subjects_dir'],
            function=init_input_node),
        name='0-InitPipeline')
    init_input.inputs.recon_all_args = self.parameters['recon_all_args']
    init_input.inputs.output_dir = os.path.join(
        self.base_dir, self.name, 'ReconAll')

    # Run recon-all command
    # FreeSurfer segmentation will be in <subjects_dir>/<image_id>/
    recon_all = npe.Node(interface=ReconAll(),
                         name='1-SegmentationReconAll')
    recon_all.inputs.directive = 'all'

    # Generate TSV files containing a summary of the regional statistics
    # in <subjects_dir>/regional_measures
    create_tsv = npe.Node(
        interface=nutil.Function(
            input_names=['subjects_dir', 'image_id'],
            output_names=['image_id'],
            function=write_tsv_files),
        name='2-CreateTsvFiles')

    # Connections
    # ===========
    self.connect([
        # Get <image_id> from input_node and print begin message
        (self.input_node, init_input, [('t1w', 't1w')]),
        # Run recon-all command
        (init_input, recon_all, [('subjects_dir', 'subjects_dir'),
                                 ('t1w', 'T1_files'),
                                 ('image_id', 'subject_id'),
                                 ('flags', 'flags')]),
        # Generate TSV files
        (init_input, create_tsv, [('subjects_dir', 'subjects_dir')]),
        (recon_all, create_tsv, [('subject_id', 'image_id')]),
        # Output node
        (recon_all, self.output_node, [('subject_id', 'image_id')]),
    ])
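# Hypothetical sketch of what init_input_node (imported above) is declared
# to do, inferred only from its input/output names and the comments: derive
# <image_id> from the BIDS T1w filename, create the per-image subjects_dir,
# and pass the recon-all flags through. The real Clinica helper also checks
# the FOV of the T1w image; that step is omitted here.
def init_input_node(t1w, recon_all_args, output_dir):
    import os
    # e.g. 'sub-CLNC01_ses-M00_T1w.nii.gz' -> 'sub-CLNC01_ses-M00'
    image_id = '_'.join(os.path.basename(t1w).split('_')[:2])
    subjects_dir = os.path.join(output_dir, image_id)
    if not os.path.exists(subjects_dir):
        os.makedirs(subjects_dir)
    print('%s: recon-all starting' % image_id)
    flags = recon_all_args
    return image_id, t1w, flags, subjects_dir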
from nipype.interfaces.freesurfer.preprocess import ReconAll
from nipype.testing import assert_equal


def test_ReconAll_inputs():
    input_map = dict(
        T1_files=dict(argstr='-i %s...', ),
        T2_file=dict(
            argstr='-T2 %s',
            min_ver='5.3.0',
        ),
        args=dict(argstr='%s', ),
        directive=dict(
            argstr='-%s',
            position=0,
            usedefault=True,
        ),
        environ=dict(
            nohash=True,
            usedefault=True,
        ),
        flags=dict(argstr='%s', ),
        hemi=dict(argstr='-hemi %s', ),
        ignore_exception=dict(
            nohash=True,
            usedefault=True,
        ),
        openmp=dict(argstr='-openmp %d', ),
        subject_id=dict(
            argstr='-subjid %s',
            usedefault=True,
        ),
        subjects_dir=dict(
            argstr='-sd %s',
            genfile=True,
            hash_files=False,
        ),
        terminal_output=dict(
            mandatory=True,
            nohash=True,
        ),
    )
    inputs = ReconAll.input_spec()

    for key, metadata in input_map.items():
        for metakey, value in metadata.items():
            yield assert_equal, getattr(inputs.traits()[key], metakey), value
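# Quick sanity check of how the metadata above maps onto the command line:
# instantiating the interface and printing .cmdline shows each argstr
# applied (directive is placed first because of position=0). T1_files is
# left unset here because its trait requires an existing file.
from nipype.interfaces.freesurfer.preprocess import ReconAll

recon = ReconAll()
recon.inputs.directive = 'all'      # argstr='-%s' -> '-all'
recon.inputs.subject_id = 'sub-01'  # argstr='-subjid %s'
recon.inputs.openmp = 4             # argstr='-openmp %d'
print(recon.cmdline)  # e.g. 'recon-all -all -openmp 4 -subjid sub-01 -sd ...'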
def processT1(t1_filename):
    # NOTE: t1_filename is currently unused; subjects are taken from
    # subject_list below.
    import os

    import nipype.pipeline.engine as pe
    import nipype.interfaces.io as nio
    from nipype.interfaces.freesurfer.preprocess import ReconAll
    from nipype.interfaces.freesurfer.utils import MakeAverageSubject

    subject_list = ['s1', 's3']
    data_dir = os.path.abspath('data')
    subjects_dir = os.path.abspath('amri_freesurfer_tutorial/subjects_dir')

    wf = pe.Workflow(name="l1workflow")
    wf.base_dir = os.path.abspath('amri_freesurfer_tutorial/workdir')

    datasource = pe.MapNode(interface=nio.DataGrabber(infields=['subject_id'],
                                                      outfields=['struct']),
                            name='datasource',
                            iterfield=['subject_id'])
    datasource.inputs.base_directory = data_dir
    datasource.inputs.template = '%s/%s.nii'
    datasource.inputs.template_args = dict(struct=[['subject_id', 'struct']])
    datasource.inputs.subject_id = subject_list

    recon_all = pe.MapNode(interface=ReconAll(),
                           name='recon_all',
                           iterfield=['subject_id', 'T1_files'])
    recon_all.inputs.subject_id = subject_list
    if not os.path.exists(subjects_dir):
        os.mkdir(subjects_dir)
    recon_all.inputs.subjects_dir = subjects_dir
    wf.connect(datasource, 'struct', recon_all, 'T1_files')

    average = pe.Node(interface=MakeAverageSubject(), name="average")
    average.inputs.subjects_dir = subjects_dir
    wf.connect(recon_all, 'subject_id', average, 'subjects_ids')

    wf.run("MultiProc", plugin_args={'n_procs': 4})
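# Optional tuning (a sketch, not part of the original example): each
# recon-all run is long and can itself be multi-threaded via the openmp
# input shown in the input-spec test above, so with the MultiProc plugin
# the total load is roughly n_procs * openmp threads; keep the product
# within the available cores.
recon_all.inputs.openmp = 2                             # threads per recon-all
wf.run(plugin="MultiProc", plugin_args={'n_procs': 2})  # 2 x 2 = 4 cores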
def freesurfer_pipeline(self, **name_maps):
    """
    Reconstructs cortical surfaces and segments grey matter, white
    matter and CSF from T1 images using FreeSurfer's recon-all.
    """
    pipeline = self.new_pipeline(
        name='segmentation',
        name_maps=name_maps,
        desc="Segment white/grey matter and csf",
        citations=copy(freesurfer_cites))
    # FS ReconAll node
    recon_all = pipeline.add(
        'recon_all',
        interface=ReconAll(
            directive='all',
            openmp=self.processor.num_processes),
        inputs={'T1_files': ('mag_preproc', nifti_gz_format)},
        requirements=[freesurfer_req.v('5.3')],
        wall_time=2000)
    if self.provided('t2_coreg'):
        pipeline.connect_input('t2_coreg', recon_all, 'T2_file',
                               nifti_gz_format)
        recon_all.inputs.use_T2 = True
    # Wrapper around os.path.join
    pipeline.add(
        'join',
        JoinPath(),
        inputs={
            'dirname': (recon_all, 'subjects_dir'),
            'filename': (recon_all, 'subject_id')},
        outputs={'fs_recon_all': ('path', directory_format)})
    return pipeline
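# Standalone sketch of the optional T2 branch above: given a coregistered
# T2 image, recon-all can use it to refine the pial surface (FreeSurfer
# >= 5.3; use_T2 adds -T2pial to the command). Paths are placeholders and
# must point at existing files before assignment, since both traits
# require existing files.
from nipype.interfaces.freesurfer.preprocess import ReconAll

recon = ReconAll(directive='all')
recon.inputs.T1_files = 'sub-01_T1w.nii.gz'
recon.inputs.T2_file = 'sub-01_T2w.nii.gz'
recon.inputs.use_T2 = True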
dcm2nii_t1 = pe.Node(DcmStack(), name="dcm2nii_t1")
dcm2nii_t1.inputs.embed_meta = True
dcm2nii_t1.plugin_args = {'submit_specs': 'request_memory = 2000'}
wf.connect(datagrabber, "t1", dcm2nii_t1, "dicom_files")

t1_rename = pe.Node(util.Rename(format_string="anat.nii.gz"),
                    name="t1_rename")
wf.connect(dcm2nii_t1, "out_file", t1_rename, "in_file")

ds = pe.Node(nio.DataSink(), name="t1_datasink")
ds.inputs.base_directory = '/scr/kalifornien1/data/nki_enhanced/'
ds.inputs.substitutions = [('_subject_id_', '')]
ds.inputs.regexp_substitutions = [('_others_rename[0-9]*/', '')]
wf.connect(t1_rename, "out_file", ds, "niftis.@t1")

recon_all = pe.Node(ReconAll(), name="recon_all")
recon_all.plugin_args = {'submit_specs': 'request_memory = 2500'}
recon_all.inputs.args = "-no-isrunning"
#recon_all._interface._can_resume = False
#recon_all.inputs.subjects_dir = "/scr/adenauer1/freesurfer"
wf.connect(dcm2nii_t1, "out_file", recon_all, "T1_files")
wf.connect(subjects_infosource, "subject_id", recon_all, "subject_id")


def cat(s1, s2):
    import os
    return os.path.join(s1, s2)


join = pe.Node(util.Function(input_names=['s1', 's2'],
                             output_names=['out'],
                             function=cat),
               name="join")
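# The fragment ends before `join` is wired up; a plausible continuation
# (an assumption, mirroring the JoinPath pattern in the first pipeline
# above) would assemble <subjects_dir>/<subject_id> for downstream nodes:
wf.connect(recon_all, 'subjects_dir', join, 's1')
wf.connect(recon_all, 'subject_id', join, 's2')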
infosource.iterables = [('subject_id', subject_list)]

infosource_task = pe.Node(util.IdentityInterface(fields=['session']),
                          name="infosource_task")
infosource_task.iterables = [('session', session_list)]

# Adjust the template to the location of the T1
templates = {
    'anat': '/gpfs/gibbs/pi/levy_ifat/Nachshon/KPE/sub-{subject_id}/ses-{session}/anat/sub-{subject_id}_ses-{session}_T1w.nii.gz',
}
selectfiles = pe.Node(nio.SelectFiles(templates, base_directory=data_dir),
                      name="selectfiles")

recon_all = pe.MapNode(interface=ReconAll(),
                       name='recon_all',
                       iterfield=['subject_id'])
recon_all.inputs.subject_id = subject_list
recon_all.inputs.directive = "all"
if not os.path.exists(subjects_dir):
    os.mkdir(subjects_dir)
# Use a specific directory here, to avoid crashes when creating symlinks.
recon_all.inputs.subjects_dir = subjects_dir
#recon_all.inputs.hippocampal_subfields_T1 = True  # add hippocampal subfields

# Home-made node to run hippocampus and amygdala segmentation, wrapping the
# hippocampal subfield script; see
# https://surfer.nmr.mgh.harvard.edu/fswiki/HippocampalSubfieldsAndNucleiOfAmygdala
# for instructions on how to run it.
hippSeg = pe.Node(interface=CustomHippoSeg(), name='hippSeg')
from nipype.interfaces.freesurfer.preprocess import ReconAll
from nipype.testing import assert_equal


def test_ReconAll_outputs():
    output_map = dict(
        BA_stats=dict(
            altkey='BA',
            loc='stats',
        ),
        T1=dict(loc='mri', ),
        annot=dict(
            altkey='*annot',
            loc='label',
        ),
        aparc_a2009s_stats=dict(
            altkey='aparc.a2009s',
            loc='stats',
        ),
        aparc_aseg=dict(
            altkey='aparc*aseg',
            loc='mri',
        ),
        aparc_stats=dict(
            altkey='aparc',
            loc='stats',
        ),
        aseg=dict(loc='mri', ),
        aseg_stats=dict(
            altkey='aseg',
            loc='stats',
        ),
        brain=dict(loc='mri', ),
        brainmask=dict(loc='mri', ),
        curv=dict(loc='surf', ),
        curv_stats=dict(
            altkey='curv',
            loc='stats',
        ),
        entorhinal_exvivo_stats=dict(
            altkey='entorhinal_exvivo',
            loc='stats',
        ),
        filled=dict(loc='mri', ),
        inflated=dict(loc='surf', ),
        label=dict(
            altkey='*label',
            loc='label',
        ),
        norm=dict(loc='mri', ),
        nu=dict(loc='mri', ),
        orig=dict(loc='mri', ),
        pial=dict(loc='surf', ),
        rawavg=dict(loc='mri', ),
        ribbon=dict(
            altkey='*ribbon',
            loc='mri',
        ),
        smoothwm=dict(loc='surf', ),
        sphere=dict(loc='surf', ),
        sphere_reg=dict(
            altkey='sphere.reg',
            loc='surf',
        ),
        subject_id=dict(),
        subjects_dir=dict(),
        sulc=dict(loc='surf', ),
        thickness=dict(loc='surf', ),
        volume=dict(loc='surf', ),
        white=dict(loc='surf', ),
        wm=dict(loc='mri', ),
        wmparc=dict(loc='mri', ),
        wmparc_stats=dict(
            altkey='wmparc',
            loc='stats',
        ),
    )
    outputs = ReconAll.output_spec()

    for key, metadata in output_map.items():
        for metakey, value in metadata.items():
            yield assert_equal, getattr(outputs.traits()[key], metakey), value
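# The loc/altkey metadata above encodes where the output spec collects each
# file under <subjects_dir>/<subject_id>/: e.g. aseg in mri/, thickness in
# surf/, aparc_stats in stats/. A manual sketch of resolving one such path
# (directory and subject names are placeholders):
import os

subjects_dir, subject_id = '/data/freesurfer', 'sub-01'
aseg_path = os.path.join(subjects_dir, subject_id, 'mri', 'aseg.mgz')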
""" running freesurfer reconall to get tissue segmentations from T1 subject_id = sd51_d01 data_dir = /data/pt_mar006/subjects free_dir = /data/pt_mar006/freesurfer Usage: $ python 02_structReconAll.py <subject_id> <data_dir> <free_dir> """ from nipype.interfaces.freesurfer.preprocess import ReconAll import os, glob, sys # subject_id and T1 scan directory: user given subject_id = sys.argv[1] data_dir = sys.argv[2] free_dir = sys.argv[3] Tscan = os.path.join(data_dir, subject_id, 'nifti/mprage', 'T1.nii.gz') # run ReconAll recon_all = ReconAll() recon_all.inputs.subject_id = subject_id recon_all.inputs.subjects_dir = free_dir recon_all.inputs.T1_files = Tscan recon_all.run()
def build_core_nodes(self):
    """Build and connect the core nodes of the pipeline."""
    import os

    import nipype.interfaces.utility as nutil
    import nipype.pipeline.engine as npe
    from nipype.interfaces.freesurfer.preprocess import ReconAll

    from .t1_freesurfer_utils import init_input_node, write_tsv_files

    # Nodes declaration
    # =================
    # Initialize the pipeline
    # - Extract <image_id> (e.g. sub-CLNC01_ses-M00) from the T1w filename;
    # - Check FOV of T1w;
    # - Create <subjects_dir> folder in <WD>/<Pipeline.name>/ReconAll/<image_id>/;
    # - Print begin execution message.
    init_input = npe.Node(
        interface=nutil.Function(
            input_names=["t1w", "recon_all_args", "output_dir"],
            output_names=["image_id", "t1w", "flags", "subjects_dir"],
            function=init_input_node,
        ),
        name="0-InitPipeline",
    )
    init_input.inputs.recon_all_args = self.parameters["recon_all_args"]
    init_input.inputs.output_dir = os.path.join(self.base_dir, self.name, "ReconAll")

    # Run recon-all command
    # FreeSurfer segmentation will be in <subjects_dir>/<image_id>/
    recon_all = npe.Node(interface=ReconAll(), name="1-SegmentationReconAll")
    recon_all.inputs.directive = "all"

    # Generate TSV files containing a summary of the regional statistics
    # in <subjects_dir>/regional_measures
    create_tsv = npe.Node(
        interface=nutil.Function(
            input_names=["subjects_dir", "image_id"],
            output_names=["image_id"],
            function=write_tsv_files,
        ),
        name="2-CreateTsvFiles",
    )

    # Connections
    # ===========
    # fmt: off
    self.connect([
        # Get <image_id> from input_node and print begin message
        (self.input_node, init_input, [("t1w", "t1w")]),
        # Run recon-all command
        (init_input, recon_all, [("subjects_dir", "subjects_dir"),
                                 ("t1w", "T1_files"),
                                 ("image_id", "subject_id"),
                                 ("flags", "flags")]),
        # Generate TSV files
        (init_input, create_tsv, [("subjects_dir", "subjects_dir")]),
        (recon_all, create_tsv, [("subject_id", "image_id")]),
        # Output node
        (create_tsv, self.output_node, [("image_id", "image_id")]),
    ])
    # fmt: on
def build_core_nodes(self):
    """Build and connect the core nodes of the pipelines."""
    import t1_freesurfer_cross_sectional_utils as utils
    import nipype.interfaces.utility as nutil
    import nipype.pipeline.engine as npe
    from nipype.interfaces.freesurfer.preprocess import ReconAll
    from clinica.utils.stream import cprint
    from distutils.version import LooseVersion

    # Check the nipype version providing ReconAll: comparing the split
    # version strings lexicographically would mis-order e.g. '0.9' vs '0.11'.
    # (On Python 3, func_globals is spelled __globals__.)
    try:
        nipype_version = ReconAll.version.fget.func_globals['__version__']
        if LooseVersion(nipype_version) < LooseVersion('0.11.0'):
            raise RuntimeError('nipype version must be at least 0.11.0 '
                               'to run ReconAll (found %s)' % nipype_version)
    except Exception as e:
        cprint(str(e))
        exit(1)

    # MapNode to check whether -cw256 is needed for each subject
    # (-qcache is the default for every subject).
    flagnode = npe.MapNode(name='flagnode',
                           iterfield=['t1_list'],
                           interface=nutil.Function(
                               input_names=['t1_list', 'recon_all_args'],
                               output_names=['output_flags'],
                               function=utils.checkfov))

    # MapNode to convert each subject's flags to a string.
    create_flags = npe.MapNode(interface=nutil.Function(
        input_names=['input_flags'],
        output_names=['output_str'],
        function=utils.create_flags_str),
        name='create_flags_string',
        iterfield=['input_flags'])

    # MapNode to run recon-all.
    recon_all = npe.MapNode(interface=ReconAll(),
                            name='recon_all',
                            iterfield=['subject_id', 'T1_files',
                                       'subjects_dir', 'flags'])
    recon_all.inputs.directive = 'all'

    # MapNode to create the statistics TSV file for each subject
    tsvmapnode = npe.MapNode(name='tsvmapnode',
                             iterfield=['subject_id'],
                             interface=nutil.Function(
                                 input_names=['subject_id', 'output_dir'],
                                 output_names=[],
                                 function=utils.write_statistics_per_subject,
                                 imports=['import os', 'import errno']))
    tsvmapnode.inputs.output_dir = self.caps_directory

    # Node to create the log file for the first-step quality check
    lognode = npe.Node(name='lognode',
                       interface=nutil.Function(
                           input_names=['subject_list', 'session_list',
                                        'subject_id', 'output_dir'],
                           output_names=[],
                           function=utils.log_summary))
    lognode.inputs.output_dir = self.caps_directory

    # Connection
    # ==========
    self.connect([
        # Check FOV and assemble the recon-all flags
        (self.input_node, flagnode, [('recon_all_args', 'recon_all_args')]),
        (self.input_node, recon_all, [('subject_dir', 'subjects_dir')]),
        (self.input_node, recon_all, [('subject_id', 'subject_id')]),
        (self.input_node, recon_all, [('anat_t1', 'T1_files')]),
        (self.input_node, flagnode, [('anat_t1', 't1_list')]),
        (self.input_node, lognode, [('subject_list', 'subject_list')]),
        (self.input_node, lognode, [('session_list', 'session_list')]),
        (flagnode, create_flags, [('output_flags', 'input_flags')]),
        (create_flags, recon_all, [('output_str', 'flags')]),
        (recon_all, tsvmapnode, [('subject_id', 'subject_id')]),
        (recon_all, lognode, [('subject_id', 'subject_id')]),
        (recon_all, self.output_node, [('subject_id', 'subject_id')]),
    ])

    return self
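# Hypothetical sketch of the FOV check wrapped by `flagnode` above (the
# real implementation lives in the utils module): recon-all fails when the
# field of view exceeds 256 mm, in which case -cw256 must be added to the
# flags; create_flags_str would then join the list into a single string.
def checkfov(t1_list, recon_all_args):
    import nibabel as nib
    # flagnode is a MapNode iterating over t1_list, so each call is assumed
    # to receive a single T1 path (a simplification of the real utility).
    img = nib.load(t1_list)
    # FOV per axis in mm = voxel count * voxel size
    fov = [n * z for n, z in zip(img.shape[:3],
                                 img.header.get_zooms()[:3])]
    output_flags = [recon_all_args]
    if any(f > 256 for f in fov):
        output_flags.append('-cw256')
    return output_flags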
def build_core_nodes(self):
    """Build and connect the core nodes of the pipelines."""
    import clinica.pipelines.t1_freesurfer_cross_sectional.t1_freesurfer_cross_sectional_utils as utils
    import nipype.interfaces.utility as nutil
    import nipype.pipeline.engine as npe
    from nipype.interfaces.freesurfer.preprocess import ReconAll

    # MapNode to check whether -cw256 is needed for each subject
    # (-qcache is the default for every subject).
    flagnode = npe.MapNode(name='flagnode',
                           iterfield=['t1_list'],
                           interface=nutil.Function(
                               input_names=['t1_list', 'recon_all_args'],
                               output_names=['output_flags'],
                               function=utils.check_fov))

    # MapNode to convert each subject's flags to a string.
    create_flags = npe.MapNode(interface=nutil.Function(
        input_names=['input_flags'],
        output_names=['output_str'],
        function=utils.create_flags_str),
        name='create_flags_string',
        iterfield=['input_flags'])

    # MapNode to run recon-all.
    recon_all = npe.MapNode(interface=ReconAll(),
                            name='recon_all',
                            iterfield=['subject_id', 'T1_files',
                                       'subjects_dir', 'flags'])
    recon_all.inputs.directive = 'all'

    # MapNode to create the statistics TSV file for each subject
    tsvmapnode = npe.MapNode(name='tsvmapnode',
                             iterfield=['subject_id'],
                             interface=nutil.Function(
                                 input_names=['subject_id', 'output_dir'],
                                 output_names=[],
                                 function=utils.write_tsv_files,
                                 imports=['import os', 'import errno']))
    tsvmapnode.inputs.output_dir = self.caps_directory

    print_begin_message = npe.MapNode(interface=nutil.Function(
        input_names=['subject_id'],
        function=utils.print_begin_pipeline),
        iterfield='subject_id',
        name='Write-Begin_Message')

    print_end_message = npe.MapNode(interface=nutil.Function(
        input_names=['subject_id', 'final_file'],
        function=utils.print_end_pipeline),
        iterfield='subject_id',
        name='Write-End_Message')

    # Connection
    # ==========
    self.connect([
        (self.input_node, print_begin_message, [('subject_id', 'subject_id')]),
        (self.input_node, flagnode, [('recon_all_args', 'recon_all_args')]),
        (self.input_node, flagnode, [('anat_t1', 't1_list')]),
        (flagnode, create_flags, [('output_flags', 'input_flags')]),
        (create_flags, recon_all, [('output_str', 'flags')]),
        (self.input_node, recon_all, [('subject_dir', 'subjects_dir')]),
        (self.input_node, recon_all, [('subject_id', 'subject_id')]),
        (self.input_node, recon_all, [('anat_t1', 'T1_files')]),
        # (self.input_node, lognode, [('subject_list', 'subject_list')]),
        # (self.input_node, lognode, [('session_list', 'session_list')]),
        (recon_all, tsvmapnode, [('subject_id', 'subject_id')]),
        # (recon_all, lognode, [('subject_id', 'subject_id')]),
        (recon_all, self.output_node, [('subject_id', 'subject_id')]),
        (self.input_node, print_end_message, [('subject_id', 'subject_id')]),
        (recon_all, print_end_message, [('subject_id', 'final_file')]),
    ])

    return self