def sort_pes(pes):
    from nipype import config, logging
    from nipype.interfaces.fsl import Merge
    from os.path import abspath
    config.enable_debug_mode()
    logging.update_logging(config)

    print(pes)
    pe1s = []
    pe0s = []
    for file in pes:
        if 'pe0' in file:
            pe0s.append(file)
        elif 'pe1' in file:
            pe1s.append(file)

    pe1s = sorted(pe1s)
    pe0s = sorted(pe0s)

    # Pair each pe1 file with its matching pe0 file and merge them along time
    merged_pes = []

    for pe1, pe0 in zip(pe1s, pe0s):
        num = pe1[-12:-11]  # run number embedded in the filename
        me = Merge()
        me.inputs.in_files = [pe1, pe0]
        me.inputs.dimension = 't'
        me.inputs.merged_file = 'merged_pes%s.nii.gz' % num
        me.run()
        merged_pes.append(abspath('merged_pes%s.nii.gz' % num))

    return(merged_pes)
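These helper functions keep their imports inside the function body, which is the pattern nipype's Function interface expects when it re-executes the function source inside a node's working directory. A minimal sketch of wrapping sort_pes as a workflow node follows; the node and connection names are illustrative, not taken from the source.

import nipype.pipeline.engine as pe
from nipype.interfaces.utility import Function

# Hypothetical wrapping of sort_pes as a Function node
sort_pes_node = pe.Node(Function(input_names=['pes'],
                                 output_names=['merged_pes'],
                                 function=sort_pes),
                        name='sort_pes')
# The list of pe0/pe1 NIfTI files would then be wired in, e.g.:
# workflow.connect(selectfiles, 'pes', sort_pes_node, 'pes')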
Example #2
def phantoms_wf(options, cfg):
    import glob
    import os.path as op
    import nipype.pipeline.engine as pe
    from nipype import config, logging
    from nipype.interfaces import utility as niu
    from pyacwereg.workflows import evaluation as ev

    config.update_config(cfg)
    logging.update_logging(config)

    grid_size = options.grid_size
    if len(grid_size) == 1:
        grid_size = grid_size * 3

    bs = ev.bspline(name=options.name, shapes=options.shape, snr_list=options.snr, N=options.repetitions)
    bs.inputs.inputnode.grid_size = grid_size
    bs.inputs.inputnode.lo_matrix = options.lo_matrix
    bs.inputs.inputnode.hi_matrix = options.hi_matrix
    bs.inputs.inputnode.cortex = options.no_cortex

    if options.out_csv is None:
        bs.inputs.inputnode.out_csv = op.join(options.work_dir, bs.name, "results.csv")
    else:
        bs.inputs.inputnode.out_csv = options.out_csv

    return bs
Example #3
def make3DTemplate(subject_T1s, num_proc, output_prefix):
    from nipype import config, logging
    config.enable_debug_mode()
    logging.update_logging(config)
    
    from os.path import abspath, split
    from os import getcwd
    from shutil import copyfile
    from glob import glob
    from subprocess import call

    curr_dir = getcwd()

    #copy T1s into current directory
    for T in range(0,len(subject_T1s)):
        [dirname,filename] = split(subject_T1s[T])
        copyfile(subject_T1s[T],curr_dir + '/S' + str(T)+'_'+filename)

    # -c flag controls local computing (2 = use localhost; required for the -j flag)
    # -j flag sets the number of processors allowed
    # subprocess.call() does not go through a shell, so expand the wildcard with glob()
    call(['antsMultivariateTemplateConstruction2.sh', '-d', '3', '-o', output_prefix,
          '-r', '1', '-c', '2', '-j', str(num_proc)] + glob('*.nii.gz'))
    
    sample_template = abspath(output_prefix + 'template0.nii.gz')
    
    return(sample_template)
Example #4
def execute_task(pckld_task, node_config, updatehash):
    from socket import gethostname
    from traceback import format_exc
    from nipype import config, logging

    traceback = None
    result = None
    import os

    cwd = os.getcwd()
    try:
        config.update_config(node_config)
        logging.update_logging(config)
        from pickle import loads

        task = loads(pckld_task)
        result = task.run(updatehash=updatehash)
    except:
        traceback = format_exc()
        from pickle import loads

        task = loads(pckld_task)
        result = task.result
    os.chdir(cwd)
    return result, traceback, gethostname()
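execute_task assumes a caller that pickles a nipype node and ships it together with a plain dict of config sections. A hedged caller-side sketch follows; the node, its inputs, and the config values are hypothetical placeholders.

from pickle import dumps, HIGHEST_PROTOCOL

import nipype.pipeline.engine as pe
from nipype.interfaces.utility import Function


def _add_one(x):
    return x + 1


# Hypothetical task: any picklable nipype Node will do
task_node = pe.Node(Function(input_names=['x'], output_names=['out'],
                             function=_add_one),
                    name='add_one')
task_node.inputs.x = 1

# Plain dict of config sections, as expected by config.update_config()
node_config = {'execution': {'stop_on_first_crash': 'true'},
               'logging': {'workflow_level': 'INFO'}}

pckld_task = dumps(task_node, protocol=HIGHEST_PROTOCOL)
result, traceback, hostname = execute_task(pckld_task, node_config, updatehash=False)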
Example #5
def adjust_masks(masks):
    from os.path import abspath
    from nipype import config, logging
    config.enable_debug_mode()
    logging.update_logging(config)

    from nipype.interfaces.freesurfer.model import Binarize
    #pve0 = csf, pve1 = gm, pve2 = wm

    origvols = sorted(masks)
    csf = origvols[0]
    wm = origvols[2]

    erode = Binarize()
    erode.inputs.in_file = wm
    erode.inputs.erode = 1
    erode.inputs.min = 0.5
    erode.inputs.max = 1000
    erode.inputs.binary_file = 'WM_seg.nii'
    erode.run()

    wm_new = abspath(erode.inputs.binary_file)

    vols = []
    vols.append(wm_new)
    vols.append(csf)

    return (vols)
Example #6
def init_logging(workdir):
    """
    Add new logging handler to nipype to output to log directory

    :param workdir: Log directory

    """
    fp = os.path.join(workdir, "pipeline.json")

    with open(fp, "r") as f:
        data = json.load(f)

    images = transpose(data["images"])

    real_output_dir = os.path.join(workdir, "log")

    hdlr = WfHandler(real_output_dir, images)

    from nipype import logging as nlogging
    from nipype import config

    formatter = Formatter(fmt=nlogging.fmt, datefmt=nlogging.datefmt)
    hdlr.setFormatter(formatter)

    config.set("logging", "interface_level", "DEBUG")
    nlogging.update_logging(config)

    nlogging._iflogger.handlers = []
    nlogging._iflogger.propagate = False
    nlogging._iflogger.addHandler(hdlr)

    nlogging._logger.handlers = []
    nlogging._logger.propagate = True
    nlogging._logger.addHandler(hdlr)
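init_logging relies on a WfHandler class and a transpose helper defined elsewhere in the pipeline. A minimal, hypothetical stand-in for the handler, assuming it simply appends formatted records to a file inside the log directory, could look like this:

import logging as pylogging
import os


class WfHandler(pylogging.FileHandler):
    """Hypothetical placeholder: write log records to <output_dir>/pipeline.log.

    The real handler presumably also uses the `images` argument; here it is
    only stored so the constructor signature matches init_logging's call.
    """

    def __init__(self, output_dir, images):
        os.makedirs(output_dir, exist_ok=True)
        self.images = images
        super().__init__(os.path.join(output_dir, 'pipeline.log'))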
def check_mask_coverage(epi,brainmask):
    from os.path import abspath
    from nipype import config, logging
    config.enable_debug_mode()
    logging.update_logging(config)
    from nilearn import plotting
    from nipype.interfaces.nipy.preprocess import Trim

    trim = Trim()
    trim.inputs.in_file = epi
    trim.inputs.end_index = 1
    trim.inputs.out_file = 'epi_vol1.nii.gz'
    trim.run()
    epi_vol = abspath('epi_vol1.nii.gz')

    maskcheck_filename='maskcheck.png'
    display = plotting.plot_anat(epi_vol, display_mode='ortho',
                                 draw_cross=False,
                                 title = 'brainmask coverage')
    display.add_contours(brainmask,levels=[.5], colors='r')
    display.savefig(maskcheck_filename)
    display.close()
    maskcheck_file = abspath(maskcheck_filename)

    return(maskcheck_file)
Example #8
def aseg_to_tissuemaps(aseg):
    from nipype import config, logging
    config.enable_debug_mode()
    logging.update_logging(config)
    from nibabel import load, save, Nifti1Image
    from numpy import zeros_like
    from os.path import abspath
    aseg_nifti = load(aseg)
    aseg_data = aseg_nifti.get_data()
    cortical_labels = [3, 42]
    subcortical_labels =[8, 10, 11, 12, 13, 17, 18, 26, 47, 49, 50, 51, 52, 53, 54, 58]

    #create binary masks: voxels whose aseg label is in the cortical/subcortical label lists are set to 1
    cortical_data = zeros_like(aseg_data)
    for x in cortical_labels:
        cortical_data[aseg_data == x] = 1
    cortical_nifti = Nifti1Image(cortical_data, aseg_nifti.affine)
    
    subcort_data = zeros_like(aseg_data) 
    for x in subcortical_labels:
        subcort_data[aseg_data == x] = 1
    subcort_nifti = Nifti1Image(subcort_data, aseg_nifti.affine)
    
    save(subcort_nifti, "subcortical_gm.nii.gz")
    save(cortical_nifti, "cortical_gm.nii.gz")
    subcort_file = abspath("subcortical_gm.nii.gz")
    cortical_file = abspath("cortical_gm.nii.gz")
    gm_list = [subcort_file, cortical_file]
    return(gm_list)
Example #9
def run(args):
    """Get and process specific information"""
    project = gather_project_info()
    exp = gather_experiment_info(args.experiment, args.model)

    # Subject is always highest level of parameterization
    subject_list = determine_subjects(args.subjects)
    subj_source = make_subject_source(subject_list)

    # Get the full correct name for the experiment
    if args.experiment is None:
        exp_name = project["default_exp"]
    else:
        exp_name = args.experiment

    exp['exp_name'] = exp_name
    exp['model_name'] = args.model if args.model else ''

    # Set roots of output storage
    project['analysis_dir'] = op.join(project["analysis_dir"], exp_name)
    project['working_dir'] = op.join(project["working_dir"], exp_name,
                                     exp['model_name'])

    config.set("execution", "crashdump_dir", project["crash_dir"])
    if args.verbose > 0:
        config.set("logging", "filemanip_level", 'DEBUG')
        config.enable_debug_mode()
        logging.update_logging(config)

    if not op.exists(project['analysis_dir']):
        os.makedirs(project['analysis_dir'])

    workflows_dir = os.path.join(os.environ['FITZ_DIR'], exp['pipeline'],
                                 'workflows')
    if not op.isdir(workflows_dir):
        missing_pipe = 'raise'
        if missing_pipe == 'install':
            install(args)
        else:
            raise IOError("Run `fitz install` to set up your pipeline of "
                          "workflows, %s does not exist." % workflows_dir)
    sys.path.insert(0, workflows_dir)
    for wf_name in args.workflows:
        try:
            mod = imp.find_module(wf_name)
            wf_module = imp.load_module("wf", *mod)
        except (IOError, ImportError):
            print "Could not find any workflows matching %s" % wf_name
            raise

        params = update_params(wf_module, exp)
        workflow = wf_module.workflow_manager(
            project, params, args, subj_source)

        # Run the pipeline
        plugin, plugin_args = determine_engine(args)
        workflow.write_graph(str(workflow)+'.dot', format='svg')
        if not args.dontrun:
            workflow.run(plugin, plugin_args)
Example #10
def run(args):
    """Get and process specific information"""
    project = gather_project_info()
    exp = gather_experiment_info(args.experiment, args.model)

    # Subject is always highest level of parameterization
    subject_list = determine_subjects(args.subjects)
    subj_source = make_subject_source(subject_list)

    # Get the full correct name for the experiment
    if args.experiment is None:
        exp_name = project["default_exp"]
    else:
        exp_name = args.experiment

    exp['exp_name'] = exp_name
    exp['model_name'] = args.model if args.model else ''

    # Set roots of output storage
    project['analysis_dir'] = op.join(project["analysis_dir"], exp_name)
    project['working_dir'] = op.join(project["working_dir"], exp_name,
                                     exp['model_name'])

    config.set("execution", "crashdump_dir", project["crash_dir"])
    if args.verbose > 0:
        config.set("logging", "filemanip_level", 'DEBUG')
        config.enable_debug_mode()
        logging.update_logging(config)

    if not op.exists(project['analysis_dir']):
        os.makedirs(project['analysis_dir'])

    workflows_dir = os.path.join(os.environ['FITZ_DIR'], exp['pipeline'],
                                 'workflows')
    if not op.isdir(workflows_dir):
        missing_pipe = 'raise'
        if missing_pipe == 'install':
            install(args)
        else:
            raise IOError("Run `fitz install` to set up your pipeline of "
                          "workflows, %s does not exist." % workflows_dir)
    sys.path.insert(0, workflows_dir)
    for wf_name in args.workflows:
        try:
            mod = imp.find_module(wf_name)
            wf_module = imp.load_module("wf", *mod)
        except (IOError, ImportError):
            print "Could not find any workflows matching %s" % wf_name
            raise

        params = update_params(wf_module, exp)
        workflow = wf_module.workflow_manager(project, params, args,
                                              subj_source)

        # Run the pipeline
        plugin, plugin_args = determine_engine(args)
        workflow.write_graph(str(workflow) + '.dot', format='svg')
        if not args.dontrun:
            workflow.run(plugin, plugin_args)
Example #11
    def process(self):
        """Executes the fMRI pipeline workflow and returns True if successful."""
        # Enable the use of the W3C PROV data model to capture and represent provenance in Nipype
        # config.enable_provenance()

        # Process time
        self.now = datetime.datetime.now().strftime("%Y%m%d_%H%M")

        cmp_deriv_subject_directory, nipype_deriv_subject_directory, nipype_fmri_pipeline_subject_dir = \
            self.init_subject_derivatives_dirs()

        # Initialization
        log_file = os.path.join(nipype_fmri_pipeline_subject_dir,
                                "pypeline.log")
        if os.path.isfile(log_file):
            os.unlink(log_file)

        config.update_config({
            "logging": {
                "workflow_level": "INFO",
                "interface_level": "INFO",
                "log_directory": nipype_fmri_pipeline_subject_dir,
                "log_to_file": True,
            },
            "execution": {
                "remove_unnecessary_outputs": False,
                "stop_on_first_crash": True,
                "stop_on_first_rerun": False,
                "try_hard_link_datasink": True,
                "use_relative_paths": True,
                "crashfile_format": "txt",
            },
        })

        logging.update_logging(config)

        iflogger = logging.getLogger("nipype.interface")
        iflogger.info("**** Processing ****")

        flow = self.create_pipeline_flow(
            cmp_deriv_subject_directory=cmp_deriv_subject_directory,
            nipype_deriv_subject_directory=nipype_deriv_subject_directory,
        )
        flow.write_graph(graph2use="colored", format="svg", simple_form=False)
        # Create dictionary of arguments passed to plugin_args
        plugin_args = {
            'maxtasksperchild': 1,
            'n_procs': self.number_of_cores,
            'raise_insufficient': False,
        }
        flow.run(plugin="MultiProc", plugin_args=plugin_args)

        iflogger.info("**** Processing finished ****")

        return True
def combine_masks(mask1,mask2):
    from nipype.interfaces.fsl.utils import Merge
    from os.path import abspath
    from nipype import config, logging
    config.enable_debug_mode()
    logging.update_logging(config)

    vols = []
    vols.append(mask1)
    vols.append(mask2)

    return(vols)
Example #13
def _create_singleSession(dataDict, master_config, interpMode, pipeline_name):
    """
    create singleSession workflow on a single session

    This is the main function to call when processing a data set with T1 & T2
    data.  ExperimentBaseDirectoryPrefix is the base of the directory to place results, T1Images & T2Images
    are the lists of images to be used in the auto-workup. atlas_fname_wpath is
    the path and filename of the atlas to use.
    """
    assert 'tissue_classify' in master_config['components'] or \
           'auxlmk' in master_config['components'] or \
           'denoise' in master_config['components'] or \
           'landmark' in master_config['components'] or \
           'segmentation' in master_config['components'] or \
           'malf_2012_neuro' in master_config['components']

    from nipype import config, logging

    config.update_config(master_config)  # Set universal pipeline options
    logging.update_logging(config)

    from workflows.baseline import generate_single_session_template_WF

    project = dataDict['project']
    subject = dataDict['subject']
    session = dataDict['session']

    blackListFileName = dataDict['T1s'][0] + '_noDenoise'
    isBlackList = os.path.isfile(blackListFileName)

    pname = "{0}_{1}_{2}".format(master_config['workflow_phase'], subject,
                                 session)
    onlyT1 = not (len(dataDict['T2s']) > 0)
    sessionWorkflow = generate_single_session_template_WF(
        project,
        subject,
        session,
        onlyT1,
        master_config,
        phase=master_config['workflow_phase'],
        interpMode=interpMode,
        pipeline_name=pipeline_name,
        doDenoise=(not isBlackList))
    sessionWorkflow.base_dir = master_config['cachedir']

    sessionWorkflow_inputsspec = sessionWorkflow.get_node('inputspec')
    sessionWorkflow_inputsspec.inputs.T1s = dataDict['T1s']
    sessionWorkflow_inputsspec.inputs.T2s = dataDict['T2s']
    sessionWorkflow_inputsspec.inputs.PDs = dataDict['PDs']
    sessionWorkflow_inputsspec.inputs.FLs = dataDict['FLs']
    sessionWorkflow_inputsspec.inputs.OTHERs = dataDict['OTs']
    return sessionWorkflow
Example #14
def extract_fisherZ(subj_betas, clusters, cluster_table):
    from os import path
    from numpy import genfromtxt, savetxt
    from nipype import config, logging
    config.enable_debug_mode()
    logging.update_logging(config)
    
    header = []
    clusters = genfromtxt(cluster_table, delimiter='\t', dtype=None, skip_header=1)
    
    savetxt(file, matrix, delimiter='\t', header=header)

    return(table_path)
Example #15
def _create_singleSession(dataDict, master_config, interpMode, pipeline_name):
    """
    create singleSession workflow on a single session

    This is the main function to call when processing a data set with T1 & T2
    data.  ExperimentBaseDirectoryPrefix is the base of the directory to place results, T1Images & T2Images
    are the lists of images to be used in the auto-workup. atlas_fname_wpath is
    the path and filename of the atlas to use.
    """
    assert 'tissue_classify' in master_config['components'] or \
           'auxlmk' in master_config['components'] or \
           'denoise' in master_config['components'] or \
           'landmark' in master_config['components'] or \
           'segmentation' in master_config['components'] or \
           'malf_2012_neuro' in master_config['components']

    from nipype import config, logging

    config.update_config(master_config)  # Set universal pipeline options
    logging.update_logging(config)

    from workflows.baseline import generate_single_session_template_WF

    project = dataDict['project']
    subject = dataDict['subject']
    session = dataDict['session']

    blackListFileName = dataDict['T1s'][0] + '_noDenoise'
    isBlackList = os.path.isfile(blackListFileName)

    pname = "{0}_{1}_{2}".format(master_config['workflow_phase'], subject, session)
    onlyT1 = not (len(dataDict['T2s']) > 0)
    if onlyT1:
        print("T1 Only processing starts ...")
    else:
        print("Multimodal processing starts ...")
    sessionWorkflow = generate_single_session_template_WF(project, subject, session, onlyT1, master_config,
                                                          phase=master_config['workflow_phase'],
                                                          interpMode=interpMode,
                                                          pipeline_name=pipeline_name,
                                                          doDenoise=(not isBlackList))
    sessionWorkflow.base_dir = master_config['cachedir']

    sessionWorkflow_inputsspec = sessionWorkflow.get_node('inputspec')
    sessionWorkflow_inputsspec.inputs.T1s = dataDict['T1s']
    sessionWorkflow_inputsspec.inputs.T2s = dataDict['T2s']
    sessionWorkflow_inputsspec.inputs.PDs = dataDict['PDs']
    sessionWorkflow_inputsspec.inputs.FLs = dataDict['FLs']
    sessionWorkflow_inputsspec.inputs.OTHERs = dataDict['OTs']
    return sessionWorkflow
def convertafni(in_file):
    from nipype.interfaces.afni.utils import AFNItoNIFTI
    from os import path
    from nipype import config, logging
    config.enable_debug_mode()
    logging.update_logging(config)

    cvt = AFNItoNIFTI()
    cvt.inputs.in_file = in_file
    cvt.inputs.out_file = 'func_filtered.nii.gz'
    cvt.run()

    out_file = path.abspath('func_filtered.nii.gz')
    return(out_file)
Example #17
def relabel_fast(fast_tissue_list):
    from nipype import config, logging
    from os.path import split, join
    from os import rename
    config.enable_debug_mode()
    logging.update_logging(config)
    tissue_list = sorted(fast_tissue_list)
    csf = tissue_list[0]
    wm = tissue_list[2]
    [wd, csf_file] = split(csf)
    [wd, wm_file] = split(wm)
    # use join() so the directory and the new filename are separated correctly
    rename(csf, join(wd, 'csf.nii.gz'))
    rename(wm, join(wd, 'wm.nii.gz'))
    wm_csf = [join(wd, 'csf.nii.gz'), join(wd, 'wm.nii.gz')]
    return(wm_csf)
Example #18
def determine_clusters(clusters_table, min_clust_size):
    from os import path
    from numpy import genfromtxt
    from nipype import config, logging
    config.enable_debug_mode()
    logging.update_logging(config)

    clusters = genfromtxt(clusters_table, delimiter='\t', dtype=None, skip_header=1)
    clusters_to_extract = []

    # each row of the cluster table is (cluster index, cluster size, ...);
    # keep the indices of clusters at least min_clust_size large
    for row in clusters:
        if row[1] >= min_clust_size:
            clusters_to_extract.append(row[0])

    return(clusters_to_extract)
Example #19
def execute_task(pckld_task, node_config, updatehash):
    from socket import gethostname
    from traceback import format_exc
    from nipype import config, logging
    traceback=None
    result=None
    try:
        config.update_config(node_config)
        logging.update_logging(config)
        from cPickle import loads
        task = loads(pckld_task)
        result = task.run(updatehash=updatehash)
    except:
        traceback = format_exc()
        result = task.result
    return result, traceback, gethostname()
Example #20
def create_and_run_p3_workflow(imported_workflows, settings):
    """
        Create main workflow
    """

    # Set nipype debug messages if enabled
    if settings['debug']:
        config.set('logging', 'workflow_level', 'DEBUG')
    # always hash on content
    config.set('execution', 'hash_method', 'content')
    # stop on first crash
    config.set('execution', 'stop_on_first_crash', 'true')
    logging.update_logging(config)

    # define subworkflows from imported workflows
    subworkflows = generate_subworkflows(imported_workflows, settings)

    # create a workflow
    p3 = Workflow(name='p3_pipeline', base_dir=settings['tmp_dir'])

    # get connections
    connections = generate_connections(subworkflows, settings)

    # connect nodes
    p3.connect(connections)

    # apply sideloads
    sideload_nodes(p3, connections, settings)

    # Create graph images
    p3.write_graph(os.path.join(settings['output_dir'], 'graph', 'p3'),
                   graph2use='flat',
                   simple_form=False)
    p3.write_graph(os.path.join(settings['output_dir'], 'graph', 'p3'),
                   graph2use='colored')

    # copy the graph files to the output directory
    # copy2(os.path.join(settings['tmp_dir'],'p3_pipeline','graph.png'),settings['output_dir'])
    # copy2(os.path.join(settings['tmp_dir'],'p3_pipeline','graph_detailed.png'),settings['output_dir'])

    # Run pipeline (check multiproc setting)
    if not settings['disable_run']:
        if settings['multiproc']:
            p3.run(plugin='MultiProc')
        else:
            p3.run()
Example #21
def prep_logging(opts, output_folder):
    cli_file = f'{output_folder}/rabies_{opts.rabies_stage}.pkl'
    if os.path.isfile(cli_file):
        raise ValueError(f"""
            A previous run was indicated by the presence of {cli_file}.
            This can lead to inconsistencies between previous outputs and the log files.
            To prevent this, you are required to manually remove {cli_file}, and we 
            recommend also removing previous datasinks from the {opts.rabies_stage} RABIES step.
            """)

    with open(cli_file, 'wb') as handle:
        pickle.dump(opts, handle, protocol=pickle.HIGHEST_PROTOCOL)

    # remove old versions of the log if already existing
    log_path = f'{output_folder}/rabies_{opts.rabies_stage}.log'
    if os.path.isfile(log_path):
        os.remove(log_path)

    config.update_config({'logging': {'log_directory': output_folder,
                                    'log_to_file': True}})

    # setting workflow logging level
    if opts.verbose==0:
        level="WARNING"
    elif opts.verbose==1:
        level="INFO"
    elif opts.verbose>=2:
        level="DEBUG"
        config.enable_debug_mode()
    else:
        raise ValueError(f"--verbose must be provided an integer of 0 or above. {opts.verbose} was provided instead.")

    # nipype hard-codes the 'pypeline.log' filename; we rename it after it is created, and change the handlers
    logging.update_logging(config)
    os.rename(f'{output_folder}/pypeline.log', log_path)
    # change the handlers path to the desired file
    for logger in logging.loggers.keys():
        log = logging.getLogger(logger)
        handler = log.handlers[0]
        handler.baseFilename = log_path

    # set the defined level of verbose
    log = logging.getLogger('nipype.workflow')
    log.setLevel(level)
    log.debug('Debug ON')
    return log
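A hedged sketch of how prep_logging might be invoked, assuming an argparse-style options object exposing the two attributes the function reads (rabies_stage and verbose); the output folder below is a placeholder and must exist before the call.

import os
from types import SimpleNamespace

output_folder = '/tmp/rabies_outputs'  # placeholder path
os.makedirs(output_folder, exist_ok=True)

opts = SimpleNamespace(rabies_stage='preprocess', verbose=1)
log = prep_logging(opts, output_folder)
log.info('workflow logging redirected to %s/rabies_preprocess.log', output_folder)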
def combine_par(par_list):
    from nipype import config, logging
    config.enable_debug_mode()
    logging.update_logging(config)
    from os.path import abspath
    from numpy import vstack, savetxt, genfromtxt

    motion = genfromtxt(par_list[0], dtype=float)
    if len(par_list)>1:
        for file in par_list[1:]:
            temp = genfromtxt(file, dtype=float)
            motion = vstack((motion, temp))

    filename = 'motion.par'
    savetxt(filename, motion, delimiter=' ')
    combined_par = abspath(filename)
    return(combined_par)
def create_coreg_plot(epi,anat):
    import os
    from nipype import config, logging
    config.enable_debug_mode()
    logging.update_logging(config)
    from nilearn import plotting

    coreg_filename='coregistration.png'
    display = plotting.plot_anat(epi, display_mode='ortho',
                                 draw_cross=False,
                                 title = 'coregistration to anatomy')
    display.add_edges(anat)
    display.savefig(coreg_filename)
    display.close()
    coreg_file = os.path.abspath(coreg_filename)

    return(coreg_file)
Example #24
def brightthresh(func):
    import nibabel as nib
    from numpy import median, where

    from nipype import config, logging
    config.enable_debug_mode()
    logging.update_logging(config)

    func_nifti1 = nib.load(func)
    func_data = func_nifti1.get_data()
    func_data = func_data.astype(float)

    brain_voxels = where(func_data > 0)
    median_thresh = median(func_data[brain_voxels])  # median intensity, not median voxel index
    bright_thresh = 0.75 * median_thresh

    return (bright_thresh)
def brightthresh(func):
    import nibabel as nib
    from numpy import median, where

    from nipype import config, logging
    config.enable_debug_mode()
    logging.update_logging(config)

    func_nifti1 = nib.load(func)
    func_data = func_nifti1.get_data()
    func_data = func_data.astype(float)

    brain_voxels = where(func_data > 0)
    median_thresh = median(func_data[brain_voxels])  # median intensity, not median voxel index
    bright_thresh = 0.75 * median_thresh

    return(bright_thresh)
Example #26
def create_coreg_plot(epi, anat):
    import os
    from nipype import config, logging
    config.enable_debug_mode()
    logging.update_logging(config)
    from nilearn import plotting

    coreg_filename = 'coregistration.png'
    display = plotting.plot_anat(epi,
                                 display_mode='ortho',
                                 draw_cross=False,
                                 title='coregistration to anatomy')
    display.add_edges(anat)
    display.savefig(coreg_filename)
    display.close()
    coreg_file = os.path.abspath(coreg_filename)

    return (coreg_file)
Example #27
def check_mask_coverage(epi, brainmask):
    import os
    from nipype import config, logging
    config.enable_debug_mode()
    logging.update_logging(config)
    from nilearn import plotting

    maskcheck_filename = 'maskcheck.png'
    display = plotting.plot_anat(epi,
                                 display_mode='ortho',
                                 draw_cross=False,
                                 title='brainmask coverage')
    display.add_contours(brainmask, levels=[.5], colors='r')
    display.savefig(maskcheck_filename)
    display.close()
    maskcheck_file = os.path.abspath(maskcheck_filename)

    return (maskcheck_file)
def combine_fd(fd_list):
    from nipype import config, logging
    config.enable_debug_mode()
    logging.update_logging(config)
    from os.path import abspath
    from numpy import asarray, savetxt

    motion = open(fd_list[0]).read().splitlines()

    if len(fd_list)>1:
        for file in fd_list[1:]:
            temp = open(file).read().splitlines()
            motion = motion+temp

    motion = asarray(motion).astype(float)
    filename = 'FD_full.txt'
    savetxt(filename,motion)
    combined_fd = abspath(filename)
    return(combined_fd)
Example #29
def bandpass_filter(in_file, lowpass, highpass, TR):
    import numpy as np
    import nibabel as nb
    from os import path
    from nipype.interfaces.afni.preprocess import Bandpass
    from nipype import config, logging
    config.enable_debug_mode()
    logging.update_logging(config)

    out_file = 'func_filtered.nii'
    bp = Bandpass()
    bp.inputs.highpass = highpass
    bp.inputs.lowpass = lowpass
    bp.inputs.in_file = in_file
    bp.inputs.tr = TR
    bp.inputs.out_file = out_file
    bp.inputs.outputtype = 'NIFTI'
    bp.run()

    out_file = path.abspath(out_file)
    return (out_file)
def summarize_motion(motion_df_file, motion_file, vols_to_censor, TR):
    from nipype import config, logging
    config.enable_debug_mode()
    logging.update_logging(config)
    from os.path import dirname, basename
    from numpy import asarray, mean, insert, zeros, sort
    from pandas import DataFrame, Series, read_csv

    motion_df = read_csv(motion_df_file, index_col=0)

    motion = asarray(open(motion_file).read().splitlines()).astype(float)
    censvols = asarray(open(vols_to_censor).read().splitlines()).astype(int)
    sec_not_censored = (len(motion)-len(censvols))*TR

    if censvols[0]>0:
        periods_not_censored = insert(censvols,0,0)
    else:
        periods_not_censored = censvols

    if periods_not_censored[-1]<len(motion):
        periods_not_censored = insert(periods_not_censored,len(periods_not_censored),len(motion))

    lengths = zeros(len(periods_not_censored)-1)
    for a in range(0,len(lengths)):
        lengths[a] = periods_not_censored[a+1] - periods_not_censored[a] - 1

    lengths = lengths*TR

    # sort lengths in descending order
    lengths = sort(lengths)[::-1]

    fp = dirname(motion_file)
    subject = basename(fp)

    motion_df.loc[subject] = [mean(motion),max(motion),len(censvols),len(motion),sec_not_censored,lengths]
    motion_df.to_csv(motion_df_file)

    return()
Example #31
def main():
    """Entry point"""
    from nipype import config, logging
    from regseg.workflows.phantoms import phantoms_wf

    options = get_parser().parse_args()

    # Setup multiprocessing
    nthreads = options.nthreads
    if nthreads == 0:
        from multiprocessing import cpu_count
        nthreads = cpu_count()

    cfg = {}
    cfg['plugin'] = 'Linear'
    if nthreads > 1:
        cfg['plugin'] = 'MultiProc'
        cfg['plugin_args'] = {'n_proc': nthreads}

    # Setup work_dir
    if not op.exists(options.work_dir):
        os.makedirs(options.work_dir)

    # Setup logging dir
    log_dir = op.abspath('logs')
    cfg['logging'] = {'log_directory': log_dir, 'log_to_file': True}
    if not op.exists(log_dir):
        os.makedirs(log_dir)

    config.update_config(cfg)
    logging.update_logging(config)

    wf = phantoms_wf(options, cfg)
    wf.base_dir = options.work_dir
    wf.write_graph(graph2use='hierarchical', format='pdf', simple_form=True)
    wf.run()
def create_workflow(wf_base_dir, input_anat, oasis_path):
    '''
    Method to create the nipype workflow that is executed for
    preprocessing the data

    Parameters
    ----------
    wf_base_dir : string
        filepath to the base directory to run the workflow
    input_anat : string
        filepath to the input file to run antsCorticalThickness.sh on
    oasis_path : string
        path to the OASIS template directory

    Returns
    -------
    wf : nipype.pipeline.engine.Workflow instance
        the workflow to be run for preprocessing
    '''

    # Import packages
    from act_interface import antsCorticalThickness
    import nipype.interfaces.io as nio
    import nipype.pipeline.engine as pe
    import nipype.interfaces.utility as util
    from nipype.interfaces.utility import Function
    from nipype import logging as np_logging
    from nipype import config
    import os

    # Init variables
    oasis_trt_20 = os.path.join(oasis_path,
                                'OASIS-TRT-20_jointfusion_DKT31_CMA_labels_in_OASIS-30.nii')

    # Setup nipype workflow
    if not os.path.exists(wf_base_dir):
        os.makedirs(wf_base_dir)
    wf = pe.Workflow(name='thickness_workflow')
    wf.base_dir = wf_base_dir

    # Init log directory
    log_dir = wf_base_dir

    # Define antsCorticalThickness node
    thickness = pe.Node(antsCorticalThickness(), name='thickness')

    # Set antsCorticalThickness inputs
    thickness.inputs.dimension = 3
    thickness.inputs.segmentation_iterations = 1
    thickness.inputs.segmentation_weight = 0.25
    thickness.inputs.input_skull = input_anat #-a
    thickness.inputs.template = oasis_path + 'T_template0.nii.gz' #-e
    thickness.inputs.brain_prob_mask = oasis_path + \
                                       'T_template0_BrainCerebellumProbabilityMask.nii.gz'  #-m
    thickness.inputs.brain_seg_priors = oasis_path + \
                                        'Priors2/priors%d.nii.gz'  #-p
    thickness.inputs.intensity_template = oasis_path + \
                                          'T_template0_BrainCerebellum.nii.gz'  #-t
    thickness.inputs.extraction_registration_mask = oasis_path + \
                                                    'T_template0_BrainCerebellumExtractionMask.nii.gz'  #-f
    thickness.inputs.out_prefix = 'OUTPUT_' #-o
    thickness.inputs.keep_intermediate_files = 0 #-k

    # Node to run 3dROIstats
    ROIstats = pe.Node(util.Function(input_names=['mask','thickness_normd'], 
                                     output_names=['roi_stats_file'], 
                                     function=roi_func),
                       name='ROIstats')
    wf.connect(thickness, 'cortical_thickness_normalized', 
               ROIstats, 'thickness_normd')
    ROIstats.inputs.mask = oasis_trt_20

    # Create datasink node
    datasink = pe.Node(nio.DataSink(), name='sinker')
    datasink.inputs.base_directory = wf_base_dir

    # Connect thickness outputs to datasink
    wf.connect(thickness, 'brain_extraction_mask', 
               datasink, 'output.@brain_extr_mask')
    wf.connect(thickness, 'brain_segmentation', 
               datasink, 'output.@brain_seg')
    wf.connect(thickness, 'brain_segmentation_N4', 
               datasink, 'output.@brain_seg_N4')
    wf.connect(thickness, 'brain_segmentation_posteriors_1', 
               datasink, 'output.@brain_seg_post_1')
    wf.connect(thickness, 'brain_segmentation_posteriors_2', 
               datasink, 'output.@brain_seg_post_2')
    wf.connect(thickness, 'brain_segmentation_posteriors_3', 
               datasink, 'output.@brain_seg_post_3')
    wf.connect(thickness, 'brain_segmentation_posteriors_4', 
               datasink, 'output.@brain_seg_post_4')
    wf.connect(thickness, 'brain_segmentation_posteriors_5', 
               datasink, 'output.@brain_seg_post_5')
    wf.connect(thickness, 'brain_segmentation_posteriors_6', 
               datasink, 'output.@brain_seg_post_6')
    wf.connect(thickness, 'cortical_thickness', 
               datasink, 'output.@cortical_thickness')
    wf.connect(thickness, 'cortical_thickness_normalized', 
               datasink,'output.@cortical_thickness_normalized')
    # Connect ROI stats output text file to datasink
    wf.connect(ROIstats, 'roi_stats_file', datasink, 'output.@ROIstats')

    # Setup crashfile directory and logging
    wf.config['execution'] = {'hash_method': 'timestamp', 
                              'crashdump_dir': '/home/ubuntu/crashes'}
    config.update_config({'logging': {'log_directory': log_dir, 
                                      'log_to_file': True}})
    np_logging.update_logging(config)

    # Return the workflow
    return wf
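A hedged usage sketch with placeholder paths. Because oasis_path is concatenated directly with the template filenames, it is assumed to end with a trailing slash.

# Hypothetical invocation; all paths are placeholders
wf = create_workflow(wf_base_dir='/tmp/thickness_wf',
                     input_anat='/data/sub-01_T1w.nii.gz',
                     oasis_path='/templates/OASIS-30_Atropos_template/')
wf.run()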
Example #33
def builder(subject_id,
            subId,
            project_dir,
            data_dir,
            output_dir,
            output_final_dir,
            output_interm_dir,
            layout,
            anat=None,
            funcs=None,
            fmaps=None,
            task_name='',
            session=None,
            apply_trim=False,
            apply_dist_corr=False,
            apply_smooth=False,
            apply_filter=False,
            mni_template='2mm',
            apply_n4=True,
            ants_threads=8,
            readable_crash_files=False,
            write_logs=True):
    """
    Core function that returns a workflow. See wfmaker for more details.

    Args:
        subject_id: name of subject folder for final outputted sub-folder name
        subId: abbreviate name of subject for intermediate outputted sub-folder name
        project_dir: full path to root of project
        data_dir: full path to raw data files
        output_dir: upper level output dir (others will be nested within this)
        output_final_dir: final preprocessed sub-dir name
        output_interm_dir: intermediate preprocess sub-dir name
        layout: BIDS layout instance
    """

    ##################
    ### PATH SETUP ###
    ##################
    if session is not None:
        session = int(session)
        if session < 10:
            session = '0' + str(session)
        else:
            session = str(session)

    # Set MNI template
    MNItemplate = os.path.join(get_resource_path(),
                               'MNI152_T1_' + mni_template + '_brain.nii.gz')
    MNImask = os.path.join(get_resource_path(),
                           'MNI152_T1_' + mni_template + '_brain_mask.nii.gz')
    MNItemplatehasskull = os.path.join(get_resource_path(),
                                       'MNI152_T1_' + mni_template + '.nii.gz')

    # Set ANTs files
    bet_ants_template = os.path.join(get_resource_path(),
                                     'OASIS_template.nii.gz')
    bet_ants_prob_mask = os.path.join(
        get_resource_path(), 'OASIS_BrainCerebellumProbabilityMask.nii.gz')
    bet_ants_registration_mask = os.path.join(
        get_resource_path(), 'OASIS_BrainCerebellumRegistrationMask.nii.gz')

    #################################
    ### NIPYPE IMPORTS AND CONFIG ###
    #################################
    # Update nipype global config because workflow.config[] = ..., doesn't seem to work
    # Can't store nipype config/rc file in container anyway so set them globaly before importing and setting up workflow as suggested here: http://nipype.readthedocs.io/en/latest/users/config_file.html#config-file

    # Create subject's intermediate directory before configuring nipype and the workflow because that's where we'll save log files in addition to intermediate files
    if not os.path.exists(os.path.join(output_interm_dir, subId, 'logs')):
        os.makedirs(os.path.join(output_interm_dir, subId, 'logs'))
    log_dir = os.path.join(output_interm_dir, subId, 'logs')
    from nipype import config
    if readable_crash_files:
        cfg = dict(execution={'crashfile_format': 'txt'})
        config.update_config(cfg)
    config.update_config({
        'logging': {
            'log_directory': log_dir,
            'log_to_file': write_logs
        },
        'execution': {
            'crashdump_dir': log_dir
        }
    })
    from nipype import logging
    logging.update_logging(config)

    # Now import everything else
    from nipype.interfaces.io import DataSink
    from nipype.interfaces.utility import Merge, IdentityInterface
    from nipype.pipeline.engine import Node, Workflow
    from nipype.interfaces.nipy.preprocess import ComputeMask
    from nipype.algorithms.rapidart import ArtifactDetect
    from nipype.interfaces.ants.segmentation import BrainExtraction, N4BiasFieldCorrection
    from nipype.interfaces.ants import Registration, ApplyTransforms
    from nipype.interfaces.fsl import MCFLIRT, TOPUP, ApplyTOPUP
    from nipype.interfaces.fsl.maths import MeanImage
    from nipype.interfaces.fsl import Merge as MERGE
    from nipype.interfaces.fsl.utils import Smooth
    from nipype.interfaces.nipy.preprocess import Trim
    from .interfaces import Plot_Coregistration_Montage, Plot_Quality_Control, Plot_Realignment_Parameters, Create_Covariates, Down_Sample_Precision, Create_Encoding_File, Filter_In_Mask

    ##################
    ### INPUT NODE ###
    ##################

    # Turn functional file list into interable Node
    func_scans = Node(IdentityInterface(fields=['scan']), name='func_scans')
    func_scans.iterables = ('scan', funcs)

    # Get TR for use in filtering below; we're assuming all BOLD runs have the same TR
    tr_length = layout.get_metadata(funcs[0])['RepetitionTime']

    #####################################
    ## TRIM ##
    #####################################
    if apply_trim:
        trim = Node(Trim(), name='trim')
        trim.inputs.begin_index = apply_trim

    #####################################
    ## DISTORTION CORRECTION ##
    #####################################

    if apply_dist_corr:
        # Get fmap file locations
        fmaps = [
            f.filename for f in layout.get(
                subject=subId, modality='fmap', extensions='.nii.gz')
        ]
        if not fmaps:
            raise IOError(
                "Distortion Correction requested but field map scans not found..."
            )

        # Get fmap metadata
        totalReadoutTimes, measurements, fmap_pes = [], [], []

        for i, fmap in enumerate(fmaps):
            # Grab total readout time for each fmap
            totalReadoutTimes.append(
                layout.get_metadata(fmap)['TotalReadoutTime'])

            # Grab measurements (for some reason pyBIDS doesn't expose the dcm_meta... fields from the side-car json file, and json.load doesn't either, so instead read the header with nibabel to determine the number of scans)
            measurements.append(nib.load(fmap).header['dim'][4])

            # Get phase encoding direction
            fmap_pe = layout.get_metadata(fmap)["PhaseEncodingDirection"]
            fmap_pes.append(fmap_pe)

        encoding_file_writer = Node(interface=Create_Encoding_File(),
                                    name='create_encoding')
        encoding_file_writer.inputs.totalReadoutTimes = totalReadoutTimes
        encoding_file_writer.inputs.fmaps = fmaps
        encoding_file_writer.inputs.fmap_pes = fmap_pes
        encoding_file_writer.inputs.measurements = measurements
        encoding_file_writer.inputs.file_name = 'encoding_file.txt'

        merge_to_file_list = Node(interface=Merge(2),
                                  infields=['in1', 'in2'],
                                  name='merge_to_file_list')
        merge_to_file_list.inputs.in1 = fmaps[0]
        merge_to_file_list.inputs.in2 = fmaps[1]

        # Merge AP and PA distortion correction scans
        merger = Node(interface=MERGE(dimension='t'), name='merger')
        merger.inputs.output_type = 'NIFTI_GZ'
        merger.inputs.in_files = fmaps
        merger.inputs.merged_file = 'merged_epi.nii.gz'

        # Create distortion correction map
        topup = Node(interface=TOPUP(), name='topup')
        topup.inputs.output_type = 'NIFTI_GZ'

        # Apply distortion correction to other scans
        apply_topup = Node(interface=ApplyTOPUP(), name='apply_topup')
        apply_topup.inputs.output_type = 'NIFTI_GZ'
        apply_topup.inputs.method = 'jac'
        apply_topup.inputs.interp = 'spline'

    ###################################
    ### REALIGN ###
    ###################################
    realign_fsl = Node(MCFLIRT(), name="realign")
    realign_fsl.inputs.cost = 'mutualinfo'
    realign_fsl.inputs.mean_vol = True
    realign_fsl.inputs.output_type = 'NIFTI_GZ'
    realign_fsl.inputs.save_mats = True
    realign_fsl.inputs.save_rms = True
    realign_fsl.inputs.save_plots = True

    ###################################
    ### MEAN EPIs ###
    ###################################
    # For coregistration after realignment
    mean_epi = Node(MeanImage(), name='mean_epi')
    mean_epi.inputs.dimension = 'T'

    # For after normalization is done to plot checks
    mean_norm_epi = Node(MeanImage(), name='mean_norm_epi')
    mean_norm_epi.inputs.dimension = 'T'

    ###################################
    ### MASK, ART, COV CREATION ###
    ###################################
    compute_mask = Node(ComputeMask(), name='compute_mask')
    compute_mask.inputs.m = .05

    art = Node(ArtifactDetect(), name='art')
    art.inputs.use_differences = [True, False]
    art.inputs.use_norm = True
    art.inputs.norm_threshold = 1
    art.inputs.zintensity_threshold = 3
    art.inputs.mask_type = 'file'
    art.inputs.parameter_source = 'FSL'

    make_cov = Node(Create_Covariates(), name='make_cov')

    ################################
    ### N4 BIAS FIELD CORRECTION ###
    ################################
    if apply_n4:
        n4_correction = Node(N4BiasFieldCorrection(), name='n4_correction')
        n4_correction.inputs.copy_header = True
        n4_correction.inputs.save_bias = False
        n4_correction.inputs.num_threads = ants_threads
        n4_correction.inputs.input_image = anat

    ###################################
    ### BRAIN EXTRACTION ###
    ###################################
    brain_extraction_ants = Node(BrainExtraction(), name='brain_extraction')
    brain_extraction_ants.inputs.dimension = 3
    brain_extraction_ants.inputs.use_floatingpoint_precision = 1
    brain_extraction_ants.inputs.num_threads = ants_threads
    brain_extraction_ants.inputs.brain_probability_mask = bet_ants_prob_mask
    brain_extraction_ants.inputs.keep_temporary_files = 1
    brain_extraction_ants.inputs.brain_template = bet_ants_template
    brain_extraction_ants.inputs.extraction_registration_mask = bet_ants_registration_mask
    brain_extraction_ants.inputs.out_prefix = 'bet'

    ###################################
    ### COREGISTRATION ###
    ###################################
    coregistration = Node(Registration(), name='coregistration')
    coregistration.inputs.float = False
    coregistration.inputs.output_transform_prefix = "meanEpi2highres"
    coregistration.inputs.transforms = ['Rigid']
    coregistration.inputs.transform_parameters = [(0.1, ), (0.1, )]
    coregistration.inputs.number_of_iterations = [[1000, 500, 250, 100]]
    coregistration.inputs.dimension = 3
    coregistration.inputs.num_threads = ants_threads
    coregistration.inputs.write_composite_transform = True
    coregistration.inputs.collapse_output_transforms = True
    coregistration.inputs.metric = ['MI']
    coregistration.inputs.metric_weight = [1]
    coregistration.inputs.radius_or_number_of_bins = [32]
    coregistration.inputs.sampling_strategy = ['Regular']
    coregistration.inputs.sampling_percentage = [0.25]
    coregistration.inputs.convergence_threshold = [1e-08]
    coregistration.inputs.convergence_window_size = [10]
    coregistration.inputs.smoothing_sigmas = [[3, 2, 1, 0]]
    coregistration.inputs.sigma_units = ['mm']
    coregistration.inputs.shrink_factors = [[4, 3, 2, 1]]
    coregistration.inputs.use_estimate_learning_rate_once = [True]
    coregistration.inputs.use_histogram_matching = [False]
    coregistration.inputs.initial_moving_transform_com = True
    coregistration.inputs.output_warped_image = True
    coregistration.inputs.winsorize_lower_quantile = 0.01
    coregistration.inputs.winsorize_upper_quantile = 0.99

    ###################################
    ### NORMALIZATION ###
    ###################################
    # Settings Explanations
    # Only a few key settings are worth adjusting and most others relate to how ANTs optimizer starts or iterates and won't make a ton of difference
    # Brian Avants referred to these settings as the last "best tested" when he was aligning fMRI data: https://github.com/ANTsX/ANTsRCore/blob/master/R/antsRegistration.R#L275
    # Things that matter the most:
    # smoothing_sigmas:
    # how much gaussian smoothing to apply when performing registration, probably want the upper limit of this to match the resolution that the data is collected at e.g. 3mm
    # Old settings [[3,2,1,0]]*3
    # shrink_factors
    # The coarseness with which to do registration
    # Old settings [[8,4,2,1]] * 3
    # >= 8 may result in some problems, causing big chunks of cortex with little fine-grained spatial structure to be moved to other parts of cortex
    # Other settings
    # transform_parameters:
    # how much regularization to do for fitting that transformation
    # for syn this pertains to both the gradient regularization term, and the flow, and elastic terms. Leave the syn settings alone as they seem to be the most well tested across published data sets
    # radius_or_number_of_bins
    # This is the bin size for MI metrics and 32 is probably adequate for most use cases. Increasing this might increase precision (e.g. to 64) but takes exponentially longer
    # use_histogram_matching
    # Use image intensity distribution to guide registration
    # Leave it on for within modality registration (e.g. T1 -> MNI), but off for between modality registration (e.g. EPI -> T1)
    # convergence_threshold
    # threshold for optimizer
    # convergence_window_size
    # how many samples should optimizer average to compute threshold?
    # sampling_strategy
    # what strategy should ANTs use to initialize the transform. Regular here refers to approximately random sampling around the center of the image mass

    normalization = Node(Registration(), name='normalization')
    normalization.inputs.float = False
    normalization.inputs.collapse_output_transforms = True
    normalization.inputs.convergence_threshold = [1e-06]
    normalization.inputs.convergence_window_size = [10]
    normalization.inputs.dimension = 3
    normalization.inputs.fixed_image = MNItemplate
    normalization.inputs.initial_moving_transform_com = True
    normalization.inputs.metric = ['MI', 'MI', 'CC']
    normalization.inputs.metric_weight = [1.0] * 3
    normalization.inputs.number_of_iterations = [[1000, 500, 250, 100],
                                                 [1000, 500, 250, 100],
                                                 [100, 70, 50, 20]]
    normalization.inputs.num_threads = ants_threads
    normalization.inputs.output_transform_prefix = 'anat2template'
    normalization.inputs.output_inverse_warped_image = True
    normalization.inputs.output_warped_image = True
    normalization.inputs.radius_or_number_of_bins = [32, 32, 4]
    normalization.inputs.sampling_percentage = [0.25, 0.25, 1]
    normalization.inputs.sampling_strategy = ['Regular', 'Regular', 'None']
    normalization.inputs.shrink_factors = [[8, 4, 2, 1]] * 3
    normalization.inputs.sigma_units = ['vox'] * 3
    normalization.inputs.smoothing_sigmas = [[3, 2, 1, 0]] * 3
    normalization.inputs.transforms = ['Rigid', 'Affine', 'SyN']
    normalization.inputs.transform_parameters = [(0.1, ), (0.1, ),
                                                 (0.1, 3.0, 0.0)]
    normalization.inputs.use_histogram_matching = True
    normalization.inputs.winsorize_lower_quantile = 0.005
    normalization.inputs.winsorize_upper_quantile = 0.995
    normalization.inputs.write_composite_transform = True

    # NEW SETTINGS (need to be adjusted; specifically shrink_factors and smoothing_sigmas need to be the same length)
    # normalization = Node(Registration(), name='normalization')
    # normalization.inputs.float = False
    # normalization.inputs.collapse_output_transforms = True
    # normalization.inputs.convergence_threshold = [1e-06, 1e-06, 1e-07]
    # normalization.inputs.convergence_window_size = [10]
    # normalization.inputs.dimension = 3
    # normalization.inputs.fixed_image = MNItemplate
    # normalization.inputs.initial_moving_transform_com = True
    # normalization.inputs.metric = ['MI', 'MI', 'CC']
    # normalization.inputs.metric_weight = [1.0]*3
    # normalization.inputs.number_of_iterations = [[1000, 500, 250, 100],
    #                                              [1000, 500, 250, 100],
    #                                              [100, 70, 50, 20]]
    # normalization.inputs.num_threads = ants_threads
    # normalization.inputs.output_transform_prefix = 'anat2template'
    # normalization.inputs.output_inverse_warped_image = True
    # normalization.inputs.output_warped_image = True
    # normalization.inputs.radius_or_number_of_bins = [32, 32, 4]
    # normalization.inputs.sampling_percentage = [0.25, 0.25, 1]
    # normalization.inputs.sampling_strategy = ['Regular',
    #                                           'Regular',
    #                                           'None']
    # normalization.inputs.shrink_factors = [[4, 3, 2, 1]]*3
    # normalization.inputs.sigma_units = ['vox']*3
    # normalization.inputs.smoothing_sigmas = [[2, 1], [2, 1], [3, 2, 1, 0]]
    # normalization.inputs.transforms = ['Rigid', 'Affine', 'SyN']
    # normalization.inputs.transform_parameters = [(0.1,),
    #                                              (0.1,),
    #                                              (0.1, 3.0, 0.0)]
    # normalization.inputs.use_histogram_matching = True
    # normalization.inputs.winsorize_lower_quantile = 0.005
    # normalization.inputs.winsorize_upper_quantile = 0.995
    # normalization.inputs.write_composite_transform = True

    ###################################
    ### APPLY TRANSFORMS AND SMOOTH ###
    ###################################
    merge_transforms = Node(Merge(2),
                            iterfield=['in2'],
                            name='merge_transforms')

    # Used for epi -> mni, via (coreg + norm)
    apply_transforms = Node(ApplyTransforms(),
                            iterfield=['input_image'],
                            name='apply_transforms')
    apply_transforms.inputs.input_image_type = 3
    apply_transforms.inputs.float = False
    apply_transforms.inputs.num_threads = 12
    apply_transforms.inputs.environ = {}
    apply_transforms.inputs.interpolation = 'BSpline'
    apply_transforms.inputs.invert_transform_flags = [False, False]
    apply_transforms.inputs.reference_image = MNItemplate

    # Used for t1 segmented -> mni, via (norm)
    apply_transform_seg = Node(ApplyTransforms(), name='apply_transform_seg')
    apply_transform_seg.inputs.input_image_type = 3
    apply_transform_seg.inputs.float = False
    apply_transform_seg.inputs.num_threads = 12
    apply_transform_seg.inputs.environ = {}
    apply_transform_seg.inputs.interpolation = 'MultiLabel'
    apply_transform_seg.inputs.invert_transform_flags = [False]
    apply_transform_seg.inputs.reference_image = MNItemplate

    ###################################
    ### PLOTS ###
    ###################################
    plot_realign = Node(Plot_Realignment_Parameters(), name="plot_realign")
    plot_qa = Node(Plot_Quality_Control(), name="plot_qa")
    plot_normalization_check = Node(Plot_Coregistration_Montage(),
                                    name="plot_normalization_check")
    plot_normalization_check.inputs.canonical_img = MNItemplatehasskull

    ############################################
    ### FILTER, SMOOTH, DOWNSAMPLE PRECISION ###
    ############################################
    # Use cosanlab_preproc for down sampling
    down_samp = Node(Down_Sample_Precision(), name="down_samp")

    # Use FSL for smoothing
    if apply_smooth:
        smooth = Node(Smooth(), name='smooth')
        if isinstance(apply_smooth, list):
            smooth.iterables = ("fwhm", apply_smooth)
        elif isinstance(apply_smooth, int) or isinstance(apply_smooth, float):
            smooth.inputs.fwhm = apply_smooth
        else:
            raise ValueError("apply_smooth must be a list or int/float")

    # Use cosanlab_preproc for low-pass filtering
    if apply_filter:
        lp_filter = Node(Filter_In_Mask(), name='lp_filter')
        lp_filter.inputs.mask = MNImask
        lp_filter.inputs.sampling_rate = tr_length
        lp_filter.inputs.high_pass_cutoff = 0
        if isinstance(apply_filter, list):
            lp_filter.iterables = ("low_pass_cutoff", apply_filter)
        elif isinstance(apply_filter, int) or isinstance(apply_filter, float):
            lp_filter.inputs.low_pass_cutoff = apply_filter
        else:
            raise ValueError("apply_filter must be a list or int/float")

    ###################
    ### OUTPUT NODE ###
    ###################
    # Collect all final outputs in the output dir and get rid of file name additions
    datasink = Node(DataSink(), name='datasink')
    if session:
        datasink.inputs.base_directory = os.path.join(output_final_dir,
                                                      subject_id)
        datasink.inputs.container = 'ses-' + session
    else:
        datasink.inputs.base_directory = output_final_dir
        datasink.inputs.container = subject_id

    # Substitutions that strip the auto-generated directory/prefix additions from output file names
    data_dir_parts = data_dir.split('/')[1:]
    if session:
        prefix = ['_scan_'] + data_dir_parts + [subject_id] + [
            'ses-' + session
        ] + ['func']
    else:
        prefix = ['_scan_'] + data_dir_parts + [subject_id] + ['func']
    func_scan_names = [os.path.split(elem)[-1] for elem in funcs]
    to_replace = []
    for elem in func_scan_names:
        bold_name = elem.split(subject_id + '_')[-1]
        bold_name = bold_name.split('.nii.gz')[0]
        to_replace.append(('..'.join(prefix + [elem]), bold_name))
    datasink.inputs.substitutions = to_replace
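    # For example (hypothetical file name): a scan called
    #   sub-01_task-rest_bold.nii.gz
    # whose datasink key would otherwise be the long '..'-joined prefix path
    # ends up saved simply as 'task-rest_bold' in the output directory.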

    #####################
    ### INIT WORKFLOW ###
    #####################
    # If we have sessions provide the full path to the subject's intermediate directory
    # and only rely on workflow init to create the session container *within* that directory
    # Otherwise just point to the intermediate directory and let the workflow init create the subject container within the intermediate directory
    if session:
        workflow = Workflow(name='ses_' + session)
        workflow.base_dir = os.path.join(output_interm_dir, subId)
    else:
        workflow = Workflow(name=subId)
        workflow.base_dir = output_interm_dir

    ############################
    ######### PART (1a) #########
    # func -> discorr -> trim -> realign
    # OR
    # func -> trim -> realign
    # OR
    # func -> discorr -> realign
    # OR
    # func -> realign
    ############################
    if apply_dist_corr:
        workflow.connect([(encoding_file_writer, topup, [('encoding_file',
                                                          'encoding_file')]),
                          (encoding_file_writer, apply_topup,
                           [('encoding_file', 'encoding_file')]),
                          (merger, topup, [('merged_file', 'in_file')]),
                          (func_scans, apply_topup, [('scan', 'in_files')]),
                          (topup, apply_topup,
                           [('out_fieldcoef', 'in_topup_fieldcoef'),
                            ('out_movpar', 'in_topup_movpar')])])
        if apply_trim:
            # Dist Corr + Trim
            workflow.connect([(apply_topup, trim, [('out_corrected', 'in_file')
                                                   ]),
                              (trim, realign_fsl, [('out_file', 'in_file')])])
        else:
            # Dist Corr + No Trim
            workflow.connect([(apply_topup, realign_fsl, [('out_corrected',
                                                           'in_file')])])
    else:
        if apply_trim:
            # No Dist Corr + Trim
            workflow.connect([(func_scans, trim, [('scan', 'in_file')]),
                              (trim, realign_fsl, [('out_file', 'in_file')])])
        else:
            # No Dist Corr + No Trim
            workflow.connect([
                (func_scans, realign_fsl, [('scan', 'in_file')]),
            ])

    ############################
    ######### PART (1n) #########
    # anat -> N4 -> bet
    # OR
    # anat -> bet
    ############################
    if apply_n4:
        workflow.connect([(n4_correction, brain_extraction_ants,
                           [('output_image', 'anatomical_image')])])
    else:
        brain_extraction_ants.inputs.anatomical_image = anat

    ##########################################
    ############### PART (2) #################
    # realign -> coreg -> mni (via t1)
    # t1 -> mni
    # covariate creation
    # plot creation
    ###########################################

    workflow.connect([
        (realign_fsl, plot_realign, [('par_file', 'realignment_parameters')]),
        (realign_fsl, plot_qa, [('out_file', 'dat_img')]),
        (realign_fsl, art, [('out_file', 'realigned_files'),
                            ('par_file', 'realignment_parameters')]),
        (realign_fsl, mean_epi, [('out_file', 'in_file')]),
        (realign_fsl, make_cov, [('par_file', 'realignment_parameters')]),
        (mean_epi, compute_mask, [('out_file', 'mean_volume')]),
        (compute_mask, art, [('brain_mask', 'mask_file')]),
        (art, make_cov, [('outlier_files', 'spike_id')]),
        (art, plot_realign, [('outlier_files', 'outliers')]),
        (plot_qa, make_cov, [('fd_outliers', 'fd_outliers')]),
        (brain_extraction_ants, coregistration, [('BrainExtractionBrain',
                                                  'fixed_image')]),
        (mean_epi, coregistration, [('out_file', 'moving_image')]),
        (brain_extraction_ants, normalization, [('BrainExtractionBrain',
                                                 'moving_image')]),
        (coregistration, merge_transforms, [('composite_transform', 'in2')]),
        (normalization, merge_transforms, [('composite_transform', 'in1')]),
        (merge_transforms, apply_transforms, [('out', 'transforms')]),
        (realign_fsl, apply_transforms, [('out_file', 'input_image')]),
        (apply_transforms, mean_norm_epi, [('output_image', 'in_file')]),
        (normalization, apply_transform_seg, [('composite_transform',
                                               'transforms')]),
        (brain_extraction_ants, apply_transform_seg,
         [('BrainExtractionSegmentation', 'input_image')]),
        (mean_norm_epi, plot_normalization_check, [('out_file', 'wra_img')])
    ])

    ##################################################
    ################### PART (3) #####################
    # epi (in mni) -> filter -> smooth -> down sample
    # OR
    # epi (in mni) -> filter -> down sample
    # OR
    # epi (in mni) -> smooth -> down sample
    # OR
    # epi (in mni) -> down sample
    ###################################################

    if apply_filter:
        workflow.connect([(apply_transforms, lp_filter, [('output_image',
                                                          'in_file')])])

        if apply_smooth:
            # Filtering + Smoothing
            workflow.connect([(lp_filter, smooth, [('out_file', 'in_file')]),
                              (smooth, down_samp, [('smoothed_file', 'in_file')
                                                   ])])
        else:
            # Filtering + No Smoothing
            workflow.connect([(lp_filter, down_samp, [('out_file', 'in_file')])
                              ])
    else:
        if apply_smooth:
            # No Filtering + Smoothing
            workflow.connect([
                (apply_transforms, smooth, [('output_image', 'in_file')]),
                (smooth, down_samp, [('smoothed_file', 'in_file')])
            ])
        else:
            # No Filtering + No Smoothing
            workflow.connect([(apply_transforms, down_samp, [('output_image',
                                                              'in_file')])])

    ##########################################
    ############### PART (4) #################
    # down sample -> save
    # plots -> save
    # covs -> save
    # t1 (in mni) -> save
    # t1 segmented masks (in mni) -> save
    # realignment parms -> save
    ##########################################

    workflow.connect([
        (down_samp, datasink, [('out_file', 'functional.@down_samp')]),
        (plot_realign, datasink, [('plot', 'functional.@plot_realign')]),
        (plot_qa, datasink, [('plot', 'functional.@plot_qa')]),
        (plot_normalization_check, datasink,
         [('plot', 'functional.@plot_normalization')]),
        (make_cov, datasink, [('covariates', 'functional.@covariates')]),
        (normalization, datasink, [('warped_image', 'structural.@normanat')]),
        (apply_transform_seg, datasink, [('output_image',
                                          'structural.@normanatseg')]),
        (realign_fsl, datasink, [('par_file', 'functional.@motionparams')])
    ])

    if not os.path.exists(os.path.join(output_dir, 'pipeline.png')):
        workflow.write_graph(dotfilename=os.path.join(output_dir, 'pipeline'),
                             format='png')

    print(f"Creating workflow for subject: {subject_id}")
    if ants_threads != 8:
        print(
            f"ANTs will utilize the user-requested {ants_threads} threads for parallel processing."
        )
    return workflow
Example #34
0
def run_workflow(csv_file, use_pbs, contrasts_name, template):
    workflow = pe.Workflow(name='run_level1flow')
    workflow.base_dir = os.path.abspath('./workingdirs')

    from nipype import config, logging
    config.update_config({
        'logging': {
            'log_directory': os.path.join(workflow.base_dir, 'logs'),
            'log_to_file': True,
            'workflow_level': 'DEBUG',
            'interface_level': 'DEBUG',
        }
    })
    logging.update_logging(config)

    config.enable_debug_mode()

    # redundant with enable_debug_mode() ...
    workflow.stop_on_first_crash = True
    workflow.remove_unnecessary_outputs = False
    workflow.keep_inputs = True
    workflow.hash_method = 'content'
    """
    Setup the contrast structure that needs to be evaluated. This is a list of
    lists. The inner list specifies the contrasts and has the following format:
    [Name, Stat, [list of condition names], [weights on those conditions]]. The
    condition names must match the `names` listed in the `evt_info` function
    described above.
    """

    try:
        import importlib
        mod = importlib.import_module('contrasts.' + contrasts_name)
        contrasts = mod.contrasts
        # event_names = mod.event_names
    except ImportError:
        raise RuntimeError('Unknown contrasts: %s. Must exist as a Python'
                           ' module in contrasts directory!' % contrasts_name)

    modelfit = create_workflow(contrasts)

    import bids_templates as bt

    inputnode = pe.Node(niu.IdentityInterface(fields=[
        'subject_id',
        'session_id',
        'run_id',
    ]),
                        name='input')

    assert csv_file is not None, "--csv argument must be defined!"

    reader = niu.CSVReader()
    reader.inputs.header = True
    reader.inputs.in_file = csv_file
    out = reader.run()
    subject_list = out.outputs.subject
    session_list = out.outputs.session
    run_list = out.outputs.run

    inputnode.iterables = [
        ('subject_id', subject_list),
        ('session_id', session_list),
        ('run_id', run_list),
    ]
    inputnode.synchronize = True

    templates = {
        'funcs':
        'derivatives/featpreproc/highpassed_files/sub-{subject_id}/ses-{session_id}/func/'
        'sub-{subject_id}_ses-{session_id}_*_run-{run_id}*_bold_res-1x1x1_preproc_*.nii.gz',

        # 'funcmasks':
        # 'featpreproc/func_unwarp/sub-{subject_id}/ses-{session_id}/func/'
        #     'sub-{subject_id}_ses-{session_id}_*_run-{run_id}*_bold_res-1x1x1_preproc'
        #     '_mc_unwarped.nii.gz',
        'highpass':
        '******'
        'sub-{subject_id}_ses-{session_id}_*_run-{run_id}_bold_res-1x1x1_preproc_*.nii.gz',
        'motion_parameters':
        'derivatives/featpreproc/motion_corrected/sub-{subject_id}/ses-{session_id}/func/'
        'sub-{subject_id}_ses-{session_id}_*_run-{run_id}_bold_res-1x1x1_preproc.param.1D',
        'motion_outlier_files':
        'derivatives/featpreproc/motion_outliers/sub-{subject_id}/ses-{session_id}/func/'
        'art.sub-{subject_id}_ses-{session_id}_*_run-{run_id}_bold_res-1x1x1_preproc_mc'
        '_maths_outliers.txt',
        'event_log':
        'sub-{subject_id}/ses-{session_id}/func/'
        # 'sub-{subject_id}_ses-{session_id}*_bold_res-1x1x1_preproc'
        'sub-{subject_id}_ses-{session_id}*run-{run_id}*'
        # '.nii.gz',
        '_events.tsv',
        'ref_func':
        'derivatives/featpreproc/reference/func/*.nii.gz',
        'ref_funcmask':
        'derivatives/featpreproc/reference/func_mask/*.nii.gz',
    }
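    # With, say, subject_id='01', session_id='01', run_id='02' (hypothetical
    # values), SelectFiles expands the 'funcs' template above to a glob like:
    #   derivatives/featpreproc/highpassed_files/sub-01/ses-01/func/
    #       sub-01_ses-01_*_run-02*_bold_res-1x1x1_preproc_*.nii.gz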

    inputfiles = pe.Node(nio.SelectFiles(templates, base_directory=data_dir),
                         name='in_files')

    workflow.connect([
        (inputnode, inputfiles, [
            ('subject_id', 'subject_id'),
            ('session_id', 'session_id'),
            ('run_id', 'run_id'),
        ]),
    ])

    join_input = pe.JoinNode(
        niu.IdentityInterface(fields=[
            # 'subject_id',
            # 'session_id',
            # 'run_id',
            'funcs',
            'highpass',
            'motion_parameters',
            'motion_outlier_files',
            'event_log',
            'ref_func',
            'ref_funcmask',
        ]),
        joinsource='input',
        joinfield=[
            'funcs',
            'highpass',
            'motion_parameters',
            'motion_outlier_files',
            'event_log',
        ],
        # unique=True,
        name='join_input')
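    # The JoinNode undoes the per-(subject, session, run) expansion created by
    # the 'input' iterables: the joined fields arrive at modelfit as lists,
    # e.g. funcs=[<run-01 bold>, <run-02 bold>, ...], while ref_func and
    # ref_funcmask pass through as single values.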

    workflow.connect([
        (inputfiles, join_input, [
            ('funcs', 'funcs'),
            ('highpass', 'highpass'),
            ('motion_parameters', 'motion_parameters'),
            ('motion_outlier_files', 'motion_outlier_files'),
            ('event_log', 'event_log'),
            ('ref_func', 'ref_func'),
            ('ref_funcmask', 'ref_funcmask'),
        ]),
        (join_input, modelfit, [
            ('funcs', 'inputspec.funcs'),
            ('highpass', 'inputspec.highpass'),
            ('motion_parameters', 'inputspec.motion_parameters'),
            ('motion_outlier_files', 'inputspec.motion_outlier_files'),
            ('event_log', 'inputspec.event_log'),
            ('ref_func', 'inputspec.ref_func'),
            ('ref_funcmask', 'inputspec.ref_funcmask'),
        ]),
    ])

    modelfit.inputs.inputspec.fwhm = 2.0
    modelfit.inputs.inputspec.highpass = 50
    modelfit.write_graph(simple_form=True)
    modelfit.write_graph(graph2use='orig', format='png', simple_form=True)
    # modelfit.write_graph(graph2use='detailed', format='png', simple_form=False)

    workflow.stop_on_first_crash = True
    workflow.keep_inputs = True
    workflow.remove_unnecessary_outputs = False
    workflow.write_graph(simple_form=True)
    workflow.write_graph(graph2use='colored', format='png', simple_form=True)
    # workflow.write_graph(graph2use='detailed', format='png', simple_form=False)
    if use_pbs:
        workflow.run(plugin='PBS',
                     plugin_args={'template': os.path.expanduser(template)})
    else:
        workflow.run()
Example #35
0
def build_functional_temporal_workflow(resource_pool, config, subject_info,
                                       run_name, site_name=None):
    
    # build pipeline for each subject, individually

    # ~ 5 min 45 sec per subject
    # (roughly 345 seconds)

    import os
    import sys

    import nipype.interfaces.io as nio
    import nipype.pipeline.engine as pe

    import nipype.interfaces.utility as util
    import nipype.interfaces.fsl.maths as fsl
    
    import glob
    import yaml

    import time
    from time import strftime
    from nipype import config as nyconfig
    from nipype import logging


    logger = logging.getLogger('workflow')


    sub_id = str(subject_info[0])

    if subject_info[1]:
        session_id = str(subject_info[1])
    else:
        session_id = "session_0"

    if subject_info[2]:
        scan_id = str(subject_info[2])
    else:
        scan_id = "scan_0"


    # define and create the output directory
    output_dir = os.path.join(config["output_directory"], run_name, \
                              sub_id, session_id, scan_id)

    try:
        os.makedirs(output_dir)
    except:
        if not os.path.isdir(output_dir):
            err = "[!] Output directory unable to be created.\n" \
                  "Path: %s\n\n" % output_dir
            raise Exception(err)
        else:
            pass


    log_dir = output_dir
   
    # set up logging
    nyconfig.update_config({'logging': {'log_directory': log_dir, 'log_to_file': True}})
    logging.update_logging(nyconfig)

    # take date+time stamp for run identification purposes
    unique_pipeline_id = strftime("%Y%m%d%H%M%S")
    pipeline_start_stamp = strftime("%Y-%m-%d_%H:%M:%S")
    
    pipeline_start_time = time.time()


    logger.info(pipeline_start_stamp)

    logger.info("Contents of resource pool:\n" + str(resource_pool))

    logger.info("Configuration settings:\n" + str(config))


        
    # for QAP spreadsheet generation only
    config["subject_id"] = sub_id

    config["session_id"] = session_id

    config["scan_id"] = scan_id
    
    config["run_name"] = run_name


    if site_name:
        config["site_name"] = site_name
    
    

    workflow = pe.Workflow(name=scan_id)

    workflow.base_dir = os.path.join(config["working_directory"], sub_id, \
                            session_id)
                            
    # set up crash directory
    workflow.config['execution'] = \
        {'crashdump_dir': config["output_directory"]}
    
    
    # update that resource pool with what's already in the output directory
    for resource in os.listdir(output_dir):
    
        if os.path.isdir(os.path.join(output_dir,resource)) and resource not in resource_pool.keys():
        
            resource_pool[resource] = glob.glob(os.path.join(output_dir, \
                                          resource, "*"))[0]
                 

    # resource pool check
    invalid_paths = []
    
    for resource in resource_pool.keys():
    
        if not os.path.isfile(resource_pool[resource]):
        
            invalid_paths.append((resource, resource_pool[resource]))
            
            
    if len(invalid_paths) > 0:
        
        err = "\n\n[!] The paths provided in the subject list to the " \
              "following resources are not valid:\n"
        
        for path_tuple in invalid_paths:
        
            err = err + path_tuple[0] + ": " + path_tuple[1] + "\n"
                  
        err = err + "\n\n"
        
        raise Exception(err)
                  
    
    
    # start connecting the pipeline
       
    if "qap_functional_temporal" not in resource_pool.keys():

        from qap.qap_workflows import qap_functional_temporal_workflow

        workflow, resource_pool = \
            qap_functional_temporal_workflow(workflow, resource_pool, config)

    

    # set up the datasinks
    new_outputs = 0
    
    if "write_all_outputs" not in config.keys():
        config["write_all_outputs"] = False

    if config["write_all_outputs"] == True:

        for output in resource_pool.keys():
    
            # we use a check for len()==2 here to select those items in the
            # resource pool which are tuples of (node, node_output), instead
            # of the items which are straight paths to files

            # resource pool items which are in the tuple format are the
            # outputs that have been created in this workflow because they
            # were not present in the subject list YML (the starting resource 
            # pool) and had to be generated
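            # For example (illustrative values only), a generated output might
            # be stored as:
            #   resource_pool["qap_functional_temporal"] = (qa_node, 'outputspec.qap')
            # whereas a pre-existing file would simply be a path string such as
            #   "/output/.../qap_functional_temporal.csv"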

            if len(resource_pool[output]) == 2:
    
                ds = pe.Node(nio.DataSink(), name='datasink_%s' % output)
                ds.inputs.base_directory = output_dir
    
                node, out_file = resource_pool[output]

                workflow.connect(node, out_file, ds, output)
            
                new_outputs += 1

    else:

        # write out only the output CSV (default)

        output = "qap_functional_temporal"

        if len(resource_pool[output]) == 2:

            ds = pe.Node(nio.DataSink(), name='datasink_%s' % output)
            ds.inputs.base_directory = output_dir
    
            node, out_file = resource_pool[output]

            workflow.connect(node, out_file, ds, output)
            
            new_outputs += 1
         
    

    # run the pipeline (if there is anything to do)
    if new_outputs > 0:
    
        workflow.write_graph(dotfilename=os.path.join(output_dir, run_name + \
                                                          ".dot"), \
                                                          simple_form=False)

        workflow.run(plugin='MultiProc', plugin_args= \
                         {'n_procs': config["num_cores_per_subject"]})

    else:

        print "\nEverything is already done for subject %s." % sub_id


    # Remove working directory when done
    if config["write_all_outputs"] == False:
        try:
            work_dir = os.path.join(workflow.base_dir, scan_id)

            if os.path.exists(work_dir):
                import shutil
                shutil.rmtree(work_dir)
        except:
            print "Couldn\'t remove the working directory!"
            pass


    pipeline_end_stamp = strftime("%Y-%m-%d_%H:%M:%S")
    
    pipeline_end_time = time.time()

    logger.info("Elapsed time (minutes) since last start: %s" \
                % ((pipeline_end_time - pipeline_start_time)/60))

    logger.info("Pipeline end time: %s" % pipeline_end_stamp)



    return workflow
Example #36
0
    def process(self):
        # Process time
        self.now = datetime.datetime.now().strftime("%Y%m%d_%H%M")

        if '_' in self.subject:
            self.subject = self.subject.split('_')[0]

        old_subject = self.subject

        if self.global_conf.subject_session == '':
            deriv_subject_directory = os.path.join(self.output_directory,
                                                   "cmp", self.subject)
        else:
            deriv_subject_directory = os.path.join(
                self.output_directory, "cmp", self.subject,
                self.global_conf.subject_session)

            self.subject = "_".join(
                (self.subject, self.global_conf.subject_session))

        # Initialization
        if os.path.isfile(
                os.path.join(
                    deriv_subject_directory, "anat",
                    "{}_log-multiscalbrainparcellator.txt".format(
                        self.subject))):
            os.unlink(
                os.path.join(
                    deriv_subject_directory, "anat",
                    "{}_log-multiscalbrainparcellator.txt".format(
                        self.subject)))

        config.update_config({
            'logging': {
                'log_directory': os.path.join(deriv_subject_directory, "anat"),
                'log_to_file': True
            },
            'execution': {
                'remove_unnecessary_outputs': False,
                'stop_on_first_crash': True,
                'stop_on_first_rerun': False,
                'crashfile_format': "txt"
            }
        })
        logging.update_logging(config)
        iflogger = logging.getLogger('nipype.interface')

        iflogger.info("**** Processing ****")
        anat_flow = self.create_pipeline_flow(
            deriv_subject_directory=deriv_subject_directory)
        anat_flow.write_graph(graph2use='colored',
                              format='svg',
                              simple_form=True)

        if (self.number_of_cores != 1):
            print("Number of cores used: {}".format(self.number_of_cores))
            #print(os.environ)
            anat_flow.run(plugin='MultiProc',
                          plugin_args={'n_procs': self.number_of_cores})
        else:
            print("Number of cores used: {}".format(self.number_of_cores))
            #print(os.environ)
            anat_flow.run()

        # Clean undesired folders/files
        # rm_file_list = ['rh.EC_average','lh.EC_average','fsaverage']
        # for file_to_rm in rm_file_list:
        #     if os.path.exists(os.path.join(self.base_directory,file_to_rm)):
        #         os.remove(os.path.join(self.base_directory,file_to_rm))

        # copy .ini and log file
        outdir = deriv_subject_directory
        if not os.path.exists(outdir):
            os.makedirs(outdir)

        try:
            src = os.path.join(deriv_subject_directory, "anat", "pypeline.log")
            dest = os.path.join(
                deriv_subject_directory, "anat",
                "{}_log-multiscalbrainparcellator.txt".format(self.subject))
            shutil.move(src, dest)
        except:
            print("Skipped renaming of log file")

        try:
            shutil.copy(self.config_file, outdir)
        except shutil.Error:
            print("Skipped copy of config file")

        #shutil.copy(os.path.join(self.output_directory,"cmp",self.subject,'pypeline.log'),outdir)

        iflogger.info("**** Processing finished ****")

        return True, 'Processing successful'
Example #37
0
    def process(self):
        # Process time
        now = datetime.datetime.now().strftime("%Y%m%d_%H%M")

        # Initialization
        if os.path.exists(os.path.join(self.base_directory, "LOG", "pypeline.log")):
            os.unlink(os.path.join(self.base_directory, "LOG", "pypeline.log"))
        config.update_config(
            {
                "logging": {"log_directory": os.path.join(self.base_directory, "LOG"), "log_to_file": True},
                "execution": {"remove_unnecessary_outputs": False},
            }
        )
        logging.update_logging(config)
        iflogger = logging.getLogger("interface")

        # Data import
        datasource = pe.Node(interface=nio.DataGrabber(outfields=["diffusion", "T1", "T2"]), name="datasource")
        datasource.inputs.base_directory = os.path.join(self.base_directory, "NIFTI")
        datasource.inputs.template = "*"
        datasource.inputs.raise_on_empty = False
        datasource.inputs.field_template = dict(
            diffusion=self.global_conf.imaging_model + ".nii.gz", T1="T1.nii.gz", T2="T2.nii.gz"
        )
        datasource.inputs.sort_filelist = False

        # Data sinker for output
        sinker = pe.Node(nio.DataSink(), name="diffusion_sinker")
        sinker.inputs.base_directory = os.path.join(self.base_directory, "RESULTS")

        # Clear previous outputs
        self.clear_stages_outputs()

        # Create common_flow
        common_flow = self.create_common_flow()

        # Create diffusion flow

        diffusion_flow = pe.Workflow(name="diffusion_pipeline")
        diffusion_inputnode = pe.Node(
            interface=util.IdentityInterface(
                fields=[
                    "diffusion",
                    "T1",
                    "T2",
                    "wm_mask_file",
                    "roi_volumes",
                    "subjects_dir",
                    "subject_id",
                    "atlas_info",
                    "parcellation_scheme",
                ]
            ),
            name="inputnode",
        )
        diffusion_outputnode = pe.Node(
            interface=util.IdentityInterface(fields=["connectivity_matrices"]), name="outputnode"
        )
        diffusion_flow.add_nodes([diffusion_inputnode, diffusion_outputnode])

        if self.stages["Preprocessing"].enabled:
            preproc_flow = self.create_stage_flow("Preprocessing")
            diffusion_flow.connect([(diffusion_inputnode, preproc_flow, [("diffusion", "inputnode.diffusion")])])

        if self.stages["Registration"].enabled:
            reg_flow = self.create_stage_flow("Registration")
            diffusion_flow.connect(
                [
                    (
                        diffusion_inputnode,
                        reg_flow,
                        [
                            ("T1", "inputnode.T1"),
                            ("T2", "inputnode.T2"),
                            ("wm_mask_file", "inputnode.wm_mask"),
                            ("roi_volumes", "inputnode.roi_volumes"),
                        ],
                    ),
                    (preproc_flow, reg_flow, [("outputnode.diffusion_preproc", "inputnode.target")]),
                ]
            )
            if self.stages["Registration"].config.registration_mode == "BBregister (FS)":
                diffusion_flow.connect(
                    [
                        (
                            diffusion_inputnode,
                            reg_flow,
                            [("subjects_dir", "inputnode.subjects_dir"), ("subject_id", "inputnode.subject_id")],
                        )
                    ]
                )

        if self.stages["Diffusion"].enabled:
            diff_flow = self.create_stage_flow("Diffusion")
            diffusion_flow.connect(
                [
                    (preproc_flow, diff_flow, [("outputnode.diffusion_preproc", "inputnode.diffusion")]),
                    (reg_flow, diff_flow, [("outputnode.wm_mask_registered", "inputnode.wm_mask_registered")]),
                    (reg_flow, diff_flow, [("outputnode.roi_volumes_registered", "inputnode.roi_volumes")]),
                ]
            )

        if self.stages["Connectome"].enabled:
            if self.stages["Diffusion"].config.processing_tool == "FSL":
                self.stages["Connectome"].config.probtrackx = True
            else:
                self.stages["Connectome"].config.probtrackx = False
            con_flow = self.create_stage_flow("Connectome")
            diffusion_flow.connect(
                [
                    (diffusion_inputnode, con_flow, [("parcellation_scheme", "inputnode.parcellation_scheme")]),
                    (
                        diff_flow,
                        con_flow,
                        [
                            ("outputnode.track_file", "inputnode.track_file"),
                            ("outputnode.gFA", "inputnode.gFA"),
                            ("outputnode.roi_volumes", "inputnode.roi_volumes_registered"),
                            ("outputnode.skewness", "inputnode.skewness"),
                            ("outputnode.kurtosis", "inputnode.kurtosis"),
                            ("outputnode.P0", "inputnode.P0"),
                        ],
                    ),
                    (con_flow, diffusion_outputnode, [("outputnode.connectivity_matrices", "connectivity_matrices")]),
                ]
            )

            if self.stages["Parcellation"].config.parcellation_scheme == "Custom":
                diffusion_flow.connect([(diffusion_inputnode, con_flow, [("atlas_info", "inputnode.atlas_info")])])

        # Create NIPYPE flow

        flow = pe.Workflow(name="NIPYPE", base_dir=os.path.join(self.base_directory))

        flow.connect(
            [
                (datasource, common_flow, [("T1", "inputnode.T1")]),
                (
                    datasource,
                    diffusion_flow,
                    [("diffusion", "inputnode.diffusion"), ("T1", "inputnode.T1"), ("T2", "inputnode.T2")],
                ),
                (
                    common_flow,
                    diffusion_flow,
                    [
                        ("outputnode.subjects_dir", "inputnode.subjects_dir"),
                        ("outputnode.subject_id", "inputnode.subject_id"),
                        ("outputnode.wm_mask_file", "inputnode.wm_mask_file"),
                        ("outputnode.roi_volumes", "inputnode.roi_volumes"),
                        ("outputnode.parcellation_scheme", "inputnode.parcellation_scheme"),
                        ("outputnode.atlas_info", "inputnode.atlas_info"),
                    ],
                ),
                (
                    diffusion_flow,
                    sinker,
                    [
                        (
                            "outputnode.connectivity_matrices",
                            "%s.%s.connectivity_matrices" % (self.global_conf.imaging_model, now),
                        )
                    ],
                ),
            ]
        )

        # Process pipeline

        iflogger.info("**** Processing ****")

        if self.number_of_cores != 1:
            flow.run(plugin="MultiProc", plugin_args={"n_procs": self.number_of_cores})
        else:
            flow.run()

        self.fill_stages_outputs()

        # Clean undesired folders/files
        rm_file_list = ["rh.EC_average", "lh.EC_average", "fsaverage"]
        for file_to_rm in rm_file_list:
            if os.path.exists(os.path.join(self.base_directory, file_to_rm)):
                os.remove(os.path.join(self.base_directory, file_to_rm))

        # copy .ini and log file
        outdir = os.path.join(self.base_directory, "RESULTS", self.global_conf.imaging_model, now)
        if not os.path.exists(outdir):
            os.makedirs(outdir)
        shutil.copy(self.config_file, outdir)
        shutil.copy(os.path.join(self.base_directory, "LOG", "pypeline.log"), outdir)

        iflogger.info("**** Processing finished ****")

        return True, "Processing sucessful"
Example #38
0
def run_workflow(args, run=True):
    """Connect and execute the QAP Nipype workflow for one bundle of data.

    - This function will update the resource pool with what is found in the
      output directory (if it already exists). If the final expected output
      of the pipeline is already found, the pipeline will not run and it
      will move onto the next bundle. If the final expected output is not
      present, the pipeline begins to build itself backwards.

    :type args: tuple
    :param args: A 7-element tuple of information comprising of the bundle's
                 resource pool, a list of participant info, the configuration
                 options, the pipeline ID run name and miscellaneous run args.
    :rtype: dictionary
    :return: A dictionary with information about the workflow run, its status,
             and results.
    """

    import os
    import os.path as op

    import nipype.interfaces.io as nio
    import nipype.pipeline.engine as pe
    import nipype.interfaces.utility as niu

    import qap
    from qap_utils import read_json

    import glob

    import time
    from time import strftime
    from nipype import config as nyconfig
    from nipype import logging

    # unpack args
    resource_pool_dict, sub_info_list, config, run_name, runargs, \
        bundle_idx, num_bundles = args

    # Read and apply general settings in config
    keep_outputs = config.get('write_all_outputs', False)

    # take date+time stamp for run identification purposes
    pipeline_start_stamp = strftime("%Y-%m-%d_%H:%M:%S")
    pipeline_start_time = time.time()

    if "workflow_log_dir" not in config.keys():
        config["workflow_log_dir"] = config["output_directory"]

    bundle_log_dir = op.join(config["workflow_log_dir"],
                             '_'.join(["bundle", str(bundle_idx)]))

    try:
        os.makedirs(bundle_log_dir)
    except:
        if not op.isdir(bundle_log_dir):
            err = "[!] Bundle log directory unable to be created.\n" \
                    "Path: %s\n\n" % bundle_log_dir
            raise Exception(err)
        else:
            pass

    # set up logging
    nyconfig.update_config(
        {'logging': {'log_directory': bundle_log_dir, 'log_to_file': True}})
    logging.update_logging(nyconfig)

    logger.info("QAP version %s" % qap.__version__)
    logger.info("Pipeline start time: %s" % pipeline_start_stamp)

    workflow = pe.Workflow(name=run_name)
    workflow.base_dir = op.join(config["working_directory"])

    # set up crash directory
    workflow.config['execution'] = \
        {'crashdump_dir': config["output_directory"]}

    # create the one node all participants will start from
    starter_node = pe.Node(niu.Function(input_names=['starter'], 
                                        output_names=['starter'], 
                                        function=starter_node_func),
                           name='starter_node')

    # set a dummy variable
    starter_node.inputs.starter = ""
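    # Every participant sub-workflow added below hangs off this single starter
    # node, so the whole bundle can be submitted as one Nipype graph (see the
    # later comment where 'starter' is placed into each resource pool).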

    new_outputs = 0

    # iterate over each subject in the bundle
    logger.info("Starting bundle %s out of %s.." % (str(bundle_idx),
                                                    str(num_bundles)))
    # results dict
    rt = {'status': 'Started', 'bundle_log_dir': bundle_log_dir}

    for sub_info in sub_info_list:

        resource_pool = resource_pool_dict[sub_info]

        # in case we're dealing with string entries in the data dict
        try:
            resource_pool.keys()
        except AttributeError:
            continue

        # resource pool check
        invalid_paths = []

        for resource in resource_pool.keys():
            try:
                if not op.isfile(resource_pool[resource]) and resource != "site_name":
                    invalid_paths.append((resource, resource_pool[resource]))
            except:
                err = "\n\n[!]"
                raise Exception(err)

        if len(invalid_paths) > 0:
            err = "\n\n[!] The paths provided in the subject list to the " \
                  "following resources are not valid:\n"

            for path_tuple in invalid_paths:
                err = "%s%s: %s\n" % (err, path_tuple[0], path_tuple[1])

            err = "%s\n\n" % err
            raise Exception(err)

        # process subject info
        sub_id = str(sub_info[0])
        # for nipype
        if "-" in sub_id:
            sub_id = sub_id.replace("-","_")
        if "." in sub_id:
            sub_id = sub_id.replace(".","_")

        if sub_info[1]:
            session_id = str(sub_info[1])
            # for nipype
            if "-" in session_id:
                session_id = session_id.replace("-","_")
            if "." in session_id:
                session_id = session_id.replace(".","_")
        else:
            session_id = "session_0"

        if sub_info[2]:
            scan_id = str(sub_info[2])
            # for nipype
            if "-" in scan_id:
                scan_id = scan_id.replace("-","_")
            if "." in scan_id:
                scan_id = scan_id.replace(".","_")
        else:
            scan_id = "scan_0"

        name = "_".join(["", sub_id, session_id, scan_id])

        rt[name] = {'id': sub_id, 'session': session_id, 'scan': scan_id,
                    'resource_pool': str(resource_pool)}

        logger.info("Participant info: %s" % name)

        # set output directory
        output_dir = op.join(config["output_directory"], run_name,
                             sub_id, session_id, scan_id)

        try:
            os.makedirs(output_dir)
        except:
            if not op.isdir(output_dir):
                err = "[!] Output directory unable to be created.\n" \
                      "Path: %s\n\n" % output_dir
                raise Exception(err)
            else:
                pass

        # for QAP spreadsheet generation only
        config.update({"subject_id": sub_id, "session_id": session_id,
                       "scan_id": scan_id, "run_name": run_name})

        if "site_name" in resource_pool:
            config.update({"site_name": resource_pool["site_name"]})

        logger.info("Configuration settings:\n%s" % str(config))

        qap_types = ["anatomical_spatial", 
                     "functional_spatial", 
                     "functional_temporal"]

        # update that resource pool with what's already in the output
        # directory
        for resource in os.listdir(output_dir):
            if (op.exists(op.join(output_dir, resource)) and
                    resource not in resource_pool.keys()):
                try:
                    resource_pool[resource] = \
                        glob.glob(op.join(output_dir, resource, "*"))[0]
                except IndexError:
                    if ".json" in resource:
                        # load relevant json info into resource pool
                        json_file = op.join(output_dir, resource)
                        json_dict = read_json(json_file)
                        sub_json_dict = json_dict["%s %s %s" % (sub_id,
                                                                session_id,
                                                                scan_id)]

                        if "anatomical_header_info" in sub_json_dict.keys():
                            resource_pool["anatomical_header_info"] = \
                                sub_json_dict["anatomical_header_info"]

                        if "functional_header_info" in sub_json_dict.keys():
                            resource_pool["functional_header_info"] = \
                                sub_json_dict["functional_header_info"]

                        for qap_type in qap_types:
                            if qap_type in sub_json_dict.keys():
                                resource_pool["_".join(["qap",qap_type])] = \
                                    sub_json_dict[qap_type]
                except:
                    # a stray file in the sub-sess-scan output directory
                    pass
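        # (For reference, the JSON files read above are assumed to be keyed as
        #  "<sub_id> <session_id> <scan_id>" and to contain entries such as
        #  "functional_temporal" or "anatomical_header_info", as inferred from
        #  the lookups in the IndexError branch.)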

        # create starter node which links all of the parallel workflows within
        # the bundle together as a Nipype pipeline
        resource_pool["starter"] = (starter_node, 'starter')

        # individual workflow and logger setup
        logger.info("Contents of resource pool for this participant:\n%s"
                    % str(resource_pool))

        # start connecting the pipeline
        qw = None
        for qap_type in qap_types:
            if "_".join(["qap", qap_type]) not in resource_pool.keys():
                if qw is None:
                    from qap import qap_workflows as qw
                wf_builder = \
                    getattr(qw, "_".join(["qap", qap_type, "workflow"]))
                workflow, resource_pool = wf_builder(workflow, resource_pool,
                                                     config, name)

        if ("anatomical_scan" in resource_pool.keys()) and \
            ("anatomical_header_info" not in resource_pool.keys()):
            if qw is None:
                from qap import qap_workflows as qw
            workflow, resource_pool = \
                qw.qap_gather_header_info(workflow, resource_pool, config,
                    name, "anatomical")

        if ("functional_scan" in resource_pool.keys()) and \
            ("functional_header_info" not in resource_pool.keys()):
            if qw is None:
                from qap import qap_workflows as qw
            workflow, resource_pool = \
                qw.qap_gather_header_info(workflow, resource_pool, config,
                    name, "functional")

        # set up the datasinks
        out_list = []
        for output in resource_pool.keys():
            for qap_type in qap_types:
                if qap_type in output:
                    out_list.append("_".join(["qap", qap_type]))

        # write_all_outputs (writes everything to the output directory, not
        # just the final JSON files)
        if keep_outputs:
            out_list = resource_pool.keys()
        logger.info("Outputs we're keeping: %s" % str(out_list))
        logger.info('Resource pool keys after workflow connection: '
                    '{}'.format(str(resource_pool.keys())))

        # Save reports to out_dir if necessary
        if config.get('write_report', False):

            if ("qap_mosaic" in resource_pool.keys()) and  \
                    ("qap_mosaic" not in out_list):
                out_list += ['qap_mosaic']

            # The functional temporal also has an FD plot
            if 'qap_functional_temporal' in resource_pool.keys():
                if ("qap_fd" in resource_pool.keys()) and \
                        ("qap_fd" not in out_list):
                    out_list += ['qap_fd']

        for output in out_list:
            # we use a check for len()==2 here to select those items in the
            # resource pool which are tuples of (node, node_output), instead
            # of the items which are straight paths to files

            # resource pool items which are in the tuple format are the
            # outputs that have been created in this workflow because they
            # were not present in the subject list YML (the starting resource
            # pool) and had to be generated
            if (len(resource_pool[output]) == 2) and (output != "starter"):
                ds = pe.Node(nio.DataSink(), name='datasink_%s%s'
                                                  % (output,name))
                ds.inputs.base_directory = output_dir
                node, out_file = resource_pool[output]
                workflow.connect(node, out_file, ds, output)
                new_outputs += 1
            elif ".json" in resource_pool[output]:
                new_outputs += 1

    logger.info("New outputs: %s" % str(new_outputs))

    # run the pipeline (if there is anything to do)
    if new_outputs > 0:
        if config.get('write_graph', False):
            workflow.write_graph(
                dotfilename=op.join(config["output_directory"],
                                    "".join([run_name, ".dot"])),
                simple_form=False)
            workflow.write_graph(
                graph2use="orig",
                dotfilename=op.join(config["output_directory"],
                                    "".join([run_name, ".dot"])),
                simple_form=False)
            workflow.write_graph(
                graph2use="hierarchical",
                dotfilename=op.join(config["output_directory"],
                                    "".join([run_name, ".dot"])),
                simple_form=False)
        if run:
            try:
                logger.info("Running with plugin %s" % runargs["plugin"])
                logger.info("Using plugin args %s" % runargs["plugin_args"])
                workflow.run(plugin=runargs["plugin"],
                             plugin_args=runargs["plugin_args"])
                rt['status'] = 'finished'
                logger.info("Workflow run finished for bundle %s."
                            % str(bundle_idx))
            except Exception as e:  # TODO We should be more specific here ...
                errmsg = e
                rt.update({'status': 'failed'})
                logger.info("Workflow run failed for bundle %s."
                            % str(bundle_idx))
                # ... however this is run inside a pool.map: do not raise
                # Exception
        else:
            return workflow

    else:
        rt['status'] = 'cached'
        logger.info("\nEverything is already done for bundle %s."
                    % str(bundle_idx))

    # Remove working directory when done
    if not keep_outputs:
        try:
            work_dir = op.join(workflow.base_dir, scan_id)

            if op.exists(work_dir):
                import shutil
                shutil.rmtree(work_dir)
        except:
            logger.warning("Couldn't remove the working directory!")
            pass

    if rt["status"] == "failed":
        logger.error(errmsg)
    else:
        pipeline_end_stamp = strftime("%Y-%m-%d_%H:%M:%S")
        pipeline_end_time = time.time()
        logger.info("Elapsed time (minutes) since last start: %s"
                    % ((pipeline_end_time - pipeline_start_time) / 60))
        logger.info("Pipeline end time: %s" % pipeline_end_stamp)

    return rt
Example #39
0
def pipeline(args):
    if args['debug']:
        config.enable_debug_mode()
    config.update_config({'logging': {'log_directory':makeSupportDir(args['name'], "logs")}})
    logging.update_logging(config)

    # CONSTANTS
    sessionID = args['session']
    outputType = args['format'].upper()
    fOutputType = args['freesurfer']
    preprocessOn = args['preprocess']
    maskGM = args['maskgm']
    maskWholeBrain = args['maskwb']
    maskWhiteMatterFromSeeds = args['maskseeds']
    # print args['name']
    t1_experiment = "20141001_PREDICTHD_long_Results"  #"20130729_PREDICT_Results"
    atlasFile = os.path.abspath(os.path.join(os.path.dirname(__file__), "ReferenceAtlas", "template_t1.nii.gz"))
    wholeBrainFile = os.path.abspath(os.path.join(os.path.dirname(__file__), "ReferenceAtlas", "template_brain.nii.gz"))
    atlasLabel = os.path.abspath(os.path.join(os.path.dirname(__file__), "ReferenceAtlas", "template_nac_labels.nii.gz"))
    resampleResolution = (2.0, 2.0, 2.0)
    downsampledfilename = 'downsampled_atlas.nii.gz'

    master = pipe.Workflow(name=args['name'] + "_CACHE")
    master.base_dir = os.path.abspath("/Shared/sinapse/CACHE")

    sessions = pipe.Node(interface=IdentityInterface(fields=['session_id']), name='sessionIDs')
    sessions.iterables = ('session_id', sessionID)
    downsampleAtlas = pipe.Node(interface=Function(function=resampleImage,
                                                   input_names=['inputVolume', 'outputVolume', 'resolution'],
                                                   output_names=['outputVolume']),
                                name="downsampleAtlas")
    downsampleAtlas.inputs.inputVolume = atlasFile
    downsampleAtlas.inputs.outputVolume = downsampledfilename
    downsampleAtlas.inputs.resolution = [int(x) for x in resampleResolution]

    # HACK: Remove node from pipeline until Nipype/AFNI file copy issue is resolved
    # fmri_DataSink = pipe.Node(interface=DataSink(), name="fmri_DataSink")
    # fmri_DataSink.overwrite = REWRITE_DATASINKS
    # Output to: /Shared/paulsen/Experiments/YYYYMMDD_<experiment>_Results/fmri
    # fmri_DataSink.inputs.base_directory = os.path.join(master.base_dir, RESULTS_DIR, 'fmri')
    # fmri_DataSink.inputs.substitutions = [('to_3D_out+orig', 'to3D')]
    # fmri_DataSink.inputs.parameterization = False
    #
    # master.connect([(sessions, fmri_DataSink, [('session_id', 'container')])])
    # END HACK

    registration = registrationWorkflow.workflow(t1_experiment, outputType, name="registration_wkfl")
    master.connect([(sessions, registration, [('session_id', "inputs.session_id")])])

    detrend = afninodes.detrendnode(outputType, 'afni3Ddetrend')
    # define grabber
    site = "*"
    subject = "*"
    if preprocessOn:
        grabber = dataio.iowaGrabber(t1_experiment, site, subject, maskGM, maskWholeBrain)
        master.connect([(sessions, grabber, [('session_id', 'session_id')]),
                         (grabber, registration,     [('t1_File', 'inputs.t1')])])
        # Why isn't preprocessWorkflow.workflow() used instead? It would avoid most of the nuisance connections here...
        preprocessing = preprocessWorkflow.prepWorkflow(skipCount=6, outputType=outputType)
        name = args.pop('name')  # HACK: prevent name conflict with nuisance workflow
        nuisance = nuisanceWorkflow.workflow(outputType=outputType, **args)
        args['name'] = name  # END HACK
        master.connect([(grabber, preprocessing,      [('fmri_dicom_dir', 'to_3D.infolder'),
                                                       ('fmri_dicom_dir', 'formatFMRINode.dicomDirectory')]),
                        (grabber, nuisance,           [('whmFile', 'wm.warpWMtoFMRI.input_image')]),
                        (preprocessing, registration, [('merge.out_file', 'inputs.fmri'),  # 7
                                                       ('automask.out_file', 'tstat.mask_file')]),  # *optional*
                        (registration, nuisance,      [('outputs.fmri_reference', 'csf.warpCSFtoFMRI.reference_image'),  # CSF
                                                       ('outputs.nac2fmri_list', 'csf.warpCSFtoFMRI.transforms'),
                                                       ('outputs.fmri_reference', 'wm.warpWMtoFMRI.reference_image'),    # WM
                                                       ('outputs.t12fmri_list', 'wm.warpWMtoFMRI.transforms')]),
                        ])
        warpCSFtoFMRInode = nuisance.get_node('csf').get_node('warpCSFtoFMRI')
        warpCSFtoFMRInode.inputs.input_image = atlasFile
        if maskGM:
            master.connect([(grabber, nuisance,       [('gryFile', 'gm.warpGMtoFMRI.input_image')]),
                            (registration, nuisance,  [('outputs.fmri_reference', 'gm.warpGMtoFMRI.reference_image'),
                                                       ('outputs.t12fmri_list', 'gm.warpGMtoFMRI.transforms')]),
                            (preprocessing, nuisance, [('calc.out_file', 'gm.afni3DmaskAve_grm.in_file'),
                                                       ('volreg.oned_file', 'afni3Ddeconvolve.stim_file_4')])])
        elif maskWholeBrain:
            master.connect([(registration, nuisance,  [('outputs.fmri_reference', 'wb.warpBraintoFMRI.reference_image'),
                                                       ('outputs.nac2fmri_list', 'wb.warpBraintoFMRI.transforms')]),
                            (preprocessing, nuisance, [('calc.out_file', 'wb.afni3DmaskAve_whole.in_file'),
                                                       ('volreg.oned_file', 'afni3Ddeconvolve.stim_file_4')])])
            warpBraintoFMRInode = nuisance.get_node('wb').get_node('warpBraintoFMRI')
            warpBraintoFMRInode.inputs.input_image= wholeBrainFile
        else:
            master.connect([(preprocessing, nuisance, [('volreg.oned_file', 'afni3Ddeconvolve.stim_file_3')])])

        master.connect([(preprocessing, nuisance, [('calc.out_file', 'wm.afni3DmaskAve_wm.in_file'),
                                                   ('calc.out_file', 'csf.afni3DmaskAve_csf.in_file'),
                                                   ('calc.out_file', 'afni3Ddeconvolve.in_file')]),
                        (nuisance, detrend,       [('afni3Ddeconvolve.out_errts', 'in_file')])])  # 13
    else:
        cleveland_grabber = dataio.clevelandGrabber()
        grabber = dataio.autoworkupGrabber(t1_experiment, site, subject)
        converter = pipe.Node(interface=Copy(), name='converter')  # Convert ANALYZE to AFNI

        master.connect([(sessions, grabber,            [('session_id', 'session_id')]),
                         (grabber, registration,        [('t1_File', 'inputs.t1')]),
                         (sessions, cleveland_grabber,  [('session_id', 'session_id')]),
                         (cleveland_grabber, converter, [('fmriHdr', 'in_file')]),
                         (converter, registration,      [('out_file', 'inputs.fmri')]),
                         (converter, detrend,           [('out_file', 'in_file')]),  # in fMRI_space
                        ])

    t1_wf = registrationWorkflow.t1Workflow()
    babc_wf = registrationWorkflow.babcWorkflow()
    # HACK: No EPI
    # epi_wf = registrationWorkflow.epiWorkflow()
    lb_wf = registrationWorkflow.labelWorkflow()
    seed_wf = registrationWorkflow.seedWorkflow()
    bandpass = afninodes.fouriernode(outputType, 'fourier')  # fourier (bandpass) is the last step in the AFNI portion of the pipeline that writes NIFTI output

    master.connect([(detrend, bandpass,       [('out_file', 'in_file')]), # Per Dawei, bandpass after running 3dDetrend
                     (grabber, t1_wf,         [('t1_File', 'warpT1toFMRI.input_image')]),
                     (registration, t1_wf,    [('outputs.fmri_reference', 'warpT1toFMRI.reference_image'),  # T1
                                               ('outputs.t12fmri_list', 'warpT1toFMRI.transforms')]),
                     (grabber, babc_wf,       [('csfFile', 'warpBABCtoFMRI.input_image')]),
                     (registration, babc_wf,  [('outputs.fmri_reference', 'warpBABCtoFMRI.reference_image'),  # Labels
                                               ('outputs.t12fmri_list', 'warpBABCtoFMRI.transforms')]),
                     # HACK: No EPI
                     # (downsampleAtlas, epi_wf, [('outputVolume', 'warpEPItoNAC.reference_image')]),
                     # (registration, epi_wf,    [('outputs.fmri2nac_list', 'warpEPItoNAC.transforms')]),
                     # (bandpass, epi_wf,         [('out_file', 'warpEPItoNAC.input_image')]),
                     # END HACK
                     (downsampleAtlas, lb_wf, [('outputVolume', 'warpLabeltoNAC.reference_image')]),
                     (registration, lb_wf,    [('outputs.fmri2nac_list', 'warpLabeltoNAC.transforms')]),
                     (t1_wf, seed_wf,         [('warpT1toFMRI.output_image', 'warpSeedtoFMRI.reference_image')]),
                     (registration, seed_wf,  [('outputs.nac2fmri_list', 'warpSeedtoFMRI.transforms')]),
                     ])

    renameMasks = pipe.Node(interface=Rename(format_string='%(label)s_mask'), name='renameMasksAtlas')
    renameMasks.inputs.keep_ext = True
    atlas_DataSink = dataio.atlasSink(base_directory=master.base_dir, **args)
    master.connect([(renameMasks, atlas_DataSink,     [('out_file', 'Atlas')]),
                    (downsampleAtlas, atlas_DataSink, [('outputVolume', 'Atlas.@resampled')]),
                    ])

    renameMasks2 = pipe.Node(interface=Rename(format_string='%(session)s_%(label)s_mask'), name='renameMasksFMRI')
    renameMasks2.inputs.keep_ext = True
    master.connect(sessions, 'session_id', renameMasks2, 'session')

    clipSeedWithVentriclesNode = pipe.Node(interface=Function(function=clipSeedWithVentricles,
                                           input_names=['seed', 'label', 'outfile'],
                                           output_names=['clipped_seed_fn']),
                                           name='clipSeedWithVentriclesNode')
    clipSeedWithVentriclesNode.inputs.outfile = "clipped_seed.nii.gz"

    master.connect(seed_wf, 'warpSeedtoFMRI.output_image', clipSeedWithVentriclesNode, 'seed')
    master.connect(babc_wf, 'warpBABCtoFMRI.output_image', clipSeedWithVentriclesNode, 'label')
    if not maskWhiteMatterFromSeeds:
        master.connect(clipSeedWithVentriclesNode, 'clipped_seed_fn', renameMasks2, 'in_file')
    else:
        clipSeedWithWhiteMatterNode = pipe.Node(interface=Function(function=clipSeedWithWhiteMatter,
                                                                   input_names=['seed', 'mask', 'outfile'],
                                                                   output_names=['outfile']),
                                                name='clipSeedWithWhiteMatterNode')
        clipSeedWithWhiteMatterNode.inputs.outfile = 'clipped_wm_seed.nii.gz'
        master.connect(babc_wf, 'warpBABCtoFMRI.output_image', clipSeedWithWhiteMatterNode, 'mask')
        master.connect(clipSeedWithVentriclesNode, 'clipped_seed_fn', clipSeedWithWhiteMatterNode, 'seed')
        master.connect(clipSeedWithWhiteMatterNode, 'outfile', renameMasks2, 'in_file')
    # Labels are iterated over, so we need a separate datasink to avoid overwriting any preprocessing
    # results when the labels are iterated (e.g. To3d output)
    # Write out to: /Shared/sinapse/CACHE/YYYYMMDD_<experiment>_Results/<SESSION>
    fmri_label_DataSink = dataio.fmriSink(master.base_dir, **args)
    master.connect(sessions, 'session_id', fmri_label_DataSink, 'container')
    master.connect(renameMasks2, 'out_file', fmri_label_DataSink, 'masks')
    master.connect(bandpass, 'out_file', fmri_label_DataSink, 'masks.@bandpass')

    roiMedian = afninodes.maskavenode('AFNI_1D', 'afni_roiMedian', '-mrange 1 1')
    master.connect(renameMasks2, 'out_file', roiMedian, 'mask')
    master.connect(bandpass, 'out_file', roiMedian, 'in_file')

    correlate = afninodes.fimnode('Correlation', 'afni_correlate')
    master.connect(roiMedian, 'out_file', correlate, 'ideal_file')
    master.connect(bandpass, 'out_file', correlate, 'in_file')

    regionLogCalc = afninodes.logcalcnode(outputType, 'afni_regionLogCalc')
    master.connect(correlate, 'out_file', regionLogCalc, 'in_file_a')

    renameZscore = pipe.Node(interface=Rename(format_string="%(session)s_%(label)s_zscore"), name='renameZscore')
    renameZscore.inputs.keep_ext = True
    master.connect(sessions, 'session_id', renameZscore, 'session')
    master.connect(regionLogCalc, 'out_file', renameZscore, 'in_file')
    master.connect(renameZscore, 'out_file', fmri_label_DataSink, 'zscores')
    master.connect(t1_wf, 'warpT1toFMRI.output_image', fmri_label_DataSink, 'zscores.@t1Underlay')

    # Move z values back into NAC atlas space
    # master.connect(downsampleAtlas, 'outputVolume', lb_wf, 'warpLabeltoNAC.reference_image')
    master.connect(regionLogCalc, 'out_file', lb_wf, 'warpLabeltoNAC.input_image')

    renameZscore2 = pipe.Node(interface=Rename(format_string="%(session)s_%(label)s_result"), name='renameZscore2')
    renameZscore2.inputs.keep_ext = True
    master.connect(sessions, 'session_id', renameZscore2, 'session')
    master.connect(lb_wf, 'warpLabeltoNAC.output_image', renameZscore2, 'in_file')
    master.connect(renameZscore2, 'out_file', atlas_DataSink, 'Atlas.@zscore')
    # Connect seed subworkflow
    seedSubflow = seedWorkflow.workflow(args['seeds'], outputType='NIFTI_GZ', name='seed_wkfl')
    master.connect([(downsampleAtlas, seedSubflow,    [('outputVolume', 'afni3Dcalc_seeds.in_file_a')]),
                     (seedSubflow, renameMasks,        [('afni3Dcalc_seeds.out_file', 'in_file'),
                                                        ('selectLabel.out', 'label')]),
                     (seedSubflow, renameMasks2,       [('selectLabel.out', 'label')]),
                     (seedSubflow, renameZscore,       [('selectLabel.out', 'label')]),
                     (seedSubflow, renameZscore2,      [('selectLabel.out', 'label')]),
                     (seedSubflow, seed_wf,               [('afni3Dcalc_seeds.out_file', 'warpSeedtoFMRI.input_image')])
                    ])
    imageDir = makeSupportDir(args['name'], "images")
    if args['plot']:
        registration.write_graph(dotfilename=os.path.join(imageDir, 'register.dot'), graph2use='orig', format='png',
                                 simple_form=False)
        if preprocessOn:
            preprocessing.write_graph(dotfilename=os.path.join(imageDir, 'preprocess.dot'), graph2use='orig', format='png',
                                      simple_form=False)
            nuisance.write_graph(dotfilename=os.path.join(imageDir, 'nuisance.dot'), graph2use='orig', format='png',
                                 simple_form=False)
        seedSubflow.write_graph(dotfilename=os.path.join(imageDir, 'seed.dot'), graph2use='orig', format='png',
                                 simple_form=False)
        master.write_graph(dotfilename=os.path.join(imageDir, 'master.dot'), graph2use="orig", format='png', simple_form=False)
    elif args['debug']:
        try:
            master.run(updatehash=True)
            # Run restingState on the all threads
            # Setup environment for CPU load balancing of ITK based programs.
            # --------
            # import multiprocessing
            # total_CPUS = 10  # multiprocessing.cpu_count()
            # master.run(plugin='MultiProc', plugin_args={'n_proc': total_CPUS})  #, updatehash=True)
            # --------
            # Run restingState on the local cluster
            # master.run(plugin='SGE', plugin_args={'template': os.path.join(os.getcwd(), 'ENV/bin/activate'),
            #                                        'qsub_args': '-S /bin/bash -cwd'})  #, updatehash=True)
        except:
            pass
        master.name = "master"  # HACK: Bug in Graphviz for nodes beginning with numbers
        master.write_graph(dotfilename=os.path.join(imageDir, 'debug_hier.dot'), graph2use="colored", format='png')
        master.write_graph(dotfilename=os.path.join(imageDir, 'debug_orig.dot'), graph2use="flat", format='png')
    else:
        import multiprocessing
        total_CPUS = multiprocessing.cpu_count()
        master.run(plugin='MultiProc', plugin_args={'n_procs': total_CPUS})  #, updatehash=True)
    return 0
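
The tail of the workflow above either writes out pipeline graphs (the args['plot'] branch) or executes the master workflow, by default on all local cores via the MultiProc plugin. A minimal, self-contained sketch of that run/plot pattern, with a hypothetical two-node toy workflow standing in for master (the node names and helper function below are made up for illustration), might look like this:

import multiprocessing
import nipype.pipeline.engine as pe
from nipype.interfaces.utility import Function

def add_one(x):
    return x + 1

toy = pe.Workflow(name='toy_wf')  # stand-in for the 'master' workflow
n1 = pe.Node(Function(input_names=['x'], output_names=['out'], function=add_one), name='n1')
n2 = pe.Node(Function(input_names=['x'], output_names=['out'], function=add_one), name='n2')
n1.inputs.x = 1
toy.connect(n1, 'out', n2, 'x')

# Equivalent of the plot branch (requires Graphviz to be installed):
# toy.write_graph(dotfilename='toy.dot', graph2use='orig', format='png', simple_form=False)

# Equivalent of the default branch: run on every available core.
toy.run(plugin='MultiProc', plugin_args={'n_procs': multiprocessing.cpu_count()})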
Example #40
0
    def process(self):
        # Process time
        now = datetime.datetime.now().strftime("%Y%m%d_%H%M")
       
        # Initialization
        if os.path.exists(os.path.join(self.base_directory,"LOG","pypeline.log")):
            os.unlink(os.path.join(self.base_directory,"LOG","pypeline.log"))
        config.update_config({'logging': {'log_directory': os.path.join(self.base_directory,"LOG"),
                                  'log_to_file': True},
                              'execution': {}
                              })
        logging.update_logging(config)
        flow = pe.Workflow(name='diffusion_pipeline', base_dir=os.path.join(self.base_directory,'NIPYPE'))
        iflogger = logging.getLogger('interface')
       
        # Data import
        datasource = pe.Node(interface=nio.DataGrabber(outfields = ['diffusion','T1','T2']), name='datasource')
        datasource.inputs.base_directory = os.path.join(self.base_directory,'NIFTI')
        datasource.inputs.template = '*'
        datasource.inputs.raise_on_empty = False
        datasource.inputs.field_template = dict(diffusion=self.global_conf.imaging_model+'.nii.gz',T1='T1.nii.gz',T2='T2.nii.gz')
       
        # Data sinker for output
        sinker = pe.Node(nio.DataSink(), name="sinker")
        sinker.inputs.base_directory = os.path.join(self.base_directory, "RESULTS")
        
        # Clear previous outputs
        self.clear_stages_outputs()

        if self.stages['Preprocessing'].enabled:
            preproc_flow = self.create_stage_flow("Preprocessing")
            flow.connect([
                (datasource,preproc_flow,[("diffusion","inputnode.diffusion")]),
                ])
       
        if self.stages['Segmentation'].enabled:
            if self.stages['Segmentation'].config.seg_tool == "Freesurfer":
                if not self.stages['Segmentation'].config.use_existing_freesurfer_data:
                    self.stages['Segmentation'].config.freesurfer_subjects_dir = os.path.join(self.base_directory)
                    self.stages['Segmentation'].config.freesurfer_subject_id = os.path.join(self.base_directory,'FREESURFER')
                    if (not os.path.exists(os.path.join(self.base_directory,'NIPYPE/diffusion_pipeline/segmentation_stage/reconall/result_reconall.pklz'))) and os.path.exists(os.path.join(self.base_directory,'FREESURFER')):
                        shutil.rmtree(os.path.join(self.base_directory,'FREESURFER'))
            seg_flow = self.create_stage_flow("Segmentation")
            if self.stages['Segmentation'].config.seg_tool == "Freesurfer":
                flow.connect([(datasource,seg_flow, [('T1','inputnode.T1')])])
       
        if self.stages['Parcellation'].enabled:
            parc_flow = self.create_stage_flow("Parcellation")
            if self.stages['Segmentation'].config.seg_tool == "Freesurfer":
                flow.connect([(seg_flow,parc_flow, [('outputnode.subjects_dir','inputnode.subjects_dir'),
                                                    ('outputnode.subject_id','inputnode.subject_id')]),
                            ])
            else:
                flow.connect([
                            (seg_flow,parc_flow,[("outputnode.custom_wm_mask","inputnode.custom_wm_mask")])
                            ])
                                               
        if self.stages['Registration'].enabled:
            reg_flow = self.create_stage_flow("Registration")
            flow.connect([
                          (datasource,reg_flow,[('T1','inputnode.T1'),('T2','inputnode.T2')]),
                          (preproc_flow,reg_flow, [('outputnode.diffusion_preproc','inputnode.diffusion')]),
                          (parc_flow,reg_flow, [('outputnode.wm_mask_file','inputnode.wm_mask'),('outputnode.roi_volumes','inputnode.roi_volumes')]),
                          ])
            if self.stages['Registration'].config.registration_mode == "BBregister (FS)":
                flow.connect([
                          (seg_flow,reg_flow, [('outputnode.subjects_dir','inputnode.subjects_dir'),
                                                ('outputnode.subject_id','inputnode.subject_id')]),
                          ])
       
        if self.stages['Diffusion'].enabled:
            diff_flow = self.create_stage_flow("Diffusion")
            flow.connect([
                        (preproc_flow,diff_flow, [('outputnode.diffusion_preproc','inputnode.diffusion')]),
                        (reg_flow,diff_flow, [('outputnode.wm_mask_registered','inputnode.wm_mask_registered')]),
                        (reg_flow,diff_flow,[('outputnode.roi_volumes_registered','inputnode.roi_volumes')])
                        ])
                       
        if self.stages['Connectome'].enabled:
            if self.stages['Diffusion'].config.processing_tool == 'FSL':
                self.stages['Connectome'].config.probtrackx = True
            con_flow = self.create_stage_flow("Connectome")
            flow.connect([
                        (parc_flow,con_flow, [('outputnode.parcellation_scheme','inputnode.parcellation_scheme')]),
                        (diff_flow,con_flow, [('outputnode.track_file','inputnode.track_file'),('outputnode.gFA','inputnode.gFA'),
                                              ('outputnode.roi_volumes','inputnode.roi_volumes_registered'),
                                              ('outputnode.skewness','inputnode.skewness'),('outputnode.kurtosis','inputnode.kurtosis'),
                                              ('outputnode.P0','inputnode.P0')]),
                        (con_flow,sinker, [('outputnode.connectivity_matrices',now+'.connectivity_matrices')])
                        ])
            
            if self.stages['Parcellation'].config.parcellation_scheme == "Custom":
                flow.connect([(parc_flow,con_flow, [('outputnode.atlas_info','inputnode.atlas_info')])])
                
       
        iflogger.info("**** Processing ****")
       
        if(self.number_of_cores != 1):
            flow.run(plugin='MultiProc', plugin_args={'n_procs' : self.number_of_cores})
        else:
            flow.run()
       
        self.fill_stages_outputs()
        
        # Clean undesired folders/files
        rm_file_list = ['rh.EC_average','lh.EC_average','fsaverage']
        for file_to_rm in rm_file_list:
            if os.path.exists(os.path.join(self.base_directory,file_to_rm)):
                os.remove(os.path.join(self.base_directory,file_to_rm))
       
        # copy .ini and log file
        outdir = os.path.join(self.base_directory,"RESULTS",now)
        if not os.path.exists(outdir):
            os.makedirs(outdir)
        shutil.copy(self.config_file,outdir)
        shutil.copy(os.path.join(self.base_directory,'LOG','pypeline.log'),outdir)
       
        iflogger.info("**** Processing finished ****")
       
        return True, 'Processing successful'
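
The initialization above — remove any stale pypeline.log, point nipype's logging at the project's LOG directory, refresh the loggers, then fetch the 'interface' logger — is a recurring pattern. A minimal stand-alone sketch of it (the base_directory path is a placeholder):

import os
from nipype import config, logging

base_directory = '/tmp/example_project'  # placeholder project directory
log_dir = os.path.join(base_directory, 'LOG')
if not os.path.exists(log_dir):
    os.makedirs(log_dir)

# Start each run with a fresh pypeline.log
stale_log = os.path.join(log_dir, 'pypeline.log')
if os.path.exists(stale_log):
    os.unlink(stale_log)

config.update_config({'logging': {'log_directory': log_dir, 'log_to_file': True},
                      'execution': {}})
logging.update_logging(config)

iflogger = logging.getLogger('interface')
iflogger.info("logging is now mirrored to %s", stale_log)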
Example #41
0
def _create_single_session(dataDict, master_config, interpMode, pipeline_name):
    """
    Create singleSession workflow on a single session

    This is the main function to call when processing a data set with T1 & T2
    data.  ExperimentBaseDirectoryPrefix is the base of the directory to place results, T1Images & T2Images
    are the lists of images to be used in the auto-workup. atlas_fname_wpath is
    the path and filename of the atlas to use.

    :param dataDict:
    :param master_config:
    :param interpMode:
    :param pipeline_name:
    :return:
    """
    assert (
        "tissue_classify" in master_config["components"]
        or "auxlmk" in master_config["components"]
        or "denoise" in master_config["components"]
        or "landmark" in master_config["components"]
        or "segmentation" in master_config["components"]
        or "jointfusion_2015_wholebrain" in master_config["components"]
    )

    from nipype import config, logging

    config.update_config(master_config)  # Set universal pipeline options
    logging.update_logging(config)

    from BAW.workflows.baseline import generate_single_session_template_wf

    project = dataDict["project"]
    subject = dataDict["subject"]
    session = dataDict["session"]

    blackListFileName = dataDict["T1s"][0] + "_noDenoise"
    isBlackList = os.path.isfile(blackListFileName)

    pname = "{0}_{1}_{2}".format(master_config["workflow_phase"], subject, session)
    onlyT1 = not (len(dataDict["T2s"]) > 0)
    hasPDs = len(dataDict["PDs"]) > 0
    hasFLs = len(dataDict["FLs"]) > 0
    if onlyT1:
        print("T1 Only processing starts ...")
    else:
        print("Multimodal processing starts ...")

    doDenoise = False
    if "denoise" in master_config["components"]:
        if isBlackList:
            print(
                """
                  Denoise is ignored when the session is in the blacklist.
                  There is a known issue that the Landmark Detection algorithm
                  may not work well with the denoising step.
                  """
            )
            doDenoise = False
        else:
            doDenoise = True
    useEMSP = False
    if len(dataDict["EMSP"]) > 0:
        useEMSP = True
    sessionWorkflow = generate_single_session_template_wf(
        project,
        subject,
        session,
        onlyT1,
        hasPDs,
        hasFLs,
        master_config,
        phase=master_config["workflow_phase"],
        interpMode=interpMode,
        pipeline_name=pipeline_name,
        doDenoise=doDenoise,
        badT2=dataDict["BadT2"],
        useEMSP=useEMSP,
    )
    sessionWorkflow.base_dir = master_config["cachedir"]

    sessionWorkflow_inputsspec = sessionWorkflow.get_node("inputspec")
    sessionWorkflow_inputsspec.inputs.T1s = dataDict["T1s"]
    sessionWorkflow_inputsspec.inputs.T2s = dataDict["T2s"]
    sessionWorkflow_inputsspec.inputs.PDs = dataDict["PDs"]
    sessionWorkflow_inputsspec.inputs.FLs = dataDict["FLs"]
    if useEMSP:
        sessionWorkflow_inputsspec.inputs.EMSP = dataDict["EMSP"][0]
    sessionWorkflow_inputsspec.inputs.OTHERs = dataDict["OTHERs"]
    return sessionWorkflow
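
The factory above returns a workflow whose public inputs live on a node named 'inputspec', which the caller fills in via get_node(). A minimal self-contained sketch of that idiom (the workflow, fields, and file name below are hypothetical, not the BAW pipeline itself):

import nipype.pipeline.engine as pe
from nipype.interfaces.utility import IdentityInterface, Function

def count_files(T1s):
    return len(T1s)

wf = pe.Workflow(name='session_like_wf')
inputspec = pe.Node(IdentityInterface(fields=['T1s', 'T2s']), name='inputspec')
counter = pe.Node(Function(input_names=['T1s'], output_names=['n'],
                           function=count_files), name='counter')
wf.connect(inputspec, 'T1s', counter, 'T1s')

# A caller later looks the node up by name and supplies concrete values
# (the file name is a placeholder):
wf.get_node('inputspec').inputs.T1s = ['/data/sub01_T1.nii.gz']
wf.run()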
Example #42
0
def _create_singleSession(dataDict, master_config, interpMode, pipeline_name):
    """
    create singleSession workflow on a single session

    This is the main function to call when processing a data set with T1 & T2
    data.  ExperimentBaseDirectoryPrefix is the base of the directory to place results, T1Images & T2Images
    are the lists of images to be used in the auto-workup. atlas_fname_wpath is
    the path and filename of the atlas to use.
    """
    assert 'tissue_classify' in master_config['components'] or \
           'auxlmk' in master_config['components'] or \
           'denoise' in master_config['components'] or \
           'landmark' in master_config['components'] or \
           'segmentation' in master_config['components'] or \
           'jointfusion_2015_wholebrain' in master_config['components']

    from nipype import config, logging

    config.update_config(master_config)  # Set universal pipeline options
    logging.update_logging(config)

    from workflows.baseline import generate_single_session_template_WF

    project = dataDict['project']
    subject = dataDict['subject']
    session = dataDict['session']

    blackListFileName = dataDict['T1s'][0] + '_noDenoise'
    isBlackList = os.path.isfile(blackListFileName)

    pname = "{0}_{1}_{2}".format(master_config['workflow_phase'], subject,
                                 session)
    onlyT1 = not (len(dataDict['T2s']) > 0)
    if onlyT1:
        print("T1 Only processing starts ...")
    else:
        print("Multimodal processing starts ...")

    doDenoise = False
    if ('denoise' in master_config['components']):
        if isBlackList:
            print("""
                  Denoise is ignored when the session is in the blacklist.
                  There is a known issue that the Landmark Detection algorithm
                  may not work well with the denoising step.
                  """)
            doDenoise = False
        else:
            doDenoise = True
    useEMSP = False
    if len(dataDict['EMSP']) > 0:
        useEMSP = True
    sessionWorkflow = generate_single_session_template_WF(
        project,
        subject,
        session,
        onlyT1,
        master_config,
        phase=master_config['workflow_phase'],
        interpMode=interpMode,
        pipeline_name=pipeline_name,
        doDenoise=doDenoise,
        badT2=dataDict['BadT2'],
        useEMSP=useEMSP)
    sessionWorkflow.base_dir = master_config['cachedir']

    sessionWorkflow_inputsspec = sessionWorkflow.get_node('inputspec')
    sessionWorkflow_inputsspec.inputs.T1s = dataDict['T1s']
    sessionWorkflow_inputsspec.inputs.T2s = dataDict['T2s']
    sessionWorkflow_inputsspec.inputs.PDs = dataDict['PDs']
    sessionWorkflow_inputsspec.inputs.FLs = dataDict['FLs']
    if useEMSP:
        sessionWorkflow_inputsspec.inputs.EMSP = dataDict['EMSP'][0]
    sessionWorkflow_inputsspec.inputs.OTHERs = dataDict['OTHERs']
    return sessionWorkflow
Example #43
0
def RunSubjectWorkflow(args):
    """
                           .-----------.
                       --- | Session 1 | ---> /project/subjectA/session1/phase/
                     /     *-----------*
    .-----------.   /
    | Subject A | <
    *-----------*   \
                     \     .-----------.
                       --- | Session 2 | ---> /project/subjectA/session2/phase/
                           *-----------*
    **** Replaces WorkflowT1T2.py ****
    """
    database, start_time, subject, master_config = args
    assert 'baseline' in master_config['components'] or 'longitudinal' in master_config['components'], "Baseline or Longitudinal is not in WORKFLOW_COMPONENTS!"
    # HACK:
    #    To avoid a "sqlite3.ProgrammingError: Base Cursor.__init__ not called" error
    #    using multiprocessing.map_async(), re-instantiate database
    # database.__init__(defaultDBName=database.dbName, subject_list=database.subjectList)
    #
    # END HACK
    import time

    from nipype import config, logging
    config.update_config(master_config)  # Set universal pipeline options
    assert config.get('execution', 'plugin') == master_config['execution']['plugin']
    # DEBUG
    # config.enable_debug_mode()
    # config.set('execution', 'stop_on_first_rerun', 'true')
    # END DEBUG
    logging.update_logging(config)

    import nipype.pipeline.engine as pe
    import nipype.interfaces.base as nbase
    import nipype.interfaces.io as nio
    from nipype.interfaces.utility import IdentityInterface, Function
    import traits

    from baw_exp import OpenSubjectDatabase
    from SessionDB import SessionDB
    from PipeLineFunctionHelpers import convertToList
    from atlasNode import MakeAtlasNode
    from utilities.misc import GenerateSubjectOutputPattern as outputPattern
    from utilities.misc import GenerateWFName

    while time.time() < start_time:
        time.sleep(start_time - time.time() + 1)
        print "Delaying start for {subject}".format(subject=subject)
    print("===================== SUBJECT: {0} ===========================".format(subject))

    subjectWorkflow = pe.Workflow(name="BAW_StandardWorkup_subject_{0}".format(subject))
    subjectWorkflow.base_dir = config.get('logging', 'log_directory')
    # subjectWorkflow.config['execution']['plugin'] = 'Linear'  # Hardcoded in WorkupT1T2.py - why?
    # DEBUG
    # subjectWorkflow.config['execution']['stop_on_first_rerun'] = 'true'
    # END DEBUG
    atlasNode = MakeAtlasNode(master_config['atlascache'], 'BAtlas')

    sessionWorkflow = dict()
    inputsSpec = dict()
    sessions = database.getSessionsFromSubject(subject)
    # print "These are the sessions: ", sessions
    if 'baseline' in master_config['components']:
        current_phase = 'baseline'
        from baseline import create_baseline as create_wkfl
    elif 'longitudinal' in master_config['components']:
        current_phase = 'longitudinal'
        from longitudinal import create_longitudial as create_wkfl

    for session in sessions:  # TODO (future): Replace with iterable inputSpec node and add Function node for getAllFiles()
        project = database.getProjFromSession(session)
        pname = "{0}_{1}".format(session, current_phase)  # Long node names make graphs a pain to read/print
        # pname = GenerateWFName(project, subject, session, current_phase)
        print "Building session pipeline for {0}".format(session)
        inputsSpec[session] = pe.Node(name='inputspec_{0}'.format(session),
                                      interface=IdentityInterface(fields=['T1s', 'T2s', 'PDs', 'FLs', 'OTs']))
        inputsSpec[session].inputs.T1s = database.getFilenamesByScantype(session, ['T1-15', 'T1-30'])
        inputsSpec[session].inputs.T2s = database.getFilenamesByScantype(session, ['T2-15', 'T2-30'])
        inputsSpec[session].inputs.PDs = database.getFilenamesByScantype(session, ['PD-15', 'PD-30'])
        inputsSpec[session].inputs.FLs = database.getFilenamesByScantype(session, ['FL-15', 'FL-30'])
        inputsSpec[session].inputs.OTs = database.getFilenamesByScantype(session, ['OTHER-15', 'OTHER-30'])

        sessionWorkflow[session] = create_wkfl(project, subject, session, master_config,
                                               interpMode='Linear', pipeline_name=pname)

        subjectWorkflow.connect([(inputsSpec[session], sessionWorkflow[session], [('T1s', 'inputspec.T1s'),
                                                                                  ('T2s', 'inputspec.T2s'),
                                                                                  ('PDs', 'inputspec.PDs'),
                                                                                  ('FLs', 'inputspec.FLs'),
                                                                                  ('OTs', 'inputspec.OTHERs'),
                                                                                  ]),
                                 (atlasNode, sessionWorkflow[session], [('template_landmarks_50Lmks_fcsv',
                                                                         'inputspec.atlasLandmarkFilename'),
                                                                        ('template_weights_50Lmks_wts',
                                                                         'inputspec.atlasWeightFilename'),
                                                                        ('LLSModel_50Lmks_hdf5', 'inputspec.LLSModel'),
                                                                        ('T1_50Lmks_mdl', 'inputspec.inputTemplateModel')]),
                                ])
        if current_phase == 'baseline':
            subjectWorkflow.connect([(atlasNode, sessionWorkflow[session], [('template_t1', 'inputspec.template_t1'),
                                                                            ('ExtendedAtlasDefinition_xml',
                                                                             'inputspec.atlasDefinition')]),
                                 ])
        else:
            assert current_phase == 'longitudinal', "Phase value is unknown: {0}".format(current_phase)

    from utils import run_workflow, print_workflow
    if False:
        print_workflow(subjectWorkflow, plugin=master_config['execution']['plugin'], dotfilename='template')
    return run_workflow(subjectWorkflow, plugin=master_config['execution']['plugin'], plugin_args=master_config['plugin_args'])
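
RunSubjectWorkflow unpacks a (database, start_time, subject, master_config) tuple and sleeps until start_time, which fits the multiprocessing.Pool.map_async dispatch hinted at in the HACK comment. A stand-alone sketch of that dispatch pattern, with a dummy worker in place of RunSubjectWorkflow (the subject names and stagger interval are made up):

import time
import multiprocessing

def run_subject(args):
    start_time, subject = args
    # Delay the start until the assigned time, as RunSubjectWorkflow does
    while time.time() < start_time:
        time.sleep(start_time - time.time() + 1)
    return "finished {0}".format(subject)

if __name__ == '__main__':
    now = time.time()
    # Stagger subjects by 5 seconds to avoid hammering a shared database/filesystem
    jobs = [(now + 5 * i, subj) for i, subj in enumerate(['subjA', 'subjB', 'subjC'])]
    pool = multiprocessing.Pool(processes=2)
    async_result = pool.map_async(run_subject, jobs)
    pool.close()
    print(async_result.get())
    pool.join()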
Example #44
0
    def create_workflow(self):
        """Create the Niype workflow of the super-resolution pipeline.

        It is composed of a succession of Nodes and their corresponding parameters,
        where the output of node i goes to the input of node i+1.

        """
        sub_ses = self.subject
        if self.session is not None:
            sub_ses = ''.join([sub_ses, '_', self.session])

        if self.session is None:
            wf_base_dir = os.path.join(
                self.output_dir, '-'.join(["nipype", __nipype_version__]),
                self.subject, "rec-{}".format(self.sr_id))
            final_res_dir = os.path.join(self.output_dir,
                                         '-'.join(["pymialsrtk",
                                                   __version__]), self.subject)
        else:
            wf_base_dir = os.path.join(
                self.output_dir, '-'.join(["nipype", __nipype_version__]),
                self.subject, self.session, "rec-{}".format(self.sr_id))
            final_res_dir = os.path.join(self.output_dir,
                                         '-'.join(["pymialsrtk", __version__]),
                                         self.subject, self.session)

        if not os.path.exists(wf_base_dir):
            os.makedirs(wf_base_dir)
        print("Process directory: {}".format(wf_base_dir))

        # Initialization (Not sure we can control the name of nipype log)
        if os.path.isfile(os.path.join(wf_base_dir, "pypeline.log")):
            os.unlink(os.path.join(wf_base_dir, "pypeline.log"))

        self.wf = Workflow(name=self.pipeline_name, base_dir=wf_base_dir)

        config.update_config({
            'logging': {
                'log_directory': os.path.join(wf_base_dir),
                'log_to_file': True
            },
            'execution': {
                'remove_unnecessary_outputs': False,
                'stop_on_first_crash': True,
                'stop_on_first_rerun': False,
                'crashfile_format': "txt",
                'use_relative_paths': True,
                'write_provenance': False
            }
        })

        # Update nipype logging with config
        nipype_logging.update_logging(config)
        # config.enable_provenance()

        if self.use_manual_masks:
            dg = Node(interface=DataGrabber(outfields=['T2ws', 'masks']),
                      name='data_grabber')
            dg.inputs.base_directory = self.bids_dir
            dg.inputs.template = '*'
            dg.inputs.raise_on_empty = False
            dg.inputs.sort_filelist = True

            if self.session is not None:
                t2ws_template = os.path.join(
                    self.subject, self.session, 'anat',
                    '_'.join([sub_ses, '*run-*', '*T2w.nii.gz']))
                if self.m_masks_desc is not None:
                    masks_template = os.path.join(
                        'derivatives', self.m_masks_derivatives_dir,
                        self.subject, self.session, 'anat', '_'.join([
                            sub_ses, '*_run-*', '_desc-' + self.m_masks_desc,
                            '*mask.nii.gz'
                        ]))
                else:
                    masks_template = os.path.join(
                        'derivatives', self.m_masks_derivatives_dir,
                        self.subject, self.session, 'anat',
                        '_'.join([sub_ses, '*run-*', '*mask.nii.gz']))
            else:
                t2ws_template = os.path.join(self.subject, 'anat',
                                             sub_ses + '*_run-*_T2w.nii.gz')

                if self.m_masks_desc is not None:
                    # Note: this branch handles the no-session case, so the path
                    # must not include self.session (which is None here).
                    masks_template = os.path.join(
                        'derivatives', self.m_masks_derivatives_dir,
                        self.subject, 'anat', '_'.join([
                            sub_ses, '*_run-*', '_desc-' + self.m_masks_desc,
                            '*mask.nii.gz'
                        ]))
                else:
                    masks_template = os.path.join(
                        'derivatives', self.m_masks_derivatives_dir,
                        self.subject, 'anat', sub_ses + '*_run-*_*mask.nii.gz')

            dg.inputs.field_template = dict(T2ws=t2ws_template,
                                            masks=masks_template)

            brainMask = MapNode(
                interface=IdentityInterface(fields=['out_file']),
                name='brain_masks_bypass',
                iterfield=['out_file'])

            if self.m_stacks is not None:
                custom_masks_filter = Node(
                    interface=preprocess.FilteringByRunid(),
                    name='custom_masks_filter')
                custom_masks_filter.inputs.stacks_id = self.m_stacks

        else:
            dg = Node(interface=DataGrabber(outfields=['T2ws']),
                      name='data_grabber')

            dg.inputs.base_directory = self.bids_dir
            dg.inputs.template = '*'
            dg.inputs.raise_on_empty = False
            dg.inputs.sort_filelist = True

            dg.inputs.field_template = dict(
                T2ws=os.path.join(self.subject, 'anat', sub_ses +
                                  '*_run-*_T2w.nii.gz'))
            if self.session is not None:
                dg.inputs.field_template = dict(T2ws=os.path.join(
                    self.subject, self.session, 'anat', '_'.join(
                        [sub_ses, '*run-*', '*T2w.nii.gz'])))

            if self.m_stacks is not None:
                t2ws_filter_prior_masks = Node(
                    interface=preprocess.FilteringByRunid(),
                    name='t2ws_filter_prior_masks')
                t2ws_filter_prior_masks.inputs.stacks_id = self.m_stacks

            brainMask = MapNode(interface=preprocess.BrainExtraction(),
                                name='brainExtraction',
                                iterfield=['in_file'])

            brainMask.inputs.bids_dir = self.bids_dir
            brainMask.inputs.in_ckpt_loc = pkg_resources.resource_filename(
                "pymialsrtk",
                os.path.join("data", "Network_checkpoints",
                             "Network_checkpoints_localization",
                             "Unet.ckpt-88000.index")).split('.index')[0]
            brainMask.inputs.threshold_loc = 0.49
            brainMask.inputs.in_ckpt_seg = pkg_resources.resource_filename(
                "pymialsrtk",
                os.path.join("data", "Network_checkpoints",
                             "Network_checkpoints_segmentation",
                             "Unet.ckpt-20000.index")).split('.index')[0]
            brainMask.inputs.threshold_seg = 0.5

        t2ws_filtered = Node(interface=preprocess.FilteringByRunid(),
                             name='t2ws_filtered')
        masks_filtered = Node(interface=preprocess.FilteringByRunid(),
                              name='masks_filtered')

        if not self.m_skip_stacks_ordering:
            stacksOrdering = Node(interface=preprocess.StacksOrdering(),
                                  name='stackOrdering')
        else:
            stacksOrdering = Node(
                interface=IdentityInterface(fields=['stacks_order']),
                name='stackOrdering')
            stacksOrdering.inputs.stacks_order = self.m_stacks

        if not self.m_skip_nlm_denoising:
            nlmDenoise = MapNode(interface=preprocess.BtkNLMDenoising(),
                                 name='nlmDenoise',
                                 iterfield=['in_file', 'in_mask'])
            nlmDenoise.inputs.bids_dir = self.bids_dir

            # Without the mask, the first correct-slice-intensity step...
            srtkCorrectSliceIntensity01_nlm = MapNode(
                interface=preprocess.MialsrtkCorrectSliceIntensity(),
                name='srtkCorrectSliceIntensity01_nlm',
                iterfield=['in_file', 'in_mask'])
            srtkCorrectSliceIntensity01_nlm.inputs.bids_dir = self.bids_dir
            srtkCorrectSliceIntensity01_nlm.inputs.out_postfix = '_uni'

        srtkCorrectSliceIntensity01 = MapNode(
            interface=preprocess.MialsrtkCorrectSliceIntensity(),
            name='srtkCorrectSliceIntensity01',
            iterfield=['in_file', 'in_mask'])
        srtkCorrectSliceIntensity01.inputs.bids_dir = self.bids_dir
        srtkCorrectSliceIntensity01.inputs.out_postfix = '_uni'

        srtkSliceBySliceN4BiasFieldCorrection = MapNode(
            interface=preprocess.MialsrtkSliceBySliceN4BiasFieldCorrection(),
            name='srtkSliceBySliceN4BiasFieldCorrection',
            iterfield=['in_file', 'in_mask'])
        srtkSliceBySliceN4BiasFieldCorrection.inputs.bids_dir = self.bids_dir

        srtkSliceBySliceCorrectBiasField = MapNode(
            interface=preprocess.MialsrtkSliceBySliceCorrectBiasField(),
            name='srtkSliceBySliceCorrectBiasField',
            iterfield=['in_file', 'in_mask', 'in_field'])
        srtkSliceBySliceCorrectBiasField.inputs.bids_dir = self.bids_dir

        # 4-modules sequence to be defined as a stage.
        if not self.m_skip_nlm_denoising:
            srtkCorrectSliceIntensity02_nlm = MapNode(
                interface=preprocess.MialsrtkCorrectSliceIntensity(),
                name='srtkCorrectSliceIntensity02_nlm',
                iterfield=['in_file', 'in_mask'])
            srtkCorrectSliceIntensity02_nlm.inputs.bids_dir = self.bids_dir

            srtkIntensityStandardization01_nlm = Node(
                interface=preprocess.MialsrtkIntensityStandardization(),
                name='srtkIntensityStandardization01_nlm')
            srtkIntensityStandardization01_nlm.inputs.bids_dir = self.bids_dir

            srtkHistogramNormalization_nlm = Node(
                interface=preprocess.MialsrtkHistogramNormalization(),
                name='srtkHistogramNormalization_nlm')
            srtkHistogramNormalization_nlm.inputs.bids_dir = self.bids_dir

            srtkIntensityStandardization02_nlm = Node(
                interface=preprocess.MialsrtkIntensityStandardization(),
                name='srtkIntensityStandardization02_nlm')
            srtkIntensityStandardization02_nlm.inputs.bids_dir = self.bids_dir

        # 4-modules sequence to be defined as a stage.
        srtkCorrectSliceIntensity02 = MapNode(
            interface=preprocess.MialsrtkCorrectSliceIntensity(),
            name='srtkCorrectSliceIntensity02',
            iterfield=['in_file', 'in_mask'])
        srtkCorrectSliceIntensity02.inputs.bids_dir = self.bids_dir

        srtkIntensityStandardization01 = Node(
            interface=preprocess.MialsrtkIntensityStandardization(),
            name='srtkIntensityStandardization01')
        srtkIntensityStandardization01.inputs.bids_dir = self.bids_dir

        srtkHistogramNormalization = Node(
            interface=preprocess.MialsrtkHistogramNormalization(),
            name='srtkHistogramNormalization')
        srtkHistogramNormalization.inputs.bids_dir = self.bids_dir

        srtkIntensityStandardization02 = Node(
            interface=preprocess.MialsrtkIntensityStandardization(),
            name='srtkIntensityStandardization02')
        srtkIntensityStandardization02.inputs.bids_dir = self.bids_dir

        srtkMaskImage01 = MapNode(interface=preprocess.MialsrtkMaskImage(),
                                  name='srtkMaskImage01',
                                  iterfield=['in_file', 'in_mask'])
        srtkMaskImage01.inputs.bids_dir = self.bids_dir

        srtkImageReconstruction = Node(
            interface=reconstruction.MialsrtkImageReconstruction(),
            name='srtkImageReconstruction')
        srtkImageReconstruction.inputs.bids_dir = self.bids_dir
        srtkImageReconstruction.inputs.sub_ses = sub_ses
        srtkImageReconstruction.inputs.no_reg = self.m_skip_svr

        srtkTVSuperResolution = Node(
            interface=reconstruction.MialsrtkTVSuperResolution(),
            name='srtkTVSuperResolution')
        srtkTVSuperResolution.inputs.bids_dir = self.bids_dir
        srtkTVSuperResolution.inputs.sub_ses = sub_ses
        srtkTVSuperResolution.inputs.in_loop = self.primal_dual_loops
        srtkTVSuperResolution.inputs.in_deltat = self.deltatTV
        srtkTVSuperResolution.inputs.in_lambda = self.lambdaTV
        srtkTVSuperResolution.inputs.use_manual_masks = self.use_manual_masks

        srtkN4BiasFieldCorrection = Node(
            interface=postprocess.MialsrtkN4BiasFieldCorrection(),
            name='srtkN4BiasFieldCorrection')
        srtkN4BiasFieldCorrection.inputs.bids_dir = self.bids_dir

        if self.m_do_refine_hr_mask:
            srtkHRMask = Node(
                interface=postprocess.MialsrtkRefineHRMaskByIntersection(),
                name='srtkHRMask')
            srtkHRMask.inputs.bids_dir = self.bids_dir
        else:
            srtkHRMask = Node(interface=postprocess.BinarizeImage(),
                              name='srtkHRMask')

        srtkMaskImage02 = Node(interface=preprocess.MialsrtkMaskImage(),
                               name='srtkMaskImage02')
        srtkMaskImage02.inputs.bids_dir = self.bids_dir

        # Build workflow: connections of the nodes
        # Nodes ready: linking now
        if self.use_manual_masks:
            if self.m_stacks is not None:
                self.wf.connect(dg, "masks", custom_masks_filter,
                                "input_files")
                self.wf.connect(custom_masks_filter, "output_files", brainMask,
                                "out_file")
            else:
                self.wf.connect(dg, "masks", brainMask, "out_file")
        else:
            if self.m_stacks is not None:
                self.wf.connect(dg, "T2ws", t2ws_filter_prior_masks,
                                "input_files")
                self.wf.connect(t2ws_filter_prior_masks, "output_files",
                                brainMask, "in_file")
            else:
                self.wf.connect(dg, "T2ws", brainMask, "in_file")

        if not self.m_skip_stacks_ordering:
            self.wf.connect(brainMask, "out_file", stacksOrdering,
                            "input_masks")

        self.wf.connect(stacksOrdering, "stacks_order", t2ws_filtered,
                        "stacks_id")
        self.wf.connect(dg, "T2ws", t2ws_filtered, "input_files")

        self.wf.connect(stacksOrdering, "stacks_order", masks_filtered,
                        "stacks_id")
        self.wf.connect(brainMask, "out_file", masks_filtered, "input_files")

        if not self.m_skip_nlm_denoising:
            self.wf.connect(t2ws_filtered,
                            ("output_files", utils.sort_ascending), nlmDenoise,
                            "in_file")
            self.wf.connect(masks_filtered,
                            ("output_files", utils.sort_ascending), nlmDenoise,
                            "in_mask")  ## Comment to match docker process

            self.wf.connect(nlmDenoise, ("out_file", utils.sort_ascending),
                            srtkCorrectSliceIntensity01_nlm, "in_file")
            self.wf.connect(masks_filtered,
                            ("output_files", utils.sort_ascending),
                            srtkCorrectSliceIntensity01_nlm, "in_mask")

        self.wf.connect(t2ws_filtered, ("output_files", utils.sort_ascending),
                        srtkCorrectSliceIntensity01, "in_file")
        self.wf.connect(masks_filtered, ("output_files", utils.sort_ascending),
                        srtkCorrectSliceIntensity01, "in_mask")

        if not self.m_skip_nlm_denoising:
            self.wf.connect(srtkCorrectSliceIntensity01_nlm,
                            ("out_file", utils.sort_ascending),
                            srtkSliceBySliceN4BiasFieldCorrection, "in_file")
        else:
            self.wf.connect(srtkCorrectSliceIntensity01,
                            ("out_file", utils.sort_ascending),
                            srtkSliceBySliceN4BiasFieldCorrection, "in_file")
        self.wf.connect(masks_filtered, ("output_files", utils.sort_ascending),
                        srtkSliceBySliceN4BiasFieldCorrection, "in_mask")

        self.wf.connect(srtkCorrectSliceIntensity01,
                        ("out_file", utils.sort_ascending),
                        srtkSliceBySliceCorrectBiasField, "in_file")
        self.wf.connect(srtkSliceBySliceN4BiasFieldCorrection,
                        ("out_fld_file", utils.sort_ascending),
                        srtkSliceBySliceCorrectBiasField, "in_field")
        self.wf.connect(masks_filtered, ("output_files", utils.sort_ascending),
                        srtkSliceBySliceCorrectBiasField, "in_mask")

        if not self.m_skip_nlm_denoising:
            self.wf.connect(srtkSliceBySliceN4BiasFieldCorrection,
                            ("out_im_file", utils.sort_ascending),
                            srtkCorrectSliceIntensity02_nlm, "in_file")
            self.wf.connect(masks_filtered,
                            ("output_files", utils.sort_ascending),
                            srtkCorrectSliceIntensity02_nlm, "in_mask")
            self.wf.connect(srtkCorrectSliceIntensity02_nlm,
                            ("out_file", utils.sort_ascending),
                            srtkIntensityStandardization01_nlm, "input_images")
            self.wf.connect(srtkIntensityStandardization01_nlm,
                            ("output_images", utils.sort_ascending),
                            srtkHistogramNormalization_nlm, "input_images")
            self.wf.connect(masks_filtered,
                            ("output_files", utils.sort_ascending),
                            srtkHistogramNormalization_nlm, "input_masks")
            self.wf.connect(srtkHistogramNormalization_nlm,
                            ("output_images", utils.sort_ascending),
                            srtkIntensityStandardization02_nlm, "input_images")

        self.wf.connect(srtkSliceBySliceCorrectBiasField,
                        ("out_im_file", utils.sort_ascending),
                        srtkCorrectSliceIntensity02, "in_file")
        self.wf.connect(masks_filtered, ("output_files", utils.sort_ascending),
                        srtkCorrectSliceIntensity02, "in_mask")
        self.wf.connect(srtkCorrectSliceIntensity02,
                        ("out_file", utils.sort_ascending),
                        srtkIntensityStandardization01, "input_images")

        self.wf.connect(srtkIntensityStandardization01,
                        ("output_images", utils.sort_ascending),
                        srtkHistogramNormalization, "input_images")
        self.wf.connect(masks_filtered, ("output_files", utils.sort_ascending),
                        srtkHistogramNormalization, "input_masks")
        self.wf.connect(srtkHistogramNormalization,
                        ("output_images", utils.sort_ascending),
                        srtkIntensityStandardization02, "input_images")

        if not self.m_skip_nlm_denoising:
            self.wf.connect(srtkIntensityStandardization02_nlm,
                            ("output_images", utils.sort_ascending),
                            srtkMaskImage01, "in_file")
            self.wf.connect(masks_filtered,
                            ("output_files", utils.sort_ascending),
                            srtkMaskImage01, "in_mask")
        else:
            self.wf.connect(srtkIntensityStandardization02,
                            ("output_images", utils.sort_ascending),
                            srtkMaskImage01, "in_file")
            self.wf.connect(masks_filtered,
                            ("output_files", utils.sort_ascending),
                            srtkMaskImage01, "in_mask")

        self.wf.connect(srtkMaskImage01, "out_im_file",
                        srtkImageReconstruction, "input_images")
        self.wf.connect(masks_filtered, "output_files",
                        srtkImageReconstruction, "input_masks")
        self.wf.connect(stacksOrdering, "stacks_order",
                        srtkImageReconstruction, "stacks_order")

        self.wf.connect(srtkIntensityStandardization02, "output_images",
                        srtkTVSuperResolution, "input_images")
        self.wf.connect(srtkImageReconstruction,
                        ("output_transforms", utils.sort_ascending),
                        srtkTVSuperResolution, "input_transforms")
        self.wf.connect(masks_filtered, ("output_files", utils.sort_ascending),
                        srtkTVSuperResolution, "input_masks")
        self.wf.connect(stacksOrdering, "stacks_order", srtkTVSuperResolution,
                        "stacks_order")

        self.wf.connect(srtkImageReconstruction, "output_sdi",
                        srtkTVSuperResolution, "input_sdi")

        if self.m_do_refine_hr_mask:
            self.wf.connect(srtkIntensityStandardization02,
                            ("output_images", utils.sort_ascending),
                            srtkHRMask, "input_images")
            self.wf.connect(masks_filtered,
                            ("output_files", utils.sort_ascending), srtkHRMask,
                            "input_masks")
            self.wf.connect(srtkImageReconstruction,
                            ("output_transforms", utils.sort_ascending),
                            srtkHRMask, "input_transforms")
            self.wf.connect(srtkTVSuperResolution, "output_sr", srtkHRMask,
                            "input_sr")
        else:
            self.wf.connect(srtkTVSuperResolution, "output_sr", srtkHRMask,
                            "input_image")

        self.wf.connect(srtkTVSuperResolution, "output_sr", srtkMaskImage02,
                        "in_file")
        self.wf.connect(srtkHRMask, "output_srmask", srtkMaskImage02,
                        "in_mask")

        self.wf.connect(srtkTVSuperResolution, "output_sr",
                        srtkN4BiasFieldCorrection, "input_image")
        self.wf.connect(srtkHRMask, "output_srmask", srtkN4BiasFieldCorrection,
                        "input_mask")

        # Datasinker
        finalFilenamesGeneration = Node(
            interface=postprocess.FilenamesGeneration(), name='filenames_gen')
        finalFilenamesGeneration.inputs.sub_ses = sub_ses
        finalFilenamesGeneration.inputs.sr_id = self.sr_id
        finalFilenamesGeneration.inputs.use_manual_masks = self.use_manual_masks

        self.wf.connect(stacksOrdering, "stacks_order",
                        finalFilenamesGeneration, "stacks_order")

        datasink = Node(interface=DataSink(), name='data_sinker')
        datasink.inputs.base_directory = final_res_dir

        if not self.m_skip_stacks_ordering:
            self.wf.connect(stacksOrdering, "report_image", datasink,
                            'figures.@stackOrderingQC')
            self.wf.connect(stacksOrdering, "motion_tsv", datasink,
                            'anat.@motionTSV')
        self.wf.connect(masks_filtered, ("output_files", utils.sort_ascending),
                        datasink, 'anat.@LRmasks')
        self.wf.connect(srtkIntensityStandardization02,
                        ("output_images", utils.sort_ascending), datasink,
                        'anat.@LRsPreproc')
        self.wf.connect(srtkImageReconstruction,
                        ("output_transforms", utils.sort_ascending), datasink,
                        'xfm.@transforms')
        self.wf.connect(finalFilenamesGeneration, "substitutions", datasink,
                        "substitutions")
        self.wf.connect(srtkMaskImage01, ("out_im_file", utils.sort_ascending),
                        datasink, 'anat.@LRsDenoised')
        self.wf.connect(srtkImageReconstruction, "output_sdi", datasink,
                        'anat.@SDI')
        self.wf.connect(srtkN4BiasFieldCorrection, "output_image", datasink,
                        'anat.@SR')
        self.wf.connect(srtkTVSuperResolution, "output_json_path", datasink,
                        'anat.@SRjson')
        self.wf.connect(srtkTVSuperResolution, "output_sr_png", datasink,
                        'figures.@SRpng')
        self.wf.connect(srtkHRMask, "output_srmask", datasink, 'anat.@SRmask')
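
Many connections above pass a (output_name, function) tuple, e.g. ("output_files", utils.sort_ascending), so the value is transformed on its way from one node to the next. A self-contained sketch of that nipype connect idiom, with a local sort_ascending standing in for the utils helper and placeholder file names:

import nipype.pipeline.engine as pe
from nipype.interfaces.utility import IdentityInterface, Function

def sort_ascending(files):
    # stand-in for utils.sort_ascending: return the list sorted in ascending order
    return sorted(files)

def pick_first(files):
    return files[0]

wf = pe.Workflow(name='connect_modifier_demo')
src = pe.Node(IdentityInterface(fields=['files']), name='src')
src.inputs.files = ['run-02.nii.gz', 'run-01.nii.gz']  # placeholder names
dst = pe.Node(Function(input_names=['files'], output_names=['first'],
                       function=pick_first), name='dst')

# The second element of the tuple is applied to the source output before it
# reaches 'files', so dst receives the list already sorted.
wf.connect(src, ('files', sort_ascending), dst, 'files')
wf.run()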
Example #45
0
def _create_singleSession(dataDict, master_config, interpMode, pipeline_name):
    """
    create singleSession workflow on a single session

    This is the main function to call when processing a data set with T1 & T2
    data.  ExperimentBaseDirectoryPrefix is the base of the directory to place results, T1Images & T2Images
    are the lists of images to be used in the auto-workup. atlas_fname_wpath is
    the path and filename of the atlas to use.
    """
    assert 'tissue_classify' in master_config['components'] or \
           'auxlmk' in master_config['components'] or \
           'denoise' in master_config['components'] or \
           'landmark' in master_config['components'] or \
           'segmentation' in master_config['components'] or \
           'jointfusion_2015_wholebrain' in master_config['components']

    from nipype import config, logging

    config.update_config(master_config)  # Set universal pipeline options
    logging.update_logging(config)

    from workflows.baseline import generate_single_session_template_WF

    project = dataDict['project']
    subject = dataDict['subject']
    session = dataDict['session']

    blackListFileName = dataDict['T1s'][0] + '_noDenoise'
    isBlackList = os.path.isfile(blackListFileName)

    pname = "{0}_{1}_{2}".format(master_config['workflow_phase'], subject, session)
    onlyT1 = not (len(dataDict['T2s']) > 0)
    if onlyT1:
        print("T1 Only processing starts ...")
    else:
        print("Multimodal processing starts ...")

    doDenoise = False
    if ('denoise' in master_config['components']):
        if isBlackList:
            print("""
                  Denoise is ignored when the session is in the blacklist.
                  There is a known issue that the Landmark Detection algorithm
                  may not work well with the denoising step.
                  """)
            doDenoise = False
        else:
            doDenoise = True
    useEMSP = False
    if len(dataDict['EMSP']) > 0:
        useEMSP = True
    sessionWorkflow = generate_single_session_template_WF(project, subject, session, onlyT1, master_config,
                                                          phase=master_config['workflow_phase'],
                                                          interpMode=interpMode,
                                                          pipeline_name=pipeline_name,
                                                          doDenoise=doDenoise,
                                                          badT2=dataDict['BadT2'],
                                                          useEMSP=useEMSP)
    sessionWorkflow.base_dir = master_config['cachedir']

    sessionWorkflow_inputsspec = sessionWorkflow.get_node('inputspec')
    sessionWorkflow_inputsspec.inputs.T1s = dataDict['T1s']
    sessionWorkflow_inputsspec.inputs.T2s = dataDict['T2s']
    sessionWorkflow_inputsspec.inputs.PDs = dataDict['PDs']
    sessionWorkflow_inputsspec.inputs.FLs = dataDict['FLs']
    if useEMSP:
        sessionWorkflow_inputsspec.inputs.EMSP = dataDict['EMSP'][0]
    sessionWorkflow_inputsspec.inputs.OTHERs = dataDict['OTHERs']
    return sessionWorkflow
Example #46
0
    def process(self):
        # Process time
        now = datetime.datetime.now().strftime("%Y%m%d_%H%M")
       
        # Initialization
        if os.path.exists(os.path.join(self.base_directory,"LOG","pypeline.log")):
            os.unlink(os.path.join(self.base_directory,"LOG","pypeline.log"))
        config.update_config({'logging': {'log_directory': os.path.join(self.base_directory,"LOG"),
                                  'log_to_file': True},
                              'execution': {'remove_unnecessary_outputs': False}
                              })
        logging.update_logging(config)
        iflogger = logging.getLogger('interface')
       
        # Data import
        datasource = pe.Node(interface=nio.DataGrabber(outfields = ['fMRI','T1','T2']), name='datasource')
        datasource.inputs.base_directory = os.path.join(self.base_directory,'NIFTI')
        datasource.inputs.template = '*'
        datasource.inputs.raise_on_empty = False
        datasource.inputs.field_template = dict(fMRI='fMRI.nii.gz',T1='T1.nii.gz',T2='T2.nii.gz')
        datasource.inputs.sort_filelist=False
       
        # Data sinker for output
        sinker = pe.Node(nio.DataSink(), name="fMRI_sinker")
        sinker.inputs.base_directory = os.path.join(self.base_directory, "RESULTS")
        
        # Clear previous outputs
        self.clear_stages_outputs()
        
        # Create common_flow
        common_flow = self.create_common_flow()
        
        # Create fMRI flow
        
        fMRI_flow = pe.Workflow(name='fMRI_pipeline')
        fMRI_inputnode = pe.Node(interface=util.IdentityInterface(fields=["fMRI","T1","T2","subjects_dir","subject_id","wm_mask_file","roi_volumes","wm_eroded","brain_eroded","csf_eroded","parcellation_scheme","atlas_info"]),name="inputnode")
        fMRI_outputnode = pe.Node(interface=util.IdentityInterface(fields=["connectivity_matrices"]),name="outputnode")
        fMRI_flow.add_nodes([fMRI_inputnode,fMRI_outputnode])

        if self.stages['Preprocessing'].enabled:
            preproc_flow = self.create_stage_flow("Preprocessing")
            fMRI_flow.connect([
                (fMRI_inputnode,preproc_flow,[("fMRI","inputnode.functional")]),
                ])
                                               
        if self.stages['Registration'].enabled:
            reg_flow = self.create_stage_flow("Registration")
            fMRI_flow.connect([
                          (fMRI_inputnode,reg_flow,[('T1','inputnode.T1'),('T2','inputnode.T2')]),
                          (preproc_flow,reg_flow, [('outputnode.mean_vol','inputnode.target')]),
                          (fMRI_inputnode,reg_flow, [('wm_mask_file','inputnode.wm_mask'),('roi_volumes','inputnode.roi_volumes'),
                                                ('wm_eroded','inputnode.eroded_wm')])
                          ])
            if self.stages['Functional'].config.global_nuisance:
                fMRI_flow.connect([
                              (fMRI_inputnode,reg_flow,[('brain_eroded','inputnode.eroded_brain')])
                            ])
            if self.stages['Functional'].config.csf:
                fMRI_flow.connect([
                              (fMRI_inputnode,reg_flow,[('csf_eroded','inputnode.eroded_csf')])
                            ])
            if self.stages['Registration'].config.registration_mode == "BBregister (FS)":
                fMRI_flow.connect([
                          (fMRI_inputnode,reg_flow, [('subjects_dir','inputnode.subjects_dir'),
                                                ('subject_id','inputnode.subject_id')]),
                          ])
       
        if self.stages['Functional'].enabled:
            func_flow = self.create_stage_flow("Functional")
            fMRI_flow.connect([
                        (preproc_flow,func_flow, [('outputnode.functional_preproc','inputnode.preproc_file')]),
                        (reg_flow,func_flow, [('outputnode.wm_mask_registered','inputnode.registered_wm'),('outputnode.roi_volumes_registered','inputnode.registered_roi_volumes'),
                                              ('outputnode.eroded_wm_registered','inputnode.eroded_wm'),('outputnode.eroded_csf_registered','inputnode.eroded_csf'),
                                              ('outputnode.eroded_brain_registered','inputnode.eroded_brain')])
                        ])
            if self.stages['Functional'].config.scrubbing or self.stages['Functional'].config.motion:
                fMRI_flow.connect([
                                   (preproc_flow,func_flow,[("outputnode.par_file","inputnode.motion_par_file")])
                                ])
                       
        if self.stages['Connectome'].enabled:
            con_flow = self.create_stage_flow("Connectome")
            fMRI_flow.connect([
                        (fMRI_inputnode,con_flow, [('parcellation_scheme','inputnode.parcellation_scheme')]),
                        (func_flow,con_flow, [('outputnode.func_file','inputnode.func_file'),("outputnode.FD","inputnode.FD"),
                                              ("outputnode.DVARS","inputnode.DVARS")]),
                        (reg_flow,con_flow,[("outputnode.roi_volumes_registered","inputnode.roi_volumes_registered")]),
                        (con_flow,fMRI_outputnode,[("outputnode.connectivity_matrices","connectivity_matrices")])
                        ])
            
            if self.stages['Parcellation'].config.parcellation_scheme == "Custom":
                fMRI_flow.connect([(fMRI_inputnode,con_flow, [('atlas_info','inputnode.atlas_info')])])
                
        # Create NIPYPE flow
        
        flow = pe.Workflow(name='NIPYPE', base_dir=os.path.join(self.base_directory))
        
        flow.connect([
                      (datasource,common_flow,[("T1","inputnode.T1")]),
                      (datasource,fMRI_flow,[("fMRI","inputnode.fMRI"),("T1","inputnode.T1"),("T2","inputnode.T2")]),
                      (common_flow,fMRI_flow,[("outputnode.subjects_dir","inputnode.subjects_dir"),
                                              ("outputnode.subject_id","inputnode.subject_id"),
                                              ("outputnode.wm_mask_file","inputnode.wm_mask_file"),
                                              ("outputnode.roi_volumes","inputnode.roi_volumes"),
                                              ("outputnode.wm_eroded","inputnode.wm_eroded"),
                                              ("outputnode.brain_eroded","inputnode.brain_eroded"),
                                              ("outputnode.csf_eroded","inputnode.csf_eroded"),
                                              ("outputnode.parcellation_scheme","inputnode.parcellation_scheme"),
                                              ("outputnode.atlas_info","inputnode.atlas_info")]),
                      (fMRI_flow,sinker,[("outputnode.connectivity_matrices","fMRI.%s.connectivity_matrices"%now)])
                    ])
        
        # Process pipeline
        
        iflogger.info("**** Processing ****")
       
        if(self.number_of_cores != 1):
            flow.run(plugin='MultiProc', plugin_args={'n_procs' : self.number_of_cores})
        else:
            flow.run()
       
        self.fill_stages_outputs()
        
        # Clean undesired folders/files
        rm_file_list = ['rh.EC_average','lh.EC_average','fsaverage']
        for file_to_rm in rm_file_list:
            if os.path.exists(os.path.join(self.base_directory,file_to_rm)):
                os.remove(os.path.join(self.base_directory,file_to_rm))
       
        # copy .ini and log file
        outdir = os.path.join(self.base_directory,"RESULTS",'fMRI',now)
        if not os.path.exists(outdir):
            os.makedirs(outdir)
        shutil.copy(self.config_file,outdir)
        shutil.copy(os.path.join(self.base_directory,'LOG','pypeline.log'),outdir)
       
        iflogger.info("**** Processing finished ****")
       
        return True,'Processing successful'
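Nearly every example in this listing repeats the same nipype logging and crash-dump setup. A minimal standalone sketch of that pattern follows; the directory name is a placeholder, and the interface logger is named plain 'interface' in pre-1.0 nipype releases.

import os

from nipype import config, logging

log_dir = os.path.abspath("LOG")  # placeholder directory
if not os.path.exists(log_dir):
    os.makedirs(log_dir)

config.update_config({'logging': {'log_directory': log_dir,
                                  'log_to_file': True},
                      'execution': {'remove_unnecessary_outputs': False,
                                    'crashdump_dir': log_dir}})
logging.update_logging(config)

# 'nipype.interface' in nipype >= 1.0; plain 'interface' in older releases
iflogger = logging.getLogger('nipype.interface')
iflogger.info("logging configured")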
Example #47
0
        wf.base_dir = c.workingDirectory
        wf.config["execution"] = {"hash_method": "timestamp", "crashdump_dir": os.path.abspath(c.crashLogDirectory)}
        log_dir = os.path.join(
            c.outputDirectory, "logs", "group_analysis", resource, "model_%s" % (os.path.basename(model))
        )
        try:
            os.makedirs(log_dir)
        except:
            print "log_dir already exist"

        # enable logging
        from nipype import config
        from nipype import logging

        config.update_config({"logging": {"log_directory": log_dir, "log_to_file": True}})
        logging.update_logging(config)
        iflogger = logging.getLogger("interface")

        input_subject_list = [
            line.rstrip("\r\n") for line in open(subject_list, "r") if not (line == "\n") and not line.startswith("#")
        ]

        ordered_paths = []
        for sub in input_subject_list:
            for path in s_paths:
                if sub in path:
                    ordered_paths.append(path)

        iflogger.info("input_subject_list -> %s" % input_subject_list)
        # print "ordered_paths ->", ordered_paths
Example #48
0
def RunSubjectWorkflow(args):
    """
                           .-----------.
                       --- | Session 1 | ---> /project/subjectA/session1/phase/
                     /     *-----------*
    .-----------.   /
    | Subject A | <
    *-----------*   \
                     \     .-----------.
                       --- | Session 2 | ---> /project/subjectA/session2/phase/
                           *-----------*
    **** Replaces WorkflowT1T2.py ****
    """
    database, start_time, subject, master_config = args
    assert 'baseline' in master_config[
        'components'] or 'longitudinal' in master_config[
            'components'], "Baseline or Longitudinal is not in WORKFLOW_COMPONENTS!"
    # HACK:
    #    To avoid a "sqlite3.ProgrammingError: Base Cursor.__init__ not called" error
    #    using multiprocessing.map_async(), re-instantiate database
    # database.__init__(defaultDBName=database.dbName, subject_list=database.subjectList)
    #
    # END HACK
    import time

    from nipype import config, logging
    config.update_config(master_config)  # Set universal pipeline options
    assert config.get('execution',
                      'plugin') == master_config['execution']['plugin']
    # DEBUG
    # config.enable_debug_mode()
    # config.set('execution', 'stop_on_first_rerun', 'true')
    # END DEBUG
    logging.update_logging(config)

    import nipype.pipeline.engine as pe
    import nipype.interfaces.base as nbase
    import nipype.interfaces.io as nio
    from nipype.interfaces.utility import IdentityInterface, Function
    import traits

    from baw_exp import OpenSubjectDatabase
    from SessionDB import SessionDB
    from PipeLineFunctionHelpers import convertToList
    from atlasNode import MakeAtlasNode
    from utilities.misc import GenerateSubjectOutputPattern as outputPattern
    from utilities.misc import GenerateWFName

    while time.time() < start_time:
        time.sleep(start_time - time.time() + 1)
        print "Delaying start for {subject}".format(subject=subject)
    print("===================== SUBJECT: {0} ===========================".
          format(subject))

    subjectWorkflow = pe.Workflow(
        name="BAW_StandardWorkup_subject_{0}".format(subject))
    subjectWorkflow.base_dir = config.get('logging', 'log_directory')
    # subjectWorkflow.config['execution']['plugin'] = 'Linear'  # Hardcodeded in WorkupT1T2.py - why?
    # DEBUG
    # subjectWorkflow.config['execution']['stop_on_first_rerun'] = 'true'
    # END DEBUG
    atlasNode = MakeAtlasNode(master_config['atlascache'], 'BAtlas')

    sessionWorkflow = dict()
    inputsSpec = dict()
    sessions = database.getSessionsFromSubject(subject)
    # print "These are the sessions: ", sessions
    if 'baseline' in master_config['components']:
        current_phase = 'baseline'
        from baseline import create_baseline as create_wkfl
    elif 'longitudinal' in master_config['components']:
        current_phase = 'longitudinal'
        from longitudinal import create_longitudial as create_wkfl

    for session in sessions:  # TODO (future): Replace with iterable inputSpec node and add Function node for getAllFiles()
        project = database.getProjFromSession(session)
        pname = "{0}_{1}".format(
            session,
            current_phase)  # Long node names make graphs a pain to read/print
        # pname = GenerateWFName(project, subject, session, current_phase)
        print "Building session pipeline for {0}".format(session)
        inputsSpec[session] = pe.Node(
            name='inputspec_{0}'.format(session),
            interface=IdentityInterface(
                fields=['T1s', 'T2s', 'PDs', 'FLs', 'OTs']))
        inputsSpec[session].inputs.T1s = database.getFilenamesByScantype(
            session, ['T1-15', 'T1-30'])
        inputsSpec[session].inputs.T2s = database.getFilenamesByScantype(
            session, ['T2-15', 'T2-30'])
        inputsSpec[session].inputs.PDs = database.getFilenamesByScantype(
            session, ['PD-15', 'PD-30'])
        inputsSpec[session].inputs.FLs = database.getFilenamesByScantype(
            session, ['FL-15', 'FL-30'])
        inputsSpec[session].inputs.OTs = database.getFilenamesByScantype(
            session, ['OTHER-15', 'OTHER-30'])

        sessionWorkflow[session] = create_wkfl(project,
                                               subject,
                                               session,
                                               master_config,
                                               interpMode='Linear',
                                               pipeline_name=pname)

        subjectWorkflow.connect([
            (inputsSpec[session], sessionWorkflow[session], [
                ('T1s', 'inputspec.T1s'),
                ('T2s', 'inputspec.T2s'),
                ('PDs', 'inputspec.PDs'),
                ('FLs', 'inputspec.FLs'),
                ('OTs', 'inputspec.OTHERs'),
            ]),
            (atlasNode, sessionWorkflow[session],
             [('template_landmarks_50Lmks_fcsv',
               'inputspec.atlasLandmarkFilename'),
              ('template_weights_50Lmks_wts', 'inputspec.atlasWeightFilename'),
              ('LLSModel_50Lmks_hdf5', 'inputspec.LLSModel'),
              ('T1_50Lmks_mdl', 'inputspec.inputTemplateModel')]),
        ])
        if current_phase == 'baseline':
            subjectWorkflow.connect([
                (atlasNode, sessionWorkflow[session],
                 [('template_t1', 'inputspec.template_t1'),
                  ('ExtendedAtlasDefinition_xml', 'inputspec.atlasDefinition')
                  ]),
            ])
        else:
            assert current_phase == 'longitudinal', "Phase value is unknown: {0}".format(
                current_phase)

    from utils import run_workflow, print_workflow
    if False:
        print_workflow(subjectWorkflow,
                       plugin=master_config['execution']['plugin'],
                       dotfilename='template')
    return run_workflow(subjectWorkflow,
                        plugin=master_config['execution']['plugin'],
                        plugin_args=master_config['plugin_args'])
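The subject and session workflows above all expose their inputs through an IdentityInterface node named 'inputspec' and address fields across nested workflows as '<node>.<field>'. A minimal sketch of that convention, with made-up field names and a placeholder path:

import nipype.pipeline.engine as pe
from nipype.interfaces.utility import IdentityInterface

# Inner (session-level) workflow exposes its inputs through an 'inputspec' node.
inner = pe.Workflow(name='session_sketch')
inner_inputspec = pe.Node(IdentityInterface(fields=['T1s']), name='inputspec')
inner_outputspec = pe.Node(IdentityInterface(fields=['T1s_out']), name='outputspec')
inner.connect(inner_inputspec, 'T1s', inner_outputspec, 'T1s_out')

# Outer (subject-level) workflow feeds the inner workflow by addressing
# '<node>.<field>' inside it, exactly like the connect() calls above.
outer = pe.Workflow(name='subject_sketch')
subject_inputs = pe.Node(IdentityInterface(fields=['T1s']), name='subject_inputs')
subject_inputs.inputs.T1s = ['/path/to/T1.nii.gz']  # placeholder
outer.connect([(subject_inputs, inner, [('T1s', 'inputspec.T1s')])])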
Example #49
0
def create_singleSession(dataDict, master_config, interpMode, pipeline_name):
    """
    create singleSession workflow on a single session

    This is the main function to call when processing a data set with T1 & T2
    data.  ExperimentBaseDirectoryPrefix is the base of the directory to place results, T1Images & T2Images
    are the lists of images to be used in the auto-workup. atlas_fname_wpath is
    the path and filename of the atlas to use.
    """
    assert 'tissue_classify' in master_config['components'] or \
      'auxlmk' in master_config['components'] or \
      'segmentation' in master_config['components']

    from nipype import config, logging
    config.update_config(master_config)  # Set universal pipeline options
    assert config.get('execution', 'plugin') == master_config['execution']['plugin']
    logging.update_logging(config)

    import nipype.pipeline.engine as pe
    import nipype.interfaces.io as nio
    from nipype.interfaces.base import CommandLine, CommandLineInputSpec, TraitedSpec, Directory, traits, isdefined, BaseInterface
    from nipype.interfaces.utility import Split, Rename, IdentityInterface, Function

    from workflows.baseline import baseline_workflow as create_baseline
    from PipeLineFunctionHelpers import convertToList
    from utilities.misc import GenerateSubjectOutputPattern as outputPattern
    from utilities.misc import GenerateWFName
    from workflows.utils import run_workflow, print_workflow
    from workflows.atlasNode import MakeAtlasNode

    project = dataDict['project']
    subject = dataDict['subject']
    session = dataDict['session']

    pname = "{0}_{1}_{2}".format(master_config['workflow_type'], subject, session)
    sessionWorkflow = create_baseline(project, subject, session, master_config,
                             phase=master_config['workflow_type'],
                             interpMode=interpMode,
                             pipeline_name=pipeline_name)
    sessionWorkflow.base_dir = master_config['cachedir']

    inputsSpec = sessionWorkflow.get_node('inputspec')
    inputsSpec.inputs.T1s = dataDict['T1s']
    inputsSpec.inputs.T2s = dataDict['T2s']
    inputsSpec.inputs.PDs = dataDict['PDs']
    inputsSpec.inputs.FLs = dataDict['FLs']
    inputsSpec.inputs.OTHERs = dataDict['OTs']
    atlasNode = MakeAtlasNode(master_config['atlascache'], 'BAtlas_{0}'.format(session))  # TODO: input atlas csv
    sessionWorkflow.connect([(atlasNode, inputsSpec, [('template_landmarks_50Lmks_fcsv',
                                                                         'atlasLandmarkFilename'),
                                                                        ('template_weights_50Lmks_wts',
                                                                         'atlasWeightFilename'),
                                                                        ('LLSModel_50Lmks_hdf5', 'LLSModel'),
                                                                        ('T1_50Lmks_mdl', 'inputTemplateModel')]),
                                ])
    if True:  # FIXME: current_phase == 'baseline':
        sessionWorkflow.connect([(atlasNode, inputsSpec, [('template_t1', 'template_t1'),
                                                          ('ExtendedAtlasDefinition_xml',
                                                           'atlasDefinition')]),
                                 ])
    else:
        template_DG = pe.Node(interface=nio.DataGrabber(infields=['subject'],
                                                        outfields=['template_t1', 'outAtlasFullPath']),
                              name='Template_DG')
        template_DG.inputs.base_directory = master_config['previousresult']
        template_DG.inputs.subject = subject
        template_DG.inputs.template = 'SUBJECT_TEMPLATES/%s/AVG_%s.nii.gz'
        template_DG.inputs.template_args['template_t1'] = [['subject', 'T1']]
        template_DG.inputs.field_template = {'outAtlasFullPath': 'Atlas/definitions/AtlasDefinition_%s.xml'}
        template_DG.inputs.template_args['outAtlasFullPath'] = [['subject']]
        template_DG.inputs.sort_filelist = True
        template_DG.inputs.raise_on_empty = True

        sessionWorkflow.connect([(template_DG, inputsSpec, [('outAtlasFullPath', 'atlasDefinition'),
                                                            ('template_t1', 'template_t1')]),
                                 ])

    if 'segmentation' in master_config['components']:
        from workflows.segmentation import segmentation
        from workflows.WorkupT1T2BRAINSCut import GenerateWFName
        try:
            bCutInputName = ".".join([GenerateWFName(project, subject, session, 'Segmentation'), 'inputspec'])
        except:
            print project, subject, session
            raise
        sname = 'segmentation'
        onlyT1 = not(len(dataDict['T2s']) > 0)
        segWF = segmentation(project, subject, session, master_config, onlyT1, pipeline_name=sname)
        sessionWorkflow.connect([(atlasNode, segWF,
                                [('hncma-atlas', 'inputspec.hncma-atlas'),
                                 ('template_t1', 'inputspec.template_t1'),
                                 ('template_t1', bCutInputName + '.template_t1'),
                                 ('rho', bCutInputName + '.rho'),
                                 ('phi', bCutInputName + '.phi'),
                                 ('theta', bCutInputName + '.theta'),
                                 ('l_caudate_ProbabilityMap', bCutInputName + '.l_caudate_ProbabilityMap'),
                                 ('r_caudate_ProbabilityMap', bCutInputName + '.r_caudate_ProbabilityMap'),
                                 ('l_hippocampus_ProbabilityMap', bCutInputName + '.l_hippocampus_ProbabilityMap'),
                                 ('r_hippocampus_ProbabilityMap', bCutInputName + '.r_hippocampus_ProbabilityMap'),
                                 ('l_putamen_ProbabilityMap', bCutInputName + '.l_putamen_ProbabilityMap'),
                                 ('r_putamen_ProbabilityMap', bCutInputName + '.r_putamen_ProbabilityMap'),
                                 ('l_thalamus_ProbabilityMap', bCutInputName + '.l_thalamus_ProbabilityMap'),
                                 ('r_thalamus_ProbabilityMap', bCutInputName + '.r_thalamus_ProbabilityMap'),
                                 ('l_accumben_ProbabilityMap', bCutInputName + '.l_accumben_ProbabilityMap'),
                                 ('r_accumben_ProbabilityMap', bCutInputName + '.r_accumben_ProbabilityMap'),
                                 ('l_globus_ProbabilityMap', bCutInputName + '.l_globus_ProbabilityMap'),
                                 ('r_globus_ProbabilityMap', bCutInputName + '.r_globus_ProbabilityMap'),
                                 ('trainModelFile_txtD0060NT0060_gz',
                                  bCutInputName + '.trainModelFile_txtD0060NT0060_gz')])])
        outputSpec = sessionWorkflow.get_node('outputspec')
        sessionWorkflow.connect([(outputSpec, segWF, [('t1_average', 'inputspec.t1_average'),
                                             ('LMIatlasToSubject_tx', 'inputspec.LMIatlasToSubject_tx'),
                                             ('outputLabels', 'inputspec.inputLabels'),
                                             ('posteriorImages', 'inputspec.posteriorImages'),
                                             ('tc_atlas2sessionInverse_tx',
                                              'inputspec.TissueClassifyatlasToSubjectInverseTransform'),
                                             ('UpdatedPosteriorsList', 'inputspec.UpdatedPosteriorsList'),
                                             ('outputHeadLabels', 'inputspec.inputHeadLabels')])
                                ])
        if not onlyT1:
            sessionWorkflow.connect([(outputSpec, segWF, [('t2_average', 'inputspec.t2_average')])])

    return sessionWorkflow
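The Template_DG node above is a standard nipype DataGrabber. A minimal sketch of the same field_template/template_args pattern with placeholder paths; each %s slot in a template is filled from the corresponding template_args entry:

import nipype.pipeline.engine as pe
import nipype.interfaces.io as nio

dg = pe.Node(nio.DataGrabber(infields=['subject'],
                             outfields=['template_t1', 'atlas_def']),
             name='Template_DG_sketch')
dg.inputs.base_directory = '/previous/results'           # placeholder
dg.inputs.template = 'SUBJECT_TEMPLATES/%s/AVG_%s.nii.gz'
dg.inputs.template_args['template_t1'] = [['subject', 'T1']]
dg.inputs.field_template = {'atlas_def': 'Atlas/definitions/AtlasDefinition_%s.xml'}
dg.inputs.template_args['atlas_def'] = [['subject']]
dg.inputs.sort_filelist = True
dg.inputs.raise_on_empty = True
dg.inputs.subject = 'subjectA'                            # placeholder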
Example #50
0
def _run_workflow(args):

    # build pipeline for each subject, individually
    # ~ 5 min 20 sec per subject
    # (roughly 320 seconds)

    import os
    import os.path as op
    import sys

    import nipype.interfaces.io as nio
    import nipype.pipeline.engine as pe

    import nipype.interfaces.utility as util
    import nipype.interfaces.fsl.maths as fsl

    import glob

    import time
    from time import strftime
    from nipype import config as nyconfig
    from nipype import logging  # nipype's logging manager, needed for update_logging() below

    resource_pool, config, subject_info, run_name, site_name = args
    sub_id = str(subject_info[0])

    qap_type = config['qap_type']

    if subject_info[1]:
        session_id = subject_info[1]
    else:
        session_id = "session_0"

    if subject_info[2]:
        scan_id = subject_info[2]
    else:
        scan_id = "scan_0"

    # Read and apply general settings in config
    keep_outputs = config.get('write_all_outputs', False)
    output_dir = op.join(config["output_directory"], run_name,
                         sub_id, session_id, scan_id)

    try:
        os.makedirs(output_dir)
    except:
        if not op.isdir(output_dir):
            err = "[!] Output directory unable to be created.\n" \
                  "Path: %s\n\n" % output_dir
            raise Exception(err)
        else:
            pass

    log_dir = output_dir

    # set up logging
    nyconfig.update_config(
        {'logging': {'log_directory': log_dir, 'log_to_file': True}})
    logging.update_logging(nyconfig)

    # take date+time stamp for run identification purposes
    unique_pipeline_id = strftime("%Y%m%d%H%M%S")
    pipeline_start_stamp = strftime("%Y-%m-%d_%H:%M:%S")

    pipeline_start_time = time.time()

    logger.info("Pipeline start time: %s" % pipeline_start_stamp)
    logger.info("Contents of resource pool:\n" + str(resource_pool))
    logger.info("Configuration settings:\n" + str(config))

    # for QAP spreadsheet generation only
    config.update({"subject_id": sub_id, "session_id": session_id,
                   "scan_id": scan_id, "run_name": run_name})

    if site_name:
        config["site_name"] = site_name

    workflow = pe.Workflow(name=scan_id)
    workflow.base_dir = op.join(config["working_directory"], sub_id,
                                session_id)

    # set up crash directory
    workflow.config['execution'] = \
        {'crashdump_dir': config["output_directory"]}

    # update that resource pool with what's already in the output directory
    for resource in os.listdir(output_dir):
        if (op.isdir(op.join(output_dir, resource)) and
                resource not in resource_pool.keys()):
            resource_pool[resource] = glob.glob(op.join(output_dir,
                                                        resource, "*"))[0]

    # resource pool check
    invalid_paths = []

    for resource in resource_pool.keys():
        if not op.isfile(resource_pool[resource]):
            invalid_paths.append((resource, resource_pool[resource]))

    if len(invalid_paths) > 0:
        err = "\n\n[!] The paths provided in the subject list to the " \
              "following resources are not valid:\n"

        for path_tuple in invalid_paths:
            err = err + path_tuple[0] + ": " + path_tuple[1] + "\n"

        err = err + "\n\n"
        raise Exception(err)

    # start connecting the pipeline
    if 'qap_' + qap_type not in resource_pool.keys():
        from qap import qap_workflows as qw
        wf_builder = getattr(qw, 'qap_' + qap_type + '_workflow')
        workflow, resource_pool = wf_builder(workflow, resource_pool, config)

    # set up the datasinks
    new_outputs = 0

    out_list = ['qap_' + qap_type]
    if keep_outputs:
        out_list = resource_pool.keys()

    # Save reports to out_dir if necessary
    if config.get('write_report', False):
        out_list += ['qap_mosaic']

        # The functional temporal also has an FD plot
        if 'functional_temporal' in qap_type:
            out_list += ['qap_fd']

    for output in out_list:
        # we use a check for len()==2 here to select those items in the
        # resource pool which are tuples of (node, node_output), instead
        # of the items which are straight paths to files

        # resource pool items which are in the tuple format are the
        # outputs that have been created in this workflow because they
        # were not present in the subject list YML (the starting resource
        # pool) and had to be generated
        if len(resource_pool[output]) == 2:
            ds = pe.Node(nio.DataSink(), name='datasink_%s' % output)
            ds.inputs.base_directory = output_dir
            node, out_file = resource_pool[output]
            workflow.connect(node, out_file, ds, output)
            new_outputs += 1

    rt = {'id': sub_id, 'session': session_id, 'scan': scan_id,
          'status': 'started'}
    # run the pipeline (if there is anything to do)
    if new_outputs > 0:
        workflow.write_graph(
            dotfilename=op.join(output_dir, run_name + ".dot"),
            simple_form=False)

        nc_per_subject = config.get('num_cores_per_subject', 1)
        runargs = {'plugin': 'Linear', 'plugin_args': {}}
        if nc_per_subject > 1:
            runargs['plugin'] = 'MultiProc'
            runargs['plugin_args'] = {'n_procs': nc_per_subject}

        try:
            workflow.run(**runargs)
            rt['status'] = 'finished'
        except Exception as e:  # TODO We should be more specific here ...
            rt.update({'status': 'failed', 'msg': str(e)})
            # ... however this is run inside a pool.map: do not raise Exception

    else:
        rt['status'] = 'cached'
        logger.info("\nEverything is already done for subject %s." % sub_id)

    # Remove working directory when done
    if not keep_outputs:
        try:
            work_dir = op.join(workflow.base_dir, scan_id)

            if op.exists(work_dir):
                import shutil
                shutil.rmtree(work_dir)
        except:
            logger.warn("Couldn\'t remove the working directory!")
            pass

    pipeline_end_stamp = strftime("%Y-%m-%d_%H:%M:%S")
    pipeline_end_time = time.time()
    logger.info("Elapsed time (minutes) since last start: %s"
                % ((pipeline_end_time - pipeline_start_time) / 60))
    logger.info("Pipeline end time: %s" % pipeline_end_stamp)
    return rt
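A minimal sketch of the DataSink pattern used above: the attribute name given to connect() becomes the output sub-folder, '.' nests folders, and a leading '@' drops files directly into the parent container (as in the 'output.@...' connections in Example #53 below). Paths and names are placeholders.

import nipype.pipeline.engine as pe
import nipype.interfaces.io as nio
from nipype.interfaces.utility import IdentityInterface

wf = pe.Workflow(name='sink_sketch')

src = pe.Node(IdentityInterface(fields=['out_file']), name='src')
src.inputs.out_file = '/path/to/result.nii.gz'       # placeholder

ds = pe.Node(nio.DataSink(), name='datasink_sketch')
ds.inputs.base_directory = '/path/to/output_dir'     # placeholder

# On wf.run(), the file is copied to <base_directory>/qap_mosaic/;
# an 'output.@result' destination would instead place it directly in
# <base_directory>/output/ without an extra sub-folder.
wf.connect(src, 'out_file', ds, 'qap_mosaic')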
Example #51
0
def _create_single_session(dataDict, master_config, interpMode, pipeline_name):
    """
    Create singleSession workflow on a single session

    This is the main function to call when processing a data set with T1 & T2
    data.  ExperimentBaseDirectoryPrefix is the base of the directory to place results, T1Images & T2Images
    are the lists of images to be used in the auto-workup. atlas_fname_wpath is
    the path and filename of the atlas to use.

    :param dataDict: dict with 'project', 'subject', 'session' and per-modality file lists ('T1s', 'T2s', 'PDs', 'FLs', 'EMSP', 'OTHERs', 'BadT2')
    :param master_config: experiment-wide configuration dict ('components', 'workflow_phase', 'cachedir', ...)
    :param interpMode: interpolation mode forwarded to the session template workflow (e.g. 'Linear')
    :param pipeline_name: name given to the generated session workflow
    :return: the configured session-level nipype Workflow
    """
    assert ("tissue_classify" in master_config["components"]
            or "auxlmk" in master_config["components"]
            or "denoise" in master_config["components"]
            or "landmark" in master_config["components"]
            or "segmentation" in master_config["components"]
            or "jointfusion_2015_wholebrain" in master_config["components"])

    import os
    import re
    from nipype import config, logging

    config.update_config(master_config)  # Set universal pipeline options
    logging.update_logging(config)

    from BAW.workflows.baseline import generate_single_session_template_wf

    project = dataDict["project"]
    subject = dataDict["subject"]
    session = dataDict["session"]

    blackListFileName = dataDict["T1s"][0] + "_noDenoise"
    isBlackList = os.path.isfile(blackListFileName)

    pname = "{0}_{1}_{2}".format(master_config["workflow_phase"], subject,
                                 session)
    onlyT1 = not (len(dataDict["T2s"]) > 0)
    hasPDs = len(dataDict["PDs"]) > 0
    hasFLs = len(dataDict["FLs"]) > 0
    if onlyT1:
        print("T1 Only processing starts ...")
    else:
        print("Multimodal processing starts ...")

    doDenoise = False
    if "denoise" in master_config["components"]:
        if isBlackList:
            print("""
                  Denoise is ignored when the session is in Blacklist
                  There is known issue that Landmark Detection algorithm
                  may not work well with denoising step
                  """)
            doDenoise = False
        else:
            doDenoise = True
    useEMSP = None
    if len(dataDict["EMSP"]) > 0:
        useEMSP = dataDict["EMSP"][0]

    def replace_image_extensions(filename, new_extension):
        filename_base = filename
        for rmext in [
                r".gz$", r".nii$", r".hdr$", r".img$", r".dcm$", r".nrrd$",
                r".nhdr$", r".mhd$"
        ]:
            filename_base = re.sub(rmext, "", filename_base)
        return filename_base + new_extension

    input_sidecare_fcsv_filename = replace_image_extensions(
        dataDict["T1s"][0], ".fcsv")
    if os.path.exists(input_sidecare_fcsv_filename):
        useEMSP = input_sidecare_fcsv_filename

    sessionWorkflow = generate_single_session_template_wf(
        project,
        subject,
        session,
        onlyT1,
        hasPDs,
        hasFLs,
        master_config,
        phase=master_config["workflow_phase"],
        interpMode=interpMode,
        pipeline_name=pipeline_name,
        doDenoise=doDenoise,
        badT2=dataDict["BadT2"],
        useEMSP=useEMSP,
    )
    sessionWorkflow.base_dir = master_config["cachedir"]

    sessionWorkflow_inputsspec = sessionWorkflow.get_node("inputspec")
    sessionWorkflow_inputsspec.inputs.T1s = dataDict["T1s"]
    sessionWorkflow_inputsspec.inputs.T2s = dataDict["T2s"]
    sessionWorkflow_inputsspec.inputs.PDs = dataDict["PDs"]
    sessionWorkflow_inputsspec.inputs.FLs = dataDict["FLs"]
    if useEMSP is not None:
        sessionWorkflow_inputsspec.inputs.EMSP = useEMSP
    sessionWorkflow_inputsspec.inputs.OTHERs = dataDict["OTHERs"]
    return sessionWorkflow
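For reference, a standalone copy of the extension-stripping helper above, with the dots escaped in the regular expressions; the sample filename is made up:

import re

def replace_image_extensions(filename, new_extension):
    """Strip known image extensions and append new_extension."""
    for rmext in [r"\.gz$", r"\.nii$", r"\.hdr$", r"\.img$", r"\.dcm$",
                  r"\.nrrd$", r"\.nhdr$", r"\.mhd$"]:
        filename = re.sub(rmext, "", filename)
    return filename + new_extension

# A T1 volume and the landmark sidecar file looked up next to it:
print(replace_image_extensions("sub-01_T1w.nii.gz", ".fcsv"))  # sub-01_T1w.fcsv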
Example #52
0
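# The lines below are an excerpt; they assume (not shown in the original) module-level
# setup roughly like:
#   import os, argparse, logging
#   import numpy as np
#   from nipype import config
#   from nipype import logging as nl
# together with a parsed `args` namespace and an `experiment` dict loaded from the config file.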
#logging
log_dir = experiment["files_path"]["brain_atlas"]["log"]
log_level = experiment["log_level"]
msg_format = "%(asctime)s - %(levelname)s - %(process)s - {%(pathname)s:%(lineno)d}- %(message)s"

if not os.path.exists(log_dir):
    os.makedirs(log_dir)
config.enable_debug_mode()
config.set_log_dir(log_dir)
config.update_config(
    {'logging': {
        'log_directory': log_dir,
        'log_to_file': True
    }})
nl.update_logging(config)

logging.basicConfig(filename=log_dir + '/main.log',
                    filemode='w',
                    format=msg_format)
logger = logging.getLogger(__name__)
logger.setLevel(log_level)

logger.info("Functional brain connectivity extraction")
logger.debug("Configuration file is " + args.config)
logger.debug("Smoothing parameter is " + str(args.fwhm))
logger.debug("Experiment loaded")

# set up files' path
subject_list = np.genfromtxt(args.subjects, dtype="str", skip_header=1)
logger.debug("Subject ids: " + str(subject_list))
Example #53
0
wf.connect(thickness, 'brain_segmentation', datasink, 'output.@brain_seg')
wf.connect(thickness, 'brain_segmentation_N4', datasink, 'output.@brain_seg_N4')
wf.connect(thickness, 'brain_segmentation_posteriors_1', datasink, 'output.@brain_seg_post_1')
wf.connect(thickness, 'brain_segmentation_posteriors_2', datasink, 'output.@brain_seg_post_2')
wf.connect(thickness, 'brain_segmentation_posteriors_3', datasink, 'output.@brain_seg_post_3')
wf.connect(thickness, 'brain_segmentation_posteriors_4', datasink, 'output.@brain_seg_post_4')
wf.connect(thickness, 'brain_segmentation_posteriors_5', datasink, 'output.@brain_seg_post_5')
wf.connect(thickness, 'brain_segmentation_posteriors_6', datasink, 'output.@brain_seg_post_6')
wf.connect(thickness, 'cortical_thickness', datasink, 'output.@cortical_thickness')
wf.connect(thickness, 'cortical_thickness_normalized', datasink,'output.@cortical_thickness_normalized')
# Connect ROI stats output text file to datasink
wf.connect(ROIstats, 'roi_stats_file', datasink, 'output.@ROIstats')
# Setup crashfile directory and logging
wf.config['execution'] = {'hash_method': 'timestamp', 'crashdump_dir': crash_dir}
config.update_config({'logging': {'log_directory': log_dir, 'log_to_file': True}})
np_logging.update_logging(config)

# --- Run the workflow ---
wf_status = 0
try:
    ndar_log.info('Running the workflow...')
    wf.run()
    # We're successful at this point, add it as a file to the completed path
    ndar_log.info('Workflow completed successfully for IMAGE03 ID %s' % img03_id_str)
    wf_status = 1
    finish_str = 'Finish time: %s'
# If the workflow run fails
except:
    ndar_log.info('ACT Workflow failed for IMAGE03 ID %s' % img03_id_str)
    finish_str = 'Crash time: %s'
Example #54
0
def RunSubjectWorkflow(args):
    """
                           .-----------.
                       --- | Session 1 | ---> /project/subjectA/session1/phase/
                     /     *-----------*
    .-----------.   /
    | Subject A | <
    *-----------*   \
                     \     .-----------.
                       --- | Session 2 | ---> /project/subjectA/session2/phase/
                           *-----------*
    **** Replaces WorkflowT1T2.py ****
    """
    start_time, subject, master_config = args
    assert 'tissue_classify' in master_config['components'] or \
        'auxlmk' in master_config['components'] or \
        'segmentation' in master_config['components'], \
        "None of 'tissue_classify', 'auxlmk' or 'segmentation' is in WORKFLOW_COMPONENTS!"
    import time

    from nipype import config, logging
    config.update_config(master_config)  # Set universal pipeline options
    assert config.get('execution', 'plugin') == master_config['execution']['plugin']
    # DEBUG
    # config.enable_debug_mode()
    # config.set('execution', 'stop_on_first_rerun', 'true')
    # END DEBUG
    logging.update_logging(config)

    import nipype.pipeline.engine as pe
    import nipype.interfaces.base as nbase
    import nipype.interfaces.io as nio
    from nipype.interfaces.utility import IdentityInterface, Function
    import traits

    from baw_exp import OpenSubjectDatabase
    from SessionDB import SessionDB
    from PipeLineFunctionHelpers import convertToList
    from atlasNode import MakeAtlasNode
    from utilities.misc import GenerateSubjectOutputPattern as outputPattern
    from utilities.misc import GenerateWFName
    from utils import run_workflow, print_workflow

    # while time.time() < start_time:
        # time.sleep(start_time - time.time() + 1)
        # print "Delaying start for {subject}".format(subject=subject)
    # print("===================== SUBJECT: {0} ===========================".format(subject))

    subjectWorkflow = pe.Workflow(name="BAW_StandardWorkup_subject_{0}".format(subject))
    subjectWorkflow.base_dir = config.get('logging', 'log_directory')
    # subjectWorkflow.config['execution']['plugin'] = 'Linear'  # Hardcodeded in WorkupT1T2.py - why?
    # DEBUG
    # subjectWorkflow.config['execution']['stop_on_first_rerun'] = 'true'
    # END DEBUG


    sessionWorkflow = dict()
    inputsSpec = dict()
    # To avoid a "sqlite3.ProgrammingError: Base Cursor.__init__ not called" error
    #    using multiprocessing.map_async(), instantiate database here
    database = OpenSubjectDatabase(master_config['cachedir'], [subject], master_config['prefix'], master_config['dbfile'])
    # print database.getAllSessions()
    database.open_connection()

    sessions = database.getSessionsFromSubject(subject)
    print "These are the sessions: ", sessions
    # TODO: atlas input csv read
    atlasNode = MakeAtlasNode(master_config['atlascache'], 'BAtlas')
    # atlasNode = GetAtlasNode(master_config['previouscache'], 'BAtlas')
    from singleSession import create_singleSession as create_wkfl

    for session in sessions:  # TODO (future): Replace with iterable inputSpec node and add Function node for getAllFiles()
        project = database.getProjFromSession(session)
        pname = "{0}_singleSession".format(session)  # Long node names make graphs a pain to read/print
        # pname = GenerateWFName(project, subject, session, 'singleSession')
        print "Building session pipeline for {0}".format(session)
        inputsSpec[session] = pe.Node(name='inputspec_{0}'.format(session),
                                      interface=IdentityInterface(fields=['T1s', 'T2s', 'PDs', 'FLs', 'OTs']))
        inputsSpec[session].inputs.T1s = database.getFilenamesByScantype(session, ['T1-15', 'T1-30'])
        inputsSpec[session].inputs.T2s = database.getFilenamesByScantype(session, ['T2-15', 'T2-30'])
        inputsSpec[session].inputs.PDs = database.getFilenamesByScantype(session, ['PD-15', 'PD-30'])
        inputsSpec[session].inputs.FLs = database.getFilenamesByScantype(session, ['FL-15', 'FL-30'])
        inputsSpec[session].inputs.OTs = database.getFilenamesByScantype(session, ['OTHER-15', 'OTHER-30'])

        sessionWorkflow[session] = create_wkfl(project, subject, session, master_config,
                                               interpMode='Linear', pipeline_name=pname)

        subjectWorkflow.connect([(inputsSpec[session], sessionWorkflow[session], [('T1s', 'inputspec.T1s'),
                                                                                  ('T2s', 'inputspec.T2s'),
                                                                                  ('PDs', 'inputspec.PDs'),
                                                                                  ('FLs', 'inputspec.FLs'),
                                                                                  ('OTs', 'inputspec.OTHERs'),
                                                                                  ]),
                                 (atlasNode, sessionWorkflow[session], [('template_landmarks_50Lmks_fcsv',
                                                                         'inputspec.atlasLandmarkFilename'),
                                                                        ('template_weights_50Lmks_wts',
                                                                         'inputspec.atlasWeightFilename'),
                                                                        ('LLSModel_50Lmks_hdf5', 'inputspec.LLSModel'),
                                                                        ('T1_50Lmks_mdl', 'inputspec.inputTemplateModel')]),
                                ])
        if 'segmentation' in master_config['components']:
            from WorkupT1T2BRAINSCut import GenerateWFName
            try:
                bCutInputName = ".".join(['segmentation', GenerateWFName(project, subject, session, 'Segmentation'), 'inputspec'])
            except:
                print project, subject, session
                raise
            subjectWorkflow.connect([(atlasNode, sessionWorkflow[session],
                                      [('hncma-atlas', 'segmentation.inputspec.hncma-atlas'),
                                       ('template_t1', 'segmentation.inputspec.template_t1'),
                                       ('template_t1', bCutInputName + '.template_t1'),
                                       ('rho', bCutInputName + '.rho'),
                                       ('phi', bCutInputName + '.phi'),
                                       ('theta', bCutInputName + '.theta'),
                                       ('l_caudate_ProbabilityMap', bCutInputName + '.l_caudate_ProbabilityMap'),
                                       ('r_caudate_ProbabilityMap', bCutInputName + '.r_caudate_ProbabilityMap'),
                                       ('l_hippocampus_ProbabilityMap', bCutInputName + '.l_hippocampus_ProbabilityMap'),
                                       ('r_hippocampus_ProbabilityMap', bCutInputName + '.r_hippocampus_ProbabilityMap'),
                                       ('l_putamen_ProbabilityMap', bCutInputName + '.l_putamen_ProbabilityMap'),
                                       ('r_putamen_ProbabilityMap', bCutInputName + '.r_putamen_ProbabilityMap'),
                                       ('l_thalamus_ProbabilityMap', bCutInputName + '.l_thalamus_ProbabilityMap'),
                                       ('r_thalamus_ProbabilityMap', bCutInputName + '.r_thalamus_ProbabilityMap'),
                                       ('l_accumben_ProbabilityMap', bCutInputName + '.l_accumben_ProbabilityMap'),
                                       ('r_accumben_ProbabilityMap', bCutInputName + '.r_accumben_ProbabilityMap'),
                                       ('l_globus_ProbabilityMap', bCutInputName + '.l_globus_ProbabilityMap'),
                                       ('r_globus_ProbabilityMap', bCutInputName + '.r_globus_ProbabilityMap'),
                                       ('trainModelFile_txtD0060NT0060_gz',
                                        bCutInputName + '.trainModelFile_txtD0060NT0060_gz')])])
        if True:  # FIXME: current_phase == 'baseline':
            subjectWorkflow.connect([(atlasNode, sessionWorkflow[session], [('template_t1', 'inputspec.template_t1'),
                                                                            ('ExtendedAtlasDefinition_xml',
                                                                             'inputspec.atlasDefinition')]),
                                 ])
        else:
            template_DG = pe.Node(interface=nio.DataGrabber(infields=['subject'],
                                  outfields=['template_t1', 'outAtlasFullPath']),
                                  name='Template_DG')
            template_DG.inputs.base_directory = master_config['previousresult']
            template_DG.inputs.subject = subject
            template_DG.inputs.template = 'SUBJECT_TEMPLATES/%s/AVG_%s.nii.gz'
            template_DG.inputs.template_args['template_t1'] = [['subject', 'T1']]
            template_DG.inputs.field_template = {'outAtlasFullPath': 'Atlas/definitions/AtlasDefinition_%s.xml'}
            template_DG.inputs.template_args['outAtlasFullPath'] = [['subject']]
            template_DG.inputs.sort_filelist = True
            template_DG.inputs.raise_on_empty = True

            subjectWorkflow.connect([(template_DG, sessionWorkflow[session], [('outAtlasFullPath', 'inputspec.atlasDefinition'),
                                                                              ('template_t1', 'inputspec.template_t1')]),
                                     ])
        # HACK: only run first subject
        break
        # END HACK
        if not True:
            return print_workflow(subjectWorkflow,
                                  plugin=master_config['execution']['plugin'], dotfilename='subjectWorkflow') #, graph2use='flat')
    try:
        return subjectWorkflow.run(plugin='SGEGraph', plugin_args=master_config['plugin_args'])
    except:
        return 1
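The examples run their workflows with different execution plugins ('Linear', 'MultiProc', 'SGEGraph'). A minimal sketch of selecting one at run time; the core count, queue arguments and the no-op node are placeholders:

import nipype.pipeline.engine as pe
from nipype.interfaces.utility import IdentityInterface

wf = pe.Workflow(name='plugin_sketch')
noop = pe.Node(IdentityInterface(fields=['x']), name='noop')
noop.inputs.x = 1
wf.add_nodes([noop])

n_procs = 4          # placeholder
use_cluster = False  # placeholder: True only where SGE is available

if use_cluster:
    # submits one job per node through qsub; qsub_args is site-specific
    wf.run(plugin='SGEGraph', plugin_args={'qsub_args': '-q all.q'})
elif n_procs > 1:
    wf.run(plugin='MultiProc', plugin_args={'n_procs': n_procs})
else:
    wf.run(plugin='Linear')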
Example #55
0
    def process(self):
        # Enable the use of the W3C PROV data model to capture and represent provenance in Nipype
        # config.enable_provenance()

        # Process time
        self.now = datetime.datetime.now().strftime("%Y%m%d_%H%M")

        if '_' in self.subject:
            self.subject = self.subject.split('_')[0]

        # old_subject = self.subject

        if self.global_conf.subject_session == '':
            cmp_deriv_subject_directory = os.path.join(self.output_directory,
                                                       "cmp", self.subject)
            nipype_deriv_subject_directory = os.path.join(
                self.output_directory, "nipype", self.subject)
        else:
            cmp_deriv_subject_directory = os.path.join(
                self.output_directory, "cmp", self.subject,
                self.global_conf.subject_session)
            nipype_deriv_subject_directory = os.path.join(
                self.output_directory, "nipype", self.subject,
                self.global_conf.subject_session)

            self.subject = "_".join(
                (self.subject, self.global_conf.subject_session))

        if not os.path.exists(
                os.path.join(nipype_deriv_subject_directory, "fMRI_pipeline")):
            try:
                os.makedirs(
                    os.path.join(nipype_deriv_subject_directory,
                                 "fMRI_pipeline"))
            except os.error:
                print("%s was already existing" % os.path.join(
                    nipype_deriv_subject_directory, "fMRI_pipeline"))

        # Initialization
        if os.path.isfile(
                os.path.join(nipype_deriv_subject_directory, "fMRI_pipeline",
                             "pypeline.log")):
            os.unlink(
                os.path.join(nipype_deriv_subject_directory, "fMRI_pipeline",
                             "pypeline.log"))
        config.update_config({
            'logging': {
                'log_directory':
                os.path.join(nipype_deriv_subject_directory, "fMRI_pipeline"),
                'log_to_file':
                True
            },
            'execution': {
                'remove_unnecessary_outputs': False,
                'stop_on_first_crash': True,
                'stop_on_first_rerun': False,
                'use_relative_paths': True,
                'crashfile_format': "txt"
            }
        })
        logging.update_logging(config)
        iflogger = logging.getLogger('nipype.interface')

        iflogger.info("**** Processing ****")

        flow = self.create_pipeline_flow(
            cmp_deriv_subject_directory=cmp_deriv_subject_directory,
            nipype_deriv_subject_directory=nipype_deriv_subject_directory)
        flow.write_graph(graph2use='colored', format='svg', simple_form=False)

        # try:

        if (self.number_of_cores != 1):
            flow.run(plugin='MultiProc',
                     plugin_args={'n_procs': self.number_of_cores})
        else:
            flow.run()

        # self.fill_stages_outputs()

        iflogger.info("**** Processing finished ****")

        return True, 'Processing successful'
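Example #56 below attaches a logging filter to nipype's workflow logger so that ERROR records trigger a user-visible warning. A standalone sketch of that idea, assuming a recent nipype (the logger is called plain 'workflow' in pre-1.0 releases); the warning text is a placeholder:

from logging import Filter, ERROR

from nipype import logging as nipype_logging

class ErrorFlagFilter(Filter):
    """Print a short notice whenever an ERROR record passes through."""
    def filter(self, record):
        if record.levelno >= ERROR:
            print("An ERROR was logged; check the log file for details.")
        return True  # never drop the record itself

# 'nipype.workflow' in nipype >= 1.0; plain 'workflow' in older releases
nipype_logging.getLogger('nipype.workflow').addFilter(ErrorFlagFilter())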
Example #56
0
def execute():
    import argparse
    from colorama import Fore
    import warnings

    # Suppress potential warnings
    warnings.filterwarnings("ignore")

    # Nice traceback when clinica crashes
    sys.excepthook = custom_traceback

    MANDATORY_TITLE = (Fore.YELLOW + 'Mandatory arguments' + Fore.RESET)
    OPTIONAL_TITLE = (Fore.YELLOW + 'Optional arguments' + Fore.RESET)
    """
    Define and parse the command line argument
    """
    parser = ArgumentParser(add_help=False)
    parser.add_argument('-h',
                        '--help',
                        action='help',
                        default=argparse.SUPPRESS,
                        help=argparse.SUPPRESS)
    parser._positionals.title = (
        Fore.YELLOW + 'clinica expects one of the following keywords' +
        Fore.RESET)
    parser._optionals.title = OPTIONAL_TITLE

    sub_parser = parser.add_subparsers(metavar='')
    parser.add_argument("-v",
                        "--verbose",
                        dest='verbose',
                        action='store_true',
                        default=False,
                        help='Verbose: print all messages to the console')
    parser.add_argument("-l",
                        "--logname",
                        dest='logname',
                        default="clinica.log",
                        metavar=('file.log'),
                        help='Define the log file name (default: clinica.log)')
    """
    run category: run one of the available pipelines
    """
    from clinica.engine import CmdParser

    from clinica.pipelines.t1_freesurfer_cross_sectional.t1_freesurfer_cross_sectional_cli import T1FreeSurferCrossSectionalCLI  # noqa
    from clinica.pipelines.t1_volume_tissue_segmentation.t1_volume_tissue_segmentation_cli import T1VolumeTissueSegmentationCLI  # noqa
    from clinica.pipelines.t1_volume_create_dartel.t1_volume_create_dartel_cli import T1VolumeCreateDartelCLI  # noqa
    from clinica.pipelines.t1_volume_existing_dartel.t1_volume_existing_dartel_cli import T1VolumeExistingDartelCLI  # noqa
    from clinica.pipelines.t1_volume_dartel2mni.t1_volume_dartel2mni_cli import T1VolumeDartel2MNICLI  # noqa
    from clinica.pipelines.t1_volume_new_template.t1_volume_new_template_cli import T1VolumeNewTemplateCLI  # noqa
    from clinica.pipelines.t1_volume_existing_template.t1_volume_existing_template_cli import T1VolumeExistingTemplateCLI  # noqa
    from clinica.pipelines.t1_volume_parcellation.t1_volume_parcellation_cli import T1VolumeParcellationCLI
    from clinica.pipelines.dwi_preprocessing_using_phasediff_fieldmap.dwi_preprocessing_using_phasediff_fieldmap_cli import DwiPreprocessingUsingPhaseDiffFieldmapCli  # noqa
    from clinica.pipelines.dwi_preprocessing_using_t1.dwi_preprocessing_using_t1_cli import DwiPreprocessingUsingT1Cli  # noqa
    from clinica.pipelines.dwi_dti.dwi_dti_cli import DwiDtiCli  # noqa
    # from clinica.pipelines.dwi_connectome.dwi_connectome_cli import DwiConnectomeCli  # noqa
    from clinica.pipelines.fmri_preprocessing.fmri_preprocessing_cli import fMRIPreprocessingCLI  # noqa
    from clinica.pipelines.pet_volume.pet_volume_cli import PETVolumeCLI  # noqa
    from clinica.pipelines.pet_surface.pet_surface_cli import PetSurfaceCLI  # noqa
    from clinica.pipelines.machine_learning_spatial_svm.spatial_svm_cli import SpatialSVMCLI  # noqa
    from clinica.pipelines.statistics_surface.statistics_surface_cli import StatisticsSurfaceCLI  # noqa
    pipelines = ClinicaClassLoader(baseclass=CmdParser,
                                   extra_dir="pipelines").load()
    pipelines += [
        T1FreeSurferCrossSectionalCLI(),
        T1VolumeNewTemplateCLI(),
        DwiPreprocessingUsingPhaseDiffFieldmapCli(),
        DwiPreprocessingUsingT1Cli(),
        DwiDtiCli(),
        # DwiConnectomeCli(),
        fMRIPreprocessingCLI(),
        PETVolumeCLI(),
        PetSurfaceCLI(),
        SpatialSVMCLI(),
        StatisticsSurfaceCLI(),
        T1VolumeExistingTemplateCLI(),
        T1VolumeTissueSegmentationCLI(),
        T1VolumeCreateDartelCLI(),
        T1VolumeExistingDartelCLI(),
        T1VolumeDartel2MNICLI(),
        T1VolumeParcellationCLI()
    ]

    run_parser = sub_parser.add_parser(
        'run',
        add_help=False,
        formatter_class=argparse.RawTextHelpFormatter,
        help='To run pipelines on BIDS/CAPS datasets.')
    run_parser.description = (Fore.GREEN +
                              'Run pipelines on BIDS/CAPS datasets.' +
                              Fore.RESET)
    run_parser._positionals.title = (
        Fore.YELLOW + 'clinica run expects one of the following pipelines' +
        Fore.RESET)

    init_cmdparser_objects(parser,
                           run_parser.add_subparsers(metavar='', dest='run'),
                           pipelines)
    """
    convert category: convert one of the supported datasets into BIDS hierarchy
    """
    from clinica.iotools.converters.aibl_to_bids.aibl_to_bids_cli import AiblToBidsCLI  # noqa
    from clinica.iotools.converters.adni_to_bids.adni_to_bids_cli import AdniToBidsCLI  # noqa
    from clinica.iotools.converters.oasis_to_bids.oasis_to_bids_cli import OasisToBidsCLI  # noqa

    converters = ClinicaClassLoader(baseclass=CmdParser,
                                    extra_dir="iotools/converters").load()
    converters += [
        AdniToBidsCLI(),
        AiblToBidsCLI(),
        OasisToBidsCLI(),
    ]

    convert_parser = sub_parser.add_parser(
        'convert',
        add_help=False,
        help='To convert unorganized datasets into a BIDS hierarchy.',
    )
    convert_parser.description = (
        Fore.GREEN +
        'Tools to convert unorganized datasets into a BIDS hierarchy.' +
        Fore.RESET)
    convert_parser._positionals.title = (
        Fore.YELLOW + 'clinica convert expects one of the following datasets' +
        Fore.RESET)
    convert_parser._optionals.title = OPTIONAL_TITLE
    init_cmdparser_objects(
        parser, convert_parser.add_subparsers(metavar='', dest='convert'),
        converters)
    """
    iotools category
    """
    from clinica.iotools.utils.data_handling_cli import CmdParserSubjectsSessions
    from clinica.iotools.utils.data_handling_cli import CmdParserMergeTsv
    from clinica.iotools.utils.data_handling_cli import CmdParserMissingModalities

    io_tools = [
        CmdParserSubjectsSessions(),
        CmdParserMergeTsv(),
        CmdParserMissingModalities(),
    ]

    HELP_IO_TOOLS = 'Tools to handle BIDS/CAPS datasets.'
    io_parser = sub_parser.add_parser(
        'iotools',
        add_help=False,
        help=HELP_IO_TOOLS,
    )
    io_parser.description = (Fore.GREEN + HELP_IO_TOOLS + Fore.RESET)
    io_parser._positionals.title = (
        Fore.YELLOW +
        'clinica iotools expects one of the following BIDS/CAPS utilities' +
        Fore.RESET)
    io_parser._optionals.title = OPTIONAL_TITLE

    init_cmdparser_objects(
        parser, io_parser.add_subparsers(metavar='', dest='iotools'), io_tools)
    """
    visualize category: run one of the available pipelines
    """
    from clinica.engine import CmdParser

    from clinica.pipelines.t1_freesurfer_cross_sectional.t1_freesurfer_cross_sectional_visualizer import T1FreeSurferVisualizer

    visualizers = ClinicaClassLoader(baseclass=CmdParser,
                                     extra_dir="pipelines").load()
    visualizers += [
        T1FreeSurferVisualizer(),
    ]

    visualize_parser = sub_parser.add_parser(
        'visualize',
        add_help=False,
        formatter_class=argparse.RawTextHelpFormatter,
        help='To visualize outputs of Clinica pipelines.')
    visualize_parser.description = (Fore.GREEN +
                                    'Visualize outputs of Clinica pipelines.' +
                                    Fore.RESET)
    visualize_parser._positionals.title = (
        Fore.YELLOW +
        'clinica visualize expects one of the following pipelines' +
        Fore.RESET)

    init_cmdparser_objects(
        parser, visualize_parser.add_subparsers(metavar='', dest='visualize'),
        visualizers)
    """
    generate category: template
    """
    generate_parser = sub_parser.add_parser(
        'generate',
        add_help=False,
        help=('To generate pre-filled files when creating '
              'new pipelines (for developers).'),
    )
    generate_parser.description = (
        Fore.GREEN + ('Generate pre-filled files when creating new pipelines '
                      '(for developers).') + Fore.RESET)
    generate_parser._positionals.title = (
        Fore.YELLOW + 'clinica generate expects one of the following tools' +
        Fore.RESET)
    generate_parser._optionals.title = OPTIONAL_TITLE

    from clinica.engine.template import CmdGenerateTemplates
    init_cmdparser_objects(
        parser, generate_parser.add_subparsers(metavar='', dest='generate'),
        [CmdGenerateTemplates()])
    """
    Silence all sub-parsers' error methods except the one that is called;
    otherwise the output console will display useless messages
    """
    def silent_help():
        pass

    def single_error_message(p):
        def error(x):
            from colorama import Fore
            print('%sError %s%s\n' % (Fore.RED, x, Fore.RESET))
            p.print_help()
            parser.print_help = silent_help
            exit(-1)

        return error

    for p in [run_parser, io_parser, convert_parser, generate_parser]:
        p.error = single_error_message(p)

    # Do not want stderr message
    def silent_msg(x):
        pass

    parser.error = silent_msg
    """
    Parse the command and check that everything went fine
    """
    args = None
    unknown_args = None
    try:
        argcomplete.autocomplete(parser)
        args, unknown_args = parser.parse_known_args()
    except SystemExit:
        exit(0)
    except Exception:
        parser.print_help()
        exit(-1)

    # if unknown_args:
    #    if '--verbose' or '-v' in unknown_args:
    #        cprint('Verbose flag detected')
    #    raise ValueError('Unknown flag detected: %s' % unknown_args)

    if 'run' in args and hasattr(args, 'func') is False:
        # Case when we type `clinica run` on the terminal
        run_parser.print_help()
        exit(0)
    elif 'convert' in args and hasattr(args, 'func') is False:
        # Case when we type `clinica convert` on the terminal
        convert_parser.print_help()
        exit(0)
    elif 'iotools' in args and hasattr(args, 'func') is False:
        # Case when we type `clinica iotools` on the terminal
        io_parser.print_help()
        exit(0)
    elif 'visualize' in args and hasattr(args, 'func') is False:
        # Case when we type `clinica visualize` on the terminal
        visualize_parser.print_help()
        exit(0)
    elif 'generate' in args and hasattr(args, 'func') is False:
        # Case when we type `clinica generate` on the terminal
        generate_parser.print_help()
        exit(0)
    elif args is None or hasattr(args, 'func') is False:
        # Case when we type `clinica` on the terminal
        parser.print_help()
        exit(0)

    import clinica.utils.stream as var
    var.clinica_verbose = args.verbose

    if args.verbose is False:
        """
        Enable only cprint(msg), i.e. Clinica's own print(msg):
        - all plain print() calls are ignored;
        - all logging output is redirected to the log file.
        """
        from clinica.utils.stream import FilterOut
        sys.stdout = FilterOut(sys.stdout)
        import logging as python_logging
        from logging import Filter, ERROR
        import os

        # Work around the warning
        # "Assuming non interactive session since isatty found missing"
        # printed at the beginning of any pipeline by the logger of the
        # duecredit package (utils.py):
        # deactivate stdout during the import, then reactivate it.
        sys.stdout = open(os.devnull, 'w')
        from nipype import config
        sys.stdout = sys.__stdout__

        from nipype import logging

        # Configure Nipype logger for our needs
        config.update_config({
            'logging': {
                'workflow_level': 'INFO',
                'log_directory': os.getcwd(),
                'log_to_file': True
            },
            'execution': {
                'stop_on_first_crash': False,
                'hash_method': 'content'
            }
        })
        logging.update_logging(config)

        # Define the LogFilter for ERROR detection
        class LogFilter(Filter):
            """
            The LogFilter class monitors whether an ERROR log record is emitted
            by Clinica/Nipype. If one is detected, the user is warned.
            """
            def filter(self, record):
                if record.levelno >= ERROR:
                    cprint(
                        "An ERROR was generated: please check the log file for more information"
                    )
                return True

        logger = logging.getLogger('nipype.workflow')
        logger.addFilter(LogFilter())

        # Remove all handlers associated with the root logger object
        for handler in python_logging.root.handlers[:]:
            python_logging.root.removeHandler(handler)

        logging.disable_file_logging()

        # Enable file logging using a filename
        def enable_file_logging(self, filename):
            """
            Hack to choose the filename of the log file: this function is used
            in place of the 'enable_file_logging' method defined in
            'nipype/utils/logger.py'.
            """
            import logging
            from logging.handlers import RotatingFileHandler as RFHandler
            config = self._config
            LOG_FILENAME = os.path.join(config.get('logging', 'log_directory'),
                                        filename)
            hdlr = RFHandler(LOG_FILENAME,
                             maxBytes=int(config.get('logging', 'log_size')),
                             backupCount=int(
                                 config.get('logging', 'log_rotate')))
            formatter = logging.Formatter(fmt=self.fmt, datefmt=self.datefmt)
            hdlr.setFormatter(formatter)
            self._logger.addHandler(hdlr)
            self._fmlogger.addHandler(hdlr)
            self._iflogger.addHandler(hdlr)
            self._hdlr = hdlr

        enable_file_logging(logging, args.logname)

        class Stream:
            def write(self, text):
                print(text)
                sys.stdout.flush()

        python_logging.basicConfig(format=logging.fmt,
                                   datefmt=logging.datefmt,
                                   stream=Stream())

    # Finally, run the pipelines
    args.func(args)
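
# A minimal, self-contained sketch of the Nipype logging pattern used above:
# send workflow logs to a file in a chosen directory and attach a filter that
# reacts to ERROR records. The names `WarnOnError` and
# `configure_nipype_logging` are illustrative, not part of the Clinica code.
import os
from logging import ERROR, Filter

from nipype import config, logging


class WarnOnError(Filter):
    """Print a short warning whenever an ERROR record passes through."""
    def filter(self, record):
        if record.levelno >= ERROR:
            print("An ERROR was logged; check the log file for details")
        return True


def configure_nipype_logging(log_dir=None):
    """Redirect Nipype workflow logs to `log_dir` (defaults to the CWD)."""
    config.update_config({
        'logging': {
            'workflow_level': 'INFO',
            'log_directory': log_dir or os.getcwd(),
            'log_to_file': True
        },
        'execution': {
            'stop_on_first_crash': False,
            'hash_method': 'content'
        }
    })
    logging.update_logging(config)
    logging.getLogger('nipype.workflow').addFilter(WarnOnError())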
Example #57
0
    def process(self):
        # Process time
        now = datetime.datetime.now().strftime("%Y%m%d_%H%M")

        # Initialization
        if os.path.exists(
                os.path.join(self.base_directory, "LOG", "pypeline.log")):
            os.unlink(os.path.join(self.base_directory, "LOG", "pypeline.log"))
        config.update_config({
            'logging': {
                'log_directory': os.path.join(self.base_directory, "LOG"),
                'log_to_file': True
            },
            'execution': {
                'remove_unnecessary_outputs': False
            }
        })
        logging.update_logging(config)
        iflogger = logging.getLogger('interface')

        # Data import
        datasource = pe.Node(
            interface=nio.DataGrabber(outfields=['diffusion', 'T1', 'T2']),
            name='datasource')
        datasource.inputs.base_directory = os.path.join(
            self.base_directory, 'NIFTI')
        datasource.inputs.template = '*'
        datasource.inputs.raise_on_empty = False
        datasource.inputs.field_template = dict(
            diffusion=self.global_conf.imaging_model + '.nii.gz',
            T1='T1.nii.gz',
            T2='T2.nii.gz')
        datasource.inputs.sort_filelist = False

        # Data sinker for output
        sinker = pe.Node(nio.DataSink(), name="diffusion_sinker")
        sinker.inputs.base_directory = os.path.join(self.base_directory,
                                                    "RESULTS")

        # Clear previous outputs
        self.clear_stages_outputs()

        # Create common_flow
        common_flow = self.create_common_flow()

        # Create diffusion flow

        diffusion_flow = pe.Workflow(name='diffusion_pipeline')
        diffusion_inputnode = pe.Node(interface=util.IdentityInterface(fields=[
            "diffusion", "T1", "T2", "wm_mask_file", "roi_volumes",
            "subjects_dir", "subject_id", "atlas_info", "parcellation_scheme"
        ]),
                                      name="inputnode")
        diffusion_outputnode = pe.Node(
            interface=util.IdentityInterface(fields=["connectivity_matrices"]),
            name="outputnode")
        diffusion_flow.add_nodes([diffusion_inputnode, diffusion_outputnode])

        if self.stages['Preprocessing'].enabled:
            preproc_flow = self.create_stage_flow("Preprocessing")
            diffusion_flow.connect([
                (diffusion_inputnode, preproc_flow, [("diffusion",
                                                      "inputnode.diffusion")]),
            ])

        if self.stages['Registration'].enabled:
            reg_flow = self.create_stage_flow("Registration")
            diffusion_flow.connect([
                (diffusion_inputnode, reg_flow,
                 [('T1', 'inputnode.T1'), ('T2', 'inputnode.T2'),
                  ('wm_mask_file', 'inputnode.wm_mask'),
                  ('roi_volumes', 'inputnode.roi_volumes')]),
                (preproc_flow, reg_flow, [('outputnode.diffusion_preproc',
                                           'inputnode.target')])
            ])
            if self.stages[
                    'Registration'].config.registration_mode == "BBregister (FS)":
                diffusion_flow.connect([
                    (diffusion_inputnode, reg_flow,
                     [('subjects_dir', 'inputnode.subjects_dir'),
                      ('subject_id', 'inputnode.subject_id')]),
                ])

        if self.stages['Diffusion'].enabled:
            diff_flow = self.create_stage_flow("Diffusion")
            diffusion_flow.connect([
                (preproc_flow, diff_flow, [('outputnode.diffusion_preproc',
                                            'inputnode.diffusion')]),
                (reg_flow, diff_flow, [('outputnode.wm_mask_registered',
                                        'inputnode.wm_mask_registered')]),
                (reg_flow, diff_flow, [('outputnode.roi_volumes_registered',
                                        'inputnode.roi_volumes')])
            ])

        if self.stages['Connectome'].enabled:
            if self.stages['Diffusion'].config.processing_tool == 'FSL':
                self.stages['Connectome'].config.probtrackx = True
            else:
                self.stages['Connectome'].config.probtrackx = False
            con_flow = self.create_stage_flow("Connectome")
            diffusion_flow.connect([
                (diffusion_inputnode, con_flow,
                 [('parcellation_scheme', 'inputnode.parcellation_scheme')]),
                (diff_flow, con_flow,
                 [('outputnode.track_file', 'inputnode.track_file'),
                  ('outputnode.gFA', 'inputnode.gFA'),
                  ('outputnode.roi_volumes',
                   'inputnode.roi_volumes_registered'),
                  ('outputnode.skewness', 'inputnode.skewness'),
                  ('outputnode.kurtosis', 'inputnode.kurtosis'),
                  ('outputnode.P0', 'inputnode.P0')]),
                (con_flow, diffusion_outputnode,
                 [('outputnode.connectivity_matrices', 'connectivity_matrices')
                  ])
            ])

            if self.stages[
                    'Parcellation'].config.parcellation_scheme == "Custom":
                diffusion_flow.connect([(diffusion_inputnode, con_flow, [
                    ('atlas_info', 'inputnode.atlas_info')
                ])])

        # Create NIPYPE flow

        flow = pe.Workflow(name='NIPYPE',
                           base_dir=os.path.join(self.base_directory))

        flow.connect([
            (datasource, common_flow, [("T1", "inputnode.T1")]),
            (datasource, diffusion_flow, [("diffusion", "inputnode.diffusion"),
                                          ("T1", "inputnode.T1"),
                                          ("T2", "inputnode.T2")]),
            (common_flow, diffusion_flow,
             [("outputnode.subjects_dir", "inputnode.subjects_dir"),
              ("outputnode.subject_id", "inputnode.subject_id"),
              ("outputnode.wm_mask_file", "inputnode.wm_mask_file"),
              ("outputnode.roi_volumes", "inputnode.roi_volumes"),
              ("outputnode.parcellation_scheme",
               "inputnode.parcellation_scheme"),
              ("outputnode.atlas_info", "inputnode.atlas_info")]),
            (diffusion_flow, sinker, [("outputnode.connectivity_matrices",
                                       "%s.%s.connectivity_matrices" %
                                       (self.global_conf.imaging_model, now))])
        ])

        # Process pipeline

        iflogger.info("**** Processing ****")

        if (self.number_of_cores != 1):
            flow.run(plugin='MultiProc',
                     plugin_args={'n_procs': self.number_of_cores})
        else:
            flow.run()

        self.fill_stages_outputs()

        # Clean undesired folders/files
        rm_file_list = ['rh.EC_average', 'lh.EC_average', 'fsaverage']
        for file_to_rm in rm_file_list:
            if os.path.exists(os.path.join(self.base_directory, file_to_rm)):
                os.remove(os.path.join(self.base_directory, file_to_rm))

        # copy .ini and log file
        outdir = os.path.join(self.base_directory, "RESULTS",
                              self.global_conf.imaging_model, now)
        if not os.path.exists(outdir):
            os.makedirs(outdir)
        shutil.copy(self.config_file, outdir)
        shutil.copy(os.path.join(self.base_directory, 'LOG', 'pypeline.log'),
                    outdir)

        iflogger.info("**** Processing finished ****")

        return True, 'Processing successful'
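
# A stripped-down sketch of the DataGrabber -> Workflow -> DataSink wiring used
# in the process() method above. The base directory, field names and the
# identity pass-through node are illustrative assumptions, not values taken
# from the pipeline configuration.
import os

import nipype.interfaces.io as nio
import nipype.interfaces.utility as util
import nipype.pipeline.engine as pe


def build_io_workflow(base_directory):
    """Grab a NIFTI input, route it through an identity node, sink the result."""
    datasource = pe.Node(nio.DataGrabber(outfields=['T1']), name='datasource')
    datasource.inputs.base_directory = os.path.join(base_directory, 'NIFTI')
    datasource.inputs.template = '*'
    datasource.inputs.field_template = dict(T1='T1.nii.gz')
    datasource.inputs.sort_filelist = False

    sinker = pe.Node(nio.DataSink(), name='sinker')
    sinker.inputs.base_directory = os.path.join(base_directory, 'RESULTS')

    passthrough = pe.Node(util.IdentityInterface(fields=['T1']),
                          name='passthrough')

    flow = pe.Workflow(name='io_sketch', base_dir=base_directory)
    flow.connect([
        (datasource, passthrough, [('T1', 'T1')]),
        (passthrough, sinker, [('T1', 'anat.@T1')]),
    ])
    return flow


# flow = build_io_workflow('/path/to/study')
# flow.run(plugin='MultiProc', plugin_args={'n_procs': 4})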
Example #58
0
    try:
        os.makedirs(op.join(opd, 'log'))
    except OSError:
        pass

    config.update_config({
        'logging': {
            'log_directory': op.join(opd, 'log'),
            'log_to_file': True,
            'workflow_level': 'INFO',
            'interface_level': 'INFO'
        },
        'execution': {
            'stop_on_first_crash': False
        }
    })
    logging.update_logging(config)

    # load the sequence parameters from json file
    with open(os.path.join(raw_data_dir, 'acquisition_parameters.json')) as f:
        json_s = f.read()
        acquisition_parameters = json.loads(json_s)

    # load the analysis parameters from json file
    with open(os.path.join(raw_data_dir, 'analysis_parameters.json')) as f:
        json_s = f.read()
        analysis_info = json.loads(json_s)

    # load the analysis/experimental parameters for this subject from json file
    with open(os.path.join(raw_data_dir, sub_id, 'experimental_parameters.json')) as f:
        json_s = f.read()
        experimental_parameters = json.loads(json_s)
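    # Expected on-disk layout implied by the three reads above (an assumption
    # drawn from the paths used here, not from any documentation):
    #   <raw_data_dir>/acquisition_parameters.json
    #   <raw_data_dir>/analysis_parameters.json
    #   <raw_data_dir>/<sub_id>/experimental_parameters.json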