Example #1
def direct_nifti_to_directory(base_directory, dicom_header, niftis, bvals=None, bvecs=None):
    """Move nifti file to the correct directory for the subject
    @param dicom_headers: dicom_header object created by dicom.read_file and stored in a pickle dump
    @param nifti_file: a list of nifti files to move
    @param subject_directory: string representing the main directory for the subject

    @return nifti_destination: string representing where the file moved to

    """
    import dicom
    import os
    
    import nipype.utils.filemanip
    
    ANATOMY = 0
    BOLD = 1
    DTI = 2
    FIELDMAP = 3
    LOCALIZER = 4
    REFERENCE = 5
    DERIVED = 6
    
    from nipype import logging
    iflogger = logging.getLogger('interface')
    wlogger = logging.getLogger('workflow')
    
    iflogger.debug('Entering direct nifti with inputs {} {} {}'.format(dicom_header, niftis, base_directory))
    
    dicom_header_filename = dicom_header
    dicom_header = nipype.utils.filemanip.loadpkl(dicom_header)
    
    scan_keys = [['MPRAGE','FSE','T1w','T2w','PDT','PD-T2','tse2d','t2spc','t2_spc'],
        ['epfid'],
        ['ep_b'],
        ['fieldmap','field_mapping'],
        ['localizer','Scout'],
        ['SBRef']]
        
    file_type = None 

    destination = ""
    
    if type(niftis) is str:
        niftis = [niftis]
    
    try:
        for i in range(0,6):
            for key in scan_keys[i]:
                if (dicom_header.ProtocolName.lower().find(key.lower()) > -1) or \
                    (dicom_header.SeriesDescription.lower().find(key.lower()) > -1) or \
                    (dicom_header.SequenceName.lower().find(key.lower()) > -1):
                        file_type = i
                        
        if dicom_header.ImageType[0] != "ORIGINAL":
            file_type = DERIVED
                
    except AttributeError as e:
        iflogger.warning("Nifti file(s) {} not processed because the DICOM header dataset "
                         "raised an error: {}\nCheck your DICOM headers.".format(niftis, e))
        return []
def return_subject_data(subject_id, data_file):
    import csv
    from nipype import logging
    iflogger = logging.getLogger('interface')

    f = open(data_file, 'r')
    csv_line_by_line = csv.reader(f)
    found = False
    # Must be stored in this order!:
    # 'subject_id', 'dose', 'weight', 'delay', 'glycemie', 'scan_time']
    for line in csv_line_by_line:
        if line[0] == subject_id:
            dose, weight, delay, glycemie, scan_time = [
                float(x) for x in line[1:]]
            iflogger.info('Subject %s found' % subject_id)
            iflogger.info('Dose: %s' % dose)
            iflogger.info('Weight: %s' % weight)
            iflogger.info('Delay: %s' % delay)
            iflogger.info('Glycemie: %s' % glycemie)
            iflogger.info('Scan Time: %s' % scan_time)
            found = True
            break
    f.close()
    if not found:
        raise Exception("Subject id %s was not in the data file!" % subject_id)
    return dose, weight, delay, glycemie, scan_time
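A minimal usage sketch for return_subject_data, assuming nipype is installed; the subject id and numbers below are purely illustrative:

import csv

# Write a one-line data file in the column order documented above:
# subject_id, dose, weight, delay, glycemie, scan_time
with open('subject_data.csv', 'w') as f:
    csv.writer(f).writerow(['sub-01', '180.5', '70.0', '30.0', '5.2', '12.5'])

dose, weight, delay, glycemie, scan_time = return_subject_data('sub-01', 'subject_data.csv')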
def World(in_file, some_parameter):
    from nipype import logging
    iflogger = logging.getLogger('interface')
    message = "World! " + 'some_parameter: ' + str(some_parameter)
    iflogger.info(message)
    with open(in_file, 'a') as fp:
        fp.write(message)
def calc_surface_potential_fn(dipole_file, dipole_row, leadfield, mesh_file, mesh_id):
    import os.path as op
    import numpy as np
    import h5py
    from forward.mesh import get_closest_element_to_point

    from nipype import logging
    iflogger = logging.getLogger('interface')

    dipole_data = np.loadtxt(dipole_file)
    if len(dipole_data.shape) == 1:
        dipole_data = [dipole_data]
    dip = dipole_data[int(dipole_row)]
    
    x, y, z = [float(i) for i in dip[2:5]]
    q_x, q_y, q_z = [float(j) for j in dip[6:9]]
    _, element_idx, centroid, element_data, lf_idx = get_closest_element_to_point(mesh_file, mesh_id, [[x, y, z]])

    lf_data_file = h5py.File(leadfield, "r")
    lf_data = lf_data_file.get("leadfield")
    leadfield_matrix = lf_data.value

    L = np.transpose(leadfield_matrix[lf_idx * 3:lf_idx * 3 + 3])
    J = np.array([q_x,q_y,q_z])
    potential = np.dot(L,J)
    out_potential = op.abspath("potential.npy")
    np.save(out_potential,potential)
    return out_potential
def Hello():
    import os
    from nipype import logging
    iflogger = logging.getLogger('interface')
    message = "Hello "
    file_name = 'hello.txt'
    iflogger.info(message)
    with open(file_name, 'w') as fp:
        fp.write(message)
    return os.path.abspath(file_name)
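The Hello and World helpers above are written in the style nipype expects for Function interfaces (all imports inside the function body). A minimal sketch, assuming nipype is installed, that wires them into a two-node workflow:

import nipype.pipeline.engine as pe
from nipype.interfaces.utility import Function

# Hello produces hello.txt; World appends to it.
hello_node = pe.Node(Function(input_names=[],
                              output_names=['out_file'],
                              function=Hello),
                     name='hello')
world_node = pe.Node(Function(input_names=['in_file', 'some_parameter'],
                              output_names=[],
                              function=World),
                     name='world')
world_node.inputs.some_parameter = 3

wf = pe.Workflow(name='hello_world_wf', base_dir='.')
wf.connect(hello_node, 'out_file', world_node, 'in_file')
wf.run()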
def swap_element_ids(mesh_filename, elem_list, new_elem_id, new_phys_id):
    import time
    import os.path as op
    from nipype.utils.filemanip import split_filename
    from nipype import logging
    iflogger = logging.getLogger('interface')

    iflogger.info("Reading mesh file: %s" % mesh_filename)

    start_time = time.time()
    mesh_file = open(mesh_filename, 'r')
    _, name, _ = split_filename(mesh_filename)
    out_file = op.abspath(name + "_mask.msh")

    f = open(out_file, 'w')
    while True:
        line = mesh_file.readline()
        f.write(line)

        if line == '$Nodes\n':
            line = mesh_file.readline()
            f.write(line)
            number_of_nodes = int(line)
            iflogger.info("%d nodes in mesh" % number_of_nodes)

            for i in range(0, number_of_nodes):
                line = mesh_file.readline()
                f.write(line)

        elif line == '$Elements\n':
            line = mesh_file.readline()
            f.write(line)
            number_of_elements = int(line)
            iflogger.info("%d elements in mesh" % number_of_elements)
            elem_lines = []
            for i in range(0, number_of_elements):
                # -- Gmsh element lines are: element-id, element-type, number-of-tags,
                #    then the tags (index 3 = physical id, index 4 = elementary id),
                #    followed by the node numbers.
                line = mesh_file.readline()
                elem_lines.append(line)
                elem_data = line.split()
                if int(elem_data[0]) in elem_list:
                    elem_data[3] = str(new_phys_id)
                    elem_data[4] = str(new_elem_id)
                line = " ".join(elem_data) + "\n"
                f.write(line)

        elif line == '$EndElementData\n' or len(line) == 0:
            break

    mesh_file.close()
    f.close()
    elapsed_time = time.time() - start_time
    print(elapsed_time)
    return out_file
Example #7
def parse_and_return_mats(one_d_file, mask_arr):
    '''
    Parse a 1D connectivity file and return binarized and weighted sparse
    similarity matrices over the non-zero voxels of mask_arr.
    '''

    # Import packages
    import numpy as np
    import scipy.sparse as sparse
    from nipype import logging

    # Init logger
    logger = logging.getLogger('workflow')

    # Parse out numbers
    logger.info('Parsing contents...')
    graph_arr = np.loadtxt(one_d_file, skiprows=6)

    # Cast as numpy arrays and extract i, j, w
    logger.info('Creating arrays...')
    one_d_rows = graph_arr.shape[0]

    # Extract 3d indices
    ijk1 = graph_arr[:, 2:5].astype('int32')
    ijk2 = graph_arr[:, 5:8].astype('int32')
    # Weighted array and binarized array
    w_arr = graph_arr[:,-1].astype('float32')
    del graph_arr
    b_arr = np.ones(w_arr.shape)

    # Non-zero elements from mask is size of similarity matrix
    mask_idx = np.argwhere(mask_arr)
    mask_voxs = mask_idx.shape[0]

    # Extract the ijw's from 1D file
    i_arr = [np.where((mask_idx == ijk1[ii]).all(axis=1))[0][0] \
             for ii in range(one_d_rows)]
    del ijk1
    j_arr = [np.where((mask_idx == ijk2[ii]).all(axis=1))[0][0] \
             for ii in range(one_d_rows)]
    del ijk2
    i_arr = np.array(i_arr, dtype='int32')
    j_arr = np.array(j_arr, dtype='int32')

    # Construct the sparse matrix
    logger.info('Constructing sparse matrix...')
    wmat_upper_tri = sparse.coo_matrix((w_arr, (i_arr, j_arr)),
                                       shape=(mask_voxs, mask_voxs))
    bmat_upper_tri = sparse.coo_matrix((b_arr, (i_arr, j_arr)),
                                       shape=(mask_voxs, mask_voxs))

    # Make symmetric
    w_similarity_matrix = wmat_upper_tri + wmat_upper_tri.T
    b_similarity_matrix = bmat_upper_tri + bmat_upper_tri.T

    # Return the symmetric matrices
    return b_similarity_matrix, w_similarity_matrix
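A toy usage sketch, assuming numpy, scipy, and nipype are installed: a fake 1D file with two weighted edges inside a fully non-zero 2x2x2 mask (np.loadtxt skips the first six header lines):

import numpy as np

mask = np.ones((2, 2, 2), dtype=np.uint8)
with open('edges.1D', 'w') as f:
    f.write('# header\n' * 6)
    # columns: two leading values, i1 j1 k1, i2 j2 k2, weight
    f.write('0 0  0 0 0  0 0 1  0.8\n')
    f.write('0 0  0 1 0  1 1 1  0.5\n')

b_mat, w_mat = parse_and_return_mats('edges.1D', mask)
print(w_mat.toarray())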
def calc_tpm_fn(tracks):
    import os
    import subprocess
    from nipype import logging
    from nipype.utils.filemanip import split_filename
    path, name, ext = split_filename(tracks)
    file_name = os.path.abspath(name + 'TPM.nii')
    iflogger = logging.getLogger('interface')
    iflogger.info(tracks)
    iflogger.info(" ".join(["tracks2prob", "-vox", "1.0", "-totallength", tracks, file_name]))
    subprocess.call(["tracks2prob", "-vox", "1.0", "-totallength", tracks, file_name])
    return file_name
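calc_tpm_fn shells out to MRtrix's tracks2prob via subprocess. A hedged alternative sketch, assuming tracks2prob is on the PATH and that 'tracks.tck' is a hypothetical track file, issues the same call through nipype's CommandLine so the command string also appears in the nipype logs:

from nipype.interfaces.base import CommandLine

cli = CommandLine(command='tracks2prob')
cli.inputs.args = '-vox 1.0 -totallength tracks.tck tracksTPM.nii'
print(cli.cmdline)  # tracks2prob -vox 1.0 -totallength tracks.tck tracksTPM.nii
cli.run()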
def split_warp_volumes_fn(in_file):
    from nipype import logging
    from nipype.utils.filemanip import split_filename
    import nibabel as nb
    import os.path as op
    iflogger = logging.getLogger('interface')
    iflogger.info(in_file)
    path, name, ext = split_filename(in_file)
    image = nb.load(in_file)
    x_img, y_img, z_img = nb.four_to_three(image)
    x = op.abspath(name + '_x' + ".nii.gz")
    y = op.abspath(name + '_y' + ".nii.gz")
    z = op.abspath(name + '_z' + ".nii.gz")
    nb.save(x_img, x)
    nb.save(y_img, y)
    nb.save(z_img, z)
    return x, y, z
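A small usage sketch, assuming nibabel and nipype are installed: build a dummy three-volume 4D image and split it into per-axis component files with split_warp_volumes_fn.

import numpy as np
import nibabel as nb

data = np.zeros((4, 4, 4, 3), dtype=np.float32)
nb.save(nb.Nifti1Image(data, np.eye(4)), 'warp.nii.gz')

x_file, y_file, z_file = split_warp_volumes_fn('warp.nii.gz')
print(x_file, y_file, z_file)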
Example #10
def setup_logger(logger_name, file_path, level, to_screen=False):
    '''
    Function to initialize and configure a logger that can write to file
    and (optionally) the screen.

    Parameters
    ----------
    logger_name : string
        name of the logger
    file_path : string
        file path to the log file on disk
    level : integer
        indicates the level at which the logger should log; this is
        controlled by integers that come with the python logging
        package. (e.g. logging.INFO=20, logging.DEBUG=10)
    to_screen : boolean (optional)
        flag to indicate whether to enable logging to the screen

    Returns
    -------
    logger : logging.Logger object
        Python logging.Logger object which is capable of logging run-
        time information about the program to file and/or screen
    '''

    # Import packages
    import logging

    # Init logger, formatter, filehandler, streamhandler
    logger = logging.getLogger(logger_name)
    logger.setLevel(level)
    formatter = logging.Formatter('%(asctime)s : %(message)s')

    # Write logs to file
    fileHandler = logging.FileHandler(file_path)
    fileHandler.setFormatter(formatter)
    logger.addHandler(fileHandler)

    # Write to screen, if desired
    if to_screen:
        streamHandler = logging.StreamHandler()
        streamHandler.setFormatter(formatter)
        logger.addHandler(streamHandler)

    # Return the logger
    return logger
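A short usage sketch for setup_logger: create a logger that writes to pipeline.log and also to the screen at INFO level.

import logging

log = setup_logger('my_pipeline', 'pipeline.log', logging.INFO, to_screen=True)
log.info('Pipeline started')
log.debug('This is below the configured level and is not emitted')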
Example #11
def get_dicom_headers(dicom_file):
    import dicom
    import nipype.utils.filemanip
    from nipype import logging
    
    if type(dicom_file) is list:
        dicom_file = dicom_file.pop()
    
    iflogger = logging.getLogger('interface')
    iflogger.debug('Getting headers from {}'.format(dicom_file))
    headers = dicom.read_file(dicom_file)
    iflogger.debug('Returning headers from {}'.format(dicom_file))
    nipype.utils.filemanip.savepkl(dicom_file + '.pklz', headers)
    
    return dicom_file + '.pklz'
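A hedged usage sketch, assuming the dicom (pydicom) package and nipype are installed; '/data/sub-01/001.dcm' is a hypothetical path. The returned .pklz can be reloaded with nipype's filemanip, which is how direct_nifti_to_directory above consumes it:

import nipype.utils.filemanip

pkl_path = get_dicom_headers('/data/sub-01/001.dcm')  # hypothetical DICOM file
headers = nipype.utils.filemanip.loadpkl(pkl_path)
print(headers.ProtocolName)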
def extract_frommask_component(realigned_file, mask_file):
    import os
    import nibabel as nb
    import numpy as np
    from utils import mean_roi_signal
    from nipype import logging
    iflogger = logging.getLogger('interface')

    data = nb.load(realigned_file).get_data().astype('float64')
    mask = nb.load(mask_file).get_data().astype('float64')
    iflogger.info('Data and mask loaded.')
    mask_comp = mean_roi_signal(data, mask.astype('bool'))

    components_file = os.path.join(os.getcwd(), 'mask_mean_component.txt')
    iflogger.info('Saving components file:' + components_file)
    np.savetxt(components_file, mask_comp)

    return components_file
Example #13
def degree_centrality(corr_matrix, r_value, method, out=None):
    """
    Calculate centrality for the rows in the corr_matrix using
    a specified correlation threshold. The centrality output can 
    be binarized or weighted.
    
    Parameters
    ----------
    corr_matrix : numpy.ndarray
    r_value : float
    method : str
        Can be 'binarize' or 'weighted'
    out : numpy.ndarray (optional)
        If specified then should have shape of `corr_matrix.shape[0]`
    
    Returns
    -------
    out : numpy.ndarray
    """

    # Import packages
    import numpy as np
    from nipype import logging

    # Init logger
    logger = logging.getLogger('workflow')

    if method not in ["binarize", "weighted"]:
        raise Exception("Method must be one of binarize or weighted and not %s" % method)
    
    if corr_matrix.dtype.itemsize == 8:
        dtype   = "double"
        r_value = np.float64(r_value)
    else:
        dtype   = "float"
        r_value = np.float32(r_value)
    
    if out is None:
        out = np.zeros(corr_matrix.shape[0], dtype=corr_matrix.dtype)
    logger.info('about to call thresh_and_sum')
    func_name   = "centrality_%s_%s" % (method, dtype)
    func        = globals()[func_name]
    func(corr_matrix, out, r_value)
    
    return out
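degree_centrality dispatches to compiled centrality_<method>_<dtype> helpers that are not shown here. As an illustration only (not the compiled implementation), the binarized case reduces to counting, per row, how many correlations exceed the threshold; a pure-numpy sketch:

import numpy as np

def degree_centrality_binarize_numpy(corr_matrix, r_value):
    # Count supra-threshold correlations in each row.
    return (corr_matrix > r_value).sum(axis=1).astype(corr_matrix.dtype)

corr = np.corrcoef(np.random.rand(10, 50))
print(degree_centrality_binarize_numpy(corr, 0.6))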
def clean_warp_field_fn(combined_warp_x, combined_warp_y, combined_warp_z, default_value):
    import os.path as op
    from nipype import logging
    from nipype.utils.filemanip import split_filename
    import nibabel as nb
    import numpy as np
    path, name, ext = split_filename(combined_warp_x)
    out_file = op.abspath(name + 'CleanedWarp.nii')
    iflogger = logging.getLogger('interface')
    iflogger.info(default_value)
    imgs = []
    filenames = [combined_warp_x, combined_warp_y, combined_warp_z]
    for fname in filenames:
        img = nb.load(fname)
        data = img.get_data()
        data[data==default_value] = np.NaN
        new_img = nb.Nifti1Image(data, affine=img.get_affine(), header=img.get_header())
        imgs.append(new_img)
    image4d = nb.concat_images(imgs, check_affines=True)
    nb.save(image4d, out_file)
    return out_file
def read_mesh_elem_data(mesh_filename, view_name="Cost function"):
    import numpy as np

    from nipype import logging
    iflogger = logging.getLogger('interface')

    iflogger.info("Reading mesh file: %s" % mesh_filename)

    mesh_file = open(mesh_filename, 'r')
    while True:
        line = mesh_file.readline()
        if '$ElementData' in line:
            line = mesh_file.readline()
            name = mesh_file.readline()
            if name == '"' + view_name + '"\n':
                # Skip the real-tag and integer-tag header lines of the
                # $ElementData block that precede the element count
                line = mesh_file.readline()
                line = mesh_file.readline()
                line = mesh_file.readline()
                line = mesh_file.readline()
                line = mesh_file.readline()
                number_of_elements = int(mesh_file.readline().replace("\n",""))

                elem_data = []
                iflogger.info("%d elements in element data" % number_of_elements)
                for i in range(0, number_of_elements):
                    line = mesh_file.readline()
                    line_data = line.split()
                    polygon = {}
                    polygon["element_id"] = int(line_data[0])
                    polygon["data"] = float(line_data[1])
                    elem_data.append(polygon)

                iflogger.info("Done reading element data")
                break
    
    # Loop through and assign points to each polygon, save as a dictionary
    num_polygons = len(elem_data)
    iflogger.info("%d polygons found" % num_polygons)

    return elem_data
def register_template(hemi, sphere_file, transform, templates_path, template):
    """
    Register surface to template with FreeSurfer's mris_register.

    Transform the labels from multiple atlases via a template
    (using FreeSurfer's mris_register).

    """
    from os import path
    from nipype.interfaces.base import CommandLine
    from nipype import logging

    logger = logging.getLogger("interface")

    template_file = path.join(templates_path, hemi + "." + template)
    output_file = hemi + "." + transform
    cli = CommandLine(command="mris_register")
    cli.inputs.args = " ".join(["-curv", sphere_file, template_file, output_file])
    logger.info(cli.cmdline)
    cli.run()

    return transform
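A hedged call sketch for register_template, assuming FreeSurfer's mris_register is on the PATH and nipype is installed; every file and directory name below is purely illustrative:

xfm = register_template(hemi='lh',
                        sphere_file='lh.sphere',             # hypothetical surface
                        transform='sphere_to_template.reg',
                        templates_path='/templates',         # hypothetical directory
                        template='atlas_template.tif')       # hypothetical template file
print(xfm)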
def rewrite_mesh_from_binary_mask(mask_file, mesh_file, mesh_id, new_phys_id=1015):
    # Takes about 8 minutes
    import numpy as np
    import nibabel as nb
    import os.path as op
    from forward.mesh import read_mesh
    from nipype import logging

    iflogger = logging.getLogger('interface')

    mask_img = nb.load(mask_file)
    mask_data = mask_img.get_data()
    mask_affine = mask_img.get_affine()
    mask_header = mask_img.get_header()
    #tensor_data = np.flipud(tensor_image.get_data())

    # Define various constants
    elements_to_consider = [1002] #Use only white matter
    vx, vy, vz = mask_header.get_zooms()[0:3]
    max_x, max_y, max_z = np.shape(mask_data)[0:3]
    halfx, halfy, halfz = np.array((vx*max_x, vy*max_y, vz*max_z))/2.0

    mesh_data, _, _, _ = read_mesh(mesh_file, elements_to_consider)

    elem_list = []
    for idx, poly in enumerate(mesh_data):
        i = np.round((poly['centroid'][0]+halfx)/vx).astype(int)
        j = np.round((poly['centroid'][1]+halfy)/vy).astype(int)
        k = np.round((poly['centroid'][2]+halfz)/vz).astype(int)
        if mask_data[i,j,k]:
            elem_list.append(poly['element_id'])
        #iflogger.info("%3.3f%%" % (float(idx)/num_polygons*100.0))
    new_elem_id = new_phys_id+2000
    out_file = swap_element_ids(mesh_file, elem_list, new_elem_id, new_phys_id)
    print('Writing binary masked mesh file to %s' % out_file)
    return out_file
Example #18
def main():
    """Entry point"""
    from nipype import logging as nlogging
    from multiprocessing import set_start_method, Process, Manager
    from ..viz.reports import generate_reports
    from ..utils.bids import write_derivative_description
    set_start_method('forkserver')

    warnings.showwarning = _warn_redirect
    opts = get_parser().parse_args()

    exec_env = os.name

    # special variable set in the container
    if os.getenv('IS_DOCKER_8395080871'):
        exec_env = 'singularity'
        cgroup = Path('/proc/1/cgroup')
        if cgroup.exists() and 'docker' in cgroup.read_text():
            exec_env = 'docker'
            if os.getenv('DOCKER_VERSION_8395080871'):
                exec_env = 'fmriprep-docker'

    sentry_sdk = None
    if not opts.notrack:
        import sentry_sdk
        from ..__about__ import __version__
        environment = "prod"
        release = __version__
        if not __version__:
            environment = "dev"
            release = "dev"
        elif bool(int(os.getenv('FMRIPREP_DEV', 0))) or ('+' in __version__):
            environment = "dev"

        def before_send(event, hints):
            # Filtering log messages about crashed nodes
            if 'logentry' in event and 'message' in event['logentry']:
                msg = event['logentry']['message']
                if msg.startswith("could not run node:"):
                    return None
                elif msg.startswith("Saving crash info to "):
                    return None
                elif re.match("Node .+ failed to run on host .+", msg):
                    return None

            if 'breadcrumbs' in event and isinstance(event['breadcrumbs'], list):
                fingerprints_to_propagate = ['no-disk-space', 'memory-error', 'permission-denied',
                                             'keyboard-interrupt']
                for bc in event['breadcrumbs']:
                    msg = bc.get('message', 'empty-msg')
                    if msg in fingerprints_to_propagate:
                        event['fingerprint'] = [msg]
                        break

            return event

        sentry_sdk.init("https://[email protected]/1137693",
                        release=release,
                        environment=environment,
                        before_send=before_send)
        with sentry_sdk.configure_scope() as scope:
            scope.set_tag('exec_env', exec_env)

            if exec_env == 'fmriprep-docker':
                scope.set_tag('docker_version', os.getenv('DOCKER_VERSION_8395080871'))

            free_mem_at_start = round(psutil.virtual_memory().free / 1024**3, 1)
            scope.set_tag('free_mem_at_start', free_mem_at_start)
            scope.set_tag('cpu_count', cpu_count())

            # Memory policy may have a large effect on types of errors experienced
            overcommit_memory = Path('/proc/sys/vm/overcommit_memory')
            if overcommit_memory.exists():
                policy = {'0': 'heuristic',
                          '1': 'always',
                          '2': 'never'}.get(overcommit_memory.read_text().strip(), 'unknown')
                scope.set_tag('overcommit_memory', policy)
                if policy == 'never':
                    overcommit_kbytes = Path('/proc/sys/vm/overcommit_kbytes')
                    kb = overcommit_kbytes.read_text().strip()
                    if kb != '0':
                        limit = '{}kB'.format(kb)
                    else:
                        overcommit_ratio = Path('/proc/sys/vm/overcommit_ratio')
                        limit = '{}%'.format(overcommit_ratio.read_text().strip())
                    scope.set_tag('overcommit_limit', limit)
                else:
                    scope.set_tag('overcommit_limit', 'n/a')
            else:
                scope.set_tag('overcommit_memory', 'n/a')
                scope.set_tag('overcommit_limit', 'n/a')

            for k, v in vars(opts).items():
                scope.set_tag(k, v)

    # Validate inputs
    if not opts.skip_bids_validation:
        print("Making sure the input data is BIDS compliant (warnings can be ignored in most "
              "cases).")
        validate_input_dir(exec_env, opts.bids_dir, opts.participant_label)

    # FreeSurfer license
    default_license = str(Path(os.getenv('FREESURFER_HOME')) / 'license.txt')
    # Precedence: --fs-license-file, $FS_LICENSE, default_license
    license_file = opts.fs_license_file or os.getenv('FS_LICENSE', default_license)
    if not os.path.exists(license_file):
        raise RuntimeError(
            'ERROR: a valid license file is required for FreeSurfer to run. '
            'FMRIPREP looked for an existing license file at several paths, in this '
            'order: 1) command line argument ``--fs-license-file``; 2) ``$FS_LICENSE`` '
            'environment variable; and 3) the ``$FREESURFER_HOME/license.txt`` path. '
            'Get it (for free) by registering at https://'
            'surfer.nmr.mgh.harvard.edu/registration.html')
    os.environ['FS_LICENSE'] = license_file

    # Retrieve logging level
    log_level = int(max(25 - 5 * opts.verbose_count, logging.DEBUG))
    # Set logging
    logger.setLevel(log_level)
    nlogging.getLogger('nipype.workflow').setLevel(log_level)
    nlogging.getLogger('nipype.interface').setLevel(log_level)
    nlogging.getLogger('nipype.utils').setLevel(log_level)

    errno = 0

    # Call build_workflow(opts, retval)
    with Manager() as mgr:
        retval = mgr.dict()
        p = Process(target=build_workflow, args=(opts, retval))
        p.start()
        p.join()

        if p.exitcode != 0:
            sys.exit(p.exitcode)

        fmriprep_wf = retval['workflow']
        plugin_settings = retval['plugin_settings']
        bids_dir = retval['bids_dir']
        output_dir = retval['output_dir']
        work_dir = retval['work_dir']
        subject_list = retval['subject_list']
        run_uuid = retval['run_uuid']
        if not opts.notrack:
            with sentry_sdk.configure_scope() as scope:
                scope.set_tag('run_uuid', run_uuid)
                scope.set_tag('npart', len(subject_list))

        retcode = retval['return_code']

    if fmriprep_wf is None:
        sys.exit(1)

    if opts.write_graph:
        fmriprep_wf.write_graph(graph2use="colored", format='svg', simple_form=True)

    if opts.reports_only:
        sys.exit(int(retcode > 0))

    if opts.boilerplate:
        sys.exit(int(retcode > 0))

    # Sentry tracking
    if not opts.notrack:
        sentry_sdk.add_breadcrumb(message='fMRIPrep started', level='info')
        sentry_sdk.capture_message('fMRIPrep started', level='info')

    # Check workflow for missing commands
    missing = check_deps(fmriprep_wf)
    if missing:
        print("Cannot run fMRIPrep. Missing dependencies:")
        for iface, cmd in missing:
            print("\t{} (Interface: {})".format(cmd, iface))
        sys.exit(2)

    # Clean up master process before running workflow, which may create forks
    gc.collect()
    try:
        fmriprep_wf.run(**plugin_settings)
    except RuntimeError as e:
        errno = 1
        if "Workflow did not execute cleanly" not in str(e):
            sentry_sdk.capture_exception(e)
            raise
    finally:
        # Generate reports phase
        errno += generate_reports(subject_list, output_dir, work_dir, run_uuid,
                                  sentry_sdk=sentry_sdk)
        write_derivative_description(bids_dir, str(Path(output_dir) / 'fmriprep'))

    if not opts.notrack and errno == 0:
        sentry_sdk.capture_message('fMRIPrep finished without errors', level='info')
    sys.exit(int(errno > 0))
Example #19
def map_centrality_matrix(centrality_matrix, aff, mask, template_type):
    '''
    Method to map centrality matrix to a nifti image

    Parameters
    ----------
    centrality_matrix : tuple (string, array_like)
        tuple containing matrix name and degree/eigenvector centrality matrix
    aff : ndarray
        Affine matrix of the input data
    mask : ndarray
        Mask or roi data matrix
    template_type : int
        type of template: 0 for mask, 1 for roi

    Returns
    -------
    out_file : string (nifti image)
        nifti image mapped from the centrality matrix

    Raises
    ------
    Exception
    '''

    # Import packages
    import os
    import nibabel as nib
    import numpy as np
    from nipype import logging

    # Init logger
    logger = logging.getLogger('workflow')

    try:
        out_file, matrix = centrality_matrix

        out_file = os.path.join(os.getcwd(), out_file + '.nii.gz')
        sparse_m = np.zeros((mask.shape), dtype=float)

        logger.info('mapping centrality matrix to nifti image: %s' % out_file)

        if int(template_type) == 0:
            cords = np.argwhere(mask)
            index=0
            for val in cords:
                x,y,z=val
                sparse_m[x,y,z]= matrix[index]
                index+=1

        elif int(template_type) == 1:
            nodes = np.unique(mask).tolist()
            nodes.sort()
            index = 0
            for n in nodes:
                if n> 0:
                    cords = np.argwhere(mask==n)
                    for val in cords:
                        x,y,z = val
                        if isinstance(matrix[index], list):
                            sparse_m[x,y,z] = matrix[index][0]
                        else:
                            sparse_m[x,y,z]=matrix[index]
                    index+=1

        nifti_img = nib.Nifti1Image(sparse_m, aff)
        nifti_img.to_filename(out_file)

        return out_file
    except Exception as exc:
        err_msg = 'Error in mapping centrality matrix to nifti image. '\
                  'Error: %s' % exc
        raise Exception(err_msg)
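A toy usage sketch, assuming nibabel, numpy, and nipype are installed: map a three-element centrality vector back into a 3x3x3 binary mask (template_type=0) with an identity affine.

import numpy as np

mask = np.zeros((3, 3, 3), dtype=np.uint8)
mask[1, 1, :] = 1                      # three voxels in the mask
centrality = np.array([0.5, 1.0, 1.5])
out_nii = map_centrality_matrix(('degree_centrality', centrality),
                                np.eye(4), mask, template_type=0)
print(out_nii)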
Example #20
def extract_noise_components(realigned_file, noise_mask_file, num_components,
                             csf_mask_file, selector,
                             realignment_parameters=None, outlier_file=None, regress_before_PCA=True):
    """Derive components most reflective of physiological noise
    
    Parameters
    ----------
    realigned_file :
    noise_mask_file :
    num_components :
    csf_mask_file :
    selector :
    
    Returns
    -------
    components_file :
    """

    import os
    from nibabel import load
    import numpy as np
    import scipy as sp
    from scipy.signal import detrend
    from nipype import logging
    logger = logging.getLogger('interface')

    def try_import(fname):
        try:
            a = np.genfromtxt(fname)
            return a
        except Exception:
            return np.array([])

    options = np.array([noise_mask_file, csf_mask_file])
    selector = np.array(selector)
    imgseries = load(realigned_file)
    nuisance_matrix = np.ones((imgseries.shape[-1], 1))
    if realignment_parameters is not None:
        logger.debug('adding motion pars')
        logger.debug('%s %s' % (str(nuisance_matrix.shape),
            str(np.genfromtxt(realignment_parameters).shape)))
        nuisance_matrix = np.hstack((nuisance_matrix,
                                     np.genfromtxt(realignment_parameters)))
    if outlier_file is not None:
        logger.debug('collecting outliers')
        outliers = try_import(outlier_file)
        if outliers.shape == ():  # 1 outlier
            art = np.zeros((imgseries.shape[-1], 1))
            art[np.int_(outliers), 0] = 1 #  art outputs 0 based indices
            nuisance_matrix = np.hstack((nuisance_matrix, art))
        elif outliers.shape[0] == 0:  # empty art file
            pass
        else:  # >1 outlier
            art = np.zeros((imgseries.shape[-1], len(outliers)))
            for j, t in enumerate(outliers):
                art[np.int_(t), j] = 1 #  art outputs 0 based indices
            nuisance_matrix = np.hstack((nuisance_matrix, art))
    if selector.all():  # both values of selector are true, need to concatenate
        tcomp = load(noise_mask_file)
        acomp = load(csf_mask_file)
        voxel_timecourses = imgseries.get_data()[np.nonzero(tcomp.get_data() +
                                                            acomp.get_data())]
    else:
        noise_mask_file = options[selector][0]
        noise_mask = load(noise_mask_file)
        voxel_timecourses = imgseries.get_data()[np.nonzero(noise_mask.get_data())]

    voxel_timecourses = voxel_timecourses.byteswap().newbyteorder()
    voxel_timecourses[np.isnan(np.sum(voxel_timecourses,axis=1)),:] = 0
    if regress_before_PCA:
        logger.debug('Regressing motion')
        for timecourse in voxel_timecourses:
            #timecourse[:] = detrend(timecourse, type='constant')
            coef_, _, _, _ = np.linalg.lstsq(nuisance_matrix, timecourse[:, None])
            timecourse[:] = (timecourse[:, None] - np.dot(nuisance_matrix,
                                                          coef_)).ravel()

    pre_svd = os.path.abspath('pre_svd.npz')
    np.savez(pre_svd,voxel_timecourses=voxel_timecourses)
    _, _, v = sp.linalg.svd(voxel_timecourses, full_matrices=False)
    components_file = os.path.join(os.getcwd(), 'noise_components.txt')
    np.savetxt(components_file, v[:num_components, :].T)
    return components_file, pre_svd
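A toy usage sketch with random data, assuming nibabel, numpy, scipy, and nipype are installed; selector=[True, False] restricts the component extraction to the first (noise) mask:

import numpy as np
import nibabel as nb

rng = np.random.RandomState(0)
nb.save(nb.Nifti1Image(rng.rand(4, 4, 4, 20).astype('float32'), np.eye(4)),
        'func.nii.gz')
mask = (rng.rand(4, 4, 4) > 0.5).astype('uint8')
nb.save(nb.Nifti1Image(mask, np.eye(4)), 'noise_mask.nii.gz')
nb.save(nb.Nifti1Image(mask, np.eye(4)), 'csf_mask.nii.gz')

components_file, pre_svd = extract_noise_components(
    'func.nii.gz', 'noise_mask.nii.gz', num_components=5,
    csf_mask_file='csf_mask.nii.gz', selector=[True, False])
print(components_file)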
Example #21
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
"""Estimate fieldmaps for :abbr:`SDC (susceptibility distortion correction)`."""
from nipype import logging
from nipype.pipeline import engine as pe
from nipype.interfaces import utility as niu
from niworkflows.engine.workflows import LiterateWorkflow as Workflow

LOGGER = logging.getLogger("nipype.workflow")


def init_brainextraction_wf(name="brainextraction_wf"):
    """
    Remove nonbrain tissue from images.

    Parameters
    ----------
    name : :obj:`str`, optional
        Workflow name (default: ``"brainextraction_wf"``)

    Inputs
    ------
    in_file : :obj:`str`
        the GRE magnitude or EPI reference to be brain-extracted

    Outputs
    -------
    out_file : :obj:`str`
        the input file after N4 and smart clipping
    out_brain : :obj:`str`
        the output file, just the brain extracted
Example #22
import os
from collections import defaultdict

import nibabel as nb
from nipype import logging

from nipype.pipeline import engine as pe
from nipype.interfaces import utility as niu

from ...interfaces import MergeDWIs, ConformDwi
from ...interfaces.mrtrix import DWIDenoise

from ...engine import Workflow

DEFAULT_MEMORY_MIN_GB = 0.01
LOGGER = logging.getLogger('nipype.workflow')


def init_merge_and_denoise_wf(dwi_denoise_window,
                              denoise_before_combining,
                              mem_gb=1,
                              omp_nthreads=1,
                              name="merge_and_denoise_wf"):
    """

    .. workflow::
        :graph2use: orig
        :simple_form: yes

        from qsiprep.workflows.dwi import init_merge_and_denoise_wf
        wf = init_merge_and_denoise_wf(dwi_denoise_window=7,
Example #23
    def process(self):
        # Process time
        self.now = datetime.datetime.now().strftime("%Y%m%d_%H%M")

        subject_directory = os.path.join(self.base_directory, self.subject)
        deriv_subject_directory = os.path.join(self.base_directory,
                                               "derivatives", "cmp",
                                               self.subject)

        # Initialization
        if os.path.isfile(os.path.join(deriv_subject_directory,
                                       "pypeline.log")):
            os.unlink(os.path.join(deriv_subject_directory, "pypeline.log"))
        config.update_config({
            'logging': {
                'log_directory': deriv_subject_directory,
                'log_to_file': True
            },
            'execution': {
                'remove_unnecessary_outputs': False
            }
        })
        logging.update_logging(config)
        iflogger = logging.getLogger('interface')

        # Data import
        #datasource = pe.Node(interface=nio.DataGrabber(outfields = ['T1','T2','diffusion','bvecs','bvals']), name='datasource')
        datasource = pe.Node(interface=nio.DataGrabber(
            outfields=['T1', 'diffusion', 'bvecs', 'bvals']),
                             name='datasource')
        datasource.inputs.base_directory = deriv_subject_directory
        datasource.inputs.template = '*'
        datasource.inputs.raise_on_empty = False
        #datasource.inputs.field_template = dict(T1='anat/T1.nii.gz', T2='anat/T2.nii.gz', diffusion='dwi/dwi.nii.gz', bvecs='dwi/dwi.bvec', bvals='dwi/dwi.bval')
        datasource.inputs.field_template = dict(
            T1='anat/' + self.subject + '_T1w.nii.gz',
            diffusion='dwi/' + self.subject + '_dwi.nii.gz',
            bvecs='dwi/' + self.subject + '_dwi.bvec',
            bvals='dwi/' + self.subject + '_dwi.bval')
        #datasource.inputs.field_template_args = dict(T1=[['subject']], T2=[['subject']], diffusion=[['subject', ['subject']]], bvecs=[['subject', ['subject']]], bvals=[['subject', ['subject']]])
        datasource.inputs.sort_filelist = False
        #datasource.inputs.subject = self.subject

        print(datasource.inputs)

        datasource.run()

        print(datasource.outputs)

        # try:
        #     datasource.run()
        # except Exception as e:
        #     print e

        #templates =    {"T1": "derivatives/cmp/{subject}/anat/{subject}_T1w_reo.nii.gz",
        #                "T2": "derivatives/cmp/{subject}/anat/{subject}_T2w_reo.nii.gz",
        #                "diffusion": "{subject}/dwi/{subject}_dwi.nii.gz",
        #                "bvecs": "{subject}/dwi/{subject}_dwi.bvec",
        #                "bvals": "{subject}/dwi/{subject}_dwi.bval",}
        #datasource = pe.Node(interface=nio.SelectFiles(templates, base_directory=base_dir, subject=self.subject), name='datasource')
        #res = datasource.run()

        # Data sinker for output
        sinker = pe.Node(nio.DataSink(), name="diffusion_sinker")
        sinker.inputs.base_directory = os.path.join(deriv_subject_directory)

        #Dataname substitutions in order to comply with BIDS derivatives specifications
        sinker.inputs.substitutions = [
            ('T1_registered_crop',
             self.subject + '_T1w_space-T1w-crop_preproc'),
            ('roi_volumes_flirt_crop',
             self.subject + '_T1w_space-T1w-crop_labels'),
            ('brain_registered_crop',
             self.subject + '_T1w_space-T1w-crop_brain'),
            ('brain_mask_registered_crop',
             self.subject + '_T1w_space-T1w-crop_brainmask'),
            ('wm_registered_crop',
             self.subject + '_T1w_space-T1w-crop_class-GM'),
            ('connectome', self.subject + '_dwi_connectome'),
            ('dwi.nii.gz', self.subject + '_dwi.nii.gz'),
            ('dwi.bval', self.subject + '_dwi.bval'),
            ('dwi.bvec', self.subject + '_dwi.bvec'),
            ('diffusion_resampled_CSD.mif',
             self.subject + '_dwi_space-T1w-crop_CSD.mif'),
            ('diffusion_resampled_CSD_tracked',
             self.subject + '_dwi_space-T1w-crop_tract'),
            ('eddy_corrected.nii.gz.eddy_rotated_bvecs',
             self.subject + '_dwi_preproc.eddy_rotated_bvec'),
            ('eddy_corrected.nii.gz', self.subject + '_dwi_preproc.nii.gz'),
            ('dwi_brain_mask', self.subject + '_dwi_brainmask'),
            ('FA', self.subject + '_dwi_FA'),
            ('grad.txt', self.subject + '_dwi_grad.txt'),
            ('target_epicorrected',
             self.subject + '_dwi_space-T1w-crop_preproc')
        ]

        # Clear previous outputs
        self.clear_stages_outputs()

        flow = pe.Workflow(name='nipype',
                           base_dir=os.path.join(deriv_subject_directory,
                                                 'tmp'))

        # Create common_flow
        common_flow = self.create_common_flow()

        flow.connect([(datasource, common_flow, [("T1", "inputnode.T1")])])

        # Create diffusion flow

        diffusion_flow = pe.Workflow(name='diffusion_pipeline')
        diffusion_inputnode = pe.Node(interface=util.IdentityInterface(fields=[
            'diffusion', 'bvecs', 'bvals', 'T1', 'brain', 'T2', 'brain_mask',
            'wm_mask_file', 'roi_volumes', 'subjects_dir', 'subject_id',
            'atlas_info', 'parcellation_scheme'
        ]),
                                      name='inputnode')
        diffusion_outputnode = pe.Node(
            interface=util.IdentityInterface(fields=['connectivity_matrices']),
            name='outputnode')
        diffusion_flow.add_nodes([diffusion_inputnode, diffusion_outputnode])

        flow.connect([
            #(datasource,diffusion_flow,[("T2","inputnode.T2")]),
            (datasource, diffusion_flow, [("diffusion", "inputnode.diffusion"),
                                          ("bvecs", "inputnode.bvecs"),
                                          ("bvals", "inputnode.bvals")]),
            (common_flow, diffusion_flow,
             [("outputnode.subjects_dir", "inputnode.subjects_dir"),
              ("outputnode.subject_id", "inputnode.subject_id"),
              ("outputnode.T1", "inputnode.T1"),
              ("outputnode.brain", "inputnode.brain"),
              ("outputnode.brain_mask", "inputnode.brain_mask"),
              ("outputnode.wm_mask_file", "inputnode.wm_mask_file"),
              ("outputnode.roi_volumes", "inputnode.roi_volumes"),
              ("outputnode.parcellation_scheme",
               "inputnode.parcellation_scheme"),
              ("outputnode.atlas_info", "inputnode.atlas_info")]),
            (diffusion_flow, sinker, [("outputnode.connectivity_matrices",
                                       "dwi.@connectivity_matrices")])
        ])

        print(diffusion_inputnode.outputs)

        if self.stages['Preprocessing'].enabled:
            preproc_flow = self.create_stage_flow("Preprocessing")
            diffusion_flow.connect([
                (diffusion_inputnode, preproc_flow,
                 [('diffusion', 'inputnode.diffusion'),
                  ('brain', 'inputnode.brain'),
                  ('brain_mask', 'inputnode.brain_mask'),
                  ('wm_mask_file', 'inputnode.wm_mask_file'),
                  ('roi_volumes', 'inputnode.roi_volumes'),
                  ('bvecs', 'inputnode.bvecs'), ('bvals', 'inputnode.bvals'),
                  ('T1', 'inputnode.T1')]),
            ])

        if self.stages['Registration'].enabled:
            reg_flow = self.create_stage_flow("Registration")
            diffusion_flow.connect([
                #(diffusion_inputnode,reg_flow,[('T2','inputnode.T2')]),
                (diffusion_inputnode, reg_flow, [("bvals", "inputnode.bvals")]
                 ),
                (preproc_flow, reg_flow,
                 [('outputnode.T1', 'inputnode.T1'),
                  ('outputnode.bvecs_rot', 'inputnode.bvecs'),
                  ('outputnode.wm_mask_file', 'inputnode.wm_mask'),
                  ('outputnode.roi_volumes', 'inputnode.roi_volumes'),
                  ("outputnode.brain", "inputnode.brain"),
                  ("outputnode.brain_mask", "inputnode.brain_mask"),
                  ("outputnode.brain_mask_full", "inputnode.brain_mask_full"),
                  ('outputnode.diffusion_preproc', 'inputnode.target'),
                  ('outputnode.dwi_brain_mask', 'inputnode.target_mask')]),
                (preproc_flow, sinker, [("outputnode.bvecs_rot",
                                         "dwi.@bvecs_rot")]),
                (preproc_flow, sinker, [("outputnode.diffusion_preproc",
                                         "dwi.@cdiffusion_preproc")]),
                (preproc_flow, sinker, [("outputnode.dwi_brain_mask",
                                         "dwi.@diffusion_brainmask")])
            ])
            if self.stages[
                    'Registration'].config.registration_mode == "BBregister (FS)":
                diffusion_flow.connect([
                    (diffusion_inputnode, reg_flow,
                     [('subjects_dir', 'inputnode.subjects_dir'),
                      ('subject_id', 'inputnode.subject_id')]),
                ])

        if self.stages['Diffusion'].enabled:
            diff_flow = self.create_stage_flow("Diffusion")
            diffusion_flow.connect([
                (reg_flow, diff_flow, [('outputnode.target_epicorrected',
                                        'inputnode.diffusion')]),
                (reg_flow, diff_flow, [('outputnode.wm_mask_registered_crop',
                                        'inputnode.wm_mask_registered')]),
                (reg_flow, diff_flow,
                 [('outputnode.roi_volumes_registered_crop',
                   'inputnode.roi_volumes')]),
                (reg_flow, diff_flow, [('outputnode.grad', 'inputnode.grad')]),
                (reg_flow, sinker, [("outputnode.target_epicorrected",
                                     "dwi.@bdiffusion_reg_crop")]),
                (reg_flow, sinker, [("outputnode.grad", "dwi.@diffusion_grad")
                                    ]),
                (reg_flow, sinker, [("outputnode.T1_registered_crop",
                                     "anat.@T1_reg_crop")]),
                (reg_flow, sinker, [("outputnode.brain_registered_crop",
                                     "anat.@brain_reg_crop")]),
                (reg_flow, sinker, [("outputnode.brain_mask_registered_crop",
                                     "anat.@brain_mask_reg_crop")]),
                (reg_flow, sinker, [("outputnode.wm_mask_registered_crop",
                                     "anat.@wm_mask_reg_crop")]),
                (reg_flow, sinker, [("outputnode.roi_volumes_registered_crop",
                                     "anat.@vrois_reg_crop")])
            ])

        if self.stages['MRTrixConnectome'].enabled:
            if self.stages['Diffusion'].config.processing_tool == 'FSL':
                self.stages['MRTrixConnectome'].config.probtrackx = True
            else:
                self.stages['MRTrixConnectome'].config.probtrackx = False
            con_flow = self.create_stage_flow("MRTrixConnectome")
            diffusion_flow.connect([
                (diffusion_inputnode, con_flow,
                 [('parcellation_scheme', 'inputnode.parcellation_scheme')]),
                (diff_flow, con_flow,
                 [('outputnode.diffusion_model', 'inputnode.diffusion_model'),
                  ('outputnode.track_file', 'inputnode.track_file'),
                  ('outputnode.fod_file', 'inputnode.fod_file'),
                  ('outputnode.gFA', 'inputnode.gFA'),
                  ('outputnode.roi_volumes',
                   'inputnode.roi_volumes_registered'),
                  ('outputnode.skewness', 'inputnode.skewness'),
                  ('outputnode.kurtosis', 'inputnode.kurtosis'),
                  ('outputnode.P0', 'inputnode.P0')]),
                (con_flow, diffusion_outputnode,
                 [('outputnode.connectivity_matrices', 'connectivity_matrices')
                  ]),
                (diff_flow, sinker, [('outputnode.track_file',
                                      'dwi.@track_file'),
                                     ('outputnode.fod_file', 'dwi.@fod_file'),
                                     ('outputnode.gFA', 'dwi.@gFA'),
                                     ('outputnode.skewness', 'dwi.@skewness'),
                                     ('outputnode.kurtosis', 'dwi.@kurtosis'),
                                     ('outputnode.P0', 'dwi.@P0')])
            ])

            if self.stages[
                    'Parcellation'].config.parcellation_scheme == "Custom":
                diffusion_flow.connect([(diffusion_inputnode, con_flow, [
                    ('atlas_info', 'inputnode.atlas_info')
                ])])

        iflogger.info("**** Processing ****")

        if (self.number_of_cores != 1):
            flow.run(plugin='MultiProc',
                     plugin_args={'n_procs': self.number_of_cores})
        else:
            flow.run()

        self.fill_stages_outputs()

        # Clean undesired folders/files
        # rm_file_list = ['rh.EC_average','lh.EC_average','fsaverage']
        # for file_to_rm in rm_file_list:
        #     if os.path.exists(os.path.join(self.base_directory,file_to_rm)):
        #         os.remove(os.path.join(self.base_directory,file_to_rm))

        # copy .ini and log file
        outdir = os.path.join(self.base_directory, "derivatives", "cmp",
                              self.subject)
        if not os.path.exists(outdir):
            os.makedirs(outdir)
        shutil.copy(self.config_file, outdir)
        #shutil.copy(os.path.join(self.base_directory,"derivatives","cmp",self.subject,'pypeline.log'),outdir)

        iflogger.info("**** Processing finished ****")

        return True, 'Processing successful'
Example #24
def main():
    """Entry point"""
    import sys
    from nipype import logging as nlogging
    from multiprocessing import set_start_method, Process, Manager
    set_start_method('forkserver')

    # Run parser
    opts = get_parser().parse_args()

    # Analysis levels
    analysis_levels = set(opts.analysis_level)
    if not opts.participant_label:
        analysis_levels.add('group')

    # Retrieve logging level
    log_level = int(max(25 - 5 * opts.verbose_count, 1))

    # Set logging level
    logging.getLogger('mriqc').setLevel(log_level)
    nlogging.getLogger('nipype.workflow').setLevel(log_level)
    nlogging.getLogger('nipype.interface').setLevel(log_level)
    nlogging.getLogger('nipype.utils').setLevel(log_level)

    logger = logging.getLogger('mriqc')
    INIT_MSG = """
    Running MRIQC version {version}:
      * BIDS dataset path: {bids_dir}.
      * Output folder: {output_dir}.
      * Analysis levels: {levels}.
    """.format(version=__version__,
               bids_dir=opts.bids_dir.expanduser().resolve(),
               output_dir=opts.output_dir.expanduser().resolve(),
               levels=', '.join(reversed(list(analysis_levels))))
    logger.log(25, INIT_MSG)

    # Set up participant level
    if 'participant' in analysis_levels:
        logger.info('Participant level started. Checking BIDS dataset...')

        # Call build_workflow(opts, retval)
        with Manager() as mgr:
            retval = mgr.dict()
            p = Process(target=init_mriqc, args=(opts, retval))
            p.start()
            p.join()

            if p.exitcode != 0:
                sys.exit(p.exitcode)

            mriqc_wf = retval['workflow']
            plugin_settings = retval['plugin_settings']
            subject_list = retval['subject_list']

        if not subject_list:
            logger.critical(
                'MRIQC did not find any target image file under the given BIDS '
                'folder (%s). Please check that the dataset is BIDS valid at '
                'http://bids-standard.github.io/bids-validator/ .',
                opts.bids_dir.resolve())

            bids_selectors = []
            for entity in [
                    'participant-label', 'modalities', 'session-id', 'task-id',
                    'run-id'
            ]:
                values = getattr(opts, entity.replace('-', '_'), None)
                if values:
                    bids_selectors += ['--%s %s' % (entity, ' '.join(values))]
            if bids_selectors:
                logger.warning(
                    'The following BIDS entities were selected as filters: %s. '
                    'Please, check whether their combinations are possible.',
                    ', '.join(bids_selectors))
            sys.exit(1)

        if mriqc_wf is None:
            logger.error(
                'Failed to create the MRIQC workflow, please report the issue '
                'to https://github.com/poldracklab/mriqc/issues')
            sys.exit(1)

        # Clean up master process before running workflow, which may create forks
        gc.collect()
        if not opts.dry_run:
            # Warn about submitting measures BEFORE
            if not opts.no_sub:
                logger.warning(DSA_MESSAGE)

            # run MRIQC
            mriqc_wf.run(**plugin_settings)

            # Warn about submitting measures AFTER
            if not opts.no_sub:
                logger.warning(DSA_MESSAGE)
        logger.info('Participant level finished successfully.')

    # Set up group level
    if 'group' in analysis_levels:
        from ..reports import group_html
        from ..utils.misc import generate_tsv  # , generate_pred

        logger.info('Group level started...')

        # Generate reports
        mod_group_reports = []
        for mod in opts.modalities:
            dataframe, out_tsv = generate_tsv(
                opts.output_dir.expanduser().resolve(), mod)
            # If there are no iqm.json files, nothing to do.
            if dataframe is None:
                continue

            logger.info('Generated summary TSV table for the %s data (%s)',
                        mod, out_tsv)

            # out_pred = generate_pred(derivatives_dir, settings['output_dir'], mod)
            # if out_pred is not None:
            #     log.info('Predicted QA CSV table for the %s data generated (%s)',
            #                    mod, out_pred)

            out_html = opts.output_dir / ('group_%s.html' % mod)
            group_html(out_tsv,
                       mod,
                       csv_failed=opts.output_dir /
                       ('group_variant-failed_%s.csv' % mod),
                       out_file=out_html)
            logger.info('Group-%s report generated (%s)', mod, out_html)
            mod_group_reports.append(mod)

        if not mod_group_reports:
            raise Exception(
                "No data found. No group level reports were generated.")

        logger.info('Group level finished successfully.')
Example #25
def main():
    """Entry point"""
    from nipype import logging as nlogging
    from multiprocessing import set_start_method, Process, Manager
    from ..utils.bids import write_derivative_description, validate_input_dir
    set_start_method('forkserver')

    warnings.showwarning = _warn_redirect
    opts = get_parser().parse_args()

    exec_env = os.name

    # special variable set in the container
    if os.getenv('IS_DOCKER_8395080871'):
        exec_env = 'singularity'
        cgroup = Path('/proc/1/cgroup')
        if cgroup.exists() and 'docker' in cgroup.read_text():
            exec_env = 'docker'
            if os.getenv('DOCKER_VERSION_8395080871'):
                exec_env = 'fmriprep-docker'

    sentry_sdk = None
    if not opts.notrack:
        import sentry_sdk
        from ..utils.sentry import sentry_setup
        sentry_setup(opts, exec_env)

    if opts.debug:
        print('WARNING: Option --debug is deprecated and has no effect',
              file=sys.stderr)

    # Validate inputs
    if not opts.skip_bids_validation:
        print("Making sure the input data is BIDS compliant (warnings can be ignored in most "
              "cases).")
        validate_input_dir(exec_env, opts.bids_dir, opts.participant_label)

    # FreeSurfer license
    default_license = str(Path(os.getenv('FREESURFER_HOME')) / 'license.txt')
    # Precedence: --fs-license-file, $FS_LICENSE, default_license
    license_file = opts.fs_license_file or Path(os.getenv('FS_LICENSE', default_license))
    if not license_file.exists():
        raise RuntimeError("""\
ERROR: a valid license file is required for FreeSurfer to run. fMRIPrep looked for an existing \
license file at several paths, in this order: 1) command line argument ``--fs-license-file``; \
2) ``$FS_LICENSE`` environment variable; and 3) the ``$FREESURFER_HOME/license.txt`` path. Get it \
(for free) by registering at https://surfer.nmr.mgh.harvard.edu/registration.html""")
    os.environ['FS_LICENSE'] = str(license_file.resolve())

    # Retrieve logging level
    log_level = int(max(25 - 5 * opts.verbose_count, logging.DEBUG))
    # Set logging
    logger.setLevel(log_level)
    nlogging.getLogger('nipype.workflow').setLevel(log_level)
    nlogging.getLogger('nipype.interface').setLevel(log_level)
    nlogging.getLogger('nipype.utils').setLevel(log_level)

    # Call build_workflow(opts, retval)
    with Manager() as mgr:
        retval = mgr.dict()
        p = Process(target=build_workflow, args=(opts, retval))
        p.start()
        p.join()

        retcode = p.exitcode or retval.get('return_code', 0)

        bids_dir = Path(retval.get('bids_dir'))
        output_dir = Path(retval.get('output_dir'))
        work_dir = Path(retval.get('work_dir'))
        plugin_settings = retval.get('plugin_settings', None)
        subject_list = retval.get('subject_list', None)
        fmriprep_wf = retval.get('workflow', None)
        run_uuid = retval.get('run_uuid', None)

    if opts.reports_only:
        sys.exit(int(retcode > 0))

    if opts.boilerplate:
        sys.exit(int(retcode > 0))

    if fmriprep_wf and opts.write_graph:
        fmriprep_wf.write_graph(graph2use="colored", format='svg', simple_form=True)

    retcode = retcode or int(fmriprep_wf is None)
    if retcode != 0:
        sys.exit(retcode)

    # Check workflow for missing commands
    missing = check_deps(fmriprep_wf)
    if missing:
        print("Cannot run fMRIPrep. Missing dependencies:", file=sys.stderr)
        for iface, cmd in missing:
            print("\t{} (Interface: {})".format(cmd, iface))
        sys.exit(2)
    # Clean up master process before running workflow, which may create forks
    gc.collect()

    # Sentry tracking
    if not opts.notrack:
        from ..utils.sentry import start_ping
        start_ping(run_uuid, len(subject_list))

    errno = 1  # Default is error exit unless otherwise set
    try:
        fmriprep_wf.run(**plugin_settings)
    except Exception as e:
        if not opts.notrack:
            from ..utils.sentry import process_crashfile
            crashfolders = [output_dir / 'fmriprep' / 'sub-{}'.format(s) / 'log' / run_uuid
                            for s in subject_list]
            for crashfolder in crashfolders:
                for crashfile in crashfolder.glob('crash*.*'):
                    process_crashfile(crashfile)

            if "Workflow did not execute cleanly" not in str(e):
                sentry_sdk.capture_exception(e)
        logger.critical('fMRIPrep failed: %s', e)
        raise
    else:
        if opts.run_reconall:
            from templateflow import api
            from niworkflows.utils.misc import _copy_any
            dseg_tsv = str(api.get('fsaverage', suffix='dseg', extensions=['.tsv']))
            _copy_any(dseg_tsv,
                      str(output_dir / 'fmriprep' / 'desc-aseg_dseg.tsv'))
            _copy_any(dseg_tsv,
                      str(output_dir / 'fmriprep' / 'desc-aparcaseg_dseg.tsv'))
        errno = 0
        logger.log(25, 'fMRIPrep finished without errors')
        if not opts.notrack:
            sentry_sdk.capture_message('fMRIPrep finished without errors',
                                       level='info')
    finally:
        from niworkflows.reports import generate_reports
        # Generate reports phase
        failed_reports = generate_reports(
            subject_list, output_dir, work_dir, run_uuid, packagename='fmriprep')
        write_derivative_description(bids_dir, output_dir / 'fmriprep')

        if failed_reports and not opts.notrack:
            sentry_sdk.capture_message(
                'Report generation failed for %d subjects' % failed_reports,
                level='error')
        sys.exit(int((errno + failed_reports) > 0))
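
The pattern above of building the workflow inside a separate multiprocessing.Process and handing results back through a Manager dict keeps the heavy graph construction out of the master process before it forks workers. A minimal, self-contained sketch of that pattern, with a hypothetical toy_builder standing in for build_workflow:

import sys
from multiprocessing import Manager, Process


def toy_builder(opts, retval):
    # Hypothetical stand-in for build_workflow(opts, retval):
    # populate the shared dict and report a return code.
    retval['workflow'] = 'fake-workflow-object'
    retval['return_code'] = 0


if __name__ == '__main__':
    opts = {'dummy': True}
    with Manager() as mgr:
        retval = mgr.dict()
        p = Process(target=toy_builder, args=(opts, retval))
        p.start()
        p.join()
        # Mirror the exit-code handling above: prefer the process exit code,
        # fall back to the builder-reported return code.
        retcode = p.exitcode or retval.get('return_code', 0)
        workflow = retval.get('workflow', None)
    if retcode != 0 or workflow is None:
        sys.exit(retcode or 1)
    print('workflow built:', workflow)
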
from nipype import logging, LooseVersion
from nipype.utils.filemanip import fname_presuffix, check_depends
from nipype.interfaces.io import FreeSurferSource

from nipype.interfaces.base import (TraitedSpec, File, traits, Directory,
                                    InputMultiPath, OutputMultiPath,
                                    CommandLine, CommandLineInputSpec,
                                    isdefined, BaseInterfaceInputSpec,
                                    BaseInterface)

from nipype.interfaces.freesurfer.base import FSCommand, FSTraitedSpec, FSTraitedSpecOpenMP, FSCommandOpenMP, Info
from nipype.interfaces.freesurfer.utils import copy2subjdir

__docformat__ = "restructuredtext"
iflogger = logging.getLogger("nipype.interface")

# Keeping this to avoid breaking external programs that depend on it, but
# this should not be used internally
FSVersion = Info.looseversion().vstring


class ReconAllStatsInputSpec(BaseInterfaceInputSpec):

    subject_id = traits.Str("recon_all",
                            argstr="%s",
                            desc="subject id of surface file",
                            usedefault=True)
    subjects_dir = traits.String(mandatory=True,
                                 argstr="%s",
                                 desc="subject dir of surface file")
    def process(self):
        # Enable the use of the W3C PROV data model to capture and represent provenance in Nipype
        # config.enable_provenance()

        # Process time
        self.now = datetime.datetime.now().strftime("%Y%m%d_%H%M")

        if '_' in self.subject:
            self.subject = self.subject.split('_')[0]

        # old_subject = self.subject

        if self.global_conf.subject_session == '':
            cmp_deriv_subject_directory = os.path.join(self.output_directory,
                                                       "cmp", self.subject)
            nipype_deriv_subject_directory = os.path.join(
                self.output_directory, "nipype", self.subject)
        else:
            cmp_deriv_subject_directory = os.path.join(
                self.output_directory, "cmp", self.subject,
                self.global_conf.subject_session)
            nipype_deriv_subject_directory = os.path.join(
                self.output_directory, "nipype", self.subject,
                self.global_conf.subject_session)

            self.subject = "_".join(
                (self.subject, self.global_conf.subject_session))

        if not os.path.exists(
                os.path.join(nipype_deriv_subject_directory,
                             "anatomical_pipeline")):
            try:
                os.makedirs(
                    os.path.join(nipype_deriv_subject_directory,
                                 "anatomical_pipeline"))
            except os.error:
                print("%s was already existing" % os.path.join(
                    nipype_deriv_subject_directory, "anatomical_pipeline"))

        # Initialization
        if os.path.isfile(
                os.path.join(nipype_deriv_subject_directory,
                             "anatomical_pipeline", "pypeline.log")):
            os.unlink(
                os.path.join(nipype_deriv_subject_directory,
                             "anatomical_pipeline", "pypeline.log"))
        config.update_config({
            'logging': {
                'log_directory':
                os.path.join(nipype_deriv_subject_directory,
                             "anatomical_pipeline"),
                'log_to_file':
                True
            },
            'execution': {
                'remove_unnecessary_outputs': False,
                'stop_on_first_crash': True,
                'stop_on_first_rerun': False,
                'use_relative_paths': True,
                'crashfile_format': "txt"
            }
        })
        logging.update_logging(config)
        iflogger = logging.getLogger('nipype.interface')

        iflogger.info("**** Processing ****")
        anat_flow = self.create_pipeline_flow(
            cmp_deriv_subject_directory=cmp_deriv_subject_directory,
            nipype_deriv_subject_directory=nipype_deriv_subject_directory)
        anat_flow.write_graph(graph2use='colored',
                              format='svg',
                              simple_form=True)

        if (self.number_of_cores != 1):
            anat_flow.run(plugin='MultiProc',
                          plugin_args={'n_procs': self.number_of_cores})
        else:
            anat_flow.run()

        # self.fill_stages_outputs()

        # Clean undesired folders/files
        # rm_file_list = ['rh.EC_average','lh.EC_average','fsaverage']
        # for file_to_rm in rm_file_list:
        #     if os.path.exists(os.path.join(self.base_directory,file_to_rm)):
        #         os.remove(os.path.join(self.base_directory,file_to_rm))

        # copy .ini and log file
        # outdir = os.path.join(cmp_deriv_subject_directory,'config')
        # if not os.path.exists(outdir):
        #     os.makedirs(outdir)
        #
        # try:
        #     shutil.copy(self.config_file,outdir)
        # except shutil.Error:
        #     print("Skipped copy of config file")

        # shutil.copy(os.path.join(self.base_directory,"derivatives","cmp",self.subject,'pypeline.log'),outdir)

        iflogger.info("**** Processing finished ****")

        return True, 'Processing successful'
    def process(self):
        """Executes the anatomical pipeline workflow and returns True if successful."""
        anat_flow = None  # noqa

        # Enable the use of the W3C PROV data model to capture and represent provenance in Nipype
        # config.enable_provenance()

        # Process time
        self.now = datetime.datetime.now().strftime("%Y%m%d_%H%M")

        # Create the paths <output_dir>/cmp-<version>/sub-<label>(/ses-<label>)
        # and <output_dir>/nipype-<version>/sub-<label>(/ses-<label>)
        # self.subject is updated to "sub-<label>_ses-<label>"
        # when subject has multiple sessions.
        cmp_deriv_subject_directory, nipype_deriv_subject_directory, nipype_anatomical_pipeline_subject_dir = \
            self.init_subject_derivatives_dirs()

        # Initialization
        log_file = os.path.join(nipype_anatomical_pipeline_subject_dir,
                                "pypeline.log")
        if os.path.isfile(log_file):
            os.unlink(log_file)

        config.update_config({
            "logging": {
                "workflow_level": "INFO",
                "interface_level": "INFO",
                "log_directory": nipype_anatomical_pipeline_subject_dir,
                "log_to_file": True,
            },
            "execution": {
                "remove_unnecessary_outputs": False,
                "stop_on_first_crash": True,
                "stop_on_first_rerun": False,
                "try_hard_link_datasink": True,
                "use_relative_paths": True,
                "crashfile_format": "txt",
            },
        })
        logging.update_logging(config)

        iflogger = logging.getLogger("nipype.interface")
        iflogger.info("**** Processing ****")

        anat_flow = self.create_pipeline_flow(
            cmp_deriv_subject_directory=cmp_deriv_subject_directory,
            nipype_deriv_subject_directory=nipype_deriv_subject_directory,
        )
        anat_flow.write_graph(graph2use="colored",
                              format="svg",
                              simple_form=True)
        # Create dictionary of arguments passed to plugin_args
        plugin_args = {
            'maxtasksperchild': 1,
            'n_procs': self.number_of_cores,
            'raise_insufficient': False,
        }
        anat_flow.run(plugin="MultiProc", plugin_args=plugin_args)

        self._update_parcellation_scheme()

        iflogger.info("**** Processing finished ****")

        return True
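
The sequence of config.update_config, logging.update_logging and a MultiProc run used above can be exercised on a toy workflow. A minimal sketch, assuming only nipype is installed; the add_two function and node names are made up for illustration:

import os
from nipype import config, logging
import nipype.pipeline.engine as pe
from nipype.interfaces.utility import Function

config.update_config({
    'logging': {'log_directory': os.getcwd(), 'log_to_file': True},
    'execution': {'crashfile_format': 'txt', 'stop_on_first_crash': True},
})
logging.update_logging(config)  # must follow update_config to pick up the new settings


def add_two(x):
    return x + 2


node = pe.Node(Function(input_names=['x'], output_names=['out'],
                        function=add_two), name='add_two')
node.inputs.x = 40

wf = pe.Workflow(name='toy_wf', base_dir=os.getcwd())
wf.add_nodes([node])
wf.run(plugin='MultiProc',
       plugin_args={'n_procs': 2, 'maxtasksperchild': 1,
                    'raise_insufficient': False})
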
Exemple #29
0
def main():
    """Entry point"""
    from nipype import logging as nlogging
    from multiprocessing import set_start_method, Process, Manager
    from ..viz.reports import generate_reports
    from ..utils.bids import write_derivative_description

    try:
        set_start_method('forkserver')
    except RuntimeError:
        pass

    warnings.showwarning = _warn_redirect
    opts = get_parser().parse_args()

    # FreeSurfer license
    default_license = str(
        Path(str(os.getenv('FREESURFER_HOME'))) / 'license.txt')
    # Precedence: --fs-license-file, $FS_LICENSE, default_license
    license_file = opts.fs_license_file or os.getenv('FS_LICENSE',
                                                     default_license)
    if not os.path.exists(license_file):
        raise RuntimeError(
            'ERROR: a valid license file is required for FreeSurfer to run. '
            'qsiprep looked for an existing license file at several paths, in '
            'this order: 1) command line argument ``--fs-license-file``; 2) '
            '``$FS_LICENSE`` environment variable; and 3) the '
            '``$FREESURFER_HOME/license.txt`` path. '
            'Get it (for free) by registering at https://'
            'surfer.nmr.mgh.harvard.edu/registration.html')
    os.environ['FS_LICENSE'] = license_file

    # Retrieve logging level
    log_level = int(max(25 - 5 * opts.verbose_count, logging.DEBUG))
    # Set logging
    logger.setLevel(log_level)
    nlogging.getLogger('nipype.workflow').setLevel(log_level)
    nlogging.getLogger('nipype.interface').setLevel(log_level)
    nlogging.getLogger('nipype.utils').setLevel(log_level)

    errno = 0
    mode = "recon" if opts.recon_only else "prep"
    if mode == "recon":
        logger.info("running qsirecon")
        building_func = build_recon_workflow
    else:
        logger.info("running qsiprep")
        building_func = build_qsiprep_workflow

    # Call build_workflow(opts, retval)
    with Manager() as mgr:
        retval = mgr.dict()
        p = Process(target=building_func, args=(opts, retval))
        p.start()
        p.join()

        if p.exitcode != 0:
            sys.exit(p.exitcode)

        qsiprep_wf = retval['workflow']
        plugin_settings = retval['plugin_settings']
        bids_dir = retval['bids_dir']
        output_dir = retval['output_dir']
        work_dir = retval['work_dir']
        subject_list = retval['subject_list']
        run_uuid = retval['run_uuid']
        retcode = retval['return_code']

    if qsiprep_wf is None:
        sys.exit(1)

    if opts.write_graph:
        qsiprep_wf.write_graph(graph2use="colored",
                               format='svg',
                               simple_form=True)

    if opts.reports_only:
        sys.exit(int(retcode > 0))

    if opts.boilerplate:
        sys.exit(int(retcode > 0))

    # Check workflow for missing commands
    missing = check_deps(qsiprep_wf)
    if missing:
        print("Cannot run qsiprep. Missing dependencies:")
        for iface, cmd in missing:
            print("\t{} (Interface: {})".format(cmd, iface))
        sys.exit(2)

    # Clean up master process before running workflow, which may create forks
    gc.collect()
    try:
        qsiprep_wf.run(**plugin_settings)
    except RuntimeError as e:
        if "Workflow did not execute cleanly" in str(e):
            errno = 1
        else:
            raise

    # No reports for recon mode yet
    if mode == "recon":
        sys.exit(int(errno > 0))

    # Generate reports phase
    errno += generate_reports(subject_list, output_dir, work_dir, run_uuid)
    write_derivative_description(bids_dir, str(Path(output_dir) / 'qsiprep'))
    if opts.recon_spec is None:
        logger.info("No additional workflows to run.")
        sys.exit(int(errno > 0))

    # Run an additional workflow if preproc + recon are requested
    opts.recon_input = output_dir + "/qsiprep"
    with Manager() as mgr:
        retval = mgr.dict()
        p = Process(target=build_recon_workflow, args=(opts, retval))
        p.start()
        p.join()

        if p.exitcode != 0:
            sys.exit(p.exitcode)

        qsirecon_post_wf = retval['workflow']
        plugin_settings = retval['plugin_settings']
        bids_dir = retval['bids_dir']
        output_dir = retval['output_dir']
        work_dir = retval['work_dir']
        subject_list = retval['subject_list']
        run_uuid = retval['run_uuid']
        retcode = retval['return_code']

    if qsirecon_post_wf is None:
        sys.exit(1)

    if opts.write_graph:
        qsirecon_post_wf.write_graph(graph2use="colored",
                                     format='svg',
                                     simple_form=True)

    if opts.reports_only:
        sys.exit(int(retcode > 0))

    if opts.boilerplate:
        sys.exit(int(retcode > 0))

    # Check workflow for missing commands
    missing = check_deps(qsirecon_post_wf)
    if missing:
        print("Cannot run qsiprep. Missing dependencies:")
        for iface, cmd in missing:
            print("\t{} (Interface: {})".format(cmd, iface))
        sys.exit(2)

    # Clean up master process before running workflow, which may create forks
    gc.collect()
    try:
        qsirecon_post_wf.run(**plugin_settings)
    except RuntimeError as e:
        if "Workflow did not execute cleanly" in str(e):
            errno = 1
        else:
            raise
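
check_deps, called before each run above, only needs to walk the workflow nodes and test whether each command-line interface's executable can be found on $PATH. A minimal sketch of such a helper (an illustration of the idea, not the packaged implementation):

def check_deps(workflow):
    """Return (interface_name, command) pairs whose executables are missing."""
    from nipype.utils.filemanip import which
    return sorted(
        (node.interface.__class__.__name__, node.interface._cmd)
        for node in workflow._get_all_nodes()
        if (hasattr(node.interface, '_cmd')
            and which(node.interface._cmd.split()[0]) is None)
    )

Pure-Python interfaces have no _cmd attribute and are skipped, so only external command-line dependencies are reported.
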
Exemple #30
0
def execute():
    import argparse
    from colorama import Fore
    MANDATORY_TITLE = (Fore.YELLOW + 'Mandatory arguments' + Fore.RESET)
    OPTIONAL_TITLE = (Fore.YELLOW + 'Optional arguments' + Fore.RESET)
    """
    Define and parse the command line arguments
    """
    parser = argparse.ArgumentParser(add_help=False)
    parser.add_argument('-h',
                        '--help',
                        action='help',
                        default=argparse.SUPPRESS,
                        help=argparse.SUPPRESS)
    parser._positionals.title = (
        Fore.YELLOW + 'clinica expects one of the following keywords' +
        Fore.RESET)
    parser._optionals.title = OPTIONAL_TITLE

    sub_parser = parser.add_subparsers(metavar='')
    parser.add_argument("-v",
                        "--verbose",
                        dest='verbose',
                        action='store_true',
                        default=False,
                        help='Verbose: print all messages to the console')
    parser.add_argument("-l",
                        "--logname",
                        dest='logname',
                        default="clinica.log",
                        metavar=('file.log'),
                        help='Define the log file name (default: clinica.log)')
    """
    run category: run one of the available pipelines
    """
    from clinica.engine import CmdParser

    from clinica.pipelines.t1_freesurfer_cross_sectional.t1_freesurfer_cross_sectional_cli import T1FreeSurferCrossSectionalCLI  # noqa
    from clinica.pipelines.t1_volume_tissue_segmentation.t1_volume_tissue_segmentation_cli import T1VolumeTissueSegmentationCLI  # noqa
    from clinica.pipelines.t1_volume_create_dartel.t1_volume_create_dartel_cli import T1VolumeCreateDartelCLI  # noqa
    from clinica.pipelines.t1_volume_existing_dartel.t1_volume_existing_dartel_cli import T1VolumeExistingDartelCLI  # noqa
    from clinica.pipelines.t1_volume_dartel2mni.t1_volume_dartel2mni_cli import T1VolumeDartel2MNICLI  # noqa
    from clinica.pipelines.t1_volume_new_template.t1_volume_new_template_cli import T1VolumeNewTemplateCLI  # noqa
    from clinica.pipelines.t1_volume_existing_template.t1_volume_existing_template_cli import T1VolumeExistingTemplateCLI  # noqa
    from clinica.pipelines.t1_volume_parcellation.t1_volume_parcellation_cli import T1VolumeParcellationCLI
    from clinica.pipelines.dwi_preprocessing_using_phasediff_fieldmap.dwi_preprocessing_using_phasediff_fieldmap_cli import DwiPreprocessingUsingPhaseDiffFieldmapCli  # noqa
    from clinica.pipelines.dwi_preprocessing_using_t1.dwi_preprocessing_using_t1_cli import DwiPreprocessingUsingT1Cli  # noqa
    from clinica.pipelines.dwi_dti.dwi_dti_cli import DwiDtiCli  # noqa
    # from clinica.pipelines.dwi_connectome.dwi_connectome_cli import DwiConnectomeCli  # noqa
    from clinica.pipelines.fmri_preprocessing.fmri_preprocessing_cli import fMRIPreprocessingCLI  # noqa
    from clinica.pipelines.pet_volume.pet_volume_cli import PETVolumeCLI  # noqa
    from clinica.pipelines.pet_surface.pet_surface_cli import PetSurfaceCLI  # noqa
    from clinica.pipelines.machine_learning_spatial_svm.spatial_svm_cli import SpatialSVMCLI  # noqa
    from clinica.pipelines.statistics_surface.statistics_surface_cli import StatisticsSurfaceCLI  # noqa
    pipelines = ClinicaClassLoader(baseclass=CmdParser,
                                   extra_dir="pipelines").load()
    pipelines += [
        T1FreeSurferCrossSectionalCLI(),
        T1VolumeNewTemplateCLI(),
        DwiPreprocessingUsingPhaseDiffFieldmapCli(),
        DwiPreprocessingUsingT1Cli(),
        DwiDtiCli(),
        # DwiConnectomeCli(),
        fMRIPreprocessingCLI(),
        PETVolumeCLI(),
        PetSurfaceCLI(),
        SpatialSVMCLI(),
        StatisticsSurfaceCLI(),
        T1VolumeExistingTemplateCLI(),
        T1VolumeTissueSegmentationCLI(),
        T1VolumeCreateDartelCLI(),
        T1VolumeExistingDartelCLI(),
        T1VolumeDartel2MNICLI(),
        T1VolumeParcellationCLI()
    ]

    run_parser = sub_parser.add_parser(
        'run',
        add_help=False,
        formatter_class=argparse.RawTextHelpFormatter,
        help='To run pipelines on BIDS/CAPS datasets.')
    run_parser.description = (Fore.GREEN +
                              'Run pipelines on BIDS/CAPS datasets.' +
                              Fore.RESET)
    run_parser._positionals.title = (
        Fore.YELLOW + 'clinica run expects one of the following pipelines' +
        Fore.RESET)

    init_cmdparser_objects(parser,
                           run_parser.add_subparsers(metavar='', dest='run'),
                           pipelines)
    """
    convert category: convert one of the supported datasets into BIDS hierarchy
    """
    from clinica.iotools.converters.aibl_to_bids.aibl_to_bids_cli import AiblToBidsCLI  # noqa
    from clinica.iotools.converters.adni_to_bids.adni_to_bids_cli import AdniToBidsCLI  # noqa
    from clinica.iotools.converters.oasis_to_bids.oasis_to_bids_cli import OasisToBidsCLI  # noqa

    converters = ClinicaClassLoader(baseclass=CmdParser,
                                    extra_dir="iotools/converters").load()
    converters += [
        AdniToBidsCLI(),
        AiblToBidsCLI(),
        OasisToBidsCLI(),
    ]

    convert_parser = sub_parser.add_parser(
        'convert',
        add_help=False,
        help='To convert unorganized datasets into a BIDS hierarchy.',
    )
    convert_parser.description = (
        Fore.GREEN +
        'Tools to convert unorganized datasets into a BIDS hierarchy.' +
        Fore.RESET)
    convert_parser._positionals.title = (
        Fore.YELLOW + 'clinica convert expects one of the following datasets' +
        Fore.RESET)
    convert_parser._optionals.title = OPTIONAL_TITLE
    init_cmdparser_objects(
        parser, convert_parser.add_subparsers(metavar='', dest='convert'),
        converters)
    """
    iotools category
    """
    from clinica.iotools.utils.data_handling_cli import CmdParserSubjectsSessions
    from clinica.iotools.utils.data_handling_cli import CmdParserMergeTsv
    from clinica.iotools.utils.data_handling_cli import CmdParserMissingModalities

    io_tools = [
        CmdParserSubjectsSessions(),
        CmdParserMergeTsv(),
        CmdParserMissingModalities(),
    ]

    HELP_IO_TOOLS = 'Tools to handle BIDS/CAPS datasets.'
    io_parser = sub_parser.add_parser(
        'iotools',
        add_help=False,
        help=HELP_IO_TOOLS,
    )
    io_parser.description = (Fore.GREEN + HELP_IO_TOOLS + Fore.RESET)
    io_parser._positionals.title = (
        Fore.YELLOW +
        'clinica iotools expects one of the following BIDS/CAPS utilities' +
        Fore.RESET)
    io_parser._optionals.title = OPTIONAL_TITLE

    init_cmdparser_objects(
        parser, io_parser.add_subparsers(metavar='', dest='iotools'), io_tools)
    """
    visualize category: run one of the available pipelines
    """
    from clinica.engine import CmdParser

    from clinica.pipelines.t1_freesurfer_cross_sectional.t1_freesurfer_cross_sectional_visualizer import T1FreeSurferVisualizer

    visualizers = ClinicaClassLoader(baseclass=CmdParser,
                                     extra_dir="pipelines").load()
    visualizers += [
        T1FreeSurferVisualizer(),
    ]

    visualize_parser = sub_parser.add_parser(
        'visualize',
        add_help=False,
        formatter_class=argparse.RawTextHelpFormatter,
        help='To visualize outputs of Clinica pipelines.')
    visualize_parser.description = (Fore.GREEN +
                                    'Visualize outputs of Clinica pipelines.' +
                                    Fore.RESET)
    visualize_parser._positionals.title = (
        Fore.YELLOW +
        'clinica visualize expects one of the following pipelines' +
        Fore.RESET)

    init_cmdparser_objects(
        parser, visualize_parser.add_subparsers(metavar='', dest='visualize'),
        visualizers)
    """
    generate category: template
    """
    generate_parser = sub_parser.add_parser(
        'generate',
        add_help=False,
        help=('To generate pre-filled files when creating '
              'new pipelines (for developers).'),
    )
    generate_parser.description = (
        Fore.GREEN + ('Generate pre-filled files when creating new pipelines '
                      '(for developers).') + Fore.RESET)
    generate_parser._positionals.title = (
        Fore.YELLOW + 'clinica generate expects one of the following tools' +
        Fore.RESET)
    generate_parser._optionals.title = OPTIONAL_TITLE

    from clinica.engine.template import CmdGenerateTemplates
    init_cmdparser_objects(
        parser, generate_parser.add_subparsers(metavar='', dest='generate'),
        [CmdGenerateTemplates()])
    """
    Silence the error methods of all sub-parsers except the one that is called,
    otherwise the output console would display useless messages
    """
    def silent_help():
        pass

    def single_error_message(p):
        def error(x):
            from colorama import Fore
            print('%sError %s%s\n' % (Fore.RED, x, Fore.RESET))
            p.print_help()
            parser.print_help = silent_help
            exit(-1)

        return error

    for p in [run_parser, io_parser, convert_parser, generate_parser]:
        p.error = single_error_message(p)

    # Do not want stderr message
    def silent_msg(x):
        pass

    parser.error = silent_msg
    """
    Parse the command and check that everything went fine
    """
    args = None
    unknown_args = None
    try:
        argcomplete.autocomplete(parser)
        args, unknown_args = parser.parse_known_args()
    except SystemExit:
        exit(0)
    except Exception:
        parser.print_help()
        exit(-1)

    # if unknown_args:
    #    if '--verbose' or '-v' in unknown_args:
    #        cprint('Verbose flag detected')
    #    raise ValueError('Unknown flag detected: %s' % unknown_args)

    if 'run' in args and hasattr(args, 'func') is False:
        # Case when we type `clinica run` on the terminal
        run_parser.print_help()
        exit(0)
    elif 'convert' in args and hasattr(args, 'func') is False:
        # Case when we type `clinica convert` on the terminal
        convert_parser.print_help()
        exit(0)
    elif 'iotools' in args and hasattr(args, 'func') is False:
        # Case when we type `clinica iotools` on the terminal
        io_parser.print_help()
        exit(0)
    elif 'visualize' in args and hasattr(args, 'func') is False:
        # Case when we type `clinica visualize` on the terminal
        visualize_parser.print_help()
        exit(0)
    elif 'generate' in args and hasattr(args, 'func') is False:
        # Case when we type `clinica generate` on the terminal
        generate_parser.print_help()
        exit(0)
    elif args is None or hasattr(args, 'func') is False:
        # Case when we type `clinica` on the terminal
        parser.print_help()
        exit(0)

    import clinica.utils.stream as var
    var.clinica_verbose = args.verbose

    if args.verbose is False:
        """
        Enable only cprint(msg) --> clinica print(msg)
        - All print() calls will be ignored!
        - All logging will be redirected to the log file.
        """
        from clinica.utils.stream import FilterOut
        sys.stdout = FilterOut(sys.stdout)
        import logging as python_logging
        from logging import Filter, ERROR
        import os
        from nipype import config, logging

        # Configure Nipype logger for our needs
        config.update_config({
            'logging': {
                'workflow_level': 'INFO',
                'log_directory': os.getcwd(),
                'log_to_file': True
            },
            'execution': {
                'stop_on_first_crash': False,
                'hash_method': 'content'
            }
        })
        logging.update_logging(config)

        # Define the LogFilter for ERROR detection
        class LogFilter(Filter):
            """
            The LogFilter class monitors whether an ERROR log signal is sent
            from Clinica/Nipype. If one is detected, the user is warned.
            """
            def filter(self, record):
                if record.levelno >= ERROR:
                    cprint(
                        "An ERROR was generated: please check the log file for more information"
                    )
                return True

        logger = logging.getLogger('nipype.workflow')
        logger.addFilter(LogFilter())

        # Remove all handlers associated with the root logger object
        for handler in python_logging.root.handlers[:]:
            python_logging.root.removeHandler(handler)

        logging.disable_file_logging()

        # Enable file logging using a filename
        def enable_file_logging(self, filename):
            """
            Hack to define a filename for the log file! It overloads the
            'enable_file_logging' method in 'nipype/utils/logger.py'.
            """
            import logging
            from logging.handlers import RotatingFileHandler as RFHandler
            config = self._config
            LOG_FILENAME = os.path.join(config.get('logging', 'log_directory'),
                                        filename)
            hdlr = RFHandler(LOG_FILENAME,
                             maxBytes=int(config.get('logging', 'log_size')),
                             backupCount=int(
                                 config.get('logging', 'log_rotate')))
            formatter = logging.Formatter(fmt=self.fmt, datefmt=self.datefmt)
            hdlr.setFormatter(formatter)
            self._logger.addHandler(hdlr)
            self._fmlogger.addHandler(hdlr)
            self._iflogger.addHandler(hdlr)
            self._hdlr = hdlr

        enable_file_logging(logging, args.logname)

        class Stream:
            def write(self, text):
                print(text)
                sys.stdout.flush()

        python_logging.basicConfig(format=logging.fmt,
                                   datefmt=logging.datefmt,
                                   stream=Stream())

    # Finally, run the pipelines
    args.func(args)
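
The enable_file_logging overload above exists only to control the log file name; with the standard library alone, the same rotating-file setup looks like this (a minimal sketch, where the logger name, file name and sizes are arbitrary):

import logging
from logging.handlers import RotatingFileHandler

logger = logging.getLogger('clinica_example')   # hypothetical logger name
logger.setLevel(logging.INFO)

handler = RotatingFileHandler('clinica.log',
                              maxBytes=5 * 1024 * 1024,  # rotate at ~5 MB
                              backupCount=4)             # keep 4 rotated files
handler.setFormatter(logging.Formatter(
    fmt='%(asctime)s %(levelname)s %(name)s: %(message)s',
    datefmt='%y%m%d-%H:%M:%S'))
logger.addHandler(handler)

logger.info('messages now go to clinica.log and rotate automatically')
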
Exemple #31
0
def build_collect_workflow(args, retval):
    import re
    import os
    import glob
    import warnings
    warnings.filterwarnings("ignore")
    import ast
    import pkg_resources
    from pathlib import Path
    import yaml
    import pandas as pd

    try:
        import pynets

        print(f"\n\nPyNets Version:\n{pynets.__version__}\n\n")
    except ImportError:
        print("PyNets not installed! Ensure that you are using the correct"
              " python version.")

    # Set Arguments to global variables
    resources = args.pm
    if resources == "auto":
        from multiprocessing import cpu_count
        import psutil
        nthreads = cpu_count() - 1
        procmem = [
            int(nthreads),
            int(list(psutil.virtual_memory())[4] / 1000000000)
        ]
    else:
        procmem = list(eval(str(resources)))
    plugin_type = args.plug
    if isinstance(plugin_type, list):
        plugin_type = plugin_type[0]
    verbose = args.v
    working_path = args.basedir
    work_dir = args.work
    modality = args.modality
    if isinstance(modality, list):
        modality = modality[0]

    os.makedirs(f"{str(Path(working_path))}/{modality}_group_topology_auc",
                exist_ok=True)

    wf = collect_all(working_path, modality)

    with open(pkg_resources.resource_filename("pynets", "runconfig.yaml"),
              "r") as stream:
        try:
            hardcoded_params = yaml.load(stream, Loader=yaml.FullLoader)
            runtime_dict = {}
            execution_dict = {}
            for i in range(len(hardcoded_params["resource_dict"])):
                runtime_dict[list(hardcoded_params["resource_dict"][i].keys(
                ))[0]] = ast.literal_eval(
                    list(hardcoded_params["resource_dict"][i].values())[0][0])
            for i in range(len(hardcoded_params["execution_dict"])):
                execution_dict[list(
                    hardcoded_params["execution_dict"][i].keys())[0]] = list(
                        hardcoded_params["execution_dict"][i].values())[0][0]
        except FileNotFoundError:
            print("Failed to parse runconfig.yaml")

    os.makedirs(f"{work_dir}{'/pynets_out_collection'}", exist_ok=True)
    wf.base_dir = f"{work_dir}{'/pynets_out_collection'}"

    if verbose is True:
        from nipype import config, logging

        cfg_v = dict(
            logging={
                "workflow_level": "DEBUG",
                "utils_level": "DEBUG",
                "interface_level": "DEBUG",
                "filemanip_level": "DEBUG",
                "log_directory": str(wf.base_dir),
                "log_to_file": True,
            },
            monitoring={
                "enabled": True,
                "sample_frequency": "0.1",
                "summary_append": True,
                "summary_file": str(wf.base_dir),
            },
        )
        config.update_config(cfg_v)
        logging.update_logging(config)
        config.enable_debug_mode()
        config.enable_resource_monitor()

        import logging

        callback_log_path = f"{wf.base_dir}{'/run_stats.log'}"
        logger = logging.getLogger("callback")
        logger.setLevel(logging.DEBUG)
        handler = logging.FileHandler(callback_log_path)
        logger.addHandler(handler)

    execution_dict["crashdump_dir"] = str(wf.base_dir)
    execution_dict["plugin"] = str(plugin_type)
    cfg = dict(execution=execution_dict)
    for key in cfg.keys():
        for setting, value in cfg[key].items():
            wf.config[key][setting] = value
    try:
        wf.write_graph(graph2use="colored", format="png")
    except BaseException:
        pass
    if verbose is True:
        from nipype.utils.profiler import log_nodes_cb

        plugin_args = {
            "n_procs": int(procmem[0]),
            "memory_gb": int(procmem[1]),
            "status_callback": log_nodes_cb,
            "scheduler": "mem_thread",
        }
    else:
        plugin_args = {
            "n_procs": int(procmem[0]),
            "memory_gb": int(procmem[1]),
            "scheduler": "mem_thread",
        }
    print("%s%s%s" % ("\nRunning with ", str(plugin_args), "\n"))
    wf.run(plugin=plugin_type, plugin_args=plugin_args)
    if verbose is True:
        from nipype.utils.draw_gantt_chart import generate_gantt_chart

        print("Plotting resource profile from run...")
        generate_gantt_chart(callback_log_path, cores=int(procmem[0]))
        handler.close()
        logger.removeHandler(handler)

    all_files = glob.glob(
        f"{str(Path(working_path))}/{modality}_group_topology_auc/*.csv")

    files_ = [i for i in all_files if '_clean.csv' in i]

    print("Aggregating dataframes...")
    dfs = []
    for file_ in files_:
        df = pd.read_csv(file_, chunksize=100000).read()
        try:
            df.drop(df.filter(regex="Unname"), axis=1, inplace=True)
        except BaseException:
            pass
        dfs.append(df)
        del df
    df_concat(dfs, working_path, modality)

    # Cleanup
    for j in all_files:
        if j not in files_:
            os.remove(j)

    print('\nDone!')
    return
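
The nested list(...keys())[0] indexing used above to flatten runconfig.yaml can be written more directly with dict comprehensions. A sketch, assuming the same layout of single-key dicts whose values are one-element lists:

import ast
import yaml

with open("runconfig.yaml", "r") as stream:   # same file as above
    hardcoded_params = yaml.load(stream, Loader=yaml.FullLoader)

# Each entry is a {name: [value]} mapping; keep the first (only) list element.
runtime_dict = {name: ast.literal_eval(val[0])
                for entry in hardcoded_params["resource_dict"]
                for name, val in entry.items()}
execution_dict = {name: val[0]
                  for entry in hardcoded_params["execution_dict"]
                  for name, val in entry.items()}
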
Exemple #32
0
"""Nibabel-based interfaces."""
import numpy as np
import nibabel as nb
from nipype import logging
from nipype.utils.filemanip import fname_presuffix
from nipype.interfaces.base import (
    traits,
    TraitedSpec,
    BaseInterfaceInputSpec,
    File,
    SimpleInterface,
    OutputMultiObject,
    InputMultiObject,
)

IFLOGGER = logging.getLogger("nipype.interface")


class _ApplyMaskInputSpec(BaseInterfaceInputSpec):
    in_file = File(exists=True, mandatory=True, desc="an image")
    in_mask = File(exists=True, mandatory=True, desc="a mask")
    threshold = traits.Float(
        0.5,
        usedefault=True,
        desc="a threshold to the mask, if it is nonbinary")


class _ApplyMaskOutputSpec(TraitedSpec):
    out_file = File(exists=True, desc="masked file")
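
A spec pair like this is typically attached to a SimpleInterface whose _run_interface thresholds the mask and multiplies it into the image. A minimal sketch of what such a class might look like (the class body here is illustrative; the real interface's resampling and error handling are omitted):

class _ApplyMask(SimpleInterface):
    """Mask an image, binarizing the mask at ``threshold`` if needed."""

    input_spec = _ApplyMaskInputSpec
    output_spec = _ApplyMaskOutputSpec

    def _run_interface(self, runtime):
        img = nb.load(self.inputs.in_file)
        mask = nb.load(self.inputs.in_mask).get_fdata() > self.inputs.threshold

        out_file = fname_presuffix(self.inputs.in_file, suffix="_masked",
                                   newpath=runtime.cwd)
        masked = img.__class__(img.get_fdata() * mask, img.affine, img.header)
        masked.to_filename(out_file)
        self._results["out_file"] = out_file
        return runtime
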

Exemple #33
0
"""
For using multi-echo EPI data.

Change directory to provide relative paths for doctests
>>> import os
>>> filepath = os.path.dirname( os.path.realpath( __file__ ) )
>>> datadir = os.path.realpath(os.path.join(filepath, '../data/'))
>>> os.chdir(datadir)

"""
import os

from nipype import logging
from nipype.interfaces.base import (traits, TraitedSpec, File, CommandLine,
                                    CommandLineInputSpec)

LOGGER = logging.getLogger('nipype.interface')


class T2SMapInputSpec(CommandLineInputSpec):
    in_files = traits.List(File(exists=True),
                           argstr='-d %s',
                           position=1,
                           mandatory=True,
                           minlen=3,
                           desc='multi-echo BOLD EPIs')
    echo_times = traits.List(traits.Float,
                             argstr='-e %s',
                             position=2,
                             mandatory=True,
                             minlen=3,
                             desc='echo times')
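
Such a CommandLineInputSpec is normally paired with a thin CommandLine subclass; a sketch, where the executable name 't2smap' is an assumption about the underlying tool and the echo files are assumed to exist locally (File traits with exists=True check for them):

class T2SMap(CommandLine):
    _cmd = 't2smap'                 # assumed executable name
    input_spec = T2SMapInputSpec


# Build the interface and inspect the command line it would run
# (echo1/2/3.nii.gz must exist on disk for the File traits to validate):
t2smap = T2SMap(in_files=['echo1.nii.gz', 'echo2.nii.gz', 'echo3.nii.gz'],
                echo_times=[13.6, 29.79, 46.59])
print(t2smap.cmdline)
# expected to resemble:
# t2smap -d echo1.nii.gz echo2.nii.gz echo3.nii.gz -e 13.6 29.79 46.59
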
Exemple #34
0
def calc_blocksize(timeseries, memory_allocated=None, 
                   include_full_matrix=False, sparsity_thresh=0.0):
    '''
    Calculate the block size to use when computing the correlation matrix,
    given the memory allocated by the user. By default, the block size is
    1000 when no memory limit is specified.

    If a memory limit is specified, the block size is the allocated memory
    minus the memory needed for the timeseries and the centrality outputs,
    divided by the size of one correlation map; in other words, how many
    correlation maps can be held in memory at once.

    Parameters
    ----------
    timeseries : numpy array
       timeseries data: `nvoxs` x `ntpts`
    memory_allocated : float
       memory allocated in GB for degree centrality
    include_full_matrix : boolean
        Boolean indicating if we're using the entire correlation matrix
        in RAM (needed during eigenvector centrality).
        Default is False
    sparsity_thresh : float
        a number between 0 and 1 that represents the number of
        connections to keep during sparsity thresholding.
        Default is 0.0.

    Returns
    -------
    block_size : an integer
      size of block for matrix calculation
    '''

    # Import packages
    import numpy as np
    from nipype import logging

    # Init variables
    logger = logging.getLogger('workflow')
    block_size = 1000   # default

    nvoxs   = timeseries.shape[0]
    ntpts   = timeseries.shape[1]
    nbytes  = timeseries.dtype.itemsize

    # If we need the full matrix for centrality calculation
    if include_full_matrix:
        memory_for_full_matrix = nvoxs * nvoxs * nbytes
    # Otherwise, we're doing it in blocks
    else:
        memory_for_full_matrix = 0

    # Memory variables
    memory_for_timeseries   = nvoxs * ntpts * nbytes
    memory_for_output       = 2 * nvoxs * nbytes    # bin and wght outputs
    needed_memory = memory_for_timeseries + \
                    memory_for_output + \
                    memory_for_full_matrix

    if memory_allocated:
        available_memory = memory_allocated * 1024.0**3  # assume it is in GB
        ## memory_for_block = # of seed voxels * nvoxs * nbytes
        block_size = int( (available_memory - needed_memory)/(nvoxs*nbytes) )
        # If we're doing degree/sparsity thresholding, calculate block_size
        if sparsity_thresh:
            # k - block_size, v - nvoxs, d - nbytes, m - memory_allocated
            # Polynomial for sparsity algorithm memory usage
            # Solve for k: (-d/2 - 20.5)*k^2 + (41*v + d*v -d/2 - 20.5)*k - m = 0
            coeffs = np.zeros(3)
            coeffs[0] = -nbytes/2 - 20.5
            coeffs[1] = 41*nvoxs + nbytes*nvoxs - nbytes/2 - 20.5
            coeffs[2] = -(available_memory - needed_memory)
            roots = np.roots(coeffs)
            # If roots are complex, then the block_size needed to occupy all of
            # the available memory is bigger than the number of voxels.
            # So set block_size = nvoxs
            if np.iscomplex(roots[0]):
                block_size = nvoxs
            # If the roots are real, test the roots for condition
            else:
                root = roots[np.where(roots <= nvoxs)]
                root = root[np.where(root > 0)]
                if len(root) == 1:
                    block_size = np.floor(root[0])
                else:
                    block_size = 1000

    # Test if calculated block size is beyond max/min limits
    if block_size > nvoxs:
        block_size = nvoxs
    elif block_size < 1:
        memory_usage = (needed_memory + 2.0*nvoxs*nbytes)/1024.0**3
        raise MemoryError('Not enough memory available to perform degree '\
                          'centrality. Need a minimum of %.2fGB' % memory_usage)

    # Convert block_size to an integer before returning
    block_size = int(block_size)

    # Return memory usage and block size
    if sparsity_thresh:
        # Calculate RAM usage by blocking algorithm
        m = (-nbytes/2 - 20.5)*block_size**2 + \
            (41*nvoxs + nbytes*nvoxs - nbytes/2 - 20.5)*block_size
        # Calculate RAM usage by sparse matrix at end
        max_conns = nvoxs**2-nvoxs
        # Max number of connections * i + j (32-bit ints) + w
        m2 = np.round(max_conns*sparsity_thresh)*(4 + 4 + nbytes)
        if m2 > m:
            m = m2
        memory_usage = (needed_memory + m)/1024.0**3
    else:
        memory_usage = (needed_memory + block_size*nvoxs*nbytes)/1024.0**3

    # Log information
    logger.info('block_size -> %i voxels' % block_size)
    logger.info('# of blocks -> %i' % np.ceil(float(nvoxs)/block_size))
    logger.info('expected usage -> %.2fGB' % memory_usage)

    return block_size
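
A quick way to sanity-check the arithmetic above is to call the function on a small synthetic timeseries. A sketch, assuming calc_blocksize as defined above is in scope:

import numpy as np

# 10,000 voxels x 200 timepoints of float64 (~16 MB of timeseries data)
timeseries = np.zeros((10000, 200), dtype=np.float64)

# No memory limit: should fall back to the default block of 1000 voxels
print(calc_blocksize(timeseries))

# With a 1 GB limit the computed block exceeds the number of voxels,
# so it should be clipped to 10000
print(calc_blocksize(timeseries, memory_allocated=1.0))
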
Exemple #35
0
def build_opts(opts):
    """Entry point"""
    import os
    from pathlib import Path
    import logging
    import sys
    import gc
    import warnings
    from multiprocessing import set_start_method, Process, Manager
    from nipype import logging as nlogging

    set_start_method('forkserver')

    logging.addLevelName(25, 'IMPORTANT')  # Add a new level between INFO and WARNING
    logging.addLevelName(15, 'VERBOSE')  # Add a new level between INFO and DEBUG
    logger = logging.getLogger('cli')

    def _warn_redirect(message, category, filename, lineno, file=None, line=None):
        logger.warning('Captured warning (%s): %s', category, message)

    warnings.showwarning = _warn_redirect

    # FreeSurfer license
    default_license = str(Path(os.getenv('FREESURFER_HOME')) / 'license.txt')
    # Precedence: --fs-license-file, $FS_LICENSE, default_license
    license_file = Path(opts.fs_license_file or os.getenv('FS_LICENSE', default_license))
    if not license_file.exists():
        raise RuntimeError(
            'ERROR: a valid license file is required for FreeSurfer to run. '
            'sMRIPrep looked for an existing license file at several paths, in this '
            'order: 1) command line argument ``--fs-license-file``; 2) ``$FS_LICENSE`` '
            'environment variable; and 3) the ``$FREESURFER_HOME/license.txt`` path. '
            'Get it (for free) by registering at https://'
            'surfer.nmr.mgh.harvard.edu/registration.html')
    os.environ['FS_LICENSE'] = str(license_file)

    # Retrieve logging level
    log_level = int(max(25 - 5 * opts.verbose_count, logging.DEBUG))
    # Set logging
    logger.setLevel(log_level)
    nlogging.getLogger('nipype.workflow').setLevel(log_level)
    nlogging.getLogger('nipype.interface').setLevel(log_level)
    nlogging.getLogger('nipype.utils').setLevel(log_level)

    errno = 0

    # Call build_workflow(opts, retval)
    with Manager() as mgr:
        retval = mgr.dict()
        p = Process(target=build_workflow, args=(opts, retval))
        p.start()
        p.join()

        if p.exitcode != 0:
            sys.exit(p.exitcode)

        smriprep_wf = retval['workflow']
        plugin_settings = retval['plugin_settings']
        bids_dir = retval['bids_dir']
        output_dir = retval['output_dir']
        work_dir = retval['work_dir']
        subject_list = retval['subject_list']
        run_uuid = retval['run_uuid']
        retcode = retval['return_code']

    if smriprep_wf is None:
        sys.exit(1)

    if opts.write_graph:
        smriprep_wf.write_graph(graph2use="colored", format='svg', simple_form=True)

    if opts.reports_only:
        sys.exit(int(retcode > 0))

    if opts.boilerplate:
        sys.exit(int(retcode > 0))

    # Check workflow for missing commands
    missing = check_deps(smriprep_wf)
    if missing:
        print("Cannot run sMRIPrep. Missing dependencies:")
        for iface, cmd in missing:
            print("\t{} (Interface: {})".format(cmd, iface))
        sys.exit(2)

    # Clean up master process before running workflow, which may create forks
    gc.collect()
    try:
        smriprep_wf.run(**plugin_settings)
    except RuntimeError:
        errno = 1
    else:
        if opts.run_reconall:
            from templateflow import api
            from niworkflows.utils.misc import _copy_any
            dseg_tsv = str(api.get('fsaverage', suffix='dseg', extension=['.tsv']))
            _copy_any(dseg_tsv,
                      str(Path(output_dir) / 'smriprep' / 'desc-aseg_dseg.tsv'))
            _copy_any(dseg_tsv,
                      str(Path(output_dir) / 'smriprep' / 'desc-aparcaseg_dseg.tsv'))
        logger.log(25, 'sMRIPrep finished without errors')
    finally:
        from niworkflows.reports import generate_reports
        from ..utils.bids import write_derivative_description

        logger.log(25, 'Writing reports for participants: %s', ', '.join(subject_list))
        # Generate reports phase
        errno += generate_reports(subject_list, output_dir, work_dir, run_uuid,
                                  packagename='smriprep')
        write_derivative_description(bids_dir, str(Path(output_dir) / 'smriprep'))
    sys.exit(int(errno > 0))
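
The log-level arithmetic shared by these entry points maps each -v to one 5-point step below the custom IMPORTANT level, bottoming out at DEBUG. A small sketch of the mapping:

import logging

logging.addLevelName(25, 'IMPORTANT')
logging.addLevelName(15, 'VERBOSE')

for verbose_count in range(5):
    log_level = int(max(25 - 5 * verbose_count, logging.DEBUG))
    print(verbose_count, log_level, logging.getLevelName(log_level))
# 0 -> 25 IMPORTANT, -v -> 20 INFO, -vv -> 15 VERBOSE, -vvv and beyond -> 10 DEBUG
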
Exemple #36
0
    def _run_interface(self, runtime):
        # Get workflow logger for runtime profile error reporting
        from nipype import logging
        logger = logging.getLogger('workflow')

        # Create function handle
        function_handle = create_function_from_source(self.inputs.function_str,
                                                      self.imports)

        # Wrapper for running function handle in multiprocessing.Process
        # Can catch exceptions and report output via multiprocessing.Queue
        def _function_handle_wrapper(queue, **kwargs):
            try:
                out = function_handle(**kwargs)
                queue.put(out)
            except Exception as exc:
                queue.put(exc)

        # Get function args
        args = {}
        for name in self._input_names:
            value = getattr(self.inputs, name)
            if isdefined(value):
                args[name] = value

        # Profile resources if set
        if runtime_profile:
            from nipype.interfaces.base import get_max_resources_used
            import multiprocessing
            # Init communication queue and proc objs
            queue = multiprocessing.Queue()
            proc = multiprocessing.Process(target=_function_handle_wrapper,
                                           args=(queue, ),
                                           kwargs=args)

            # Init memory and threads before profiling
            mem_mb = 0
            num_threads = 0

            # Start process and profile while it's alive
            proc.start()
            while proc.is_alive():
                mem_mb, num_threads = \
                    get_max_resources_used(proc.pid, mem_mb, num_threads,
                                           pyfunc=True)

            # Get result from process queue
            out = queue.get()
            # If it is an exception, raise it
            if isinstance(out, Exception):
                raise out

            # Function ran successfully, populate runtime stats
            setattr(runtime, 'runtime_memory_gb', mem_mb / 1024.0)
            setattr(runtime, 'runtime_threads', num_threads)
        else:
            out = function_handle(**args)

        if len(self._output_names) == 1:
            self._out[self._output_names[0]] = out
        else:
            if isinstance(out,
                          tuple) and (len(out) != len(self._output_names)):
                raise RuntimeError('Mismatch in number of expected outputs')

            else:
                for idx, name in enumerate(self._output_names):
                    self._out[name] = out[idx]

        return runtime
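
The queue-based wrapper above generalizes to any function you want to run in a child process while still propagating its exceptions to the parent. A standard-library sketch of that pattern:

import multiprocessing


def _wrapper(queue, func, **kwargs):
    # Run func and push either its result or the raised exception onto the queue.
    try:
        queue.put(func(**kwargs))
    except Exception as exc:
        queue.put(exc)


def _square(x):
    return x * x


def run_isolated(func, **kwargs):
    queue = multiprocessing.Queue()
    proc = multiprocessing.Process(target=_wrapper, args=(queue, func),
                                   kwargs=kwargs)
    proc.start()
    out = queue.get()       # read before join() to avoid blocking on large results
    proc.join()
    if isinstance(out, Exception):
        raise out           # re-raise in the parent, as the interface above does
    return out


if __name__ == '__main__':
    print(run_isolated(_square, x=12))   # 144
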
Exemple #37
0
    def old_process(self):
        # Process time
        now = datetime.datetime.now().strftime("%Y%m%d_%H%M")

        # Initialization
        if os.path.exists(
                os.path.join(self.base_directory, "LOG", "pypeline.log")):
            os.unlink(os.path.join(self.base_directory, "LOG", "pypeline.log"))
        config.update_config({
            'logging': {
                'log_directory': os.path.join(self.base_directory, "LOG"),
                'log_to_file': True
            },
            'execution': {
                'remove_unnecessary_outputs': False
            }
        })
        logging.update_logging(config)
        iflogger = logging.getLogger('interface')

        # Data import
        datasource = pe.Node(interface=nio.DataGrabber(
            outfields=['diffusion', 'bvecs', 'bvals', 'T1', 'T2']),
                             name='datasource')
        datasource.inputs.base_directory = os.path.join(
            self.base_directory, 'NIFTI')
        datasource.inputs.template = '*'
        datasource.inputs.raise_on_empty = False
        datasource.inputs.field_template = dict(
            diffusion=self.global_conf.diffusion_imaging_model + '.nii.gz',
            bvecs=self.global_conf.diffusion_imaging_model + '.bvec',
            bvals=self.global_conf.diffusion_imaging_model + '.bval',
            T1='T1.nii.gz',
            T2='T2.nii.gz')
        datasource.inputs.sort_filelist = False

        # Data sinker for output
        sinker = pe.Node(nio.DataSink(), name="diffusion_sinker")
        sinker.inputs.base_directory = os.path.join(self.base_directory,
                                                    "RESULTS")

        # Clear previous outputs
        self.clear_stages_outputs()

        flow = pe.Workflow(name='NIPYPE',
                           base_dir=os.path.join(self.base_directory))

        # Create common_flow
        common_flow = self.create_common_flow()

        flow.connect([(datasource, common_flow, [("T1", "inputnode.T1")])])

        # Create diffusion flow

        diffusion_flow = pe.Workflow(name='diffusion_pipeline')
        diffusion_inputnode = pe.Node(interface=util.IdentityInterface(fields=[
            'diffusion', 'bvecs', 'bvals', 'T1', 'brain', 'T2', 'brain_mask',
            'wm_mask_file', 'roi_volumes', 'subjects_dir', 'subject_id',
            'atlas_info', 'parcellation_scheme'
        ]),
                                      name='inputnode')
        diffusion_outputnode = pe.Node(
            interface=util.IdentityInterface(fields=['connectivity_matrices']),
            name='outputnode')
        diffusion_flow.add_nodes([diffusion_inputnode, diffusion_outputnode])

        flow.connect([(datasource, diffusion_flow,
                       [("diffusion", "inputnode.diffusion"),
                        ("bvecs", "inputnode.bvecs"),
                        ("bvals", "inputnode.bvals"), ("T2", "inputnode.T2")]),
                      (common_flow, diffusion_flow,
                       [("outputnode.subjects_dir", "inputnode.subjects_dir"),
                        ("outputnode.subject_id", "inputnode.subject_id"),
                        ("outputnode.T1", "inputnode.T1"),
                        ("outputnode.brain", "inputnode.brain"),
                        ("outputnode.brain_mask", "inputnode.brain_mask"),
                        ("outputnode.wm_mask_file", "inputnode.wm_mask_file"),
                        ("outputnode.roi_volumes", "inputnode.roi_volumes"),
                        ("outputnode.parcellation_scheme",
                         "inputnode.parcellation_scheme"),
                        ("outputnode.atlas_info", "inputnode.atlas_info")]),
                      (diffusion_flow, sinker,
                       [("outputnode.connectivity_matrices",
                         "%s.%s.connectivity_matrices" %
                         (self.global_conf.diffusion_imaging_model, now))])])

        print(diffusion_inputnode.outputs)

        if self.stages['Preprocessing'].enabled:
            preproc_flow = self.create_stage_flow("Preprocessing")
            diffusion_flow.connect([
                (diffusion_inputnode, preproc_flow,
                 [('diffusion', 'inputnode.diffusion'),
                  ('brain', 'inputnode.brain'),
                  ('brain_mask', 'inputnode.brain_mask'),
                  ('wm_mask_file', 'inputnode.wm_mask_file'),
                  ('roi_volumes', 'inputnode.roi_volumes'),
                  ('bvecs', 'inputnode.bvecs'), ('bvals', 'inputnode.bvals'),
                  ('T1', 'inputnode.T1')]),
            ])

        if self.stages['Registration'].enabled:
            reg_flow = self.create_stage_flow("Registration")
            diffusion_flow.connect([
                (diffusion_inputnode, reg_flow, [('T2', 'inputnode.T2'),
                                                 ("bvals", "inputnode.bvals")
                                                 ]),
                (preproc_flow, reg_flow,
                 [('outputnode.T1', 'inputnode.T1'),
                  ('outputnode.bvecs_rot', 'inputnode.bvecs'),
                  ('outputnode.wm_mask_file', 'inputnode.wm_mask'),
                  ('outputnode.roi_volumes', 'inputnode.roi_volumes'),
                  ("outputnode.brain", "inputnode.brain"),
                  ("outputnode.brain_mask", "inputnode.brain_mask"),
                  ("outputnode.brain_mask_full", "inputnode.brain_mask_full"),
                  ('outputnode.diffusion_preproc', 'inputnode.target'),
                  ('outputnode.dwi_brain_mask', 'inputnode.target_mask')])
            ])
            if self.stages[
                    'Registration'].config.registration_mode == "BBregister (FS)":
                diffusion_flow.connect([
                    (diffusion_inputnode, reg_flow,
                     [('subjects_dir', 'inputnode.subjects_dir'),
                      ('subject_id', 'inputnode.subject_id')]),
                ])

        if self.stages['Diffusion'].enabled:
            diff_flow = self.create_stage_flow("Diffusion")
            diffusion_flow.connect([
                (reg_flow, diff_flow, [('outputnode.target_epicorrected',
                                        'inputnode.diffusion')]),
                (reg_flow, diff_flow, [('outputnode.wm_mask_registered_crop',
                                        'inputnode.wm_mask_registered')]),
                (reg_flow, diff_flow,
                 [('outputnode.roi_volumes_registered_crop',
                   'inputnode.roi_volumes')]),
                (reg_flow, diff_flow, [('outputnode.grad', 'inputnode.grad')])
            ])

        if self.stages['MRTrixConnectome'].enabled:
            if self.stages['Diffusion'].config.processing_tool == 'FSL':
                self.stages['MRTrixConnectome'].config.probtrackx = True
            else:
                self.stages['MRTrixConnectome'].config.probtrackx = False
            con_flow = self.create_stage_flow("MRTrixConnectome")
            diffusion_flow.connect([
                (diffusion_inputnode, con_flow,
                 [('parcellation_scheme', 'inputnode.parcellation_scheme')]),
                (diff_flow, con_flow,
                 [('outputnode.diffusion_imaging_model',
                   'inputnode.diffusion_imaging_model'),
                  ('outputnode.track_file', 'inputnode.track_file'),
                  ('outputnode.fod_file', 'inputnode.fod_file'),
                  ('outputnode.gFA', 'inputnode.gFA'),
                  ('outputnode.roi_volumes',
                   'inputnode.roi_volumes_registered'),
                  ('outputnode.skewness', 'inputnode.skewness'),
                  ('outputnode.kurtosis', 'inputnode.kurtosis'),
                  ('outputnode.P0', 'inputnode.P0')]),
                (con_flow, diffusion_outputnode,
                 [('outputnode.connectivity_matrices', 'connectivity_matrices')
                  ])
            ])

            if self.stages[
                    'Parcellation'].config.parcellation_scheme == "Custom":
                diffusion_flow.connect([(diffusion_inputnode, con_flow, [
                    ('atlas_info', 'inputnode.atlas_info')
                ])])

        # Create NIPYPE flow

        # flow = pe.Workflow(name='NIPYPE', base_dir=os.path.join(self.base_directory))

        # flow.connect([
        #               (datasource,common_flow,[("T1","inputnode.T1")]),
        #               (datasource,diffusion_flow,[("diffusion","inputnode.diffusion"),("T1","inputnode.T1"),("bvecs","inputnode.bvecs"),("bvals","inputnode.bvals"),("T2","inputnode.T2")]),
        #               (common_flow,diffusion_flow,[("outputnode.subjects_dir","inputnode.subjects_dir"),("outputnode.subject_id","inputnode.subject_id"),
        #                                             ("outputnode.brain_eroded","inputnode.brain_mask"),
        #                                            ("outputnode.wm_mask_file","inputnode.wm_mask_file"),
        #                                            ( "outputnode.roi_volumes","inputnode.roi_volumes"),
        #                                            ("outputnode.parcellation_scheme","inputnode.parcellation_scheme"),
        #                                            ("outputnode.atlas_info","inputnode.atlas_info")]),
        #               (diffusion_flow,sinker,[("outputnode.connectivity_matrices","%s.%s.connectivity_matrices"%(self.global_conf.diffusion_imaging_model,now))])
        #             ])

        # Process pipeline

        iflogger.info("**** Processing ****")

        if (self.number_of_cores != 1):
            flow.run(plugin='MultiProc',
                     plugin_args={'n_procs': self.number_of_cores})
        else:
            flow.run()

        self.fill_stages_outputs()

        # Clean undesired folders/files
        # rm_file_list = ['rh.EC_average','lh.EC_average','fsaverage']
        # for file_to_rm in rm_file_list:
        #     if os.path.exists(os.path.join(self.base_directory,file_to_rm)):
        #         os.remove(os.path.join(self.base_directory,file_to_rm))

        # copy .ini and log file
        outdir = os.path.join(self.base_directory, "derivatives", "cmp",
                              self.subject, "connectome", now)
        if not os.path.exists(outdir):
            os.makedirs(outdir)
        shutil.copy(self.config_file, outdir)
        shutil.copy(
            os.path.join(self.base_directory, "derivatives", "cmp",
                         self.subject, 'pypeline.log'), outdir)

        iflogger.info("**** Processing finished ****")

        return True, 'Processing successful'
Exemple #38
0
def cli():
    """Run templateflow on commandline"""
    from pathlib import Path
    from argparse import ArgumentParser
    from argparse import RawTextHelpFormatter
    from multiprocessing import cpu_count
    import logging
    from nipype import logging as nlogging

    parser = ArgumentParser(description=__doc__,
                            formatter_class=RawTextHelpFormatter)
    parser.add_argument('bids_dir',
                        action='store',
                        help='BIDS directory root',
                        type=Path)
    parser.add_argument('--participant-label',
                        nargs='*',
                        action='store',
                        help='list of participants to be processed')
    parser.add_argument('--output-dir',
                        action='store',
                        help='output directory',
                        type=Path)
    parser.add_argument('--reference',
                        '-r',
                        action='store',
                        help='the reference template')
    parser.add_argument('--moving',
                        '-m',
                        action='store',
                        help='the moving template')
    parser.add_argument('--cpu-count',
                        '--nproc',
                        action='store',
                        type=int,
                        default=cpu_count(),
                        help='number of processors')
    parser.add_argument('--omp-nthreads',
                        action='store',
                        type=int,
                        default=cpu_count(),
                        help='number of threads')
    parser.add_argument('--testing',
                        action='store_true',
                        default=False,
                        help='run in testing mode')
    parser.add_argument(
        "-v",
        "--verbose",
        dest="verbose_count",
        action="count",
        default=0,
        help="increases log verbosity for each occurence, debug level is -vvv")
    parser.add_argument('--legacy',
                        action='store_true',
                        default=False,
                        help='use LegacyMultiProc')
    parser.add_argument('-w',
                        '--work-dir',
                        action='store',
                        type=Path,
                        default=Path() / 'work',
                        help='work directory')
    parser.add_argument('--freesurfer',
                        action='store',
                        type=Path,
                        help='path to precomputed freesurfer results')
    opts = parser.parse_args()

    plugin_settings = {'plugin': 'Linear'}
    if opts.cpu_count > 1:
        plugin_settings = {
            'plugin': 'LegacyMultiProc' if opts.legacy else 'MultiProc',
            'plugin_args': {
                'raise_insufficient': False,
                'maxtasksperchild': 1,
                'n_procs': opts.cpu_count,
            }
        }

    # Retrieve logging level
    log_level = int(max(25 - 5 * opts.verbose_count, logging.DEBUG))
    # Set logging
    nlogging.getLogger('nipype.workflow').setLevel(log_level)
    nlogging.getLogger('nipype.interface').setLevel(log_level)
    nlogging.getLogger('nipype.utils').setLevel(log_level)

    bids_dir = opts.bids_dir.resolve()
    if not bids_dir.exists():
        raise RuntimeError('Input BIDS directory "%s" does not exist.' %
                           bids_dir)

    if not opts.output_dir:
        output_dir = bids_dir / 'derivatives' / 'templateflow-0.0.1'
    else:
        output_dir = opts.output_dir.resolve()

    all_subjects = set([
        Path(s).name.split('_')[0].split('-')[-1]
        for s in bids_dir.glob('sub-*')
    ])

    if not all_subjects:
        raise RuntimeError(
            'Input BIDS directory "%s" does not look like a valid BIDS '
            'tree.' % bids_dir)

    if not opts.participant_label:
        participants = all_subjects
    else:
        part_label = [
            s[4:] if s.startswith('sub-') else s
            for s in opts.participant_label
        ]
        participants = all_subjects.intersection(part_label)

    if not participants:
        raise RuntimeError(
            'No subject with the specified participant labels (%s) matched.' %
            ', '.join(opts.participant_label))

    tf = init_templateflow_wf(
        bids_dir,
        output_dir,
        participants,
        opts.moving,
        ref_template=opts.reference,
        omp_nthreads=opts.omp_nthreads,
        normalization_quality='precise' if not opts.testing else 'testing',
        fs_subjects_dir=opts.freesurfer,
    )
    tf.base_dir = str(opts.work_dir.resolve())
    tf.run(**plugin_settings)
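
A quick check of the verbosity arithmetic used above: 25 - 5 * opts.verbose_count is clamped at logging.DEBUG (10), so no -v gives level 25, -v gives 20 (INFO), -vv gives 15 and -vvv gives 10 (DEBUG), matching the help text. A minimal standalone sketch; the function name is illustrative, not from the source:

import logging

def verbosity_to_level(verbose_count):
    # 0 -> 25, 1 -> 20 (INFO), 2 -> 15, 3+ -> 10 (DEBUG); clamped at DEBUG
    return int(max(25 - 5 * verbose_count, logging.DEBUG))

for v in range(4):
    print(v, verbosity_to_level(v))   # 0 25, 1 20, 2 15, 3 10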
Exemple #39
0
def write_to_log(workflow, log_dir, index, inputs, scan_id):
    """
    Method to write into log file the status of the workflow run.
    """

    import os
    import CPAC
    from nipype import logging
    iflogger = logging.getLogger('interface')

    version = CPAC.__version__

    subject_id = os.path.basename(log_dir)

    if scan_id is None:
        scan_id = "scan_anat"

    strategy = ""

    import time
    import datetime
    ts = time.time()

    stamp = datetime.datetime.fromtimestamp(ts).strftime('%Y-%m-%d %H:%M:%S')
    try:
        if workflow != 'DONE':
            wf_path = os.path.dirname(
                (os.getcwd()).split(workflow)[1]).strip("/")

            if wf_path and wf_path != "":
                if '/' in wf_path:
                    scan_id, strategy = wf_path.split('/', 1)
                    scan_id = scan_id.strip('_')
                    strategy = strategy.replace("/", "")
                else:
                    scan_id = wf_path.strip('_')

            file_path = os.path.join(log_dir, scan_id, workflow)
            try:
                os.makedirs(file_path)
            except Exception:
                iflogger.info(
                    "filepath already exists, filepath- %s, curr_dir - %s" %
                    (file_path, os.getcwd()))

        else:
            file_path = os.path.join(log_dir, scan_id)
    except Exception:
        print "ERROR in write log"
        raise

    out_file = os.path.join(file_path, 'log_%s.yaml' % strategy)
    f = open(out_file, 'w')

    print >> f, "version : %s" % (str(version))
    print >> f, "timestamp: %s" % (str(stamp))
    print >> f, "pipeline_index: %d" % (index)
    print >> f, "subject_id: %s" % (subject_id)
    print >> f, "scan_id: %s" % (scan_id)
    print >> f, "strategy: %s" % (strategy)
    print >> f, "workflow_name: %s" % (workflow)

    iflogger.info("CPAC custom log :")

    if isinstance(inputs, list):
        inputs = inputs[0]

    if os.path.exists(inputs):

        print >> f, "wf_status: DONE"

        iflogger.info(" version - %s, timestamp -%s, subject_id -%s, scan_id - %s, strategy -%s, workflow - %s, status -%s"\
                      %(str(version), str(stamp), subject_id, scan_id,strategy,workflow,'COMPLETED') )

    else:

        iflogger.info(" version - %s, timestamp -%s, subject_id -%s, scan_id - %s, strategy -%s, workflow - %s, status -%s"\
                      %(str(version), str(stamp), subject_id, scan_id,strategy,workflow,'ERROR') )

        print >> f, "wf_status: ERROR"

    f.close()

    os.system("log_py2js.py %s %s" % (out_file, log_dir))

    return out_file
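
A hedged sketch of how a helper like write_to_log above is typically wired up as a nipype Function node; the workflow name and paths are illustrative assumptions, and the 'inputs' field would normally come from an upstream connection:

import nipype.pipeline.engine as pe
import nipype.interfaces.utility as util

# Hypothetical node wrapping the function above; values are placeholders.
log_node = pe.Node(util.Function(input_names=['workflow', 'log_dir', 'index',
                                              'inputs', 'scan_id'],
                                 output_names=['out_file'],
                                 function=write_to_log),
                   name='write_log')
log_node.inputs.workflow = 'anat_preproc'          # hypothetical workflow name
log_node.inputs.log_dir = '/output/logs/sub-001'   # hypothetical log directory
log_node.inputs.index = 0
log_node.inputs.scan_id = 'scan_anat'
# 'inputs' is usually connected from an upstream node's output file(s).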
Exemple #40
0
    def run(self, config_file=None, partic_list=None):
        """Establish where and how we're running the pipeline and set up the
        run. (Entry point)

        - This is the entry point for pipeline building and connecting.
          Depending on the inputs, the appropriate workflow runner will
          be selected and executed.

        :type config_file: str
        :param config_file: Filepath to the pipeline configuration file in
                            YAML format.
        :type partic_list: str
        :param partic_list: Filepath to the participant list file in YAML
                            format.
        """

        from time import strftime
        from qap_utils import raise_smart_exception, \
                              check_config_settings

        # in case we are overloading
        if config_file:
            from qap.script_utils import read_yml_file
            self._config = read_yml_file(config_file)
            self.validate_config_dict()
            self._config["pipeline_config_yaml"] = config_file

        if not self._config:
            raise Exception("config not found!")

        if partic_list:
            self._config["subject_list"] = partic_list

        # Get configurations and settings
        check_config_settings(self._config, "num_processors")
        check_config_settings(self._config, "num_sessions_at_once")
        check_config_settings(self._config, "available_memory")
        check_config_settings(self._config, "output_directory")
        check_config_settings(self._config, "working_directory")

        self._num_bundles_at_once = 1
        write_report = self._config.get('write_report', False)

        if "cluster_system" in self._config.keys() and not self._bundle_idx:
            res_mngr = self._config["cluster_system"]
            if (res_mngr is None) or ("None" in res_mngr) or \
                ("none" in res_mngr):
                self._platform = None
            else:
                platforms = ["SGE", "PBS", "SLURM"]
                self._platform = str(res_mngr).upper()
                if self._platform not in platforms:
                    msg = "The resource manager %s provided in the pipeline "\
                          "configuration file is not one of the valid " \
                          "choices. It must be one of the following:\n%s" \
                          % (self._platform, str(platforms))
                    raise_smart_exception(locals(), msg)
        else:
            self._platform = None

        # Create output directory
        try:
            os.makedirs(self._config["output_directory"])
        except:
            if not op.isdir(self._config["output_directory"]):
                err = "[!] Output directory unable to be created.\n" \
                      "Path: %s\n\n" % self._config["output_directory"]
                raise Exception(err)
            else:
                pass

        # Create working directory
        try:
            os.makedirs(self._config["working_directory"])
        except:
            if not op.isdir(self._config["working_directory"]):
                err = "[!] Output directory unable to be created.\n" \
                      "Path: %s\n\n" % self._config["working_directory"]
                raise Exception(err)
            else:
                pass

        results = []

        # set up callback logging
        import logging
        from nipype.pipeline.plugins.callback_log import log_nodes_cb

        cb_log_filename = os.path.join(self._config["output_directory"],
                                       "callback.log")
        # Add handler to callback log file
        cb_logger = logging.getLogger('callback')
        cb_logger.setLevel(logging.DEBUG)
        handler = logging.FileHandler(cb_log_filename)
        cb_logger.addHandler(handler)

        # settle run arguments (plugins)
        self.runargs = {}
        self.runargs['plugin'] = 'MultiProc'
        self.runargs['plugin_args'] = \
            {'memory_gb': int(self._config["available_memory"]),
             'status_callback': log_nodes_cb}
        n_procs = {'n_procs': self._config["num_processors"]}
        self.runargs['plugin_args'].update(n_procs)

        # load the participant list file into dictionary
        subdict = self.load_sublist()

        # flatten the participant dictionary
        self._sub_dict = self.create_session_dict(subdict)

        # create the list of bundles
        self._bundles_list = self.create_bundles()
        num_bundles = len(self._bundles_list)

        if not self._bundle_idx:
            # want to initialize the run-level log directory (not the bundle-
            # level) only the first time we run the script, due to the
            # timestamp. if sub-nodes are being kicked off by a batch file on
            # a cluster, we don't want a new timestamp for every new node run
            self._run_log_dir = op.join(
                self._config['output_directory'],
                '_'.join([self._run_name, "logs"]), '_'.join(
                    [strftime("%Y%m%d_%H_%M_%S"),
                     "%dbundles" % num_bundles]))

        if self._run_log_dir:
            if not os.path.isdir(self._run_log_dir):
                try:
                    os.makedirs(self._run_log_dir)
                except:
                    if not op.isdir(self._run_log_dir):
                        err = "[!] Log directory unable to be created.\n" \
                              "Path: %s\n\n" % self._run_log_dir
                        raise Exception(err)
                    else:
                        pass

        if num_bundles == 1:
            self._config["num_sessions_at_once"] = \
                len(self._bundles_list[0])

        # Start the magic
        if not self._platform and not self._bundle_idx:
            # not a cluster/grid run
            for idx in range(1, num_bundles + 1):
                results.append(self.run_one_bundle(idx))

        elif not self._bundle_idx:
            # there is a self._bundle_idx only if the pipeline runner is run
            # with bundle_idx as a parameter - only happening either manually,
            # or when running on a cluster
            self.submit_cluster_batch_file(num_bundles)

        else:
            # if there is a bundle_idx supplied to the runner
            results = self.run_one_bundle(self._bundle_idx)
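
A hedged sketch of invoking the entry point above, assuming it belongs to a QAProtocolCLI-style runner object (a class by that name appears later in this collection); the file paths are placeholders:

# Hypothetical invocation; config and participant list paths are placeholders.
runner = QAProtocolCLI()
runner.run(config_file='/configs/qap_config.yml',
           partic_list='/configs/participants.yml')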
Exemple #41
0
    def process(self):
        # Process time
        now = datetime.datetime.now().strftime("%Y%m%d_%H%M")
       
        # Initialization
        if os.path.exists(os.path.join(self.base_directory,"LOG","pypeline.log")):
            os.unlink(os.path.join(self.base_directory,"LOG","pypeline.log"))
        config.update_config({'logging': {'log_directory': os.path.join(self.base_directory,"LOG"),
                                  'log_to_file': True},
                              'execution': {}
                              })
        logging.update_logging(config)
        flow = pe.Workflow(name='diffusion_pipeline', base_dir=os.path.join(self.base_directory,'NIPYPE'))
        iflogger = logging.getLogger('interface')
       
        # Data import
        datasource = pe.Node(interface=nio.DataGrabber(outfields = ['diffusion','T1','T2']), name='datasource')
        datasource.inputs.base_directory = os.path.join(self.base_directory,'NIFTI')
        datasource.inputs.template = '*'
        datasource.inputs.raise_on_empty = False
        datasource.inputs.field_template = dict(diffusion=self.global_conf.imaging_model+'.nii.gz',T1='T1.nii.gz',T2='T2.nii.gz')
       
        # Data sinker for output
        sinker = pe.Node(nio.DataSink(), name="sinker")
        sinker.inputs.base_directory = os.path.join(self.base_directory, "RESULTS")
        
        # Clear previous outputs
        self.clear_stages_outputs()

        if self.stages['Preprocessing'].enabled:
            preproc_flow = self.create_stage_flow("Preprocessing")
            flow.connect([
                (datasource,preproc_flow,[("diffusion","inputnode.diffusion")]),
                ])
       
        if self.stages['Segmentation'].enabled:
            if self.stages['Segmentation'].config.seg_tool == "Freesurfer":
                if self.stages['Segmentation'].config.use_existing_freesurfer_data == False:
                    self.stages['Segmentation'].config.freesurfer_subjects_dir = os.path.join(self.base_directory)
                    self.stages['Segmentation'].config.freesurfer_subject_id = os.path.join(self.base_directory,'FREESURFER')
                    if (not os.path.exists(os.path.join(self.base_directory,'NIPYPE/diffusion_pipeline/segmentation_stage/reconall/result_reconall.pklz'))) and os.path.exists(os.path.join(self.base_directory,'FREESURFER')):
                        shutil.rmtree(os.path.join(self.base_directory,'FREESURFER'))
            seg_flow = self.create_stage_flow("Segmentation")
            if self.stages['Segmentation'].config.seg_tool == "Freesurfer":
                flow.connect([(datasource,seg_flow, [('T1','inputnode.T1')])])
       
        if self.stages['Parcellation'].enabled:
            parc_flow = self.create_stage_flow("Parcellation")
            if self.stages['Segmentation'].config.seg_tool == "Freesurfer":
                flow.connect([(seg_flow,parc_flow, [('outputnode.subjects_dir','inputnode.subjects_dir'),
                                                    ('outputnode.subject_id','inputnode.subject_id')]),
                            ])
            else:
                flow.connect([
                            (seg_flow,parc_flow,[("outputnode.custom_wm_mask","inputnode.custom_wm_mask")])
                            ])
                                               
        if self.stages['Registration'].enabled:
            reg_flow = self.create_stage_flow("Registration")
            flow.connect([
                          (datasource,reg_flow, [('T1','inputnode.T1'),('T2','inputnode.T2')]),
                          (preproc_flow,reg_flow, [('outputnode.diffusion_preproc','inputnode.diffusion')]),
                          (parc_flow,reg_flow, [('outputnode.wm_mask_file','inputnode.wm_mask'),('outputnode.roi_volumes','inputnode.roi_volumes')]),
                          ])
            if self.stages['Registration'].config.registration_mode == "BBregister (FS)":
                flow.connect([
                          (seg_flow,reg_flow, [('outputnode.subjects_dir','inputnode.subjects_dir'),
                                                ('outputnode.subject_id','inputnode.subject_id')]),
                          ])
       
        if self.stages['Diffusion'].enabled:
            diff_flow = self.create_stage_flow("Diffusion")
            flow.connect([
                        (preproc_flow,diff_flow, [('outputnode.diffusion_preproc','inputnode.diffusion')]),
                        (reg_flow,diff_flow, [('outputnode.wm_mask_registered','inputnode.wm_mask_registered')]),
                        (reg_flow,diff_flow, [('outputnode.roi_volumes_registered','inputnode.roi_volumes')])
                        ])
                       
        if self.stages['Connectome'].enabled:
            if self.stages['Diffusion'].config.processing_tool == 'FSL':
                self.stages['Connectome'].config.probtrackx = True
            con_flow = self.create_stage_flow("Connectome")
            flow.connect([
                        (parc_flow,con_flow, [('outputnode.parcellation_scheme','inputnode.parcellation_scheme')]),
                        (diff_flow,con_flow, [('outputnode.track_file','inputnode.track_file'),('outputnode.gFA','inputnode.gFA'),
                                              ('outputnode.roi_volumes','inputnode.roi_volumes_registered'),
                                              ('outputnode.skewness','inputnode.skewness'),('outputnode.kurtosis','inputnode.kurtosis'),
                                              ('outputnode.P0','inputnode.P0')]),
                        (con_flow,sinker, [('outputnode.connectivity_matrices',now+'.connectivity_matrices')])
                        ])
            
            if self.stages['Parcellation'].config.parcellation_scheme == "Custom":
                flow.connect([(parc_flow,con_flow, [('outputnode.atlas_info','inputnode.atlas_info')])])
                
       
        iflogger.info("**** Processing ****")
       
        if(self.number_of_cores != 1):
            flow.run(plugin='MultiProc', plugin_args={'n_procs' : self.number_of_cores})
        else:
            flow.run()
       
        self.fill_stages_outputs()
        
        # Clean undesired folders/files
        rm_file_list = ['rh.EC_average','lh.EC_average','fsaverage']
        for file_to_rm in rm_file_list:
            if os.path.exists(os.path.join(self.base_directory,file_to_rm)):
                os.remove(os.path.join(self.base_directory,file_to_rm))
       
        # copy .ini and log file
        outdir = os.path.join(self.base_directory,"RESULTS",now)
        if not os.path.exists(outdir):
            os.makedirs(outdir)
        shutil.copy(self.config_file,outdir)
        shutil.copy(os.path.join(self.base_directory,'LOG','pypeline.log'),outdir)
       
        iflogger.info("**** Processing finished ****")
       
        return True, 'Processing successful'
import nipype.interfaces.utility as util  # utility
import nipype.pipeline.engine as pe  # pypeline engine
import nipype.interfaces.fsl as fsl
import nipype.interfaces.freesurfer as fs
import nipype.interfaces.dipy as dipy
import os, os.path as op

from nipype import logging

iflogger = logging.getLogger("interface")

fsl.FSLCommand.set_default_output_type("NIFTI_GZ")


def nifti_tensors_to_gmsh(in_file, fa_file, threshold=0.8, lower_triangular=True):
    import numpy as np
    import nibabel as nb
    from nipype.utils.filemanip import split_filename
    import os.path as op

    tensor_image = nb.load(in_file)
    fa_image = nb.load(fa_file)
    path, name, ext = split_filename(in_file)
    out_file = op.abspath(name + ".pos")
    f = open(out_file, "w")
    print "Writing tensors to {f}".format(f=out_file)
    tensor_data = tensor_image.get_data()
    orig_t = tensor_data.copy()
    tensor_data = np.flipud(tensor_data)

    tensor_data[:, :, :, 1] *= -1
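
The example above is cut off before the tensors are actually written; a hedged usage sketch, under the assumption that the function eventually returns the .pos path built as out_file:

# Hypothetical call; file names are placeholders.
pos_file = nifti_tensors_to_gmsh('dti_tensor.nii.gz', 'dti_FA.nii.gz',
                                 threshold=0.8, lower_triangular=True)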
Exemple #43
0
def compare_leadfields(leadfield1, leadfield2, mesh_file, write_mesh=True):
    '''
    Compares two leadfield matrices and outputs the difference as
    scalars attached to a mesh file. The mesh file must have the 
    same number of elements as the leadfield's second dimension

    FEM reciprocity leadfield is M x N where M is the number of sensors
    and N is the number of elements. Mesh file must have N elements.

    e.g. isotropic white matter conductivity
    vs. anisotropic conductivity from estimated diffusion tensors
    '''
    import os.path as op
    import h5py
    import numpy as np
    import shutil
    
    from forward.mesh import read_mesh

    from nipype.utils.filemanip import split_filename

    from nipype import logging    
    iflogger = logging.getLogger('interface')

    data_name = "leadfield"

    print("Reading leadfield 1: %s" % leadfield1)
    lf1_data_file = h5py.File(leadfield1, "r")
    lf1_data = lf1_data_file.get(data_name)
    leadfield_matrix1 = lf1_data.value

    print("Reading leadfield 2: %s" % leadfield2)
    lf2_data_file = h5py.File(leadfield2, "r")
    lf2_data = lf2_data_file.get(data_name)
    leadfield_matrix2 = lf2_data.value


    path, name, ext = split_filename(mesh_file)

    if write_mesh:
        # Electric field elements are only saved in the gray matter
        #elements_to_consider = [1001] #For Sphere
        elements_to_consider = [1002] #For head models
        mesh_data, _, _, _ = read_mesh(mesh_file, elements_to_consider)
        # Create the output mesh file
        rms_mesh_file = op.abspath(name + "_rmse.msh")
        iflogger.info('Copying current mesh file to %s' % rms_mesh_file)
        shutil.copyfile(mesh_file, rms_mesh_file)
        f = open(rms_mesh_file,'a') #Append to the end of the file
        iflogger.info('Appending root mean squared error scalars to %s' % rms_mesh_file)
    else:
        rms_mesh_file = None

    out_rms_hdf5_file_x = op.abspath(name + "_rmse_x.hdf5")
    out_rms_hdf5_file_y = op.abspath(name + "_rmse_y.hdf5")
    out_rms_hdf5_file_z = op.abspath(name + "_rmse_z.hdf5")
    out_rms_hdf5_file_avg = op.abspath(name + "_rmse_avg.hdf5")

    rms_hdf5_file_x = h5py.File(out_rms_hdf5_file_x, "w")
    rms_hdf5_file_y = h5py.File(out_rms_hdf5_file_y, "w")
    rms_hdf5_file_z = h5py.File(out_rms_hdf5_file_z, "w")
    rms_hdf5_file_avg = h5py.File(out_rms_hdf5_file_avg, "w")


    # Write the tag information to the file:
    if write_mesh:
        num_polygons = len(mesh_data)
    _, name1, _ = split_filename(leadfield1)
    _, name2, _ = split_filename(leadfield2)

    #For testing
    #noise = np.random.normal(0,1,leadfield_matrix2.shape)
    #leadfield_matrix2 = leadfield_matrix2 + noise

    # Reshape the leadfields so they are M x 3 x N vectors (x, y, z direction of electric field)
    leadfield_mesh1 = np.reshape(leadfield_matrix1, (leadfield_matrix1.shape[0]//3, 3, leadfield_matrix1.shape[1]))
    leadfield_mesh2 = np.reshape(leadfield_matrix2, (leadfield_matrix2.shape[0]//3, 3, leadfield_matrix2.shape[1]))

    #Check that the dimensions are appropriate
    try:
        if write_mesh:
            assert(len(mesh_data) == leadfield_mesh1.shape[0] == leadfield_mesh2.shape[0])
        else:
            assert(leadfield_mesh1.shape[0] == leadfield_mesh2.shape[0])
    except AssertionError:
        iflogger.error("Lead fields could not be compared because the " \
            "number of elements in the files do not match")
        if write_mesh:
            iflogger.error("Elements in %s: %d" % (mesh_file, len(mesh_data)))
        iflogger.error("Elements in leadfield 1 %s: %d" % (leadfield1, leadfield_mesh1.shape[0]))
        iflogger.error("Elements in leadfield 2 %s: %d" % (leadfield2, leadfield_mesh2.shape[0]))
        raise Exception

    # Get the mean squared difference between electric field vectors by sensor and element
    diff = leadfield_mesh2 - leadfield_mesh1

    # Gets the norm of the difference for X, Y, and Z directions
    norm_x_diff = np.linalg.norm(diff[:,0,:],axis=1)
    norm_y_diff = np.linalg.norm(diff[:,1,:],axis=1)
    norm_z_diff = np.linalg.norm(diff[:,2,:],axis=1)

    norm_lf1_x = np.linalg.norm(leadfield_mesh1[:,0,:], axis=1)
    norm_lf1_y = np.linalg.norm(leadfield_mesh1[:,1,:], axis=1)
    norm_lf1_z = np.linalg.norm(leadfield_mesh1[:,2,:], axis=1)

    rmse_x = norm_x_diff / norm_lf1_x
    rmse_y = norm_y_diff / norm_lf1_y
    rmse_z = norm_z_diff / norm_lf1_z

    rmse = (rmse_x + rmse_y + rmse_z) / 3.
    assert(leadfield_mesh2.shape[0] == rmse.shape[0])

    if write_mesh:
        rmse_avg_list = []
        rmse_x_list = []
        rmse_y_list = []
        rmse_z_list = []
        for idx, poly in enumerate(mesh_data):
            rmse_avg_str = ('%d %e \n' % (poly["element_id"], rmse[idx]))
            rmse_avg_list.append(rmse_avg_str)
            rmse_x_str = ('%d %e \n' % (poly["element_id"], rmse_x[idx]))
            rmse_x_list.append(rmse_x_str)
            rmse_y_str = ('%d %e \n' % (poly["element_id"], rmse_y[idx]))
            rmse_y_list.append(rmse_y_str)
            rmse_z_str  = ('%d %e \n' % (poly["element_id"], rmse_z[idx]))        
            rmse_z_list.append(rmse_z_str)
            iflogger.info("%3.3f%%" % (float(idx)/num_polygons*100.0))


        # ----- Average RMSE ----- #
        f.write('$ElementData\n')
        str_tag = '"Average Root Mean Squared Error"'
        timestep = 0.0001
        f.write('1\n') #Num String tags
        f.write(str_tag + '\n')
        f.write('1\n') #Num Real tags
        f.write('%f\n' % timestep)
        #Three integer tags: timestep, num field components, num elements
        f.write('3\n') #Three int tags
        f.write('0\n') #Time step index
        f.write('1\n') #Num field components
        f.write('%d\n' % len(mesh_data)) #Num nonzero field components
        for elementdata_str in rmse_avg_list:
            f.write(elementdata_str)
        f.write('$EndElementData\n')


        # ----- RMSE X ----- #
        f.write('$ElementData\n')
        str_tag = '"Root Mean Squared Error X-direction"'
        timestep = 0.0002
        f.write('1\n') #Num String tags
        f.write(str_tag + '\n')
        f.write('1\n') #Num Real tags
        f.write('%f\n' % timestep)
        #Three integer tags: timestep, num field components, num elements
        f.write('3\n') #Three int tags
        f.write('0\n') #Time step index
        f.write('1\n') #Num field components
        f.write('%d\n' % len(mesh_data)) #Num nonzero field components
        for elementdata_str in rmse_x_list:
            f.write(elementdata_str)
        f.write('$EndElementData\n')

        # ----- RMSE Y ----- #
        f.write('$ElementData\n')
        str_tag = '"Root Mean Squared Error Y-direction"'
        timestep = 0.0003
        f.write('1\n') #Num String tags
        f.write(str_tag + '\n')
        f.write('1\n') #Num Real tags
        f.write('%f\n' % timestep)
        #Three integer tags: timestep, num field components, num elements
        f.write('3\n') #Three int tags
        f.write('0\n') #Time step index
        f.write('1\n') #Num field components
        f.write('%d\n' % len(mesh_data)) #Num nonzero field components
        for elementdata_str in rmse_y_list:
            f.write(elementdata_str)
        f.write('$EndElementData\n')

        # ----- RMSE Z ----- #
        f.write('$ElementData\n')
        str_tag = '"Root Mean Squared Error Z-direction"'
        timestep = 0.0004
        f.write('1\n') #Num String tags
        f.write(str_tag + '\n')
        f.write('1\n') #Num Real tags
        f.write('%f\n' % timestep)
        #Three integer tags: timestep, num field components, num elements
        f.write('3\n') #Three int tags
        f.write('0\n') #Time step index
        f.write('1\n') #Num field components
        f.write('%d\n' % len(mesh_data)) #Num nonzero field components
        for elementdata_str in rmse_z_list:
            f.write(elementdata_str)
        f.write('$EndElementData\n')

        f.close()

        iflogger.info("Finished writing to %s" % rms_mesh_file)

    ## Save RMSE data to an HDF5 file
    dset_x = rms_hdf5_file_x.create_dataset("rmse_x", data=rmse_x)
    dset_x[...] = rmse_x
    dset_y = rms_hdf5_file_y.create_dataset("rmse_y", data=rmse_y)
    dset_y[...] = rmse_y
    dset_z = rms_hdf5_file_z.create_dataset("rmse_z", data=rmse_z)
    dset_z[...] = rmse_z
    dset = rms_hdf5_file_avg.create_dataset("rmse_avg", data=rmse)
    dset[...] = rmse

    rms_hdf5_file_x.close()
    rms_hdf5_file_y.close()
    rms_hdf5_file_z.close()
    rms_hdf5_file_avg.close()
    print("Saved RMSE-X matrix as %s" % out_rms_hdf5_file_x)
    print("Saved RMSE-Y matrix as %s" % out_rms_hdf5_file_y)
    print("Saved RMSE-Z matrix as %s" % out_rms_hdf5_file_z)
    print("Saved RMSE-Avg matrix as %s" % out_rms_hdf5_file_avg)
    ###
    return rms_mesh_file, out_rms_hdf5_file_avg, out_rms_hdf5_file_x, out_rms_hdf5_file_y, out_rms_hdf5_file_z
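
A hedged usage sketch for compare_leadfields above; the file names are placeholders, and the HDF5 files are assumed to hold a 'leadfield' dataset whose first dimension is three times the number of mesh elements, as the reshape in the function implies:

# Hypothetical comparison of an isotropic vs. anisotropic head-model leadfield.
rms_mesh, rmse_avg_h5, rmse_x_h5, rmse_y_h5, rmse_z_h5 = compare_leadfields(
    'leadfield_isotropic.hdf5',
    'leadfield_anisotropic.hdf5',
    'head_model.msh',
    write_mesh=True)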
Exemple #44
0
import nibabel as nb
import numpy as np
from skimage import morphology as sim
from scipy.ndimage.morphology import binary_fill_holes

from nilearn.masking import compute_epi_mask
from nilearn.image import concat_imgs

from nipype import logging
from nipype.utils.filemanip import fname_presuffix
from nipype.interfaces.base import (
    traits, isdefined, TraitedSpec, BaseInterfaceInputSpec,
    File, InputMultiPath, SimpleInterface
)

LOGGER = logging.getLogger('interface')


class MaskEPIInputSpec(BaseInterfaceInputSpec):
    in_files = InputMultiPath(File(exists=True), mandatory=True,
                              desc='input EPI or list of files')
    lower_cutoff = traits.Float(0.2, usedefault=True)
    upper_cutoff = traits.Float(0.85, usedefault=True)
    connected = traits.Bool(True, usedefault=True)
    enhance_t2 = traits.Bool(False, usedefault=True,
                             desc='enhance T2 contrast on image')
    opening = traits.Int(2, usedefault=True)
    closing = traits.Bool(True, usedefault=True)
    fill_holes = traits.Bool(True, usedefault=True)
    exclude_zeros = traits.Bool(False, usedefault=True)
    ensure_finite = traits.Bool(True, usedefault=True)
    def _run_interface(self, runtime):
        import os
        import numpy as np
        import pandas as pd
        import SimpleITK as sitk
        import nilearn.image
        import nibabel as nb
        from nilearn.signal import butterworth
        from scipy.signal import detrend
        from rabies.confound_correction_pkg.utils import recover_3D,recover_4D,temporal_censoring,lombscargle_fill, exec_ICA_AROMA,butterworth
        from rabies.analysis_pkg.analysis_functions import closed_form

        ### set null returns in case the workflow is interrupted
        empty_img = sitk.GetImageFromArray(np.empty([1,1]))
        empty_file = os.path.abspath('empty.nii.gz')
        sitk.WriteImage(empty_img, empty_file)

        setattr(self, 'cleaned_path', empty_file)
        setattr(self, 'VE_file_path', empty_file)
        setattr(self, 'STD_file_path', empty_file)
        setattr(self, 'CR_STD_file_path', empty_file)
        setattr(self, 'frame_mask_file', empty_file)
        setattr(self, 'data_dict', empty_file)
        setattr(self, 'aroma_out', empty_file)
        ###

        bold_file = self.inputs.bold_file
        brain_mask_file = self.inputs.brain_mask_file
        CSF_mask_file = self.inputs.CSF_mask_file
        data_dict = self.inputs.data_dict
        cr_opts = self.inputs.cr_opts

        FD_trace=data_dict['FD_trace']
        confounds_array=data_dict['confounds_array']
        confounds_file=data_dict['confounds_csv']
        time_range=data_dict['time_range']
        confounds_6rigid_array=data_dict['confounds_6rigid_array']

        cr_out = os.getcwd()
        import pathlib  # Better path manipulation
        filename_split = pathlib.Path(bold_file).name.rsplit(".nii")

        brain_mask = sitk.GetArrayFromImage(sitk.ReadImage(brain_mask_file, sitk.sitkFloat32))
        volume_indices = brain_mask.astype(bool)

        data_img = sitk.ReadImage(bold_file, sitk.sitkFloat32)
        data_array = sitk.GetArrayFromImage(data_img)
        num_volumes = data_array.shape[0]
        timeseries = np.zeros([num_volumes, volume_indices.sum()])
        for i in range(num_volumes):
            timeseries[i, :] = (data_array[i, :, :, :])[volume_indices]
        timeseries = timeseries[time_range,:]

        if cr_opts.TR=='auto':
            TR = float(data_img.GetSpacing()[3])
        else:
            TR = float(cr_opts.TR)

        '''
        #1 - Compute and apply frame censoring mask (from FD and/or DVARS thresholds)
        '''
        frame_mask,FD_trace,DVARS = temporal_censoring(timeseries, FD_trace, 
                cr_opts.FD_censoring, cr_opts.FD_threshold, cr_opts.DVARS_censoring, cr_opts.minimum_timepoint)
        if frame_mask is None:
            return runtime

        timeseries = timeseries[frame_mask]
        confounds_array = confounds_array[frame_mask]

        '''
        #2 - Linear detrending of fMRI timeseries and nuisance regressors
        '''
        # apply simple detrending, after censoring
        timeseries = detrend(timeseries,axis=0)
        confounds_array = detrend(confounds_array,axis=0) # apply detrending to the confounds too

        '''
        #3 - Apply ICA-AROMA.
        '''
        if cr_opts.run_aroma:
            # write intermediary output files for timeseries and 6 rigid body parameters
            timeseries_3d = recover_4D(brain_mask_file, timeseries, bold_file)
            inFile = f'{cr_out}/{filename_split[0]}_aroma_input.nii.gz'
            sitk.WriteImage(timeseries_3d, inFile)

            confounds_6rigid_array=confounds_6rigid_array[frame_mask,:]
            confounds_6rigid_array = detrend(confounds_6rigid_array,axis=0) # apply detrending to the confounds too
            df = pd.DataFrame(confounds_6rigid_array)
            df.columns = ['mov1', 'mov2', 'mov3', 'rot1', 'rot2', 'rot3']
            mc_file = f'{cr_out}/{filename_split[0]}_aroma_input.csv'
            df.to_csv(mc_file)

            cleaned_file, aroma_out = exec_ICA_AROMA(inFile, mc_file, brain_mask_file, CSF_mask_file, TR, cr_opts.aroma_dim, random_seed=cr_opts.aroma_random_seed)
            # if AROMA failed, returns empty outputs
            if cleaned_file is None:
                return runtime
            setattr(self, 'aroma_out', aroma_out)

            data_img = sitk.ReadImage(cleaned_file, sitk.sitkFloat32)
            data_array = sitk.GetArrayFromImage(data_img)
            num_volumes = data_array.shape[0]
            timeseries = np.zeros([num_volumes, volume_indices.sum()])
            for i in range(num_volumes):
                timeseries[i, :] = (data_array[i, :, :, :])[volume_indices]

        if (cr_opts.highpass is not None) or (cr_opts.lowpass is not None):
            '''
            #4 - If frequency filtering and frame censoring are applied, simulate data in censored timepoints using the Lomb-Scargle periodogram, 
                as suggested in Power et al. (2014, Neuroimage), for both the fMRI timeseries and nuisance regressors prior to filtering.
            '''
            timeseries_filled = lombscargle_fill(x=timeseries,time_step=TR,time_mask=frame_mask)
            confounds_filled = lombscargle_fill(x=confounds_array,time_step=TR,time_mask=frame_mask)

            '''
            #5 - As recommended in Lindquist et al. (2019, Human brain mapping), make the nuisance regressors orthogonal
                to the temporal filter.
            '''
            confounds_filtered = butterworth(confounds_filled, TR=TR,
                                    high_pass=cr_opts.highpass, low_pass=cr_opts.lowpass)

            '''
            #6 - Apply highpass and/or lowpass filtering on the fMRI timeseries (with simulated timepoints).
            '''

            timeseries_filtered = butterworth(timeseries_filled, TR=TR,
                                    high_pass=cr_opts.highpass, low_pass=cr_opts.lowpass)

            # correct for edge effects of the filters
            num_cut = int(cr_opts.edge_cutoff/TR)
            if len(frame_mask)<2*num_cut:
                raise ValueError(f"The timeseries are too short to remove {cr_opts.edge_cutoff}sec of data at each edge.")

            if num_cut != 0:
                frame_mask[:num_cut]=0
                frame_mask[-num_cut:]=0


            '''
            #7 - Re-apply the frame censoring mask onto filtered fMRI timeseries and nuisance regressors, taking out the
                simulated timepoints. Edge artefacts from frequency filtering can also be removed as recommended in Power et al. (2014, Neuroimage).
            '''
            # re-apply the masks to take out simulated data points, and take off the edges
            timeseries = timeseries_filtered[frame_mask]
            confounds_array = confounds_filtered[frame_mask]
        
        if frame_mask.sum()<int(cr_opts.minimum_timepoint):
            from nipype import logging
            log = logging.getLogger('nipype.workflow')
            log.warning(f"CONFOUND CORRECTION LEFT LESS THAN {str(cr_opts.minimum_timepoint)} VOLUMES. THIS SCAN WILL BE REMOVED FROM FURTHER PROCESSING.")
            return runtime

        '''
        #8 - Apply confound regression using the selected nuisance regressors.
        '''
        # voxels that have a NaN value are set to 0
        nan_voxels = np.isnan(timeseries).sum(axis=0)>1
        timeseries[:,nan_voxels] = 0

        # estimate the VE from the CR selection, or 6 rigid motion parameters if no CR is applied
        X=confounds_array
        Y=timeseries
        try:
            predicted = X.dot(closed_form(X,Y))
            res = Y-predicted
        except:
            from nipype import logging
            log = logging.getLogger('nipype.workflow')
            log.warning("SINGULAR MATRIX ERROR DURING CONFOUND REGRESSION. THIS SCAN WILL BE REMOVED FROM FURTHER PROCESSING.")
            empty_img = sitk.GetImageFromArray(np.empty([1,1]))
            empty_file = os.path.abspath('empty.nii.gz')
            sitk.WriteImage(empty_img, empty_file)

            return runtime

        # derive features from the predicted timeseries
        predicted_std = predicted.std(axis=0)
        predicted_time = np.sqrt((predicted.T**2).mean(axis=0))

        VE_spatial = 1-(res.var(axis=0)/Y.var(axis=0))
        VE_temporal = 1-(res.var(axis=1)/Y.var(axis=1))

        if len(cr_opts.conf_list) > 0:
            # if confound regression is applied
            timeseries = res

        # save the temporal STD map prior to standardization and smoothing
        temporal_std = timeseries.std(axis=0)

        '''
        #9 - Standardize timeseries
        '''
        if cr_opts.standardize:
            timeseries = (timeseries-timeseries.mean(axis=0))/temporal_std

        # save output files
        VE_spatial_map = recover_3D(brain_mask_file, VE_spatial)
        STD_spatial_map = recover_3D(brain_mask_file, temporal_std)
        CR_STD_spatial_map = recover_3D(brain_mask_file, predicted_std)
        timeseries_3d = recover_4D(brain_mask_file, timeseries, bold_file)
        cleaned_path = cr_out+'/'+filename_split[0]+'_cleaned.nii.gz'
        sitk.WriteImage(timeseries_3d, cleaned_path)
        VE_file_path = cr_out+'/'+filename_split[0]+'_VE_map.nii.gz'
        sitk.WriteImage(VE_spatial_map, VE_file_path)
        STD_file_path = cr_out+'/'+filename_split[0]+'_STD_map.nii.gz'
        sitk.WriteImage(STD_spatial_map, STD_file_path)
        CR_STD_file_path = cr_out+'/'+filename_split[0]+'_CR_STD_map.nii.gz'
        sitk.WriteImage(CR_STD_spatial_map, CR_STD_file_path)
        frame_mask_file = cr_out+'/'+filename_split[0]+'_frame_censoring_mask.csv'
        pd.DataFrame(frame_mask).to_csv(frame_mask_file, index=False, header=['False = Masked Frames'])

        if cr_opts.smoothing_filter is not None:
            '''
            #10 - Apply Gaussian spatial smoothing.
            '''
            timeseries_3d = nilearn.image.smooth_img(nb.load(cleaned_path), cr_opts.smoothing_filter)
            timeseries_3d.to_filename(cleaned_path)

        # apply the frame mask to FD trace/DVARS
        DVARS = DVARS[frame_mask]
        FD_trace = FD_trace[frame_mask]

        # calculate temporal degrees of freedom left after confound correction
        num_timepoints = frame_mask.sum()
        if cr_opts.run_aroma:
            aroma_rm = (pd.read_csv(f'{aroma_out}/classification_overview.txt', sep='\t')['Motion/noise']).sum()
        else:
            aroma_rm = 0
        num_regressors = confounds_array.shape[1]
        tDOF = num_timepoints - (aroma_rm+num_regressors)

        data_dict = {'FD_trace':FD_trace, 'DVARS':DVARS, 'time_range':time_range, 'frame_mask':frame_mask, 'confounds_array':confounds_array, 'VE_temporal':VE_temporal, 'confounds_csv':confounds_file, 'predicted_time':predicted_time, 'tDOF':tDOF}

        setattr(self, 'cleaned_path', cleaned_path)
        setattr(self, 'VE_file_path', VE_file_path)
        setattr(self, 'STD_file_path', STD_file_path)
        setattr(self, 'CR_STD_file_path', CR_STD_file_path)
        setattr(self, 'frame_mask_file', frame_mask_file)
        setattr(self, 'data_dict', data_dict)

        return runtime
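
Step #8 above calls a closed_form() helper imported from rabies; a minimal sketch of the closed-form least-squares solve that such a helper typically performs (an assumption about its behaviour, not the library's actual implementation):

import numpy as np

def closed_form_sketch(X, Y):
    # Ordinary least squares: beta = (X^T X)^-1 X^T Y, via pinv for stability.
    return np.linalg.pinv(X.T.dot(X)).dot(X.T).dot(Y)

# Confound regression as in step #8:
#   X is (timepoints x regressors), Y is (timepoints x voxels)
#   residuals = Y - X.dot(beta) become the cleaned timeseries.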
Exemple #46
0
"""Workflow for the registration of EPI datasets to anatomical space via reconstructed surfaces."""
from nipype.pipeline import engine as pe
from nipype.interfaces import utility as niu
from nipype import logging

LOGGER = logging.getLogger("workflow")


def init_bbreg_wf(
    *,
    omp_nthreads,
    debug=False,
    epi2t1w_init="register",
    epi2t1w_dof=6,
    name="bbreg_wf",
    use_bbr=None,
):
    """
    Build a workflow to run FreeSurfer's ``bbregister``.

    This workflow uses FreeSurfer's ``bbregister`` to register a EPI image to
    a T1-weighted structural image.
    It is a counterpart to :py:func:`~fmriprep.workflows.bold.registration.init_fsl_bbr_wf`,
    which performs the same task using FSL's FLIRT with a BBR cost function.
    The ``use_bbr`` option permits a high degree of control over registration.
    If ``False``, standard, affine coregistration will be performed using
    FreeSurfer's ``mri_coreg`` tool.
    If ``True``, ``bbregister`` will be seeded with the initial transform found
    by ``mri_coreg`` (equivalent to running ``bbregister --init-coreg``).
    If ``None``, after ``bbregister`` is run, the resulting affine transform
    will be compared to the initial transform found by ``mri_coreg``.
Exemple #47
0
from math import sqrt
import scipy.ndimage as nd
from builtins import zip

from nipype import logging
from nipype.utils.filemanip import fname_presuffix
from nipype.interfaces.base import (
    traits, TraitedSpec, File, isdefined, InputMultiPath, BaseInterfaceInputSpec,
    SimpleInterface
)

from ..utils.misc import _flatten_dict
from ..qc.anatomical import (snr, snr_dietrich, cnr, fber, efc, art_qi1,
                             art_qi2, volume_fraction, rpve, summary_stats,
                             cjv, wm2max)
IFLOGGER = logging.getLogger('nipype.interface')


class StructuralQCInputSpec(BaseInterfaceInputSpec):
    in_file = File(exists=True, mandatory=True, desc='file to be plotted')
    in_noinu = File(exists=True, mandatory=True, desc='image after INU correction')
    in_segm = File(exists=True, mandatory=True, desc='segmentation file from FSL FAST')
    in_bias = File(exists=True, mandatory=True, desc='bias file')
    head_msk = File(exists=True, mandatory=True, desc='head mask')
    air_msk = File(exists=True, mandatory=True, desc='air mask')
    rot_msk = File(exists=True, mandatory=True, desc='rotation mask')
    artifact_msk = File(exists=True, mandatory=True, desc='air mask')
    in_pvms = InputMultiPath(File(exists=True), mandatory=True,
                             desc='partial volume maps from FSL FAST')
    in_tpms = InputMultiPath(File(), desc='tissue probability maps from FSL FAST')
    mni_tpms = InputMultiPath(File(), desc='tissue probability maps from FSL FAST')
Exemple #48
0
False           False       False         HMC only
=============== =========== ============= ===============


"""

from nipype.pipeline import engine as pe
from nipype.interfaces import utility as niu
from nipype import logging

# Fieldmap workflows
from .pepolar import init_pepolar_unwarp_wf
from .syn import init_syn_sdc_wf
from .unwarp import init_sdc_unwarp_wf

LOGGER = logging.getLogger('workflow')
FMAP_PRIORITY = {'epi': 0, 'fieldmap': 1, 'phasediff': 2, 'syn': 3}
DEFAULT_MEMORY_MIN_GB = 0.01


def init_sdc_wf(fmaps,
                bold_meta,
                omp_nthreads=1,
                debug=False,
                fmap_bspline=False,
                fmap_demean=True):
    """
    This workflow implements the heuristics to choose a
    :abbr:`SDC (susceptibility distortion correction)` strategy.
    When no field map information is present within the BIDS inputs,
    the EXPERIMENTAL "fieldmap-less SyN" can be performed, using
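
This example is also truncated; in selection logic of this kind, the FMAP_PRIORITY mapping defined above is typically used to sort candidate fieldmaps so the most preferred estimator is chosen. A hedged sketch of that idea (not the actual body of init_sdc_wf), assuming each entry in fmaps is a dict with a 'suffix' key:

# Hypothetical helper: keep the highest-priority fieldmap candidate.
def pick_best_fmap(fmaps):
    return sorted(fmaps, key=lambda fmap: FMAP_PRIORITY[fmap['suffix']])[0]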
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Date:   2015-10-16 12:52:35
# @Last Modified by:   Oscar Esteban
# @Last Modified time: 2015-10-22 12:16:13
import os
import os.path as op
import time
import argparse
import yaml

from nipype import logging
logger = logging.getLogger('workflow')


class QAProtocolCLI:
    """
    This class and the associated _run_workflow function implement what
    the former scripts (qap_anatomical_spatial.py, etc.) contained
    """

    def __init__(self):
        parser = argparse.ArgumentParser()

        group = parser.add_argument_group(
            "Regular Use Inputs (non-cloud runs)")
        cloudgroup = parser.add_argument_group(
            "AWS Cloud Inputs (only required for AWS Cloud runs)")
        req = parser.add_argument_group("Required Inputs")

        cloudgroup.add_argument('--subj_idx', type=int,
Exemple #50
0
def main():
    """Entry point"""
    from nipype import logging as nlogging
    from multiprocessing import set_start_method, Process, Manager
    from ..viz.reports import generate_reports
    from ..utils.bids import write_derivative_description

    try:
        set_start_method('forkserver')
    except RuntimeError:
        pass

    warnings.showwarning = _warn_redirect
    opts = get_parser().parse_args()

    exec_env = os.name

    # special variable set in the container
    if os.getenv('IS_DOCKER_8395080871'):
        exec_env = 'singularity'
        cgroup = Path('/proc/1/cgroup')
        if cgroup.exists() and 'docker' in cgroup.read_text():
            exec_env = 'docker'
            if os.getenv('DOCKER_VERSION_8395080871'):
                exec_env = 'qsiprep-docker'

    sentry_sdk = None
    if not opts.notrack:
        import sentry_sdk
        from ..utils.sentry import sentry_setup
        sentry_setup(opts, exec_env)

    # Check input files and directories
    validate_bids(opts)
    set_freesurfer_license(opts)

    # Retrieve logging level
    log_level = int(max(25 - 5 * opts.verbose_count, logging.DEBUG))
    # Set logging
    logger.setLevel(log_level)
    nlogging.getLogger('nipype.workflow').setLevel(log_level)
    nlogging.getLogger('nipype.interface').setLevel(log_level)
    nlogging.getLogger('nipype.utils').setLevel(log_level)

    errno = 0
    mode = "recon" if opts.recon_only else "prep"
    if mode == "recon":
        logger.info("running qsirecon")
        building_func = build_recon_workflow
    else:
        logger.info("running qsiprep")
        building_func = build_qsiprep_workflow

    # Call build_workflow(opts, retval)
    with Manager() as mgr:
        retval = mgr.dict()
        p = Process(target=building_func, args=(opts, retval))
        p.start()
        p.join()

        if p.exitcode != 0:
            sys.exit(p.exitcode)

        qsiprep_wf = retval['workflow']
        plugin_settings = retval['plugin_settings']
        bids_dir = retval['bids_dir']
        output_dir = retval['output_dir']
        work_dir = retval['work_dir']
        subject_list = retval['subject_list']
        run_uuid = retval['run_uuid']
        retcode = retval['return_code']

    if qsiprep_wf is None:
        sys.exit(1)

    if opts.write_graph:
        qsiprep_wf.write_graph(
            graph2use="colored", format='svg', simple_form=True)

    if opts.reports_only:
        sys.exit(int(retcode > 0))

    if opts.boilerplate:
        sys.exit(int(retcode > 0))

    # Check workflow for missing commands
    missing = check_deps(qsiprep_wf)
    if missing:
        print("Cannot run qsiprep. Missing dependencies:")
        for iface, cmd in missing:
            print("\t{} (Interface: {})".format(cmd, iface))
        sys.exit(2)

    # Clean up master process before running workflow, which may create forks
    gc.collect()

    # Sentry tracking
    if not opts.notrack:
        from ..utils.sentry import start_ping
        start_ping(run_uuid, len(subject_list))

    errno = 1
    try:
        qsiprep_wf.run(**plugin_settings)
    except Exception as e:
        if not opts.notrack:
            from ..utils.sentry import process_crashfile
            crashfolders = [Path(output_dir) / 'qsiprep' / 'sub-{}'.format(s) / 'log' / run_uuid
                            for s in subject_list]
            for crashfolder in crashfolders:
                for crashfile in crashfolder.glob('crash*.*'):
                    process_crashfile(crashfile)

            if "Workflow did not execute cleanly" not in str(e):
                sentry_sdk.capture_exception(e)
        logger.critical('QSIPrep failed: %s', e)
        raise
    else:
        errno = 0
        logger.log(25, 'QSI{} finished without errors'.format(mode))
        if not opts.notrack:
            sentry_sdk.capture_message('QSI{} finished without errors'.format(mode),
                                       level='info')

    # No reports for recon mode yet
    if mode == "recon":
        sys.exit(int(errno > 0))

    # Generate reports phase
    errno += generate_reports(subject_list, output_dir, work_dir, run_uuid)
    write_derivative_description(bids_dir, str(Path(output_dir) / 'qsiprep'))
    if opts.recon_spec is None:
        logger.info("No additional workflows to run.")
        sys.exit(int(errno > 0))

    # Run an additional workflow if preproc + recon are requested
    opts.recon_input = output_dir + "/qsiprep"
    with Manager() as mgr:
        retval = mgr.dict()
        p = Process(target=build_recon_workflow, args=(opts, retval))
        p.start()
        p.join()

        if p.exitcode != 0:
            sys.exit(p.exitcode)

        qsirecon_post_wf = retval['workflow']
        plugin_settings = retval['plugin_settings']
        bids_dir = retval['bids_dir']
        output_dir = retval['output_dir']
        work_dir = retval['work_dir']
        subject_list = retval['subject_list']
        run_uuid = retval['run_uuid']
        retcode = retval['return_code']

    if qsirecon_post_wf is None:
        sys.exit(1)

    if opts.write_graph:
        qsirecon_post_wf.write_graph(
            graph2use="colored", format='svg', simple_form=True)

    if opts.reports_only:
        sys.exit(int(retcode > 0))

    if opts.boilerplate:
        sys.exit(int(retcode > 0))

    # Check workflow for missing commands
    missing = check_deps(qsirecon_post_wf)
    if missing:
        print("Cannot run qsiprep. Missing dependencies:")
        for iface, cmd in missing:
            print("\t{} (Interface: {})".format(cmd, iface))
        sys.exit(2)

    # Clean up master process before running workflow, which may create forks
    gc.collect()
    try:
        qsirecon_post_wf.run(**plugin_settings)
    except Exception as e:
        if not opts.notrack:
            from ..utils.sentry import process_crashfile
            crashfolders = [Path(output_dir) / 'qsiprep' / 'sub-{}'.format(s) / 'log' / run_uuid
                            for s in subject_list]
            for crashfolder in crashfolders:
                for crashfile in crashfolder.glob('crash*.*'):
                    process_crashfile(crashfile)

            if "Workflow did not execute cleanly" not in str(e):
                sentry_sdk.capture_exception(e)
        logger.critical('QSIPrep failed: %s', e)
        raise
    else:
        errno += 0
        logger.log(25, 'QSIPrep finished without errors')
        if not opts.notrack:
            sentry_sdk.capture_message('QSIPostRecon finished without errors',
                                       level='info')
    sys.exit(int(errno > 0))
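
A hedged sketch of the build-in-a-child-process pattern used twice in the example above: the workflow graph is assembled in a separate process and handed back through a Manager dict, so the builder's memory is released before the (fork-heavy) execution starts in the parent. make_workflow is a hypothetical placeholder for whichever builder function applies:

from multiprocessing import Manager, Process

def _build(opts, retval):
    retval['workflow'] = make_workflow(opts)   # hypothetical builder
    retval['return_code'] = 0

def build_then_run(opts, plugin_settings):
    with Manager() as mgr:
        retval = mgr.dict()
        proc = Process(target=_build, args=(opts, retval))
        proc.start()
        proc.join()
        wf = retval['workflow']
    wf.run(**plugin_settings)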
Exemple #51
0
import os
import os.path as op
from warnings import warn
from multiprocessing import cpu_count
from lockfile import LockFile

from argparse import ArgumentParser
from argparse import RawTextHelpFormatter
from nipype import config as ncfg
from nipype import logging

from mriqc.reports.generators import workflow_report
from mriqc.workflows import core as mwc
from mriqc import __version__

LOGGER = logging.getLogger('workflow')


def main():
    """Entry point"""
    parser = ArgumentParser(description='MRI Quality Control',
                            formatter_class=RawTextHelpFormatter)

    g_input = parser.add_argument_group('Inputs')
    g_input.add_argument('-B', '--bids-root', action='store', default=os.getcwd())
    g_input.add_argument('-i', '--input-folder', action='store')
    g_input.add_argument('-S', '--subject-id', nargs='*', action='store')
    g_input.add_argument('-s', '--session-id', action='store')
    g_input.add_argument('-r', '--run-id', action='store')
    g_input.add_argument('-d', '--data-type', action='store', nargs='*',
                         choices=['anat', 'func'], default=['anat', 'func'])
Exemple #52
0
import os
import os.path as op
import re
import simplejson as json
import gzip
from shutil import copytree, rmtree, copyfileobj

from nipype import logging
from nipype.interfaces.base import (traits, isdefined, TraitedSpec,
                                    BaseInterfaceInputSpec, File, Directory,
                                    InputMultiPath, OutputMultiPath, Str,
                                    SimpleInterface)
from nipype.utils.filemanip import copyfile

LOGGER = logging.getLogger('interface')
BIDS_NAME = re.compile(
    r'^(.*\/)?(?P<subject_id>sub-[a-zA-Z0-9]+)(_(?P<session_id>ses-[a-zA-Z0-9]+))?'
    r'(_(?P<task_id>task-[a-zA-Z0-9]+))?(_(?P<acq_id>acq-[a-zA-Z0-9]+))?'
    r'(_(?P<rec_id>rec-[a-zA-Z0-9]+))?(_(?P<run_id>run-[a-zA-Z0-9]+))?')
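# Illustrative only (not part of the original module): what BIDS_NAME extracts
# from a typical BIDS path; the path below is a made-up example.
_example = BIDS_NAME.search(
    '/data/sub-01/ses-02/func/sub-01_ses-02_task-rest_run-1_bold.nii.gz')
# _example.groupdict() -> {'subject_id': 'sub-01', 'session_id': 'ses-02',
#                          'task_id': 'task-rest', 'acq_id': None,
#                          'rec_id': None, 'run_id': 'run-1'}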


class FileNotFoundError(IOError):
    pass


class BIDSInfoInputSpec(BaseInterfaceInputSpec):
    in_file = File(mandatory=True, desc='input file, part of a BIDS tree')


class BIDSInfoOutputSpec(TraitedSpec):
def build_functional_temporal_workflow(resource_pool, config, subject_info, \
                                           run_name, site_name=None):
    
    # build pipeline for each subject, individually

    # ~ 5 min 45 sec per subject
    # (roughly 345 seconds)

    import os
    import sys

    import nipype.interfaces.io as nio
    import nipype.pipeline.engine as pe

    import nipype.interfaces.utility as util
    import nipype.interfaces.fsl.maths as fsl
    
    import glob
    import yaml

    import time
    from time import strftime
    from nipype import config as nyconfig
    from nipype import logging


    logger = logging.getLogger('workflow')


    sub_id = str(subject_info[0])

    if subject_info[1]:
        session_id = str(subject_info[1])
    else:
        session_id = "session_0"

    if subject_info[2]:
        scan_id = str(subject_info[2])
    else:
        scan_id = "scan_0"


    # define and create the output directory
    output_dir = os.path.join(config["output_directory"], run_name, \
                              sub_id, session_id, scan_id)

    try:
        os.makedirs(output_dir)
    except OSError:
        if not os.path.isdir(output_dir):
            err = "[!] Output directory unable to be created.\n" \
                  "Path: %s\n\n" % output_dir
            raise Exception(err)


    log_dir = output_dir
   
    # set up logging
    nyconfig.update_config({'logging': {'log_directory': log_dir, 'log_to_file': True}})
    logging.update_logging(nyconfig)

    # take date+time stamp for run identification purposes
    unique_pipeline_id = strftime("%Y%m%d%H%M%S")
    pipeline_start_stamp = strftime("%Y-%m-%d_%H:%M:%S")
    
    pipeline_start_time = time.time()


    logger.info(pipeline_start_stamp)

    logger.info("Contents of resource pool:\n" + str(resource_pool))

    logger.info("Configuration settings:\n" + str(config))


        
    # for QAP spreadsheet generation only
    config["subject_id"] = sub_id

    config["session_id"] = session_id

    config["scan_id"] = scan_id
    
    config["run_name"] = run_name


    if site_name:
        config["site_name"] = site_name
    
    

    workflow = pe.Workflow(name=scan_id)

    workflow.base_dir = os.path.join(config["working_directory"], sub_id, \
                            session_id)
                            
    # set up crash directory
    workflow.config['execution'] = \
        {'crashdump_dir': config["output_directory"]}
    
    
    # update that resource pool with what's already in the output directory
    for resource in os.listdir(output_dir):
    
        if os.path.isdir(os.path.join(output_dir,resource)) and resource not in resource_pool.keys():
        
            resource_pool[resource] = glob.glob(os.path.join(output_dir, \
                                          resource, "*"))[0]
                 

    # resource pool check
    invalid_paths = []
    
    for resource in resource_pool.keys():
    
        if not os.path.isfile(resource_pool[resource]):
        
            invalid_paths.append((resource, resource_pool[resource]))
            
            
    if len(invalid_paths) > 0:
        
        err = "\n\n[!] The paths provided in the subject list to the " \
              "following resources are not valid:\n"
        
        for path_tuple in invalid_paths:
        
            err = err + path_tuple[0] + ": " + path_tuple[1] + "\n"
                  
        err = err + "\n\n"
        
        raise Exception(err)
                  
    
    
    # start connecting the pipeline
       
    if "qap_functional_temporal" not in resource_pool.keys():

        from qap.qap_workflows import qap_functional_temporal_workflow

        workflow, resource_pool = \
            qap_functional_temporal_workflow(workflow, resource_pool, config)

    

    # set up the datasinks
    new_outputs = 0
    
    if "write_all_outputs" not in config.keys():
        config["write_all_outputs"] = False

    if config["write_all_outputs"] == True:

        for output in resource_pool.keys():
    
            # we use a check for len()==2 here to select those items in the
            # resource pool which are tuples of (node, node_output), instead
            # of the items which are straight paths to files

            # resource pool items which are in the tuple format are the
            # outputs that have been created in this workflow because they
            # were not present in the subject list YML (the starting resource 
            # pool) and had to be generated

            if len(resource_pool[output]) == 2:
    
                ds = pe.Node(nio.DataSink(), name='datasink_%s' % output)
                ds.inputs.base_directory = output_dir
    
                node, out_file = resource_pool[output]

                workflow.connect(node, out_file, ds, output)
            
                new_outputs += 1

    else:

        # write out only the output CSV (default)

        output = "qap_functional_temporal"

        if len(resource_pool[output]) == 2:

            ds = pe.Node(nio.DataSink(), name='datasink_%s' % output)
            ds.inputs.base_directory = output_dir
    
            node, out_file = resource_pool[output]

            workflow.connect(node, out_file, ds, output)
            
            new_outputs += 1
         
    

    # run the pipeline (if there is anything to do)
    if new_outputs > 0:
    
        workflow.write_graph(dotfilename=os.path.join(output_dir, run_name + \
                                                          ".dot"), \
                                                          simple_form=False)

        workflow.run(plugin='MultiProc', plugin_args= \
                         {'n_procs': config["num_cores_per_subject"]})

    else:

        print("\nEverything is already done for subject %s." % sub_id)


    # Remove working directory when done
    if config["write_all_outputs"] == False:
        try:
            work_dir = os.path.join(workflow.base_dir, scan_id)

            if os.path.exists(work_dir):
                import shutil
                shutil.rmtree(work_dir)
        except Exception:
            print("Couldn't remove the working directory!")


    pipeline_end_stamp = strftime("%Y-%m-%d_%H:%M:%S")
    
    pipeline_end_time = time.time()

    logger.info("Elapsed time (minutes) since last start: %s" \
                % ((pipeline_end_time - pipeline_start_time)/60))

    logger.info("Pipeline end time: %s" % pipeline_end_stamp)



    return workflow
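The resource pool used above follows a simple convention: entries that already exist on disk are plain file paths, while entries generated by the workflow are (node, output_name) tuples that get wired into a DataSink. A small, illustrative sketch of that convention (names and paths are placeholders):

resource_pool = {
    # already on disk -> plain path
    'functional_scan': '/data/sub-01/rest_1/func.nii.gz',
    # produced by this workflow -> (node, output name) tuple, e.g.
    # 'qap_functional_temporal': (qap_node, 'outputspec.qap_csv'),
}

for name, value in resource_pool.items():
    if isinstance(value, tuple) and len(value) == 2:
        print("%s will be written out by a DataSink" % name)
    else:
        print("%s is a precomputed file: %s" % (name, value))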
Exemple #54
0
        # enable logging

        from nipype import config
        from nipype import logging

        config.update_config(
            {'logging': {
                'log_directory': log_dir,
                'log_to_file': True
            }})

        # Temporarily disable until solved
        #logging.update_logging(config)

        iflogger = logging.getLogger('interface')
        ''' create the list of paths to all output files to go to model '''
        # create the 'ordered_paths' list, which is a list of all of the
        # output paths of the output files being included in the current
        # group-level analysis model
        #     'ordered_paths' is later connected to the 'zmap_files' input
        #     of the group analysis workflow - the files listed in this list
        #     are merged into the merged 4D file that goes into group analysis

        with open(subject_list, 'r') as group_sublist:
            sublist_items = group_sublist.readlines()

        input_subject_list = [line.rstrip('\n') for line in sublist_items
                              if line != '\n' and not line.startswith('#')]

        ordered_paths = []
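        # Illustrative only (not part of the original code): the same parsing
        # applied to an in-memory example of the expected subject-list format,
        # one subject per line with '#' marking comments.
        _example_lines = ["# group-level subjects\n", "sub001\n", "\n", "sub002\n"]
        _example_subjects = [line.rstrip('\n') for line in _example_lines
                             if line != '\n' and not line.startswith('#')]
        # _example_subjects == ['sub001', 'sub002']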
import nipype.pipeline.engine as pe
import nipype.interfaces.utility as util
import nipype.interfaces.diffusion_toolkit as dtk
import nipype.interfaces.fsl as fsl
import nipype.interfaces.freesurfer as fs
import nipype.interfaces.mrtrix as mrtrix
import nipype.interfaces.camino as camino
from nipype.utils.filemanip import split_filename
import nibabel as nib

from nipype.interfaces.base import CommandLine, CommandLineInputSpec,\
    traits, TraitedSpec, BaseInterface, BaseInterfaceInputSpec
import nipype.interfaces.base as nibase

from nipype import logging
iflogger = logging.getLogger('interface')

# Trait imports required by the configuration class below
# (truncated out of this snippet)
from traits.api import HasTraits, Str, Int, Enum, List, File
from traitsui.api import CheckListEditor

# Reconstruction configuration

class DTK_recon_config(HasTraits):
    imaging_model = Str
    maximum_b_value = Int(1000)
    gradient_table_file = Enum('siemens_06',['mgh_dti_006','mgh_dti_018','mgh_dti_030','mgh_dti_042','mgh_dti_060','mgh_dti_072','mgh_dti_090','mgh_dti_120','mgh_dti_144',
                          'siemens_06','siemens_12','siemens_20','siemens_30','siemens_64','siemens_256','Custom...'])
    gradient_table = Str
    custom_gradient_table = File
    flip_table_axis = List(editor=CheckListEditor(values=['x','y','z'],cols=3))
    dsi_number_of_directions = Enum([514,257,124])
    number_of_directions = Int(514)
    number_of_output_directions = Int(181)
    recon_matrix_file = Str('DSI_matrix_515x181.dat')
Exemple #56
0
def write_to_log(workflow, log_dir, index, inputs, scan_id):
    """
    Method to write into log file the status of the workflow run.
    """

    import os
    import time
    import datetime

    from CPAC import __version__
    from nipype import logging

    iflogger = logging.getLogger('nipype.interface')

    version = __version__
    subject_id = os.path.basename(log_dir)

    if scan_id is None:
        scan_id = "scan_anat"

    strategy = ""
    ts = time.time()
    stamp = datetime.datetime.fromtimestamp(ts).strftime('%Y-%m-%d %H:%M:%S')

    try:
        if workflow != 'DONE':
            wf_path = \
                os.path.dirname((os.getcwd()).split(workflow)[1]).strip("/")

            if wf_path and wf_path != "":
                if '/' in wf_path:
                    scan_id, strategy = wf_path.split('/', 1)
                    scan_id = scan_id.strip('_')
                    strategy = strategy.replace("/", "")
                else:
                    scan_id = wf_path.strip('_')

            file_path = os.path.join(log_dir, scan_id, workflow)

            try:
                os.makedirs(file_path)
            except Exception:
                iflogger.info("filepath already exists, filepath - {0}, "
                              "curr_dir - {1}".format(file_path, os.getcwd()))

        else:
            file_path = os.path.join(log_dir, scan_id)
    except Exception:
        print("ERROR in write log")
        raise

    try:
        os.makedirs(file_path)
    except Exception:
        iflogger.info("filepath already exists, "
                      "filepath: {0}, "
                      "curr_dir: {1}".format(file_path, os.getcwd()))

    out_file = os.path.join(file_path, 'log_{0}.yml'.format(strategy))

    iflogger.info("CPAC custom log:")

    if isinstance(inputs, list):
        inputs = inputs[0]

    if os.path.exists(inputs):
        status_msg = "wf_status: DONE"
        iflogger.info("version: {0}, "
                      "timestamp: {1}, "
                      "subject_id: {2}, "
                      "scan_id: {3}, "
                      "strategy: {4}, "
                      "workflow: {5}, "
                      "status: COMPLETED".format(str(version), str(stamp),
                                                 subject_id, scan_id, strategy,
                                                 workflow))
    else:
        status_msg = "wf_status: ERROR"
        iflogger.info("version: {0}, "
                      "timestamp: {1}, "
                      "subject_id: {2}, "
                      "scan_id: {3}, "
                      "strategy: {4}, "
                      "workflow: {5}, "
                      "status: ERROR".format(str(version), str(stamp),
                                             subject_id, scan_id, strategy,
                                             workflow))

    with open(out_file, 'w') as f:
        f.write("version: {0}\n".format(str(version)))
        f.write("timestamp: {0}\n".format(str(stamp)))
        f.write("pipeline_index: {0}\n".format(index))
        f.write("subject_id: {0}\n".format(subject_id))
        f.write("scan_id: {0}\n".format(scan_id))
        f.write("strategy: {0}\n".format(strategy))
        f.write("workflow_name: {0}\n".format(workflow))
        f.write(status_msg)

    return out_file
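# For reference, a log_<strategy>.yml written by write_to_log above has this
# shape; the values here are illustrative, not real output.
EXAMPLE_LOG_SKETCH = (
    "version: 1.8.0\n"
    "timestamp: 2020-01-01 12:00:00\n"
    "pipeline_index: 1\n"
    "subject_id: sub-01\n"
    "scan_id: scan_rest\n"
    "strategy: _compcor\n"
    "workflow_name: anat_preproc\n"
    "wf_status: DONE\n"
)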
def include_gmsh_tensor_elements(mesh_file, tensor_file, mask_file, mask_threshold=0.5, lower_triangular=True):
    import numpy as np
    import nibabel as nb
    from nipype.utils.filemanip import split_filename
    import os.path as op
    from forward.mesh import read_mesh
    from nipype import logging
    import shutil

    iflogger = logging.getLogger("interface")

    # Load 4D (6 volume upper or lower triangular) conductivity tensor image
    tensor_image = nb.load(tensor_file)
    tensor_data = np.flipud(tensor_image.get_data())

    # Correct the tensors after flipping in the X direction
    tensor_data[:, :, :, 1] *= -1
    if lower_triangular:
        tensor_data[:, :, :, 3] *= -1
    else:
        tensor_data[:, :, :, 2] *= -1

    # Load mask (usually fractional anisotropy) image
    mask_image = nb.load(mask_file)
    mask_data = np.flipud(mask_image.get_data())
    header = tensor_image.get_header()

    # Make sure the files share the same (3D) dimensions before continuing
    assert np.shape(tensor_data)[0:3] == np.shape(mask_data)[0:3]

    # Define various constants
    elements_to_consider = [1001]  # Use only white matter
    vx, vy, vz = header.get_zooms()[0:3]
    max_x, max_y, max_z = np.shape(tensor_data)[0:3]
    halfx, halfy, halfz = np.array((vx * max_x, vy * max_y, vz * max_z)) / 2.0

    mesh_data, _, _, _ = read_mesh(mesh_file, elements_to_consider)

    # Create the output mesh file
    path, name, ext = split_filename(mesh_file)
    out_file = op.abspath(name + "_cond.msh")
    iflogger.info("Copying current mesh file to %s" % out_file)
    shutil.copyfile(mesh_file, out_file)

    f = open(out_file, "a")  # Append to the end of the file
    iflogger.info("Appending Conductivity tensors to %s" % out_file)

    # Write the tag information to the file:
    num_polygons = len(mesh_data)
    f.write("$ElementData\n")
    str_tag = '"Conductivity"'
    timestep = 0.0001

    f.write("1\n")  # Num String tags
    f.write(str_tag + "\n")
    f.write("1\n")  # Num Real tags
    f.write("%f\n" % timestep)

    # Three integer tags: timestep, num field components, num elements
    f.write("3\n")  # Three int tags
    f.write("0\n")  # Time step index
    f.write("9\n")  # Num field components

    # Get the centroid of all white matter elements
    # Find out which voxel they lie inside
    iflogger.info("Getting tensor for each element")
    # ipdb.set_trace()
    nonzero = 0
    elem_list = []

    if lower_triangular:
        for idx, poly in enumerate(mesh_data):
            i = np.round((poly["centroid"][0] + halfx) / vx).astype(int)
            j = np.round((poly["centroid"][1] + halfy) / vy).astype(int)
            k = np.round((poly["centroid"][2] + halfz) / vz).astype(int)
            T = tensor_data[i, j, k]
            # Keep elements whose tensor is nonzero and whose mask value
            # (e.g. FA) reaches the threshold
            if not all(T == 0) and mask_data[i, j, k] >= mask_threshold:
                elementdata_str = "%d %e %e %e %e %e %e %e %e %e\n" % (
                    poly["element_id"],
                    T[0],
                    T[1],
                    T[3],
                    T[1],
                    T[2],
                    T[4],
                    T[3],
                    T[4],
                    T[5],
                )
                elem_list.append(elementdata_str)
                nonzero += 1
            # iflogger.info("%3.3f%%" % (float(idx)/num_polygons*100.0))
    else:
        for idx, poly in enumerate(mesh_data):
            i = np.round((poly["centroid"][0] + halfx) / vx).astype(int)
            j = np.round((poly["centroid"][1] + halfy) / vy).astype(int)
            k = np.round((poly["centroid"][2] + halfz) / vz).astype(int)
            T = tensor_data[i, j, k]
            # Same filter as above, for the upper-triangular ordering
            if not all(T == 0) and mask_data[i, j, k] >= mask_threshold:
                elementdata_str = "%d %e %e %e %e %e %e %e %e %e\n" % (
                    poly["element_id"],
                    T[0],
                    T[1],
                    T[2],
                    T[1],
                    T[3],
                    T[4],
                    T[2],
                    T[4],
                    T[5],
                )
                elem_list.append(elementdata_str)
                nonzero += 1
            # iflogger.info("%3.3f%%" % (float(idx)/num_polygons*100.0))

    f.write("%d\n" % nonzero)  # Num nonzero field components
    for elementdata_str in elem_list:
        f.write(elementdata_str)

    f.write("$EndElementData\n")

    f.write("$ElementData\n")
    str_tag = '"FA"'
    timestep = 0.0002

    f.write("1\n")  # Num String tags
    f.write(str_tag + "\n")
    f.write("1\n")  # Num Real tags
    f.write("%f\n" % timestep)

    # Three integer tags: timestep, num field components, num elements
    f.write("3\n")  # Three int tags
    f.write("1\n")  # Time step index
    f.write("1\n")  # Num field components

    # Get the centroid of all white matter elements
    # Find out which voxel they lie inside
    iflogger.info("Writing FA for each element")
    # ipdb.set_trace()
    nonzero = 0
    elem_list = []
    for idx, poly in enumerate(mesh_data):
        i = np.round((poly["centroid"][0] + halfx) / vx).astype(int)
        j = np.round((poly["centroid"][1] + halfy) / vy).astype(int)
        k = np.round((poly["centroid"][2] + halfz) / vz).astype(int)
        if mask_data[i, j, k] > 0:
            elementdata_str = "%d %e\n" % (poly["element_id"], mask_data[i, j, k])
            elem_list.append(elementdata_str)
            nonzero += 1
        # iflogger.info("%3.3f%%" % (float(idx)/num_polygons*100.0))

    f.write("%d\n" % nonzero)  # Num nonzero field components
    for elementdata_str in elem_list:
        f.write(elementdata_str)

    f.write("$EndElementData\n")

    f.close()

    iflogger.info("Finished writing to %s" % out_file)
    return out_file
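The two blocks appended above follow Gmsh's $ElementData layout: string tags, real tags, three integer tags (time step index, number of field components, number of element rows), then one row per element. A self-contained sketch of that layout with toy values (not tied to any real mesh):

def append_elementdata_sketch(msh_path):
    # Toy rows: element id followed by nine tensor components.
    rows = ["1 2e-10 0 0 0 2e-10 0 0 0 2e-10"]
    with open(msh_path, "a") as fh:
        fh.write("$ElementData\n")
        fh.write("1\n")                  # number of string tags
        fh.write('"Conductivity"\n')     # the view name shown in Gmsh
        fh.write("1\n")                  # number of real tags
        fh.write("0.0001\n")             # e.g. a time value
        fh.write("3\n")                  # number of integer tags
        fh.write("0\n")                  # time step index
        fh.write("9\n")                  # field components per element
        fh.write("%d\n" % len(rows))     # number of element rows
        for row in rows:
            fh.write(row + "\n")
        fh.write("$EndElementData\n")

# Usage (path is a placeholder): append_elementdata_sketch('example.msh')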
Exemple #58
0
from nipype import logging
logger = logging.getLogger('workflow')

import nipype.pipeline.engine as pe
import nipype.interfaces.fsl as fsl
import nipype.interfaces.utility as util
from nipype.interfaces.afni import preprocess
from CPAC.utils import dbg_file_lineno


# workflow to edit the scan to the prescribed TRs
def create_wf_edit_func(wf_name="edit_func"):
    """
    Workflow Inputs::
    
        inputspec.func : func file or a list of func/rest nifti file 
            User input functional(T2*) Image
            
        inputspec.start_idx : string 
            Starting volume/slice of the functional image (optional)
            
        inputspec.stop_idx : string
            Last volume/slice of the functional image (optional)
            
    Workflow Outputs::
    
        outputspec.edited_func : string (nifti file)
            Path to Output image with the initial few slices dropped
          
           
    Order of commands:
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

.. autofunction:: init_bold_t2s_wf

"""
from nipype import logging
from nipype.pipeline import engine as pe
from nipype.interfaces import utility as niu

from niworkflows.engine.workflows import LiterateWorkflow as Workflow

from ...interfaces import T2SMap
from .util import init_skullstrip_bold_wf

DEFAULT_MEMORY_MIN_GB = 0.01
LOGGER = logging.getLogger('nipype.workflow')


# pylint: disable=R0914
def init_bold_t2s_wf(echo_times, mem_gb, omp_nthreads,
                     t2s_coreg=False, name='bold_t2s_wf'):
    """
    This workflow wraps the `tedana`_ `T2* workflow`_ to optimally
    combine multiple echos and derive a T2* map for optional use as a
    coregistration target.

    The following steps are performed:

    #. :abbr:`HMC (head motion correction)` on individual echo files.
    #. Compute the T2* map
    #. Create an optimally combined ME-EPI time series
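Based only on the signature shown above (the function body is truncated here), a hedged usage sketch of instantiating this workflow; echo times and resource values are placeholders:

bold_t2s_wf = init_bold_t2s_wf(
    echo_times=[0.0145, 0.0389, 0.0633],  # three echoes, in seconds
    mem_gb=3.0,
    omp_nthreads=4,
    t2s_coreg=False,
    name='bold_t2s_wf',
)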
Exemple #60
0
import nipype.pipeline.engine as pe
import nipype.interfaces.utility as util
from nipype import logging

# import matplotlib.pyplot as plt

#  import nipype.interfaces.camino2trackvis as camino2trackvis
# import cmtklib.interfaces.camino2trackvis as camino2trackvis
from cmtklib.interfaces.mrtrix3 import Erode, StreamlineTrack
from cmtklib.interfaces.dipy import DirectionGetterTractography, TensorInformedEudXTractography
from cmtklib.interfaces.misc import Tck2Trk, extractHeaderVoxel2WorldMatrix, \
    make_mrtrix_seeds

# from cmtklib.diffusion import filter_fibers

# Trait imports required by the configuration class below (truncated out of
# this snippet); the class uses both the bare names and the traits. prefix.
from traits.api import HasTraits, Str, Bool, Int, Float
import traits.api as traits

iflogger = logging.getLogger('nipype.interface')


class Dipy_tracking_config(HasTraits):
    imaging_model = Str
    tracking_mode = Str
    SD = Bool
    number_of_seeds = Int(1000)
    seed_density = Float(
        1.0,
        desc='Number of seeds to place along each direction. '
        'A density of 2 is the same as [2, 2, 2] and will result in a total of 8 seeds per voxel.'
    )
    fa_thresh = Float(0.2)
    step_size = traits.Float(0.5)
    max_angle = Float(25.0)