Code Example #1
File: track.py  Project: sebastientourbier/PyNets
def track_ensemble(target_samples, atlas_data_wm_gm_int, labels_im_file,
                   recon_path, sphere, directget, curv_thr_list, step_list,
                   track_type, maxcrossing, roi_neighborhood_tol, min_length,
                   waymask, B0_mask, t1w2dwi, gm_in_dwi, vent_csf_in_dwi,
                   wm_in_dwi, tiss_class, cache_dir):
    """
    Perform native-space ensemble tractography, restricted to a vector of ROI
    masks.

    Parameters
    ----------
    target_samples : int
        Total number of streamline samples specified to generate streams.
    atlas_data_wm_gm_int : str
        File path to Nifti1Image in T1w-warped native diffusion space,
        restricted to wm-gm interface.
    labels_im_file : str
        File path to parcellation/labels Nifti1Image in T1w-warped native
        diffusion space.
    recon_path : str
        File path to diffusion reconstruction model.
    tiss_class : str
        Tissue classification method.
    sphere : obj
        DiPy object for modeling diffusion directions on a sphere.
    directget : str
        The statistical approach to tracking. Options are: det (deterministic),
        closest (clos), and prob (probabilistic).
    curv_thr_list : list
        List of integer curvature thresholds used to perform ensemble tracking.
    step_list : list
        List of float step-sizes used to perform ensemble tracking.
    track_type : str
        Tracking algorithm used (e.g. 'local' or 'particle').
    maxcrossing : int
        Maximum number of diffusion directions that can be assumed per voxel
        while tracking.
    roi_neighborhood_tol : float
        Distance (in the units of the streamlines, usually mm). If any
        coordinate in the streamline is within this distance from the center
        of any voxel in the ROI, the filtering criterion is set to True for
        this streamline, otherwise False. Defaults to the distance between
        the center of each voxel and the corner of the voxel.
    min_length : int
        Minimum fiber length threshold in mm.
    waymask : str
        File path to tractography constraint mask in native diffusion space.
    B0_mask : str
        File path to B0 brain mask Nifti1Image.
    t1w2dwi : str
        File path to T1w image warped to native diffusion space.
    gm_in_dwi : str
        File path to gray-matter segmentation Nifti1Image in native diffusion
        space.
    vent_csf_in_dwi : str
        File path to ventricular CSF segmentation Nifti1Image in native
        diffusion space.
    wm_in_dwi : str
        File path to white-matter segmentation Nifti1Image in native diffusion
        space.
    cache_dir : str
        Path to a directory used for joblib caching and temporary files.
    n_seeds_per_iter : int
        Number of seeds from which to initiate tracking for each unique
        ensemble combination, read from runconfig.yaml. By default this is
        set to 250.
    max_length : int
        Maximum number of steps to restrict tracking, read from
        runconfig.yaml.
    pft_back_tracking_dist : float
        Distance in mm to back track before starting the particle filtering
        tractography. The total particle filtering tractography distance is
        equal to back_tracking_dist + front_tracking_dist. By default this is
        set to 2 mm.
    pft_front_tracking_dist : float
        Distance in mm to run the particle filtering tractography after the
        back track distance. The total particle filtering tractography
        distance is equal to back_tracking_dist + front_tracking_dist. By
        default this is set to 1 mm.
    particle_count : int
        Number of particles to use in the particle filter.
    min_separation_angle : float
        The minimum angle between directions [0, 90].

    Returns
    -------
    streamlines : ArraySequence
        DiPy list/array-like object of streamline points from tractography.

    References
    ----------
    .. [1] Takemura, H., Caiafa, C. F., Wandell, B. A., & Pestilli, F. (2016).
      Ensemble Tractography. PLoS Computational Biology.
      https://doi.org/10.1371/journal.pcbi.1004692

    """
    import os
    import nibabel as nib
    import gc
    import time
    import pkg_resources
    import yaml
    import shutil
    from joblib import Parallel, delayed
    import itertools
    from pynets.dmri.track import run_tracking
    from colorama import Fore, Style
    from pynets.dmri.dmri_utils import generate_sl
    from nibabel.streamlines.array_sequence import concatenate, ArraySequence
    from pynets.core.utils import save_3d_to_4d
    from nilearn.masking import intersect_masks
    from nilearn.image import math_img

    cache_dir = f"{cache_dir}/joblib_tracking"
    os.makedirs(cache_dir, exist_ok=True)

    with open(pkg_resources.resource_filename("pynets", "runconfig.yaml"),
              "r") as stream:
        hardcoded_params = yaml.load(stream, Loader=yaml.FullLoader)
        nthreads = hardcoded_params["nthreads"][0]
        n_seeds_per_iter = \
            hardcoded_params['tracking']["n_seeds_per_iter"][0]
        max_length = \
            hardcoded_params['tracking']["max_length"][0]
        pft_back_tracking_dist = \
            hardcoded_params['tracking']["pft_back_tracking_dist"][0]
        pft_front_tracking_dist = \
            hardcoded_params['tracking']["pft_front_tracking_dist"][0]
        particle_count = \
            hardcoded_params['tracking']["particle_count"][0]
        min_separation_angle = \
            hardcoded_params['tracking']["min_separation_angle"][0]

    all_combs = list(itertools.product(step_list, curv_thr_list))

    # Construct seeding mask
    seeding_mask = f"{cache_dir}/seeding_mask.nii.gz"
    if waymask is not None and os.path.isfile(waymask):
        atlas_data_wm_gm_int_img = intersect_masks(
            [
                math_img("img > 0.0075", img=nib.load(waymask)),
                math_img("img > 0.001", img=nib.load(atlas_data_wm_gm_int)),
                math_img("img > 0.001", img=nib.load(labels_im_file))
            ],
            threshold=0,
            connected=False,
        )
        nib.save(atlas_data_wm_gm_int_img, seeding_mask)
    else:
        atlas_data_wm_gm_int_img = intersect_masks(
            [
                math_img("img > 0.001", img=nib.load(atlas_data_wm_gm_int)),
                math_img("img > 0.001", img=nib.load(labels_im_file))
            ],
            threshold=0,
            connected=False,
        )
        nib.save(atlas_data_wm_gm_int_img, seeding_mask)

    tissues4d = save_3d_to_4d([
        B0_mask, labels_im_file, seeding_mask, t1w2dwi, gm_in_dwi,
        vent_csf_in_dwi, wm_in_dwi
    ])

    # Commence Ensemble Tractography
    start = time.time()
    stream_counter = 0

    all_streams = []
    ix = 0
    while float(stream_counter) < float(target_samples) and \
        float(ix) < 0.50*float(len(all_combs)):
        with Parallel(n_jobs=nthreads,
                      backend='loky',
                      mmap_mode='r+',
                      temp_folder=cache_dir,
                      verbose=10) as parallel:
            out_streams = parallel(
                delayed(run_tracking)
                (i, recon_path, n_seeds_per_iter, directget, maxcrossing,
                 max_length, pft_back_tracking_dist, pft_front_tracking_dist,
                 particle_count, roi_neighborhood_tol, waymask, min_length,
                 track_type, min_separation_angle, sphere, tiss_class,
                 tissues4d, cache_dir) for i in all_combs)

            out_streams = [
                i for i in out_streams
                if i is not None and len(i) > 0
            ]

            if len(out_streams) > 0:
                out_streams = concatenate(out_streams, axis=0)

            if len(out_streams) < 100:
                ix += 1
                print("Fewer than 100 streamlines tracked on last iteration."
                      " loosening tolerance and anatomical constraints...")
                if track_type != 'particle':
                    tiss_class = 'wb'
                roi_neighborhood_tol = float(roi_neighborhood_tol) * 1.05
                min_length = float(min_length) * 0.95
                continue
            else:
                ix -= 1

            # Append streamline generators to prevent exponential growth
            # in memory consumption
            all_streams.extend([generate_sl(i) for i in out_streams])
            stream_counter += len(out_streams)
            del out_streams

            print("%s%s%s%s" % (
                "\nCumulative Streamline Count: ",
                Fore.CYAN,
                stream_counter,
                "\n",
            ))
            gc.collect()
            print(Style.RESET_ALL)

    if ix >= 0.50*len(all_combs) and \
        float(stream_counter) < float(target_samples):
        print(f"Tractography failed. {ix} sampling iterations with fewer "
              f"than 100 streamlines. Are you using a waymask? If so, it may "
              f"be too restrictive.")
        return ArraySequence()
    else:
        print("Tracking Complete: ", str(time.time() - start))

    del parallel, all_combs
    shutil.rmtree(cache_dir, ignore_errors=True)

    if stream_counter != 0:
        print('Generating final ArraySequence...')
        return ArraySequence([ArraySequence(i) for i in all_streams])
    else:
        print('No streamlines generated!')
        return ArraySequence()
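
A minimal invocation sketch for reference (every file path and parameter
value below is a hypothetical placeholder, not part of the original listing;
the sphere comes from DiPy's get_sphere):

# Hypothetical usage of track_ensemble; all paths are placeholders.
from dipy.data import get_sphere

streamlines = track_ensemble(
    target_samples=100000,
    atlas_data_wm_gm_int='/tmp/sub-01/wm_gm_int_in_dwi.nii.gz',
    labels_im_file='/tmp/sub-01/labels_in_dwi.nii.gz',
    recon_path='/tmp/sub-01/recon_csd.h5',
    sphere=get_sphere('repulsion724'),    # direction-sampling sphere
    directget='prob',                     # probabilistic direction getter
    curv_thr_list=[40, 30],               # ensemble curvature thresholds
    step_list=[0.1, 0.2, 0.3, 0.4, 0.5],  # ensemble step sizes
    track_type='local',
    maxcrossing=3,
    roi_neighborhood_tol=8,
    min_length=10,
    waymask=None,
    B0_mask='/tmp/sub-01/B0_mask.nii.gz',
    t1w2dwi='/tmp/sub-01/t1w_in_dwi.nii.gz',
    gm_in_dwi='/tmp/sub-01/gm_in_dwi.nii.gz',
    vent_csf_in_dwi='/tmp/sub-01/vent_csf_in_dwi.nii.gz',
    wm_in_dwi='/tmp/sub-01/wm_in_dwi.nii.gz',
    tiss_class='act',
    cache_dir='/tmp/sub-01/cache')
print(f"{len(streamlines)} streamlines tracked")
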
Code Example #2
def streams2graph(atlas_mni, streams, dir_path, track_type, target_samples,
                  conn_model, network, node_size, dens_thresh, ID, roi,
                  min_span_tree, disp_filt, parc, prune, atlas, uatlas, labels,
                  coords, norm, binary, directget, warped_fa, min_length,
                  error_margin):
    """
    Use tracked streamlines as a basis for estimating a structural connectome.

    Parameters
    ----------
    atlas_mni : str
        File path to atlas parcellation Nifti1Image in T1w-warped MNI space.
    streams : str
        File path to streamline array sequence in .trk format.
    dir_path : str
        Path to directory containing subject derivative data for a given
        pynets run.
    track_type : str
        Tracking algorithm used (e.g. 'local' or 'particle').
    target_samples : int
        Total number of streamline samples specified to generate streams.
    conn_model : str
        Connectivity reconstruction method (e.g. 'csa', 'tensor', 'csd').
    network : str
        Resting-state network based on Yeo-7 and Yeo-17 naming (e.g. 'Default')
        used to filter nodes in the study of brain subgraphs.
    node_size : int
        Spherical centroid node size in the case that coordinate-based
        centroids are used as ROIs for tracking.
    dens_thresh : bool
        Indicates whether a target graph density is to be used as the basis for
        thresholding.
    ID : str
        A subject id or other unique identifier.
    roi : str
        File path to binarized/boolean region-of-interest Nifti1Image file.
    min_span_tree : bool
        Indicates whether local thresholding from the Minimum Spanning Tree
        should be used.
    disp_filt : bool
        Indicates whether local thresholding using a disparity filter and
        'backbone network' should be used.
    parc : bool
        Indicates whether to use parcels instead of coordinates as ROI nodes.
    prune : bool
        Indicates whether to prune final graph of disconnected nodes/isolates.
    atlas : str
        Name of atlas parcellation used.
    uatlas : str
        File path to atlas parcellation Nifti1Image in MNI template space.
    labels : list
        List of string labels corresponding to graph nodes.
    coords : list
        List of (x, y, z) tuples corresponding to a coordinate atlas used or
        which represent the center-of-mass of each parcellation node.
    norm : int
        Indicates method of normalizing resulting graph.
    binary : bool
        Indicates whether to binarize resulting graph edges to form an
        unweighted graph.
    directget : str
        The statistical approach to tracking. Options are:
        det (deterministic), closest (clos), boot (bootstrapped),
        and prob (probabilistic).
    warped_fa : str
        File path to MNI-space warped FA Nifti1Image.
    min_length : int
        Minimum fiber length threshold in mm to restrict tracking.
    error_margin : int
        Euclidean margin of error for classifying a streamline as a connection
        to an ROI. Default is 2 voxels.

    Returns
    -------
    atlas_mni : str
        File path to atlas parcellation Nifti1Image in T1w-warped MNI space.
    streams : str
        File path to streamline array sequence in .trk format.
    conn_matrix : array
        Adjacency matrix stored as an m x n array of nodes and edges.
    track_type : str
        Tracking algorithm used (e.g. 'local' or 'particle').
    target_samples : int
        Total number of streamline samples specified to generate streams.
    dir_path : str
        Path to directory containing subject derivative data for given run.
    conn_model : str
        Connectivity reconstruction method (e.g. 'csa', 'tensor', 'csd').
    network : str
        Resting-state network based on Yeo-7 and Yeo-17 naming (e.g. 'Default')
        used to filter nodes in the study of brain subgraphs.
    node_size : int
        Spherical centroid node size in the case that coordinate-based
        centroids are used as ROIs for tracking.
    dens_thresh : bool
        Indicates whether a target graph density is to be used as the basis for
        thresholding.
    ID : str
        A subject id or other unique identifier.
    roi : str
        File path to binarized/boolean region-of-interest Nifti1Image file.
    min_span_tree : bool
        Indicates whether local thresholding from the Minimum Spanning Tree
        should be used.
    disp_filt : bool
        Indicates whether local thresholding using a disparity filter and
        'backbone network' should be used.
    parc : bool
        Indicates whether to use parcels instead of coordinates as ROI nodes.
    prune : bool
        Indicates whether to prune final graph of disconnected nodes/isolates.
    atlas : str
        Name of atlas parcellation used.
    uatlas : str
        File path to atlas parcellation Nifti1Image in MNI template space.
    labels : list
        List of string labels corresponding to graph nodes.
    coords : list
        List of (x, y, z) tuples corresponding to a coordinate atlas used or
        which represent the center-of-mass of each parcellation node.
    norm : int
        Indicates method of normalizing resulting graph.
    binary : bool
        Indicates whether to binarize resulting graph edges to form an
        unweighted graph.
    directget : str
        The statistical approach to tracking. Options are: det (deterministic),
        closest (clos), boot (bootstrapped), and prob (probabilistic).
    min_length : int
        Minimum fiber length threshold in mm to restrict tracking.
    error_margin : int
        Euclidean margin of error for classifying a streamline as a connection
        to an ROI. Default is 2 voxels.

    References
    ----------
    .. [1] Sporns, O., Tononi, G., & Kötter, R. (2005). The human connectome:
      A structural description of the human brain. PLoS Computational Biology.
      https://doi.org/10.1371/journal.pcbi.0010042
    .. [2] Sotiropoulos, S. N., & Zalesky, A. (2019). Building connectomes
      using diffusion MRI: why, how and but. NMR in Biomedicine.
      https://doi.org/10.1002/nbm.3752
    .. [3] Chung, M. K., Hanson, J. L., Adluru, N., Alexander, A. L., Davidson,
      R. J., & Pollak, S. D. (2017). Integrative Structural Brain Network
      Analysis in Diffusion Tensor Imaging. Brain Connectivity.
      https://doi.org/10.1089/brain.2016.0481
    """
    import gc
    import time
    import pkg_resources
    import yaml
    import numpy as np
    import nibabel as nib
    from dipy.tracking.streamline import Streamlines, values_from_volume
    from dipy.tracking._utils import _mapping_to_voxel, _to_voxel_coordinates
    import networkx as nx
    from itertools import combinations
    from collections import defaultdict
    from pynets.core import utils, nodemaker
    from pynets.dmri.dmri_utils import generate_sl
    from dipy.io.streamline import load_tractogram
    from dipy.io.stateful_tractogram import Space, Origin

    with open(pkg_resources.resource_filename("pynets", "runconfig.yaml"),
              "r") as stream:
        hardcoded_params = yaml.load(stream, Loader=yaml.FullLoader)
        fa_wei = hardcoded_params["StructuralNetworkWeighting"][
            "fa_weighting"][0]
        fiber_density = hardcoded_params["StructuralNetworkWeighting"][
            "fiber_density"][0]
        overlap_thr = hardcoded_params["StructuralNetworkWeighting"][
            "overlap_thr"][0]
        roi_neighborhood_tol = \
            hardcoded_params['tracking']["roi_neighborhood_tol"][0]

    start = time.time()

    if float(roi_neighborhood_tol) <= float(error_margin):
        raise ValueError('roi_neighborhood_tol preset cannot be less than or '
                         'equal to the value of the structural connectome '
                         'error_margin parameter.')
    else:
        print(f"Using fiber-ROI intersection tolerance: {error_margin}...")

    # Load FA
    fa_img = nib.load(warped_fa)

    # Load parcellation
    roi_img = nib.load(atlas_mni)
    atlas_data = np.around(np.asarray(roi_img.dataobj))
    roi_zooms = roi_img.header.get_zooms()
    roi_shape = roi_img.shape

    # Read Streamlines
    streamlines = [
        i.astype(np.float32) for i in Streamlines(
            load_tractogram(
                streams, fa_img, to_origin=Origin.NIFTI,
                to_space=Space.VOXMM).streamlines)
    ]

    roi_img.uncache()

    if fa_wei is True:
        fa_weights = values_from_volume(
            np.asarray(fa_img.dataobj, dtype=np.float32), streamlines,
            np.eye(4))
        global_fa_weights = list(utils.flatten(fa_weights))
        min_global_fa_wei = min([i for i in global_fa_weights if i > 0])
        max_global_fa_wei = max(global_fa_weights)
        fa_weights_norm = []
        # Here we normalize by global FA
        for val_list in fa_weights:
            fa_weights_norm.append(
                np.nanmean((val_list - min_global_fa_wei) /
                           (max_global_fa_wei - min_global_fa_wei)))

    # Make streamlines into generators to keep memory at a minimum
    total_streamlines = len(streamlines)
    sl = [generate_sl(i) for i in streamlines]
    del streamlines

    # Instantiate empty networkX graph object & dictionary and create
    # voxel-affine mapping
    lin_T, offset = _mapping_to_voxel(np.eye(4))
    mx = len(np.unique(atlas_data.astype("uint16"))) - 1
    g = nx.Graph(ecount=0, vcount=mx)
    edge_dict = defaultdict(int)
    node_dict = dict(
        zip(np.unique(atlas_data.astype("uint16"))[1:],
            np.arange(mx) + 1))

    # Add empty vertices with label volume attributes
    for node in range(1, mx + 1):
        g.add_node(node,
                   roi_volume=np.sum(atlas_data.astype("uint16") == node))

    # Build graph
    pc = 0
    bad_idxs = []
    fiberlengths = {}
    fa_weights_dict = {}
    print(f"Quantifying fiber-ROI intersection for {atlas}:")
    for ix, s in enumerate(sl):
        # Percent counter
        pcN = int(round(100 * float(ix / total_streamlines)))
        if pcN % 10 == 0 and ix > 0 and pcN > pc:
            pc = pcN
            print(f"{pcN}%")

        # Map the streamline coordinates to voxel coordinates and get labels
        # for label_volume
        vox_coords = _to_voxel_coordinates(Streamlines(s), lin_T, offset)

        lab_coords = [
            nodemaker.get_sphere(coord, error_margin, roi_zooms, roi_shape)
            for coord in vox_coords
        ]
        [i, j, k] = np.vstack(np.array(lab_coords)).T

        # get labels for label_volume
        lab_arr = atlas_data[i, j, k]
        # print(lab_arr)
        endlabels = []
        for jx, lab in enumerate(np.unique(lab_arr).astype("uint32")):
            if (lab > 0) and (np.sum(lab_arr == lab) >= overlap_thr):
                try:
                    endlabels.append(node_dict[lab])
                except KeyError:
                    bad_idxs.append(jx)
                    print(f"Label {lab} missing from parcellation. Check "
                          f"registration and ensure valid input parcellation "
                          f"file.")

        edges = combinations(endlabels, 2)
        for edge in edges:
            # Get fiber lengths along edge
            if fiber_density is True:
                fiberlengths.setdefault(
                    (edge[0], edge[1]), []).append(len(vox_coords))

            # Get FA values along edge
            if fa_wei is True:
                fa_weights_dict.setdefault(
                    (edge[0], edge[1]), []).append(fa_weights_norm[ix])

            lst = tuple([int(node) for node in edge])
            edge_dict[tuple(sorted(lst))] += 1

        del lab_coords, lab_arr, endlabels, edges

    # Add accumulated edge weights (streamline counts) to the graph
    edge_list = [(k[0], k[1], count) for k, count in edge_dict.items()]
    g.add_weighted_edges_from(edge_list)

    gc.collect()

    # Add fiber density attributes for each edge
    # Adapted from the normalized fiber-density estimation routines of
    # Sebastian Tourbier.
    if fiber_density is True:
        print("Weighting edges by fiber density...")
        # Summarize total fibers and total label volumes
        total_fibers = 0
        total_volume = 0
        u_start = -1
        for u, v, d in g.edges(data=True):
            total_fibers += d['weight']
            if u != u_start:
                total_volume += g.nodes[int(u)]['roi_volume']
            u_start = u

        for u, v, d in g.edges(data=True):
            if d['weight'] > 0:
                edge_fiberlength_mean = np.nanmean(fiberlengths[(u, v)])
                # Use a local name so the fiber_density flag is not clobbered
                edge_fiber_density = (float(
                    ((float(d['weight']) / float(total_fibers)) /
                     float(edge_fiberlength_mean)) *
                    ((2.0 * float(total_volume)) /
                     (g.nodes[int(u)]['roi_volume'] +
                      g.nodes[int(v)]['roi_volume'])))) * 1000
            else:
                edge_fiber_density = 0
            g.edges[u, v].update({"fiber_density": edge_fiber_density})

    if fa_wei is True:
        print("Weighting edges by FA...")
        # Add FA attributes for each edge
        for u, v, d in g.edges(data=True):
            if d['weight'] > 0:
                edge_average_fa = np.nanmean(fa_weights_dict[(u, v)])
            else:
                edge_average_fa = np.nan
            g.edges[u, v].update({"fa_weight": edge_average_fa})

    # Summarize weights
    if fa_wei is True and fiber_density is True:
        for u, v, d in g.edges(data=True):
            g.edges[u, v].update(
                {"final_weight": (d['fa_weight']) * d['fiber_density']})
    elif fiber_density is True and fa_wei is False:
        for u, v, d in g.edges(data=True):
            g.edges[u, v].update({"final_weight": d['fiber_density']})
    elif fa_wei is True and fiber_density is False:
        for u, v, d in g.edges(data=True):
            g.edges[u, v].update(
                {"final_weight": d['fa_weight'] * d['weight']})
    else:
        for u, v, d in g.edges(data=True):
            g.edges[u, v].update({"final_weight": d['weight']})

    # Convert weighted graph to numpy matrix
    conn_matrix_raw = nx.to_numpy_array(g, weight='final_weight')

    # Enforce symmetry
    conn_matrix = np.maximum(conn_matrix_raw, conn_matrix_raw.T)

    print("Structural graph completed:\n", str(time.time() - start))

    if len(bad_idxs) > 0:
        bad_idxs = sorted(list(set(bad_idxs)), reverse=True)
        for j in bad_idxs:
            del labels[j], coords[j]

    coords = np.array(coords)
    labels = np.array(labels)

    assert len(coords) == len(labels) == conn_matrix.shape[0]

    return (atlas_mni, streams, conn_matrix, track_type, target_samples,
            dir_path, conn_model, network, node_size, dens_thresh, ID, roi,
            min_span_tree, disp_filt, parc, prune, atlas, uatlas, labels,
            coords, norm, binary, directget, min_length, error_margin)
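
The graph-building core above reduces to one pattern: map each streamline to
the set of parcel labels it intersects, then increment a counter for every
unordered label pair. A self-contained sketch of just that bookkeeping, using
toy label sets rather than real tractography:

from collections import defaultdict
from itertools import combinations

import networkx as nx
import numpy as np

# Toy input: the parcel labels each of three streamlines intersects.
streamline_labels = [{1, 2}, {1, 2, 3}, {2, 3}]

edge_dict = defaultdict(int)
for endlabels in streamline_labels:
    # Every unordered pair of touched parcels counts one fiber for that
    # edge, mirroring the combinations(endlabels, 2) loop above.
    for edge in combinations(sorted(endlabels), 2):
        edge_dict[edge] += 1

g = nx.Graph()
g.add_weighted_edges_from((u, v, w) for (u, v), w in edge_dict.items())

# Convert to a symmetric streamline-count adjacency matrix.
conn_matrix_raw = nx.to_numpy_array(g)
conn_matrix = np.maximum(conn_matrix_raw, conn_matrix_raw.T)
print(conn_matrix)
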
Code Example #3
File: estimation.py  Project: neurolibre/PyNets
def streams2graph(atlas_mni,
                  streams,
                  overlap_thr,
                  dir_path,
                  track_type,
                  target_samples,
                  conn_model,
                  network,
                  node_size,
                  dens_thresh,
                  ID,
                  roi,
                  min_span_tree,
                  disp_filt,
                  parc,
                  prune,
                  atlas,
                  uatlas,
                  labels,
                  coords,
                  norm,
                  binary,
                  directget,
                  warped_fa,
                  error_margin,
                  min_length,
                  fa_wei=True):
    '''
    Use tracked streamlines as a basis for estimating a structural connectome.

    Parameters
    ----------
    atlas_mni : str
        File path to atlas parcellation Nifti1Image in T1w-warped MNI space.
    streams : str
        File path to streamline array sequence in .trk format.
    overlap_thr : int
        Number of voxels within an ROI that a given streamline must intersect
        for an edge to be counted.
    dir_path : str
        Path to directory containing subject derivative data for a given pynets run.
    track_type : str
        Tracking algorithm used (e.g. 'local' or 'particle').
    target_samples : int
        Total number of streamline samples specified to generate streams.
    conn_model : str
        Connectivity reconstruction method (e.g. 'csa', 'tensor', 'csd').
    network : str
        Resting-state network based on Yeo-7 and Yeo-17 naming (e.g. 'Default')
        used to filter nodes in the study of brain subgraphs.
    node_size : int
        Spherical centroid node size in the case that coordinate-based centroids
        are used as ROIs for tracking.
    dens_thresh : bool
        Indicates whether a target graph density is to be used as the basis for
        thresholding.
    ID : str
        A subject id or other unique identifier.
    roi : str
        File path to binarized/boolean region-of-interest Nifti1Image file.
    min_span_tree : bool
        Indicates whether local thresholding from the Minimum Spanning Tree
        should be used.
    disp_filt : bool
        Indicates whether local thresholding using a disparity filter and
        'backbone network' should be used.
    parc : bool
        Indicates whether to use parcels instead of coordinates as ROI nodes.
    prune : bool
        Indicates whether to prune final graph of disconnected nodes/isolates.
    atlas : str
        Name of atlas parcellation used.
    uatlas : str
        File path to atlas parcellation Nifti1Image in MNI template space.
    labels : list
        List of string labels corresponding to graph nodes.
    coords : list
        List of (x, y, z) tuples corresponding to a coordinate atlas used or
        which represent the center-of-mass of each parcellation node.
    norm : int
        Indicates method of normalizing resulting graph.
    binary : bool
        Indicates whether to binarize resulting graph edges to form an
        unweighted graph.
    directget : str
        The statistical approach to tracking. Options are: det (deterministic),
        closest (clos), boot (bootstrapped), and prob (probabilistic).
    warped_fa : str
        File path to MNI-space warped FA Nifti1Image.
    error_margin : int
        Euclidean margin of error for classifying a streamline as a connection
        to an ROI. Default is 2 voxels.
    min_length : int
        Minimum fiber length threshold in mm to restrict tracking.
    fa_wei : bool
        Scale streamline count edges by fractional anisotropy (FA). Default is
        True.

    Returns
    -------
    atlas_mni : str
        File path to atlas parcellation Nifti1Image in T1w-warped MNI space.
    streams : str
        File path to streamline array sequence in .trk format.
    conn_matrix : array
        Adjacency matrix stored as an m x n array of nodes and edges.
    track_type : str
        Tracking algorithm used (e.g. 'local' or 'particle').
    target_samples : int
        Total number of streamline samples specified to generate streams.
    dir_path : str
        Path to directory containing subject derivative data for given run.
    conn_model : str
        Connectivity reconstruction method (e.g. 'csa', 'tensor', 'csd').
    network : str
        Resting-state network based on Yeo-7 and Yeo-17 naming (e.g. 'Default')
        used to filter nodes in the study of brain subgraphs.
    node_size : int
        Spherical centroid node size in the case that coordinate-based centroids
        are used as ROIs for tracking.
    dens_thresh : bool
        Indicates whether a target graph density is to be used as the basis for
        thresholding.
    ID : str
        A subject id or other unique identifier.
    roi : str
        File path to binarized/boolean region-of-interest Nifti1Image file.
    min_span_tree : bool
        Indicates whether local thresholding from the Minimum Spanning Tree
        should be used.
    disp_filt : bool
        Indicates whether local thresholding using a disparity filter and
        'backbone network' should be used.
    parc : bool
        Indicates whether to use parcels instead of coordinates as ROI nodes.
    prune : bool
        Indicates whether to prune final graph of disconnected nodes/isolates.
    atlas : str
        Name of atlas parcellation used.
    uatlas : str
        File path to atlas parcellation Nifti1Image in MNI template space.
    labels : list
        List of string labels corresponding to graph nodes.
    coords : list
        List of (x, y, z) tuples corresponding to a coordinate atlas used or
        which represent the center-of-mass of each parcellation node.
    norm : int
        Indicates method of normalizing resulting graph.
    binary : bool
        Indicates whether to binarize resulting graph edges to form an
        unweighted graph.
    directget : str
        The statistical approach to tracking. Options are: det (deterministic),
        closest (clos), boot (bootstrapped), and prob (probabilistic).
    min_length : int
        Minimum fiber length threshold in mm to restrict tracking.

    References
    ----------
    .. [1] Sporns, O., Tononi, G., & Kötter, R. (2005). The human connectome:
      A structural description of the human brain. PLoS Computational Biology.
      https://doi.org/10.1371/journal.pcbi.0010042
    .. [2] Sotiropoulos, S. N., & Zalesky, A. (2019). Building connectomes
      using diffusion MRI: why, how and but. NMR in Biomedicine.
      https://doi.org/10.1002/nbm.3752
    .. [3] Chung, M. K., Hanson, J. L., Adluru, N., Alexander, A. L., Davidson,
      R. J., & Pollak, S. D. (2017). Integrative Structural Brain Network
      Analysis in Diffusion Tensor Imaging. Brain Connectivity.
      https://doi.org/10.1089/brain.2016.0481

    '''
    import gc
    import numpy as np
    import nibabel as nib
    import time
    from dipy.tracking.streamline import Streamlines, values_from_volume
    from dipy.tracking._utils import (_mapping_to_voxel, _to_voxel_coordinates)
    import networkx as nx
    from itertools import combinations
    from collections import defaultdict
    from pynets.core import utils, nodemaker
    from pynets.dmri.dmri_utils import generate_sl
    from dipy.io.streamline import load_tractogram
    from dipy.io.stateful_tractogram import Space, Origin

    start = time.time()

    # Load parcellation
    roi_img = nib.load(atlas_mni)
    atlas_data = np.around(np.asarray(roi_img.dataobj))
    roi_zooms = roi_img.header.get_zooms()
    roi_shape = roi_img.shape

    # Read Streamlines
    streamlines = [
        i.astype(np.float32) for i in Streamlines(
            load_tractogram(streams,
                            roi_img,
                            to_space=Space.RASMM,
                            to_origin=Origin.TRACKVIS,
                            bbox_valid_check=False).streamlines)
    ]
    roi_img.uncache()

    if fa_wei is True:
        fa_weights = values_from_volume(
            np.asarray(nib.load(warped_fa).dataobj), streamlines, np.eye(4))
        global_fa_weights = list(utils.flatten(fa_weights))
        min_global_fa_wei = min(i for i in global_fa_weights if i > 0)
        max_global_fa_wei = max(global_fa_weights)
        fa_weights_norm = []
        # Here we normalize by global FA
        for val_list in fa_weights:
            fa_weights_norm.append(
                np.nanmean((val_list - min_global_fa_wei) /
                           (max_global_fa_wei - min_global_fa_wei)))

    # Make streamlines into generators to keep memory at a minimum
    sl = [generate_sl(i) for i in streamlines]
    del streamlines

    # Instantiate empty networkX graph object & dictionary and create
    # voxel-affine mapping
    lin_T, offset = _mapping_to_voxel(np.eye(4))
    mx = len(np.unique(atlas_data.astype('uint16'))) - 1
    g = nx.Graph(ecount=0, vcount=mx)
    edge_dict = defaultdict(int)
    node_dict = dict(
        zip(np.unique(atlas_data.astype('uint16'))[1:],
            np.arange(mx) + 1))

    # Add empty vertices
    for node in range(1, mx + 1):
        g.add_node(node)

    # Build graph
    ix = 0
    bad_idxs = []
    for s in sl:
        # Map the streamline coordinates to voxel coordinates and get labels
        # for label_volume
        vox_coords = _to_voxel_coordinates(Streamlines(s), lin_T, offset)
        lab_coords = [
            nodemaker.get_sphere(coord, error_margin, roi_zooms, roi_shape)
            for coord in vox_coords
        ]
        [i, j, k] = np.vstack(np.array(lab_coords)).T

        # get labels for label_volume
        lab_arr = atlas_data[i, j, k]
        endlabels = []
        for jx, lab in enumerate(np.unique(lab_arr).astype('uint32')):
            if (lab > 0) and (np.sum(lab_arr == lab) >= overlap_thr):
                try:
                    endlabels.append(node_dict[lab])
                except KeyError:
                    bad_idxs.append(jx)
                    print(
                        f"Label {lab} missing from parcellation. Check "
                        f"registration and ensure valid input parcellation "
                        f"file.")

        edges = combinations(endlabels, 2)
        for edge in edges:
            lst = tuple([int(node) for node in edge])
            edge_dict[tuple(sorted(lst))] += 1

        edge_list = [(k[0], k[1], v) for k, v in edge_dict.items()]

        if fa_wei is True:
            # Add edge list to g, keyed by the streamline's mean normalized
            # FA value (the `weight` argument here names the edge attribute)
            g.add_weighted_edges_from(edge_list, weight=fa_weights_norm[ix])
        else:
            g.add_weighted_edges_from(edge_list)
        ix = ix + 1

        del lab_coords, lab_arr, endlabels, edges, edge_list

    gc.collect()

    if fa_wei is True:
        # Add average fa weights to streamline counts
        for u, v in list(g.edges):
            # Each edge's data dict maps a mean-FA key to its streamline
            # count, per the add_weighted_edges_from calls above
            h = g.get_edge_data(u, v)
            edge_att_dict = {}
            for e, w in h.items():
                if w not in edge_att_dict.keys():
                    edge_att_dict[w] = [e]
                else:
                    edge_att_dict[w].append(e)
            for key in edge_att_dict.keys():
                edge_att_dict[key] = np.nanmean(edge_att_dict[key])
            vals = []
            for e2, w2 in edge_att_dict.items():
                vals.append(float(e2) * float(w2))
            g.edges[u, v].update({'weight': np.nanmean(vals)})

    # Convert to numpy matrix
    conn_matrix_raw = nx.to_numpy_array(g)

    # Impose symmetry
    conn_matrix = np.maximum(conn_matrix_raw, conn_matrix_raw.T)

    print('Graph Building Complete:\n', str(time.time() - start))

    if len(bad_idxs) > 0:
        bad_idxs = sorted(list(set(bad_idxs)), reverse=True)
        for j in bad_idxs:
            del labels[j], coords[j]

    coords = np.array(coords)
    labels = np.array(labels)

    return (atlas_mni, streams, conn_matrix, track_type, target_samples,
            dir_path, conn_model, network, node_size, dens_thresh, ID, roi,
            min_span_tree, disp_filt, parc, prune, atlas, uatlas, labels,
            coords, norm, binary, directget, min_length)
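
For reference, the global FA min-max normalization shared by both
streams2graph variants reduces to the following standalone sketch (the FA
samples are made up; real values come from values_from_volume):

import numpy as np

# Made-up per-streamline FA samples, shaped like values_from_volume
# output: one array of FA values per streamline.
fa_weights = [np.array([0.0, 0.31, 0.52]), np.array([0.44, 0.61])]

global_fa = np.concatenate(fa_weights)
min_wei = global_fa[global_fa > 0].min()  # smallest nonzero FA overall
max_wei = global_fa.max()

# Mean min-max-normalized FA per streamline, mirroring fa_weights_norm.
fa_weights_norm = [np.nanmean((v - min_wei) / (max_wei - min_wei))
                   for v in fa_weights]
print(fa_weights_norm)
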