Example #1
import numpy as np
from numpy.testing import assert_allclose

from dipy.tracking.streamline import (deform_streamlines,
                                      transform_streamlines,
                                      values_from_volume)


def test_deform_streamlines():
    # `streamlines` is assumed to be defined at module scope (a shared test
    # fixture); a sketch of such a fixture follows this test.
    # Create Random deformation field
    deformation_field = np.random.randn(200, 200, 200, 3)
    # Specify stream2grid and grid2world
    stream2grid = np.array([[np.random.randn(1)[0], 0, 0, 0],
                            [0, np.random.randn(1)[0], 0, 0],
                            [0, 0, np.random.randn(1)[0], 0],
                            [0, 0, 0, 1]])
    grid2world = np.array([[np.random.randn(1)[0], 0, 0, 0],
                           [0, np.random.randn(1)[0], 0, 0],
                           [0, 0, np.random.randn(1)[0], 0],
                           [0, 0, 0, 1]])
    stream2world = np.dot(stream2grid, grid2world)

    # Deform streamlines (let two grid spaces be the same for simplicity)
    new_streamlines = deform_streamlines(streamlines,
                                         deformation_field,
                                         stream2grid,
                                         grid2world,
                                         stream2grid,
                                         grid2world)

    # Interpolate displacements onto original streamlines
    streamlines_in_grid = transform_streamlines(streamlines, stream2grid)
    disps = values_from_volume(deformation_field, streamlines_in_grid)

    # Put new_streamlines into world space
    new_streamlines_world = transform_streamlines(new_streamlines,
                                                  stream2world)

    # Subtract disps from new_streamlines in world space
    orig_streamlines_world = list(np.subtract(new_streamlines_world, disps))

    # Put orig_streamlines_world into voxmm
    orig_streamlines = transform_streamlines(orig_streamlines_world,
                                             np.linalg.inv(stream2world))
    # All close because of floating-point imprecision
    for o, s in zip(orig_streamlines, streamlines):
        assert_allclose(s, o, rtol=1e-10, atol=0)
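The test above relies on a module-level `streamlines` fixture. A minimal
sketch of one, assuming a handful of random 3D polylines inside the
(200, 200, 200) deformation grid is representative enough for the
round-trip check:

import numpy as np

# Hypothetical fixture: five equal-length streamlines (equal length keeps
# the test's np.subtract over lists of arrays well-defined), with
# coordinates safely inside the deformation field.
rng = np.random.default_rng(42)
streamlines = [rng.uniform(10, 190, size=(20, 3)) for _ in range(5)]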
Example #2
def calculate_tract_profile(img, streamlines, affine=None, n_points=100,
                            weights=None):
    """

    Parameters
    ----------
    img : 3D volume

    streamlines : list of arrays, or array

    weights : 1D array or 2D array (optional)
        Weight each streamline (1D) or each node (2D) when calculating the
        tract-profiles. Must sum to 1 across streamlines (in each node if
        relevant).

    """
    if isinstance(streamlines, (list, dts.Streamlines)):
        # Resample each streamline to the same number of points
        # list => np.array
        # Setting the number of points should happen in a streamline template
        # space, rather than in the subject native space, but for now we do
        # everything as in the Matlab version -- in native space.
        # In the future, an SLR object can be passed here, and then it would
        # move these streamlines into the template space before resampling...
        fgarray = _resample_bundle(streamlines, n_points)
    else:
        fgarray = streamlines
    # ...and move them back to native space before indexing into the volume:
    values = dts.values_from_volume(img, fgarray, affine=affine)

    # We assume that weights *always sum to 1 across streamlines*:
    if weights is None:
        weights = np.ones(values.shape) / values.shape[0]

    tract_profile = np.sum(weights * values, 0)
    return tract_profile
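A minimal usage sketch, assuming `fa` is a 3D FA array, `affine` its
voxel-to-streamline matrix, and `bundle` a list of streamlines (all three
names are placeholders, not defined above):

# Uniform weighting, the function's default: every streamline contributes
# equally and the weights sum to 1 across streamlines.
profile = calculate_tract_profile(fa, bundle, affine=affine, n_points=100)

# Explicit weights: a (n_streamlines, 1) column that sums to 1 across
# streamlines broadcasts against the (n_streamlines, n_points) values.
w = np.ones((len(bundle), 1)) / len(bundle)
profile_w = calculate_tract_profile(fa, bundle, affine=affine, weights=w)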
Example #3
def afq_profile(data, bundle, affine, n_points=100,
                orient_by=None, weights=None, **weights_kwarg):
    """
    Calculates a summarized profile of data for a bundle or tract
    along its length.

    Follows the approach outlined in [Yeatman2012]_.

    Parameters
    ----------
    data : 3D volume
        The statistic to sample with the streamlines.

    bundle : Streamlines class instance
        The collection of streamlines (possibly already resampled into an
        array so that each streamline has the same number of points) with
        which we are resampling. See the Note below about orienting the
        streamlines.
    affine : array_like (4, 4)
        The mapping from voxel coordinates to streamline points.
        The voxel_to_rasmm matrix, typically from a NIFTI file.
    n_points : int, optional
        The number of points to sample along the bundle. Default: 100.
    orient_by : streamline, optional
        A streamline to use as a standard; all of the streamlines in the
        bundle are oriented to match it.
    weights : 1D array or 2D array or callable (optional)
        Weight each streamline (1D) or each node (2D) when calculating the
        tract-profiles. Must sum to 1 across streamlines (in each node if
        relevant). If callable, this is a function that calculates weights.
    weights_kwarg : key-word arguments
        Additional key-word arguments to pass to the weight-calculating
        function. Only to be used if weights is a callable.

    Returns
    -------
    ndarray : a 1D array with the profile of `data` along the length of
        `bundle`

    Notes
    -----
    Before providing a bundle as input to this function, you will need to make
    sure that the streamlines in the bundle are all oriented in the same
    orientation relative to the bundle (use :func:`orient_by_streamline`).

    References
    ----------
    .. [Yeatman2012] Yeatman, Jason D., Robert F. Dougherty,
       Nathaniel J. Myall, Brian A. Wandell, and Heidi M. Feldman. 2012.
       "Tract Profiles of White Matter Properties: Automating Fiber-Tract
       Quantification" PloS One 7 (11): e49790.

    """
    if orient_by is not None:
        bundle = orient_by_streamline(bundle, orient_by)
    if affine is None:
        affine = np.eye(4)
    if len(bundle) == 0:
        raise ValueError("The bundle contains no streamlines")

    # Resample each streamline to the same number of points:
    fgarray = set_number_of_points(bundle, n_points)

    # Extract the values
    values = np.array(values_from_volume(data, fgarray, affine))

    if weights is None:
        weights = np.ones(values.shape) / values.shape[0]
    elif callable(weights):
        weights = weights(bundle, **weights_kwarg)
    else:
        # We check that weights *always sum to 1 across streamlines*:
        if not np.allclose(np.sum(weights, 0), np.ones(n_points)):
            raise ValueError("The sum of weights across streamlines must "
                             "be equal to 1")

    return np.sum(weights * values, 0)
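A usage sketch, assuming `fa_data` is a 3D FA array, `affine` its
voxel-to-RASMM matrix, and `bundle` a Streamlines instance;
`gaussian_weights` from dipy.stats.analysis is one callable that satisfies
the `weights` contract above:

from dipy.stats.analysis import gaussian_weights

# Unweighted profile:
profile = afq_profile(fa_data, bundle, affine)

# Mahalanobis-weighted profile, as in Yeatman et al. (2012); the callable
# receives the bundle (its n_points default of 100 matches ours):
profile_w = afq_profile(fa_data, bundle, affine, weights=gaussian_weights)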
Example #4
def streams2graph(atlas_mni,
                  streams,
                  overlap_thr,
                  dir_path,
                  track_type,
                  target_samples,
                  conn_model,
                  network,
                  node_size,
                  dens_thresh,
                  ID,
                  roi,
                  min_span_tree,
                  disp_filt,
                  parc,
                  prune,
                  atlas,
                  uatlas,
                  labels,
                  coords,
                  norm,
                  binary,
                  directget,
                  warped_fa,
                  error_margin,
                  max_length,
                  fa_wei=True):
    '''
    Use tracked streamlines as a basis for estimating a structural connectome.

    Parameters
    ----------
    atlas_mni : str
        File path to atlas parcellation Nifti1Image in T1w-warped MNI space.
    streams : str
        File path to streamline array sequence in .trk format.
    overlap_thr : int
        Number of voxels for which a given streamline must intersect with an ROI
        for an edge to be counted.
    dir_path : str
        Path to directory containing subject derivative data for a given pynets run.
    track_type : str
        Tracking algorithm used (e.g. 'local' or 'particle').
    target_samples : int
        Total number of streamline samples specified to generate streams.
    conn_model : str
        Connectivity reconstruction method (e.g. 'csa', 'tensor', 'csd').
    network : str
        Resting-state network based on Yeo-7 and Yeo-17 naming (e.g. 'Default')
        used to filter nodes in the study of brain subgraphs.
    node_size : int
        Spherical centroid node size in the case that coordinate-based centroids
        are used as ROI's for tracking.
    dens_thresh : bool
        Indicates whether a target graph density is to be used as the basis for
        thresholding.
    ID : str
        A subject id or other unique identifier.
    roi : str
        File path to binarized/boolean region-of-interest Nifti1Image file.
    min_span_tree : bool
        Indicates whether local thresholding from the Minimum Spanning Tree
        should be used.
    disp_filt : bool
        Indicates whether local thresholding using a disparity filter and
        'backbone network' should be used.
    parc : bool
        Indicates whether to use parcels instead of coordinates as ROI nodes.
    prune : bool
        Indicates whether to prune final graph of disconnected nodes/isolates.
    atlas : str
        Name of atlas parcellation used.
    uatlas : str
        File path to atlas parcellation Nifti1Image in MNI template space.
    labels : list
        List of string labels corresponding to graph nodes.
    coords : list
        List of (x, y, z) tuples corresponding to a coordinate atlas used or
        which represent the center-of-mass of each parcellation node.
    norm : int
        Indicates method of normalizing resulting graph.
    binary : bool
        Indicates whether to binarize resulting graph edges to form an
        unweighted graph.
    directget : str
        The statistical approach to tracking. Options are: det
        (deterministic), closest (clos), boot (bootstrapped), and prob
        (probabilistic).
    warped_fa : str
        File path to MNI-space warped FA Nifti1Image.
    error_margin : int
        Euclidean margin of error for classifying a streamline as a
        connection to an ROI. Default is 2 voxels.
    max_length : int
        Maximum fiber length threshold in mm to restrict tracking.
    fa_wei : bool
        Scale streamline count edges by fractional anisotropy (FA). Default
        is True.

    Returns
    -------
    atlas_mni : str
        File path to atlas parcellation Nifti1Image in T1w-warped MNI space.
    streams : str
        File path to streamline array sequence in .trk format.
    conn_matrix : array
        Adjacency matrix stored as an m x n array of nodes and edges.
    track_type : str
        Tracking algorithm used (e.g. 'local' or 'particle').
    target_samples : int
        Total number of streamline samples specified to generate streams.
    dir_path : str
        Path to directory containing subject derivative data for given run.
    conn_model : str
        Connectivity reconstruction method (e.g. 'csa', 'tensor', 'csd').
    network : str
        Resting-state network based on Yeo-7 and Yeo-17 naming (e.g. 'Default')
        used to filter nodes in the study of brain subgraphs.
    node_size : int
        Spherical centroid node size in the case that coordinate-based centroids
        are used as ROI's for tracking.
    dens_thresh : bool
        Indicates whether a target graph density is to be used as the basis for
        thresholding.
    ID : str
        A subject id or other unique identifier.
    roi : str
        File path to binarized/boolean region-of-interest Nifti1Image file.
    min_span_tree : bool
        Indicates whether local thresholding from the Minimum Spanning Tree
        should be used.
    disp_filt : bool
        Indicates whether local thresholding using a disparity filter and
        'backbone network' should be used.
    parc : bool
        Indicates whether to use parcels instead of coordinates as ROI nodes.
    prune : bool
        Indicates whether to prune final graph of disconnected nodes/isolates.
    atlas : str
        Name of atlas parcellation used.
    uatlas : str
        File path to atlas parcellation Nifti1Image in MNI template space.
    labels : list
        List of string labels corresponding to graph nodes.
    coords : list
        List of (x, y, z) tuples corresponding to a coordinate atlas used or
        which represent the center-of-mass of each parcellation node.
    norm : int
        Indicates method of normalizing resulting graph.
    binary : bool
        Indicates whether to binarize resulting graph edges to form an
        unweighted graph.
    directget : str
        The statistical approach to tracking. Options are: det
        (deterministic), closest (clos), boot (bootstrapped), and prob
        (probabilistic).
    max_length : int
        Maximum fiber length threshold in mm to restrict tracking.
    '''
    from dipy.tracking.streamline import Streamlines, values_from_volume
    from dipy.tracking._utils import (_mapping_to_voxel, _to_voxel_coordinates)
    import networkx as nx
    from itertools import combinations
    from collections import defaultdict
    from pynets.core import utils, nodemaker
    from dipy.io.streamline import load_tractogram
    from dipy.io.stateful_tractogram import Space, Origin
    import time

    # Load parcellation
    roi_img = nib.load(atlas_mni)
    atlas_data = np.around(roi_img.get_fdata())
    roi_zooms = roi_img.header.get_zooms()
    roi_shape = roi_img.shape

    # Read Streamlines
    streamlines = Streamlines(
        load_tractogram(streams,
                        roi_img,
                        to_space=Space.RASMM,
                        to_origin=Origin.TRACKVIS,
                        bbox_valid_check=False).streamlines)
    roi_img.uncache()

    fa_weights = values_from_volume(
        nib.load(warped_fa).get_fdata(), streamlines, np.eye(4))
    global_fa_weights = list(utils.flatten(fa_weights))
    min_global_fa_wei = min(global_fa_weights)
    max_global_fa_wei = max(global_fa_weights)
    fa_weights_norm = []
    for val_list in fa_weights:
        fa_weights_norm.append((val_list - min_global_fa_wei) /
                               (max_global_fa_wei - min_global_fa_wei))

    # Instantiate empty networkX graph object & dictionary and create voxel-affine mapping
    lin_T, offset = _mapping_to_voxel(np.eye(4))
    mx = len(np.unique(atlas_data.astype('uint16'))) - 1
    g = nx.Graph(ecount=0, vcount=mx)
    edge_dict = defaultdict(int)
    # Map nonzero parcellation labels to contiguous node ids, skipping the
    # background label 0:
    node_dict = dict(
        zip(np.unique(atlas_data.astype('uint16'))[1:],
            np.arange(mx) + 1))

    # Add empty vertices
    for node in range(1, mx + 1):
        g.add_node(node)

    # Build graph
    start_time = time.time()

    ix = 0
    for s in streamlines:
        # Map the streamlines coordinates to voxel coordinates and get labels for label_volume
        i, j, k = np.vstack(
            np.array([
                nodemaker.get_sphere(coord, error_margin, roi_zooms, roi_shape)
                for coord in _to_voxel_coordinates(s, lin_T, offset)
            ])).T

        # get labels for label_volume
        lab_arr = atlas_data[i, j, k]
        endlabels = []
        for lab in np.unique(lab_arr).astype('uint32'):
            if (lab > 0) and (np.sum(lab_arr == lab) >= overlap_thr):
                try:
                    endlabels.append(node_dict[lab])
                except KeyError:
                    print(f"Label {lab} missing from parcellation. Check "
                          "registration and ensure valid input "
                          "parcellation file.")

        edges = combinations(endlabels, 2)
        for edge in edges:
            lst = tuple([int(node) for node in edge])
            edge_dict[tuple(sorted(lst))] += 1

        edge_list = [(k[0], k[1], v) for k, v in edge_dict.items()]

        if fa_wei is True:
            # Add the edge list to g. The streamline's mean FA is (ab)used
            # as the edge-attribute *name*, so each edge accumulates a
            # {mean_FA: count} mapping that the fa_wei block below averages.
            g.add_weighted_edges_from(edge_list,
                                      weight=np.nanmean(fa_weights_norm[ix]))
        else:
            g.add_weighted_edges_from(edge_list)
        ix = ix + 1

    print("%s%s%s" % ('Graph construction runtime: ',
                      np.round(time.time() - start_time, 1), 's'))
    del streamlines

    if fa_wei is True:
        # Add average fa weights to streamline counts
        for u, v in list(g.edges):
            h = g.get_edge_data(u, v)
            edge_att_dict = {}
            for e, w in h.items():
                if w not in edge_att_dict.keys():
                    edge_att_dict[w] = []
                # Append unconditionally; the original `else` silently
                # dropped the first value seen for each count.
                edge_att_dict[w].append(e)
            for key in edge_att_dict.keys():
                edge_att_dict[key] = np.nanmean(edge_att_dict[key])
            vals = []
            for e2, w2 in edge_att_dict.items():
                vals.append(float(e2) * float(w2))
            g.edges[u, v].update({'weight': np.nanmean(vals)})

    # Convert to a numpy array (nx.to_numpy_matrix was removed in
    # NetworkX 3.0)
    conn_matrix_raw = nx.to_numpy_array(g)

    # Enforce symmetry
    conn_matrix = np.maximum(conn_matrix_raw, conn_matrix_raw.T)

    return (atlas_mni, streams, conn_matrix, track_type, target_samples,
            dir_path, conn_model, network, node_size, dens_thresh, ID, roi,
            min_span_tree, disp_filt, parc, prune, atlas, uatlas, labels,
            coords, norm, binary, directget, max_length)
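The core counting idiom above, mapping each streamline to the parcellation
labels it touches and incrementing an edge for every label pair, can be
isolated in a few lines. A minimal sketch with synthetic label lists (no
pynets dependencies; all names are illustrative):

import numpy as np
import networkx as nx
from itertools import combinations
from collections import defaultdict

# Labels touched by three synthetic "streamlines":
streamline_labels = [[1, 2], [1, 2, 3], [2, 3]]

edge_dict = defaultdict(int)
for endlabels in streamline_labels:
    for edge in combinations(endlabels, 2):
        edge_dict[tuple(sorted(edge))] += 1  # undirected: sort the pair

g = nx.Graph()
g.add_weighted_edges_from((u, v, w) for (u, v), w in edge_dict.items())
conn = nx.to_numpy_array(g)
conn = np.maximum(conn, conn.T)  # enforce symmetry, as above
print(dict(edge_dict))  # {(1, 2): 2, (1, 3): 1, (2, 3): 2}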
Example #5
def weighting_streamlines(
    out_folder_name,
    streamlines,
    bvec_file,
    weight_by="1.5_2_AxPasi5",
    hue=[0.0, 1.0],
    saturation=[0.0, 1.0],
    scale=[2, 7],
    fig_type="",
):
    """
    weight_by = '1.5_2_AxPasi5'
    hue = [0.0,1.0]
    saturation = [0.0,1.0]
    scale = [3,6]
    """
    from dipy.viz import window, actor
    from dipy.tracking.streamline import values_from_volume

    weight_by_data, affine = load_weight_by_img(bvec_file, weight_by)

    stream = list(streamlines)
    vol_per_tract = values_from_volume(weight_by_data, stream, affine=affine)

    pfr_data = load_weight_by_img(bvec_file, "1.5_2_AxFr5")[0]

    pfr_per_tract = values_from_volume(pfr_data, stream, affine=affine)

    # Leave out from the calculation of mean value per tract, a chosen quantile:
    vol_vec = weight_by_data.flatten()
    q = np.quantile(vol_vec[vol_vec > 0], 0.95)
    mean_vol_per_tract = []
    for s, pfr in zip(vol_per_tract, pfr_per_tract):
        s = np.asanyarray(s)
        pfr = np.asanyarray(pfr)
        # Combine the two conditions elementwise; the original list-`and`
        # idiom ([s < q] and [pfr > 0.5]) returned only the second mask.
        mask = (s < q) & (pfr > 0.5)
        mean_vol_per_tract.append(np.nanmean(s[mask]))

    lut_cmap = actor.colormap_lookup_table(hue_range=hue,
                                           saturation_range=saturation,
                                           scale_range=scale)
    streamlines_actor = actor.line(streamlines,
                                   mean_vol_per_tract,
                                   linewidth=1,
                                   lookup_colormap=lut_cmap)
    bar = actor.scalar_bar(lut_cmap)
    r = window.Renderer()
    r.add(streamlines_actor)
    r.add(bar)
    # mean_pasi_weighted_img = out_folder_name+'\streamlines\mean_pasi_weighted' + fig_type + '.png'
    mean_pasi_weighted_img = f"{out_folder_name}/mean_pasi_weighted{fig_type}.png"
    # window.show(r)
    # r.set_camera(r.camera_info())
    r.set_camera(
        position=(-389.00, 225.24, 62.02),
        focal_point=(1.78, -3.27, -12.65),
        view_up=(0.00, -0.31, 0.95),
    )
    # window.record(r, out_path=mean_pasi_weighted_img, size=(800, 800))
    window.snapshot(r, fname=mean_pasi_weighted_img, size=(800, 800))
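The mask fix in the loop above is worth spelling out: `[m1] and [m2]`
evaluates the truthiness of the first (non-empty, hence truthy) list and
simply returns the second, so only one condition is ever applied. A short
demonstration:

import numpy as np

a = np.array([1.0, 5.0, 9.0])
print([a < 8] and [a > 2])  # [array([False,  True,  True])] -- first mask ignored
print((a < 8) & (a > 2))    # [False  True False] -- the intended combination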
Example #6
import os
import nibabel as nib
import numpy as np
from dipy.io.streamline import load_trk
from dipy.tracking.streamline import Streamlines, values_from_volume
from dipy.viz import actor

subj = r'\BeEf_subj7'
stream_file = r'C:\Users\Admin\my_scripts\Ax3D_Pack\V5' + subj + '\streamlines' + subj + '.trk'
streams, hdr = load_trk(stream_file)  # legacy (pre-1.0) dipy return signature
streamlines = Streamlines(streams)
stream = list(streamlines)  # the original `list(s1)` referenced an undefined name
weight_by = 'pasiS'
folder_name = r'C:\Users\Admin\my_scripts\Ax3D_Pack\V5' + subj
file_list = os.listdir(folder_name)
for file in file_list:
    if weight_by in file and file.endswith('.nii'):
        weight_by_file = os.path.join(folder_name, file)
        weight_by_img = nib.load(weight_by_file)
        weight_by_data = weight_by_img.get_fdata()
        data_affine = weight_by_img.affine  # get_affine() was removed from nibabel
A = values_from_volume(weight_by_data, stream, affine=data_affine)
pasi_stream = []
for i, s in enumerate(A):
    pasi_stream.append(np.mean(s))
scale = [0, 100]
saturation = [0.0, 1.0]
hue = [0, 1]

lut_cmap = actor.colormap_lookup_table(hue_range=hue,
                                       saturation_range=saturation,
                                       scale_range=scale)
streamlines_actor = actor.line(streamlines,
                               pasi_stream,
                               linewidth=0.1,
                               lookup_colormap=lut_cmap)
bar = actor.scalar_bar(lut_cmap)
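Every example on this page funnels through `values_from_volume`. A
minimal, self-contained sketch of what it does (trilinear interpolation of
a volume at each streamline point, here with an identity affine):

import numpy as np
from dipy.tracking.streamline import values_from_volume

# A tiny synthetic volume whose value equals the x-coordinate:
vol = np.zeros((10, 10, 10))
vol[:] = np.arange(10)[:, None, None]

# One streamline running along x at y = z = 5, in voxel coordinates:
sl = [np.array([[1.0, 5.0, 5.0], [2.5, 5.0, 5.0], [4.0, 5.0, 5.0]])]

vals = values_from_volume(vol, sl, np.eye(4))
print(vals)  # [[1.0, 2.5, 4.0]] -- the interpolated x-coordinates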
Example #7
    def segment_afq(self, tg=None):
        """
        Assign streamlines to bundles using the waypoint ROI approach
        Parameters
        ----------
        tg : StatefulTractogram class instance
        """
        tg = self._read_tg(tg=tg)
        self.tg.to_vox()

        # For expedience, we approximate each streamline as a 100 point curve.
        # This is only used in extracting the values from the probability map,
        # so will not affect measurement of distance from the waypoint ROIs
        fgarray = np.array(_resample_tg(tg, 100))
        n_streamlines = fgarray.shape[0]

        streamlines_in_bundles = np.zeros(
            (n_streamlines, len(self.bundle_dict)))
        min_dist_coords = np.zeros((n_streamlines, len(self.bundle_dict), 2),
                                   dtype=int)
        self.fiber_groups = {}

        if self.return_idx:
            out_idx = np.arange(n_streamlines, dtype=int)

        if self.filter_by_endpoints:
            aal_atlas = afd.read_aal_atlas(self.reg_template)
            if self.save_intermediates is not None:
                nib.save(
                    aal_atlas['atlas'],
                    op.join(self.save_intermediates,
                            'AAL_registered_to_template.nii.gz'))

            aal_atlas = aal_atlas['atlas'].get_fdata()
            # We need to calculate the size of a voxel, so we can transform
            # from mm to voxel units:
            R = self.img_affine[0:3, 0:3]
            vox_dim = np.mean(np.diag(np.linalg.cholesky(R.T.dot(R))))
            dist_to_aal = self.dist_to_aal / vox_dim

        self.logger.info("Assigning Streamlines to Bundles")
        # Tolerance is set to the square of the distance to the corner
        # because we are using the squared Euclidean distance in calls to
        # `cdist` to make those calls faster.
        tol = dts.dist_to_corner(self.img_affine)**2
        for bundle_idx, bundle in enumerate(self.bundle_dict):
            self.logger.info(f"Finding Streamlines for {bundle}")
            warped_prob_map, include_roi, exclude_roi = \
                self._get_bundle_info(bundle_idx, bundle)
            if self.save_intermediates is not None:
                nib.save(
                    nib.Nifti1Image(warped_prob_map.astype(np.float32),
                                    self.img_affine),
                    op.join(self.save_intermediates, 'warpedprobmap', bundle,
                            'as_used.nii.gz'))

            fiber_probabilities = dts.values_from_volume(
                warped_prob_map, fgarray, np.eye(4))
            fiber_probabilities = np.mean(fiber_probabilities, -1)
            idx_above_prob = np.where(
                fiber_probabilities > self.prob_threshold)
            self.logger.info((f"{len(idx_above_prob[0])} streamlines exceed"
                              " the probability threshold."))
            crosses_midline = self.bundle_dict[bundle]['cross_midline']
            for sl_idx in tqdm(idx_above_prob[0]):
                sl = tg.streamlines[sl_idx]
                if fiber_probabilities[sl_idx] > self.prob_threshold:
                    if crosses_midline is not None:
                        if self.crosses[sl_idx]:
                            # This means that the streamline does
                            # cross the midline:
                            if crosses_midline:
                                # This is what we want, keep going
                                pass
                            else:
                                # This is not what we want,
                                # skip to next streamline
                                continue

                    is_close, dist = \
                        self._check_sl_with_inclusion(sl,
                                                      include_roi,
                                                      tol)
                    if is_close:
                        is_far = \
                            self._check_sl_with_exclusion(sl,
                                                          exclude_roi,
                                                          tol)
                        if is_far:
                            min_dist_coords[sl_idx, bundle_idx, 0] =\
                                np.argmin(dist[0], 0)[0]
                            min_dist_coords[sl_idx, bundle_idx, 1] =\
                                np.argmin(dist[1], 0)[0]
                            streamlines_in_bundles[sl_idx, bundle_idx] =\
                                fiber_probabilities[sl_idx]
            self.logger.info(
                (f"{np.sum(streamlines_in_bundles[:, bundle_idx] > 0)} "
                 "streamlines selected with waypoint ROIs"))

        # Eliminate any fibers not selected using the waypoint ROIs:
        possible_fibers = np.sum(streamlines_in_bundles, -1) > 0
        tg = StatefulTractogram(tg.streamlines[possible_fibers], self.img,
                                Space.VOX)
        if self.return_idx:
            out_idx = out_idx[possible_fibers]

        streamlines_in_bundles = streamlines_in_bundles[possible_fibers]
        min_dist_coords = min_dist_coords[possible_fibers]
        bundle_choice = np.argmax(streamlines_in_bundles, -1)

        # We do another round through, so that we can orient all the
        # streamlines within a bundle in the same orientation with respect to
        # the ROIs. This order is ARBITRARY but CONSISTENT (going from ROI0
        # to ROI1).
        self.logger.info("Re-orienting streamlines to consistent directions")
        for bundle_idx, bundle in enumerate(self.bundle_dict):
            self.logger.info(f"Processing {bundle}")

            select_idx = np.where(bundle_choice == bundle_idx)

            if len(select_idx[0]) == 0:
                # There's nothing here, set and move to the next bundle:
                self._return_empty(bundle)
                continue

            # Use a list here, because ArraySequence doesn't support item
            # assignment:
            select_sl = list(tg.streamlines[select_idx])
            # Sub-sample min_dist_coords:
            min_dist_coords_bundle = min_dist_coords[select_idx]
            for idx in range(len(select_sl)):
                min0 = min_dist_coords_bundle[idx, bundle_idx, 0]
                min1 = min_dist_coords_bundle[idx, bundle_idx, 1]
                if min0 > min1:
                    select_sl[idx] = select_sl[idx][::-1]

            if self.filter_by_endpoints:
                self.logger.info("Filtering by endpoints")
                # Create binary masks and warp these into subject's DWI space:
                aal_targets = afd.bundles_to_aal([bundle], atlas=aal_atlas)[0]
                aal_idx = []
                for targ in aal_targets:
                    if targ is not None:
                        aal_roi = np.zeros(aal_atlas.shape[:3])
                        aal_roi[targ[:, 0], targ[:, 1], targ[:, 2]] = 1
                        warped_roi = self.mapping.transform_inverse(
                            aal_roi, interpolation='nearest')
                        aal_idx.append(np.array(np.where(warped_roi > 0)).T)
                    else:
                        aal_idx.append(None)

                self.logger.info("Before filtering "
                                 f"{len(select_sl)} streamlines")

                new_select_sl = clean_by_endpoints(select_sl,
                                                   aal_idx[0],
                                                   aal_idx[1],
                                                   tol=dist_to_aal,
                                                   return_idx=self.return_idx)
                # Generate immediately:
                new_select_sl = list(new_select_sl)

                # We need to check this again:
                if len(new_select_sl) == 0:
                    # There's nothing here, set and move to the next bundle:
                    self._return_empty(bundle)
                    continue

                if self.return_idx:
                    temp_select_sl = []
                    temp_select_idx = np.empty(len(new_select_sl), int)
                    for ii, ss in enumerate(new_select_sl):
                        temp_select_sl.append(ss[0])
                        temp_select_idx[ii] = ss[1]
                    select_idx = select_idx[0][temp_select_idx]
                    new_select_sl = temp_select_sl

                select_sl = new_select_sl
                self.logger.info("After filtering "
                                 f"{len(select_sl)} streamlines")

            if self.clip_edges:
                self.logger.info("Clipping Streamlines by ROI")
                for idx in range(len(select_sl)):
                    min0 = min_dist_coords_bundle[idx, bundle_idx, 0]
                    min1 = min_dist_coords_bundle[idx, bundle_idx, 1]

                    # If the point that is closest to the first ROI
                    # is the same as the point closest to the second ROI,
                    # include the surrounding points to make a streamline.
                    if min0 == min1:
                        min1 = min1 + 1
                        min0 = min0 - 1

                    select_sl[idx] = select_sl[idx][min0:min1]

            select_sl = StatefulTractogram(select_sl, self.img, Space.RASMM)

            if self.return_idx:
                self.fiber_groups[bundle] = {}
                self.fiber_groups[bundle]['sl'] = select_sl
                self.fiber_groups[bundle]['idx'] = out_idx[select_idx]
            else:
                self.fiber_groups[bundle] = select_sl
        return self.fiber_groups
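The re-orientation step above reduces to a simple rule: if a streamline's
point closest to ROI0 comes later along the streamline than its point
closest to ROI1, flip it. A standalone sketch with a toy streamline (the
index values are illustrative):

import numpy as np

sl = np.array([[0., 0., 0.], [1., 0., 0.], [2., 0., 0.]])
min0, min1 = 2, 0  # closest to ROI0 at index 2, closest to ROI1 at index 0

if min0 > min1:
    sl = sl[::-1]  # now the streamline runs from ROI0 toward ROI1
print(sl[0])  # [2. 0. 0.]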
Example #8
def segment(fdata, fbval, fbvec, streamlines, bundles,
            reg_template=None, mapping=None, prob_threshold=0,
            **reg_kwargs):
    """
    Segment streamlines into bundles based on inclusion ROIs.

    Parameters
    ----------
    fdata, fbval, fbvec : str
        Full path to data, bvals, bvecs

    streamlines : list of 2D arrays
        Each array is a streamline, shape (N, 3).

    bundles: dict
        The format is something like::

            {'name': {'ROIs': [img1, img2],
                      'rules': [True, True],
                      'prob_map': img3,
                      'cross_midline': False}}

    reg_template : str or nib.Nifti1Image, optional.
        Template to use for registration (defaults to the MNI T2)

    mapping : DiffeomorphicMap object, str or nib.Nifti1Image, optional
        A mapping between DWI space and a template. Defaults to generate
        this.

    prob_threshold : float.
        Initial cleaning of fiber groups is done using probability maps from
        [Hua2008]_. Here, we choose an average probability that needs to be
        exceeded for an individual streamline to be retained. Default: 0.

    References
    ----------
    .. [Hua2008] Hua K, Zhang J, Wakana S, Jiang H, Li X, et al. (2008)
       Tract probability maps in stereotaxic spaces: analyses of white
       matter anatomy and tract-specific quantification. Neuroimage 39:
       336-347
    """
    img, _, gtab, _ = ut.prepare_data(fdata, fbval, fbvec)
    tol = dts.dist_to_corner(img.affine)

    xform_sl = dts.Streamlines(dtu.move_streamlines(streamlines,
                                                    np.linalg.inv(img.affine)))

    if reg_template is None:
        reg_template = dpd.read_mni_template()

    if mapping is None:
        mapping = reg.syn_register_dwi(fdata, gtab, template=reg_template,
                                       **reg_kwargs)

    if isinstance(mapping, (str, nib.Nifti1Image)):
        mapping = reg.read_mapping(mapping, img, reg_template)

    fiber_probabilities = np.zeros((len(xform_sl), len(bundles)))

    # For expedience, we approximate each streamline as a 100 point curve:
    fgarray = _resample_bundle(xform_sl, 100)
    streamlines_in_bundles = np.zeros((len(xform_sl), len(bundles)))
    min_dist_coords = np.zeros((len(xform_sl), len(bundles), 2))

    fiber_groups = {}

    for bundle_idx, bundle in enumerate(bundles):
        # Get the ROI coordinates:
        ROI0 = bundles[bundle]['ROIs'][0]
        ROI1 = bundles[bundle]['ROIs'][1]
        if not isinstance(ROI0, np.ndarray):
            ROI0 = ROI0.get_data()

        warped_ROI0 = patch_up_roi(
            mapping.transform_inverse(
                ROI0,
                interpolation='nearest')).astype(bool)

        if not isinstance(ROI1, np.ndarray):
            ROI1 = ROI1.get_data()

        warped_ROI1 = patch_up_roi(
            mapping.transform_inverse(
                ROI1,
                interpolation='nearest')).astype(bool)

        roi_coords0 = np.array(np.where(warped_ROI0)).T
        roi_coords1 = np.array(np.where(warped_ROI1)).T

        crosses_midline = bundles[bundle]['cross_midline']

        # If the probability map doesn't exist, default to all ones with the
        # same shape as the ROIs:
        prob_map = bundles[bundle].get('prob_map', np.ones(ROI0.shape))
        if not isinstance(prob_map, np.ndarray):
            prob_map = prob_map.get_data()
        warped_prob_map = mapping.transform_inverse(prob_map,
                                                    interpolation='nearest')
        fiber_probabilities = dts.values_from_volume(warped_prob_map,
                                                     fgarray)
        fiber_probabilities = np.mean(fiber_probabilities, -1)

        for sl_idx, sl in enumerate(xform_sl):
            if fiber_probabilities[sl_idx] > prob_threshold:
                if crosses_midline is not None:
                    if (np.any(sl[:, 0] > img.shape[0] // 2) and
                            np.any(sl[:, 0] < img.shape[0] // 2)):
                        # This means that the streamline does
                        # cross the midline:
                        if crosses_midline:
                            # This is what we want, keep going
                            pass
                        else:
                            # This is not what we want, skip to next streamline
                            continue
                dist0 = cdist(sl, roi_coords0, 'euclidean')
                if np.min(dist0) <= tol:
                    dist1 = cdist(sl, roi_coords1, 'euclidean')
                    if np.min(dist1) <= tol:
                        min_dist_coords[sl_idx, bundle_idx, 0] =\
                            np.argmin(dist0, 0)[0]
                        min_dist_coords[sl_idx, bundle_idx, 1] =\
                            np.argmin(dist1, 0)[0]
                        streamlines_in_bundles[sl_idx, bundle_idx] =\
                            fiber_probabilities[sl_idx]

    # Eliminate any fibers not selected using the plane ROIs:
    possible_fibers = np.sum(streamlines_in_bundles, -1) > 0
    xform_sl = xform_sl[possible_fibers]
    streamlines_in_bundles = streamlines_in_bundles[possible_fibers]
    min_dist_coords = min_dist_coords[possible_fibers]
    bundle_choice = np.argmax(streamlines_in_bundles, -1)

    for bundle_idx, bundle in enumerate(bundles):
        print(bundle)
        select_idx = np.where(bundle_choice == bundle_idx)
        # Use a list here, because Streamlines don't support item assignment:
        select_sl = list(xform_sl[select_idx])
        # Sub-sample min_dist_coords:
        min_dist_coords_bundle = min_dist_coords[select_idx]
        if len(select_sl) == 0:
            fiber_groups[bundle] = dts.Streamlines([])
            # There's nothing here, move to the next bundle:
            continue

        for idx in range(len(select_sl)):
            min0 = min_dist_coords_bundle[idx, bundle_idx, 0]
            min1 = min_dist_coords_bundle[idx, bundle_idx, 1]
            if min0 > min1:
                select_sl[idx] = select_sl[idx][::-1]
        # We'll set this to Streamlines object for the next steps (e.g.,
        # cleaning) because these objects support indexing with arrays:
        select_sl = dts.Streamlines(select_sl)
        fiber_groups[bundle] = select_sl

    return fiber_groups
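A usage sketch, assuming two waypoint ROI images and a probability map on
disk, plus `streamlines` from tracking; every file name here is a
placeholder:

import nibabel as nib

bundles = {'CST_L': {'ROIs': [nib.load('CST_L_roi1.nii.gz'),
                              nib.load('CST_L_roi2.nii.gz')],
                     'rules': [True, True],
                     'prob_map': nib.load('CST_L_prob.nii.gz'),
                     'cross_midline': False}}

fiber_groups = segment('dwi.nii.gz', 'dwi.bval', 'dwi.bvec',
                       streamlines, bundles, prob_threshold=0.1)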
Example #9
else:
    mapping = reg.read_mapping('./mapping.nii.gz', img, MNI_T2_img)

bundle_names = ["CST", "UF", "CC_ForcepsMajor", "CC_ForcepsMinor"]
bundles = api.make_bundle_dict(bundle_names=bundle_names, seg_algo="reco")

print("Tracking...")
if not op.exists('dti_streamlines_reco.trk'):
    seed_roi = np.zeros(img.shape[:-1])
    for bundle in bundles:
        if bundle != 'whole_brain':
            sl_xform = dts.Streamlines(
                dtu.transform_tracking_output(bundles[bundle]['sl'],
                                              MNI_T2_img.affine))

            delta = dts.values_from_volume(mapping.backward, sl_xform,
                                           np.eye(4))
            # Apply the sampled per-point displacement to each point
            # (elementwise add; the builtin sum(d, s) would not do this):
            sl_xform = [s + np.asarray(d) for d, s in zip(delta, sl_xform)]

            sl_xform = dts.Streamlines(
                dtu.transform_tracking_output(sl_xform,
                                              np.linalg.inv(
                                                  MNI_T2_img.affine)))

            sft = StatefulTractogram(sl_xform, img, Space.RASMM)
            save_tractogram(sft, f'./{bundle}_atlas.trk')

            sl_xform = dts.Streamlines(
                dtu.transform_tracking_output(sl_xform,
                                              np.linalg.inv(img.affine)))

            for sl in sl_xform:
                # The source snippet is truncated here; a plausible
                # completion marks each streamline's voxels in the seed ROI:
                sl_as_idx = np.round(sl).astype(np.intp)
                seed_roi[sl_as_idx[:, 0], sl_as_idx[:, 1], sl_as_idx[:, 2]] = 1
Example #10
def streams2graph(atlas_for_streams, streams, dir_path, track_type,
                  target_samples, conn_model, network, node_size, dens_thresh,
                  ID, roi, min_span_tree, disp_filt, parc, prune, atlas,
                  uatlas, labels, coords, norm, binary, directget, warped_fa,
                  min_length, error_margin):
    """
    Use tracked streamlines as a basis for estimating a structural connectome.

    Parameters
    ----------
    atlas_for_streams : str
        File path to atlas parcellation Nifti1Image in T1w-conformed space.
    streams : str
        File path to streamline array sequence in .trk format.
    dir_path : str
        Path to directory containing subject derivative data for a given
        pynets run.
    track_type : str
        Tracking algorithm used (e.g. 'local' or 'particle').
    target_samples : int
        Total number of streamline samples specified to generate streams.
    conn_model : str
        Connectivity reconstruction method (e.g. 'csa', 'tensor', 'csd').
    network : str
        Resting-state network based on Yeo-7 and Yeo-17 naming (e.g. 'Default')
        used to filter nodes in the study of brain subgraphs.
    node_size : int
        Spherical centroid node size in the case that coordinate-based
        centroids are used as ROI's for tracking.
    dens_thresh : bool
        Indicates whether a target graph density is to be used as the basis for
        thresholding.
    ID : str
        A subject id or other unique identifier.
    roi : str
        File path to binarized/boolean region-of-interest Nifti1Image file.
    min_span_tree : bool
        Indicates whether local thresholding from the Minimum Spanning Tree
        should be used.
    disp_filt : bool
        Indicates whether local thresholding using a disparity filter and
        'backbone network' should be used.
    parc : bool
        Indicates whether to use parcels instead of coordinates as ROI nodes.
    prune : bool
        Indicates whether to prune final graph of disconnected nodes/isolates.
    atlas : str
        Name of atlas parcellation used.
    uatlas : str
        File path to atlas parcellation Nifti1Image in MNI template space.
    labels : list
        List of string labels corresponding to graph nodes.
    coords : list
        List of (x, y, z) tuples corresponding to a coordinate atlas used or
        which represent the center-of-mass of each parcellation node.
    norm : int
        Indicates method of normalizing resulting graph.
    binary : bool
        Indicates whether to binarize resulting graph edges to form an
        unweighted graph.
    directget : str
        The statistical approach to tracking. Options are:
        det (deterministic), closest (clos), boot (bootstrapped),
        and prob (probabilistic).
    warped_fa : str
        File path to MNI-space warped FA Nifti1Image.
    min_length : int
        Minimum fiber length threshold in mm to restrict tracking.
    error_margin : int
        Euclidean margin of error for classifying a streamline as a
        connection to an ROI. Default is 2 voxels.

    Returns
    -------
    atlas_for_streams : str
        File path to atlas parcellation Nifti1Image in T1w-conformed space.
    streams : str
        File path to streamline array sequence in .trk format.
    conn_matrix : array
        Adjacency matrix stored as an m x n array of nodes and edges.
    track_type : str
        Tracking algorithm used (e.g. 'local' or 'particle').
    target_samples : int
        Total number of streamline samples specified to generate streams.
    dir_path : str
        Path to directory containing subject derivative data for given run.
    conn_model : str
        Connectivity reconstruction method (e.g. 'csa', 'tensor', 'csd').
    network : str
        Resting-state network based on Yeo-7 and Yeo-17 naming (e.g. 'Default')
        used to filter nodes in the study of brain subgraphs.
    node_size : int
        Spherical centroid node size in the case that coordinate-based
        centroids are used as ROI's for tracking.
    dens_thresh : bool
        Indicates whether a target graph density is to be used as the basis for
        thresholding.
    ID : str
        A subject id or other unique identifier.
    roi : str
        File path to binarized/boolean region-of-interest Nifti1Image file.
    min_span_tree : bool
        Indicates whether local thresholding from the Minimum Spanning Tree
        should be used.
    disp_filt : bool
        Indicates whether local thresholding using a disparity filter and
        'backbone network' should be used.
    parc : bool
        Indicates whether to use parcels instead of coordinates as ROI nodes.
    prune : bool
        Indicates whether to prune final graph of disconnected nodes/isolates.
    atlas : str
        Name of atlas parcellation used.
    uatlas : str
        File path to atlas parcellation Nifti1Image in MNI template space.
    labels : list
        List of string labels corresponding to graph nodes.
    coords : list
        List of (x, y, z) tuples corresponding to a coordinate atlas used or
        which represent the center-of-mass of each parcellation node.
    norm : int
        Indicates method of normalizing resulting graph.
    binary : bool
        Indicates whether to binarize resulting graph edges to form an
        unweighted graph.
    directget : str
        The statistical approach to tracking. Options are: det (deterministic),
        closest (clos), boot (bootstrapped), and prob (probabilistic).
    min_length : int
        Minimum fiber length threshold in mm to restrict tracking.
    error_margin : int
        Euclidean margin of error for classifying a streamline as a
        connection to an ROI. Default is 2 voxels.

    References
    ----------
    .. [1] Sporns, O., Tononi, G., & Kötter, R. (2005). The human connectome:
      A structural description of the human brain. PLoS Computational Biology.
      https://doi.org/10.1371/journal.pcbi.0010042
    .. [2] Sotiropoulos, S. N., & Zalesky, A. (2019). Building connectomes
      using diffusion MRI: why, how and but. NMR in Biomedicine.
      https://doi.org/10.1002/nbm.3752
    .. [3] Chung, M. K., Hanson, J. L., Adluru, N., Alexander, A. L., Davidson,
      R. J., & Pollak, S. D. (2017). Integrative Structural Brain Network
      Analysis in Diffusion Tensor Imaging. Brain Connectivity.
      https://doi.org/10.1089/brain.2016.0481
    """
    import gc
    import os
    import time
    from dipy.tracking.streamline import Streamlines, values_from_volume
    from dipy.tracking._utils import _mapping_to_voxel, _to_voxel_coordinates
    import networkx as nx
    from itertools import combinations
    from collections import defaultdict
    from pynets.core import utils, nodemaker
    from pynets.dmri.utils import generate_sl
    from dipy.io.streamline import load_tractogram
    from dipy.io.stateful_tractogram import Space, Origin
    from pynets.core.utils import load_runconfig

    hardcoded_params = load_runconfig()
    fa_wei = hardcoded_params["StructuralNetworkWeighting"]["fa_weighting"][0]
    fiber_density = hardcoded_params["StructuralNetworkWeighting"][
        "fiber_density"][0]
    overlap_thr = hardcoded_params["StructuralNetworkWeighting"][
        "overlap_thr"][0]
    roi_neighborhood_tol = \
        hardcoded_params['tracking']["roi_neighborhood_tol"][0]

    start = time.time()

    if float(roi_neighborhood_tol) <= float(error_margin):
        raise ValueError('roi_neighborhood_tol preset must exceed the value '
                         'of the structural connectome error_margin '
                         'parameter.')
    else:
        print(f"Using fiber-roi intersection tolerance: {error_margin}...")

    # Load FA
    fa_img = nib.load(warped_fa)

    # Load parcellation
    roi_img = nib.load(atlas_for_streams)
    atlas_data = np.around(np.asarray(roi_img.dataobj))
    roi_zooms = roi_img.header.get_zooms()
    roi_shape = roi_img.shape

    # Read Streamlines
    if streams is not None:
        streamlines = [
            i.astype(np.float32) for i in Streamlines(
                load_tractogram(streams,
                                fa_img,
                                to_origin=Origin.NIFTI,
                                to_space=Space.VOXMM).streamlines)
        ]

        # from fury import actor, window
        # renderer = window.Renderer()
        # template_actor = actor.contour_from_roi(roi_img.get_fdata(),
        #                                         color=(50, 50, 50),
        #                                         opacity=0.05)
        # renderer.add(template_actor)
        # lines_actor = actor.streamtube(streamlines, window.colors.orange,
        #                                linewidth=0.3)
        # renderer.add(lines_actor)
        # window.show(renderer)

        roi_img.uncache()

        if fa_wei is True:
            fa_weights = values_from_volume(
                np.asarray(fa_img.dataobj, dtype=np.float32), streamlines,
                np.eye(4))
            global_fa_weights = list(utils.flatten(fa_weights))
            min_global_fa_wei = min([i for i in global_fa_weights if i > 0])
            max_global_fa_wei = max(global_fa_weights)
            fa_weights_norm = []
            # Here we normalize by global FA
            for val_list in fa_weights:
                fa_weights_norm.append(
                    np.nanmean((val_list - min_global_fa_wei) /
                               (max_global_fa_wei - min_global_fa_wei)))

        # Make streamlines into generators to keep memory at a minimum
        total_streamlines = len(streamlines)
        sl = [generate_sl(i) for i in streamlines]
        del streamlines

        # Instantiate empty networkX graph object & dictionary and create
        # voxel-affine mapping
        lin_T, offset = _mapping_to_voxel(np.eye(4))
        mx = len(np.unique(atlas_data.astype("uint16"))) - 1
        g = nx.Graph(ecount=0, vcount=mx)
        edge_dict = defaultdict(int)
        node_dict = dict(
            zip(np.unique(atlas_data.astype("uint16"))[1:],
                np.arange(mx) + 1))

        # Add empty vertices with label volume attributes
        for node in range(1, mx + 1):
            g.add_node(node,
                       roi_volume=np.sum(atlas_data.astype("uint16") == node))

        # Build graph
        pc = 0
        bad_idxs = []
        fiberlengths = {}
        fa_weights_dict = {}
        print(f"Quantifying fiber-ROI intersection for {atlas}:")
        for ix, s in enumerate(sl):
            # Percent counter
            pcN = int(round(100 * float(ix / total_streamlines)))
            if pcN % 10 == 0 and ix > 0 and pcN > pc:
                pc = pcN
                print(f"{pcN}%")

            # Map the streamlines coordinates to voxel coordinates and get
            # labels for label_volume
            vox_coords = _to_voxel_coordinates(Streamlines(s), lin_T, offset)

            lab_coords = [
                nodemaker.get_sphere(coord, error_margin, roi_zooms, roi_shape)
                for coord in vox_coords
            ]
            [i, j, k] = np.vstack(np.array(lab_coords)).T

            # get labels for label_volume
            lab_arr = atlas_data[i, j, k]
            # print(lab_arr)
            endlabels = []
            for jx, lab in enumerate(np.unique(lab_arr).astype("uint32")):
                if (lab > 0) and (np.sum(lab_arr == lab) >= overlap_thr):
                    try:
                        endlabels.append(node_dict[lab])
                    except BaseException:
                        bad_idxs.append(jx)
                        print(f"Label {lab} missing from parcellation. Check "
                              f"registration and ensure valid input "
                              f"parcellation file.")

            edges = combinations(endlabels, 2)
            for edge in edges:
                # Get fiber lengths along edge
                if fiber_density is True:
                    if not (edge[0], edge[1]) in fiberlengths.keys():
                        fiberlengths[(edge[0], edge[1])] = [len(vox_coords)]
                    else:
                        fiberlengths[(edge[0],
                                      edge[1])].append(len(vox_coords))

                # Get FA values along edge
                if fa_wei is True:
                    if not (edge[0], edge[1]) in fa_weights_dict.keys():
                        fa_weights_dict[(edge[0],
                                         edge[1])] = [fa_weights_norm[ix]]
                    else:
                        fa_weights_dict[(edge[0],
                                         edge[1])].append(fa_weights_norm[ix])

                lst = tuple([int(node) for node in edge])
                edge_dict[tuple(sorted(lst))] += 1

            edge_list = [(k[0], k[1], count) for k, count in edge_dict.items()]

            g.add_weighted_edges_from(edge_list)

            del lab_coords, lab_arr, endlabels, edges, edge_list

        gc.collect()

        # Add fiber density attributes for each edge
        # Adapted from the normalized fiber-density estimation routines of
        # Sebastian Tourbier.
        if fiber_density is True:
            print("Redefining edges on the basis of fiber density...")
            # Summarize total fibers and total label volumes
            total_fibers = 0
            total_volume = 0
            u_start = -1
            for u, v, d in g.edges(data=True):
                # d['weight'] holds the streamline count for this edge;
                # len(d) would only count the edge's attributes.
                total_fibers += d['weight']
                if u != u_start:
                    total_volume += g.nodes[int(u)]['roi_volume']
                u_start = u

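            # Edge fiber density, sketched: (edge count / total fibers)
            # divided by the edge's mean fiber length, scaled by
            # 2 * total ROI volume / (volume of ROI u + volume of ROI v),
            # then multiplied by 1000 for readability.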
            ix = 0
            for u, v, d in g.edges(data=True):
                if d['weight'] > 0:
                    edge_fiberlength_mean = np.nanmean(fiberlengths[(u, v)])
                    # Use a fresh name here: reassigning `fiber_density`
                    # would shadow the boolean flag tested again below.
                    edge_fiber_density = (float(
                        ((float(d['weight']) / float(total_fibers)) /
                         float(edge_fiberlength_mean)) *
                        ((2.0 * float(total_volume)) /
                         (g.nodes[int(u)]['roi_volume'] +
                          g.nodes[int(v)]['roi_volume'])))) * 1000
                else:
                    edge_fiber_density = 0
                g.edges[u, v].update({"fiber_density": edge_fiber_density})
                ix += 1

        if fa_wei is True:
            print("Re-weighting edges by FA...")
            # Add FA attributes for each edge
            ix = 0
            for u, v, d in g.edges(data=True):
                if d['weight'] > 0:
                    edge_average_fa = np.nanmean(fa_weights_dict[(u, v)])
                else:
                    edge_average_fa = np.nan
                g.edges[u, v].update({"fa_weight": edge_average_fa})
                ix += 1

        # Summarize weights
        if fa_wei is True and fiber_density is True:
            for u, v, d in g.edges(data=True):
                g.edges[u, v].update(
                    {"final_weight": (d['fa_weight']) * d['fiber_density']})
        elif fiber_density is True and fa_wei is False:
            for u, v, d in g.edges(data=True):
                g.edges[u, v].update({"final_weight": d['fiber_density']})
        elif fa_wei is True and fiber_density is False:
            for u, v, d in g.edges(data=True):
                g.edges[u, v].update(
                    {"final_weight": d['fa_weight'] * d['weight']})
        else:
            for u, v, d in g.edges(data=True):
                g.edges[u, v].update({"final_weight": d['weight']})

        # Convert weighted graph to numpy matrix
        conn_matrix_raw = nx.to_numpy_array(g, weight='final_weight')

        # Enforce symmetry
        conn_matrix = np.maximum(conn_matrix_raw, conn_matrix_raw.T)

        print("Structural graph completed:\n", str(time.time() - start))

        if len(bad_idxs) > 0:
            bad_idxs = sorted(list(set(bad_idxs)), reverse=True)
            for j in bad_idxs:
                del labels[j], coords[j]
    else:
        print(
            UserWarning('No valid streamlines detected. '
                        'Proceeding with an empty graph...'))
        mx = len(np.unique(atlas_data.astype("uint16"))) - 1
        conn_matrix = np.zeros((mx, mx))

    assert len(coords) == len(labels) == conn_matrix.shape[0]

    if network is not None:
        atlas_name = f"{atlas}_{network}_stage-rawgraph"
    else:
        atlas_name = f"{atlas}_stage-rawgraph"

    utils.save_coords_and_labels_to_json(coords,
                                         labels,
                                         dir_path,
                                         atlas_name,
                                         indices=None)

    coords = np.array(coords)
    labels = np.array(labels)

    if parc is True:
        node_size = "parc"

    # Save unthresholded
    utils.save_mat(
        conn_matrix,
        utils.create_raw_path_diff(ID, network, conn_model, roi, dir_path,
                                   node_size, target_samples, track_type, parc,
                                   directget, min_length, error_margin),
    )

    return (atlas_for_streams, streams, conn_matrix, track_type,
            target_samples, dir_path, conn_model, network, node_size,
            dens_thresh, ID, roi, min_span_tree, disp_filt, parc, prune, atlas,
            uatlas, labels, coords, norm, binary, directget, min_length,
            error_margin)
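The final conversion above keys the adjacency matrix off the
`final_weight` edge attribute. A small self-contained sketch of that idiom
(toy graph, not pynets data):

import numpy as np
import networkx as nx

g = nx.Graph()
g.add_edge(1, 2, weight=10, final_weight=0.42)
g.add_edge(2, 3, weight=3, final_weight=0.07)

# Only the named attribute is exported; absent edges become 0:
conn = nx.to_numpy_array(g, weight='final_weight')
conn = np.maximum(conn, conn.T)  # enforce symmetry
print(conn)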
Example #11
import numpy as np
from collections import defaultdict
from scipy.ndimage import binary_dilation, map_coordinates
from scipy.spatial import cKDTree
from dipy.segment.clustering import QuickBundles
from dipy.segment.metric import AveragePointwiseEuclideanMetric
from dipy.tracking.streamline import Streamlines, transform_streamlines

# `fiber_utils` and `_orient_to_same_start_region` are TractSeg-internal
# helpers assumed to be importable in the original module.


def evaluate_along_streamlines(scalar_img,
                               streamlines,
                               beginnings,
                               nr_points,
                               dilate=0,
                               predicted_peaks=None,
                               affine=None):
    # Runtime:
    # - default:                2.7s (test),    56s (all),      10s (test 4 bundles, 100 points)
    # - map_coordinate order 1: 1.9s (test),    26s (all),       6s (test 4 bundles, 100 points)
    # - map_coordinate order 3: 2.2s (test),    33s (all),
    # - values_from_volume:     2.5s (test),    43s (all),
    # - AFQ:                      ?s (test),     ?s (all),      85s  (test 4 bundles, 100 points)
    # => AFQ a lot slower than others

    streamlines = list(
        transform_streamlines(streamlines, np.linalg.inv(affine)))

    for i in range(dilate):
        beginnings = binary_dilation(beginnings)
    beginnings = beginnings.astype(np.uint8)
    streamlines = _orient_to_same_start_region(streamlines, beginnings)
    if predicted_peaks is not None:
        # scalar img can also be orig peaks
        best_orig_peaks = fiber_utils.get_best_original_peaks(
            predicted_peaks, scalar_img, peak_len_thr=0.00001)
        scalar_img = np.linalg.norm(best_orig_peaks, axis=-1)
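        # The scalar image becomes the per-voxel length (L2 norm) of the
        # best-matching original peak, so values sampled along the tract
        # reflect peak magnitude.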

    algorithm = "distance_map"  # equal_dist | distance_map | cutting_plane | afq

    if algorithm == "equal_dist":
        ### Sampling ###
        streamlines = fiber_utils.resample_fibers(streamlines,
                                                  nb_points=nr_points)
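        # np.array(streamlines) has shape (n_streamlines, nr_points, 3); the
        # transpose yields the (3, nr_points, n_streamlines) coordinate arrays
        # that map_coordinates expects for trilinear sampling (order=1).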
        values = map_coordinates(scalar_img, np.array(streamlines).T, order=1)
        ### Aggregation ###
        values_mean = np.array(values).mean(axis=1)
        values_std = np.array(values).std(axis=1)
        return values_mean, values_std

    if algorithm == "distance_map":  # cKDTree

        ### Sampling ###
        streamlines = fiber_utils.resample_fibers(streamlines,
                                                  nb_points=nr_points)
        values = map_coordinates(scalar_img, np.array(streamlines).T, order=1)

        ### Aggregating by cKDTree approach ###
        metric = AveragePointwiseEuclideanMetric()
        qb = QuickBundles(threshold=100., metric=metric)
        clusters = qb.cluster(streamlines)
        centroids = Streamlines(clusters.centroids)
        if len(centroids) > 1:
            print("WARNING: number clusters > 1 ({})".format(len(centroids)))
        _, segment_idxs = cKDTree(centroids.data, 1,
                                  copy_data=True).query(streamlines,
                                                        k=1)  # (2000, 100)

        values_t = np.array(values).T  # (2000, 100)

        # If we want to take weighted mean like in AFQ:
        # weights = dsa.gaussian_weights(Streamlines(streamlines))
        # values_t = weights * values_t
        # return np.sum(values_t, 0), None

        results_dict = defaultdict(list)
        for idx, sl in enumerate(values_t):
            for jdx, seg in enumerate(sl):
                results_dict[segment_idxs[idx, jdx]].append(seg)

        if len(results_dict.keys()) < nr_points:
            print(
                "WARNING: found less than required points. Filling up with centroid values."
            )
            centroid_values = map_coordinates(scalar_img,
                                              np.array([centroids[0]]).T,
                                              order=1)
            for i in range(nr_points):
                if len(results_dict[i]) == 0:
                    results_dict[i].append(np.array(centroid_values).T[0, i])

        results_mean = []
        results_std = []
        for key in sorted(results_dict.keys()):
            value = results_dict[key]
            if len(value) > 0:
                results_mean.append(np.array(value).mean())
                results_std.append(np.array(value).std())
            else:
                print("WARNING: empty segment")
                results_mean.append(0)
                results_std.append(0)
        return results_mean, results_std

    elif algorithm == "cutting_plane":
        # This will resample all streamlines to have equally distant points (resulting in a different number of
        # points per streamline). The "middle" of the tract is then estimated as the middle element of the
        # centroid (computed with QuickBundles). For each streamline, the point closest to this "middle" is
        # found, and the streamline's points are indexed outward from it. Finally, values are averaged across
        # streamlines by taking the mean over points that share the same index.

        ### Sampling ###
        streamlines = fiber_utils.resample_to_same_distance(
            streamlines, max_nr_points=nr_points)
        # map_coordinates does not allow streamlines with different lengths -> use values_from_volume
        values = np.array(
            values_from_volume(scalar_img, streamlines, affine=np.eye(4))).T

        ### Aggregating by Cutting Plane approach ###
        # Resample to all fibers having same number of points -> needed for QuickBundles
        streamlines_resamp = fiber_utils.resample_fibers(streamlines,
                                                         nb_points=nr_points)
        metric = AveragePointwiseEuclideanMetric()
        qb = QuickBundles(threshold=100., metric=metric)
        clusters = qb.cluster(streamlines_resamp)
        centroids = Streamlines(clusters.centroids)

        # Index of the middle point along the centroid
        middle_idx = int(nr_points / 2)
        middle_point = centroids[0][middle_idx]
        # For each streamline get idx for the point which is closest to the middle
        segment_idxs = fiber_utils.get_idxs_of_closest_points(
            streamlines, middle_point)

        # Align along the middle and assign indices
        segment_idxs_eqlen = []
        base_idx = 1000  # use higher index to avoid negative numbers for area below middle
        for idx, sl in enumerate(streamlines):
            sl_middle_pos = segment_idxs[idx]
            before_elems = sl_middle_pos
            after_elems = len(sl) - sl_middle_pos
            # indices for one streamline e.g. [998, 999, 1000, 1001, 1002, 1003]; 1000 is middle
            r = range((base_idx - before_elems), (base_idx + after_elems))
            segment_idxs_eqlen.append(r)
        segment_idxs = segment_idxs_eqlen

        # Calculate the maximum number of indices so that we do not end up with more indices than nr_points.
        # (This could be the case if one streamline is very off-center and therefore has many points on only
        # one side. In that case, the values of that streamline which lie too far out are cut off.)
        max_idx = base_idx + int(nr_points / 2)
        min_idx = base_idx - int(nr_points / 2)

        # Group by segment indices
        results_dict = defaultdict(list)
        for idx, sl in enumerate(values):
            for jdx, seg in enumerate(sl):
                current_idx = segment_idxs[idx][jdx]
                if current_idx >= min_idx and current_idx < max_idx:
                    results_dict[current_idx].append(seg)

        # If values missing fill up with centroid values
        if len(results_dict.keys()) < nr_points:
            print(
                "WARNING: found less than required points. Filling up with centroid values."
            )
            centroid_sl = [centroids[0]]
            centroid_sl = np.array(centroid_sl).T
            centroid_values = map_coordinates(scalar_img, centroid_sl, order=1)
            for idx, seg_idx in enumerate(range(min_idx, max_idx)):
                if len(results_dict[seg_idx]) == 0:
                    results_dict[seg_idx].append(
                        np.array(centroid_values).T[0, idx])

        # Aggregate by mean
        results_mean = []
        results_std = []
        for key in sorted(results_dict.keys()):
            value = results_dict[key]
            if len(value) > 0:
                results_mean.append(np.array(value).mean())
                results_std.append(np.array(value).std())
            else:
                print("WARNING: empty segment")
                results_mean.append(0)
                results_std.append(0)
        return results_mean, results_std

    elif algorithm == "afq":
        ### sampling + aggregation ###
        streamlines = fiber_utils.resample_fibers(streamlines,
                                                  nb_points=nr_points)
        streamlines = Streamlines(streamlines)
        weights = dsa.gaussian_weights(streamlines)
        results_mean = dsa.afq_profile(scalar_img,
                                       streamlines,
                                       affine=np.eye(4),
                                       weights=weights)
        results_std = np.zeros(nr_points)
        return results_mean, results_std
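A minimal usage sketch for evaluate_along_streamlines. The file names, the FA map, and the preloaded streamlines are assumptions for illustration, not from the source:

import nibabel as nib

fa_img = nib.load('FA.nii.gz')  # hypothetical scalar map
# hypothetical binary mask marking the bundle's start region:
beginnings = nib.load('CST_left_beginnings.nii.gz').get_fdata() > 0
mean_per_seg, std_per_seg = evaluate_along_streamlines(
    fa_img.get_fdata(), streamlines, beginnings, nr_points=100,
    affine=fa_img.affine)
# mean_per_seg / std_per_seg each contain nr_points values, one per segment.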
Example No. 12
def segment(fdata, fbval, fbvec, streamlines, bundle_dict, mapping,
            reg_prealign=None, b0_threshold=0, reg_template=None,
            prob_threshold=0):
    """
    Segment streamlines into bundles based on inclusion ROIs.

    Parameters
    ----------
    fdata, fbval, fbvec : str
        Full path to data, bvals, bvecs

    streamlines : list of 2D arrays
        Each array is a streamline, shape (3, N).

    bundle_dict: dict
        The format is something like::

            {'name': {'ROIs': [img1, img2],
                      'rules': [True, True],
                      'prob_map': img3,
                      'cross_midline': False}}

    mapping : DiffeomorphicMap object, str or nib.Nifti1Image
        A mapping between DWI space and a template, used to align the ROIs
        to the data.

    reg_template : str or nib.Nifti1Image, optional.
        Template to use for registration (defaults to the MNI T2)

    prob_threshold : float.
        Initial cleaning of fiber groups is done using probability maps from
        [Hua2008]_. Here, we choose an average probability that needs to be
        exceeded for an individual streamline to be retained. Default: 0.

    References
    ----------
    .. [Hua2008] Hua K, Zhang J, Wakana S, Jiang H, Li X, et al. (2008)
       Tract probability maps in stereotaxic spaces: analyses of white
       matter anatomy and tract-specific quantification. Neuroimage 39:
       336-347
    """
    img, _, gtab, _ = ut.prepare_data(fdata, fbval, fbvec,
                                      b0_threshold=b0_threshold)

    tol = dts.dist_to_corner(img.affine)

    if reg_template is None:
        reg_template = dpd.read_mni_template()

    # Classify the streamlines and split those that: 1) cross the
    # midline, and 2) pass under 10 mm below the mid-point of their
    # representation in the template space:
    xform_sl, crosses = split_streamlines(streamlines, img)

    if isinstance(mapping, str) or isinstance(mapping, nib.Nifti1Image):
        if reg_prealign is None:
            reg_prealign = np.eye(4)
        mapping = reg.read_mapping(mapping, img, reg_template,
                                   prealign=reg_prealign)

    fiber_probabilities = np.zeros((len(xform_sl), len(bundle_dict)))

    # For expedience, we approximate each streamline as a 100 point curve:
    fgarray = _resample_bundle(xform_sl, 100)
    streamlines_in_bundles = np.zeros((len(xform_sl), len(bundle_dict)))
    min_dist_coords = np.zeros((len(xform_sl), len(bundle_dict), 2))

    fiber_groups = {}

    for bundle_idx, bundle in enumerate(bundle_dict):
        rules = bundle_dict[bundle]['rules']
        include_rois = []
        exclude_rois = []
        for rule_idx, rule in enumerate(rules):
            roi = bundle_dict[bundle]['ROIs'][rule_idx]
            if not isinstance(roi, np.ndarray):
                roi = roi.get_data()
            warped_roi = auv.patch_up_roi(
                (mapping.transform_inverse(
                    roi,
                    interpolation='linear')) > 0)

            if rule:
                # include ROI:
                include_rois.append(np.array(np.where(warped_roi)).T)
            else:
                # Exclude ROI:
                exclude_rois.append(np.array(np.where(warped_roi)).T)

        crosses_midline = bundle_dict[bundle]['cross_midline']

        # If there is no probability map, use a map of all ones with the
        # same shape as the ROIs:
        prob_map = bundle_dict[bundle].get('prob_map', np.ones(roi.shape))

        if not isinstance(prob_map, np.ndarray):
            prob_map = prob_map.get_data()
        warped_prob_map = mapping.transform_inverse(prob_map,
                                                    interpolation='nearest')
        fiber_probabilities = dts.values_from_volume(warped_prob_map,
                                                     fgarray)
        fiber_probabilities = np.mean(fiber_probabilities, -1)

        for sl_idx, sl in enumerate(xform_sl):
            if fiber_probabilities[sl_idx] > prob_threshold:
                if crosses_midline is not None:
                    if crosses[sl_idx]:
                        # This means that the streamline does
                        # cross the midline:
                        if crosses_midline:
                            # This is what we want, keep going
                            pass
                        else:
                            # This is not what we want, skip to next streamline
                            continue

                is_close, dist = _check_sl_with_inclusion(sl, include_rois,
                                                          tol)
                if is_close:
                    is_far = _check_sl_with_exclusion(sl, exclude_rois,
                                                      tol)
                    if is_far:
                        min_dist_coords[sl_idx, bundle_idx, 0] =\
                            np.argmin(dist[0], 0)[0]
                        min_dist_coords[sl_idx, bundle_idx, 1] =\
                            np.argmin(dist[1], 0)[0]
                        streamlines_in_bundles[sl_idx, bundle_idx] =\
                            fiber_probabilities[sl_idx]

    # Eliminate any fibers not selected using the plane ROIs:
    possible_fibers = np.sum(streamlines_in_bundles, -1) > 0
    xform_sl = xform_sl[possible_fibers]
    streamlines_in_bundles = streamlines_in_bundles[possible_fibers]
    min_dist_coords = min_dist_coords[possible_fibers]
    bundle_choice = np.argmax(streamlines_in_bundles, -1)

    # We do another round through, so that we can orient all the
    # streamlines within a bundle in the same orientation with respect to
    # the ROIs. This order is ARBITRARY but CONSISTENT (going from ROI0
    # to ROI1).
    for bundle_idx, bundle in enumerate(bundle_dict):
        select_idx = np.where(bundle_choice == bundle_idx)
        # Use a list here, because Streamlines don't support item assignment:
        select_sl = list(xform_sl[select_idx])
        if len(select_sl) == 0:
            fiber_groups[bundle] = dts.Streamlines([])
            # There's nothing here, move to the next bundle:
            continue

        # Sub-sample min_dist_coords:
        min_dist_coords_bundle = min_dist_coords[select_idx]
        for idx in range(len(select_sl)):
            min0 = min_dist_coords_bundle[idx, bundle_idx, 0]
            min1 = min_dist_coords_bundle[idx, bundle_idx, 1]
            if min0 > min1:
                select_sl[idx] = select_sl[idx][::-1]
        # Set this to nibabel.Streamlines object for output:
        select_sl = dts.Streamlines(select_sl)
        fiber_groups[bundle] = select_sl

    return fiber_groups
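A minimal sketch of how segment might be called. All file names, the ROI images, the preloaded streamlines, and the mapping are hypothetical placeholders:

import nibabel as nib

bundle_dict = {
    'CST_L': {'ROIs': [nib.load('CST_L_roi1.nii.gz'),
                       nib.load('CST_L_roi2.nii.gz')],
              'rules': [True, True],
              'prob_map': nib.load('CST_L_prob.nii.gz'),
              'cross_midline': False}}

fiber_groups = segment('dwi.nii.gz', 'dwi.bval', 'dwi.bvec',
                       streamlines, bundle_dict, mapping,
                       prob_threshold=0.1)
# fiber_groups['CST_L'] holds the selected, consistently oriented streamlines.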
Example No. 13
def streams2graph(atlas_mni, streams, overlap_thr, dir_path, track_type, target_samples, conn_model, network, node_size,
                  dens_thresh, ID, roi, min_span_tree, disp_filt, parc, prune, atlas, uatlas, labels, coords, norm,
                  binary, directget, warped_fa, error_margin, min_length, fa_wei=True):
    '''
    Use tracked streamlines as a basis for estimating a structural connectome.

    Parameters
    ----------
    atlas_mni : str
        File path to atlas parcellation Nifti1Image in T1w-warped MNI space.
    streams : str
        File path to streamline array sequence in .trk format.
    overlap_thr : int
        Number of voxels for which a given streamline must intersect with an ROI
        for an edge to be counted.
    dir_path : str
        Path to directory containing subject derivative data for a given pynets run.
    track_type : str
        Tracking algorithm used (e.g. 'local' or 'particle').
    target_samples : int
        Total number of streamline samples specified to generate streams.
    conn_model : str
        Connectivity reconstruction method (e.g. 'csa', 'tensor', 'csd').
    network : str
        Resting-state network based on Yeo-7 and Yeo-17 naming (e.g. 'Default')
        used to filter nodes in the study of brain subgraphs.
    node_size : int
        Spherical centroid node size in the case that coordinate-based centroids
        are used as ROI's for tracking.
    dens_thresh : bool
        Indicates whether a target graph density is to be used as the basis for
        thresholding.
    ID : str
        A subject id or other unique identifier.
    roi : str
        File path to binarized/boolean region-of-interest Nifti1Image file.
    min_span_tree : bool
        Indicates whether local thresholding from the Minimum Spanning Tree
        should be used.
    disp_filt : bool
        Indicates whether local thresholding using a disparity filter and
        'backbone network' should be used.
    parc : bool
        Indicates whether to use parcels instead of coordinates as ROI nodes.
    prune : bool
        Indicates whether to prune final graph of disconnected nodes/isolates.
    atlas : str
        Name of atlas parcellation used.
    uatlas : str
        File path to atlas parcellation Nifti1Image in MNI template space.
    labels : list
        List of string labels corresponding to graph nodes.
    coords : list
        List of (x, y, z) tuples corresponding to a coordinate atlas used or
        which represent the center-of-mass of each parcellation node.
    norm : int
        Indicates method of normalizing resulting graph.
    binary : bool
        Indicates whether to binarize resulting graph edges to form an
        unweighted graph.
    directget : str
        The statistical approach to tracking. Options are: det (deterministic), closest (clos), boot (bootstrapped),
        and prob (probabilistic).
    warped_fa : str
        File path to MNI-space warped FA Nifti1Image.
    error_margin : int
        Euclidean margin of error for classifying a streamline as a connection to an ROI. Default is 2 voxels.
    min_length : int
        Minimum fiber length threshold in mm to restrict tracking.
    fa_wei : bool
        Scale streamline count edges by fractional anisotropy (FA). Default is True.

    Returns
    -------
    atlas_mni : str
        File path to atlas parcellation Nifti1Image in T1w-warped MNI space.
    streams : str
        File path to streamline array sequence in .trk format.
    conn_matrix : array
        Adjacency matrix stored as an m x n array of nodes and edges.
    track_type : str
        Tracking algorithm used (e.g. 'local' or 'particle').
    target_samples : int
        Total number of streamline samples specified to generate streams.
    dir_path : str
        Path to directory containing subject derivative data for given run.
    conn_model : str
        Connectivity reconstruction method (e.g. 'csa', 'tensor', 'csd').
    network : str
        Resting-state network based on Yeo-7 and Yeo-17 naming (e.g. 'Default')
        used to filter nodes in the study of brain subgraphs.
    node_size : int
        Spherical centroid node size in the case that coordinate-based centroids
        are used as ROI's for tracking.
    dens_thresh : bool
        Indicates whether a target graph density is to be used as the basis for
        thresholding.
    ID : str
        A subject id or other unique identifier.
    roi : str
        File path to binarized/boolean region-of-interest Nifti1Image file.
    min_span_tree : bool
        Indicates whether local thresholding from the Minimum Spanning Tree
        should be used.
    disp_filt : bool
        Indicates whether local thresholding using a disparity filter and
        'backbone network' should be used.
    parc : bool
        Indicates whether to use parcels instead of coordinates as ROI nodes.
    prune : bool
        Indicates whether to prune final graph of disconnected nodes/isolates.
    atlas : str
        Name of atlas parcellation used.
    uatlas : str
        File path to atlas parcellation Nifti1Image in MNI template space.
    labels : list
        List of string labels corresponding to graph nodes.
    coords : list
        List of (x, y, z) tuples corresponding to a coordinate atlas used or
        which represent the center-of-mass of each parcellation node.
    norm : int
        Indicates method of normalizing resulting graph.
    binary : bool
        Indicates whether to binarize resulting graph edges to form an
        unweighted graph.
    directget : str
        The statistical approach to tracking. Options are: det (deterministic),
        closest (clos), boot (bootstrapped), and prob (probabilistic).
    min_length : int
        Minimum fiber length threshold in mm to restrict tracking.

    References
    ----------
    .. [1] Sporns, O., Tononi, G., & Kötter, R. (2005). The human connectome:
      A structural description of the human brain. PLoS Computational Biology.
      https://doi.org/10.1371/journal.pcbi.0010042
    .. [2] Sotiropoulos, S. N., & Zalesky, A. (2019). Building connectomes
      using diffusion MRI: why, how and but. NMR in Biomedicine.
      https://doi.org/10.1002/nbm.3752
    .. [3] Chung, M. K., Hanson, J. L., Adluru, N., Alexander, A. L., Davidson,
      R. J., & Pollak, S. D. (2017). Integrative Structural Brain Network
      Analysis in Diffusion Tensor Imaging. Brain Connectivity.
      https://doi.org/10.1089/brain.2016.0481

    '''
    import gc
    import time
    from dipy.tracking.streamline import Streamlines, values_from_volume
    from dipy.tracking._utils import (_mapping_to_voxel, _to_voxel_coordinates)
    import networkx as nx
    from itertools import combinations
    from collections import defaultdict
    from pynets.core import utils, nodemaker
    from pynets.dmri.dmri_utils import generate_sl
    from dipy.io.streamline import load_tractogram
    from dipy.io.stateful_tractogram import Space, Origin

    start = time.time()

    # Load parcellation
    roi_img = nib.load(atlas_mni)
    atlas_data = np.around(np.asarray(roi_img.dataobj))
    roi_zooms = roi_img.header.get_zooms()
    roi_shape = roi_img.shape

    # Read Streamlines
    streamlines = [i.astype(np.float32) for i in Streamlines(load_tractogram(streams, roi_img, to_space=Space.RASMM,
                                                                             to_origin=Origin.TRACKVIS,
                                                                             bbox_valid_check=False).streamlines)]
    roi_img.uncache()

    if fa_wei is True:
        fa_weights = values_from_volume(np.asarray(nib.load(warped_fa).dataobj), streamlines, np.eye(4))
        global_fa_weights = list(utils.flatten(fa_weights))
        min_global_fa_wei = min(i for i in global_fa_weights if i > 0)
        max_global_fa_wei = max(global_fa_weights)
        fa_weights_norm = []
        # Here we normalize by global FA
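        # i.e. each streamline's weight is mean((FA - FA_min) / (FA_max - FA_min))
        # over its sampled values, with min/max taken across all streamlines.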
        for val_list in fa_weights:
            fa_weights_norm.append(np.nanmean((val_list - min_global_fa_wei) /
                                              (max_global_fa_wei - min_global_fa_wei)))

    # Make streamlines into generators to keep memory at a minimum
    sl = [generate_sl(i) for i in streamlines]
    del streamlines

    # Instantiate empty networkX graph object & dictionary and create voxel-affine mapping
    lin_T, offset = _mapping_to_voxel(np.eye(4))
    mx = len(np.unique(atlas_data.astype('uint16'))) - 1
    g = nx.Graph(ecount=0, vcount=mx)
    edge_dict = defaultdict(int)
    node_dict = dict(zip(np.unique(atlas_data.astype('uint16'))[1:], np.arange(mx) + 1))
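    # node_dict maps each nonzero atlas label to a consecutive node id in 1..mx.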

    # Add empty vertices
    for node in range(1, mx + 1):
        g.add_node(node)

    # Build graph
    ix = 0
    bad_idxs = []
    for s in sl:
        # Map the streamlines coordinates to voxel coordinates and get labels for label_volume
        vox_coords = _to_voxel_coordinates(Streamlines(s), lin_T, offset)
        lab_coords = [nodemaker.get_sphere(coord, error_margin, roi_zooms, roi_shape) for coord in vox_coords]
        [i, j, k] = np.vstack(np.array(lab_coords)).T

        # get labels for label_volume
        lab_arr = atlas_data[i, j, k]
        endlabels = []
        for lab_ix, lab in enumerate(np.unique(lab_arr).astype('uint32')):
            if (lab > 0) and (np.sum(lab_arr == lab) >= overlap_thr):
                try:
                    endlabels.append(node_dict[lab])
                except KeyError:
                    # lab_ix keeps the per-streamline counter ix intact, since
                    # ix indexes fa_weights_norm below.
                    bad_idxs.append(lab_ix)
                    print(f"Label {lab} missing from parcellation. Check registration and ensure valid input "
                          f"parcellation file.")

        edges = combinations(endlabels, 2)
        for edge in edges:
            lst = tuple([int(node) for node in edge])
            edge_dict[tuple(sorted(lst))] += 1
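        # edge_dict accumulates, across streamlines, how many times each
        # sorted node pair has been connected so far.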

        edge_list = [(k[0], k[1], v) for k, v in edge_dict.items()]

        if fa_wei is True:
            # Add edgelist to g, weighted by average fa of the streamline
            g.add_weighted_edges_from(edge_list, weight=fa_weights_norm[ix])
        else:
            g.add_weighted_edges_from(edge_list)
        ix = ix + 1

        del lab_coords, lab_arr, endlabels, edges, edge_list

    gc.collect()

    if fa_wei is True:
        # Add average fa weights to streamline counts
        for u, v in list(g.edges):
            h = g.get_edge_data(u, v)
            edge_att_dict = {}
            for e, w in h.items():
                if w not in edge_att_dict:
                    edge_att_dict[w] = [e]
                else:
                    edge_att_dict[w].append(e)
            for key in edge_att_dict.keys():
                edge_att_dict[key] = np.nanmean(edge_att_dict[key])
            vals = []
            for e2, w2 in edge_att_dict.items():
                vals.append(float(e2) * float(w2))
            g.edges[u, v].update({'weight': np.nanmean(vals)})

    # Convert to numpy matrix
    conn_matrix_raw = nx.to_numpy_array(g)

    # Impose symmetry
    conn_matrix = np.maximum(conn_matrix_raw, conn_matrix_raw.T)

    print('Graph Building Complete:\n', str(time.time() - start))

    if len(bad_idxs) > 0:
        bad_idxs = sorted(list(set(bad_idxs)), reverse=True)
        for j in bad_idxs:
            del labels[j], coords[j]

    coords = np.array(coords)
    labels = np.array(labels)

    assert len(coords) == len(labels) == conn_matrix.shape[0]

    return (atlas_mni, streams, conn_matrix, track_type, target_samples, dir_path, conn_model, network, node_size,
            dens_thresh, ID, roi, min_span_tree, disp_filt, parc, prune, atlas, uatlas, labels, coords, norm, binary,
            directget, min_length)
Example No. 14
        folder_name = r'C:\Users\Admin\my_scripts\Ax3D_Pack\V6\after_file_prep' + s
        dir_name = folder_name + r'\streamlines'
        gtab, data, affine, labels, white_matter, nii_file, bvec_file = load_dwi_files(folder_name)
        weight_by_file = bvec_file[:-5:] + '_' + weight_by + '.nii'
        weight_by_img = nib.load(weight_by_file)
        weight_by_data = weight_by_img.get_data()
        affine = weight_by_img.affine
        pfr_file = bvec_file[:-5:] + '_1.5_2_AxFr5.nii'
        pfr_img = nib.load(pfr_file)
        pfr_data = pfr_img.get_data()
        index_to_text_file = r'C:\Users\Admin\my_scripts\aal\megaatlas\megaatlascortex2nii.txt'
        for j, m in enumerate(masks):
            tract_path = dir_name + n + m + '.trk'
            streamlines = load_ft(tract_path)
            stream = list(streamlines)
            vol_per_tract = values_from_volume(weight_by_data, stream, affine=affine)
            pfr_per_tract = values_from_volume(pfr_data, stream, affine=affine)
            vol_vec = weight_by_data.flatten()
            q = np.quantile(vol_vec[vol_vec > 0], 0.95)
            mean_vol_per_tract = []
            for v, pfr in zip(vol_per_tract, pfr_per_tract):
                v = np.asanyarray(v)
                pfr = np.asanyarray(pfr)
                # Keep points below the 95th-percentile cutoff (outlier
                # removal) that also have PFR > 0.5, combining both
                # conditions elementwise:
                keep = (v < q) & (pfr > 0.5)
                mean_vol_per_tract.append(np.nanmean(v[keep]))

            mean_vol_per_part = np.nanmean(mean_vol_per_tract)
            mean_vals[i,j] = mean_vol_per_part
    np.save(r'C:\Users\Admin\my_scripts\Ax3D_Pack\mean_vals\different_way.npy', mean_vals)
    a = mean_vals
Example No. 15
def test_values_from_volume():
    decimal = 4
    data3d = np.arange(2000).reshape(20, 10, 10)
    # Test two cases of 4D data (handled differently)
    # One where the last dimension is length 3:
    data4d_3vec = np.arange(6000).reshape(20, 10, 10, 3)
    # The other where the last dimension is not 3:
    data4d_2vec = np.arange(4000).reshape(20, 10, 10, 2)
    for dt in [np.float32, np.float64]:
        for data in [data3d, data4d_3vec, data4d_2vec]:
            sl1 = [np.array([[1, 0, 0],
                             [1.5, 0, 0],
                             [2, 0, 0],
                             [2.5, 0, 0]]).astype(dt),
                   np.array([[2, 0, 0],
                             [3.1, 0, 0],
                             [3.9, 0, 0],
                             [4.1, 0, 0]]).astype(dt)]

            ans1 = [[data[1, 0, 0],
                     data[1, 0, 0] + (data[2, 0, 0] - data[1, 0, 0]) / 2,
                     data[2, 0, 0],
                     data[2, 0, 0] + (data[3, 0, 0] - data[2, 0, 0]) / 2],
                    [data[2, 0, 0],
                     data[3, 0, 0] + (data[4, 0, 0] - data[3, 0, 0]) * 0.1,
                     data[3, 0, 0] + (data[4, 0, 0] - data[3, 0, 0]) * 0.9,
                     data[4, 0, 0] + (data[5, 0, 0] - data[4, 0, 0]) * 0.1]]
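            # With order-1 (trilinear) interpolation and motion only along the
            # first axis, each sampled value is a linear blend of the two
            # neighboring voxels, e.g. f(3.1) = f(3) + 0.1 * (f(4) - f(3)).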

            vv = values_from_volume(data, sl1)
            npt.assert_almost_equal(vv, ans1, decimal=decimal)

            vv = values_from_volume(data, np.array(sl1))
            npt.assert_almost_equal(vv, ans1, decimal=decimal)

            affine = np.eye(4)
            affine[:, 3] = [-100, 10, 1, 1]
            x_sl1 = ut.move_streamlines(sl1, affine)
            x_sl2 = ut.move_streamlines(sl1, affine)

            vv = values_from_volume(data, x_sl1, affine=affine)
            npt.assert_almost_equal(vv, ans1, decimal=decimal)

            # The generator has already been consumed so needs to be
            # regenerated:
            x_sl1 = list(ut.move_streamlines(sl1, affine))
            vv = values_from_volume(data, x_sl1, affine=affine)
            npt.assert_almost_equal(vv, ans1, decimal=decimal)

            # Test that the streamlines haven't mutated:
            l_sl2 = list(x_sl2)
            npt.assert_equal(x_sl1, l_sl2)

            vv = values_from_volume(data, np.array(x_sl1), affine=affine)
            npt.assert_almost_equal(vv, ans1, decimal=decimal)
            npt.assert_equal(np.array(x_sl1), np.array(l_sl2))

            # Test for lists of streamlines with different numbers of nodes:
            sl2 = [sl1[0][:-1], sl1[1]]
            ans2 = [ans1[0][:-1], ans1[1]]
            vv = values_from_volume(data, sl2)
            for ii, v in enumerate(vv):
                npt.assert_almost_equal(v, ans2[ii], decimal=decimal)

    # We raise an error if the streamlines fed don't make sense. In this
    # case, a tuple instead of a list, generator or array
    nonsense_sl = (np.array([[1, 0, 0],
                             [1.5, 0, 0],
                             [2, 0, 0],
                             [2.5, 0, 0]]),
                   np.array([[2, 0, 0],
                             [3.1, 0, 0],
                             [3.9, 0, 0],
                             [4.1, 0, 0]]))

    npt.assert_raises(RuntimeError, values_from_volume, data, nonsense_sl)

    # For some use-cases we might have singleton streamlines (with only one
    # node each):
    data3D = np.ones((2, 2, 2))
    streamlines = np.ones((10, 1, 3))
    npt.assert_equal(values_from_volume(data3D, streamlines).shape, (10, 1))
    data4D = np.ones((2, 2, 2, 2))
    streamlines = np.ones((10, 1, 3))
    npt.assert_equal(values_from_volume(data4D, streamlines).shape, (10, 1, 2))
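Note: ut.move_streamlines was removed in later DIPY releases (an assumption about the library's evolution, not stated in the source); a roughly equivalent call with transform_streamlines, used in the examples above, would be:

from dipy.tracking.streamline import transform_streamlines

x_sl1 = transform_streamlines(sl1, affine)  # returns a list, not a generator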
Example No. 17
def afq_profile(data, bundle, affine=None, n_points=100,
                orient_by=None, weights=None, **weights_kwarg):
    """
    Calculates a summarized profile of data for a bundle or tract
    along its length.

    Follows the approach outlined in [Yeatman2012]_.

    Parameters
    ----------
    data : 3D volume
        The statistic to sample with the streamlines.

    bundle : Streamlines class instance
        The collection of streamlines (possibly already resampled into an array
        for each to have the same length) with which we are resampling. See
        Note below about orienting the streamlines.

    affine: 4-by-4 array, optional.
        A transformation associated with the streamlines in the bundle.
        Default: identity.

    n_points: int, optional
        The number of points to sample along the bundle. Default: 100.

    orient_by: streamline, optional.
        A streamline to use as a standard to orient all of the streamlines in
        the bundle according to.

    weights : 1D array or 2D array or callable (optional)
        Weight each streamline (1D) or each node (2D) when calculating the
        tract-profiles. Must sum to 1 across streamlines (in each node if
        relevant). If callable, this is a function that calculates weights.

    weights_kwarg : key-word arguments
        Additional key-word arguments to pass to the weight-calculating
        function. Only to be used if weights is a callable.

    Returns
    -------
    ndarray : a 1D array with the profile of `data` along the length of
        `bundle`

    Note
    ----
    Before providing a bundle as input to this function, you will need to make
    sure that the streamlines in the bundle are all oriented in the same
    orientation relative to the bundle (use :func:`orient_by_streamline`).

    References
    ----------
    .. [Yeatman2012] Yeatman, Jason D., Robert F. Dougherty,
       Nathaniel J. Myall, Brian A. Wandell, and Heidi M. Feldman. 2012.
       "Tract Profiles of White Matter Properties: Automating Fiber-Tract
       Quantification" PloS One 7 (11): e49790.
    """
    if orient_by is not None:
        bundle = orient_by_streamline(bundle, orient_by, affine=affine)
    if len(bundle) == 0:
        raise ValueError("The bundle contains no streamlines")

    # Resample each streamline to the same number of points:
    fgarray = set_number_of_points(bundle, n_points)

    # Extract the values
    values = np.array(values_from_volume(data, fgarray, affine=affine))

    if weights is None:
        weights = np.ones(values.shape) / values.shape[0]
    elif callable(weights):
        weights = weights(bundle, **weights_kwarg)
    else:
        # We check that weights *always sum to 1 across streamlines*:
        if not np.allclose(np.sum(weights, 0), np.ones(n_points)):
            raise ValueError("The sum of weights across streamlines must ",
                             "be equal to 1")

    return np.sum(weights * values, 0)
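A minimal usage sketch, assuming a hypothetical FA volume fa_data and an already-oriented bundle, and assuming dipy.stats.analysis provides gaussian_weights as in Example No. 11's dsa alias:

import numpy as np
from dipy.stats.analysis import gaussian_weights

profile = afq_profile(fa_data, bundle, affine=np.eye(4),
                      n_points=100,
                      weights=gaussian_weights)
# profile is a 1D array of 100 weighted means, one per node along the bundle.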
Example No. 18
    mm = mm[:, 1:]
    mm = mm[idx]
    mm = mm[:, idx]

    weight_by_file = nii_file[:-4:] + '_' + weight_by + '.nii'
    weight_by_img = nib.load(weight_by_file)
    weight_by_data = weight_by_img.get_data()
    affine = weight_by_img.affine
    m_weighted = np.zeros((len(idx), len(idx)), dtype='float64')
    for pair, tracts in grouping.items():
        if pair[0] == 0 or pair[1] == 0:
            continue
        else:
            mean_vol_per_tract = []
            vol_per_tract = values_from_volume(weight_by_data,
                                               tracts,
                                               affine=affine)
            for s in vol_per_tract:
                mean_vol_per_tract.append(np.mean(s))
            mean_path_vol = np.nanmean(mean_vol_per_tract)
            m_weighted[pair[0] - 1, pair[1] - 1] = mean_path_vol
            m_weighted[pair[1] - 1, pair[0] - 1] = mean_path_vol

    mm_weighted = m_weighted[idx]
    mm_weighted = mm_weighted[:, idx]

    np.save(folder_name + r'\non-weighted_non-norm', mm)
    np.save(folder_name + r'\weighted_non-norm', mm_weighted)
    nw = np.reshape(mm, (23409, ))
    w = np.reshape(mm_weighted, (23409, ))