Example #1
def orient_by_streamline(streamlines,
                         standard,
                         n_points=12,
                         in_place=False,
                         as_generator=False,
                         affine=None):
    """
    Orient a bundle of streamlines to a standard streamline.

    Parameters
    ----------
    streamlines : Streamlines, list
        The input streamlines to orient.
    standard : Streamlines, list, or ndarray
        This provides the standard orientation according to which the
        streamlines in the provided bundle should be reoriented.
    n_points : int, optional
        The number of samples to apply to each of the streamlines.
    in_place : bool
        Whether to make the change in-place in the original input
        (and return a reference), or to make a copy of the list
        and return this copy, with the relevant streamlines reoriented.
        Default: False.
    as_generator : bool
        Whether to return a generator as output. Default: False
    affine : ndarray
        Affine transformation from voxels to streamlines. Default: identity.

    Returns
    -------
    Streamlines or list
        The input bundle, with each individual array oriented to be as
        similar as possible to the standard.

    """
    # Start by resampling, so that distance calculation is easy:
    fgarray = set_number_of_points(streamlines, n_points)
    std_array = set_number_of_points([standard], n_points)

    if as_generator:
        if in_place:
            w_s = "Cannot return a generator when in_place is set to True"
            raise ValueError(w_s)
        return _orient_by_sl_generator(streamlines, std_array, fgarray)

    # If it's a generator on input, we may as well generate it
    # here and now:
    if isinstance(streamlines, types.GeneratorType):
        out = Streamlines(streamlines)

    elif in_place:
        out = streamlines
    else:
        # Make a copy, so you don't change the output in place:
        out = deepcopy(streamlines)

    return _orient_by_sl_list(out, std_array, fgarray)
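A minimal usage sketch, assuming this is DIPY's dipy.tracking.streamline.orient_by_streamline: a streamline that runs opposite to the standard gets flipped.

import numpy as np
from dipy.tracking.streamline import orient_by_streamline

standard = np.array([[0., 0., 0.], [10., 0., 0.]])
streamlines = [np.array([[10., 1., 0.], [0., 1., 0.]]),    # runs backwards
               np.array([[0., -1., 0.], [10., -1., 0.]])]
oriented = orient_by_streamline(streamlines, standard)
print(oriented[0][0])   # [0. 1. 0.]: the backwards streamline was flipped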
Example #3
def find_bad_nrigh(name, score):

    test_data = np.load(os.path.join('..', 'cci_clean_data', name))
    data_evl = test_data['arr_0']
    key = os.path.splitext(name)[0]
    cci_score = cci_dict[key]
    cci_score = np.array(cci_score)

    streamlines_evl = Streamlines()
    # Strip the zero padding from each array before appending:
    for i in range(np.shape(data_evl)[0]):
        tmp = zero_remove(data_evl[i])
        streamlines_evl.append(tmp)

    lengths = np.array(list(length(streamlines_evl)))

    neighb = np.zeros((np.shape(data_evl)[0]))

    subsamp_sls = set_number_of_points(streamlines_evl, 64)
    for i in range(len(streamlines_evl)):

        mdf_mx = bundles_distances_mdf([subsamp_sls[i]], subsamp_sls)
        if score[i] == 0:  # bad fiber
            thre = np.percentile(mdf_mx, 4)
            len_bad = lengths[i]
            bound = len_bad * 0.1
            # Streamlines within 10% of the bad fiber's length
            # (computed here but not used in the label below):
            len_limt = 1 * ((lengths > len_bad - bound) &
                            (lengths < len_bad + bound))
            label = 1 * (mdf_mx < thre).flatten()  # bad neighbours by MDF
            neighb = neighb + label * (cci_score < 10)
    return data_evl, neighb
    return data_evl, neighb
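The zero_remove helper is not shown in this snippet. A plausible sketch, assuming (as the name and the zero-padded .npz arrays suggest) that it drops all-zero padding rows:

import numpy as np

def zero_remove(tmp):
    # Hypothetical helper: keep only the rows that are not all zeros.
    return tmp[~np.all(tmp == 0, axis=-1)]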
Example #4
def resample_streamlines_num_points(sft, num_points):
    """
    Resample streamlines using number of points per streamline

    Parameters
    ----------
    sft: StatefulTractogram
        SFT containing the streamlines to subsample.
    num_points: int
        Number of points per streamline in the output.

    Returns
    -------
    resampled_sft: StatefulTractogram
        The resampled streamlines as a sft.
    """

    # Checks
    if num_points <= 1:
        raise ValueError("The value of num_points should be greater than 1!")

    # Resampling
    resampled_streamlines = []
    for streamline in sft.streamlines:
        line = set_number_of_points(streamline, num_points)
        resampled_streamlines.append(line)

    # Creating sft
    # CAREFUL. Data_per_point will be lost.
    resampled_sft = _warn_and_save(resampled_streamlines, sft)

    return resampled_sft
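The work here is done by DIPY's set_number_of_points, which resamples a streamline to a fixed number of points evenly spaced along its arc length. A small sanity check:

import numpy as np
from dipy.tracking.streamline import set_number_of_points

sl = np.array([[0., 0., 0.], [1., 0., 0.], [3., 0., 0.]])
resampled = set_number_of_points(sl, 5)
print(resampled[:, 0])   # [0.   0.75 1.5  2.25 3.  ]: even arc-length spacing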
Example #5
def _resample_tg(tg, n_points):
    # reformat for dipy's set_number_of_points
    if isinstance(tg, np.ndarray):
        if len(tg.shape) > 2:
            streamlines = tg.tolist()
            streamlines = [np.asarray(item) for item in streamlines]
        else:
            # a single 2D (N, 3) array is accepted by dipy as-is
            streamlines = tg
    else:
        streamlines = tg.streamlines

    return dps.set_number_of_points(streamlines, n_points)
Example #6
def remove_indentical(n, streamlines_ori):
    # Iteratively remove exact duplicates (MDF distance of 0), alternating
    # between two buffers, for at most n rounds.
    for it in range(n):

        error_index = []
        subsamp_sls = set_number_of_points(streamlines_ori, 64)
        for i, sl in enumerate(subsamp_sls):
            mdf_mx = bundles_distances_mdf([sl], subsamp_sls)
            if (mdf_mx == 0).sum() > 1:
                # Keep one copy of each identical pair:
                if i - 1 not in error_index:
                    error_index.append(i)

        if error_index == []:
            print('number of detecting iterations is ' + str(2 * it + 1))
            return streamlines_ori

        streamlines_evl_final1 = Streamlines()
        for i, sl in enumerate(streamlines_ori):
            if i not in error_index:
                streamlines_evl_final1.append(sl)

        error_index = []
        subsamp_sls = set_number_of_points(streamlines_evl_final1, 64)
        for i, sl in enumerate(subsamp_sls):
            mdf_mx = bundles_distances_mdf([sl], subsamp_sls)
            if (mdf_mx == 0).sum() > 1:
                if i - 1 not in error_index:
                    error_index.append(i)

        if error_index == []:
            print('number of detecting iterations is ' + str(2 * it + 2))
            return streamlines_evl_final1

        streamlines_ori = Streamlines()
        for i, sl in enumerate(streamlines_evl_final1):
            if i not in error_index:
                streamlines_ori.append(sl)
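A small behaviour check with hypothetical data, assuming the DIPY imports the function relies on are in scope: a bundle with one exact duplicate loses a single copy.

import numpy as np
from dipy.tracking.distances import bundles_distances_mdf
from dipy.tracking.streamline import Streamlines, set_number_of_points

sls = Streamlines([np.array([[0., 0., 0.], [10., 0., 0.]]),
                   np.array([[0., 0., 0.], [10., 0., 0.]]),   # exact duplicate
                   np.array([[0., 5., 0.], [10., 5., 0.]])])
cleaned = remove_indentical(4, sls)
print(len(cleaned))   # 2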
Example #7
def gaussian_weights(bundle, n_points=100):
    """
    Calculate weights for each streamline/node in a bundle, based on a
    Mahalanobis distance from the mean of the bundle, at that node

    Parameters
    ----------
    bundle : array or list
        If this is a list, assume that it is a list of streamline coordinates
        (each entry is a 2D array, of shape n by 3). If this is an array, this
        is a resampled version of the streamlines, with equal number of points
        in each streamline.
    n_points : int, optional
        The number of points to resample to. *If the `bundle` is an array, this
        input is ignored*. Default: 100.

    Returns
    -------
    w : array of shape (n_streamlines, n_points)
        Weights for each node in each streamline, calculated as the inverse
        of the Mahalanobis distance of that node's coordinates, relative to
        the distribution of coordinates at that node position across
        streamlines.
    """
    if isinstance(bundle, list):
        # if you got a list, assume that it needs to be resampled:
        bundle = np.array(dps.set_number_of_points(bundle, n_points))
    else:
        if bundle.shape[-1] != 3:
            e_s = "Input must be shape (n_streamlines, n_points, 3)"
            raise ValueError(e_s)
        n_points = bundle.shape[1]

    w = np.zeros((bundle.shape[0], n_points))
    for node in range(bundle.shape[1]):
        # This should come back as a 3D covariance matrix with the spatial
        # variance covariance of this node across the different streamlines
        # This is a 3-by-3 array:
        node_coords = bundle[:, node]
        c = np.cov(node_coords.T, ddof=0)
        # scipy's mahalanobis expects the *inverse* of the covariance:
        ci = np.linalg.inv(c)
        # Calculate the mean of this node as well
        m = np.mean(node_coords, 0)
        # Weights are the inverse of the Mahalanobis distance
        for fn in range(bundle.shape[0]):
            # calculate Mahalanobis for node on fiber[fn]
            w[fn, node] = mahalanobis(node_coords[fn], m, ci)
    # weighting is inverse to the distance (the further you are, the less you
    # should be weighted)
    w = 1 / w
    # Normalize before returning, so that the weights in each node sum to 1:
    return w / np.sum(w, 0)
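A quick property check on a hypothetical toy bundle (array input avoids the dps resampling branch; scipy.spatial.distance.mahalanobis and numpy are assumed imported as above): the returned weights sum to 1 across streamlines at every node.

import numpy as np

rng = np.random.default_rng(0)
core = np.linspace([0, 0, 0], [0, 0, 50], 100)
bundle = np.array([core + rng.normal(0, 0.5, core.shape) for _ in range(20)])
w = gaussian_weights(bundle)
print(w.shape)                          # (20, 100)
print(np.allclose(w.sum(axis=0), 1))    # True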
Example #9
def resample_streamlines_step_size(sft, step_size):
    """
    Resample streamlines using a fixed step size.

    Parameters
    ----------
    sft: StatefulTractogram
        SFT containing the streamlines to subsample.
    step_size: float
        Size of the new steps, in mm.

    Returns
    -------
    resampled_sft: StatefulTractogram
        The resampled streamlines as a sft.
    """

    # Checks
    if step_size <= 0:
        raise ValueError("Step size can't be 0 or negative!")
    elif step_size < 0.1:
        logging.debug("The value of your step size seems suspiciously low. "
                      "Please check.")
    elif step_size > np.max(sft.voxel_sizes):
        logging.debug("The value of your step size seems suspiciously high. "
                      "Please check.")

    # Make sure we are in world space
    orig_space = sft.space
    sft.to_rasmm()

    # Resampling
    lengths = length(sft.streamlines)
    nb_points = np.ceil(lengths / step_size).astype(int)
    if np.any(nb_points == 1):
        logging.warning("Some streamlines are shorter than the provided "
                        "step size...")
        nb_points[nb_points == 1] = 2
    resampled_streamlines = [
        set_number_of_points(s, n) for s, n in zip(sft.streamlines, nb_points)
    ]

    # Creating sft
    resampled_sft = _warn_and_save(resampled_streamlines, sft)

    # Return to original space
    resampled_sft.to_space(orig_space)

    return resampled_sft
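The point-count rule is worth seeing in isolation; a sketch of just the arithmetic used above, with made-up lengths:

import numpy as np

lengths = np.array([23.5, 0.3])                        # streamline lengths, mm
step_size = 0.5                                        # mm
nb_points = np.ceil(lengths / step_size).astype(int)   # [47, 1]
nb_points[nb_points == 1] = 2    # a streamline needs at least 2 points
print(nb_points)                 # [47  2]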
Example #10
def smooth_line_gaussian(streamline, sigma):
    if sigma < 0.00001:
        raise ValueError('Cannot use a sigma of 0 with Gaussian smoothing.')

    nb_points = int(length(streamline))
    if nb_points < 2:
        logging.debug('Streamline shorter than 1mm, corner cases possible.')
        nb_points = 2
    sampled_streamline = set_number_of_points(streamline, nb_points)

    x, y, z = sampled_streamline.T
    x3 = gaussian_filter1d(x, sigma)
    y3 = gaussian_filter1d(y, sigma)
    z3 = gaussian_filter1d(z, sigma)
    smoothed_streamline = np.asarray([x3, y3, z3]).T

    # Ensure first and last point remain the same
    smoothed_streamline[0] = streamline[0]
    smoothed_streamline[-1] = streamline[-1]

    return smoothed_streamline
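A usage sketch with the imports this function assumes (logging, numpy, scipy's gaussian_filter1d, and DIPY's length and set_number_of_points) and a hypothetical jittery line:

import logging

import numpy as np
from dipy.tracking.streamline import length, set_number_of_points
from scipy.ndimage import gaussian_filter1d

rng = np.random.default_rng(0)
line = np.linspace([0, 0, 0], [30, 0, 0], 30) + rng.normal(0, 0.3, (30, 3))
smoothed = smooth_line_gaussian(line, sigma=2.0)
print(smoothed.shape)   # roughly (30, 3): one point per mm of length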
Example #11
def smooth_line_spline(streamline, sigma, nb_ctrl_points):
    if sigma < 0.00001:
        raise ValueError('Cannot use a sigma of 0 with spline smoothing.')

    nb_points = int(length(streamline))
    if nb_points < 2:
        logging.debug('Streamline shorter than 1mm, corner cases possible.')

    if nb_ctrl_points < 3:
        nb_ctrl_points = 3

    sampled_streamline = set_number_of_points(streamline, nb_ctrl_points)

    tck, u = splprep(sampled_streamline.T, s=sigma)
    smoothed_streamline = splev(np.linspace(0, 1, 99), tck)
    smoothed_streamline = np.squeeze(np.asarray([smoothed_streamline]).T)

    # Ensure first and last point remain the same
    smoothed_streamline[0] = streamline[0]
    smoothed_streamline[-1] = streamline[-1]

    return smoothed_streamline
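A matching sketch for the spline variant (scipy's splprep/splev plus the same DIPY helpers); note the output is always resampled to 99 points:

import numpy as np
from dipy.tracking.streamline import length, set_number_of_points
from scipy.interpolate import splev, splprep

rng = np.random.default_rng(0)
t = np.linspace(0, np.pi, 50)
arc = 20 * np.c_[np.cos(t), np.sin(t), np.zeros_like(t)]
noisy = arc + rng.normal(0, 0.2, arc.shape)
smoothed = smooth_line_spline(noisy, sigma=5., nb_ctrl_points=10)
print(smoothed.shape)   # (99, 3)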
Example #12
def calculate_tract_profile(img,
                            streamlines,
                            affine=None,
                            n_points=100,
                            weights=None):
    """

    Parameters
    ----------
    img : 3D volume

    streamlines : list of arrays, or array

    weights : 1D array or 2D array (optional)
        Weight each streamline (1D) or each node (2D) when calculating the
        tract-profiles. Must sum to 1 across streamlines (in each node if
        relevant).

    """
    if isinstance(streamlines, list):
        # Resample each streamline to the same number of points
        # list => np.array
        # Setting the number of points should happen in a streamline template
        # space, rather than in the subject native space, but for now we do
        # everything as in the Matlab version -- in native space.
        # In the future, an SLR object can be passed here, and then it would
        # move these streamlines into the template space before resampling...
        fgarray = np.array(dps.set_number_of_points(streamlines, n_points))
    else:
        fgarray = streamlines
    # ...and move them back to native space before indexing into the volume:
    values = dts.values_from_volume(img, fgarray, affine=affine)

    # We assume that weights *always sum to 1 across streamlines*:
    if weights is None:
        weights = np.ones(values.shape) / values.shape[0]

    tract_profile = np.sum(weights * values, 0)
    return tract_profile
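A toy example, assuming dps and dts both alias dipy.tracking.streamline (where set_number_of_points and values_from_volume live): a constant volume yields a flat profile.

import numpy as np
import dipy.tracking.streamline as dps   # provides set_number_of_points
dts = dps                                 # values_from_volume lives there too

img = np.ones((10, 10, 10))
sls = [np.array([[1., 1., 1.], [8., 8., 8.]])]
profile = calculate_tract_profile(img, sls, affine=np.eye(4))
print(profile.shape, profile[0])   # (100,) 1.0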
Example #13
def gen_short_data(name_list):
    for i in range(len(name_list)):
        name = name_list[i]
        path = os.path.join('..', 'cci_clean_data', name)
        sample_data = np.load(path)
        ndata = sample_data['arr_0']

        streamlines_evl = Streamlines()
        for j in range(np.shape(ndata)[0]):
            tmp = ndata[j]
            tmp = zero_remove(tmp)
            streamlines_evl.append(tmp)

        subsamp_sls = set_number_of_points(streamlines_evl, sub_len)

        tt = np.array(subsamp_sls)
        path = os.path.join('..', 'subsample-data', str(sub_len))
        if not os.path.exists(path):
            os.makedirs(path)
        name_save = os.path.join(path, os.path.splitext(name)[0])

        np.savez_compressed(name_save, tt)
        print(str(i + 1) + '/' + str(len(name_list)) + ' finished')
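These snippets lean on numpy's .npz convention: an array saved positionally with savez_compressed loads back under the key 'arr_0'. A minimal round trip (hypothetical path):

import numpy as np

np.savez_compressed('/tmp/example.npz', np.zeros((3, 64, 3)))
print(np.load('/tmp/example.npz')['arr_0'].shape)   # (3, 64, 3)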
Example #15
    def segment(self,
                bundle_dict,
                tg,
                fdata=None,
                fbval=None,
                fbvec=None,
                mapping=None,
                reg_prealign=None,
                reg_template=None,
                b0_threshold=0,
                img_affine=None):
        """
        Segment streamlines into bundles based on either waypoint ROIs
        [Yeatman2012]_ or RecoBundles [Garyfallidis17]_.

        Parameters
        ----------
        bundle_dict: dict
            Meta-data for the segmentation. The format is something like::

                {'name': {'ROIs':[img1, img2],
                'rules':[True, True]},
                'prob_map': img3,
                'cross_midline': False}

        tg : StatefulTractogram
            Bundles to segment

        fdata, fbval, fbvec : str
            Full path to data, bvals, bvecs

        mapping : DiffeomorphicMap object, str or nib.Nifti1Image, optional.
            A mapping between DWI space and a template. If None, mapping will
            be registered from data used in prepare_img. Default: None.

        reg_prealign : array, optional.
            The linear transformation to be applied to align input images to
            the reference space before warping under the deformation field.
            Default: None.

        reg_template : str or nib.Nifti1Image, optional.
            Template to use for registration. Default: MNI T2.

        img_affine : array, optional.
            The spatial transformation from the measurement to the scanner
            space.

        References
        ----------
        .. [Yeatman2012] Yeatman, Jason D., Robert F. Dougherty, Nathaniel J.
        Myall, Brian A. Wandell, and Heidi M. Feldman. 2012. "Tract Profiles of
        White Matter Properties: Automating Fiber-Tract Quantification"
        PloS One 7 (11): e49790.

        .. [Garyfallidis17] Garyfallidis et al. Recognition of white matter
        bundles using local and global streamline-based registration and
        clustering, Neuroimage, 2017.
        """
        if img_affine is not None:
            if (mapping is None or fdata is not None or fbval is not None
                    or fbvec is not None):

                self.logger.error(
                    "Provide either the full path to data, bvals, bvecs," +
                    "or provide the affine of the image and the mapping")

        self.logger.info("Preparing Segmentation Parameters")
        self.img_affine = img_affine
        self.prepare_img(fdata, fbval, fbvec)
        self.logger.info("Preprocessing Streamlines")
        self.tg = tg

        # If resampling over-write the sft:
        if self.nb_points:
            self.tg = StatefulTractogram(
                dps.set_number_of_points(self.tg.streamlines, self.nb_points),
                self.tg, self.tg.space)

            self.resample_streamlines(self.nb_points)

        self.prepare_map(mapping, reg_prealign, reg_template)
        self.bundle_dict = bundle_dict
        self.cross_streamlines()

        if self.seg_algo == "afq":
            return self.segment_afq()
        elif self.seg_algo == "reco":
            return self.segment_reco()
Example #16
def cluster_confidence(streamlines,
                       max_mdf=5,
                       subsample=12,
                       power=1,
                       override=False):
    """ Computes the cluster confidence index (cci), which is an
    estimation of the support a set of streamlines gives to
    a particular pathway.

    Ex: A single streamline with no others in the dataset
    following a similar pathway has a low cci. A streamline
    in a bundle of 100 streamlines that follow similar
    pathways has a high cci.

    See: Jordan et al. 2017
    (Based on streamline MDF distance from Garyfallidis et al. 2012)

    Parameters
    ----------
    streamlines : list of 2D (N, 3) arrays
        A sequence of streamlines, each given as an (N, 3) array of point
        coordinates
    max_mdf : int
        The maximum MDF distance (mm) that will be considered a
        "supporting" streamline and included in cci calculation
    subsample: int
        The number of points that are considered for each streamline
        in the calculation. To save on calculation time, each
        streamline is subsampled to `subsample` points.
    power: int
        The power to which the MDF distance for each streamline
        will be raised to determine how much it contributes to
        the cci. High values of power make the contribution value
        degrade much faster. Example: a streamline with 5mm MDF
        similarity contributes 1/5 to the cci if power is 1, but
        only contributes 1/5^2 = 1/25 if power is 2.
    override: bool, False by default
        If True, the cci calculation will still occur even though there
        are short streamlines in the dataset that may alter expected
        behaviour.

    Returns
    -------
    cci_score_mtrx : array
        An array of CCI scores, one per streamline

    References
    ----------
    [Jordan17] Jordan K. et al., Cluster Confidence Index: A Streamline-Wise
    Pathway Reproducibility Metric for Diffusion-Weighted MRI Tractography,
    Journal of Neuroimaging, vol 28, no 1, 2017.

    [Garyfallidis12] Garyfallidis E. et al., QuickBundles a method for
    tractography simplification, Frontiers in Neuroscience,
    vol 6, no 175, 2012.

    """

    # error if any streamlines are shorter than 20mm
    lengths = list(length(streamlines))
    if min(lengths) < 20 and not override:
        raise ValueError('Short streamlines found. We recommend removing them.'
                         ' To continue without removing short streamlines set'
                         ' override=True')

    # calculate the pairwise MDF distance between all streamlines in dataset
    subsamp_sls = set_number_of_points(streamlines, subsample)

    cci_score_mtrx = np.zeros([len(subsamp_sls)])

    for i, sl in enumerate(subsamp_sls):
        mdf_mx = bundles_distances_mdf([sl], subsamp_sls)
        if (mdf_mx == 0).sum() > 1:
            raise ValueError('Identical streamlines. CCI calculation invalid')
        mdf_mx_oi = (mdf_mx > 0) & (mdf_mx < max_mdf) & ~np.isnan(mdf_mx)
        mdf_mx_oi_only = mdf_mx[mdf_mx_oi]
        cci_score = np.sum(np.divide(1, np.power(mdf_mx_oi_only, power)))
        cci_score_mtrx[i] = cci_score

    return cci_score_mtrx
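A toy check with hypothetical data (the DIPY imports the function relies on are spelled out): streamlines in a tight bundle score high, while an isolated outlier scores near zero.

import numpy as np
from dipy.tracking.distances import bundles_distances_mdf
from dipy.tracking.streamline import length, set_number_of_points

rng = np.random.default_rng(0)
core = np.linspace([0, 0, 0], [0, 0, 60], 40)
bundle = [core + rng.normal(0, 0.5, core.shape) for _ in range(30)]
bundle.append(np.linspace([40., 0., 0.], [40., 60., 0.], 40))   # outlier
cci = cluster_confidence(bundle)
print(cci.argmin())   # 30: the outlier gets the lowest (here zero) score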
Example #17
def _resample_bundle(streamlines, n_points):
    return np.array(dps.set_number_of_points(streamlines, n_points))
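Stacking the resampled list with np.array gives one (n_streamlines, n_points, 3) block, which is what downstream weighting code expects. A sketch, assuming dps aliases dipy.tracking.streamline:

import numpy as np
import dipy.tracking.streamline as dps

sls = [np.array([[0., 0., 0.], [5., 0., 0.]]),
       np.array([[0., 1., 0.], [3., 1., 0.], [6., 1., 0.]])]
print(_resample_bundle(sls, 20).shape)   # (2, 20, 3)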
Example #19
def resample_streamlines(streamlines, n_pts=16):
    resampled = []
    for sl in streamlines:
        resampled.append(set_number_of_points(sl, n_pts))

    return resampled
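DIPY's set_number_of_points also accepts a whole sequence of streamlines, so the loop above collapses to a single call; a minimal equivalent sketch (the name resample_streamlines_one_call is ours):

from dipy.tracking.streamline import set_number_of_points

def resample_streamlines_one_call(streamlines, n_pts=16):
    # set_number_of_points maps over a sequence directly, returning
    # one resampled array per input streamline
    return set_number_of_points(streamlines, n_pts)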