Example #1
def read_hcp_atlas_16_bundles():
    """
    Load the 16-bundle HCP atlas and return a dictionary with the
    whole-brain streamlines plus, for each bundle, its streamlines ('sl')
    and QuickBundles centroid ('centroid').
    """
    bundle_dict = {}
    _, folder = fetch_hcp_atlas_16_bundles()
    whole_brain = load_tractogram(op.join(folder,
                                          'Atlas_in_MNI_Space_16_bundles',
                                          'whole_brain',
                                          'whole_brain_MNI.trk'),
                                  'same',
                                  bbox_valid_check=False).streamlines
    bundle_dict['whole_brain'] = whole_brain
    bundle_files = glob(
        op.join(folder, "Atlas_in_MNI_Space_16_bundles", "bundles", "*.trk"))
    for bundle_file in bundle_files:
        bundle = op.splitext(op.split(bundle_file)[-1])[0]
        bundle_dict[bundle] = {}
        bundle_dict[bundle]['sl'] = load_tractogram(bundle_file,
                                                    'same',
                                                    bbox_valid_check=False)\
            .streamlines

        feature = ResampleFeature(nb_points=100)
        metric = AveragePointwiseEuclideanMetric(feature)
        qb = QuickBundles(np.inf, metric=metric)
        cluster = qb.cluster(bundle_dict[bundle]['sl'])
        bundle_dict[bundle]['centroid'] = cluster.centroids[0]

    # For some reason, this file-name has a 0 in it, instead of an O:
    bundle_dict["IFOF_R"] = bundle_dict["IF0F_R"]
    del bundle_dict["IF0F_R"]
    return bundle_dict
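A hedged usage sketch (the 'CST_L' key is an assumption; the actual keys come
from the atlas file names):

bundles = read_hcp_atlas_16_bundles()
print(sorted(k for k in bundles if k != 'whole_brain'))
cst_l = bundles['CST_L']         # hypothetical bundle key
print(len(cst_l['sl']))          # number of streamlines in the bundle
print(cst_l['centroid'].shape)   # (100, 3) QuickBundles centroid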
Example #2
def test_3D_segments():
    points = np.array([[[1, 0, 0],
                        [1, 1, 0]],
                       [[3, 1, 0],
                        [3, 0, 0]],
                       [[2, 0, 0],
                        [2, 1, 0]],
                       [[5, 1, 0],
                        [5, 0, 0]],
                       [[5.5, 0, 0],
                        [5.5, 1, 0]]], dtype="f4")

    thresholds = [4, 2, 1]

    feature = ResampleFeature(nb_points=20)
    metric = AveragePointwiseEuclideanMetric(feature)
    qbx = QuickBundlesX(thresholds, metric=metric)
    tree = qbx.cluster(points)
    clusters_0 = tree.get_clusters(0)
    clusters_1 = tree.get_clusters(1)
    clusters_2 = tree.get_clusters(2)

    assert_equal(len(clusters_0.centroids), len(clusters_1.centroids))
    assert_equal(len(clusters_2.centroids) > len(clusters_1.centroids), True)

    assert_array_equal(clusters_2[1].indices, np.array([3, 4], dtype=np.int32))
Example #3
def assignment_map(target_bundle, model_bundle, no_disks=100):
    """
    Calculates assignment maps of the target bundle with reference to
    model bundle centroids.

    Parameters
    ----------
    target_bundle : streamlines
        target bundle extracted from subject data in common space
    model_bundle : streamlines
        atlas bundle used as reference
    no_disks : integer, optional
        Number of disks used for dividing bundle into disks. (Default 100)

    References
    ----------
    .. [Chandio19] Chandio, B.Q., S. Koudoro, D. Reagan, J. Harezlak,
       E. Garyfallidis, Bundle Analytics: a computational and statistical
       analyses framework for tractometric studies, Proceedings of:
       International Society of Magnetic Resonance in Medicine (ISMRM),
       Montreal, Canada, 2019.
    """

    mbundle_streamlines = set_number_of_points(model_bundle,
                                               nb_points=no_disks)

    metric = AveragePointwiseEuclideanMetric()
    qb = QuickBundles(threshold=85., metric=metric)
    clusters = qb.cluster(mbundle_streamlines)
    centroids = Streamlines(clusters.centroids)

    _, indx = cKDTree(centroids.data, 1,
                      copy_data=True).query(target_bundle.data, k=1)

    return indx
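A minimal sketch (file names are hypothetical): label every point of a target
bundle with the index of the closest model-bundle disk.

import numpy as np
from dipy.io.streamline import load_trk

model = load_trk("model_af_l.trk", "same",
                 bbox_valid_check=False).streamlines
target = load_trk("subject_af_l.trk", "same",
                  bbox_valid_check=False).streamlines
indx = assignment_map(target, model, no_disks=100)
print(np.unique(indx))  # disk labels in [0, 99]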
Example #4
def produce_Clusters(truth_list, thresh):

    feature = ResampleFeature(nb_points=24)
    metric = AveragePointwiseEuclideanMetric(feature=feature)
    qb = QuickBundles(threshold=thresh, metric=metric)
    clusters = qb.cluster(truth_list)

    return qb, clusters
Example #5
def get_centroid_streamline(streamlines, nb_points):
    resample_feature = ResampleFeature(nb_points=nb_points)
    quick_bundle = QuickBundles(
        threshold=np.inf,
        metric=AveragePointwiseEuclideanMetric(resample_feature))
    clusters = quick_bundle.cluster(streamlines)
    centroid_streamlines = clusters.centroids

    return centroid_streamlines
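A quick synthetic check (toy data, not from the original source):

import numpy as np

sl_a = np.array([[0, 0, 0], [1, 0, 0], [2, 0, 0]], dtype="f4")
sl_b = np.array([[0, 1, 0], [1, 1, 0], [2, 1, 0]], dtype="f4")
centroid = get_centroid_streamline([sl_a, sl_b], nb_points=3)[0]
print(centroid)  # lies on the midline y = 0.5 between the two streamlines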
Example #6
def qb_metrics_features(streamlines,
                        threshold=10.0,
                        metric=None,
                        max_nb_clusters=np.iinfo('i4').max):
    """
    Enhancing QuickBundles with different metrics and features
    metric: 'IF', 'RF', 'CoMF', 'MF', 'AF', 'VBEF', None
    """
    if metric == 'IF':
        feature = IdentityFeature()
        metric = AveragePointwiseEuclideanMetric(feature=feature)
    elif metric == 'RF':
        feature = ResampleFeature(nb_points=24)
        metric = AveragePointwiseEuclideanMetric(feature=feature)
    elif metric == 'CoMF':
        feature = CenterOfMassFeature()
        metric = EuclideanMetric(feature)
    elif metric == 'MF':
        feature = MidpointFeature()
        metric = EuclideanMetric(feature)
    elif metric == 'AF':
        feature = ArcLengthFeature()
        metric = EuclideanMetric(feature)
    elif metric == 'VBEF':
        feature = VectorOfEndpointsFeature()
        metric = CosineMetric(feature)
    else:
        metric = "MDF_12points"

    qb = QuickBundles(threshold=threshold,
                      metric=metric,
                      max_nb_clusters=max_nb_clusters)
    clusters = qb.cluster(streamlines)

    labels = np.array(len(streamlines) * [None])
    N_list = []
    for i in range(len(clusters)):
        N_list.append(len(clusters[i]))
    data_clusters = []
    for i in range(len(clusters)):
        labels[clusters[i].indices] = i + 1
        data_clusters.append(streamlines[clusters[i].indices])

    return labels, data_clusters, N_list
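A sketch of a call (assumes `streamlines` is an indexable DIPY Streamlines
object):

labels, data_clusters, N_list = qb_metrics_features(streamlines,
                                                    threshold=10.0,
                                                    metric='AF')
print(N_list)  # streamlines per cluster, clustered by arc length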
Example #7
    def __init__(self,
                 threshold,
                 metric="MDF",
                 max_nb_clusters=np.iinfo('i4').max):
        self.threshold = threshold
        self.max_nb_clusters = max_nb_clusters

        if isinstance(metric, Metric):
            self.metric = metric
        elif metric.upper() == "MDF":
            self.metric = AveragePointwiseEuclideanMetric()
Example #8
def get_centroid_streamline(tractogram, nb_points, distance_threshold):
    streamlines = tractogram.streamlines
    resample_feature = ResampleFeature(nb_points=nb_points)
    quick_bundle = QuickBundles(
        threshold=distance_threshold,
        metric=AveragePointwiseEuclideanMetric(resample_feature))
    clusters = quick_bundle.cluster(streamlines)
    centroid_streamlines = clusters.centroids

    if len(centroid_streamlines) > 1:
        raise Exception('Multiple centroids found')

    return Tractogram(centroid_streamlines, affine_to_rasmm=np.eye(4))
Example #9
    def __init__(self, thresholds, metric="MDF_12points"):
        self.thresholds = thresholds

        if isinstance(metric, MinimumAverageDirectFlipMetric):
            raise ValueError("Use AveragePointwiseEuclideanMetric instead")

        if isinstance(metric, Metric):
            self.metric = metric
        elif metric == "MDF_12points":
            feature = ResampleFeature(nb_points=12)
            self.metric = AveragePointwiseEuclideanMetric(feature)
        else:
            raise ValueError("Unknown metric: {0}".format(metric))
Example #10
def test_3D_points():

    points = np.array(
        [[[1, 0, 0]], [[3, 0, 0]], [[2, 0, 0]], [[5, 0, 0]], [[5.5, 0, 0]]],
        dtype="f4")

    thresholds = [4, 2, 1]
    metric = AveragePointwiseEuclideanMetric()
    qbx = QuickBundlesX(thresholds, metric=metric)
    tree = qbx.cluster(points)
    clusters_2 = tree.get_clusters(2)
    assert_array_equal(clusters_2.clusters_sizes(), [3, 2])
    clusters_0 = tree.get_clusters(0)
    assert_array_equal(clusters_0.clusters_sizes(), [5])
Example #11
    def __init__(self,
                 threshold,
                 metric="MDF_12points",
                 max_nb_clusters=np.iinfo('i4').max):
        self.threshold = threshold
        self.max_nb_clusters = max_nb_clusters

        if isinstance(metric, Metric):
            self.metric = metric
        elif metric == "MDF_12points":
            feature = ResampleFeature(nb_points=12)
            self.metric = AveragePointwiseEuclideanMetric(feature)
        else:
            raise ValueError("Unknown metric: {0}".format(metric))
Example #12
    def quickbundles(self, streamlines):
        """Segment tract with QuickBundles."""

        # TODO: implement other metrics
        try:
            from dipy.segment.clustering import QuickBundles
            from dipy.segment.metric import ResampleFeature
            from dipy.segment.metric import AveragePointwiseEuclideanMetric
        except ImportError:
            return None
        else:

            feature = ResampleFeature(nb_points=self.qb_points)
            metric = AveragePointwiseEuclideanMetric(feature=feature)
            qb = QuickBundles(threshold=self.qb_threshold, metric=metric)
            clusters = qb.cluster(streamlines)

        return clusters
Example #13
def get_tract_profile(bundle,
                      metric_img,
                      metric_affine,
                      use_weights=False,
                      flip=True,
                      num_points=100):
    '''
    This function reorients the streamlines and extracts the diffusion metrics
    along the tract. It essentially performs step 1. The default number of
    points along a tract is 100, which can be thought of as % along a tract.

    The flip variable signals if you would like to flip the direction of the
    streamlines after reorientation. For example, if after reorientation all
    the streamlines were motor cortex -> brainstem and you actually wanted
    brainstem -> motor cortex, then you set flip to True. The default is True
    because generally we see reorientation result in motor cortex -> brainstem.
    For the honours project, we were looking for the opposite.
    '''

    # Reorient all the streamlines so that they are following the same direction
    feature = ResampleFeature(nb_points=num_points)
    d_metric = AveragePointwiseEuclideanMetric(feature)
    qb = QuickBundles(np.inf, metric=d_metric)
    centroid_bundle = qb.cluster(bundle).centroids[0]
    oriented_bundle = orient_by_streamline(bundle, centroid_bundle)

    # Calculate weights for each streamline/node in a bundle, based on a
    # Mahalanobis distance from the core the bundle, at that node
    w_bundle = None
    if use_weights:
        w_bundle = gaussian_weights(oriented_bundle)

    # Sample the metric along the tract. The implementation of this function
    # is based on work by Yeatman et al. in 2012
    profile_bundle = afq_profile(metric_img,
                                 oriented_bundle,
                                 metric_affine,
                                 weights=w_bundle)

    # Reverse the profile bundle if the direction is not desired
    if flip:
        profile_bundle = np.flip(profile_bundle)

    return profile_bundle
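A hedged usage sketch (file names are assumptions):

import nibabel as nib
from dipy.io.streamline import load_trk

fa_img = nib.load("fa.nii.gz")
bundle = load_trk("cst.trk", "same", bbox_valid_check=False).streamlines
profile = get_tract_profile(bundle, fa_img.get_fdata(), fa_img.affine,
                            use_weights=True, num_points=100)
print(profile.shape)  # one value per %-along-tract node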
Example #14
def _prepare_gt_bundles_info(bundles_dir, bundles_masks_dir,
                             gt_bundles_attribs, ref_anat_fname):
    # Ref bundles will contain {'name': 'name_of_the_bundle',
    #                           'threshold': thres_value,
    #                           'streamlines': list_of_streamlines}

    dummy_attribs = {'orientation': 'LPS'}
    qb = QuickBundles(20, metric=AveragePointwiseEuclideanMetric())

    ref_bundles = []

    for bundle_idx, bundle_f in enumerate(sorted(os.listdir(bundles_dir))):
        bundle_name = os.path.splitext(os.path.basename(bundle_f))[0]

        bundle_attribs = gt_bundles_attribs.get(os.path.basename(bundle_f))
        if bundle_attribs is None:
            raise ValueError(
                "Missing basic bundle attribs for {0}".format(bundle_f))

        # Already resample to avoid doing it for each iteration of chunking
        orig_strl = [
            s for s in get_tracts_voxel_space_for_dipy(
                os.path.join(bundles_dir, bundle_f), ref_anat_fname,
                dummy_attribs)
        ]

        resamp_bundle = set_number_of_points(orig_strl, NB_POINTS_RESAMPLE)
        resamp_bundle = [s.astype('f4') for s in resamp_bundle]

        bundle_cluster_map = qb.cluster(resamp_bundle)
        bundle_cluster_map.refdata = resamp_bundle

        bundle_mask = nib.load(
            os.path.join(bundles_masks_dir, bundle_name + '.nii.gz'))

        ref_bundles.append({
            'name': bundle_name,
            'threshold': bundle_attribs['cluster_threshold'],
            'cluster_map': bundle_cluster_map,
            'mask': bundle_mask
        })

    return ref_bundles
Example #15
def auto_extract(model_cluster_map,
                 rstreamlines,
                 number_pts_per_str=NB_POINTS_RESAMPLE,
                 close_centroids_thr=20,
                 clean_thr=7.,
                 disp=False,
                 verbose=False,
                 ordering=None):

    if ordering is None:
        ordering = np.arange(len(rstreamlines))

    qb = QuickBundles(threshold=REF_BUNDLES_THRESHOLD,
                      metric=AveragePointwiseEuclideanMetric())
    closest_bundles = qb.find_closest(model_cluster_map,
                                      rstreamlines,
                                      clean_thr,
                                      ordering=ordering)
    return ordering[np.where(closest_bundles >= 0)[0]]
Example #16
def get_streamlines_centroid(streamlines, nb_points):
    """
    Compute centroid from streamlines using QuickBundles.

    Parameters
    ----------
    streamlines: list of ndarray
        The list of streamlines from which we compute the centroid.
    nb_points: int
        Number of points defining the centroid streamline.

    Returns
    -------
    List of length one, containing a np.ndarray of shape (nb_points, 3)
    """
    resample_feature = ResampleFeature(nb_points=nb_points)
    quick_bundle = QuickBundles(
        threshold=np.inf,
        metric=AveragePointwiseEuclideanMetric(resample_feature))
    clusters = quick_bundle.cluster(streamlines)
    centroid_streamlines = clusters.centroids

    return centroid_streamlines
Example #17
def assignment_map(target_bundle, model_bundle, no_disks=100):
    """
    Calculates assignment maps of the target bundle with reference to
    model bundle centroids.

    Parameters
    ----------
    target_bundle : streamlines
        target bundle extracted from subject data in common space
    model_bundle : streamlines
        atlas bundle used as reference
    no_disks : integer, optional
        Number of disks used for dividing bundle into disks. (Default 100)

    References
    ----------
    .. [Chandio2020] Chandio, B.Q., Risacher, S.L., Pestilli, F., Bullock, D.,
       Yeh, FC., Koudoro, S., Rokem, A., Harezlak, J., and Garyfallidis, E.
       Bundle analytics, a computational framework for investigating the
       shapes and profiles of brain pathways across populations.
       Sci Rep 10, 17149 (2020)

    """

    mbundle_streamlines = set_number_of_points(model_bundle,
                                               nb_points=no_disks)

    metric = AveragePointwiseEuclideanMetric()
    qb = QuickBundles(threshold=85., metric=metric)
    clusters = qb.cluster(mbundle_streamlines)
    centroids = Streamlines(clusters.centroids)

    _, indx = cKDTree(centroids.get_data(), 1,
                      copy_data=True).query(target_bundle.get_data(), k=1)

    return indx
Example #18
"""

from dipy.segment.clustering import QuickBundles
from dipy.segment.metric import IdentityFeature
from dipy.segment.metric import AveragePointwiseEuclideanMetric

# Get some streamlines.
streamlines = get_streamlines()  # Previously defined.

# Make sure our streamlines have the same number of points.
from dipy.tracking.streamline import set_number_of_points
streamlines = set_number_of_points(streamlines, nb_points=12)

# Create an instance of `IdentityFeature` and tell metric to use it.
feature = IdentityFeature()
metric = AveragePointwiseEuclideanMetric(feature=feature)
qb = QuickBundles(threshold=10., metric=metric)
clusters = qb.cluster(streamlines)

print("Nb. clusters:", len(clusters))
print("Cluster sizes:", list(map(len, clusters)))
"""

::

    Nb. clusters: 4
    Cluster sizes: [64, 191, 47, 1]


.. _clustering-examples-ResampleFeature:
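The label above introduces a `ResampleFeature` section that is cut off here;
a minimal sketch of that usage in the same style (assuming `get_streamlines`
as before):

from dipy.segment.clustering import QuickBundles
from dipy.segment.metric import ResampleFeature
from dipy.segment.metric import AveragePointwiseEuclideanMetric

# Get some streamlines.
streamlines = get_streamlines()  # Previously defined.

# ResampleFeature resamples streamlines on the fly, so the inputs no longer
# need to have the same number of points.
feature = ResampleFeature(nb_points=12)
metric = AveragePointwiseEuclideanMetric(feature=feature)
qb = QuickBundles(threshold=10., metric=metric)
clusters = qb.cluster(streamlines)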
Example #19
args = sys.argv
tractogram_fn = args[1]
output_dir = args[2]
subject = args[3]
tract_name = args[4]
max_num_centroids = int(args[5])
points_per_sl = int(args[6])

# Open the file and extract streamlines
streams, header = trackvis.read(tractogram_fn)
streamlines = [sl[0] for sl in streams]

# Run quickbundles with chosen parameters
feature = ResampleFeature(nb_points=points_per_sl)
metric = AveragePointwiseEuclideanMetric(feature)
qb = QuickBundles(threshold=10.,
                  max_nb_clusters=max_num_centroids,
                  metric=metric)
clusters = qb.cluster(streamlines)

# Extract the centroids
centroids = [cluster.centroid for cluster in clusters]

# If not enough generated, fill with empty streamlines
diff = max_num_centroids - len(centroids)
if diff > 0:
    print(
        "Not enough centroids generated, so generating empty streamlines for padding."
    )
    empty_sl = np.zeros((points_per_sl, 3), dtype=np.float32)
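The snippet is truncated here; a hedged completion of the padding branch (the
save step and file naming are assumptions):

    centroids.extend([empty_sl] * diff)  # pad up to max_num_centroids

# Hypothetical save: write the centroids in TrackVis format, reusing the
# header of the input tractogram.
out_fn = "{}/{}_{}_centroids.trk".format(output_dir, subject, tract_name)
trackvis.write(out_fn, [(c, None, None) for c in centroids], hdr)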
Example #20
def qbx_and_merge(streamlines,
                  thresholds,
                  nb_pts=20,
                  select_randomly=None,
                  rng=None,
                  verbose=True):
    """ Run QuickBundlesX and then run again on the centroids of the last layer

    Running again QuickBundles at a layer has the effect of merging
    some of the clusters that maybe originally devided because of branching.
    This function help obtain a result at a QuickBundles quality but with
    QuickBundlesX speed. The merging phase has low cost because it is applied
    only on the centroids rather than the entire dataset.

    Parameters
    ----------
    streamlines : Streamlines
    thresholds : sequence
        List of distance thresholds for QuickBundlesX.
    nb_pts : int
        Number of points for discretizing each streamline
    select_randomly : int
        Randomly select a specific number of streamlines. If None all the
        streamlines are used.
    rng : RandomState
        If None then RandomState is initialized internally.
    verbose : bool
        If True print information in stdout.

    Returns
    -------
    clusters : obj
        Contains the clusters of the last layer of QuickBundlesX after merging.

    References
    ----------
    .. [Garyfallidis12] Garyfallidis E. et al., QuickBundles a method for
                        tractography simplification, Frontiers in Neuroscience,
                        vol 6, no 175, 2012.

    .. [Garyfallidis16] Garyfallidis E. et al. QuickBundlesX: Sequential
                        clustering of millions of streamlines in multiple
                        levels of detail at record execution time. Proceedings
                        of the, International Society of Magnetic Resonance
                        in Medicine (ISMRM). Singapore, 4187, 2016.
    """
    if verbose:
        t = time()
    len_s = len(streamlines)
    if select_randomly is None:
        select_randomly = len_s

    if rng is None:
        rng = np.random.RandomState()
    indices = rng.choice(len_s, min(select_randomly, len_s), replace=False)
    sample_streamlines = set_number_of_points(streamlines, nb_pts)

    if verbose:
        print(' Resampled to {} points'.format(nb_pts))
        print(' Size is %0.3f MB' % (nbytes(sample_streamlines), ))
        print(' Duration of resampling is %0.3f sec.' % (time() - t, ))
        print(' QBX phase starting...')

    qbx = QuickBundlesX(thresholds, metric=AveragePointwiseEuclideanMetric())

    if verbose:
        t1 = time()
    qbx_clusters = qbx.cluster(sample_streamlines, ordering=indices)

    if verbose:
        print(' Merging phase starting ...')

    qbx_merge = QuickBundlesX([thresholds[-1]],
                              metric=AveragePointwiseEuclideanMetric())

    final_level = len(thresholds)
    len_qbx_fl = len(qbx_clusters.get_clusters(final_level))
    qbx_ordering_final = rng.choice(len_qbx_fl, len_qbx_fl, replace=False)

    qbx_merged_cluster_map = qbx_merge.cluster(
        qbx_clusters.get_clusters(final_level).centroids,
        ordering=qbx_ordering_final).get_clusters(1)

    qbx_cluster_map = qbx_clusters.get_clusters(final_level)

    merged_cluster_map = ClusterMapCentroid()
    for cluster in qbx_merged_cluster_map:
        merged_cluster = ClusterCentroid(centroid=cluster.centroid)
        for i in cluster.indices:
            merged_cluster.indices.extend(qbx_cluster_map[i].indices)
        merged_cluster_map.add_cluster(merged_cluster)

    merged_cluster_map.refdata = streamlines

    if verbose:
        print(' QuickBundlesX time for %d random streamlines' %
              (select_randomly, ))

        print(' Duration %0.3f sec. \n' % (time() - t1, ))

    return merged_cluster_map
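A usage sketch (the thresholds are typical values, not from the original
source):

clusters = qbx_and_merge(streamlines, thresholds=[30., 25., 20., 15.])
print(len(clusters))           # number of merged clusters
print(clusters.centroids[0])   # centroid of the first cluster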
Example #21
def bundle_analysis(model_bundle_folder,
                    bundle_folder,
                    orig_bundle_folder,
                    metric_folder,
                    group,
                    subject,
                    no_disks=100,
                    out_dir=''):
    """
    Applies statistical analysis on bundles and saves the results
    in a directory specified by ``out_dir``.

    Parameters
    ----------
    model_bundle_folder : string
        Path to the input model bundle files. This path may contain
        wildcards to process multiple inputs at once.
    bundle_folder : string
        Path to the input bundle files in common space. This path may
        contain wildcards to process multiple inputs at once.
    orig_bundle_folder : string
        Path to the input bundle files in native space. This path may
        contain wildcards to process multiple inputs at once.
    metric_folder : string
        Path to the input dti metric or/and peak files. It will be used as
        metric for statistical analysis of bundles.
    group : string
        what group subject belongs to e.g. control or patient
    subject : string
        subject id e.g. 10001
    no_disks : integer, optional
        Number of disks used for dividing bundle into disks. (Default 100)
    out_dir : string, optional
        Output directory (default input file directory)

    References
    ----------
    .. [Chandio19] Chandio, B.Q., S. Koudoro, D. Reagan, J. Harezlak,
       E. Garyfallidis, Bundle Analytics: a computational and statistical
       analyses framework for tractometric studies, Proceedings of:
       International Society of Magnetic Resonance in Medicine (ISMRM),
       Montreal, Canada, 2019.

    """

    dt = dict()

    mb = os.listdir(model_bundle_folder)
    mb.sort()
    bd = os.listdir(bundle_folder)
    bd.sort()
    org_bd = os.listdir(orig_bundle_folder)
    org_bd.sort()
    n = len(org_bd)

    for io in range(n):
        mbundles, _ = load_trk(os.path.join(model_bundle_folder, mb[io]))
        bundles, _ = load_trk(os.path.join(bundle_folder, bd[io]))
        orig_bundles, _ = load_trk(os.path.join(orig_bundle_folder,
                                                org_bd[io]))

        mbundle_streamlines = set_number_of_points(mbundles,
                                                   nb_points=no_disks)

        metric = AveragePointwiseEuclideanMetric()
        qb = QuickBundles(threshold=25., metric=metric)
        clusters = qb.cluster(mbundle_streamlines)
        centroids = Streamlines(clusters.centroids)

        print('Number of centroids ', len(centroids.data))
        print('Model bundle ', mb[io])
        print('Number of streamlines in bundle in common space ', len(bundles))
        print('Number of streamlines in bundle in original space ',
              len(orig_bundles))

        _, indx = cKDTree(centroids.data, 1,
                          copy_data=True).query(bundles.data, k=1)

        metric_files_names = os.listdir(metric_folder)
        _, affine = load_nifti(os.path.join(metric_folder, "fa.nii.gz"))

        affine_r = np.linalg.inv(affine)
        transformed_orig_bundles = transform_streamlines(
            orig_bundles, affine_r)

        for mn in range(0, len(metric_files_names)):

            ind = np.array(indx)
            fm = metric_files_names[mn][:2]
            bm = mb[io][:-4]
            dt = dict()
            metric_name = os.path.join(metric_folder, metric_files_names[mn])

            if metric_files_names[mn][2:] == '.nii.gz':
                metric, _ = load_nifti(metric_name)

                dti_measures(transformed_orig_bundles, metric, dt, fm, bm,
                             subject, group, ind, out_dir)

            else:
                fm = metric_files_names[mn][:3]
                metric = load_peaks(metric_name)
                peak_values(bundles, metric, dt, fm, bm, subject, group, ind,
                            out_dir)
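A hypothetical call (all folder names are assumptions):

bundle_analysis("model_bundles/", "moved_bundles/", "org_bundles/",
                "metrics/", group="patient", subject="10001",
                no_disks=100, out_dir="stats/")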
Example #22
**Note:** Inputs must be sequences of the same length.
"""

from dipy.segment.clustering import QuickBundles
from dipy.segment.metric import AveragePointwiseEuclideanMetric

# Get some streamlines.
streamlines = get_streamlines()  # Previously defined.

# Make sure our streamlines have the same number of points.
from dipy.tracking.streamline import set_number_of_points
streamlines = set_number_of_points(streamlines, nb_points=12)

# Create the instance of `AveragePointwiseEuclideanMetric` to use.
metric = AveragePointwiseEuclideanMetric()
qb = QuickBundles(threshold=10., metric=metric)
clusters = qb.cluster(streamlines)

print("Nb. clusters:", len(clusters))
print("Cluster sizes:", map(len, clusters))

"""

::

    Nb. clusters: 4
    Cluster sizes: [64, 191, 44, 1]

.. _clustering-examples-SumPointwiseEuclideanMetric:
Example #23
def auto_extract_VCs(streamlines, ref_bundles):
    # Streamlines = list of all streamlines

    VC = 0
    VC_idx = set()

    found_vbs_info = {}
    for bundle in ref_bundles:
        found_vbs_info[bundle['name']] = {
            'nb_streamlines': 0,
            'streamlines_indices': set()
        }

    # Need to bookkeep because we chunk for big datasets
    processed_strl_count = 0
    chunk_size = 5000
    chunk_it = 0

    nb_bundles = len(ref_bundles)
    bundles_found = [False] * nb_bundles

    logging.debug("Starting scoring VCs")

    qb = QuickBundles(threshold=20, metric=AveragePointwiseEuclideanMetric())

    # Start loop here for big datasets
    while processed_strl_count < len(streamlines):
        logging.debug("Starting chunk: {0}".format(chunk_it))

        strl_chunk = streamlines[chunk_it * chunk_size:(chunk_it + 1) *
                                 chunk_size]

        processed_strl_count += len(strl_chunk)
        cur_chunk_VC_idx = set()
        cur_chunk_IC_idx = set()
        cur_chunk_VCWP_idx = set()

        # Already resample and run quickbundles on the submission chunk,
        # to avoid doing it at every call of auto_extract
        rstreamlines = set_number_of_points(strl_chunk, NB_POINTS_RESAMPLE)

        # qb.cluster had problem with f8
        rstreamlines = [s.astype('f4') for s in rstreamlines]

        chunk_cluster_map = qb.cluster(rstreamlines)
        chunk_cluster_map.refdata = strl_chunk

        logging.debug("Starting VC identification through auto_extract")

        for bundle_idx, ref_bundle in enumerate(ref_bundles):
            # The selected indices are from [0, len(strl_chunk)]
            selected_streamlines_indices = auto_extract(
                ref_bundle['cluster_map'],
                chunk_cluster_map,
                clean_thr=ref_bundle['threshold'])

            # Remove duplicates, when streamlines are assigned to multiple VBs.
            selected_streamlines_indices = set(selected_streamlines_indices) - \
                                           cur_chunk_VC_idx
            cur_chunk_VC_idx |= selected_streamlines_indices

            nb_selected_streamlines = len(selected_streamlines_indices)

            if nb_selected_streamlines:
                bundles_found[bundle_idx] = True
                VC += nb_selected_streamlines

                # Shift indices to match the real number of streamlines
                global_select_strl_indices = set([
                    v + chunk_it * chunk_size
                    for v in selected_streamlines_indices
                ])
                vb_info = found_vbs_info.get(ref_bundle['name'])
                vb_info['nb_streamlines'] += nb_selected_streamlines
                vb_info['streamlines_indices'] |= global_select_strl_indices

                VC_idx |= global_select_strl_indices
            else:
                global_select_strl_indices = set()

        chunk_it += 1

    # Compute bundle overlap, overreach and f1_scores and update found_vbs_info
    for bundle_idx, ref_bundle in enumerate(ref_bundles):
        bundle_name = ref_bundle["name"]
        bundle_mask = ref_bundle["mask"]

        vb_info = found_vbs_info[bundle_name]

        # Streamlines are in voxel space since that's how they were
        # loaded in the scoring function.
        tractogram = Tractogram(
            streamlines=(streamlines[i]
                         for i in vb_info['streamlines_indices']),
            affine_to_rasmm=bundle_mask.affine)

        scores = {}
        if len(tractogram) > 0:
            scores = compute_bundle_coverage_scores(tractogram, bundle_mask)

        vb_info['overlap'] = scores.get("OL", 0)
        vb_info['overreach'] = scores.get("OR", 0)
        vb_info['overreach_norm'] = scores.get("ORn", 0)
        vb_info['f1_score'] = scores.get("F1", 0)

    return VC_idx, found_vbs_info, bundles_found
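A sketch tying the helpers above together (the paths and the attribs dict
are assumptions):

ref_bundles = _prepare_gt_bundles_info("bundles/", "bundle_masks/",
                                       gt_bundles_attribs, "t1.nii.gz")
VC_idx, vbs_info, bundles_found = auto_extract_VCs(streamlines, ref_bundles)
print(len(VC_idx), "streamlines scored as valid connections")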
Example #24
**Note:** Inputs must be sequences of the same length.
"""

from dipy.viz import fvtk
from dipy.segment.clustering import QuickBundles
from dipy.segment.metric import AveragePointwiseEuclideanMetric

# Get some streamlines.
streamlines = get_streamlines()  # Previously defined.

# Make sure our streamlines have the same number of points.
from dipy.tracking.streamline import set_number_of_points
streamlines = set_number_of_points(streamlines, nb_points=12)

# Create the instance of `AveragePointwiseEuclideanMetric` to use.
metric = AveragePointwiseEuclideanMetric()
qb = QuickBundles(threshold=10., metric=metric)
clusters = qb.cluster(streamlines)

print("Nb. clusters:", len(clusters))
print("Cluster sizes:", map(len, clusters))

"""

::

    Nb. clusters: 4
    Cluster sizes: [64, 191, 44, 1]

.. _clustering-examples-SumPointwiseEuclideanMetric:
Example #25
def agreement(model_path,
              dwi_path_1,
              trk_path_1,
              dwi_path_2,
              trk_path_2,
              wm_path,
              fixel_cnt_path,
              cluster_thresh,
              centroid_size,
              fixel_thresh,
              bundle_min_cnt,
              gpu_queue=None):

    try:
        gpu_idx = maybe_get_a_gpu() if gpu_queue is None else gpu_queue.get()
        os.environ["CUDA_VISIBLE_DEVICES"] = gpu_idx
    except Exception as e:
        print(str(e))

    temperature = np.round(float(re.findall(r"T=(.*)\.h5", model_path)[0]), 6)
    model = load_model(model_path)

    print("Load data ...")

    dwi_img_1 = nib.load(dwi_path_1)
    dwi_img_1 = nib.funcs.as_closest_canonical(dwi_img_1)
    affine_1 = dwi_img_1.affine
    dwi_1 = dwi_img_1.get_data()

    dwi_img_2 = nib.load(dwi_path_2)
    dwi_img_2 = nib.funcs.as_closest_canonical(dwi_img_2)
    affine_2 = dwi_img_2.affine
    dwi_2 = dwi_img_2.get_data()

    wm_img = nib.load(wm_path)
    wm_data = wm_img.get_data()
    n_wm = (wm_data > 0).sum()

    fixel_cnt = nib.load(fixel_cnt_path).get_data()[:, :, :, 0]
    fixel_cnt = fixel_cnt[wm_data > 0]

    k_fixels = np.unique(fixel_cnt)
    max_fixels = k_fixels.max()
    n_fixels_gt = sum(k * (fixel_cnt == k).sum() for k in k_fixels)

    img_shape = dwi_1.shape[:-1]

    #---------------------------------------------------------------------------

    tractogram_1 = maybe_add_tangent(trk_path_1)
    tractogram_2 = maybe_add_tangent(trk_path_2)

    streamlines_1 = tractogram_1.streamlines
    streamlines_2 = tractogram_2.streamlines

    n_streamlines_1 = len(streamlines_1)
    n_streamlines_2 = len(streamlines_2)

    tractogram_1.extend(tractogram_2)

    ############################################################################

    print("Clustering streamlines.")

    feature = ResampleFeature(nb_points=centroid_size)

    qb = QuickBundles(threshold=cluster_thresh,
                      metric=AveragePointwiseEuclideanMetric(feature))

    bundles = qb.cluster(streamlines_1)
    bundles.refdata = tractogram_1

    n_bundles = len(bundles)

    print("Found {} bundles.".format(n_bundles))

    print("Computing bundle masks...")

    direction_masks_1 = np.zeros((n_bundles, ) + img_shape + (3, ), np.float16)
    direction_masks_2 = np.zeros((n_bundles, ) + img_shape + (3, ), np.float16)
    count_masks_1 = np.zeros((n_bundles, ) + img_shape, np.uint16)
    count_masks_2 = np.zeros((n_bundles, ) + img_shape, np.uint16)

    marginal_bundles = 0
    for i, b in enumerate(bundles.clusters):

        is_from_1 = np.argwhere(
            np.array(b.indices) < n_streamlines_1).squeeze().tolist()
        is_from_2 = np.argwhere(
            np.array(b.indices) >= n_streamlines_1).squeeze().tolist()

        if (np.sum(is_from_1) > bundle_min_cnt
                and np.sum(is_from_2) > bundle_min_cnt):

            bundle_map(b[is_from_1],
                       affine_1,
                       img_shape,
                       dir_out=direction_masks_1[i],
                       cnt_out=count_masks_1[i])

            bundle_map(b[is_from_2],
                       affine_2,
                       img_shape,
                       dir_out=direction_masks_2[i],
                       cnt_out=count_masks_2[i])
        else:
            marginal_bundles += 1

        assert direction_masks_1.dtype.name == "float16"
        assert direction_masks_2.dtype.name == "float16"
        assert count_masks_1.dtype.name == "uint16"
        assert count_masks_2.dtype.name == "uint16"

        print("Computed bundle {:3d}.".format(i), end="\r")

        #gc.collect()

    overlap = ((count_masks_1 > 0) * (count_masks_2 > 0) *
               np.expand_dims(wm_data > 0, 0))

    print("Calculating Fixels...")

    fixel_directions_1 = []
    fixel_directions_2 = []
    fixel_cnts_1 = []
    fixel_cnts_2 = []
    fixel_ijk = []
    n_fixels = []
    no_overlap = 0
    for vox in np.argwhere(wm_data > 0):

        matched = overlap[:, vox[0], vox[1], vox[2]] > 0

        if matched.sum() > 0:

            dir_1 = direction_masks_1[matched, vox[0], vox[1], vox[2], :]
            cnts_1 = count_masks_1[matched, vox[0], vox[1], vox[2]]

            dir_2 = direction_masks_2[matched, vox[0], vox[1], vox[2], :]
            cnts_2 = count_masks_2[matched, vox[0], vox[1], vox[2]]

            fixels1, fixels2, f_cnts_1, f_cnts_2 = cluster_fixels(
                dir_1,
                dir_2,
                cnts_1,
                cnts_2,
                threshold=np.cos(np.pi / fixel_thresh))

            n_f = len(fixels1)

            fixel_directions_1.append(fixels1)
            fixel_directions_2.append(fixels2)

            fixel_cnts_1.append(f_cnts_1)
            fixel_cnts_2.append(f_cnts_2)

            fixel_ijk.append(np.tile(vox, (n_f, 1)))

            n_fixels.append(n_f)
        else:
            no_overlap += 1

    fixel_directions_1 = np.vstack(fixel_directions_1)
    fixel_directions_2 = np.vstack(fixel_directions_2)
    fixel_cnts_1 = np.vstack(fixel_cnts_1).reshape(-1)
    fixel_cnts_2 = np.vstack(fixel_cnts_2).reshape(-1)
    fixel_ijk = np.vstack(fixel_ijk)

    #gc.collect()

    ############################################################################

    print("Computing agreement ...")

    n_fixels_sum = np.sum(n_fixels)

    block_size = get_blocksize(model, dwi_1.shape[-1])

    d_1 = np.zeros(
        [n_fixels_sum, block_size, block_size, block_size, dwi_1.shape[-1]])
    d_2 = np.zeros(
        [n_fixels_sum, block_size, block_size, block_size, dwi_1.shape[-1]])
    i, j, k = fixel_ijk.T
    for idx in range(block_size**3):
        ii, jj, kk = np.unravel_index(idx,
                                      (block_size, block_size, block_size))
        d_1[:, ii, jj, kk, :] = dwi_1[i + ii - 1, j + jj - 1, k + kk - 1, :]
        d_2[:, ii, jj, kk, :] = dwi_2[i + ii - 1, j + jj - 1, k + kk - 1, :]

    d_1 = d_1.reshape(-1, dwi_1.shape[-1] * block_size**3)
    d_2 = d_2.reshape(-1, dwi_2.shape[-1] * block_size**3)

    dnorm_1 = np.linalg.norm(d_1, axis=1, keepdims=True) + 10**-2
    dnorm_2 = np.linalg.norm(d_2, axis=1, keepdims=True) + 10**-2

    d_1 /= dnorm_1
    d_2 /= dnorm_2

    model_inputs_1 = np.hstack([fixel_directions_1, d_1, dnorm_1])
    model_inputs_2 = np.hstack([fixel_directions_2, d_2, dnorm_2])

    fixel_agreements, fixel_kappa_1, fixel_kappa_2, fixel_mu_1, fixel_mu_2 = \
    agreement_for(
        model,
        model_inputs_1,
        model_inputs_2,
        fixel_cnts_1,
        fixel_cnts_2
    )

    agreement = {"temperature": temperature}
    agreement["model_path"] = model_path
    agreement["n_bundles"] = n_bundles
    agreement["value"] = fixel_agreements.sum() / n_fixels_gt
    agreement["min"] = fixel_agreements.min()
    agreement["mean"] = fixel_agreements.mean()
    agreement["max"] = fixel_agreements.max()
    agreement["std"] = fixel_agreements.std()
    agreement["n_fixels_sum"] = n_fixels_sum
    agreement["n_wm"] = n_wm
    agreement["n_fixels_gt"] = n_fixels_gt
    agreement["marginal_bundles"] = marginal_bundles
    agreement["no_overlap"] = no_overlap
    agreement["dwi_1"] = dwi_path_1
    agreement["trk_1"] = trk_path_1
    agreement["dwi_2"] = dwi_path_2
    agreement["trk_2"] = trk_path_2
    agreement["fixel_cnt_path"] = fixel_cnt_path
    agreement["cluster_thresh"] = cluster_thresh
    agreement["centroid_size"] = centroid_size
    agreement["fixel_thresh"] = fixel_thresh
    agreement["bundle_min_cnt"] = bundle_min_cnt
    agreement["wm_path"] = wm_path
    agreement["ideal"] = ideal_agreement(temperature)

    for k, cnt in zip(*np.unique(n_fixels, return_counts=True)):
        agreement["n_vox_with_{}_fixels".format(k)] = cnt

    for i in [1, 5, 10]:
        agreement["le_{}_fibers_per_fixel_1".format(i)] = np.mean(
            fixel_cnts_1 < i)

    agreement["mean_fibers_per_fixel_1"] = np.mean(fixel_cnts_1)
    agreement["median_fibers_per_fixel_1"] = np.median(fixel_cnts_1)
    agreement["mean_fibers_per_fixel_2"] = np.mean(fixel_cnts_2)
    agreement["median_fibers_per_fixel_2"] = np.median(fixel_cnts_2)
    agreement["std_fibers_per_fixel"] = np.std(fixel_cnts_1)
    agreement["max_fibers_per_fixel"] = np.max(fixel_cnts_1)
    agreement["min_fibers_per_fixel"] = np.min(fixel_cnts_1)

    fixel_angles = (fixel_directions_1 * fixel_directions_2).sum(axis=1)
    agreement["mean_fixel_angle"] = fixel_angles.mean()
    agreement["median_fixel_angle"] = np.median(fixel_angles)
    agreement["std_fixel_angle"] = fixel_angles.std()
    agreement["negative_fixel_angles"] = (fixel_angles < 0).mean()

    save(agreement, "agreement_T={}.yml".format(temperature),
         os.path.dirname(model_path))

    np.savez(
        os.path.join(os.path.dirname(model_path),
                     "data_T={}".format(temperature)),
        fixel_cnts_1=fixel_cnts_1,
        fixel_cnts_2=fixel_cnts_2,
        fixel_mu_1=fixel_mu_1,
        fixel_mu_2=fixel_mu_2,
        fixel_kappa_1=fixel_kappa_1,
        fixel_kappa_2=fixel_kappa_2,
        fixel_directions_1=fixel_directions_1,
        fixel_directions_2=fixel_directions_2,
        fixel_agreements=fixel_agreements,
    )

    K.clear_session()
    if gpu_queue is not None:
        gpu_queue.put(gpu_idx)
Example #26
def evaluate_along_streamlines(scalar_img, streamlines, beginnings, nr_points, dilate=0, predicted_peaks=None,
                               affine=None):
    # Runtime:
    # - default:                2.7s (test),    56s (all),      10s (test 4 bundles, 100 points)
    # - map_coordinate order 1: 1.9s (test),    26s (all),       6s (test 4 bundles, 100 points)
    # - map_coordinate order 3: 2.2s (test),    33s (all),
    # - values_from_volume:     2.5s (test),    43s (all),
    # - AFQ:                      ?s (test),     ?s (all),      85s  (test 4 bundles, 100 points)
    # => AFQ a lot slower than others

    for i in range(dilate):
        beginnings = binary_dilation(beginnings)
    beginnings = beginnings.astype(np.uint8)

    # THIS IS ONLY NEEDED FOR "MANUAL":
    # Move from convention "0mm is in voxel center" to convention "0mm is in voxel corner". This makes it easier
    # to calculate the voxel a streamline point is located in.
    # (dipy is working with the convention "0mm is in voxel center" therefore this is not needed there)
    # print("INFO: Adding 0.5 to streamlines")
    # streamlines = fiber_utils.add_to_each_streamline(streamlines, 0.5)

    streamlines = _orient_to_same_start_region(streamlines, beginnings)

    ### Sampling ###

    #################################### Sampling "MANUAL" ############################
    # values = []
    # for i in range(nr_points):
    #     values.append([])
    #
    # for idx, sl in enumerate(streamlines):
    #     for jdx in range(sl.shape[0]):   #iterate over nr_points
    #         point = sl[jdx]
    #         if predicted_peaks is not None:
    #             scalar_value = _get_length_best_orig_peak(predicted_peaks, scalar_img,
    #                                                      int(point[0]), int(point[1]), int(point[2]))
    #         else:
    #             scalar_value = scalar_img[int(point[0]), int(point[1]), int(point[2])]
    #         values[jdx].append(scalar_value)
    ###################################################################################

    #################################### Sampling map_coordinates #####################
    values = map_coordinates(scalar_img, np.array(streamlines).T, order=1)
    ###################################################################################

    #################################### Sampling values_from_volume ##################
    # streamlines = list(transform_streamlines(streamlines, affine))  # this has to be here; not remove previous one
    # values = np.array(values_from_volume(scalar_img, streamlines, affine=affine)).T
    ###################################################################################


    ### Aggregation ###

    #################################### Aggregating by MEAN ##########################
    # values_mean = np.array(values).mean(axis=1)
    # values_std = np.array(values).std(axis=1)
    # return values_mean, values_std
    ###################################################################################

    #################################### Aggregating by cKDTree #######################
    metric = AveragePointwiseEuclideanMetric()
    qb = QuickBundles(threshold=100., metric=metric)
    clusters = qb.cluster(streamlines)
    centroids = Streamlines(clusters.centroids)
    if len(centroids) > 1:
        print("WARNING: number clusters > 1 ({})".format(len(centroids)))
    _, segment_idxs = cKDTree(centroids.data, 1, copy_data=True).query(streamlines, k=1)  # (2000, 20)

    values_t = np.array(values).T  # (2000, 20)

    # If we want to take weighted mean like in AFQ:
    # weights = dsa.gaussian_weights(Streamlines(streamlines))
    # values_t = weights * values_t
    # return np.sum(values_t, 0), None

    results_dict = defaultdict(list)
    for idx, sl in enumerate(values_t):
        for jdx, seg in enumerate(sl):
            results_dict[segment_idxs[idx, jdx]].append(seg)

    if len(results_dict.keys()) < nr_points:
        print("WARNING: found less than required points. Filling up with centroid values.")
        centroid_values = map_coordinates(scalar_img, np.array([centroids[0]]).T, order=1)
        for i in range(nr_points):
            if len(results_dict[i]) == 0:
                results_dict[i].append(np.array(centroid_values).T[0, i])

    results_mean = []
    results_std = []
    for key in sorted(results_dict.keys()):
        value = results_dict[key]
        if len(value) > 0:
            results_mean.append(np.array(value).mean())
            results_std.append(np.array(value).std())
        else:
            print("WARNING: empty segment")
            results_mean.append(0)
            results_std.append(0)

    return results_mean, results_std
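A hedged usage sketch (all inputs are assumptions; the streamlines must
already be resampled to nr_points and given in voxel space):

from dipy.io.image import load_nifti

fa, _ = load_nifti("fa.nii.gz")
means, stds = evaluate_along_streamlines(fa, resampled_sls, beginnings_mask,
                                         nr_points=100)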
Example #27
def plot_bundles_with_metric(bundle_path,
                             endings_path,
                             brain_mask_path,
                             bundle,
                             metrics,
                             output_path,
                             tracking_format="trk_legacy",
                             show_color_bar=True):
    import seaborn as sns  # import in function to avoid error if not installed (this is only needed in this function)
    from dipy.viz import actor, window
    from tractseg.libs import vtk_utils

    def _add_extra_point_to_last_streamline(sl):
        # Coloring breaks as soon as all streamlines have the same number of
        # points (reason unclear), so add one point to the last streamline to
        # give it a different count.
        sl[-1] = np.append(sl[-1], [sl[-1][-1]], axis=0)
        return sl

    # Settings
    NR_SEGMENTS = 100
    ANTI_INTERPOL_MULT = 1  # increase number of points to avoid interpolation to blur the colors
    algorithm = "distance_map"  # equal_dist | distance_map | cutting_plane
    # colors = np.array(sns.color_palette("coolwarm", NR_SEGMENTS))  # colormap blue to red (does not fit to colorbar)
    colors = np.array(sns.light_palette(
        "red", NR_SEGMENTS))  # colormap only red, which fits to color_bar
    img_size = (1000, 1000)

    # Tractometry skips first and last element. Therefore we only have 98 instead of 100 elements.
    # Here we duplicate the first and last element to get back to 100 elements
    metrics = list(metrics)
    metrics = np.array([metrics[0]] + metrics + [metrics[-1]])

    metrics_max = metrics.max()
    metrics_min = metrics.min()
    if metrics_max == metrics_min:
        metrics = np.zeros(len(metrics))
    else:
        metrics = img_utils.scale_to_range(
            metrics,
            range=(0, 99))  # range needs to be same as segments in colormap

    orientation = dataset_specific_utils.get_optimal_orientation_for_bundle(
        bundle)

    # Load mask
    beginnings_img = nib.load(endings_path)
    beginnings = beginnings_img.get_data()
    for i in range(1):
        beginnings = binary_dilation(beginnings)

    # Load trackings
    if tracking_format == "trk_legacy":
        streams, hdr = trackvis.read(bundle_path)
        streamlines = [s[0] for s in streams]
    else:
        sl_file = nib.streamlines.load(bundle_path)
        streamlines = sl_file.streamlines

    # Reduce streamline count
    streamlines = streamlines[::2]

    # Reorder to make all streamlines have same start region
    streamlines = fiber_utils.add_to_each_streamline(streamlines, 0.5)
    streamlines_new = []
    for idx, sl in enumerate(streamlines):
        startpoint = sl[0]
        # Flip streamline if not in right order
        if beginnings[int(startpoint[0]),
                      int(startpoint[1]),
                      int(startpoint[2])] == 0:
            sl = sl[::-1, :]
        streamlines_new.append(sl)
    streamlines = fiber_utils.add_to_each_streamline(streamlines_new, -0.5)

    if algorithm == "distance_map" or algorithm == "equal_dist":
        streamlines = fiber_utils.resample_fibers(
            streamlines, NR_SEGMENTS * ANTI_INTERPOL_MULT)
    elif algorithm == "cutting_plane":
        streamlines = fiber_utils.resample_to_same_distance(
            streamlines,
            max_nr_points=NR_SEGMENTS,
            ANTI_INTERPOL_MULT=ANTI_INTERPOL_MULT)

    # Cut start and end by percentage
    # streamlines = FiberUtils.resample_fibers(streamlines, NR_SEGMENTS * ANTI_INTERPOL_MULT)
    # remove = int((NR_SEGMENTS * ANTI_INTERPOL_MULT) * 0.15)  # remove X% in beginning and end
    # streamlines = np.array(streamlines)[:, remove:-remove, :]
    # streamlines = list(streamlines)

    if algorithm == "equal_dist":
        segment_idxs = []
        for i in range(len(streamlines)):
            segment_idxs.append(list(range(NR_SEGMENTS * ANTI_INTERPOL_MULT)))
        segment_idxs = np.array(segment_idxs)

    elif algorithm == "distance_map":
        metric = AveragePointwiseEuclideanMetric()
        qb = QuickBundles(threshold=100., metric=metric)
        clusters = qb.cluster(streamlines)
        centroids = Streamlines(clusters.centroids)
        _, segment_idxs = cKDTree(centroids.data, 1,
                                  copy_data=True).query(streamlines, k=1)

    elif algorithm == "cutting_plane":
        streamlines_resamp = fiber_utils.resample_fibers(
            streamlines, NR_SEGMENTS * ANTI_INTERPOL_MULT)
        metric = AveragePointwiseEuclideanMetric()
        qb = QuickBundles(threshold=100., metric=metric)
        clusters = qb.cluster(streamlines_resamp)
        centroid = Streamlines(clusters.centroids)[0]
        # index of the middle cluster
        middle_idx = int(NR_SEGMENTS / 2) * ANTI_INTERPOL_MULT
        middle_point = centroid[middle_idx]
        segment_idxs = fiber_utils.get_idxs_of_closest_points(
            streamlines, middle_point)
        # Align along the middle and assign indices
        segment_idxs_eqlen = []
        for idx, sl in enumerate(streamlines):
            sl_middle_pos = segment_idxs[idx]
            before_elems = sl_middle_pos
            after_elems = len(sl) - sl_middle_pos
            base_idx = 1000  # use higher index to avoid negative numbers for area below middle
            r = range((base_idx - before_elems), (base_idx + after_elems))
            segment_idxs_eqlen.append(r)
        segment_idxs = segment_idxs_eqlen

    # Add extra point otherwise coloring BUG
    streamlines = _add_extra_point_to_last_streamline(streamlines)

    renderer = window.Renderer()
    colors_all = []  # final shape will be [nr_streamlines, nr_points, 3]
    for jdx, sl in enumerate(streamlines):
        colors_sl = []
        for idx, p in enumerate(sl):
            if idx >= len(segment_idxs[jdx]):
                seg_idx = segment_idxs[jdx][idx - 1]
            else:
                seg_idx = segment_idxs[jdx][idx]

            m = metrics[int(seg_idx / ANTI_INTERPOL_MULT)]
            color = colors[int(m)]
            colors_sl.append(color)
        colors_all.append(
            colors_sl
        )  # this can not be converted to numpy array because last element has one more elem

    sl_actor = actor.streamtube(streamlines,
                                colors=colors_all,
                                linewidth=0.2,
                                opacity=1)
    renderer.add(sl_actor)

    # plot brain mask
    mask = nib.load(brain_mask_path).get_data()
    cont_actor = vtk_utils.contour_from_roi_smooth(
        mask,
        affine=beginnings_img.affine,
        color=[.9, .9, .9],
        opacity=.2,
        smoothing=50)
    renderer.add(cont_actor)

    if show_color_bar:
        lut_cmap = actor.colormap_lookup_table(scale_range=(metrics_min,
                                                            metrics_max),
                                               hue_range=(0.0, 0.0),
                                               saturation_range=(0.0, 1.0))
        renderer.add(actor.scalar_bar(lut_cmap))

    if orientation == "sagittal":
        renderer.set_camera(position=(-412.95, -34.38, 80.15),
                            focal_point=(102.46, -16.96, -11.71),
                            view_up=(0.1806, 0.0, 0.9835))
    elif orientation == "coronal":
        renderer.set_camera(position=(-48.63, 360.31, 98.37),
                            focal_point=(-20.16, 92.89, 36.02),
                            view_up=(-0.0047, -0.2275, 0.9737))
    elif orientation == "axial":
        pass
    else:
        raise ValueError("Invalid orientation provided")

    # Use this to interactively get a new camera angle
    # window.show(renderer, size=img_size, reset_camera=False)
    # print(renderer.get_camera())

    window.record(renderer, out_path=output_path, size=img_size)
Example #28
**Note:** Inputs must be sequences of the same length.
"""

from dipy.segment.clustering import QuickBundles
from dipy.segment.metric import AveragePointwiseEuclideanMetric

# Get some streamlines.
streamlines = get_streamlines()  # Previously defined.

# Make sure our streamlines have the same number of points.
from dipy.tracking.streamline import set_number_of_points
streamlines = set_number_of_points(streamlines, nb_points=12)

# Create the instance of `AveragePointwiseEuclideanMetric` to use.
metric = AveragePointwiseEuclideanMetric()
qb = QuickBundles(threshold=10., metric=metric)
clusters = qb.cluster(streamlines)

print("Nb. clusters:", len(clusters))
print("Cluster sizes:", map(len, clusters))
"""

::

    Nb. clusters: 4
    Cluster sizes: [64, 191, 44, 1]

.. _clustering-examples-SumPointwiseEuclideanMetric:
Example #29
def outliers_removal_using_hierarchical_quickbundles(streamlines,
                                                     nb_points=12,
                                                     min_threshold=0.5,
                                                     nb_samplings_max=30,
                                                     sampling_seed=1234,
                                                     fast_approx=False):
    """
    Classify inliers and outliers from a list of streamlines.
    Parameters
    ----------
    streamlines: list of ndarray
        The list of streamlines from which inliers and outliers are separated.
    min_threshold: float
        Quickbundles distance threshold for the last threshold.
    nb_samplings_max: int
        Number of run executed to explore the search space.
        A different sampling is used each time.
    sampling_seed: int
        Random number generation initialization seed.
    Returns
    -------
    ndarray: Float value representing the 0-1 score for each streamline
    """
    if nb_samplings_max < 2:
        raise ValueError("'nb_samplings_max' must be >= 2")

    rng = np.random.RandomState(sampling_seed)
    resample_feature = ResampleFeature(nb_points=nb_points)
    metric = AveragePointwiseEuclideanMetric(resample_feature)

    box_min, box_max = get_streamlines_bounding_box(streamlines)

    # Half of the bounding box's halved diagonal length.
    initial_threshold = np.min(np.abs(box_max - box_min)) / 2.

    # Quickbundle's threshold is halved between hierarchical level.
    if fast_approx:
        thresholds = np.array([2 / 1.2**i for i in range(25)][1:])
        thresholds = np.concatenate(([40, 20, 10, 5, 2.5],
                                     thresholds[thresholds > min_threshold]))
    else:
        thresholds = takewhile(lambda t: t >= min_threshold,
                               (initial_threshold / 1.2**i for i in count()))
        thresholds = list(thresholds)

    ordering = np.arange(len(streamlines))
    path_lengths_per_streamline = 0

    streamlines_path = np.ones((len(streamlines), len(thresholds),
                                nb_samplings_max), dtype=int) * -1

    for i in range(nb_samplings_max):
        rng.shuffle(ordering)

        cluster_orderings = [ordering]
        for j, threshold in enumerate(thresholds):
            id_cluster = 0

            next_cluster_orderings = []
            qb = QuickBundles(metric=metric, threshold=threshold)
            for cluster_ordering in cluster_orderings:
                clusters = qb.cluster(streamlines, ordering=cluster_ordering)

                for _, cluster in enumerate(clusters):
                    streamlines_path[cluster.indices, j, i] = id_cluster
                    id_cluster += 1
                    if len(cluster) > 10:
                        next_cluster_orderings.append(cluster.indices)

            cluster_orderings = next_cluster_orderings

        if i <= 1:  # Needs at least two orderings to compute stderror.
            continue

        path_lengths_per_streamline = np.sum((streamlines_path != -1),
                                             axis=1)[:, :i]

    summary = np.mean(path_lengths_per_streamline,
                      axis=1) / np.max(path_lengths_per_streamline)
    return summary
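A sketch of filtering with the returned scores (the 0.5 cutoff is an
assumption; tune it per dataset):

scores = outliers_removal_using_hierarchical_quickbundles(streamlines)
inliers = [s for s, score in zip(streamlines, scores) if score > 0.5]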
Example #30
# random.seed(123)
# oldidx = random.sample(oldidx,10)
# old = [l[o] for o in oldidx]
# age = [young,old]

# %%
# Set paths
mypath = '/Users/alex/code/Wenlin/data'
outpath = '/Users/alex/code/Wenlin/Tracts_Registration/results'

# %%
# Set parameters
num_points1 = 50
distance1 = 1
feature1 = ResampleFeature(nb_points=num_points1)
metric1 = AveragePointwiseEuclideanMetric(feature=feature1)

# Group cluster parameters
num_points2 = 50
distance2 = 2
feature2 = ResampleFeature(nb_points=num_points2)
metric2 = AveragePointwiseEuclideanMetric(feature=feature2)

# %%
# Load the control animal
streams_control, hdr_control = load_trk(
    mypath + '/wenlin_results/N54900_bmCSA_detr_small.trk')
labels_control, affine_labels_control = load_nifti(
    mypath + '/wenlin_data/labels/fa_labels_warp_N54900_RAS.nii.gz')
fa_control, affine_fa_control = load_nifti(
    '/Users/alex/code/Wenlin/data/wenlin_results/bmfaN54900.nii.gz')
Example #31
def remove_similar_streamlines(streamlines, threshold=5, do_avg=False):
    """ Remove similar streamlines, shuffling streamlines will impact the 
    results.
    Only provide a small set of streamlines (below 2000 if possible).

    Parameters
    ----------
    streamlines : list of numpy.ndarray
        Input streamlines to remove duplicates from.
    threshold : float
        Distance threshold to consider two streamlines similar, in mm.
    do_avg : bool
        Instead of removing similar streamlines, average each group of
        similar streamlines into a single, smoother streamline.

    Returns
    -------
    streamlines : list of numpy.ndarray
    """
    if len(streamlines) == 1:
        return streamlines

    sample_20_streamlines = set_number_of_points(streamlines, 20)
    distance_matrix = distance_matrix_mdf(sample_20_streamlines,
                                          sample_20_streamlines)

    current_indice = 0
    avg_streamlines = []
    while True:
        sim_indices = np.where(distance_matrix[current_indice] < threshold)[0]

        pop_count = 0
        if do_avg:
            avg_streamline_list = []

        # Every streamline similar to the current one (excluding itself)
        # should be deleted from the set of desired streamlines
        for ind in sim_indices:
            if not current_indice == ind:
                streamlines.pop(ind-pop_count)

                distance_matrix = np.delete(distance_matrix, ind-pop_count,
                                            axis=0)
                distance_matrix = np.delete(distance_matrix, ind-pop_count,
                                            axis=1)
                pop_count += 1

            if do_avg:
                kicked_out = sample_20_streamlines[ind]
                avg_streamline_list.append(kicked_out)

        if do_avg:
            if len(avg_streamline_list) > 1:
                metric = AveragePointwiseEuclideanMetric()
                qb = QuickBundles(threshold=100, metric=metric)
                clusters = qb.cluster(avg_streamline_list)
                avg_streamlines.append(clusters.centroids[0])
            else:
                avg_streamlines.append(avg_streamline_list[0])

        current_indice += 1
        # Once you reach the end of the remaining streamlines
        if current_indice >= len(distance_matrix):
            break

    if do_avg:
        return avg_streamlines
    else:
        return streamlines
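Usage note: the function mutates its input via pop(), so pass a copy (a
hedged sketch):

pruned = remove_similar_streamlines(list(streamlines), threshold=5)
averaged = remove_similar_streamlines(list(streamlines), threshold=5,
                                      do_avg=True)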