Example #1
def test_3D_segments():
    # Five two-point segments at x = 1, 3, 2, 5 and 5.5; the two rightmost
    # ones should end up in the same cluster at the finest level.
    points = np.array([[[1, 0, 0],
                        [1, 1, 0]],
                       [[3, 1, 0],
                        [3, 0, 0]],
                       [[2, 0, 0],
                        [2, 1, 0]],
                       [[5, 1, 0],
                        [5, 0, 0]],
                       [[5.5, 0, 0],
                        [5.5, 1, 0]]], dtype="f4")

    thresholds = [4, 2, 1]

    feature = ResampleFeature(nb_points=20)
    metric = AveragePointwiseEuclideanMetric(feature)
    qbx = QuickBundlesX(thresholds, metric=metric)
    tree = qbx.cluster(points)
    clusters_0 = tree.get_clusters(0)
    clusters_1 = tree.get_clusters(1)
    clusters_2 = tree.get_clusters(2)

    assert_equal(len(clusters_0.centroids), len(clusters_1.centroids))
    assert_equal(len(clusters_2.centroids) > len(clusters_1.centroids), True)

    assert_array_equal(clusters_2[1].indices, np.array([3, 4], dtype=np.int32))
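These test snippets omit their imports. A minimal preamble along the following lines is assumed (module paths as in the dipy releases these tests were written against; treat this as an assumption, not part of the original source):

import numpy as np
from numpy.testing import assert_equal, assert_array_equal

from dipy.segment.clustering import QuickBundlesX
from dipy.segment.metric import (ResampleFeature,
                                 AveragePointwiseEuclideanMetric)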
Example #2
def gen_sagittal_views(show=False):
    from dipy.viz import window
    from dipy.viz.clustering import show_clusters
    streamlines = get_data()

    thresholds = [40, 30, 25]  # , 20, 15]
    qbx_class = QuickBundlesX(thresholds)
    print("Clustering {} streamlines ({})...".format(len(streamlines),
                                                     thresholds))
    qbx = qbx_class.cluster(streamlines)

    clusters = qbx.get_clusters(len(thresholds))
    clusters.refdata = streamlines

    print "Displaying {} clusters...".format(len(clusters))

    tree = qbx.get_tree_cluster_map()
    tree.refdata = streamlines
    color_tree(tree)

    # #TMP
    # clusters = tree.get_clusters(len(thresholds))
    # clusters.refdata = streamlines
    # ren = show_clusters(clusters, show=True)
    # #window.snapshot(ren, fname="sagittal_{}".format(thresholds[-1]), size=(1200, 1200))
    # return

    for level in range(1, len(thresholds) + 1):
        print(level, thresholds[level - 1])
        clusters = tree.get_clusters(level)
        clusters.refdata = streamlines
        ren = show_clusters(clusters, show=show)
        ren.reset_camera_tight()
        window.snapshot(ren, fname="sagittal_{}".format(thresholds[level-1]), size=(1200, 1200))
Example #3
def test_circle_parallel_fornix():

    circle = streamlines_in_circle(100, step_size=2)

    parallel = streamlines_parallel(100)

    thresholds = [1, 0.1]

    qbx_class = QuickBundlesX(thresholds)
    tree = qbx_class.cluster(circle)

    clusters = tree.get_clusters(0)
    assert_equal(len(clusters), 1)

    clusters = tree.get_clusters(1)
    assert_equal(len(clusters), 3)

    clusters = tree.get_clusters(2)
    assert_equal(len(clusters), 34)

    thresholds = [.5]

    qbx_class = QuickBundlesX(thresholds)
    tree = qbx_class.cluster(parallel)

    clusters = tree.get_clusters(0)
    assert_equal(len(clusters), 1)

    clusters = tree.get_clusters(1)
    assert_equal(len(clusters), 100)
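For orientation: level 0 of the tree returned by QuickBundlesX.cluster is the root, a single cluster holding every input streamline, and level len(thresholds) is the finest partition, with one level per threshold. A minimal sketch of walking all levels, reusing a tree and thresholds from the snippets above:

for level in range(len(thresholds) + 1):
    clusters = tree.get_clusters(level)
    print("level {}: {} clusters".format(level, len(clusters)))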
Example #4
def test_with_simulated_bundles2():

    # Generate synthetic streamlines
    bundles = bearing_bundles(4, 2)
    bundles.append(straight_bundle(1))
    streamlines = list(itertools.chain(*bundles))

    thresholds = [10, 2, 1]
    qbx_class = QuickBundlesX(thresholds)
    tree = qbx_class.cluster(streamlines)
    # By default `refdata` refers to data being clustered.
    assert_equal(tree.refdata, streamlines)
Example #5
def test_3D_points():

    points = np.array(
        [[[1, 0, 0]], [[3, 0, 0]], [[2, 0, 0]], [[5, 0, 0]], [[5.5, 0, 0]]],
        dtype="f4")

    thresholds = [4, 2, 1]
    metric = AveragePointwiseEuclideanMetric()
    qbx = QuickBundlesX(thresholds, metric=metric)
    tree = qbx.cluster(points)
    clusters_2 = tree.get_clusters(2)
    assert_array_equal(clusters_2.clusters_sizes(), [3, 2])
    clusters_0 = tree.get_clusters(0)
    assert_array_equal(clusters_0.clusters_sizes(), [5])
Example #6
def test_with_simulated_bundles():

    streamlines = simulated_bundle(3, False, 2)
    thresholds = [10, 3, 1]
    qbx_class = QuickBundlesX(thresholds)
    tree = qbx_class.cluster(streamlines)
    for level in range(len(thresholds) + 1):
        clusters = tree.get_clusters(level)  # ends at the finest level

    assert_equal(tree.leaves[0].indices[0], 0)
    assert_equal(tree.leaves[2][0], 2)
    clusters.refdata = streamlines

    assert_array_equal(clusters[0][0],
                       np.array([[0., -10., -5.], [0., 10., -5.]]))
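Note: assigning clusters.refdata makes cluster indexing return the underlying data (here, streamline point arrays) instead of integer indices, which is why clusters[0][0] above compares directly against raw points.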
Example #7
def gen_qbx_tree(show=False):
    from dipy.viz import window
    from dipy.viz.clustering import show_clusters_graph
    streamlines = get_data()

    thresholds = [40, 30, 25]  # , 20, 15]
    # thresholds = [30, 25, 15]
    qbx_class = QuickBundlesX(thresholds)
    print("Clustering {} streamlines ({})...".format(len(streamlines),
                                                     thresholds))
    qbx = qbx_class.cluster(streamlines)

    print "Displaying clusters graph..."
    tree = qbx.get_tree_cluster_map()
    tree.refdata = streamlines
    color_tree2(tree, min_level=0)
    #color_tree(tree, min_level=0)
    ren = show_clusters_graph(tree, show=show)
    ren.reset_camera_tight()
Example #8
def test_qbx_and_merge():

    # Generate synthetic streamlines
    bundles = bearing_bundles(4, 2)
    bundles.append(straight_bundle(1))

    streamlines = Streamlines(list(itertools.chain(*bundles)))

    thresholds = [10, 2, 1]

    rng = np.random.RandomState(seed=42)
    qbxm_centroids = qbx_and_merge(streamlines, thresholds, rng=rng).centroids

    qbx = QuickBundlesX(thresholds)
    tree = qbx.cluster(streamlines)
    qbx_centroids = tree.get_clusters(3).centroids

    assert_equal(len(qbx_centroids) > len(qbxm_centroids), True)
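qbx_and_merge runs QuickBundlesX and then merges similar clusters, so at the finest level it should end up with fewer centroids than plain QuickBundlesX; that is exactly what the assertion above checks. The names used in this test presumably come from imports along these lines (an assumption based on the dipy layout of this era):

from dipy.tracking.streamline import Streamlines
from dipy.segment.clustering import QuickBundlesX, qbx_and_merge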
Example #9
def gen_qbx_tree_progress():
    from dipy.viz import window
    from dipy.viz.clustering import show_clusters_graph_progress
    streamlines = get_data()

    thresholds = [40, 30, 25]  # , 20, 15]
    qbx_class = QuickBundlesX(thresholds)
    print("Clustering {} streamlines ({})...".format(len(streamlines),
                                                     thresholds))
    qbx = qbx_class.cluster(streamlines)

    print "Displaying clusters graph..."
    tree = qbx.get_tree_cluster_map()
    tree.refdata = streamlines
    color_tree(tree, min_level=0)

    # max_indices = [100, 500, 1000, 3000, len(streamlines)]
    max_indices = [100, 250, 500, 750, 1000, 2000, 3000, 5000, len(streamlines)]
    # max_indices = np.arange(10, len(streamlines), 100)
    for i, ren in enumerate(show_clusters_graph_progress(tree, max_indices,
                                                         show=False)):
        ren.reset_camera_tight()
        window.snapshot(ren,
                        fname="tree_{}_part_{}".format(
                            "-".join(map(str, thresholds)), i),
                        size=(1200, 1200))
Example #10
def score_auto_extract_auto_IBs(streamlines,
                                bundles_masks,
                                ref_bundles,
                                ROIs,
                                wm,
                                save_segmented=False,
                                save_IBs=False,
                                save_VBs=False,
                                save_VCWPs=False,
                                out_segmented_strl_dir='',
                                base_out_segmented_strl='',
                                ref_anat_fname=''):
    """
    TODO document


    Parameters
    ----------
    streamlines : sequence
        sequence of T streamlines. One streamline is an ndarray of shape (N, 3),
        where N is the number of points in that streamline, and
        ``streamlines[t][n]`` is the n-th point in the t-th streamline. Points
        are of form x, y, z in *voxel* coordinates.
    bundles_masks : sequence
        list of nibabel objects corresponding to mask of bundles
    ROIs : sequence
        list of nibabel objects corresponding to mask of ROIs
    wm : nibabel object
        mask of the white matter
    save_segmented : bool
        if true, returns indices of streamlines composing VC, IC, VCWP and NC

    Returns
    -------
    scores : dict
        dictionary containing a score for each metric
    indices : dict
        dictionary containing the indices of streamlines composing VC, IC,
        VCWP and NC

    """

    # Load all streamlines, since streamlines is a generator.
    # full_strl = [s for s in streamlines]

    VC_indices, found_vbs_info = _auto_extract_VCs(streamlines, ref_bundles)
    VC = len(VC_indices)
    logging.debug('Found {} candidate VC'.format(VC))

    if save_VBs:
        _save_extracted_VBs(found_vbs_info, streamlines,
                            out_segmented_strl_dir, base_out_segmented_strl,
                            ref_anat_fname)

    # TODO might be re-added
    # To keep track of streamlines that have been classified
    # classified_streamlines_indices = VC_indices

    # New algorithm
    # Step 1: remove streamlines shorter than threshold (currently 35)
    # Step 2: apply QuickBundlesX with distance thresholds [30, 25, 20, 15]
    # Step 3: remove singletons
    # Step 4: assign to closest ROIs pair
    logging.debug("Starting IC, IB scoring")

    total_strl_count = len(streamlines)
    candidate_ic_strl_indices = sorted(
        set(range(total_strl_count)) - VC_indices)

    length_thres = 35.

    candidate_ic_streamlines = []
    rejected_streamlines = []

    for idx in candidate_ic_strl_indices:
        if slength(streamlines[idx]) >= length_thres:
            candidate_ic_streamlines.append(streamlines[idx].astype('f4'))
        else:
            rejected_streamlines.append(streamlines[idx].astype('f4'))

    logging.debug('Found {} candidate IC'.format(
        len(candidate_ic_streamlines)))
    logging.debug('Found {} streamlines that were too short'.format(
        len(rejected_streamlines)))

    ic_counts = 0
    ib_pairs = {}

    if len(candidate_ic_streamlines):

        # Fix seed to always generate the same output
        # Shuffle to try to reduce the ordering dependency for QB
        random.seed(0.2)
        random.shuffle(candidate_ic_streamlines)

        # TODO threshold on distance as arg
        qb = QuickBundlesX([30, 25, 20, 15])
        clusters_obj = qb.cluster(candidate_ic_streamlines)
        # Retrieve the clusters obtained with the smallest threshold.
        clusters = clusters_obj.get_clusters(-1)
        # clusters = qb.cluster(candidate_ic_streamlines)

        logging.debug("Found {} potential IB clusters".format(len(clusters)))

        # TODO this should be better handled
        rois_info = []
        for roi in ROIs:
            rois_info.append(
                (get_root_image_name(os.path.basename(roi.get_filename())),
                 np.array(np.where(roi.get_data())).T))

        centroids = nib.streamlines.Tractogram(clusters.centroids)
        centroids.apply_affine(np.linalg.inv(ROIs[0].affine))
        all_centroids_closest_pairs = get_closest_roi_pairs_for_all_streamlines(
            centroids.streamlines, rois_info)

        for c_idx, c in enumerate(clusters):
            closest_for_cluster = all_centroids_closest_pairs[c_idx]

            if closest_for_cluster not in ib_pairs:
                ib_pairs[closest_for_cluster] = []

            ic_counts += len(c)
            ib_pairs[closest_for_cluster].extend(c.indices)

        # all_ics_closest_pairs = get_closest_roi_pairs_for_all_streamlines(candidate_ic_streamlines, rois_info)

        # for c_idx, c in enumerate(clusters):
        #     closest_for_cluster = [all_ics_closest_pairs[i] for i in clusters[c]['indices']]

        #     if len(clusters[c]['indices']) > 1:
        #         ic_counts += len(clusters[c]['indices'])
        #         occurences = Counter(closest_for_cluster)

        #         # TODO handle either an equality or maybe a range
        #         most_frequent = occurences.most_common(1)[0][0]

        #         val = ib_pairs.get(most_frequent)
        #         if val is None:
        #             # Check if flipped pair exists
        #             val1 = ib_pairs.get((most_frequent[1], most_frequent[0]))
        #             if val1 is not None:
        #                 val1.append(c_idx)
        #             else:
        #                 ib_pairs[most_frequent] = [c_idx]
        #         else:
        #             val.append(c_idx)
        #     else:
        #         rejected_streamlines.append(candidate_ic_streamlines[clusters[c]['indices'][0]])

        if save_segmented and save_IBs:
            for k, v in ib_pairs.items():
                out_strl = []
                # for c_idx in v:
                #     out_strl.extend([s for s in np.array(candidate_ic_streamlines)[clusters[c_idx]['indices']]])
                out_strl = np.array(candidate_ic_streamlines)[v]
                out_fname = os.path.join(out_segmented_strl_dir,
                                         base_out_segmented_strl + \
                                         '_IB_{0}_{1}.tck'.format(k[0], k[1]))

                nib.streamlines.save(
                    nib.streamlines.Tractogram(out_strl,
                                               affine_to_rasmm=np.eye(4)),
                    out_fname)
                # ib_f = TCK.create(out_fname)
                # save_tracts_tck_from_dipy_voxel_space(ib_f, ref_anat_fname,
                # out_strl)

    if len(rejected_streamlines) > 0 and save_segmented:
        out_nc_fname = os.path.join(
            out_segmented_strl_dir,
            '{}_NC.tck'.format(base_out_segmented_strl))
        nib.streamlines.save(
            nib.streamlines.Tractogram(rejected_streamlines,
                                       affine_to_rasmm=np.eye(4)),
            out_nc_fname)
        # out_file = TCK.create(out_nc_fname)
        # save_tracts_tck_from_dipy_voxel_space(out_file, ref_anat_fname,
        # rejected_streamlines)

    # TODO re-add classified_streamlines_indices to validate
    if ic_counts != len(candidate_ic_strl_indices) - len(rejected_streamlines):
        raise ValueError("Some streamlines were not correctly assigned to NC")

    # Convert raw counts into ratios of the total streamline count.
    VC /= total_strl_count
    IC = (len(candidate_ic_strl_indices) -
          len(rejected_streamlines)) / total_strl_count
    NC = len(rejected_streamlines) / total_strl_count
    VCWP = 0

    # TODO could have sanity check on global extracted streamlines vs all
    # possible indices

    nb_VB_found = [
        v['nb_streamlines'] > 0 for k, v in found_vbs_info.items()
    ].count(True)
    streamlines_per_bundle = {
        k: v['nb_streamlines']
        for k, v in found_vbs_info.items() if v['nb_streamlines'] > 0
    }

    scores = {}
    scores['version'] = 2
    scores['algo_version'] = 5
    scores['VC'] = VC
    scores['IC'] = IC
    scores['VCWP'] = VCWP
    scores['NC'] = NC
    scores['VB'] = nb_VB_found
    scores['IB'] = len(ib_pairs)
    scores['streamlines_per_bundle'] = streamlines_per_bundle
    scores['total_streamlines_count'] = total_strl_count

    return scores