Example #1
# Assumed imports for this snippet; get_tracks, qb_threshold and
# downsampling are module-level globals defined elsewhere in the source.
import pickle
import numpy as np
from dipy.segment.quickbundles import QuickBundles


def QB_reps(limits=[0, np.Inf], reps=1):

    ids = ["02", "03", "04", "05", "06", "08", "09", "10", "11", "12"]

    sizes = []

    for id in range(len(ids)):
        filename = "subj_" + ids[id] + "_QB_reps.pkl"
        f = open(filename, "wb")  # binary mode for pickle
        ur_tracks = get_tracks(id, limits)
        res = {}
        res["filtered"] = len(ur_tracks)  # needed by the print below
        res["qb_threshold"] = qb_threshold
        res["limits"] = limits
        # res['ur_tracks'] = ur_tracks
        print "Dataset", id, res["filtered"], "filtered tracks"
        res["shuffle"] = {}
        res["clusters"] = {}
        res["nclusters"] = {}
        res["centroids"] = {}
        res["cluster_sizes"] = {}
        for i in range(reps):
            print "Subject", ids[id], "shuffle", i
            shuffle = np.random.permutation(np.arange(len(ur_tracks)))
            res["shuffle"][i] = shuffle
            tracks = [ur_tracks[j] for j in shuffle]
            qb = QuickBundles(tracks, qb_threshold, downsampling)
            res["clusters"][i] = {}
            for k in qb.clusters().keys():
                # this could be improved by enumerating QB's keys and
                # using the enumerator as the key in the result
                res["clusters"][i][k] = qb.clusters()[k]["indices"]
            res["centroids"][i] = qb.centroids
            res["nclusters"][i] = qb.total_clusters
            res["cluster_sizes"][i] = qb.clusters_sizes()
            print "QB for has", qb.total_clusters, "clusters"
            sizes.append(qb.total_clusters)
        pickle.dump(res, f)
        f.close()
        print "Results written to", filename
Example #2
def QB_reps_singly(limits=[0, np.Inf], reps=1):

    ids = ["02", "03", "04", "05", "06", "08", "09", "10", "11", "12"]
    replabs = [str(i) for i in range(reps)]
    sizes = []

    for id in range(len(ids)):
        ur_tracks = get_tracks(id, limits)
        for i in range(reps):
            res = {}
            # res['filtered'] = len(ur_tracks)
            res["qb_threshold"] = qb_threshold
            res["limits"] = limits
            res["shuffle"] = {}
            res["clusters"] = {}
            res["nclusters"] = {}
            res["centroids"] = {}
            res["cluster_sizes"] = {}
            print "Subject", ids[id], "shuffle", i
            shuffle = np.random.permutation(np.arange(len(ur_tracks)))
            res["shuffle"] = shuffle
            tracks = [ur_tracks[j] for j in shuffle]
            print "... starting QB"
            qb = QuickBundles(tracks, qb_threshold, downsampling)
            print "... finished QB"
            res["clusters"] = {}
            for k in qb.clusters().keys():
                # this could be improved by enumerating QB's keys and
                # using the enumerator as the key in the result
                res["clusters"][k] = qb.clusters()[k]["indices"]
            res["centroids"] = qb.centroids
            res["nclusters"] = qb.total_clusters
            res["cluster_sizes"] = qb.clusters_sizes()
            print "QB for has", qb.total_clusters, "clusters"
            filename = "subj_" + ids[id] + "_QB_rep_" + replabs[i] + ".pkl"
            f = open(filename, "wb")  # binary mode for pickle
            pickle.dump(res, f)
            f.close()
        print "Results written to", filename
    return sizes
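A hypothetical call, assuming get_tracks, qb_threshold and downsampling are
defined at module level as the functions above expect:

# Five shuffled QuickBundles repetitions per subject; the lower length
# limit of 20 is purely illustrative.
sizes = QB_reps_singly(limits=[20, np.Inf], reps=5)
print("Cluster counts across all subjects and reps: %s" % sizes)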
Example #3

# Assumed context: EuDX (dipy.tracking.eudx), QuickBundles, and window/actor
# (dipy.viz) are imported, and fa, ind, sphere and filename are defined
# earlier in the original script.
eu = EuDX(a=fa, ind=ind, seeds=100000, odf_vertices=sphere.vertices,
          a_low=0.2)  # FA uses a_low = 0.2
streamlines = [line for line in eu]
print('Number of streamlines %i' % len(streamlines))
'''
for line in streamlines:
  print(line.shape)
'''
# Do streamline clustering on the EuDX streamlines using QuickBundles (QB).
# dist_thr (distance threshold) affects the number of clusters and their size;
# pts (number of points per streamline) is used for downsampling before clustering.
# Default values: dist_thr = 4 and pts = 12.
qb = QuickBundles(streamlines, dist_thr=20, pts=20)
clusters = qb.clusters()
print('Number of clusters %i' % qb.total_clusters)
print('Cluster size', qb.clusters_sizes())
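
Because dist_thr directly controls how aggressively streamlines are merged, a
quick sweep (threshold values chosen only for illustration) shows its effect
on the cluster count:

# Larger dist_thr merges more streamlines per cluster, so the
# cluster count should drop as the threshold grows.
for thr in [5, 10, 20, 40]:
    qb_t = QuickBundles(streamlines, dist_thr=thr, pts=20)
    print('dist_thr=%d -> %d clusters' % (thr, qb_t.total_clusters))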

# Display streamlines
ren = window.Renderer()
ren.add(actor.streamtube(streamlines, window.colors.white))
window.show(ren)
window.record(ren, out_path=filename + '_stream_lines_eu.png', size=(600, 600))

# Display centroids
window.clear(ren)
colormap = actor.create_colormap(np.arange(qb.total_clusters))
ren.add(actor.streamtube(streamlines, window.colors.white, opacity=0.1))
ren.add(actor.streamtube(qb.centroids, colormap, linewidth=0.5))
window.show(ren)
window.record(ren, out_path=filename + '_centroids_eu.png', size=(600, 600))
Example #4
# Assumed context: nibabel is imported as nib; Fasciculus, FibClustering and
# length are project/dipy helpers imported elsewhere; fib is the tractography
# file loaded earlier. The assignment target img_path is inferred from its
# use below.
img_path = \
    'response_dhollander/101107/Structure/T1w_acpc_dc_restore_brain1.25.nii.gz'

img = nib.load(img_path)
fa = Fasciculus(fib)
streamlines = fa.get_data()
length_t = fa.get_lengths()
ind = length_t > 10
streamlines = streamlines[ind]
fa.set_data(streamlines)
fibcluster = FibClustering(fa)
print len(streamlines)

# 1
qb = QuickBundles(streamlines, 2)
clusters = qb.clusters()
print qb.clusters_sizes()
indexs = []
for i in range(len(clusters)):
    if clusters[i]['N'] >= 400:
        indexs += clusters[i]['indices']

# 2
streamlines = streamlines[indexs]
qb = QuickBundles(streamlines, 2)
clusters = qb.clusters()

centroids = qb.centroids
centroids_lengths = np.array(list(length(centroids)))
print centroids_lengths

indexs_c = []
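
The snippet is cut off at this point. A plausible continuation in the same
style as step 1, filtering clusters by centroid length (the 50 mm cutoff is
purely illustrative), might look like:

# Hypothetical continuation: keep clusters whose centroid is
# at least 50 mm long.
for i in range(len(clusters)):
    if centroids_lengths[i] >= 50:
        indexs_c += clusters[i]['indices']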