Example #1
    def update_info(self, filepath):
        """
        Compute missing or inconsistent information in the cache.
        """
        save = False

        # Each block below lazily fills in one cached attribute:
        # touching a missing attribute raises AttributeError, which
        # triggers the (re)computation.
        try:
            self.buffers
        except AttributeError:
            print("Computing buffers.")
            self.buffers = compute_buffers(self.T, alpha=1.0, save=False)
            save = True

        try:
            self.num_prototypes
        except AttributeError:
            print("Defining number of prototypes")
            self.num_prototypes = 40
            save = True

        try:
            self.full_dissimilarity_matrix
        except AttributeError:
            print("Computing dissimilarity matrix")
            self.full_dissimilarity_matrix = compute_dissimilarity(
                self.T,
                distance=bundles_distances_mam,
                prototype_policy='sff',
                num_prototypes=self.num_prototypes)
            save = True

        # A stale cache may hold a matrix computed for a different
        # tractography: recompute when the row count does not match.
        try:
            assert self.full_dissimilarity_matrix.shape[0] == len(self.T)
        except AssertionError:
            print("Re-computing dissimilarity matrix.")
            self.num_prototypes = 40
            self.full_dissimilarity_matrix = compute_dissimilarity(
                self.T,
                distance=bundles_distances_mam,
                prototype_policy='sff',
                num_prototypes=self.num_prototypes)
            save = True

        try:
            self.clusters
        except AttributeError:
            print("Computing MBKM")
            size_T = len(self.T)
            if size_T > 150:
                n_clusters = 150
            else:
                n_clusters = size_T

            streamlines_ids = np.arange(size_T, dtype=int)
            self.clusters = mbkm_wrapper(self.full_dissimilarity_matrix,
                                         n_clusters, streamlines_ids)
            save = True

        try:
            self.kdt
        except AttributeError:
            print("Computing KDTree")
            self.compute_kdtree()
            save = True

        if save:
            self.save_info(filepath)
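The try/except AttributeError blocks above are a lazy, cache-aware initialization pattern: reading a missing attribute raises AttributeError, the handler computes and stores it, and a dirty flag decides whether to re-save the cache. A minimal standalone sketch of the same pattern with generic names (none of them from Tractome):

class Cache(object):
    def update_info(self, filepath):
        save = False
        if not hasattr(self, 'expensive'):      # same test as the try/except above
            self.expensive = sum(range(10**6))  # stand-in for a costly computation
            save = True
        if save:
            print("would persist to", filepath)

cache = Cache()
cache.update_info('/tmp/info.spa')  # first call computes and "saves"
cache.update_info('/tmp/info.spa')  # second call is a cache hit: no work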
Example #2
    def update_info(self, filepath):
        """
        Compute the missing information not available in the cache.
        """
        try:
            self.buffers
        except AttributeError:
            print("Computing buffers.")
            self.buffers = compute_buffers(self.T, alpha=1.0, save=False)

        try:
            self.num_prototypes
        except AttributeError:
            print("Computing dissimilarity matrix")
            self.num_prototypes = 40
            self.full_dissimilarity_matrix = compute_dissimilarity(
                self.T,
                distance=bundles_distances_mam,
                prototype_policy='sff',
                num_prototypes=self.num_prototypes)

        try:
            self.clusters
        except AttributeError:
            print("Computing MBKM")
            size_T = len(self.T)
            if size_T > 150:
                n_clusters = 150
            else:
                n_clusters = size_T

            streamlines_ids = np.arange(size_T, dtype=int)
            self.clusters = mbkm_wrapper(self.full_dissimilarity_matrix,
                                         n_clusters, streamlines_ids)

        self.save_info(filepath)
Example #3
def tractome_preprocessing(src_trk_dir, subj_name):

    seeds = par_eudx_seeds
    par2fun = {par_prototype_distance: bundles_distances_mam}
    prototype_distance = par2fun[par_prototype_distance]
    # Encode the seed count in the basename, e.g. "subj_500K.trk".
    # Floor division keeps the original Python 2 integer semantics.
    trk_basename = "%s_%d%s%s" % (
        subj_name, seeds // 10**6 if seeds > 10**5 else seeds // 10**3,
        'K' if seeds < 1000000 else 'M', par_trk_suffix)
    spa_basename = os.path.splitext(trk_basename)[0] + '.spa'
    src_trk_file = os.path.join(src_trk_dir, trk_basename)
    out_spa_dir = os.path.join(src_trk_dir, '.temp')
    if not os.path.exists(out_spa_dir):
        os.makedirs(out_spa_dir)
    out_spa_file = os.path.join(out_spa_dir, spa_basename)

    streams, hdr = nib.trackvis.read(src_trk_file, points_space='voxel')
    streamlines = np.array([s[0] for s in streams], dtype=object)
    dissimilarity_matrix = compute_dissimilarity(streamlines,
                                                 prototype_distance,
                                                 par_prototype_policy,
                                                 par_prototype_num)

    info = {'dismatrix': dissimilarity_matrix, 'nprot': par_prototype_num}
    # The file must be opened in binary mode: pickle.HIGHEST_PROTOCOL
    # produces a binary stream, which text-mode 'w+' would corrupt.
    with open(out_spa_file, 'wb') as f:
        pickle.dump(info, f, protocol=pickle.HIGHEST_PROTOCOL)
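Reading the .spa cache back is the mirror of the dump above. A minimal sketch, assuming only the 'dismatrix'/'nprot' layout written by tractome_preprocessing (load_spa is a hypothetical helper, not part of the code above):

import pickle

def load_spa(spa_path):
    # Binary mode matches the binary pickle protocol used when writing.
    with open(spa_path, 'rb') as f:
        info = pickle.load(f)
    return info['dismatrix'], info['nprot']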
Example #4
def Inertia(tract):

    from dissimilarity_common import compute_dissimilarity
    from dipy.tracking.distances import bundles_distances_mam
    from sklearn.cluster import MiniBatchKMeans

    # Project the tract into dissimilarity space, then fit a single
    # cluster: the inertia measures how tightly the streamlines
    # gather around their common centroid.
    diss = compute_dissimilarity(tract, distance=bundles_distances_mam,
                                 prototype_policy='sff', num_prototypes=20)

    mbk = MiniBatchKMeans(init='k-means++', n_clusters=1, batch_size=100,
                          n_init=10, max_no_improvement=10, verbose=0)
    mbk.fit(diss)

    labels = mbk.labels_
    print(labels)
    return mbk.inertia_
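A hedged usage sketch for Inertia: with n_clusters=1 the inertia is the summed squared distance of every streamline to the single centroid in dissimilarity space, so a tighter bundle yields a smaller value. The random streamlines below are synthetic placeholders, not real tractography:

import numpy as np

rng = np.random.RandomState(0)
# Each streamline is an (n_points, 3) float32 array, the layout
# bundles_distances_mam expects.
tight_tract = [rng.rand(20, 3).astype(np.float32) for _ in range(100)]
loose_tract = [(rng.rand(20, 3) * 50).astype(np.float32) for _ in range(100)]

print(Inertia(tight_tract))  # small inertia: compact bundle
print(Inertia(loose_tract))  # larger inertia: dispersed bundle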
Example #5
    def loading_full_tractograpy(self, tracpath=None):
        """
        Load the full tractography and create a StreamlineLabeler
        to show it all.
        """
        # load the tracks registered in MNI space
        self.tracpath = tracpath
        basename = os.path.basename(self.tracpath)
        tracks_basename, tracks_format = os.path.splitext(basename)

        if tracks_format == '.dpy':
            dpr = Dpy(self.tracpath, 'r')
            print("Loading", self.tracpath)
            self.T = dpr.read_tracks()
            dpr.close()
            self.T = np.array(self.T, dtype=object)

        elif tracks_format == '.trk':
            streams, self.hdr = nib.trackvis.read(self.tracpath,
                                                  points_space='voxel')
            print("Loading", self.tracpath)
            self.T = np.array([s[0] for s in streams], dtype=object)

        print("Removing short streamlines")
        self.T = np.array([t for t in self.T if length(t) >= 15],
                          dtype=object)

        tracks_directoryname = os.path.dirname(self.tracpath) + '/.temp/'
        general_info_filename = tracks_directoryname + tracks_basename + '.spa'

        # Check if there is a .spa file that already contains all the
        # information computed from the tractography, and try to load it.
        try:
            print("Looking for general information file")
            self.load_info(general_info_filename)

        except IOError:
            print("General information not found, recomputing buffers")

            print("Computing buffers.")
            self.buffers = compute_buffers(self.T, alpha=1.0, save=False)

            print("Computing dissimilarity matrix")
            self.num_prototypes = 40
            self.full_dissimilarity_matrix = compute_dissimilarity(
                self.T,
                distance=bundles_distances_mam,
                prototype_policy='sff',
                num_prototypes=self.num_prototypes)

            # compute initial MBKM with given n_clusters
            print("Computing MBKM")
            size_T = len(self.T)
            if size_T > 150:
                n_clusters = 150
            else:
                n_clusters = size_T

            streamlines_ids = np.arange(size_T, dtype=int)
            self.clusters = mbkm_wrapper(self.full_dissimilarity_matrix,
                                         n_clusters, streamlines_ids)

            print("Saving computed information from tractography")
            if not os.path.exists(tracks_directoryname):
                os.makedirs(tracks_directoryname)
            self.save_info(general_info_filename)

        # create the interaction system for tracks
        self.streamlab = StreamlineLabeler(
            'Bundle Picker',
            self.buffers, self.clusters,
            vol_shape=self.dims,
            affine=np.copy(self.affine),
            clustering_parameter=len(self.clusters),
            clustering_parameter_max=len(self.clusters),
            full_dissimilarity_matrix=self.full_dissimilarity_matrix)

        self.scene.add_actor(self.streamlab)
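load_info and save_info are referenced throughout these examples but never shown. A plausible sketch of the pair as methods on the same class, assuming they pickle the cached attributes in one dict (the attribute list and key names are guesses mirrored from update_info, not Tractome's actual code):

    def save_info(self, filepath):
        # Persist the expensive-to-compute attributes; layout is illustrative.
        info = {'buffers': self.buffers,
                'num_prototypes': self.num_prototypes,
                'dismatrix': self.full_dissimilarity_matrix,
                'clusters': self.clusters}
        with open(filepath, 'wb') as f:
            pickle.dump(info, f, protocol=pickle.HIGHEST_PROTOCOL)

    def load_info(self, filepath):
        # open() raises IOError (FileNotFoundError) when the cache is
        # missing, which loading_full_tractograpy catches above to
        # trigger recomputation.
        with open(filepath, 'rb') as f:
            info = pickle.load(f)
        self.buffers = info['buffers']
        self.num_prototypes = info['num_prototypes']
        self.full_dissimilarity_matrix = info['dismatrix']
        self.clusters = info['clusters']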