Example #1
def anteriorzplus(xyz):
    # Compose two rotations: -90 degrees about x, then 180 degrees about y,
    # presumably so the anterior axis ends up along +z (hence the name).
    # `rotation_matrix(axis, theta)` (angle in degrees) and `from_matvec`
    # (as in nibabel.affines) are assumed to be imported elsewhere.
    axis = np.array([1, 0, 0.0])
    theta = -90.0
    post_mat = from_matvec(rotation_matrix(axis, theta))
    axis = np.array([0, 1, 0])
    theta = 180.0
    post_mat = np.dot(from_matvec(rotation_matrix(axis, theta)), post_mat)

    # Apply only the rotational part to the N x 3 coordinate array.
    return np.dot(post_mat[:3, :3], xyz.T).T
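A minimal usage sketch (not part of the original snippet): rotation_matrix is replaced by a small Rodrigues-formula stand-in that takes the angle in degrees, and from_matvec is taken from nibabel.affines; both names are assumptions about the snippet's missing imports.

import numpy as np
from nibabel.affines import from_matvec  # assumed source of from_matvec

def rotation_matrix(axis, theta):
    # Stand-in for the snippet's helper: 3x3 rotation about `axis` by `theta` degrees.
    axis = np.asarray(axis, dtype=float)
    axis = axis / np.linalg.norm(axis)
    t = np.deg2rad(theta)
    K = np.array([[0, -axis[2], axis[1]],
                  [axis[2], 0, -axis[0]],
                  [-axis[1], axis[0], 0]])
    return np.eye(3) + np.sin(t) * K + (1 - np.cos(t)) * (K @ K)

points = np.array([[0.0, 1.0, 0.0],
                   [0.0, 0.0, 1.0]])
print(anteriorzplus(points))  # each row is the corresponding rotated point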
Example #3
    def __init__(self, name, x, y, z, radius, color, colorname, method,
                 coords, indextracks, affine, vol_shape):
        """
        """
        Actor.__init__(self, name)
        self.coordinates = [x, y, z]
        self.radius = radius
        self.color = color
        self.colorname = colorname
        self.dims = vol_shape

        self.methods = {0: self.trackvis,
                        1: self.tractome_inside,
                        2: self.tractome_intersect}
        self.activemethod = self.methods[method]
        self.coords = coords
        self.indextracks = indextracks
        # Generating index
        if affine is None:
            self.affine = np.eye(4, dtype=np.float32)
        else:
            self.affine = affine
        if method == 0:
            # For the trackvis method (0), flip the sign of the y scaling.
            self.affine[1, 1] = self.affine[1, 1] * (-1)
        if vol_shape is not None:
            # Shift the affine so the volume center maps to the scene origin.
            I, J, K = vol_shape
            centershift = img_to_ras_coords(np.array([[I / 2., J / 2., K / 2.]]), self.affine)
            centeraffine = from_matvec(np.eye(3), centershift.squeeze())
            self.affine[:3, 3] = self.affine[:3, 3] - centeraffine[:3, 3]
        # OpenGL expects column-major matrices, hence the transpose before ravel().
        self.glaffine = (GLfloat * 16)(*tuple(self.affine.T.ravel()))

        self.activemethod()
        # vertices are still needed for something
        self.vertices = np.array([self.coordinates])
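The centering block above (repeated in the later examples) can be read on its own: it maps the voxel-grid center to world coordinates and removes that offset from the affine translation, so the rendered volume sits at the scene origin. A minimal standalone sketch, assuming img_to_ras_coords simply applies the image affine to voxel coordinates (nibabel's apply_affine is used as a stand-in), with a made-up shape and affine:

import numpy as np
from nibabel.affines import apply_affine, from_matvec

vol_shape = (128, 128, 60)                         # hypothetical volume
affine = np.diag([2.0, 2.0, 2.5, 1.0]).astype(np.float32)

# Voxel-grid center (I/2, J/2, K/2) mapped to world (RAS) coordinates.
I, J, K = vol_shape
centershift = apply_affine(affine, np.array([[I / 2., J / 2., K / 2.]]))
# Drop that offset from the affine translation: the volume is now centered.
centeraffine = from_matvec(np.eye(3), centershift.squeeze())
affine[:3, 3] = affine[:3, 3] - centeraffine[:3, 3]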
Example #4
    def __init__(self, name, color, colorname, coords, mask_coords, indextracks, affine, vol_shape):
        """
        """
        Actor.__init__(self, name) 
        self.color = color
        self.colorname = colorname

        # color_array = np.array(self.color*len(mask_coords[0]), dtype='f4')

        self.coords = coords
        self.indextracks = indextracks
        self.mask_coords = mask_coords
        colors = [color] * len(self.mask_coords[0])
        self.color_points = np.array(colors, dtype=np.float32)
        self.streamlines_mask()
        # Generating index
        
        if affine is None:
            self.affine = np.eye(4, dtype=np.float32)
        else:
            self.affine = affine
        if vol_shape is not None:
            # Shift the affine so the volume center maps to the scene origin.
            I, J, K = vol_shape
            centershift = img_to_ras_coords(np.array([[I / 2., J / 2., K / 2.]]), self.affine)
            centeraffine = from_matvec(np.eye(3), centershift.squeeze())
            self.affine[:3, 3] = self.affine[:3, 3] - centeraffine[:3, 3]
        # OpenGL expects column-major matrices, hence the transpose before ravel().
        self.glaffine = (GLfloat * 16)(*tuple(self.affine.T.ravel()))
        
        # vertices are still needed for something
        self.vertices = np.array([self.voxels])
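The (GLfloat * 16)(...) lines above pack the 4x4 affine into the flat, column-major array that legacy OpenGL calls such as glMultMatrixf expect; since numpy stores matrices row-major, the transpose before ravel() does the conversion. A minimal sketch, assuming GLfloat comes from pyglet.gl (any ctypes c_float alias would do):

import numpy as np
from pyglet.gl import GLfloat  # assumed source of GLfloat

affine = np.eye(4, dtype=np.float32)
affine[:3, 3] = [10.0, -5.0, 2.5]              # translation part

# Transpose -> column-major order, then flatten into 16 GLfloats.
glaffine = (GLfloat * 16)(*affine.T.ravel())
print(list(glaffine)[12:15])                   # the translation shows up in slots 12-14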
Example #5
    def __init__(self, name, qb, tracks, reps='exemplars',
                 colors=None, vol_shape=None, 
                 virtuals_line_width=5.0, tracks_line_width=2.0, 
                 virtuals_alpha=1.0, tracks_alpha=0.6, 
                 affine=None, verbose=False):
        """TrackLabeler is meant to explore and select subsets of the
        tracks. The exploration occurs through QuickBundles (qb) in
        order to simplify the scene.
        """
        super(StreamlineLabeler, self).__init__(name)

        if affine is None:
            self.affine = np.eye(4, dtype=np.float32)
        else:
            self.affine = affine
        if vol_shape is not None:
            # Shift the affine so the volume center maps to the scene origin.
            I, J, K = vol_shape
            centershift = img_to_ras_coords(np.array([[I / 2., J / 2., K / 2.]]), self.affine)
            centeraffine = from_matvec(np.eye(3), centershift.squeeze())
            self.affine[:3, 3] = self.affine[:3, 3] - centeraffine[:3, 3]
        # OpenGL expects column-major matrices, hence the transpose before ravel().
        self.glaffine = (GLfloat * 16)(*tuple(self.affine.T.ravel()))
        self.glaff = self.affine
        self.mouse_x = None
        self.mouse_y = None
        self.cache = {}
        self.qb = qb
        self.reps = reps
        #virtual tracks
        if self.reps == 'virtuals':
            self.virtuals = qb.virtuals()
        if self.reps == 'exemplars':
            self.virtuals, self.ex_ids = qb.exemplars()
        self.virtuals_alpha = virtuals_alpha
        self.virtuals_buffer, self.virtuals_colors, self.virtuals_first, self.virtuals_count = self.compute_buffers(self.virtuals, colors, self.virtuals_alpha)
        #full tractography (downsampled at 12 pts per track)
        self.tracks = tracks
        self.tracks_alpha = tracks_alpha
        self.tracks_ids = np.arange(len(self.tracks), dtype=int)  # builtin int: np.int was removed from NumPy
        self.tracks_buffer, self.tracks_colors, self.tracks_first, self.tracks_count = self.compute_buffers(self.tracks, colors, self.tracks_alpha)
        #calculate boundary box for entire tractography
        self.min = np.min(self.tracks_buffer, axis=0)
        self.max = np.max(self.tracks_buffer, axis=0)
        self.vertices = self.tracks_buffer
        #coord1 = np.array([self.tracks_buffer[:,0].min(),self.tracks_buffer[:,1].min(),self.tracks_buffer[:,2].min()], dtype = 'f4')        
        #coord2 = np.array([self.tracks_buffer[:,0].max(),self.tracks_buffer[:,1].max(),self.tracks_buffer[:,2].max()], dtype = 'f4')
        #self.make_aabb((coord1,coord2),0)
        #show size of tractography buffer
        print('MBytes %f' % (self.tracks_buffer.nbytes/2.**20,))
        self.position = (0,0,0)
        #buffer for selected virtual tracks
        self.selected = []
        self.virtuals_line_width = virtuals_line_width
        self.tracks_line_width = tracks_line_width
        self.old_color = {}
        self.hide_virtuals = False
        self.expand = False
        self.verbose = verbose
        self.tracks_visualized_first = np.array([], dtype='i4')
        self.tracks_visualized_count = np.array([], dtype='i4')
        self.history = [[self.qb, self.tracks, self.tracks_ids,
                         self.virtuals_buffer, self.virtuals_colors,
                         self.virtuals_first, self.virtuals_count,
                         self.tracks_buffer, self.tracks_colors,
                         self.tracks_first, self.tracks_count]]
        #shifting of track is necessary for dipy.tracking.vox2track.track_counts
        #we also upsample using 30 points in order to increase the accuracy of track counts
        self.vol_shape = vol_shape
        if self.vol_shape is not None:
            #self.tracks_shifted = [t + np.array(vol_shape)/2. for t in self.tracks]
            self.virtuals_shifted = [downsample(t + np.array(self.vol_shape) / 2., 30)
                                     for t in self.virtuals]
        else:
            #self.tracks_shifted = None
            self.virtuals_shifted = None
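A small sketch, not taken from the original code, of the shift-and-resample step above: each track is shifted by half the volume shape (needed for dipy.tracking.vox2track.track_counts, per the comment in the snippet) and resampled to 30 points to make the counts more accurate. set_number_of_points from dipy.tracking.streamline stands in for the older downsample helper, which is an assumption about the intended import.

import numpy as np
from dipy.tracking.streamline import set_number_of_points  # stand-in for downsample

vol_shape = (128, 128, 60)
virtuals = [np.random.rand(20, 3).astype(np.float32) * 50 - 25 for _ in range(5)]

# Shift every track by half the volume shape, then resample to 30 points each.
virtuals_shifted = [set_number_of_points(t + np.array(vol_shape) / 2., 30)
                    for t in virtuals]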
Example #6
    def __init__(self, name, buffers, clusters, representative_buffers=None,
                 colors=None, vol_shape=None, representatives_line_width=5.0,
                 streamlines_line_width=2.0, representatives_alpha=1.0,
                 streamlines_alpha=1.0, affine=None, verbose=False,
                 clustering_parameter=None, clustering_parameter_max=None,
                 full_dissimilarity_matrix=None):
        """StreamlineLabeler is meant to explore and select subsets of
        the streamlines. The exploration occurs through clustering in
        order to simplify the scene.
        """
        # super(StreamlineLabeler, self).__init__(name)
        Actor.__init__(self, name) # direct call of the __init__ seems better in case of multiple inheritance

        if affine is None:
            self.affine = np.eye(4, dtype=np.float32)
        else:
            self.affine = affine
        if vol_shape is not None:
            # Shift the affine so the volume center maps to the scene origin.
            I, J, K = vol_shape
            centershift = img_to_ras_coords(np.array([[I / 2., J / 2., K / 2.]]), self.affine)
            centeraffine = from_matvec(np.eye(3), centershift.squeeze())
            self.affine[:3, 3] = self.affine[:3, 3] - centeraffine[:3, 3]
        # OpenGL expects column-major matrices, hence the transpose before ravel().
        self.glaffine = (GLfloat * 16)(*tuple(self.affine.T.ravel()))
        self.glaff = self.affine
         
        self.mouse_x = None
        self.mouse_y = None

        self.buffers = buffers
        

        self.clusters = clusters
        self.save_init_set = True
         
        # MBKM:
        Manipulator.__init__(self, initial_clusters=clusters, clustering_function=mbkm_wrapper)

        # We keep the representative_ids as list to preserve order,
        # which is necessary for presentation purposes:
        self.representative_ids_ordered = sorted(self.clusters.keys())

        self.representatives_alpha = representatives_alpha

        # representative buffers:
        if representative_buffers is None:
            representative_buffers = compute_buffers_representatives(buffers, self.representative_ids_ordered)

        self.representatives_buffer = representative_buffers['buffer']
        self.representatives_colors = representative_buffers['colors']
        self.representatives_first = representative_buffers['first']
        self.representatives_count = representative_buffers['count']

        self.representatives = buffer2coordinates(self.representatives_buffer,
                                                  self.representatives_first,
                                                  self.representatives_count)

        # full tractography buffers:
        self.streamlines_buffer = buffers['buffer']
        self.streamlines_colors = buffers['colors']
        self.streamlines_first = buffers['first']
        self.streamlines_count = buffers['count']
        
        print('MBytes %f' % (self.streamlines_buffer.nbytes/2.**20,))

        self.hide_representatives = False
        self.expand = False
        self.knnreset = False
        self.representatives_line_width = representatives_line_width
        self.streamlines_line_width = streamlines_line_width
        self.vertices = self.streamlines_buffer # this is apparently requested by Actor
        
        self.color_storage = {}
        # This is the color of a selected representative.
        self.color_selected = np.array([1.0, 1.0, 1.0, 1.0], dtype='f4')

        # These are the visualized streamlines.
        # (Note: maybe a copy is not strictly necessary here)
        self.streamlines_visualized_first = self.streamlines_first.copy()
        self.streamlines_visualized_count = self.streamlines_count.copy()
        
        # Clustering:
        self.clustering_parameter = clustering_parameter
        self.clustering_parameter_max = clustering_parameter_max
        self.full_dissimilarity_matrix = full_dissimilarity_matrix
        self.cantroi = 0
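The buffers dict unpacked above ('buffer', 'colors', 'first', 'count') is the flat layout typically fed to glMultiDrawArrays-style drawing: one concatenated vertex array plus per-streamline start offsets and lengths, with one RGBA color per vertex. Below is a minimal sketch of building such a dict; the helper name and the single fixed color are illustrative assumptions, not the project's compute_buffers implementation.

import numpy as np

def build_buffers(streamlines, color=(1.0, 0.0, 0.0, 1.0)):
    # Per-streamline lengths and their start offsets within the flat buffer.
    counts = np.array([len(s) for s in streamlines], dtype='i4')
    firsts = np.concatenate([[0], np.cumsum(counts)[:-1]]).astype('i4')
    # One flat (N, 3) vertex array and a matching (N, 4) RGBA color array.
    vertices = np.ascontiguousarray(np.concatenate(streamlines), dtype='f4')
    colors = np.tile(np.array(color, dtype='f4'), (len(vertices), 1))
    return {'buffer': vertices, 'colors': colors, 'first': firsts, 'count': counts}

streamlines = [np.random.rand(n, 3).astype('f4') for n in (10, 25, 7)]
buffers = build_buffers(streamlines)
print(buffers['first'], buffers['count'])   # [ 0 10 35] [10 25  7]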