Example #1
    def _forward_dataset(self, ds):
        chunks_attr = self.__chunks_attr
        mds = Dataset([])
        mds.a = ds.a
        # mds.sa = ds.sa
        # mds.fa = ds.fa
        if chunks_attr is None:
            # global kmeans: one cluster label per sample
            mds.samples = self._kmeans(ds.samples).labels_
            print max(mds.samples)
        else:
            # per-chunk kmeans: allocate the label vector once,
            # then fill it in chunk by chunk
            mds.samples = np.zeros(len(ds), dtype=int)
            for c in ds.sa[chunks_attr].unique:
                slicer = np.where(ds.sa[chunks_attr].value == c)[0]
                mds.samples[slicer] = self._kmeans(ds.samples[slicer]).labels_

        return mds
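For reference, a minimal stand-alone sketch of what the global branch computes, assuming scikit-learn's KMeans is the clusterer behind self._kmeans; the helper name kmeans_labels is made up for illustration:

    import numpy as np
    from sklearn.cluster import KMeans

    def kmeans_labels(samples, n_clusters=3):
        # fit k-means and return one integer cluster label per sample
        return KMeans(n_clusters=n_clusters).fit(samples).labels_

    X = np.random.rand(20, 5)
    print kmeans_labels(X)   # length-20 label vector, values in 0..2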
Example #2
    def _forward_dataset(self, ds):
        out_ds = Dataset([])
        out_ds.a = ds.a
        # pdb.set_trace()
        # indices and coordinates of the non-zero (in-mask) voxels
        iv = np.nonzero(ds.samples)[0]
        coords = ds.sa.values()[0][iv]
        out_ds.fa = coords
        dim = ds.a.voxel_dim
        nbdim = self.__neighbor_shape.nbdim
        nbsize = self.__neighbor_shape.nbsize
        shape_type = self.__neighbor_shape.shape_type
        volnb = volneighbors(coords, dim, nbdim, nbsize, shape_type)
        distmask = volnb.compute_offsets()

        if self.__outsparse == True:
            out_ds.samples = distmask
        elif self.__outsparse == False:
            out_ds.samples = distmask.todense()
        else:
            raise RuntimeError('outsparse should be True or False.')

        return out_ds
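Note that the outsparse flag above only switches between a sparse matrix and its dense equivalent; a minimal scipy.sparse illustration (the matrix itself is made up):

    import numpy as np
    from scipy import sparse

    m = sparse.csr_matrix(np.eye(3))   # sparse 3x3 identity
    print m.todense()                  # same data as a dense numpy matrix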
Example #3
File: winner.py Project: neurosbh/PyMVPA
    def _call(self, ds):
        '''
        Parameters
        ----------
        ds: Dataset
            input dataset 
        
        Returns
        -------
        wds: Dataset
            Result with one sample (if axis=='feature') or one feature (if 
            axis=='samples') and an equal number of features (or samples,
            respectively) as the input dataset.
        '''

        axis = self.__axis
        fx = self.__fx

        # ensure it's a dataset
        if not isinstance(ds, Dataset):
            ds = Dataset(ds)

        samples = ds.samples

        # apply the function
        winners = fx(ds)

        # set the new shape
        new_shape = list(ds.shape)
        new_shape[axis] = 1

        # the output dataset
        wta = Dataset(np.reshape(winners, new_shape))

        # copy dataset attributes
        wta.a = ds.a.copy()

        # copy feature attribute and set sample attributes, or vice versa
        fas = [ds.fa, wta.fa]
        sas = [ds.sa, wta.sa]
        fas_sas = [fas, sas]
        to_copy, to_leave = [fas_sas[(i + axis) % 2] for i in xrange(2)]

        # copy each attribute
        for k, v in to_copy[0].iteritems():
            to_copy[1][k] = copy.copy(v)

        # set source and target. feature attributes become
        # sample attributes; or vice versa
        src, _ = to_leave
        trg = to_copy[1]
        prefix = self.__other_axis_prefix
        for k, v in src.iteritems():
            # set the prefix
            prek = ('' if prefix is None else prefix) + k

            if prek in trg:
                raise KeyError("Key clash: %s already in %s" %
                               (prek, to_copy[1]))
            trg[prek] = v.value[winners]

        return wta
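A stand-alone sketch of the reshape logic above, assuming fx is a row-wise argmax; all names and data here are illustrative, not part of the original class:

    import numpy as np

    X = np.array([[0.1, 0.9, 0.3],
                  [0.7, 0.2, 0.4]])
    axis = 1                               # axis to collapse to length one
    winners = np.argmax(X, axis=axis)      # one 'winner' index per row -> [1, 0]
    new_shape = list(X.shape)
    new_shape[axis] = 1
    print np.reshape(winners, new_shape)   # -> [[1], [0]]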
Example #4
    def _forward_dataset(self, ds):
        mds = Dataset([])
        mds.a = ds.a
        vectordist = self._fdistance(ds.samples)
        mds.samples = squareform(vectordist, force='no', checks=True)
        return mds
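For context, scipy's squareform expands a condensed distance vector of length n*(n-1)/2 into the symmetric n x n matrix that this mapper stores as samples; a minimal sketch with made-up data:

    import numpy as np
    from scipy.spatial.distance import pdist, squareform

    X = np.random.rand(4, 3)        # 4 samples, 3 features
    vec = pdist(X, 'euclidean')     # condensed form, length 4*3/2 == 6
    print squareform(vec, force='no', checks=True).shape   # (4, 4)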
Example #5
def main():
    '''
    Spectral clustering...
    '''
    st = time.time()
    tmpset = Dataset([])
    # hfilename = "/nfs/j3/userhome/dangxiaobin/workingdir/cutROI/%s/fdt_matrix2_targets_sc.T.hdf5"%(id)
    hfilename = 'fdt_matrix2.T.hdf5'
    print hfilename
    # load connectivity profile of seed mask voxels
    conn = open_conn_mat(hfilename)
    tmpset.a = conn.a
    print conn.shape, conn.a
    # remove some features
    mask = create_mask(conn.samples, 0.5, 1)
    # print mask, mask.shape
    conn_m = mask_feature(conn.samples, mask)
    # print conn_m
    conn_map = conn_m.T   # renamed from `map` to avoid shadowing the builtin
    print "map:"
    print conn_map.shape, conn_map.max(), conn_map.min()

    voxel = np.array(conn.fa.values())
    print voxel[0]
    v = voxel[0]
    # pairwise Euclidean distances between voxel coordinates
    # (ds is a module alias providing cdist)
    spacedist = ds.cdist(v, v, 'euclidean')
    print spacedist

    """
    similar_mat = create_similarity_mat(conn_map, conn.fa, 0.1, 2)
    X = np.array(similar_mat)
    print "similarity matrix: shape:", X.shape
    print X
    """

    corr = np.corrcoef(conn_map)
    corr = np.abs(corr)
    # mix connectivity similarity with spatial proximity
    corr = 0.1 * corr + 0.9 / (spacedist + 1)

    print "Elapsed time: ", time.time() - st
    print corr.shape, corr
    plt.imshow(corr, interpolation='nearest', cmap=cm.jet)
    cb = plt.colorbar()
    pl.xticks(())
    pl.yticks(())
    pl.show()

    cnum = 3
    near = 100
    sc = SpectralClustering(cnum, 'arpack', None, 100, 1, 'precomputed', near, None, True)
    # sc.fit(conn_map)
    sc.fit_predict(corr)
    '''
    cnum = 3
    near = 100
    sc = SpectralClustering(cnum, 'arpack', None, 100, 1, 'nearest_neighbors', near, None, True)
    sc.fit(conn_map)
    # sc.fit_predict(X)
    # param = sc.get_params(deep=True)
    '''
    tmpset.samples = sc.labels_ + 1
    # print sc.affinity_matrix_
    # print list(sc.labels_)
    print "Elapsed time: ", time.time() - st
    print "Number of voxels: ", sc.labels_.size
    print "Number of clusters: ", np.unique(sc.labels_).size

    result = map2nifti(tmpset)
    result.to_filename("fg_parcel_S0006.nii.gz")
    print ".....The end........"
Example #6
def spectral_seg(hfilename, outf):
    '''
    Spectral clustering...
    '''
    tmpset = Dataset([])
    # pdb.set_trace()
    print "hdf name:", hfilename
    st = time.time()
    ### 1. load connectivity profile of seed mask voxels
    conn = h5load(hfilename)
    tmpset.a = conn.a
    print "connection matrix shape:"
    print conn.shape
    ### 2. feature selection
    mask = create_mask(conn.samples, 5)
    conn_m = conn.samples[mask]
    conn_map = conn_m.T   # renamed from `map` to avoid shadowing the builtin
    print "masked conn matrix:"
    print conn_map.shape, conn_map.max(), conn_map.min()

    ### 3. average each voxel's connection profile with its in-mask neighbors
    temp = np.zeros(conn_map.shape)
    voxel = np.array(conn.fa.values())
    v = voxel[0]
    v = v.tolist()

    shape = [256, 256, 256]

    i = 0
    for coor in v:
        mean_f = conn_map[i]
        # print mean_f.shape
        # plt.plot(mean_f)
        # plt.show()

        neigh = get_neighbors(coor, 2, shape)
        # print "neigh:", neigh

        # running average over the profiles of neighbors inside the mask
        count = 1
        for n in neigh:
            if n in v:
                mean_f = (mean_f * count + conn_map[v.index(n)]) / (count + 1)
                count += 1

        temp[i] = mean_f
        i += 1
    # sys.exit(0)
    conn_map = temp
    print "averaged connection matrix"

    ### 4. spatial distance
    spacedist = ds.cdist(v, v, 'euclidean')
    # print spacedist

    ### 5. correlation matrix
    corr = np.corrcoef(conn_map)
    corr = np.abs(corr)

    ### 6. mix similarity matrix: weight connectivity correlation
    ### against spatial proximity
    corr = 0.7 * corr + 0.3 / (spacedist + 1)
    # plt.imshow(corr, interpolation='nearest', cmap=cm.jet)
    # cb = plt.colorbar()
    # pl.xticks(())
    # pl.yticks(())
    # pl.show()
    print "mix up the corr and spatial matrix"

    # sys.exit(0)
    ### 7. spectral segmentation
    print "do segmentation"
    cnum = 3
    near = 100
    sc = SpectralClustering(cnum, 'arpack', None, 100, 1, 'precomputed', near, None, True)
    sc.fit_predict(corr)

    tmpset.samples = sc.labels_ + 1
    print "Number of voxels: ", sc.labels_.size
    print "Number of clusters: ", np.unique(sc.labels_).size
    print "Elapsed time: ", time.time() - st

    ### 8. save the segmentation result
    print "save the result to xxx_parcel.nii.gz"
    result = map2nifti(tmpset)
    result.to_filename(outf)
    print ".....Segment end........"

    return True
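The mixing step in 6 is a convex combination of profile correlation and inverse spatial distance; a self-contained sketch with the 0.7/0.3 weights from above and made-up data:

    import numpy as np
    from scipy.spatial.distance import cdist

    coords = np.random.rand(5, 3) * 10    # made-up voxel coordinates
    profiles = np.random.rand(5, 20)      # made-up connection profiles

    corr = np.abs(np.corrcoef(profiles))              # profile similarity
    spacedist = cdist(coords, coords, 'euclidean')    # spatial distance
    similarity = 0.7 * corr + 0.3 / (spacedist + 1)   # mix, as in step 6
    print similarity.shape                            # (5, 5), symmetric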