def net_square_svd(name=nu.default_name, reset=0):
    """Compute (or load a cached copy of) the SVD of the square network
    affinity matrix.

    Parameters
    ----------
    name : network name handed through to nw.readnet / nw.writenet
           (defaults to nu.default_name).
    reset : 0 returns the cached result if one can be read;
            nonzero forces a recompute, and additionally triggers
            hardcopy saves of U, S, V via net_square_svd_{U,V,S}.

    Returns
    -------
    (keys, (U, S, V)) where keys is the second element returned by
    nu.net_square_affinity and U, S, V are the thin SVD factors.
    """
    hardcopy = False
    try:
        if reset:
            # Sentinel exception: jump straight to the recompute path below.
            raise Exception("compute")
        return nw.readnet(name, hardcopy=hardcopy)
    except Exception as e:
        # Only the "compute" sentinel is expected; anything else is a real
        # failure.  Re-raise it intact (the original code raised a bare
        # Exception(), destroying the message and traceback context), and
        # guard against exceptions that carry no args at all.
        if not e.args or e.args[0] != "compute":
            raise
    nw.claim_reset()
    sqa = nu.net_square_affinity(reset=reset % 2)
    # NOTE(review): elsewhere in this file net_square_affinity() returns a
    # (matrix, keys) pair and callers use sqa[0] as the matrix -- this svd
    # call on the bare tuple looks suspect (sqa[0]?); left as-is, confirm.
    U, S, Vh = lin.svd(sqa)
    V = Vh.T
    # Keep only the first len(S) left singular vectors (the thin SVD).
    uvecs = U[:, :len(S)]
    vvecs = V
    dosave = (sqa[1], (uvecs, S, vvecs))
    nw.writenet(name, dosave, hardcopy=hardcopy)
    print("""
Ran net square svd for the current values.
Nothing has yet been saved to HD.
""")
    if reset:
        net_square_svd_U(reset=2)
        net_square_svd_V(reset=2)
        net_square_svd_S(reset=2)
        print("""
...saved hardcopies of U,S,V
""")
    return dosave
def smear_sqa(smear_gene = True):
    """Build a 'smear' view of the square affinity matrix and draw it.

    Every column is sorted into descending order, then the columns
    themselves are ranked by their totals (largest first) before the
    subsampled image is handed to draw_smear.

    smear_gene -- when True (the default) the matrix is transposed first
    so the smear runs over the gene axis.
    """
    affinity = nu.net_square_affinity()
    mat = array(affinity[0].T if smear_gene else affinity[0])
    # Descending sort within each column: np.sort is ascending along the
    # axis, so flip the rows afterwards.
    mat = np.sort(mat, 0)[::-1]
    # Reorder columns so the biggest column totals come first.
    order = np.argsort(np.sum(mat, 0))[::-1]
    mat = mat[:, order]
    # xmax / xskip / yskip are module-level subsampling globals.
    draw_smear(mat[:xmax:xskip, ::yskip])
def draw_hclustered(clustered):
    # Render a hierarchical clustering as a membership image: one row per
    # cut threshold, one column per element, shaded by the first element
    # seen in each cluster (so merge structure appears as bands), with a
    # red overlay marking columns that correspond to TFs.
    #
    # clustered -- dict holding an 'HCluster' object; c.cut(t) appears to
    # return a flat cluster label per element at threshold t (assumption
    # from usage -- confirm against the HCluster implementation).
    c = clustered['HCluster']
    clusters = c.cut(0)
    f = plt.figure(0)
    f.clear()
    ax = f.add_axes([0,0,1,1],aspect = 'auto')
    ax.set_aspect('auto')
    # Number of elements: labels at cut 0 are assumed to be 0..n-1.
    c0 = c.cut(0)
    n = max(array(c0)) +1
    # nlvl cut thresholds, linearly spaced over [0, 0.9].
    nlvl = 100
    h = zeros((nlvl,n))
    cuts = linspace(0,.9,nlvl)
    # appearances[cval] records the first element index at which cluster
    # label cval was seen; -1 means "not seen yet".  NOTE(review): this is
    # NOT reset between cut levels, so first-appearance is global across
    # all thresholds -- presumably intentional, but worth confirming.
    appearances = zeros(n)-1
    hinds = zeros((nlvl,n),int)
    for i in range(nlvl):
        cut = cuts[i]
        clusters = c.cut(cut)
        for j in range(n):
            cval = clusters[j]
            if appearances[cval] == -1:
                appearances[cval] = j
            # Color element j at this level by its cluster's first appearance.
            h[i,j] = appearances[cval]
        # Sort each row so elements sharing a cluster sit together, and
        # remember the permutation (needed for the TF overlay below).
        hinds[i,:] = argsort(h[i,:])
        h[i,:]=h[i,hinds[i,:]]
    # Normalize every row to [0, 1] for display.
    for i in range(shape(h)[0]):
        h[i,:] /= max(h[i,:])
    ax.imshow(mycolors.imgcolor(h, BW = True), aspect = 'auto', interpolation = 'nearest')
    # Map each TF key into square-affinity index space; ks is the key list
    # for the square matrix, ktf the TF key map from net_affinity.
    saff,ks = nu.net_square_affinity()
    raff,kt,ktf = nu.net_affinity()
    tfidxs = []
    for tf in ktf.keys():
        tfidxs.append(ks.index(tf))
    tfidxs= array(tfidxs)
    # Keep only TFs whose index falls inside the clustered range [0, n).
    is_clustered = tfidxs[nonzero(less(tfidxs,n))[0]]
    ntf = len(is_clustered)
    # tf_alpha marks, per (level, column), where a TF landed after the
    # per-row permutation stored in hinds.
    tf_alpha = zeros((nlvl,n))
    for i in range(ntf):
        tf_alpha +=equal(hinds,is_clustered[i])
    # Overlay the TF positions in red on top of the grayscale image.
    tf_rgba = mycolors.imgcolor(tf_alpha,alpha = True,color = [1,0,0])
    ax.imshow(tf_rgba, aspect = 'auto', interpolation = 'nearest')
def viewmany(all_means, all_clusters, fig = 12):
    """Visualize several clustering runs by projecting their cluster
    means onto transcription factors (TFs).

    Parameters
    ----------
    all_means : sequence of n mean matrices, one per clustering run; each
        is (k, dim) where dim distinguishes TF space (~541) from gene
        space (~8321).
    all_clusters : cluster assignments per run (currently unused here,
        kept for interface compatibility).
    fig : matplotlib figure number to draw into.

    Returns
    -------
    TFprojs : (n, k, ntf) array of |projection| of each cluster mean onto
        each TF, where ntf is the total number of TFs.
    """
    n = len(all_means)
    f = plt.figure(fig)
    f.clear()
    print('''Running viewmany.py
For now, viewmany assumes that k is equal across clustering instances
this is not really important but has to do with how TF projections are stored.
''')
    #1 k.
    k = len(all_means[0])
    ax1 = f.add_axes([.05,.05,.95,.4])
    ax2 = f.add_axes([.05,.55,.95,.4])
    sqa = nu.net_square_affinity()[0]
    # aff is unused below but the call may warm nu's cache; kept on purpose.
    aff = nu.net_affinity()[0]
    # tf_sqidxs: for each TF, its coordinate in sqa (square-affinity) space.
    # NOTE(review): pairing tfs.keys() order with n_tfidxs.values() order
    # assumes both iterate the same key order -- confirm.
    sqidxs = nu.net_sq_keyidxs()
    n_tfidxs = nu.net_tf_keyidxs()
    trgs, tfs = nu.parse_net()
    tf_sqidxs = [sqidxs[key] for key in tfs.keys()]
    tfidxs = n_tfidxs.values()
    ntf = len(tfidxs)
    tfweights = zeros(ntf, int)
    # Find TFs of general interest, choosing at most ntf_each per clustering.
    ntf_each = 20
    print('''...Computing representative TFs for each clustering.
In the current formulation, we project each mean on to associated tf
and then normalize each projection so that each mean has equal weight
in TF selection.

Note that we have handled the case where we have clustered in TF space
explicitly (e.g., dim = 541) and where we are in gene space explicitly
(e.g., dim = 8321, GG matrix or svdU).

svdV is emphatically not handled. Neither would svdU of TF-TF which
is actually the exact same thing.''')
    TFprojs = zeros((n, k, ntf))
    for i in range(n):
        m = all_means[i]
        dim = shape(m)[1]
        # BUG FIX: test the gene-space case FIRST.  The original code ran
        # `if dim > 500: ... elif dim > 8000: ...`, so a gene-space mean
        # (dim ~ 8321) satisfied the first test and the projection branch
        # below was unreachable -- gene-space means were indexed as if
        # they lived in TF space.
        if dim > 8000:
            # Gene space (GG matrix or svdU): project each mean onto the
            # TFs via the rows of sqa.T that correspond to TFs.
            a = sqa.T[tf_sqidxs, :]
            this_tf_sum = np.abs(np.sum(a[newaxis, :, :] * m[:, newaxis, :], 2))
            TFprojs[i, :, :] = this_tf_sum
        elif dim > 500:
            # TF space (dim ~ 541): just read off the TF components.
            this_tf_sum = np.abs(m[:, tfidxs])
            TFprojs[i, :, :] = this_tf_sum
        # Normalize so each mean carries equal weight, then sum over the
        # k means to rank TFs for this clustering run.
        this_tf_sum = this_tf_sum / np.sum(this_tf_sum, 1)[:, newaxis]
        this_tf_sum = np.sum(this_tf_sum, 0)
        best = argsort(this_tf_sum)[::-1]
        tfweights[best[0:ntf_each]] = 1
    print('''Finished computing representative TFs
''')
    tfs_of_interest = nonzero(tfweights)[0]
    ntf = len(tfs_of_interest)
    avg_shared = 1. - float(ntf) / (n * ntf_each)
    # BUG FIX: in the original, the str(...) calls sat inside a
    # triple-quoted literal and were printed as source text instead of
    # being interpolated.
    print('Allowing for each cluster to choose ' + str(ntf_each) +
          ' tfs, we got ' + str(ntf) + ' tfs of interest.\n' +
          'or a mean sharing ratio of ' + str(round(avg_shared, 3)) + '.')
    # Get a color table for clusters.
    ct = mycolors.getct(n)
    for i in range(n):
        # Plot each run's per-TF totals (restricted to the TFs of
        # interest), normalized by the per-TF maximum over its clusters.
        xax = linspace(0, 1, ntf)
        ax1.plot(xax,
                 np.sum(TFprojs[i, :, tfs_of_interest], 1) /
                 np.max(TFprojs[i, :, tfs_of_interest], 1),
                 color = ct[i])
    return TFprojs