def compareMeasureNG2(edges, sims, ratio, names, proms, wName=None, ciss=None, link_func=maxSim):
    """Evaluate inference measures, boosting scores of pairs whose
    cis-element/promoter association is confident.

    Parameters
    ----------
    edges : dict-of-lists true network; flattened into ``ee`` for evaluation.
    sims : dict mapping a measure name -> pairwise similarity dict.
    ratio : fraction of edges hidden by ``mdf.global_remove``.
    names : measure names to evaluate (keys into ``sims``).
    proms : promoter data forwarded to ``getTS2``.
    wName : if not None, key under which entropy-weighted P/R curves are stored.
    ciss : cis-element objects keyed like the similarity dicts.
    link_func : linkage function passed to ``netAlignInfer``.

    Returns
    -------
    (precs, recalls, ss) : per-name precision/recall lists over thresholds
    0.0, 0.1, ..., 0.9, plus the (possibly boosted) score dicts.
    """
    precs = dict.fromkeys(names)
    recalls = dict.fromkeys(names)
    ss = dict.fromkeys(names)
    ee = flatenDictList(edges)  # ee is true edges
    e = mdf.global_remove(edges, ratio)  # network with `ratio` of edges hidden
    for name in names:
        ss[name] = flatenDict(netAlignInfer(link_func, e, sims[name]))
        # NOTE(review): tss/factors are overwritten each iteration, so only
        # the LAST name's scores determine them below — confirm intended.
        tss = getTS2(edges, ciss, proms, ss[name], 0.1)
    factors = getAssoc2(*tss, min_c=0.9)
    for name in names:
        precs[name] = []
        recalls[name] = []
        # Boost to 1 every pair whose cis element has a confident association
        # and whose TSS annotation matches.  This mutation does not depend on
        # the threshold, so it is applied once per name here; the original
        # re-applied the same idempotent assignment inside the threshold loop.
        for key in ss[name].keys():
            if key[1] in factors.keys():
                if key[0] + str(ciss[key[1]].slen) == tss[1][key[1]]:
                    ss[name][key] = 1
        for thresh in np.arange(0, 1.0, 0.1):
            (p, r) = prEval(ee, ss[name], thresh)
            precs[name].append(p)
            recalls[name].append(r)
    if wName is not None:
        (precs[wName], recalls[wName]) = doEntropyWeights(ee, e, ciss, link_func=link_func)
    return (precs, recalls, ss)
def compareMeasureNG(edges, sims, ratio, names, wName=None, ciss=None, link_func=maxSim):
    '''NG means this experiment is not using the grouping of cis-element.

    Hide ``ratio`` of the edges, infer scores with each similarity measure,
    and evaluate precision/recall at thresholds 0.0, 0.1, ..., 0.9.

    Parameters
    ----------
    edges : dict-of-lists true network; flattened into ``ee`` for evaluation.
    sims : dict mapping a measure name -> pairwise similarity dict.
    ratio : fraction of edges hidden by ``mdf.global_remove``.
    names : measure names to evaluate (keys into ``sims``).
    wName : if not None, key under which entropy-weighted P/R curves are stored.
    ciss : cis-element objects, used only for the entropy-weight branch.
    link_func : linkage function passed to ``netAlignInfer``.

    Returns
    -------
    (precs, recalls, ss) : per-name precision/recall lists and score dicts.
    '''
    precs = dict.fromkeys(names)
    recalls = dict.fromkeys(names)
    ss = dict.fromkeys(names)
    ee = flatenDictList(edges)  # ee is true edges
    e = mdf.global_remove(edges, ratio)  # network with `ratio` of edges hidden
    for name in names:
        ss[name] = flatenDict(netAlignInfer(link_func, e, sims[name]))
    for name in names:
        precs[name] = []
        recalls[name] = []
        for thresh in np.arange(0, 1.0, 0.1):
            (p, r) = prEval(ee, ss[name], thresh)
            precs[name].append(p)
            recalls[name].append(r)
    # Idiom fix: `is not None` instead of `not wName==None`.
    if wName is not None:
        (precs[wName], recalls[wName]) = doEntropyWeights(ee, e, ciss, link_func=link_func)
    return (precs, recalls, ss)
def doEntropyWeights(ee, e, ciss, link_func=maxSim, proms=None):
    """Compute precision/recall curves for entropy-weighted similarity.

    Builds a full pairwise similarity matrix over the cis elements using
    ``dmSim`` with per-element entropy weights, runs ``netAlignInfer`` on the
    partial network ``e``, then evaluates against the true edges ``ee`` at
    thresholds 0.0, 0.1, ..., 0.9.  ``proms`` is accepted but unused here.

    Returns (precs, recalls) — parallel lists, one entry per threshold.
    """
    weights = getEntropyWeights(e, ciss)
    keys = ciss.keys()
    # Pairwise entropy-weighted similarity between every two cis elements.
    sims = {
        a: {
            b: dmSim(ciss[a].seq, ciss[b].seq,
                     w1=weights[a], w2=weights[b],
                     useLength=True)
            for b in keys
        }
        for a in keys
    }
    ss = flatenDict(netAlignInfer(link_func, e, sims))
    precs = []
    recalls = []
    for thresh in np.arange(0, 1, 0.1):
        prec, rec = prEval(ee, ss, thresh)
        precs.append(prec)
        recalls.append(rec)
    return (precs, recalls)
# Repeated-removal experiment: hide 30% of edges N times, infer with three
# similarity matrices, and scatter-plot precision vs recall for each.
# NOTE(review): `x` and `y` are appended to but not initialized in this
# fragment — presumably created earlier in the file; confirm.
x2=[]
y2=[]
x3=[]
y3=[]
ee=flatenDictList(edges)  # flattened true edges used as ground truth
for exp_i in range(N):
    # Fresh random removal each repetition (hard-coded 30% hidden).
    e=mdf.global_remove(edges,0.3)
    scores1=netAlignInfer(maxSim,e,nwSims)
    scores2=netAlignInfer(maxSim,e,nwSimsL)
    scores3=netAlignInfer(maxSim,e,dmSimsL)
    ss1=flatenDict(scores1)
    ss2=flatenDict(scores2)
    ss3=flatenDict(scores3)
    # Evaluate each measure at thresholds 0.0, 0.1, ..., 0.9;
    # prEval returns (precision, recall).
    for thresh in np.arange(0,1,0.1):
        result=prEval(ee,ss1,thresh)
        result2=prEval(ee,ss2,thresh)
        result3=prEval(ee,ss3,thresh)
        x.append(result[0])
        y.append(result[1])
        x2.append(result2[0])
        y2.append(result2[1])
        x3.append(result3[0])
        y3.append(result3[1])
# Scatter plots for the first two measures.
# NOTE(review): x3/y3 are collected but never plotted here — confirm intended.
plt.figure(1)
plt.scatter(x,y)
plt.figure(2)
plt.scatter(x2,y2)
# Create the filter list of the hidden edges # so that the evaluation only focus on these edges for exp_i in range(N): e=mdf.global_remove(edges,0.3) # hid_e is a list of real edges hidden entire_keys=edges.keys() subset_keys=e.keys() tmp_e=flatenDictList(e) hid_e=set(ee.keys())-set(tmp_e.keys()) M=len(hid_e) # number of the hidden edges print "start inferring" scores1=netAlignInfer(maxSim,e,nwSims) scores2=netAlignInfer(maxSim,e,nwSimsL) scores3=netAlignInfer(maxSim,e,dmSimsL) ss1=flatenDict(scores1) ss2=flatenDict(scores2) ss3=flatenDict(scores3) for i in range(len(threshs)): print "thresh is ", threshs[i] thresh=threshs[i] (precs[0][i],ph)=prEval(ee,ss1,thresh) (precs[1][i],ph)=prEval(ee,ss2,thresh) (precs[2][i],ph)=prEval(ee,ss3,thresh) recalls[0][i]=getRecall(ee,ss1,hid_e,thresh) recalls[1][i]=getRecall(ee,ss2,hid_e,thresh) recalls[2][i]=getRecall(ee,ss3,hid_e,thresh) print "end of experiement", exp_i