Example #1
import numpy as np

def doEntropyWeights(ee, e, ciss, link_func=maxSim, proms=None):
    precs = []
    recalls = []

    # Per-position entropy weights for the sequences in ciss
    weights = getEntropyWeights(e, ciss)
    cis_keys = ciss.keys()

    # Pairwise entropy-weighted similarity between all sequence pairs
    sims = dict()
    for x in cis_keys:
        sims[x] = dict()
        for y in cis_keys:
            sims[x][y] = dmSim(ciss[x].seq, ciss[y].seq,
                               w1=weights[x], w2=weights[y],
                               useLength=True)

    # Infer edge scores from the similarities and flatten to (edge, score) pairs
    ss = flatenDict(netAlignInfer(link_func, e, sims))

    # Precision/recall at each score threshold
    for thresh in np.arange(0, 1, 0.1):
        (p, r) = prEval(ee, ss, thresh)
        precs.append(p)
        recalls.append(r)
    return (precs, recalls)
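# The evaluation above hinges on prEval: judging from its use here, it compares
# flattened (edge, score) pairs against a reference edge list at a score
# threshold and returns (precision, recall). Below is a minimal self-contained
# sketch of that threshold sweep on toy data; pr_eval and the (edge, score)
# tuple representation are assumptions for illustration, not the original prEval.
import numpy as np

def pr_eval(true_edges, scored_edges, thresh):
    # Edges whose score reaches the threshold count as predicted positives
    predicted = {edge for edge, score in scored_edges if score >= thresh}
    truth = set(true_edges)
    tp = len(predicted & truth)
    precision = tp / len(predicted) if predicted else 1.0
    recall = tp / len(truth) if truth else 1.0
    return (precision, recall)

# Toy ground truth and scored candidate edges
true_edges = [("a", "b"), ("b", "c"), ("c", "d")]
scored = [(("a", "b"), 0.9), (("b", "c"), 0.6), (("a", "c"), 0.4), (("c", "d"), 0.2)]

precs, recalls = [], []
for thresh in np.arange(0, 1, 0.1):
    p, r = pr_eval(true_edges, scored, thresh)
    precs.append(p)
    recalls.append(r)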
Example #2
# Load precomputed dmSim-based similarities from pickle path pp3
dmSimsL = getPickle(pp3)

# Repeats of experiments
N = 5        # number of experiment repeats
ratio = 0.3  # fraction of edges removed in each repeat

# Precision (x, x2, x3) and recall (y, y2, y3) for the three similarity sources
x = []
y = []
x2 = []
y2 = []
x3 = []
y3 = []

ee = flatenDictList(edges)  # flattened reference edge list used as ground truth
for exp_i in range(N):
    # Remove a fraction of the edges, then score candidate edges with each similarity source
    e = mdf.global_remove(edges, ratio)
    scores1 = netAlignInfer(maxSim, e, nwSims)
    scores2 = netAlignInfer(maxSim, e, nwSimsL)
    scores3 = netAlignInfer(maxSim, e, dmSimsL)
    ss1 = flatenDict(scores1)
    ss2 = flatenDict(scores2)
    ss3 = flatenDict(scores3)

    # Sweep the score threshold and record precision/recall for each scoring
    for thresh in np.arange(0, 1, 0.1):
        result = prEval(ee, ss1, thresh)
        result2 = prEval(ee, ss2, thresh)
        result3 = prEval(ee, ss3, thresh)
        x.append(result[0])
        y.append(result[1])
        x2.append(result2[0])
        y2.append(result2[1])
        x3.append(result3[0])