Example #1
def nearlyGreedyStrat(counts, locPrior, likelihoodsObj, dag, skipProbs):
    # locations that already have at least one detection
    dlocs = [i for i in xrange(len(counts)) if counts[i][1]]
    (currEntropy, entropyResults,
     findProbs) = entropiesFast(counts, locPrior, likelihoodsObj, dag)
    (next, nextE) = findMin(entropyResults)
    if len(dlocs):
        # if there is a detection, calculate the expected entropy after making another observation
        # there and then making a 'greedy' observation.
        dloc = dlocs[-1]
        (t, d) = counts[dloc]
        # hypothetical outcomes of re-observing at dloc: dcounts assumes one
        # more detection there, tcounts assumes one more clean test
        dcounts = copy.copy(counts)
        tcounts = copy.copy(counts)
        dcounts[dloc] = (t, d + 1)
        tcounts[dloc] = (t + 1, d)
        (currEntropyD, entropyResultsD,
         findProbsD) = entropiesFast(dcounts, locPrior, likelihoodsObj, dag)
        (currEntropyT, entropyResultsT,
         findProbsT) = entropiesFast(tcounts, locPrior, likelihoodsObj, dag)
        (nextD, nextED) = findMin(entropyResultsD)
        (nextT, nextET) = findMin(entropyResultsT)

        # expected entropy after one more observation at dloc, weighting the
        # two possible outcomes by the probability of detecting there again
        expectedEntropy = findProbs[dloc] * nextED + (1 -
                                                      findProbs[dloc]) * nextET

        #        print "c %1.2f n %1.02f c-n %1.04f c-e %1.04f fp %1.02f nf %1.02f nt %1.02f" %(currEntropy,nextE,currEntropy-nextE,currEntropy-expectedEntropy,findProbs[dloc],nextED,nextET)
        if (currEntropy - nextE) < (currEntropy - expectedEntropy) / 2.0:
            return dloc
        else:
            return next

    else:
        return next
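
The decision rule above can be isolated: re-observe at the detecting location only when the immediate greedy gain falls below half of the expected gain from observing there first. Below is a minimal, self-contained sketch of that comparison; the helper name and the numbers are illustrative, not taken from BBChop.

def choose_location(curr_entropy, greedy_entropy, find_prob,
                    entropy_after_detect, entropy_after_clean,
                    dloc, greedy_loc):
    # expected entropy after one more observation at the detecting location
    expected = (find_prob * entropy_after_detect
                + (1 - find_prob) * entropy_after_clean)
    # prefer the detecting location when the one-step greedy gain is less
    # than half of the expected two-step gain
    if (curr_entropy - greedy_entropy) < (curr_entropy - expected) / 2.0:
        return dloc
    return greedy_loc

print(choose_location(2.0, 1.7, 0.6, 1.1, 1.5, dloc=3, greedy_loc=7))  # -> 3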
Example #2
def greedyStrat(counts, locPrior, likelihoodsObj, dag, skipProbs):
    (currEntropy, entropyResults,
     findProbs) = entropiesFast(counts, locPrior, likelihoodsObj, dag)
    # pick the location with the largest expected entropy reduction,
    # discounted by the probability that the observation there gets skipped

    expectedGain = [
        (currEntropy - entropyResults[i]) * (numberType.one - skipProbs[i])
        for i in xrange(len(entropyResults))
    ]
    (next, nextp) = findMax(expectedGain)

    return next
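
The greedy rule is a weighted argmax: score each location by its expected entropy reduction, discounted by the probability that the observation there would be skipped, and return the index with the best score. Below is a standalone sketch with made-up numbers; the function name is illustrative, not from BBChop.

def pick_greedy(curr_entropy, entropy_after, skip_probs):
    # expected gain at each location, discounted by the chance the test is skipped
    gains = [(curr_entropy - e) * (1.0 - s)
             for e, s in zip(entropy_after, skip_probs)]
    # index of the largest expected gain
    return max(range(len(gains)), key=lambda i: gains[i])

print(pick_greedy(2.0, [1.9, 1.4, 1.6], [0.0, 0.5, 0.1]))  # -> 2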
Example #3
#
#    You should have received a copy of the GNU General Public License
#    along with BBChop.  If not, see <http://www.gnu.org/licenses/>.

import evidence
import time
import random
import likelihoods
import testCases

random.seed(1)

mult = 100

out = file("data/N_T.singleRate.csv", "w")
# time entropiesFast on randomly generated data for DAGs of increasing size
for n in range(50, 1000, 50):

    d = testCases.testDag(n, False)

    for i in range(mult):
        (counts,
         locPrior) = testCases.randomEntropyData(i + 1, n, d, False, True, 10)
        start = time.clock()
        junk = evidence.entropiesFast(counts, locPrior,
                                      likelihoods.singleRateCalc, d)
        end = time.clock()
        out.write("%d,%f\n" % (n, (end - start)))
    print n

out.close()
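
The script above is Python 2 (file(), time.clock() and print statements). A minimal Python 3 sketch of the same timing loop, assuming the BBChop modules (evidence, likelihoods, testCases) can be imported under Python 3, might look like this; time.perf_counter() stands in for time.clock(), which was removed in Python 3.8.

import random
import time

import evidence
import likelihoods
import testCases

random.seed(1)
mult = 100

with open("data/N_T.singleRate.csv", "w") as out:
    for n in range(50, 1000, 50):
        d = testCases.testDag(n, False)
        for i in range(mult):
            (counts, locPrior) = testCases.randomEntropyData(i + 1, n, d,
                                                             False, True, 10)
            start = time.perf_counter()
            evidence.entropiesFast(counts, locPrior,
                                   likelihoods.singleRateCalc, d)
            end = time.perf_counter()
            out.write("%d,%f\n" % (n, end - start))
        print(n)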