def observationModel(s):
    """Return the wall-color observation distribution for room s.

    Every room has white walls except room 2, whose walls are green;
    the observation is wrong with probability 0.1.
    """
    if s == 2:
        return dist.DDist({'green': 0.9, 'white': 0.1})
    return dist.DDist({'white': 0.9, 'green': 0.1})
def efficientTotalProbability(self, priorState, transitionContidition):
    """Law of total probability, visiting only states with nonzero prior.

    priorState: DDist over states.
    transitionContidition: maps a state to a DDist over successor states.
    Returns the DDist over successor states.
    (Parameter name typo kept as-is: renaming would break keyword callers.)
    """
    accumulated = {}
    for prior_state in priorState.support():
        successor_dist = transitionContidition(prior_state)
        weight = priorState.prob(prior_state)
        for successor in successor_dist.support():
            incrDictEntry(accumulated, successor,
                          weight * successor_dist.prob(successor))
    return dist.DDist(accumulated)
def makeNoisyKnownInitLoc(initLoc, hallway=standardHallway):
    """Build a noisy-sensing hallway simulation with a known start location.

    The initial belief places all probability mass on initLoc.
    """
    initial_belief = dist.DDist({initLoc: 1})
    return makeSim(hallway, actions, noisyObsNoiseModel, standardDynamics,
                   noisyTransNoiseModel, 'known init',
                   initialDist=initial_belief)
def sonar_pass(self, loc):
    """Bayes-update cell `loc`'s wall probability after the sonar MISSED.

    Grid cells hold P(wall).  Observation model: P(hit | wall) = 0.8,
    P(hit | no wall) = 0.1.  Conditions on observation 0 (a miss).
    """
    row, col = loc
    p_wall = self.grid[row][col]
    prior = dist.DDist({1: p_wall, 0: 1 - p_wall})

    def likelihood(wall):
        # Distribution over hit (1.) / miss (0.) given wall presence.
        if wall == 1:
            return dist.DDist({1.: 0.8, 0.: 0.2})
        return dist.DDist({1.: 0.1, 0.: 0.9})

    posterior = dist.bayes_rule(prior, likelihood, 0)
    self.grid[row][col] = posterior.prob(1.)
def sonar_hit(self, loc):
    """Bayes-update cell `loc`'s wall probability after the sonar HIT.

    Grid cells hold P(wall).  Observation model: P(hit | wall) = 0.8,
    P(hit | no wall) = 0.1.  Conditions on observation 1 (a hit).
    """
    row, col = loc
    p_wall = self.grid[row][col]
    prior = dist.DDist({1: p_wall, 0: 1 - p_wall})

    def likelihood(wall):
        # Distribution over hit (1.) / miss (0.) given wall presence.
        if wall == 1:
            return dist.DDist({1.: 0.8, 0.: 0.2})
        return dist.DDist({1.: 0.1, 0.: 0.9})

    posterior = dist.bayes_rule(prior, likelihood, 1)
    self.grid[row][col] = posterior.prob(1.)
def transGivenA(oldS):
    """Transition distribution for taking `action` (free variable) from oldS.

    The robot reaches oldS + action with probability 0.8, and undershoots
    or overshoots by one step with probability 0.1 each.  Results are
    clipped to the world [0, 5]; dist.incrDictEntry merges the mass of
    outcomes that clip to the same edge cell.  (From Wk.11.1.2, Part 3.)
    """
    intended = oldS + action
    outcome_probs = {}
    for offset, p in ((0, 0.8), (1, 0.1), (-1, 0.1)):
        dist.incrDictEntry(outcome_probs,
                           util.clip(intended + offset, 0, 5), p)
    return dist.DDist(outcome_probs)
def efficientBayesEvidence(self, state, observation):
    """Bayes update of the belief `state` given `observation`.

    Computes P(O|S) * P(S) for each state in the prior's support, then
    divides by their sum (= P(O)) to normalize.
    """
    joint = {}
    for s in state.support():
        # P(O|S) * P(S)
        joint[s] = (self.model.observationDistribution(s).prob(observation)
                    * state.prob(s))
    evidence_prob = sum(joint.values())  # P(O)
    normalized = dict((s, p / evidence_prob) for s, p in joint.items())
    return dist.DDist(normalized)
def efficientBayesEvidence(self, state, observationCondition, observation):
    """Bayes update restricted to the prior's support.

    state: prior DDist over states.
    observationCondition: maps a state to its observation DDist.
    Returns the posterior DDist over states, normalized by P(observation).
    """
    unnormalized = {}
    evidence_prob = 0.
    for s in state.support():
        obs_dist = observationCondition(s)
        joint = state.prob(s) * obs_dist.prob(observation)
        unnormalized[s] = joint
        evidence_prob += joint
    posterior = dict((s, p / evidence_prob)
                     for s, p in unnormalized.items())
    return dist.DDist(posterior)
def totalProbability(self, belief):
    # Law-of-total-probability step: B'(s') = sum_s P(s'|s) * B(s).
    #
    # NOTE(review): several fragile points, left untouched:
    #  * partialDist[n] is the transition distribution's INTERNAL dict
    #    (.d), and the `*=` below mutates it in place — if the model
    #    caches its distributions, this corrupts them.
    #  * `totalDist = partialDist[0]` is an alias, so the summation also
    #    mutates partialDist[0].
    #  * Only keys of partialDist[0] are summed; a state missing from a
    #    LATER dict raises KeyError, and a state missing from the FIRST
    #    dict is silently dropped.  Correct only when every transition
    #    distribution shares the same support.
    n = 0
    partialDist = {}
    for potentialState in belief.d.keys():  # go through states
        print "potentialState = ", potentialState
        partialDist[n] = self.model.transitionDistribution(0)(
            potentialState).d  # what would it look like at this state
        for outcome in partialDist[n].keys():
            partialDist[n][outcome] *= belief.prob(
                potentialState)  # multiply by the probability of being in that state
        n += 1
    totalDist = partialDist[0]
    for event in partialDist[0].keys():
        for count in range(1, n):
            totalDist[event] += partialDist[count][event]
    # normalize
    beliefPrime = dist.DDist(totalDist)
    print beliefPrime
    return beliefPrime
def transitionUpdate(self, belief):
    """Propagate `belief` one step through the transition model.

    Computes P(S_{t+1}) = sum_s P(S_{t+1} | i, s) * P(s), with the input
    fixed at 0 via transitionDistribution(0), exactly as before.

    Bug fix: the old code enumerated candidate next states only from the
    *prior* belief's support, so any successor the transition model could
    reach but the prior could not was silently dropped — and the missing
    mass was then hidden by renormalization, distorting the result.  We
    now enumerate each state's actual successor support.
    """
    totalProbDict = {}
    for outcome in belief.support():  # iterates over S_t
        transDist = self.model.transitionDistribution(0)(outcome)
        weight = belief.prob(outcome)
        for possibility in transDist.support():  # iterates over S_{t+1}
            totalProbDict[possibility] = (
                totalProbDict.get(possibility, 0.)
                + weight * transDist.prob(possibility))
    # Renormalize as a guard against floating-point drift; with no
    # successors dropped the mass should already sum to 1.
    normalizationCoefficient = sum(totalProbDict.values())
    for outcome in totalProbDict:
        totalProbDict[outcome] = (totalProbDict[outcome]
                                  / normalizationCoefficient)
    return dist.DDist(totalProbDict)
class StateEstimator(sm.SM):
    # Recursive state estimator driven by a stochastic state-machine model.
    def __init__(self, model):
        self.model = model
        # Initial belief is the model's prior distribution over states.
        self.startState = model.startDistribution

    def getNextValues(self, state, inp):
        # NOTE(review): appears to be an unfinished stub — it unpacks
        # (observation, input) but performs no observation/transition
        # update and implicitly returns None.
        (o, i) = inp

# Test
# NOTE(review): the following appears to be module-level test code (the
# original line breaks were lost; the class/module boundary is inferred).
# Copy-machine example: a machine is 'good' or 'bad'; copies come out
# 'perfect', 'smudged', or 'black'.
transitionTable = \
    {'good': dist.DDist({'good' : 0.7, 'bad' : 0.3}),
     'bad' : dist.DDist({'good' : 0.1, 'bad' : 0.9})}
observationTable = \
    {'good': dist.DDist({'perfect' : 0.8, 'smudged' : 0.1, 'black' : 0.1}),
     'bad': dist.DDist({'perfect' : 0.1, 'smudged' : 0.7, 'black' : 0.2})}
copyMachine = \
    ssm.StochasticSM(dist.DDist({'good' : 0.9, 'bad' : 0.1}),
                     # Input is irrelevant; same dist no matter what
                     lambda i: lambda s: transitionTable[s],
                     lambda s: observationTable[s])
obs = [('perfect', 'step'), ('smudged', 'step'), ('perfect', 'step')]
cmse = StateEstimator(copyMachine)
print cmse.transduce(obs)
import lib601.hmm as hmm
from simulator import Simulator
from random import randrange

def obsGivenLoc(loc):
    # Observation model: locations 0 and 3 mostly read 1, others mostly 8
    # (wrong reading with probability 0.2 either way).
    if loc == 0 or loc == 3:
        return dist.DDist({1: .8, 8: .2})
    else:
        return dist.DDist({1: .2, 8: .8})

# Ideal (noise-free) observation sequence, one reading per state.
ideal = [1, 8, 8, 1, 1, 8, 1, 8, 8]
numStates = len(ideal)

PA = dist.DDist({0: 0, 1: 0.0625, 2: 0.25, 3: 0.6874999999999999})
PBgA = obsGivenLoc
b = 1
# print dist.bayesRule(PA, PBgA, b)

## OBSERVATION MODELS
def perfectObsModel(state):
    # Deterministic: always observe exactly the ideal reading.
    return dist.deltaDist(ideal[state])

# NOTE(review): rebinds `ideal` to a shorter list; `numStates` (computed
# above from the 9-element list) is NOT updated — confirm this is intended.
ideal = [1, 8, 8, 1, 1]
# NOTE(review): the next `return` is the tail of a transition-model
# function whose `def` line lies above this chunk and is not visible here.
    return dist.mixture(right_d, zero_d, 0.7)

def teleportModel2(state):
    # Mixture of staying put and jumping to a uniformly random state
    # (presumably weight 0.7 on the first distribution — confirm
    # lib601 dist.mixture semantics).
    current_d = dist.deltaDist(state)
    random_d = dist.uniformDist(range(numStates))
    return dist.mixture(current_d, random_d, 0.7)

def resetModel2(state):
    # Mixture of staying put and resetting to state 0 (same mixture
    # weight caveat as above).
    current_d = dist.deltaDist(state)
    zero_d = dist.deltaDist(0)
    return dist.mixture(current_d, zero_d, 0.7)

## STARTING DISTRIBUTIONS
uniformPrior = dist.uniformDist(range(len(ideal)))
alwaysLeftPrior = dist.DDist({0: 1.0})

## SIMULATION CODE
def simulate(transDist, obsDist):
    # Run the true HMM and the state estimator side by side over the
    # ideal sequence and visualize with Simulator.
    testSE = StateEstimator(uniformPrior, transDist, obsDist)
    testHMM = HMM(alwaysLeftPrior, transDist, obsDist)
    Simulator(testHMM, testSE, ideal).simulate()

simulate(moveRightModel, obsModelB)
def obsModelC(state):
    """Deterministic observation: always report the 9-complement of the
    ideal reading for this state."""
    complemented = 9 - ideal[state]
    return dist.DDist({complemented: 1.0})
def prob_hit_given_wall(x):
    """Sonar likelihood: P(hit | wall) = 0.8, P(hit | no wall) = 0.1.

    x is 1 for a wall; the returned DDist is over hit (1.) / miss (0.).
    """
    if x != 1:
        return dist.DDist({1.: 0.1, 0.: 0.9})
    return dist.DDist({1.: 0.8, 0.: 0.2})
def obsModelD(state):
    """Fifty-fifty between the ideal reading and its 9-complement."""
    true_reading = ideal[state]
    return dist.DDist({9 - true_reading: 0.5, true_reading: 0.5})
def PTgD(diseaseValue):
    """Conditional distribution P(test result | disease status)."""
    has_disease = (diseaseValue == 'disease')
    if has_disease:
        return dist.DDist({'posTest': 0.98, 'negTest': 0.02})
    return dist.DDist({'posTest': 0.05, 'negTest': 0.95})
def obsGivenLoc(loc):
    """Locations 0 and 3 usually observe 1; all others usually observe 8."""
    if loc in (0, 3):
        return dist.DDist({1: .8, 8: .2})
    return dist.DDist({1: .2, 8: .8})
def TESTgivenAIDS(AIDS):
    """P(test outcome | AIDS status), status given as 'true'/'false'."""
    if AIDS != 'true':
        return dist.DDist({'positive': 0.023000, 'negative': 0.977000})
    return dist.DDist({'positive': 0.985946, 'negative': 0.014054})
# NOTE(review): the following loop is the tail of a conditioning method
# (e.g. a DDist.conditionOnVar) whose signature lies above this chunk.
# It keeps keys whose `index` component equals `value`, strips that
# component, and renormalizes by `denominator` (presumably the marginal
# probability of `value` — confirm against the method's preamble).
        retDic = {}
        for key in self.support():
            if removeElt(key, index) == value:
                new_key = removeElt(key, abs(index - 1))
                incrDictEntry(retDic, new_key, self.prob(key) / denominator)
        return DDist(retDic)

def PTgD(val):
    # P(test result | disease status).
    if val == 'disease':
        return dist.DDist({'posTest': 0.9, 'negTest': 0.1})
    else:
        return dist.DDist({'posTest': 0.5, 'negTest': 0.5})

# Prior over disease status; JDist combines it with PTgD into the
# joint distribution P(disease, test).
disease = dist.DDist({'disease': 0.1, 'noDisease': 0.9})
jointP = dist.JDist(disease, PTgD)
print(jointP)

# Hand-built joint, then condition on each test outcome.
testP = DDist({('noDisease', 'posTest'): 0.450000, ('disease', 'posTest'): 0.090000, \
               ('noDisease', 'negTest'): 0.450000, ('disease', 'negTest'): 0.010000})
print(testP.conditionOnVar(0, 'posTest'))
print(testP.conditionOnVar(0, 'negTest'))

def bayesEvidence(PBgA, PA, b):
    # first cal joint probability
    # PBgA and PA
    # then condition On Var
    # NOTE(review): unimplemented stub — always returns None.
    pass
def PTgD(val):
    """Conditional distribution P(test result | disease status)."""
    diseased = (val == 'disease')
    if diseased:
        return dist.DDist({'posTest': 0.9, 'negTest': 0.1})
    return dist.DDist({'posTest': 0.5, 'negTest': 0.5})
import lib601.dist as dist

'''
toss = dist.DDist({'head':0.5,'tail':0.5})
print toss
print toss.prob('head')
print toss.prob('tail')
print toss.prob('H')
'''

def TESTgivenAIDS(AIDS):
    # Conditional distribution P(test outcome | AIDS status), where the
    # status is the string 'true' or 'false'.
    if AIDS == 'true':
        return dist.DDist({'positive': 0.985946, 'negative': 0.014054})
    else:
        return dist.DDist({'positive': 0.023000, 'negative': 0.977000})

#print TESTgivenAIDS('true')
#print TESTgivenAIDS('true').prob('negative')

# Prior over AIDS status; JDist combines prior and conditional into the
# joint distribution P(AIDS, test).
AIDS = dist.DDist({'true': 0.0037, 'false': 0.9963})
AIDSandTEST = dist.JDist(AIDS, TESTgivenAIDS)
print AIDSandTEST