Example #1
def simulate(hmm,horizon):
    """returns a pair of (state sequence, observation sequence) of length horizon.
    for each time t, the agent is in state_sequence[t] and
    observes observation_sequence[t]
    """
    state = sample_one(hmm.indist)          # sample the initial state
    obsseq = []
    stateseq = []
    for time in range(horizon):
        stateseq.append(state)
        # each observation variable is true with probability P(obs=1 | state)
        newobs = {obs: sample_one({0: 1-hmm.pobs[obs][state], 1: hmm.pobs[obs][state]})
                  for obs in hmm.obsvars}
        obsseq.append(newobs)
        state = sample_one(hmm.trans[state])    # sample the next state
    return stateseq, obsseq
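Neither the HMM representation nor sample_one is defined in these examples, so the usage sketch below supplies hypothetical stand-ins: a sample_one that draws a value from a {value: probability} dictionary, and a two-state weather model whose attribute names (indist, trans, pobs, obsvars) match what simulate expects. The numbers are made up for illustration.

import random
from types import SimpleNamespace

def sample_one(dist):
    """Draw one value from a {value: probability} dictionary (stand-in helper)."""
    values, weights = zip(*dist.items())
    return random.choices(values, weights=weights, k=1)[0]

# Hypothetical two-state weather HMM with a single boolean observation variable.
hmm = SimpleNamespace(
    indist={'rain': 0.5, 'dry': 0.5},              # P(initial state)
    trans={'rain': {'rain': 0.7, 'dry': 0.3},      # P(next state | current state)
           'dry':  {'rain': 0.2, 'dry': 0.8}},
    pobs={'umbrella': {'rain': 0.9, 'dry': 0.2}},  # P(observation = 1 | state)
    obsvars=['umbrella'])

states, observations = simulate(hmm, horizon=5)
print(states)         # e.g. ['rain', 'rain', 'dry', 'dry', 'dry']
print(observations)   # e.g. [{'umbrella': 1}, {'umbrella': 1}, {'umbrella': 0}, ...]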
Example #2
def simobs(hmm,stateseq):
    """returns observation sequence for the state sequence"""
    obsseq = []
    for state in stateseq:
        # each observation variable is true with probability P(obs=1 | state)
        newobs = {obs: sample_one({0: 1-hmm.pobs[obs][state], 1: hmm.pobs[obs][state]})
                  for obs in hmm.obsvars}
        obsseq.append(newobs)
    return obsseq
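Using the same hypothetical weather model and sample_one stand-in as in the sketch above, simobs generates observations for a hand-picked state trajectory:

obs = simobs(hmm, ['rain', 'rain', 'dry'])
print(obs)   # e.g. [{'umbrella': 1}, {'umbrella': 1}, {'umbrella': 0}]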
Example #3
def query(self,
          qvar,
          obs={},
          number_samples=1000,
          burn_in=100,
          sample_order=None):
    """computes P(qvar|obs) where
    qvar is a variable.
    obs is a variable:value dictionary.
    sample_order is a list of non-observed variables in order, or
    None, in which case an arbitrary order is used (reshuffled each pass).
    """
    counts = {val: 0 for val in qvar.domain}
    if sample_order is not None:
        variables = sample_order
    else:
        variables = [v for v in self.bn.variables if v not in obs]
    # map each variable to the factors it appears in
    var_to_factors = {v: set() for v in self.bn.variables}
    for fac in self.bn.factors:
        for var in fac.variables:
            var_to_factors[var].add(fac)
    # start from a random assignment to the non-observed variables
    sample = {var: random.choice(var.domain) for var in variables}
    self.display(2, "Sample:", sample)
    sample.update(obs)
    for i in range(burn_in + number_samples):
        if sample_order is None:
            random.shuffle(variables)
        for var in variables:
            # get probability distribution of var given its neighbours
            vardist = {val: 1 for val in var.domain}
            for val in var.domain:
                sample[var] = val
                for fac in var_to_factors[var]:  # factors in var's Markov blanket
                    vardist[val] *= fac.get_value(sample)
            sample[var] = sample_one(vardist)
        if i >= burn_in:                         # only count post-burn-in samples
            counts[sample[qvar]] += 1
    tot = sum(counts.values())
    return counts, {c: v / tot for (c, v) in counts.items()}
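This is Gibbs sampling over a belief network: each non-observed variable is repeatedly resampled from its distribution given the factors it appears in, and samples after burn-in are tallied. The method expects a self.bn with variables and factors, variable objects with a domain, factor objects with variables and get_value, a display method, and the random module. The sketch below is a minimal harness under those assumptions; Var, TabFactor, and TinyGibbs are invented stand-ins (not the library's classes), and it reuses the sample_one stand-in and the query method defined above.

import random
from types import SimpleNamespace

class Var:
    """Minimal stand-in for a belief-network variable: a name and a domain."""
    def __init__(self, name, domain):
        self.name, self.domain = name, domain
    def __repr__(self):
        return self.name

class TabFactor:
    """Minimal factor: a tuple of variables and a {value-tuple: probability} table."""
    def __init__(self, variables, table):
        self.variables = variables
        self.table = table
    def get_value(self, assignment):
        return self.table[tuple(assignment[v] for v in self.variables)]

class TinyGibbs:
    """Wrapper supplying the self.bn and self.display(...) that query() expects."""
    def __init__(self, bn):
        self.bn = bn
    def display(self, level, *args):
        pass                      # no tracing in this sketch

TinyGibbs.query = query           # attach the query method defined above

# Hypothetical two-variable network A -> B with made-up numbers
A = Var('A', [True, False])
B = Var('B', [True, False])
fA = TabFactor((A,), {(True,): 0.3, (False,): 0.7})                 # P(A)
fB = TabFactor((A, B), {(True, True): 0.9, (True, False): 0.1,      # P(B | A)
                        (False, True): 0.2, (False, False): 0.8})
bn = SimpleNamespace(variables=[A, B], factors=[fA, fB])

counts, dist = TinyGibbs(bn).query(B, obs={A: True}, number_samples=2000)
print(dist)   # should be close to {True: 0.9, False: 0.1}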
Example #4
def advance(self):
    """advance to the next time.
    This assumes that all of the weights are 1."""
    # move each particle by sampling its next state from the transition model
    self.particles = [
        sample_one(self.hmm.trans[st]) for st in self.particles
    ]
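Because every weight is 1, advancing amounts to pushing each particle independently through the transition model. With the hypothetical weather HMM and sample_one from the first sketch, the same operation on a bare particle list looks like this:

particles = ['rain', 'rain', 'dry']
particles = [sample_one(hmm.trans[st]) for st in particles]   # one time step forward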
Example #5
def __init__(self, hmm, number_particles=1000):
    self.hmm = hmm
    # each particle is a state sampled from the initial distribution
    self.particles = [
        sample_one(hmm.indist) for i in range(number_particles)
    ]
    # all particles start with equal weight
    self.weights = [1 for i in range(number_particles)]
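Example #5 above is the constructor and example #4 the time-advance step of a simple particle filter; the enclosing class itself is not shown in these examples. A usage sketch assuming the two methods belong to a class named ParticleFiltering (the name is illustrative) and reusing the hypothetical weather HMM and sample_one stand-in from the first sketch:

pf = ParticleFiltering(hmm, number_particles=2000)
pf.advance()       # every particle takes one step through the transition model
estimate = {s: pf.particles.count(s) / len(pf.particles) for s in hmm.indist}
print(estimate)    # roughly {'rain': 0.45, 'dry': 0.55}, since 0.5*0.7 + 0.5*0.2 = 0.45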