Example #1
0
 def __init__(self, address, distribution, value):
     """Store the sample's address, distribution and value.

     The suffixed address appends the distribution's address suffix when a
     distribution is given; otherwise it is the plain address.
     """
     self.address = address
     self.distribution = distribution
     self.address_suffixed = (address if distribution is None
                              else address + distribution.address_suffix)
     # Normalize the raw value into a tensor via the project utility.
     self.value = util.to_tensor(value)
     # Filled in later by the embedding/LSTM pipeline.
     self.value_dim = None
     self.lstm_input = None
     self.lstm_output = None
Example #2
0
 def __init__(self, *args, **kwargs):
     """Record the fixed observation sequence and the reference posterior.

     The posterior tensor holds the known-correct marginal over three
     latent states, one row per time step, used as ground truth.
     """
     self._observation = [
         0.9, 0.8, 0.7, 0.0, -0.025, -5.0, -2.0, -0.1, 0.0, 0.13, 0.45, 6,
         0.2, 0.3, -1, -1
     ]
     # One row per time step; each row is a distribution over 3 states.
     posterior_rows = [
         [0.3775, 0.3092, 0.3133],
         [0.0416, 0.4045, 0.5539],
         [0.0541, 0.2552, 0.6907],
         [0.0455, 0.2301, 0.7244],
         [0.1062, 0.1217, 0.7721],
         [0.0714, 0.1732, 0.7554],
         [0.9300, 0.0001, 0.0699],
         [0.4577, 0.0452, 0.4971],
         [0.0926, 0.2169, 0.6905],
         [0.1014, 0.1359, 0.7626],
         [0.0985, 0.1575, 0.7440],
         [0.1781, 0.2198, 0.6022],
         [0.0000, 0.9848, 0.0152],
         [0.1130, 0.1674, 0.7195],
         [0.0557, 0.1848, 0.7595],
         [0.2017, 0.0472, 0.7511],
         [0.2545, 0.0611, 0.6844],
     ]
     self._posterior_mean_correct = util.to_tensor(posterior_rows)
     super().__init__(*args, **kwargs)
    def __init__(self, *args, **kwargs):
        """Test fixture for a 3-state hidden Markov model.

        Builds the HMM from Wood et al. (AISTATS 2014), stores a fixed
        observation sequence, and records the known-correct posterior state
        marginals used as ground truth (17 rows: the initial state plus one
        per observation).
        """
        # http://www.robots.ox.ac.uk/~fwood/assets/pdf/Wood-AISTATS-2014.pdf
        class HiddenMarkovModel(Model):
            def __init__(self, init_dist, trans_dists, obs_dists, obs_length):
                # Prior over the initial latent state.
                self.init_dist = init_dist
                # One transition distribution per latent state.
                self.trans_dists = trans_dists
                # One emission distribution per latent state.
                self.obs_dists = obs_dists
                self.obs_length = obs_length
                super().__init__('Hidden Markov model')

            def forward(self):
                # Fixed: sample from self.init_dist rather than the enclosing
                # function's closure variable `init_dist` — consistent with
                # self.trans_dists / self.obs_dists below, and robust if the
                # model is ever constructed with different arguments.
                states = [pyprob.sample(self.init_dist)]
                for i in range(self.obs_length):
                    state = pyprob.sample(self.trans_dists[int(states[-1])])
                    pyprob.observe(self.obs_dists[int(state)], name='obs{}'.format(i))
                    states.append(state)
                # One-hot encode the latent trajectory over 3 possible states.
                return torch.stack([util.one_hot(3, int(s)) for s in states])

        init_dist = Categorical([1, 1, 1])
        trans_dists = [Categorical([0.1, 0.5, 0.4]),
                       Categorical([0.2, 0.2, 0.6]),
                       Categorical([0.15, 0.15, 0.7])]
        obs_dists = [Normal(-1, 1),
                     Normal(1, 1),
                     Normal(0, 1)]

        self._observation = [0.9, 0.8, 0.7, 0.0, -0.025, -5.0, -2.0, -0.1, 0.0, 0.13, 0.45, 6, 0.2, 0.3, -1, -1]
        self._model = HiddenMarkovModel(init_dist, trans_dists, obs_dists, len(self._observation))
        # Reference posterior marginals; row 0 is the initial state, rows
        # 1..16 correspond to the 16 observations above.
        self._posterior_mean_correct = util.to_tensor([[0.3775, 0.3092, 0.3133],
                                                       [0.0416, 0.4045, 0.5539],
                                                       [0.0541, 0.2552, 0.6907],
                                                       [0.0455, 0.2301, 0.7244],
                                                       [0.1062, 0.1217, 0.7721],
                                                       [0.0714, 0.1732, 0.7554],
                                                       [0.9300, 0.0001, 0.0699],
                                                       [0.4577, 0.0452, 0.4971],
                                                       [0.0926, 0.2169, 0.6905],
                                                       [0.1014, 0.1359, 0.7626],
                                                       [0.0985, 0.1575, 0.7440],
                                                       [0.1781, 0.2198, 0.6022],
                                                       [0.0000, 0.9848, 0.0152],
                                                       [0.1130, 0.1674, 0.7195],
                                                       [0.0557, 0.1848, 0.7595],
                                                       [0.2017, 0.0472, 0.7511],
                                                       [0.2545, 0.0611, 0.6844]])
        super().__init__(*args, **kwargs)