Example No. 1
 def forward(self, observation=[]):
     # Hidden Markov model: sample a latent state path and score each
     # state's emission distribution against the corresponding observed value
     states = [pyprob.sample(init_dist)]
     for o in observation:
         state = pyprob.sample(self.trans_dists[int(states[-1])])
         pyprob.observe(self.obs_dists[int(state)], o)
         states.append(state)
     return torch.stack([util.one_hot(3, int(s)) for s in states])
Example No. 2
 def forward(self):
     states = [pyprob.sample(init_dist)]
     for i in range(self.obs_length):
         state = pyprob.sample(self.trans_dists[int(states[-1])])
         pyprob.observe(self.obs_dists[int(state)], name='obs{}'.format(i))
         states.append(state)
     return torch.stack([util.one_hot(3, int(s)) for s in states])
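Note on Example No. 2: because the emissions are named ('obs0', 'obs1', ...), the observed sequence is not passed into forward; it is bound when inference is run. A minimal sketch of that binding, assuming the surrounding class is a pyprob.Model subclass (called HiddenMarkovModel here as a placeholder) and that values are supplied through the observe dict of posterior_distribution:

import pyprob

# HiddenMarkovModel and obs_seq are illustrative placeholders, not defined in the snippet above
model = HiddenMarkovModel()
obs_seq = [0.9, 0.8, 0.7]  # hypothetical observed emissions; length should match self.obs_length
posterior = model.posterior_distribution(
    num_traces=1000,
    inference_engine=pyprob.InferenceEngine.IMPORTANCE_SAMPLING,
    observe={'obs{}'.format(i): o for i, o in enumerate(obs_seq)})  # keys match the names used in forward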
Example No. 3
 def forward(self):
     mu = pyprob.sample(Normal(self.prior_mean, self.prior_stddev))
     likelihood = Normal(mu, self.likelihood_stddev)
     # pyprob.observe usage alternative #2
     pyprob.sample(likelihood, name='obs0')
     pyprob.sample(likelihood, name='obs1')
     return mu
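Example No. 3 is the standard pyprob pattern of a Gaussian with unknown mean: two named sample statements act as observes and are assigned values at inference time. A minimal usage sketch, assuming the surrounding class is called GaussianUnknownMean (a placeholder) and subclasses pyprob.Model:

import pyprob

# GaussianUnknownMean is an illustrative placeholder for the class containing the forward() above
model = GaussianUnknownMean()
posterior = model.posterior_distribution(
    num_traces=1000,  # number of importance-weighted traces
    inference_engine=pyprob.InferenceEngine.IMPORTANCE_SAMPLING,
    observe={'obs0': 8, 'obs1': 9})  # values for the two named observes
print(posterior.mean)  # Empirical posterior over mu; its mean shifts toward the observed values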
Example No. 4
 def marsaglia(self, mean, stddev):
     # Marsaglia polar method: rejection-sample a point (x, y) inside the
     # unit disc, then transform it into a draw from Normal(mean, stddev)
     uniform = Uniform(-1, 1)
     s = 1
     while float(s) >= 1:
         x = pyprob.sample(uniform)[0]
         y = pyprob.sample(uniform)[0]
         s = x * x + y * y
     return mean + stddev * (x * torch.sqrt(-2 * torch.log(s) / s))
Example No. 5
 def marsaglia(self, mean, stddev):
     uniform = Uniform(-1, 1)
     s = 1
     while float(s) >= 1:
         pyprob.rs_start()
         x = pyprob.sample(uniform)
         y = pyprob.sample(uniform)
         s = x * x + y * y
     pyprob.rs_end()
     return mean + stddev * (x * torch.sqrt(-2 * torch.log(s) / s))
Example No. 6
 def forward(self):
     count_prior = Poisson(4)
     r = pyprob.sample(count_prior)
     if 4 < float(r):
         l = 6
     else:
         l = 1 + self.fibonacci(3 * int(r)) + pyprob.sample(count_prior)

     pyprob.observe(Poisson(l), name='obs')
     return r
Example No. 7
 def forward(self):
     uniform = Uniform(-1, 1)
     for i in range(2):
         x = pyprob.sample(uniform)
         y = pyprob.sample(uniform)
         s = x * x + y * y

     likelihood = Normal(s, 0.1)
     pyprob.observe(likelihood, name='obs0')
     pyprob.observe(likelihood, name='obs1')
     return s
Example No. 8
 def forward(self, observation=[]):
     uniform = Uniform(0, 1)
     ret = pyprob.sample(uniform)
     ret = pyprob.sample(uniform)
     ret = pyprob.sample(uniform, control=False)
     ret = pyprob.sample(uniform, control=False)
     ret = pyprob.sample(uniform, control=False)
     pyprob.observe(uniform, 0.5)
     pyprob.observe(uniform, 0.5)
     pyprob.observe(uniform, 0.5)
     pyprob.observe(uniform, 0.5)
     return ret
Example No. 9
    def forward(self):
        format_index = int(
            pyprob.sample(dists.Categorical(torch.tensor([1 / 6] * 6))).item())

        firstname_probs = FIRST_NAMES['count'].tolist(
        ) / FIRST_NAMES['count'].sum()
        firstname_index = int(
            pyprob.sample(dists.Categorical(
                torch.tensor(firstname_probs))).item())
        firstname = FIRST_NAMES['name'][firstname_index].lower()

        lastname_probs = LAST_NAMES['count'].tolist(
        ) / LAST_NAMES['count'].sum()
        lastname_index = int(
            pyprob.sample(dists.Categorical(
                torch.tensor(lastname_probs))).item())
        lastname = LAST_NAMES['name'][lastname_index].lower()

        if format_index == 0 or format_index == 1:
            # The person has no middle name
            middlename = ""
        if format_index == 2 or format_index == 3:
            # The person has a middle name
            middlename_probs = MIDDLE_NAMES['count'].tolist(
            ) / MIDDLE_NAMES['count'].sum()
            middlename_index = int(
                pyprob.sample(dists.Categorical(
                    torch.tensor(middlename_probs))).item())
            middlename = MIDDLE_NAMES['name'][middlename_index].lower()
        if format_index == 4 or format_index == 5:
            # The person has a middle name initial
            middlename_index = int(
                pyprob.sample(dists.Categorical(torch.tensor([1 / 26] *
                                                             26))).item())
            middlename = ALL_LETTERS[middlename_index]

        # make a categorical distribution that observes each letter independently (like 50 independent categoricals)
        output = pad_string(original=format_name(firstname, middlename,
                                                 lastname, format_index),
                            desired_len=MAX_STRING_LEN)

        probs = torch.ones(MAX_STRING_LEN, N_CHARACTERS) * (
            (1 - self.peak_prob) / (N_CHARACTERS - 1))
        for i, character in enumerate(output):
            probs[i, character_to_index(character)] = self.peak_prob
        pyprob.observe(OneHot2DCategorical(probs), name=f"name_string")

        return output, {
            'firstname': firstname,
            'middlename': middlename,
            'lastname': lastname
        }
Example No. 10
 def forward(self):
     uniform = Uniform(0, 1)
     val = pyprob.sample(uniform)
     val = pyprob.sample(uniform)
     val = pyprob.sample(uniform, control=False)
     val = pyprob.sample(uniform, control=False)
     val = pyprob.sample(uniform, control=False)
     pyprob.tag(value=val, name='val')
     pyprob.observe(uniform, 0.5)
     pyprob.observe(uniform, 0.5)
     pyprob.observe(uniform, 0.5)
     pyprob.observe(uniform, 0.5)
     return val
Example No. 11
 def forward(self):
     mu = pyprob.sample(Normal(self.prior_mean, self.prior_stddev))
     likelihood = Normal(mu, self.likelihood_stddev)
     likelihood_func = lambda x: likelihood.log_prob(x)
     pyprob.factor(log_prob=likelihood_func(8))
     pyprob.factor(log_prob=likelihood_func(9))
     return mu
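In Example No. 11, conditioning happens through pyprob.factor, which adds the given log-probability directly to the trace's log-weight, so the observed values (8 and 9) are hard-coded in the model rather than supplied through an observe dict. A minimal sketch of running inference on such a model, assuming a pyprob.Model subclass named GaussianUnknownMeanFactor (a placeholder):

# GaussianUnknownMeanFactor is an illustrative placeholder for the class containing the forward() above
model = GaussianUnknownMeanFactor()
posterior = model.posterior_distribution(num_traces=1000)  # no observe dict: pyprob.factor already conditions the traces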
Example No. 12
def rangeSample(span):
	mini = span[0]
	maxi = span[1]
	size = maxi - mini + 1
	samp = pyprob.sample(pyprob.distributions.Categorical(torch.tensor([1.]*size)))
	n = samp + mini
	return n
Example No. 13
 def rejection_sampling(self):
     u = pyprob.sample(Uniform(0, 1), control=False)
     if u > 0.5:
         while True:
             x = pyprob.sample(Normal(self.prior_mean,
                                      self.prior_stddev * 4),
                               replace=True)
             u2 = pyprob.sample(Uniform(0, 1), control=False)
             if x < 0 and u2 < 0.25 * torch.exp(
                     Normal(self.prior_mean, self.prior_stddev).log_prob(x)
                     - Normal(self.prior_mean, self.prior_stddev *
                              4).log_prob(x)):
                 return x
     else:
         while True:
             x = pyprob.sample(Normal(self.prior_mean, self.prior_stddev),
                               replace=True)
             if x >= 0:
                 return x
Example No. 14
 def marsaglia(self, mean, stddev):
     uniform = Uniform(-1, 1)
     s = 1
     i = 0
     while True:
         x = pyprob.sample(uniform, replace=self.replace)
         y = pyprob.sample(uniform, replace=self.replace)
         s = x * x + y * y
         i += 1
         if float(s) < 1:
             pyprob.tag(x, name='x_accepted')
             pyprob.tag(y, name='y_accepted')
             pyprob.tag(s, name='s_accepted')
             break
         else:
             pyprob.tag(x, name='x_rejected')
             pyprob.tag(y, name='y_rejected')
             pyprob.tag(s, name='s_rejected')
     pyprob.tag(i, name='iterations')
     return mean + stddev * (x * torch.sqrt(-2 * torch.log(s) / s))
Example No. 15
 def true_posterior(self, observation=6):
     count_prior = Poisson(4)
     vals = []
     log_weights = []
     for r in range(40):
         for s in range(40):
             if 4 < float(r):
                 l = 6
             else:
                 f = self.fibonacci(3 * r)
                 l = 1 + f + pyprob.sample(count_prior)
             vals.append(r)
             log_weights.append(
                 Poisson(l).log_prob(observation) +
                 count_prior.log_prob(r) + count_prior.log_prob(s))
     return Empirical(vals, log_weights)
Example No. 16
    def forward(self):
        country_index = int(pyprob.sample(dists.Categorical(torch.tensor([1/len(COUNTRY_INFO)]*len(COUNTRY_INFO)))).item())
        country_info = COUNTRY_INFO[country_index]

        # Obtain formatted country code
        country_code = country_info['cc']
        cc_format = int(pyprob.sample(dists.Categorical(torch.tensor([1/3] + [1/9]*6))).item())
        full_cc = format_cc(country_code, cc_format)
        
        structure_index = int(pyprob.sample(dists.Categorical(torch.tensor([1/len(country_info['structure'])]*len(country_info['structure'])))).item())
        number_structure = country_info['structure'][structure_index]

        # Obtain formatted area code
        area_code_len = number_structure[0]
        area_code = ""
        for _ in range(area_code_len):
            curr_digit = int(pyprob.sample(dists.Categorical(torch.tensor([1/N_DIGIT]*N_DIGIT))).item())
            area_code += str(curr_digit)
        ac_format = int(pyprob.sample(dists.Categorical(torch.tensor([1/6]*6))).item())
        full_ac = format_ac(area_code, ac_format)

        # Obtain formatted line number
        line_number_structure = number_structure[1:]
        line_number_block_len = len(line_number_structure)
        line_number_blocks = []
        for i in range(line_number_block_len):
            number_block_len = line_number_structure[i]
            number_block_digits = ""
            for _ in range(number_block_len):
                number = int(pyprob.sample(dists.Categorical(torch.tensor([1/N_DIGIT]*N_DIGIT))).item())
                number_block_digits += str(number)
            line_number_blocks.append(number_block_digits)
        line_number = " ".join(line_number_blocks)
        line_format = int(pyprob.sample(dists.Categorical(torch.tensor([1/3]*3))).item())
        full_line = format_line_number(line_number_blocks, line_format)

        # make a categorical distribution that observes each letter independently (like 30 independent categoricals)
        output = pad_string(original=full_cc+full_ac+full_line, desired_len=MAX_STRING_LEN)
        probs = torch.ones(MAX_STRING_LEN, N_LETTER)*0.001
        for i, letter in enumerate(output):
            probs[i, letter_to_index(letter)] = 1.
        pyprob.observe(OneHot2DCategorical(probs), name=f"phone_string")

        return output, {'country': country_info['country'],'country code': country_code, 'area code': area_code, 'line number': line_number}
Example No. 17
 def forward(self, observation=[]):
     mu = pyprob.sample(Normal(self.prior_mean, self.prior_stddev))
     likelihood = Normal(mu, self.likelihood_stddev)
     for o in observation:
         pyprob.observe(likelihood, o)
     return mu
Example No. 18
 def forward(self, observation=None):
     categorical_value = pyprob.sample(Categorical([0.1, 0.1, 0.8]))
     normal_value = pyprob.sample(Normal(5, 2))
     return categorical_value, normal_value
Example No. 19
def selectFromList(lst):
	size = len(lst)
	idx = pyprob.sample(pyprob.distributions.Categorical(torch.tensor([1.]*size))).long().item()
	popped = lst[idx]
	return popped
Example No. 20
def popFromList(lst):	
	size = len(lst)
	idx = pyprob.sample(pyprob.distributions.Categorical(torch.tensor([1.]*size))).long().item()
	popped = lst[idx]
	newLst = lst[:idx] + lst[idx+1:]
	return popped, newLst
Example No. 21
def sample_bernoulli(p):
	return pyprob.sample( pyprob.distributions.Categorical(torch.tensor([1-p, p])))
Example No. 22
def dummySample(i):
	return pyprob.sample( pyprob.distributions.Categorical(torch.tensor([0.5,0.5])))
Example No. 23
 def forward(self):
     a = pyprob.sample(Bernoulli(0.5), name='a')
     b = pyprob.sample(Bernoulli(0.25 if a else 0.75), name='b')
Example No. 24
 def forward(self, observation=None):
     categorical_value = pyprob.sample(Categorical([0.1, 0.1, 0.8]))
     normal_value = pyprob.sample(Normal(5., 2.))
     return float(categorical_value), normal_value
Example No. 25
 def forward(self):
     letter_id = pyprob.sample(Categorical(self._probs))
     image = self.render(self._alphabet[letter_id]).view(-1)
     likelihood = Normal(image, self._noise)
     pyprob.observe(likelihood, name='query_image')
     return letter_id