Example #1
def test_rejector(rate, factor):
    num_samples = 100000
    rates = torch.full((num_samples, 1), rate).requires_grad_()
    factors = torch.full((num_samples, 1), factor).requires_grad_()

    dist1 = Exponential(rates)
    dist2 = RejectionExponential(rates, factors)  # implemented using Rejector
    x1 = dist1.rsample()
    x2 = dist2.rsample()
    assert_equal(x1.mean(), x2.mean(), prec=0.02, msg='bug in .rsample()')
    assert_equal(x1.std(), x2.std(), prec=0.02, msg='bug in .rsample()')
    assert_equal(dist1.log_prob(x1), dist2.log_prob(x1), msg='bug in .log_prob()')
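The two moment checks rely on the fact that an exponential distribution with rate λ has mean and standard deviation both equal to 1/λ, so the plain sampler and the rejection sampler should agree up to Monte Carlo error. A minimal sanity-check sketch (assuming only torch and pyro.distributions.Exponential; RejectionExponential and assert_equal come from Pyro's test utilities and are omitted here):

import torch
from pyro.distributions import Exponential

rates = torch.full((100000, 1), 2.0)
samples = Exponential(rates).rsample()
# Both values should be close to 1 / rate = 0.5, up to Monte Carlo noise.
print(samples.mean().item(), samples.std().item())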
Example #2
 def model():
     lambda_latent = pyro.sample("lambda_latent",
                                 Gamma(self.alpha0, self.beta0))
     x_dist = Exponential(lambda_latent)
     pyro.observe("obs0", x_dist, self.data[0])
     pyro.observe("obs1", x_dist, self.data[1])
     return lambda_latent
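pyro.observe is the old Pyro 0.x API; in current Pyro the same observations are written with pyro.sample and the obs keyword. A sketch of the equivalent model, keeping the original names:

def model():
    lambda_latent = pyro.sample("lambda_latent", Gamma(self.alpha0, self.beta0))
    x_dist = Exponential(lambda_latent)
    pyro.sample("obs0", x_dist, obs=self.data[0])
    pyro.sample("obs1", x_dist, obs=self.data[1])
    return lambda_latent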
Example #3
def test_exponential_elbo(rate, factor):
    num_samples = 100000
    rates = torch.full((num_samples, 1), rate).requires_grad_()
    factors = torch.full((num_samples, 1), factor).requires_grad_()
    model = Exponential(torch.ones(num_samples, 1))
    guide1 = Exponential(rates)
    guide2 = RejectionExponential(rates, factors)  # implemented using Rejector

    grads = []
    for guide in [guide1, guide2]:
        grads.append(compute_elbo_grad(model, guide, [rates])[0])
    expected, actual = grads
    assert_equal(actual.mean(), expected.mean(), prec=0.05, msg='bad grad for rate')

    actual = compute_elbo_grad(model, guide2, [factors])[0]
    assert_equal(actual.mean().item(), 0.0, prec=0.05, msg='bad grad for factor')
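The second assertion checks the defining property of the rejection sampler: the acceptance factor only shapes the proposal, not the resulting distribution (which remains Exponential(rate)), so the ELBO gradient with respect to factors should vanish in expectation; here it is verified up to a Monte Carlo tolerance of 0.05.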
Example #4
 def survives(self, t, λ, μ, ρ):
     t_end = t - Exponential(μ).sample()
     if t_end <= 0:
         if Bernoulli(ρ).sample():
             return True
         t_end = 0
     for i in range(int(Poisson(λ * (t - t_end)).sample())):
         τ = Uniform(t_end, t).sample()
         if self.survives(τ, λ, μ, ρ):
             return True
     return False
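Read as a birth-death simulation: a lineage alive at time t goes extinct after an Exponential(μ) waiting time; if it reaches the present (t_end <= 0) it is detected with probability ρ, and in every other case each of the Poisson(λ * (t - t_end)) speciation events placed uniformly along the branch gets its own recursive chance to produce a surviving, sampled descendant.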
Example #5
 def step(self, state, branch, ρ=1.0):
     Δ = branch["t_beg"] - branch["t_end"]
     if branch['parent_id'] is None and Δ < 1e-5:
         return
     count_hs = sample(f"count_hs_{branch['id']}", Poisson(state["λ"] * Δ))
     f = vec_survives(branch["t_end"], branch["t_beg"], count_hs.numpy(), state["λ"].numpy(), state["μ"].numpy(), ρ)
     factor(f"factor_hs_{branch['id']}", f)
     sample(f"num_ex_{branch['id']}", Poisson(state["μ"] * Δ), obs=tensor(0))
     if branch["has_children"]:
         sample(f"spec_{branch['id']}", Exponential(state["λ"]), obs=tensor(1e-40))
     else:
         sample(f"obs_{branch['id']}", Bernoulli(ρ), obs=tensor(1.))
Example #6
def model(dataset_total_length, x_data, y_data):
    priors = generate_nnet_priors()
    # scale = pyro.sample('sigma', Uniform(0., 10.))
    lifted_module = pyro.random_module('module', regression_model, priors)

    lifted_module_sample = lifted_module()

    # with pyro.plate('map', x_data.shape[0]): # no subsample
    # with pyro.plate('map', dataset_total_length, subsample_size=x_data.shape[0]): # don't do this.
    with pyro.plate('map', dataset_total_length, subsample=x_data):
        prediction_mean = lifted_module_sample(x_data).squeeze(-1)
        # pyro.sample('observations', LogNormal(prediction_mean, scale), obs=y_data)
        pyro.sample('observations',
                    Exponential(prediction_mean),
                    obs=y_data.squeeze(-1))
        return prediction_mean
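Since prediction_mean is used directly as the rate of the Exponential likelihood, the network output must be strictly positive; Example #8 below enforces this with a relu(x) + 1e-3 non-linearity on the output layer.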
Example #7
 def step(self, state, branch, ρ=1.0):
     Δ = branch["t_beg"] - branch["t_end"]
     if branch['parent_id'] is None and Δ == 0:
         return
     count_hs = sample(f"count_hs_{branch['id']}", Poisson(state["λ"] * Δ))
     f = zeros(state._num_particles)
     for n in range(state._num_particles):
         for i in range(int(count_hs[n])):
             t = Uniform(branch["t_end"], branch["t_beg"]).sample()
             if self.survives(t, state["λ"][n], state["μ"][n], ρ):
                 f[n] = -float('inf')
                 break
             f[n] += log(tensor(2))
     factor(f"factor_hs_{branch['id']}", f)
     sample(f"num_ex_{branch['id']}", Poisson(state["μ"] * Δ), obs=tensor(0))
     if branch["has_children"]:
         sample(f"spec_{branch['id']}", Exponential(state["λ"]), obs=tensor(1e-40))
     else:
         sample(f"obs_{branch['id']}", Bernoulli(ρ), obs=tensor(1.))
Example #8
def model(dataset_total_length, x_data, y_data, kl_factor):
    global n_features, n_hidden, n_out
    a1_mean = torch.zeros(n_features, n_hidden)
    a1_scale = torch.ones(n_features, n_hidden)

    a2_mean = torch.zeros(n_hidden + 1, n_out)
    a2_scale = torch.ones(n_hidden + 1, n_out)

    with pyro.plate('map', dataset_total_length, subsample=x_data):
    # with pyro.plate('map', size=x_data.shape[0]):
        # sample first hidden layer
        h1 = pyro.sample('h1', bnn.HiddenLayer(x_data, a1_mean, a1_scale,
                                                   non_linearity=nnf.leaky_relu,
                                                   KL_factor=kl_factor))
        # sample second hidden layer
        rate = pyro.sample('rate', bnn.HiddenLayer(h1, a2_mean, a2_scale,
                                                   non_linearity=lambda x: nnf.relu(x)+1e-3,
                                                   KL_factor=kl_factor,
                                                   include_hidden_bias=False))
        # likelihood
        return pyro.sample('observations', Exponential(rate), obs=y_data.squeeze(-1))
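Training this model with SVI also requires a guide. One common choice is a mean-field guide that mirrors the model, with learnable means and scales for both layers; the sketch below assumes that pattern and reuses bnn, nnf and the global layer sizes from the model above, while the parameter names and initial values are illustrative only.

import torch
import pyro
from torch.distributions import constraints

def guide(dataset_total_length, x_data, y_data, kl_factor):
    a1_mean = pyro.param('a1_mean', 0.01 * torch.randn(n_features, n_hidden))
    a1_scale = pyro.param('a1_scale', 0.1 * torch.ones(n_features, n_hidden),
                          constraint=constraints.positive)
    a2_mean = pyro.param('a2_mean', 0.01 * torch.randn(n_hidden + 1, n_out))
    a2_scale = pyro.param('a2_scale', 0.1 * torch.ones(n_hidden + 1, n_out),
                          constraint=constraints.positive)
    with pyro.plate('map', dataset_total_length, subsample=x_data):
        # Variational posterior over the first hidden layer.
        h1 = pyro.sample('h1', bnn.HiddenLayer(x_data, a1_mean, a1_scale,
                                               non_linearity=nnf.leaky_relu,
                                               KL_factor=kl_factor))
        # Variational posterior over the output-layer rate (kept positive).
        pyro.sample('rate', bnn.HiddenLayer(h1, a2_mean, a2_scale,
                                            non_linearity=lambda x: nnf.relu(x) + 1e-3,
                                            KL_factor=kl_factor,
                                            include_hidden_bias=False))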
Example #9
 def model(prefix, condition=False, generate=0, alpha=None):
     if alpha is None:
         alpha = sample("alpha", Exponential(lam=(Tensor([1]))))
     else:
         alpha = (Tensor([alpha]))
     return core(prefix, alpha, condition, generate)
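The lam keyword points to an older Pyro release; in current Pyro and torch.distributions the parameter is called rate, e.g. Exponential(rate=torch.tensor([1.0])).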