Example #1
 def sample(self, conv_rate, relative_copy_chrc, depth):
     relative_copy_chrc = relative_copy_chrc.repeat(self.n_sample, 1)
     depth = depth.repeat(self.n_sample, 1)

     num_nuclear = dist.Binomial(depth, 1 / (1 + relative_copy_chrc)).sample()
     num_non_conv = dist.Binomial(depth - num_nuclear, conv_rate).sample()
     return num_nuclear + num_non_conv
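
A minimal way to exercise this sampler outside its original class; the stand-in wrapper and all values below are assumptions for illustration:

import torch
import pyro.distributions as dist

class SamplerSketch:
    # hypothetical stand-in for the original class; only n_sample is needed
    def __init__(self, n_sample):
        self.n_sample = n_sample

    def sample(self, conv_rate, relative_copy_chrc, depth):
        relative_copy_chrc = relative_copy_chrc.repeat(self.n_sample, 1)
        depth = depth.repeat(self.n_sample, 1)
        num_nuclear = dist.Binomial(depth, 1 / (1 + relative_copy_chrc)).sample()
        num_non_conv = dist.Binomial(depth - num_nuclear, conv_rate).sample()
        return num_nuclear + num_non_conv

s = SamplerSketch(n_sample=3)
draws = s.sample(torch.tensor(0.1), torch.ones(1, 4), torch.full((1, 4), 100.))
print(draws.shape)  # torch.Size([3, 4])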
Example #2
def reparameterized_discrete_model(args, data):
    # Sample global parameters.
    rate_s, prob_i, rho = global_model(args.population)

    # Sequentially sample time-local variables.
    S_curr = torch.tensor(args.population - 1.0)
    I_curr = torch.tensor(1.0)
    for t, datum in enumerate(data):
        # Sample reparameterizing variables.
        # When reparameterizing to a factor graph, we ignore the density via
        # .mask(False); the distributions here are used only for initialization.
        S_prev, I_prev = S_curr, I_curr
        S_curr = pyro.sample("S_{}".format(t),
                             dist.Binomial(args.population, 0.5).mask(False))
        I_curr = pyro.sample("I_{}".format(t),
                             dist.Binomial(args.population, 0.5).mask(False))

        # Now we reverse the computation.
        S2I = S_prev - S_curr
        I2R = I_prev - I_curr + S2I
        pyro.sample(
            "S2I_{}".format(t),
            dist.ExtendedBinomial(S_prev, -(rate_s * I_prev).expm1()),
            obs=S2I,
        )
        pyro.sample("I2R_{}".format(t),
                    dist.ExtendedBinomial(I_prev, prob_i),
                    obs=I2R)
        pyro.sample("obs_{}".format(t),
                    dist.ExtendedBinomial(S2I, rho),
                    obs=datum)
Example #3
def model(data, params):
    # initialize data
    data = {k: torch.tensor(v).float() for k, v in data.items()}
    n1 = data["n1"]
    n2 = data["n2"]
    k1 = data["k1"]
    k2 = data["k2"]
    # init parameters
    theta = pyro.sample("theta", dist.Beta(1., 1.))
    pyro.sample("k1", dist.Binomial(n1, theta), obs=k1)
    pyro.sample("k2", dist.Binomial(n2, theta), obs=k2)
Example #4
def test_beta_binomial_dependent_sample():
    total = 10
    counts = dist.Binomial(total, 0.3).sample()
    concentration1 = torch.tensor(0.5)
    concentration0 = torch.tensor(1.5)

    prior = dist.Beta(concentration1, concentration0)
    posterior = dist.Beta(concentration1 + counts,
                          concentration0 + total - counts)

    def model(counts):
        prob = pyro.sample("prob", prior)
        pyro.sample("counts", dist.Binomial(total, prob), obs=counts)

    reparam_model = poutine.reparam(
        model,
        {
            "prob":
            ConjugateReparam(
                lambda counts: dist.Beta(1 + counts, 1 + total - counts)),
        },
    )

    with poutine.trace() as tr, pyro.plate("particles", 10000):
        reparam_model(counts)
    samples = tr.trace.nodes["prob"]["value"]

    assert_close(samples.mean(), posterior.mean, atol=0.01)
    assert_close(samples.std(), posterior.variance.sqrt(), atol=0.01)
Example #5
def test_beta_binomial_hmc():
    num_samples = 1000
    total = 10
    counts = dist.Binomial(total, 0.3).sample()
    concentration1 = torch.tensor(0.5)
    concentration0 = torch.tensor(1.5)

    prior = dist.Beta(concentration1, concentration0)
    likelihood = dist.Beta(1 + counts, 1 + total - counts)
    posterior = dist.Beta(concentration1 + counts,
                          concentration0 + total - counts)

    def model():
        prob = pyro.sample("prob", prior)
        pyro.sample("counts", dist.Binomial(total, prob), obs=counts)

    reparam_model = poutine.reparam(model,
                                    {"prob": ConjugateReparam(likelihood)})

    kernel = HMC(reparam_model)
    mcmc = MCMC(kernel, num_samples, warmup_steps=0)
    mcmc.run()
    samples = mcmc.get_samples()
    pred = Predictive(reparam_model, samples, num_samples=num_samples)
    trace = pred.get_vectorized_trace()
    samples = trace.nodes["prob"]["value"]

    assert_close(samples.mean(), posterior.mean, atol=0.01)
    assert_close(samples.std(), posterior.variance.sqrt(), atol=0.01)
Example #6
    def model(self, n_samples=None, answer_mask=True):
        """p(x) for BigFiveModel

        Keyword Arguments:
            n_samples {int} -- Number of samples to generate (default: {None})
            answer_mask {bool or list of booleans} -- Mask with the shape of the answers; True marks an observed value and False an unobserved one (default: {True})

        Returns:
            (answer, trait) -- Answers and traits generated by the model
        """
        if n_samples is None:
            n_samples = len(self._observations)

        with pyro.plate("person", n_samples):
            # Draw a trait value from a Beta prior; we assume a Gaussian-shaped
            # Beta with mean 0.5

            with pyro.plate('traits', 5):
                trait = pyro.sample(
                    'trait',
                    dist.Beta(torch.tensor(self.ALPHA_PRIOR),
                              torch.tensor(self.BETA_PRIOR)))

                with pyro.plate("question", 10):
                    answer = pyro.sample(
                        'answer',
                        dist.Binomial(4, trait).mask(answer_mask))

                    return answer, trait
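
A self-contained sketch of the same nested-plate structure, with assumed prior values standing in for self.ALPHA_PRIOR and self.BETA_PRIOR:

import torch
import pyro
import pyro.distributions as dist

def big_five_sketch(n_people=3):
    with pyro.plate("person", n_people):
        with pyro.plate("traits", 5):
            trait = pyro.sample(
                "trait", dist.Beta(torch.tensor(5.0), torch.tensor(5.0)))
            with pyro.plate("question", 10):
                # each answer is a 0-4 rating, i.e. 4 trials at rate `trait`
                answer = pyro.sample("answer", dist.Binomial(4, trait))
    return answer, trait

answers, traits = big_five_sketch()
print(answers.shape)  # torch.Size([10, 5, 3])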
Example #7
def model(data, params):
    # initialize data
    n = torch.tensor(data["n"]).float()
    k = torch.tensor(data["k"]).float()

    theta = pyro.sample("theta", dist.Beta(1., 1.))
    pyro.sample("k", dist.Binomial(n, theta), obs=k)
Example #8
def discrete_model(args, data):
    # Sample global parameters.
    rate_s, prob_i, rho = global_model(args.population)

    # Sequentially sample time-local variables.
    S = torch.tensor(args.population - 1.0)
    I = torch.tensor(1.0)
    for t, datum in enumerate(data):
        S2I = pyro.sample("S2I_{}".format(t),
                          dist.Binomial(S, -(rate_s * I).expm1()))
        I2R = pyro.sample("I2R_{}".format(t), dist.Binomial(I, prob_i))
        S = pyro.deterministic("S_{}".format(t), S - S2I)
        I = pyro.deterministic("I_{}".format(t), I + S2I - I2R)
        pyro.sample("obs_{}".format(t),
                    dist.ExtendedBinomial(S2I, rho),
                    obs=datum)
Example #9
def test_extended_binomial(tol):
    with set_approx_log_prob_tol(tol):
        total_count = torch.tensor([0.0, 1.0, 2.0, 10.0])
        probs = torch.tensor([0.5, 0.5, 0.4, 0.2]).requires_grad_()

        d1 = dist.Binomial(total_count, probs)
        d2 = dist.ExtendedBinomial(total_count, probs)
        # Check on good data.
        data = d1.sample((100, ))
        assert_equal(d1.log_prob(data), d2.log_prob(data))

        # Check on extended data.
        data = torch.arange(-10.0, 20.0).unsqueeze(-1)
        with pytest.raises(ValueError):
            d1.log_prob(data)
        log_prob = d2.log_prob(data)
        valid = d1.support.check(data)
        assert ((log_prob > -math.inf) == valid).all()
        check_grad(log_prob, probs)

        # Check on shape error.
        with pytest.raises(ValueError):
            d2.log_prob(torch.tensor([0.0, 0.0]))

        # Check on value error.
        with pytest.raises(ValueError):
            d2.log_prob(torch.tensor(0.5))

        # Check on negative total_count.
        total_count = torch.arange(-10, 0.0)
        probs = torch.tensor(0.5).requires_grad_()
        d = dist.ExtendedBinomial(total_count, probs)
        log_prob = d.log_prob(data)
        assert (log_prob == -math.inf).all()
        check_grad(log_prob, probs)
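
For reference, ExtendedBinomial behaves like Binomial but assigns -inf log-probability, rather than raising, outside [0, total_count]; a quick standalone check:

import torch
import pyro.distributions as dist

d = dist.ExtendedBinomial(torch.tensor(5.), torch.tensor(0.3))
print(d.log_prob(torch.tensor(-1.)))  # tensor(-inf): outside the support
print(d.log_prob(torch.tensor(2.)))   # finite, matches dist.Binomial(5., 0.3)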
Example #10
def test_beta_binomial(hyperpriors):
    def model(data):
        with pyro.plate("plate_0", data.shape[-1]):
            if hyperpriors:
                alpha = pyro.sample("alpha", dist.HalfCauchy(1.))
                beta = pyro.sample("beta", dist.HalfCauchy(1.))
            else:
                alpha = torch.tensor([1., 1.])
                beta = torch.tensor([1., 1.])
            beta_binom = BetaBinomialPair()
            with pyro.plate("plate_1", data.shape[-2]):
                probs = pyro.sample("probs", beta_binom.latent(alpha, beta))
                with pyro.plate("data", data.shape[0]):
                    pyro.sample("binomial",
                                beta_binom.conditional(
                                    probs=probs, total_count=total_count),
                                obs=data)

    true_probs = torch.tensor([[0.7, 0.4], [0.6, 0.4]])
    total_count = torch.tensor([[1000, 600], [400, 800]])
    num_samples = 80
    data = dist.Binomial(
        total_count=total_count,
        probs=true_probs).sample(sample_shape=torch.Size((10,)))
    hmc_kernel = NUTS(collapse_conjugate(model),
                      jit_compile=True,
                      ignore_jit_warnings=True)
    mcmc = MCMC(hmc_kernel, num_samples=num_samples, warmup_steps=50)
    mcmc.run(data)
    samples = mcmc.get_samples()
    posterior = posterior_replay(model, samples, data, num_samples=num_samples)
    assert_equal(posterior["probs"].mean(0), true_probs, prec=0.05)
Example #11
 def nested():
     true_probs = torch.ones(5) * 0.7
     num_trials = torch.ones(5) * 1000
     num_success = dist.Binomial(num_trials, true_probs).sample()
     conditioned_model = poutine.condition(model, data={"obs": num_success})
     nuts_kernel = NUTS(conditioned_model, adapt_step_size=True)
     mcmc_run = MCMC(nuts_kernel, num_samples=10, warmup_steps=2).run(num_trials)
     return mcmc_run
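
This test, like Examples #19, #23, #26, and #30 below, conditions a `model` defined elsewhere on an "obs" site. A plausible minimal stand-in consistent with how these tests call it (an assumption, not the original definition):

import pyro
import pyro.distributions as dist

def model(num_trials):
    # one latent success probability per group; the tests condition on "obs"
    with pyro.plate("data", num_trials.shape[0]):
        success_prob = pyro.sample("success_prob", dist.Beta(1., 1.))
        return pyro.sample("obs", dist.Binomial(num_trials, success_prob))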
Example #12
 def _model_(self):
     control_prior = pyro.sample('control_p', dist.Beta(1, 1))
     treatment_prior = pyro.sample('treatment_p', dist.Beta(1, 1))
     return pyro.sample(
         'obs',
         dist.Binomial(self.traffic_size,
                       torch.stack([control_prior, treatment_prior])),
         obs=self.outcome)
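
A hedged driver for the A/B-test model above, lifted out of its class; the traffic sizes and outcomes are invented for illustration:

import torch
import pyro
import pyro.distributions as dist
from pyro.infer import MCMC, NUTS

traffic_size = torch.tensor([1000., 1000.])
outcome = torch.tensor([52., 61.])  # hypothetical conversion counts

def ab_model():
    control_p = pyro.sample('control_p', dist.Beta(1., 1.))
    treatment_p = pyro.sample('treatment_p', dist.Beta(1., 1.))
    return pyro.sample('obs',
                       dist.Binomial(traffic_size,
                                     torch.stack([control_p, treatment_p])),
                       obs=outcome)

mcmc = MCMC(NUTS(ab_model), num_samples=300, warmup_steps=100)
mcmc.run()
samples = mcmc.get_samples()
# posterior probability that treatment beats control
print((samples['treatment_p'] > samples['control_p']).float().mean())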
Example #13
def model_current(data):
    # define the hyperparameters that control the beta prior
    alpha0 = torch.tensor(10.0)
    beta0 = torch.tensor(10.0)
    f = pyro.sample("prior_current", dist.Beta(alpha0, beta0))

    # Score each observed value under the Binomial likelihood
    for i in range(len(data)):
        pyro.sample(f'obs_prop_{i}', dist.Binomial(probs=f), obs=data[i])
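
Note that dist.Binomial(probs=f) leaves total_count at its default of 1, so each observation is scored as a single trial, equivalent to a Bernoulli; a quick check:

import torch
import pyro.distributions as dist

f = torch.tensor(0.6)
x = torch.tensor(1.)
print(dist.Binomial(probs=f).log_prob(x))   # log(0.6)
print(dist.Bernoulli(probs=f).log_prob(x))  # same value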
Example #14
def model(data, params):
    # initialize data
    data = {k: torch.tensor(v).float() for k, v in data.items()}
    n = data["n"]
    k = data["k"]
    # model block
    theta = pyro.sample("theta", dist.Beta(1., 1.))
    thetaprior = pyro.sample("thetaprior", dist.Beta(1., 1.))  # prior-only draw, e.g. for prior/posterior comparison
    k = pyro.sample("k", dist.Binomial(n, theta), obs=k)
Example #15
def test_binomial_approx_sample(total_count, prob):
    sample_shape = (10000, )
    d = dist.Binomial(total_count, prob)
    expected = d.sample(sample_shape)
    with set_approx_sample_thresh(200):
        actual = d.sample(sample_shape)

    assert_close(expected.mean(), actual.mean(), rtol=0.05)
    assert_close(expected.std(), actual.std(), rtol=0.05)
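
A sketch of the same threshold outside the test harness, assuming (as in Pyro's tests) that the helper is importable from pyro.distributions.util; above the threshold, sampling switches to a cheap moment-matched approximation:

import torch
import pyro.distributions as dist
from pyro.distributions.util import set_approx_sample_thresh

d = dist.Binomial(torch.tensor(10000.), torch.tensor(0.3))
with set_approx_sample_thresh(100):
    approx = d.sample((1000,))  # total_count exceeds 100, so approximate
print(approx.mean())  # close to 3000 in expectation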
Example #16
def model():
    # define the hyperparameters that control the beta prior
    alpha0 = torch.tensor(10.0)
    beta0 = torch.tensor(10.0)
    f = pyro.sample("latent_fairness", dist.Beta(alpha0, beta0))

    # Draw a single Binomial trial given the latent fairness; note that no
    # obs= is passed, so nothing is conditioned on here
    lkl = pyro.sample('obs', dist.Binomial(probs=f))
    return lkl
Example #17
 def model1():
     c1 = pyro.param("c1",
                     torch.tensor(0.5),
                     constraint=constraints.positive)
     c0 = pyro.param("c0",
                     torch.tensor(1.5),
                     constraint=constraints.positive)
     with poutine.collapse():
         probs = pyro.sample("probs", dist.Beta(c1, c0))
         pyro.sample("obs", dist.Binomial(total_count, probs), obs=data)
Example #18
def test_binomial_approx_log_prob(tol):
    logits = torch.linspace(-10.0, 10.0, 100)
    k = torch.arange(100.0).unsqueeze(-1)
    n_minus_k = torch.arange(100.0).unsqueeze(-1).unsqueeze(-1)
    n = k + n_minus_k

    expected = torch.distributions.Binomial(n, logits=logits).log_prob(k)
    with set_approx_log_prob_tol(tol):
        actual = dist.Binomial(n, logits=logits).log_prob(k)

    assert_close(actual, expected, atol=tol)
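
The logits parameterization used above relates to probs by logits = log(p / (1 - p)); a small consistency check with arbitrary values:

import torch
import pyro.distributions as dist

p = torch.tensor(0.2)
k = torch.tensor(3.)
d_probs = dist.Binomial(10, probs=p)
d_logits = dist.Binomial(10, logits=(p / (1 - p)).log())
print(d_probs.log_prob(k), d_logits.log_prob(k))  # agree up to numerics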
Example #19
def test_posterior_predictive_svi_auto_delta_guide(parallel):
    true_probs = torch.ones(5) * 0.7
    num_trials = torch.ones(5) * 1000
    num_success = dist.Binomial(num_trials, true_probs).sample()
    conditioned_model = poutine.condition(model, data={"obs": num_success})
    guide = AutoDelta(conditioned_model)
    svi = SVI(conditioned_model, guide, optim.Adam(dict(lr=1.0)), Trace_ELBO())
    for i in range(1000):
        svi.step(num_trials)
    posterior_predictive = Predictive(model, guide=guide, num_samples=10000, parallel=parallel)
    marginal_return_vals = posterior_predictive.get_samples(num_trials)["obs"]
    assert_close(marginal_return_vals.mean(dim=0), torch.ones(5) * 700, rtol=0.05)
Example #20
def model(data):
    param = pyro.sample("var1", dist.Uniform(20, 50))
    Recruiters = pyro.sample("var2", dist.Poisson(param))
    percentile = pyro.sample("var3", dist.Uniform(0, 1))
    if percentile > 0.95:
        GPA = 4
    else:
        GPA = pyro.sample("var4", dist.Normal(2.75, 0.5))

    # The else branch ensures Interviews is always defined, even if the
    # sampled GPA exceeds 4.
    if GPA == 4:
        Interviews = dist.Binomial(Recruiters, 0.9).sample()
    else:
        Interviews = dist.Binomial(Recruiters, 0.6).sample()

    for n in range(1, 2):
        with pyro.plate("data"):  # pyro.plate supersedes the deprecated pyro.iarange
            pyro.sample("obs",
                        dist.Binomial(Interviews, 0.4),
                        obs=data['offers'][n])
Example #21
def test_beta_binomial(sample_shape, batch_shape):
    concentration1 = torch.randn(batch_shape).exp()
    concentration0 = torch.randn(batch_shape).exp()
    total = 10
    obs = dist.Binomial(total, 0.2).sample(sample_shape + batch_shape)

    f = dist.Beta(concentration1, concentration0)
    g = dist.Beta(1 + obs, 1 + total - obs)
    fg, log_normalizer = f.conjugate_update(g)

    x = fg.sample(sample_shape)
    assert_close(f.log_prob(x) + g.log_prob(x), fg.log_prob(x) + log_normalizer)
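
conjugate_update returns the normalized product of the two Beta densities plus its log-normalizer; for Betas, the product sums the concentrations minus one. A scalar check with assumed values:

import torch
import pyro.distributions as dist

f = dist.Beta(torch.tensor(0.5), torch.tensor(1.5))  # prior
g = dist.Beta(torch.tensor(4.0), torch.tensor(8.0))  # e.g. obs=3, total=10
fg, log_normalizer = f.conjugate_update(g)
print(fg.concentration1, fg.concentration0)  # 3.5, 8.5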
Example #22
def model(data):
    # define the hyperparameters that control the beta prior
    alpha0 = torch.tensor(10.0)
    beta0 = torch.tensor(10.0)
    # register a latent variable named "latent_fairness" as a learnable value for Pyro
    f = pyro.sample("latent_fairness", dist.Beta(alpha0, beta0))

    # Condition the model on the observed data: register every observation
    # by scoring it under the Binomial likelihood
    for i in range(len(data)):
        sensor = pyro.sample(f'obs_{i}', dist.Binomial(probs=f), obs=data[i])
Example #23
def test_posterior_predictive():
    true_probs = torch.ones(5) * 0.7
    num_trials = torch.ones(5) * 1000
    num_success = dist.Binomial(num_trials, true_probs).sample()
    conditioned_model = poutine.condition(model, data={"obs": num_success})
    nuts_kernel = NUTS(conditioned_model, adapt_step_size=True)
    mcmc_run = MCMC(nuts_kernel, num_samples=1000,
                    warmup_steps=200).run(num_trials)
    posterior_predictive = TracePredictive(model, mcmc_run,
                                           num_samples=10000).run(num_trials)
    marginal_return_vals = EmpiricalMarginal(posterior_predictive)
    assert_equal(marginal_return_vals.mean, torch.ones(5) * 700, prec=30)
Example #24
def test_beta_binomial_log_prob(total_count, shape):
    concentration0 = torch.randn(shape).exp()
    concentration1 = torch.randn(shape).exp()
    value = torch.arange(1. + total_count)

    num_samples = 100000
    probs = dist.Beta(concentration1, concentration0).sample((num_samples, ))
    log_probs = dist.Binomial(total_count, probs).log_prob(value)
    expected = log_probs.logsumexp(0) - math.log(num_samples)

    actual = BetaBinomial(concentration1, concentration0,
                          total_count).log_prob(value)
    assert_close(actual, expected, rtol=0.02)
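
The Monte Carlo average above estimates the analytic Beta-Binomial marginal; a standalone comparison with assumed parameters:

import torch
import pyro.distributions as dist

c1, c0, total = torch.tensor(2.0), torch.tensor(3.0), 10
value = torch.arange(1. + total)

analytic = dist.BetaBinomial(c1, c0, total).log_prob(value)
probs = dist.Beta(c1, c0).sample((200000, 1))
mc = (dist.Binomial(total, probs).log_prob(value).logsumexp(0)
      - torch.tensor(200000.).log())
print((analytic - mc).abs().max())  # small, up to Monte Carlo error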
Example #25
def test_init():
    total = 10
    counts = dist.Binomial(total, 0.3).sample()
    concentration1 = torch.tensor(0.5)
    concentration0 = torch.tensor(1.5)

    prior = dist.Beta(concentration1, concentration0)
    likelihood = dist.Beta(1 + counts, 1 + total - counts)

    def model():
        x = pyro.sample("x", prior)
        pyro.sample("counts", dist.Binomial(total, x), obs=counts)
        return x

    check_init_reparam(model, ConjugateReparam(likelihood))
Example #26
def test_posterior_predictive_svi_auto_diag_normal_guide(return_trace):
    true_probs = torch.ones(5) * 0.7
    num_trials = torch.ones(5) * 1000
    num_success = dist.Binomial(num_trials, true_probs).sample()
    conditioned_model = poutine.condition(model, data={"obs": num_success})
    guide = AutoDiagonalNormal(conditioned_model)
    svi = SVI(conditioned_model, guide, optim.Adam(dict(lr=0.1)), Trace_ELBO())
    for i in range(1000):
        svi.step(num_trials)
    posterior_predictive = Predictive(model, guide=guide, num_samples=10000, parallel=True)
    if return_trace:
        marginal_return_vals = posterior_predictive.get_vectorized_trace(num_trials).nodes["obs"]["value"]
    else:
        marginal_return_vals = posterior_predictive.get_samples(num_trials)["obs"]
    assert_close(marginal_return_vals.mean(dim=0), torch.ones(5) * 700, rtol=0.05)
Example #27
def model(data, params):
    # initialize data
    N = data["N"]
    n = data["n"]
    r = data["r"]
    x = data["x"]
    # initialize transformed data
    centered_x = data["centered_x"]
    mean_x = data["mean_x"]

    # initialize transformed parameters
    alpha_star = pyro.sample("alpha_star", dist.Normal(0., 1.0))
    beta = pyro.sample("beta", dist.Normal(0., 10000.0))
    with pyro.plate('data', N):
        p = dist.Normal(0., 1.).cdf(alpha_star + beta * centered_x)
        r = pyro.sample("r", dist.Binomial(n, p), obs=r)
Example #28
 def guide(self,
           lengths=None,
           sequences=None,
           expected_string_length: int = 5):
     pyro.module('gru', self.gru)
     pyro.module('neural_emitter', self.neural_emitter)
     binom_prob_v = pyro.param('binom_prob_v',
                               torch.tensor(expected_string_length /
                                            self.smct.max_chain_length),
                               constraint=constraints.unit_interval)
     binom_prob = pyro.sample('binom_prob', dist.Delta(binom_prob_v))
     if lengths is None:
         lengths = pyro.sample(
             'lengths', dist.Binomial(self.smct.max_chain_length,
                                      binom_prob)).unsqueeze(-1)
     sequence_size = 1 if sequences is None else sequences.size(0)
     initial_pseudocounts = pyro.param('initial_pseudocounts',
                                       torch.ones(self.smct.alphabet_size,
                                                  dtype=torch.float,
                                                  device=lengths.device),
                                       constraint=constraints.interval(
                                           1, 100))
     with pyro.plate('sequences', size=sequence_size, dim=-2) as batch:
         for t in pyro.markov(range(self.smct.max_chain_length),
                              history=self.smct.order):
             if t == 0:
                 probs_t = pyro.sample(
                     f'probs_{t}',
                     dist.Dirichlet(
                         initial_pseudocounts.unsqueeze(-2)).to_event())
                 h_t = torch.randn(self.gru.num_layers,
                                   sequence_size,
                                   self.gru.hidden_size,
                                   dtype=torch.float,
                                   device=lengths.device)
             else:
                 if sequences is not None:
                     x_t = nnf.one_hot(
                         sequences[batch, t - 1:t],
                         num_classes=self.smct.alphabet_size).float()
                 else:
                     x_t = dist.OneHotCategorical(probs_t).sample()
                 gru_out_t, h_t = self.gru.forward(x_t, h_t)
                 pseudo_counts_t = self.neural_emitter.forward(gru_out_t)
                 probs_t = pyro.sample(f'probs_{t}',
                                       dist.Dirichlet(pseudo_counts_t))
Example #29
def model(data, params):
    # XXX: this model currently NaNs
    # initialize data
    G = data["G"]
    N = data["N"]
    r = data["r"]
    n = data["n"]

    # model block
    with pyro.plate('a_', G, dim=-2):
        mu = pyro.sample('mu', dist.Uniform(0., 1.))
        a_plus_b = pyro.sample('a_plus_b', dist.Pareto(0.1, 1.5))
        a = mu * a_plus_b
        b = (1 - mu) * a_plus_b
        with pyro.plate('data', N, dim=-1):
            p = pyro.sample('p', dist.Beta(a, b))
            r = pyro.sample('r', dist.Binomial(n, p), obs=r)
Example #30
def test_posterior_predictive_svi_manual_guide(parallel):
    true_probs = torch.ones(5) * 0.7
    num_trials = torch.ones(5) * 1000
    num_success = dist.Binomial(num_trials, true_probs).sample()
    conditioned_model = poutine.condition(model, data={"obs": num_success})
    elbo = Trace_ELBO(num_particles=100, vectorize_particles=True)
    svi = SVI(conditioned_model, beta_guide, optim.Adam(dict(lr=1.0)), elbo)
    for i in range(1000):
        svi.step(num_trials)
    posterior_predictive = Predictive(
        model,
        guide=beta_guide,
        num_samples=10000,
        parallel=parallel,
        return_sites=["_RETURN"],
    )
    marginal_return_vals = posterior_predictive(num_trials)["_RETURN"]
    assert_close(marginal_return_vals.mean(dim=0), torch.ones(5) * 700, rtol=0.05)