Example no. 1
def main(args):
    """Infer the coefficient of friction mu via importance sampling.

    Uses the model's prior as the proposal distribution, draws
    ``args.num_samples`` posterior samples of mu, and reports the
    posterior mean/std together with simulated and analytic descent
    times for comparison.
    """
    # create an importance sampler (the prior is used as the proposal distribution)
    posterior = Importance(model, guide=None, num_samples=args.num_samples)
    # create a marginal object that consumes the raw execution traces provided by the importance sampler
    marginal = Marginal(posterior)
    # get posterior samples of mu (which is the return value of model)
    print("doing importance sampling...")
    posterior_samples = [marginal(observed_data) for _ in range(args.num_samples)]

    # calculate statistics over posterior samples
    # (concatenate once instead of once per statistic)
    samples = torch.cat(posterior_samples)
    posterior_mean = torch.mean(samples)
    posterior_std_dev = torch.std(samples, 0)

    # report results; .item() extracts the Python scalar and, unlike the
    # deprecated .data[0] indexing, works on the 0-dim tensor that
    # torch.mean returns
    inferred_mu = posterior_mean.item()
    inferred_mu_uncertainty = posterior_std_dev.item()
    print("the coefficient of friction inferred by pyro is %.3f +- %.3f" %
          (inferred_mu, inferred_mu_uncertainty))

    # note that, given the finite step size in the simulator, the simulated descent times will
    # not precisely match the numbers from the analytic result.
    # in particular the first two numbers reported below should match each other pretty closely
    # but will be systematically off from the third number
    print("the mean observed descent time in the dataset is: %.4f seconds" %
          observed_mean)
    print(
        "the (forward) simulated descent time for the inferred (mean) mu is: %.4f seconds"
        % simulate(posterior_mean).item())
    print((
        "disregarding measurement noise, elementary calculus gives the descent time\n"
        + "for the inferred (mean) mu as: %.4f seconds") %
          analytic_T(posterior_mean.item()))
Example no. 2
def alice(preference, depth):
    """
    Alice decides where to go by reasoning about Bob's choice
    """
    # Alice's own prior over locations, given her preference.
    alice_prior = location(preference)
    # Marginalize over Bob's decision process (one level shallower),
    # conditioning his choice on matching Alice's prior.
    bob_marginal = Marginal(Search(bob))
    return pyro.sample(
        "bob_choice", bob_marginal, preference, depth - 1, obs=alice_prior
    )
Example no. 3
def bob(preference, depth):
    """
    Bob decides where to go by reasoning about Alice's choice
    """
    bob_prior = location(preference)
    # Base case: with no recursion budget left, Bob follows his own prior.
    if depth <= 0:
        return bob_prior
    # Otherwise marginalize over Alice's decision process, conditioning
    # her choice on matching Bob's prior.
    alice_marginal = Marginal(Search(alice))
    return pyro.sample(
        "alice_choice", alice_marginal, preference, depth, obs=bob_prior
    )
Example no. 4
def alice_fb(preference, depth):
    """
    Alice's actual decision process:
    Alice decides where to go by reasoning about Bob's choice
    and choosing the other location.
    """
    # Alice's own prior over locations, given her preference
    # (presumably a 0/1 encoding of the two locations — see the return).
    alice_prior = location(preference)
    # Condition Bob's marginalized choice on matching Alice's prior;
    # the sampled value is discarded — only the conditioning matters.
    pyro.sample(
        "bob_choice", Marginal(Search(bob)), preference, depth - 1, obs=alice_prior
    )
    # Alice then actually goes to the *other* location.
    return 1 - alice_prior
Example no. 5
def main(args):
    """Estimate the probability that Bob chooses his preferred location.

    Draws ``args.num_samples`` samples from Bob's marginalized decision
    process at recursion depth ``args.depth`` and prints the empirical
    frequency of the preferred location.
    """
    # We sample Bob's choice of location by marginalizing
    # over his decision process.
    bob_decision = Marginal(Search(bob))

    # Here Alice and Bob slightly prefer one location over the other a priori
    shared_preference = torch.tensor([args.preference])

    bob_depth = args.depth
    num_samples = args.num_samples

    # draw num_samples samples from Bob's decision process
    # and use those to estimate the marginal probability
    # that Bob chooses their preferred location
    # (generator expression: no throwaway list, unused index dropped)
    bob_prob = sum(
        bob_decision(shared_preference, bob_depth) for _ in range(num_samples)
    ) / float(num_samples)

    print("Empirical frequency of Bob choosing their favored location " +
          "given preference {} and recursion depth {}: {}".format(
              shared_preference, bob_depth, bob_prob))
Example no. 6 — File: svb.py, Project: meobet/vne
 def posterior_latent(self, x, num_traces=100, num_samples=100):
     """Draw posterior latent samples for input ``x``.

     Runs importance sampling with ``num_traces`` traces per marginal
     evaluation and stacks ``num_samples`` draws into one tensor.
     """
     sampler = Importance(self.pyro_model, self.pyro_guide, num_traces)
     marginal_fn = Marginal(sampler)
     draws = [marginal_fn(x) for _ in range(num_samples)]
     return torch.stack(draws)