# Example 1
def alice(preference, depth):
    """
    Alice's decision model: she reasons recursively about where Bob will
    go, then conditions his choice on her own prior preference.
    """
    own_choice = location(preference)
    # Hide Bob's internal sample sites from Alice's trace while we
    # marginalize over his (one-level-shallower) decision process.
    with poutine.block():
        bob_posterior = Search(bob).run(preference, depth - 1)
        bob_dist = HashingMarginal(bob_posterior)
    return pyro.sample("bob_choice", bob_dist, obs=own_choice)
# Example 2
def alice_fb(preference, depth):
    """
    Alice's actual decision process:
    she reasons about where Bob will choose to go and then deliberately
    picks the other location.
    """
    own_choice = location(preference)
    # Marginalize over Bob's decision process without exposing his
    # sample sites to the enclosing trace.
    with poutine.block():
        bob_posterior = Search(bob).run(preference, depth - 1)
        bob_dist = HashingMarginal(bob_posterior)
    # Condition Bob's choice on Alice's prior ...
    pyro.sample("bob_choice", bob_dist, obs=own_choice)
    # ... but return the opposite location (locations are 0/1-coded).
    return 1 - own_choice
# Example 3
def bob(preference, depth):
    """
    Bob's decision model: at positive recursion depth he reasons about
    Alice's choice; at the base case he just follows his own prior.
    """
    own_choice = location(preference)
    # Base case: no more recursive reasoning, act on the prior alone.
    if depth <= 0:
        return own_choice
    # Otherwise marginalize over Alice's decision process (her sample
    # sites are blocked from this trace) and condition on Bob's prior.
    with poutine.block():
        alice_dist = HashingMarginal(Search(alice).run(preference, depth))
    return pyro.sample("alice_choice", alice_dist, obs=own_choice)
# Example 4
def main(args):
    """
    Estimate, by sampling, how often Alice ends up at her favored
    location given a shared prior preference and a recursion depth.
    """
    # Here Alice and Bob slightly prefer one location over the other a priori.
    shared_preference = torch.tensor([args.preference])
    alice_depth = args.depth
    num_samples = args.num_samples

    # Marginal over Alice's (feedback) decision process: sampling from it
    # yields her actual choice of location.
    alice_decision = HashingMarginal(
        Search(alice_fb).run(shared_preference, alice_depth))

    # Monte Carlo estimate of the probability that Alice chooses her
    # preferred location.
    draw_total = sum(alice_decision() for _ in range(num_samples))
    alice_prob = draw_total / float(num_samples)

    print("Empirical frequency of Alice choosing their favored location " +
          "given preference {} and recursion depth {}: {}"
          .format(shared_preference, alice_depth, alice_prob))
# Example 5
def main(args):
    """
    Estimate, by sampling, how often Bob ends up at his favored location
    given a shared prior preference and a recursion depth.
    """
    # Here Alice and Bob slightly prefer one location over the other a priori.
    shared_preference = torch.tensor([args.preference])
    bob_depth = args.depth
    num_samples = args.num_samples

    # Marginal over Bob's decision process: sampling from it yields his
    # choice of location.
    bob_decision = HashingMarginal(Search(bob).run(shared_preference, bob_depth))

    # Debug aid: dump the exact marginal probabilities before sampling.
    exact_probs = bob_decision._dist_and_values()[0].probs
    print("bob prob", exact_probs)

    # Monte Carlo estimate of the probability that Bob chooses his
    # preferred location.
    draw_total = sum(bob_decision() for _ in range(num_samples))
    bob_prob = draw_total / float(num_samples)

    print("Empirical frequency of Bob choosing their favored location " +
          "given preference {} and recursion depth {}: {}"
          .format(shared_preference, bob_depth, bob_prob))
# Example 6
def Marginal(fn):
    """Wrap a stochastic function *fn* as a memoized marginal: calling the
    result with some arguments returns the (cached) marginal distribution
    over fn's return value, computed by exhaustive search."""
    def _marginalized(*args):
        return HashingMarginal(Search(fn).run(*args))
    return memoize(_marginalized)
# Example 7
 def shawarma(*args, **kwargs):
     # Marginalize over `fn` (a stochastic function captured from the
     # enclosing scope, not visible here) by exhaustive search, forwarding
     # all arguments; the high max_tries bound allows deep/branchy models.
     # NOTE(review): the enclosing closure that binds `fn` is outside this
     # chunk — confirm it is a decorator-style wrapper before refactoring.
     return HashingMarginal(
         Search(fn, max_tries=int(1e6)).run(*args, **kwargs))