Example #1
def testAuxWithoutLatent():
#    raise(SkipTest('for quick results.'))
    "Checks the posterior distribution on a Gaussian given an unlikely observation"
    ripl = get_ripl()
    ripl.assume("a", "(scope_include (quote A) 0 (normal 10.0 1.0))", label="pid")
    ripl.observe("(scope_include (quote B) 0 (normal a 1.0))", 20.0)
    
    # fake mu1=9, so that assumed_gibbs is doomed to fail the statistical test
    MyNormalBiasedPosterior = partial(NormalExactPosterior, mu1=9, sigma1=1.0, sigma2=1.0)
    ripl.register_proposal_program_class("MyNormalBiasedPosterior", MyNormalBiasedPosterior)
    
    # Posterior for a is normal with mean 15, precision 2
    #  ripl.predict("(normal a 1.0)")
    proposal_src = """
        [declare {
        "name":"normalbiased",
        "class":"MyNormalBiasedPosterior",
        "conditioned":[["B",0]],
        "target":[["A",0]],
        "ready":"yes",
        "num_samples":0}]
        """
    ripl.execute_program(proposal_src)
    predictions = collectSamples(ripl,"pid",infer="(custommh normalbiased aux 1 20)")
    cdf = stats.norm(loc=15, scale=math.sqrt(0.5)).cdf
    return reportKnownContinuous(cdf, predictions, "N(15,sqrt(0.5))")
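
The expected cdf above is the standard conjugate normal-normal update: a N(10, 1) prior on a and a N(a, 1) likelihood observed at 20.0 give a posterior with mean 15 and variance 0.5. A minimal standalone sketch of that arithmetic (plain Python, no Venture dependency; normal_normal_posterior is an illustrative helper, not part of the test module) also shows why, assuming NormalExactPosterior computes this same update from its (mu1, sigma1, sigma2) parameters, the deliberately biased mu1=9 proposal centers on 14.5 instead of 15:

def normal_normal_posterior(mu0, sigma0, sigma, y):
    # Conjugate update for a N(mu0, sigma0^2) prior and a N(a, sigma^2) likelihood.
    prec = 1.0 / sigma0 ** 2 + 1.0 / sigma ** 2         # posterior precision
    mean = (mu0 / sigma0 ** 2 + y / sigma ** 2) / prec  # precision-weighted mean
    return mean, (1.0 / prec) ** 0.5                    # (mean, standard deviation)

print(normal_normal_posterior(10.0, 1.0, 1.0, 20.0))  # (15.0, 0.707...), the test's cdf
print(normal_normal_posterior(9.0, 1.0, 1.0, 20.0))   # (14.5, 0.707...), the biased proposal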
Example #2
def testCustomTrainer():
#    raise(SkipTest('for quick results.'))
    "Train a proposal to inverse a QMR model with rare diseases, using custom trainer"
    ripl = get_ripl()
    ripl.assume("d0", "(scope_include (quote D) 0 (bernoulli 0.01))", label="D0")
    ripl.assume("d1", "(scope_include (quote D) 1 (bernoulli 0.01))", label="D1")
    ripl.assume("d2", "(scope_include (quote D) 2 (bernoulli 0.005))", label="D2")
    ripl.assume("joint", "(+ (* d0 4) (* d1 2) d2)", label="pid")
    ripl.observe("(scope_include (quote S) 0 (bernoulli (- 1.0001 (pow 0.5 (+ d0 d1)))))", 1.0)
    ripl.observe("(scope_include (quote S) 1 (bernoulli (- 1.0001 (pow 0.5 (+ d0 d2)))))", 1.0)
    ripl.observe("(scope_include (quote S) 2 (bernoulli (- 1.0001 (pow 0.5 (+ d1 d2)))))", 1.0)
    
    ripl.register_proposal_program_class("LogisticRegressionProposalProgram", LogisticRegressionProposalProgram)
    ripl.register_trainer_src("QMR_highprior", QMR_highprior)
    
    # Posterior for d2 is Bernoulli with p(d2=1) = 0.5
    proposal_src = """
        [declare {
        "name":"logreg",
        "class":"LogisticRegressionProposalProgram",
        "conditioned":[["S",0], ["S",1], ["S",2]],
        "target":[["D",0], ["D",1], ["D",2]],
        "trainer":"QMR_highprior",
        "num_samples":1000}]
        """
    ripl.execute_program(proposal_src)
    predictions = collectSamples(ripl,"pid",infer="(custommh logreg aux 1 5)")
    ans = [(0, 0.000001), (1, 0.003), (2, 0.006), (3, 0.25), (4, 0.006), (5, 0.25), (6, 0.5), (7, 0.004)]
    return reportKnownDiscrete(ans, predictions)
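
The ans table above lists approximate posterior weights for the joint disease index 4*d0 + 2*d1 + d2. They come from enumerating the eight disease configurations under the noisy-or style symptom likelihood; a standalone sketch of that enumeration (standard library only, no Venture dependency) reproduces the shape of the table, with roughly half the mass on d0 = d1 = 1 and about a quarter each on the other two-disease states:

from itertools import product

base_rate = [0.01, 0.01, 0.005]      # prior probabilities of d0, d1, d2
def symptom_prob(k):                 # P(symptom = 1 | k of its two parent diseases active)
    return 1.0001 - 0.5 ** k

posterior = {}
for d0, d1, d2 in product([0, 1], repeat=3):
    p = 1.0
    for d, rate in zip((d0, d1, d2), base_rate):
        p *= rate if d else 1.0 - rate
    # All three symptoms were observed to be 1.
    p *= symptom_prob(d0 + d1) * symptom_prob(d0 + d2) * symptom_prob(d1 + d2)
    posterior[4 * d0 + 2 * d1 + d2] = p
total = sum(posterior.values())
for joint in sorted(posterior):
    print(joint, posterior[joint] / total)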
Example #3
def testAssumedGibbsWithLatent():
#    raise(SkipTest('for quick results.'))
    "Checks an exact proposal with latent randomness"
    ripl = get_ripl()
    ripl.assume("x", "(scope_include (quote X) 0 (normal 0 1.0))", label="pid")
    ripl.observe("(scope_include (quote Y) 0 (normal (sqrt (* x x)) 1.0))", 10.0)
    
    MyBimodalExactPosterior = partial(BimodalExactPosterior, mu1=0, sigma1=1.0, sigma2=1.0)
    ripl.register_proposal_program_class("MyBimodalExactPosterior", MyBimodalExactPosterior)
    
    # Posterior for x is bimodal, with modes at -5 and 5, both with precision 2
    #  ripl.predict("(normal x 1.0)")
    proposal_src = """
        [declare {
        "name":"bimodalexact",
        "class":"MyBimodalExactPosterior",
        "conditioned":[["Y",0]],
        "target":[["X",0]],
        "ready":"yes",
        "num_samples":0}]
        """
    ripl.execute_program(proposal_src)
    predictions = collectSamples(ripl,"pid",infer="(custommh bimodalexact assumed_gibbs 1 1)")
    cdf = (lambda x:(stats.norm(loc=-5, scale=math.sqrt(0.5)).cdf(x)+stats.norm(loc=5, scale=math.sqrt(0.5)).cdf(x))*0.5)
    return reportKnownContinuous(cdf, predictions, "0.5*N(-5,sqrt(0.5))+0.5*N(5,sqrt(0.5))")
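
The expected distribution above follows from completing the square: with x ~ N(0, 1) and an observation N(|x|, 1) = 10, the unnormalized posterior is proportional to exp(-(x - 5)^2) for x > 0 and exp(-(x + 5)^2) for x < 0, i.e. (up to the negligible mass each component puts on the wrong side of zero) an equal mixture of normals at -5 and 5, each with variance 0.5. A standalone grid check of that claim (numpy/scipy only, no Venture dependency):

import numpy as np
from scipy import stats

xs = np.linspace(-10.0, 10.0, 20001)
dx = xs[1] - xs[0]
unnorm = stats.norm(0.0, 1.0).pdf(xs) * stats.norm(np.abs(xs), 1.0).pdf(10.0)
grid_posterior = unnorm / (unnorm.sum() * dx)    # posterior normalized on the grid
mixture = 0.5 * (stats.norm(-5.0, np.sqrt(0.5)).pdf(xs)
                 + stats.norm(5.0, np.sqrt(0.5)).pdf(xs))
print(np.max(np.abs(grid_posterior - mixture)))  # effectively zero: the densities agree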
Example #4
def testAnnotateInferenceErrorInDefinedDo():
    ripl = get_ripl(persistent_inference_trace=True)
    ripl.define(
        "act", """\
(do (assume x (normal 0 1))
    (y <- (sample x))
    (observe x (+ 1 badness)))""")
    err.assert_error_message_contains(
        """\
(run act)
^^^^^^^^^
(do (assume x (normal 0 1)) (y <- (sample x)) (observe x (add 1 badness)))
                                                                ^^^^^^^
""", ripl.infer, "act")
Example #5
def testNormalWithObserve1(seed):
    # Checks the posterior distribution on a Gaussian given an unlikely
    # observation
    ripl = get_ripl(seed=seed)
    ripl.assume("a", "(normal 10.0 1.0)", label="pid")
    ripl.observe("(normal a 1.0)", 14.0)
    # Posterior for a is normal with mean 12, precision 2

    (samples, weights) = collectLikelihoodWeighted(ripl, "pid")
    for (s, w) in zip(samples, weights):
        # The weights should be determined exactly by the likelihood of the observation
        assert_almost_equal(math.exp(w), stats.norm(loc=14, scale=1).pdf(s))
    # The test points should be drawn from the prior
    return reportKnownGaussian(10, 1, samples)
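
The assertions above check the two halves of likelihood weighting separately: the samples come from the prior N(10, 1), and each weight is exactly the likelihood of the observation 14.0, so the self-normalized weighted mean converges to the conjugate posterior mean (10 + 14) / 2 = 12. A standalone sketch of the same scheme (numpy/scipy only; the names are illustrative, not from the test module):

import numpy as np
from scipy import stats

rng = np.random.default_rng(0)
samples = rng.normal(10.0, 1.0, size=200000)            # draws from the prior
weights = stats.norm(loc=samples, scale=1.0).pdf(14.0)  # likelihood of the observation
print(np.sum(weights * samples) / np.sum(weights))      # close to 12, the posterior mean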
Example #6
def testAnnotateInModelError():
    # Tests Github Issue #538.
    ripl = get_ripl()
    ripl.set_mode("venture_script")
    err.assert_error_message_contains(
        """\
*** evaluation: Nested ripl operation signalled an error
(autorun (in_model (run (new_model)) (action (run (sample (add foo 1))))))
                                                  ^^^^^^^^^^^^^^^^^^^^
Caused by
*** evaluation: Cannot find symbol 'foo'
(add foo 1.0)
     ^^^
""", ripl.evaluate, "in_model(run(new_model()), action(run(sample(foo + 1))))")
Example #7
def testAnnotateInferenceErrorInDo():
    # TODO I need the inference trace to be persistent to trigger the
    # inference prelude did skipping hack :(
    ripl = get_ripl(persistent_inference_trace=True)
    expression = """\
(do (assume x (normal 0 1))
    (observe x (+ 1 badness)))"""
    err.assert_error_message_contains(
        """\
(run (do (assume x (normal 0 1)) (observe x (add 1 badness))))
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
(run (do (assume x (normal 0 1)) (observe x (add 1 badness))))
                                                   ^^^^^^^
""", ripl.infer, expression)
Example #8
def testHMMResampleSmoke():
  ripl = get_ripl()
  ripl.assume("f","""
(make_lazy_hmm
 (simplex 0.5 0.5)
 (matrix (array (array 0.7 0.3)
                (array 0.3 0.7)))
 (matrix (array (array 0.9 0.2)
                (array 0.1 0.8))))
""")
  ripl.observe("(f 1)","integer<0>")
  ripl.predict("(f 7)")
  ripl.infer("(resample 3)")
  ripl.infer("(mh default one 10)")
Example #9
    def check(mode):
        with tempfile.NamedTemporaryFile(prefix='serialized.ripl') as f:
            v1 = get_ripl()
            v1.assume('is_tricky', '(flip 0.2)')
            v1.assume('theta', '(if is_tricky (beta 1.0 1.0) 0.5)')
            v1.assume('flip_coin', '(lambda () (flip theta))')
            v1.observe('(flip_coin)', 'true')

            v1.infer(1)
            result1 = v1.predict('theta', label='theta_prediction')

            call(v1, 'save', f.name, mode)

            v2 = get_ripl()
            call(v2, 'load', f.name, mode)
            result2 = v2.report('theta_prediction')
            result3 = v2.predict('theta')

            assert result1 == result2 and result1 == result3

            text1 = v1.get_text(1)
            text2 = v2.get_text(1)
            assert text1 == text2
Example #10
def testBinomial1(seed):
    # A simple test that checks the interface of binomial and its
    # simulate method
    ripl = get_ripl(seed=seed)

    p = 0.3
    n = 4
    ripl.assume("p", "(if (flip) %f %f)" % (p, p))
    ripl.predict("(binomial %d p)" % n, label="pid")

    predictions = collectSamples(ripl, "pid")
    ans = [(x, scipy.stats.binom.pmf(x, n, p)) for x in range(n + 1)]
    assert_almost_equal(sum([xx[1] for xx in ans]), 1)
    return reportKnownDiscrete(ans, predictions)
Example #11
def testResampling1(seed):
    P = 10
    ripl = get_ripl(seed=seed)

    def a_sample():
        ripl.clear()
        ripl.infer("(resample %d)" % P)
        ripl.assume("x", "(normal 0 1)")
        ripl.observe("(normal x 1)", 2)
        ripl.infer("(resample 1)")
        return ripl.sample("x")

    predictions = [a_sample() for _ in range(default_num_samples())]
    return reportKnownGaussian(1, math.sqrt(0.5), predictions)
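
The expected answer is again a conjugate update: a N(0, 1) prior on x and a N(x, 1) observation of 2 give a posterior with mean 1 and variance 0.5. The resample/observe/resample(1) pattern reads like sampling-importance-resampling over 10 particles; a standalone sketch under that assumption (numpy/scipy only; sir_sample is an illustrative name, and the small per-run particle count leaves a slight finite-particle bias):

import numpy as np
from scipy import stats

rng = np.random.default_rng(0)

def sir_sample(num_particles=10):
    xs = rng.normal(0.0, 1.0, size=num_particles)  # prior particles
    w = stats.norm(loc=xs, scale=1.0).pdf(2.0)     # weight by the likelihood of observing 2.0
    return rng.choice(xs, p=w / w.sum())           # resample down to a single particle

draws = np.array([sir_sample() for _ in range(20000)])
print(draws.mean(), draws.var())  # roughly 1.0 and 0.5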
Example #12
def check_beta_bernoulli(maker, action, seed):
    if maker == "make_uc_beta_bernoulli" and action in [
            'serialize', 'convert_lite', 'convert_puma'
    ]:
        raise SkipTest(
            "Cannot convert BetaBernoulliSP to a stack dictionary. Issue: https://app.asana.com/0/9277420529946/16149214487233"
        )
    v = get_ripl(seed=seed)
    v.assume('a', '(normal 10.0 1.0)')
    v.assume('f', '({0} a a)'.format(maker))
    v.predict('(f)', label='pid')
    for _ in range(20):
        v.observe('(f)', 'true')
    return _test_serialize_program(v, 'pid', action)
Example #13
def checkEnumerativeGibbsXOR2(in_parallel, seed):
    # Tests that an XOR chain mixes with enumerative gibbs.
    ripl = get_ripl(seed=seed)

    ripl.assume("x", "(tag 0 0 (bernoulli 0.0015))", label="pid")
    ripl.assume("y", "(tag 0 0 (bernoulli 0.0005))")
    ripl.assume("noisy_true",
                "(lambda (pred noise) (flip (if pred 1.0 noise)))")
    ripl.observe("(noisy_true (= (+ x y) 1) .000001)", "true")
    infer = "(gibbs 0 0 %s %s)" % \
        (default_num_transitions_per_sample(), in_parallel)
    predictions = collectSamples(ripl, "pid", infer=infer)
    ans = [(True, .75), (False, .25)]
    return reportKnownDiscrete(ans, predictions)
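
The 3:1 answer above is easiest to see by enumerating the four joint states: the observation effectively rules out x = y, and the two surviving states are weighted by their prior probabilities 0.0015 and 0.0005. A standalone enumeration (plain Python, no Venture dependency):

posterior = {}
for x in (False, True):
    for y in (False, True):
        prior = (0.0015 if x else 0.9985) * (0.0005 if y else 0.9995)
        # noisy_true: flip(1.0) when exactly one of x, y is true, else flip(1e-6)
        likelihood = 1.0 if int(x) + int(y) == 1 else 0.000001
        posterior[(x, y)] = prior * likelihood
total = sum(posterior.values())
print(sum(p for (x, _), p in posterior.items() if x) / total)  # about 0.75, matching ans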
Example #14
def testMVNormalRandomWalkSoundness(seed):
    # This exercises the subtlety involving block proposals and delta
    # kernels described in the "joint-delta-kernels" footnote in
    # doc/on-latents.md.
    r = get_ripl(seed=seed)
    r.assume("mean", "(multivariate_normal (array 0) (id_matrix 1))")
    r.assume("y", "(multivariate_normal mean (id_matrix 1))")
    predictions = [
        c[0] for c in collectSamples(r,
                                     "y",
                                     infer="(resimulation_mh default all 50)",
                                     num_samples=default_num_samples(10))
    ]
    return reportKnownGaussian(0, math.sqrt(2), predictions)
Example #15
def testCycleKernel(seed):
    # Same example as testBlockingExample0, but a cycle kernel that
    # covers everything should solve it
    ripl = get_ripl(seed=seed)

    ripl.assume("a", "(tag 0 0 (normal 10.0 1.0))", label="pid")
    ripl.assume("b", "(tag 1 1 (normal a 1.0))")
    ripl.observe("(normal b 1.0)", 14.0)

    infer = "(repeat %s (do (mh 0 0 1) (mh 1 1 1)))" % \
            default_num_transitions_per_sample()

    predictions = collectSamples(ripl, "pid", infer=infer)
    return reportKnownGaussian(34.0 / 3.0, math.sqrt(2.0 / 3.0), predictions)
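
The expected N(34/3, sqrt(2/3)) is the usual Gaussian-chain calculation: integrating out b turns the observation into N(a, 2), so the posterior precision on a is 1 + 1/2 = 3/2 and the posterior mean is the precision-weighted average of the prior mean and the observation. A standalone sketch of that arithmetic (plain Python, illustrative names):

prior_mean, prior_var = 10.0, 1.0
obs, obs_var = 14.0, 1.0 + 1.0               # b's noise plus the observation noise
post_prec = 1.0 / prior_var + 1.0 / obs_var  # = 1.5
post_mean = (prior_mean / prior_var + obs / obs_var) / post_prec
print(post_mean, 1.0 / post_prec)            # 11.333... (= 34/3) and 0.666... (= 2/3)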
Example #16
def checkBrushScope(operator):
    # Check that putting scope control in the brush doesn't cause
    # particle Gibbs to crash.
    ripl = get_ripl()
    ripl.assume("x1", "(tag (quote state) 0 (normal 1 1))")
    ripl.assume(
        "t", "1")  # This variable matters to get the block id into the brush.
    ripl.assume(
        "x2", """
(if (> x1 1)
    (tag (quote state) t (normal 2 1))
    (tag (quote state) t (normal 0 1)))
""")
    ripl.infer("(%s 'state ordered 4 3)" % operator)
Example #17
def testAnnotateErrorTriggeredByInferenceOverProgrammaticAssume():
    # Do not use the do macro yet
    ripl = get_ripl()
    ripl.infer("(assume control (flip))")
    ripl.infer("(force control true)")
    ripl.infer("(predict (if control 1 badness))")
    # TODO Solve the double macroexpansion problem
    err.assert_error_message_contains(
        """\
((biplex control (make_csp (quote ()) (quote 1.0)) (make_csp (quote ()) (quote badness))))
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
((biplex control (make_csp (quote ()) (quote 1.0)) (make_csp (quote ()) (quote badness))))
                                                                               ^^^^^^^
""", ripl.infer, "(resimulation_mh default one 50)")
Example #18
def testOccasionalRejectionBrush(seed):
    # Another version, this time with explicit brush creating the mix mh
    # correction.

    # Note that in this case, the correction is sound: The number of
    # random choices available in the default one scope really is
    # changing, whereas in `testOccasionalRejection` it is not.

    # To see why, consider what transition the operator (gibbs default
    # one 1) induces on this model (assuming the current behavior of
    # always claiming the proposal weight is 0).
    # - False, False is not possible
    # - From False, True:
    #   - With probability 50%, enumerate the second coin, and propose
    #     to keep it at True.
    #   - Else, enumerate the first coin, find that both states are
    #     equally good, and
    #     - With probability 50%, propose to leave it
    #     - Else, propose to change it to True, which is accepted (with
    #       or without the correction)
    #   - Ergo, move to the True state 25% of the time.
    # - From True, enumerate the first coin
    #   - With probability 50%, the second coin comes up False in the brush;
    #     propose to stay in the True state.
    #   - Else, both states equally good
    #     - With probability 50%, propose to stay in the True state
    #     - Else, propose to move to the False, True state
    #       - If the correction is applied, this proposal will be
    #         rejected with probability 50%.
    #   - Ergo, move to the False, True state 25% (no correction) or
    #     12.5% (correction) of the time.
    # - The former will induce a 50/50 stationary distribution on the
    #   value of flip1, whereas the right answer is 2:1 odds in favor of
    #   True.
    r = get_ripl(seed=seed)
    r.execute_program("""
(assume flip1 (flip))
(assume flip1_or_flip2
  (if flip1 true (flip)))
(observe (exactly flip1_or_flip2) true)
;; Reject with a non-negligible probability per transition, which would
;; cause a crash if Gibbs couldn't handle rejection
(gibbs default one 50 false)
""")
    infer = "(gibbs default one %s false)" % default_num_transitions_per_sample(
    )
    predictions = collectSamples(r, address="flip1", infer=infer)
    ans = [(True, 2.0 / 3), (False, 1.0 / 3)]
    return reportKnownDiscrete(ans, predictions)
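
The long comment above boils down to a two-state Markov chain: state A is (flip1 false, flip2 true), state B is flip1 true, with P(A -> B) = 0.25 and P(B -> A) = 0.25 without the correction or 0.125 with it. A standalone check of the resulting stationary probabilities (plain Python; stationary_prob_true is an illustrative helper):

def stationary_prob_true(p_a_to_b, p_b_to_a):
    # A two-state chain is reversible, so detailed balance gives
    # pi_B / pi_A = P(A -> B) / P(B -> A).
    return p_a_to_b / (p_a_to_b + p_b_to_a)

print(stationary_prob_true(0.25, 0.25))   # 0.5: without the correction, flip1 looks 50/50
print(stationary_prob_true(0.25, 0.125))  # 0.666...: with the correction, the expected 2:1 odds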
Example #19
def testCategorical1(seed):
    # A simple test that checks the interface of categorical and its
    # simulate method
    ripl = get_ripl(seed=seed)

    ripl.assume("x", "(categorical (simplex 0.1 0.2 0.3 0.4) (array 1 2 3 4))")
    ripl.assume("y", "(categorical (simplex 0.2 0.6 0.2) (array 1 2 3))")
    ripl.predict("(+ x y)", label="pid")

    predictions = collectSamples(ripl, "pid")
    ans = [(2, 0.1 * 0.2), (3, 0.1 * 0.6 + 0.2 * 0.2),
           (4, 0.1 * 0.2 + 0.2 * 0.6 + 0.3 * 0.2),
           (5, 0.2 * 0.2 + 0.3 * 0.6 + 0.4 * 0.2), (6, 0.3 * 0.2 + 0.4 * 0.6),
           (7, 0.4 * 0.2)]
    return reportKnownDiscrete(ans, predictions)
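
The ans table above is just the convolution of the two categorical distributions; a standalone recomputation (standard library only):

from collections import defaultdict

px = {1: 0.1, 2: 0.2, 3: 0.3, 4: 0.4}
py = {1: 0.2, 2: 0.6, 3: 0.2}
conv = defaultdict(float)
for x, p in px.items():
    for y, q in py.items():
        conv[x + y] += p * q
print(sorted(conv.items()))  # the entries of ans, for sums 2 through 7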
Example #20
def testCMVN2D_mu1(seed):
    if backend_name() != "lite": raise SkipTest("CMVN in lite only")
    ripl = get_ripl(seed=seed)
    ripl.assume("m0", "(array 5.0 5.0)")
    ripl.assume("k0", "7.0")
    ripl.assume("v0", "11.0")
    ripl.assume("S0", "(matrix (array (array 13.0 0.0) (array 0.0 13.0)))")
    ripl.assume("f", "(make_niw_normal m0 k0 v0 S0)")

    ripl.predict("(f)", label="pid")

    predictions = collectSamples(ripl, "pid")

    mu1 = [p[0] for p in predictions]
    return reportKnownMean(5, mu1)
Example #21
def test_profiling_likelihoodfree():
    # Make sure profiling doesn't break with likelihood-free SPs
    class TestPSP(LikelihoodFreePSP):
        def simulate(self, args):
            x = args.operandValues()[0]
            return x + stats.distributions.norm.rvs()

    tester = typed_nr(TestPSP(), [t.NumberType()], t.NumberType())
    ripl = get_ripl()
    ripl.bind_foreign_sp('test', tester)
    prog = '''
  [ASSUME x (test 0)]
  [INFER (mh default one 10)]'''
    ripl.profiler_enable()
    ripl.execute_program(prog)
Example #22
def testGPLogscore1():
    # Is this actually a valid test? The real solution to this problem
    # (and to the corresponding bug with unincorporate) is to wrap the
    # gp in a mem. This could be done automatically I suppose, or better
    # through a library function.

    raise SkipTest(
        "GP logDensity is broken for multiple samples of the same input.")

    ripl = get_ripl()
    prep_ripl(ripl)

    ripl.assume('gp', '(exactly (make_gp zero sq_exp))')
    ripl.predict('(gp (array 0 0))')
    ripl.get_global_logscore()
Example #23
def test_gradients(seed):
    ripl = get_ripl(seed=seed)
    ripl.assume('mu_0', '(normal 0 1)')
    ripl.assume('mean', '(gp_mean_const mu_0)')
    ripl.assume('gs_expon_1',
                '(lambda () (- 0. (log_logistic (log_odds_uniform))))')
    ripl.assume('s2', '(gs_expon_1)')
    ripl.assume('alpha', '(gs_expon_1)')
    ripl.assume('cov', '(gp_cov_scale s2 (gp_cov_se alpha))')
    ripl.assume('gp', '(make_gp mean cov)')
    ripl.observe('(gp 0)', '1')
    ripl.observe('(gp 1)', '2')
    ripl.observe('(gp 2)', '4')
    ripl.observe('(gp 3)', '8')
    ripl.infer('(gradient_ascent default one 0.01 5 5)')
Example #24
def testWishartPrior1(seed):
  # Confirm that the diagonal elements of a Wishart follow a (scaled)
  # chi-squared distribution.

  if inParallel() and backend_name() == "puma":
    raise SkipTest("The Lite SPs in Puma interface is not thread-safe, and wishart comes from Lite.")

  ripl = get_ripl(seed=seed)
  ripl.assume("s", "(matrix (array (array 2 -1) (array -1 3)))")
  ripl.assume("m", "(wishart s 5)")
  ripl.predict("(lookup m (pair 0 0))", label="prediction")

  predictions = collectSamples(ripl, "prediction")
  cdf = scipy.stats.chi2(df=5, scale=2).cdf
  return reportKnownContinuous(cdf, predictions)
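
The cdf above relies on a standard fact: for M ~ Wishart(scale = S, df = n), the diagonal entry M[i, i] is a sum of n squared N(0, S[i, i]) draws, so M[0, 0] / S[0, 0] ~ chi-squared(n); with S[0, 0] = 2 and n = 5 that is chi2(df=5, scale=2). A standalone sampling check of the same fact (scipy only, no Venture dependency):

import numpy as np
from scipy import stats

S = np.array([[2.0, -1.0], [-1.0, 3.0]])
draws = stats.wishart(df=5, scale=S).rvs(size=100000, random_state=1)
stat, pvalue = stats.kstest(draws[:, 0, 0], stats.chi2(df=5, scale=2.0).cdf)
print(stat, pvalue)  # the KS p-value should not be tiny: samples match chi2(5, scale=2)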
Example #25
def testInvWishartPrior2(seed):
  # Confirm that the diagonal elements of an inverse Wishart follow an
  # inverse-gamma distribution.

  if inParallel() and backend_name() == "puma":
    raise SkipTest("The Lite SPs in Puma interface is not thread-safe, and inv_wishart comes from Lite.")

  ripl = get_ripl(seed=seed)
  ripl.assume("s", "(matrix (array (array 2 -1) (array -1 3)))")
  ripl.assume("m", "(inv_wishart s 4.2)")
  ripl.predict("(lookup m (pair 1 1))", label="prediction")

  predictions = collectSamples(ripl, "prediction")
  cdf = scipy.stats.invgamma(a=1.6, scale=1.5).cdf
  return reportKnownContinuous(cdf, predictions)
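
Similarly, the cdf above uses the marginal of an inverse-Wishart diagonal entry: for a p x p M ~ InvWishart(scale = Psi, df = nu), M[i, i] ~ InvGamma(a = (nu - p + 1)/2, scale = Psi[i, i]/2); with nu = 4.2, p = 2 and Psi[1, 1] = 3 that is InvGamma(a=1.6, scale=1.5). A standalone sampling check of that (standard, but worth spelling out) identity:

import numpy as np
from scipy import stats

Psi = np.array([[2.0, -1.0], [-1.0, 3.0]])
draws = stats.invwishart(df=4.2, scale=Psi).rvs(size=100000, random_state=1)
stat, pvalue = stats.kstest(draws[:, 1, 1], stats.invgamma(a=1.6, scale=1.5).cdf)
print(stat, pvalue)  # the KS p-value should not be tiny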
Example #26
def testReferences1(seed):
  # Checks that the program runs without crashing. At some point, this
  # program caused the old CXX backend to fire an assert.  When the
  # (flip) had a 0.0 or 1.0 it didn't fail.
  ripl = get_ripl(seed=seed)
  ripl.assume("draw_type0", "(make_crp 1.0)")
  ripl.assume("draw_type1", "(if (flip) draw_type0 (lambda () atom<1>))")
  ripl.assume("draw_type2", "(make_dir_cat (array 1.0 1.0))")
  ripl.assume("class", "(if (flip) (lambda (name) (draw_type1)) (lambda (name) (draw_type2)))")
  ripl.predict("(class 1)")
  ripl.predict("(flip)", label="pid")

  predictions = collectSamples(ripl,"pid")
  ans = [(True,0.5), (False,0.5)]
  return reportKnownDiscrete(ans, predictions)
Example #27
def checkSliceNormalWithObserve2a(slice_method, seed):
    # Checks the posterior distribution on a Gaussian given an unlikely
    # observation.  The difference between this and 1 is an extra
    # predict, which apparently has a deleterious effect on mixing.
    if (backend_name() != "lite") and (slice_method == 'slice_doubling'):
        raise SkipTest(
            "Slice sampling with doubling only implemented in Lite.")
    ripl = get_ripl(seed=seed)
    ripl.assume("a", "(normal 10.0 1.0)", label="pid")
    ripl.observe("(normal a 1.0)", 14.0)
    # Posterior for a is normal with mean 12, precision 2
    ripl.predict("(normal a 1.0)")

    predictions = myCollectSamples(ripl, slice_method)
    return reportKnownGaussian(12, math.sqrt(0.5), predictions)
Example #28
def testCollectLogScore():
  # In the presence of likelihood-free SPs, calling "collect" or
  # "printf" should not crash the program.
  class TestPSP(LikelihoodFreePSP):
    def simulate(self, args):
      x = args.operandValues()[0]
      return x + stats.distributions.norm.rvs()
  tester = typed_nr(TestPSP(), [t.NumberType()], t.NumberType())
  ripl = get_ripl()
  ripl.bind_foreign_sp('test', tester)
  prog = '''
  [ASSUME x (test 0)]
  [ASSUME y (normal x 1)]
  [infer (collect x)]'''
  ripl.execute_program(prog)
Example #29
def checkMakeSymDirCatAppControlsFlip(maker_1, maker_2, seed):
    # Two AAA SPs with same parameters, where their applications control
    # which are applied
    ripl = get_ripl(seed=seed)

    ripl.assume("a", "(normal 10.0 1.0)")
    ripl.assume("f", "(%s a 4)" % maker_1)
    ripl.assume("g", "(%s a 4)" % maker_2)
    ripl.predict("(f)", label="pid")
    ripl.predict("(g)")
    for _ in range(5):
        ripl.observe("(g)", "integer<1>")
    ripl.predict("(if (eq (f) integer<1>) (g) (g))")
    ripl.predict("(if (eq (g) integer<1>) (f) (f))")
    return checkDirichletCategoricalAAA(ripl, "pid", infer="mixes_slowly")
Example #30
def testPrintf2():
    # Intercept stdout and make sure the message read what we expect
    ripl = get_ripl()
    pattern = make_pattern()
    ripl.infer('(resample 2)')
    ripl.assume('x', 2.1)
    old_stdout = sys.stdout
    result = StringIO()
    sys.stdout = result
    ripl.infer(
        '(repeat 2 (do (resimulation_mh default one 1) (printf (run (collect x (labelled 3.1 foo))))))'
    )
    sys.stdout = old_stdout
    res = result.getvalue()
    assert pattern.match(res) is not None
Example #31
def testBernoulliIfNormal2(seed):
    # A simple program with bernoulli, if, and an absorbing application of normal
    if not collect_iid_samples():
        raise SkipTest("This test should not pass without reset.")

    ripl = get_ripl(seed=seed)
    ripl.assume("b", "(bernoulli 0.3)")
    ripl.predict("(normal (if b 0.0 10.0) 1.0)", label="pid")
    predictions = collectSamples(ripl, "pid")

    def cdf(x):
        return 0.3 * stats.norm.cdf(x, loc=0, scale=1) + \
          0.7 * stats.norm.cdf(x, loc=10, scale=1)

    return reportKnownContinuous(cdf, predictions, "0.3*N(0,1) + 0.7*N(10,1)")
Example #32
def testBinomial2(seed):
  # A simple test that checks the binomial logdensity
  ripl = get_ripl(seed=seed)

  b = 0.7
  p1 = 0.3
  p2 = 0.4
  n = 4
  ripl.assume("p","(if (flip %f) %f %f)" % (b,p1,p2))
  ripl.predict("(binomial %d p)" % n,label="pid")

  predictions = collectSamples(ripl,"pid")
  ans = [(x,b * scipy.stats.binom.pmf(x,n,p1) + (1 - b) * scipy.stats.binom.pmf(x,n,p2)) for x in range(n+1)]
  assert_almost_equal(sum([xx[1] for xx in ans]),1)
  return reportKnownDiscrete(ans, predictions)
Example #33
def initBasicPFripl2(seed):
    ripl = get_ripl(seed=seed)
    ripl.assume(
        "f", """
(mem (lambda (i)
  (tag 0 i
    (normal (if (eq i 0) 0 (f (- i 1))) 1))))
""")

    ripl.assume("g", """
(mem (lambda (i)
  (normal (f i) 1.0)))
""")

    return ripl
Example #34
def testDefaultTrainer():
#    raise(SkipTest('for quick results.'))
    "Train a proposal to inverse a joint Gaussian model, using default (target) trainer"
    ripl = get_ripl()
    ripl.assume("a", "(scope_include (quote A) 0 (normal 0.0 1.0))", label="pid")
    ripl.observe("(scope_include (quote B) 0 (normal (+ (* a 2) 2) 1.0))", 4.5)
    
    ripl.register_proposal_program_class("LinearRegressionProposalProgram", LinearRegressionProposalProgram)
    
    # Posterior for a is normal with mean 1.0, precision 5
    proposal_src = """
        [declare {
        "name":"linreg",
        "class":"LinearRegressionProposalProgram",
        "conditioned":[["B",0]],
        "target":[["A",0]],
        "num_samples":1000}]
        """
    ripl.execute_program(proposal_src)
    predictions = collectSamples(ripl,"pid",infer="(custommh linreg aux 1 5)")
    cdf = stats.norm(loc=1.0, scale=math.sqrt(0.2)).cdf
    return reportKnownContinuous(cdf, predictions, "N(1.0,sqrt(0.2))")
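
The expected N(1.0, sqrt(0.2)) is the linear-Gaussian posterior: with a ~ N(0, 1) and an observation y = 2a + 2 + N(0, 1) equal to 4.5, the posterior precision is 1 + 2^2 = 5 and the posterior mean is 2 * (4.5 - 2) / 5 = 1.0. A standalone sketch of that update (plain Python, illustrative names):

prior_mean, prior_prec = 0.0, 1.0
coeff, intercept, obs, obs_prec = 2.0, 2.0, 4.5, 1.0
post_prec = prior_prec + coeff ** 2 * obs_prec                    # = 5
post_mean = (prior_prec * prior_mean
             + coeff * obs_prec * (obs - intercept)) / post_prec  # = 1.0
print(post_mean, 1.0 / post_prec)                                 # 1.0 and 0.2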
Example #35
def testCustommhSaveLoad():
#    raise(SkipTest('for quick results.'))
    "Checks the posterior distribution on a Gaussian given an unlikely observation"
    ripl = get_ripl()
    ripl.assume("a", "(scope_include (quote A) 0 (normal 10.0 1.0))", label="pid")
    ripl.observe("(scope_include (quote B) 0 (normal a 1.0))", 20.0)
    
    MyNormalExactPosterior = partial(NormalExactPosterior, mu1=10.0, sigma1=1.0, sigma2=1.0)
    ripl.register_proposal_program_class("MyNormalExactPosterior", MyNormalExactPosterior)
    
    # Posterior for a is normal with mean 15, precision 2
    #  ripl.predict("(normal a 1.0)")
    proposal_src_1 = """
        [declare {
        "name":"normalexact1",
        "class":"MyNormalExactPosterior",
        "conditioned":[["B",0]],
        "target":[["A",0]],
        "ready":"yes",
        "save_to":"test_custommh_save_load.npy",
        "num_samples":0}]
        """
    ripl.execute_program(proposal_src_1)
    proposal_src_2 = """
        [declare {
        "name":"normalexact2",
        "class":"MyNormalExactPosterior",
        "conditioned":[["B",0]],
        "target":[["A",0]],
        "load_from":"test_custommh_save_load.npy",
        "num_samples":0}]
        """
    ripl.execute_program(proposal_src_2)
    predictions = collectSamples(ripl,"pid",infer="(custommh normalexact2 assumed_gibbs 1 1)")
    cdf = stats.norm(loc=15, scale=math.sqrt(0.5)).cdf
    return reportKnownContinuous(cdf, predictions, "N(15,sqrt(0.5))")