Example #1
def checkSliceStudentT2(slice_method, seed):
    # Simple program involving simulating from a student_t
    if (backend_name() != "lite") and (slice_method == 'slice_doubling'):
        raise SkipTest(
            "Slice sampling with doubling only implemented in Lite.")
    ripl = get_ripl(seed=seed)
    ripl.assume("a", "(student_t 1.0)")
    ripl.observe("(normal a 1.0)", 3.0)
    ripl.predict("(normal a 1.0)", label="pid")
    predictions = myCollectSamples(ripl, slice_method)

    # Posterior of a is proportional to
    def postprop(a):
        return stats.t(1).pdf(a) * stats.norm(loc=3).pdf(a)

    import scipy.integrate as integrate
    (normalize, _) = integrate.quad(postprop, -10, 10)

    def posterior(a):
        return postprop(a) / normalize

    (meana, _) = integrate.quad(lambda x: x * posterior(x), -10, 10)
    (meanasq, _) = integrate.quad(lambda x: x * x * posterior(x), -10, 10)
    vara = meanasq - meana * meana
    # TODO Test agreement with the whole shape of the distribution, not
    # just the mean
    return reportKnownMean(meana, predictions, variance=vara + 1.0)
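For a quick cross-check of the quadrature above, the same posterior moments can be approximated by importance sampling from the t(1) prior. A minimal sketch, assuming numpy and scipy are available:

import numpy as np
import scipy.stats as stats

rng = np.random.default_rng(0)
a = rng.standard_t(df=1, size=200_000)          # draws from the t(1) prior
w = stats.norm(loc=3.0).pdf(a)                  # likelihood of observing 3.0
w /= w.sum()
mean_a = np.sum(w * a)
var_a = np.sum(w * a * a) - mean_a ** 2
# The predict is normal(a, 1.0), so its marginal variance is var_a + 1.0,
# which is why the test passes variance=vara + 1.0 to reportKnownMean.
print(mean_a, var_a)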
Example #2
def testStudentT2(seed):
    # Simple program involving simulating from a student_t, with basic
    # testing of loc and shape params.
    ripl = get_ripl(seed=seed)
    ripl.assume("x", "(student_t 1.0 3 2)")
    ripl.assume("a", "(/ (- x 3) 2)")
    ripl.observe("(normal a 1.0)", 3.0)
    ripl.predict("(normal a 1.0)", label="pid")
    predictions = collectSamples(ripl, "pid", infer="mixes_slowly")

    # Posterior of a is proportional to
    def postprop(a):
        return stats.t(1).pdf(a) * stats.norm(loc=3).pdf(a)

    import scipy.integrate as integrate
    (normalize, _) = integrate.quad(postprop, -10, 10)

    def posterior(a):
        return postprop(a) / normalize

    (meana, _) = integrate.quad(lambda x: x * posterior(x), -10, 10)
    (meanasq, _) = integrate.quad(lambda x: x * x * posterior(x), -10, 10)
    vara = meanasq - meana * meana
    # TODO Test agreement with the whole shape of the distribution, not
    # just the mean
    return reportKnownMean(meana, predictions, variance=vara + 1.0)
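The second assume is the standard location-scale reduction: if x is Student-t with df 1, location 3, and scale 2, then (x - 3) / 2 is a standard t(1), so the rest of the test matches checkSliceStudentT2. A minimal check of that identity with scipy, assuming the Venture student_t arguments are (df, loc, scale):

import scipy.stats as stats

x = stats.t(df=1, loc=3, scale=2).rvs(size=100_000, random_state=1)
a = (x - 3) / 2.0
# t(1) has no mean, so compare the whole distribution rather than moments.
print(stats.kstest(a, stats.t(df=1).cdf))       # large p-value expected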
Example #3
def testPoisson2(seed):
    # Check that Poisson simulates correctly.
    ripl = get_ripl(seed=seed)

    ripl.assume("lambda", "5")
    ripl.predict("(poisson lambda)", label="pid")

    predictions = collectSamples(ripl, "pid")
    return reportKnownMean(5, predictions)
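The asserted mean is just the Poisson rate. A minimal numpy sketch of the same property outside the ripl:

import numpy as np

rng = np.random.default_rng(0)
draws = rng.poisson(lam=5, size=100_000)
print(draws.mean(), draws.var())                # both close to 5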
Example #4
def testApply1(seed):
    # This CSP does not handle lists and symbols correctly.
    ripl = get_ripl(seed=seed)

    ripl.assume(
        "apply",
        "(lambda (op args) (eval (pair op args) (get_empty_environment)))")
    ripl.predict(
        "(apply mul (array (normal 10.0 1.0) (normal 10.0 1.0) (normal 10.0 1.0)))",
        label="pid")

    predictions = collectSamples(ripl, "pid")
    return reportKnownMean(1000, predictions, variance=101**3 - 100**3)
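The variance argument follows from the product of three independent N(10, 1) draws: each factor has E[X^2] = 1 + 10^2 = 101, so E[(XYZ)^2] = 101^3 while (E[XYZ])^2 = (10^3)^2 = 100^3, giving Var(XYZ) = 101**3 - 100**3 = 30301. A Monte Carlo sketch of that calculation, assuming numpy:

import numpy as np

rng = np.random.default_rng(0)
prod = rng.normal(10.0, 1.0, size=(1_000_000, 3)).prod(axis=1)
print(prod.mean(), prod.var())                  # roughly 1000 and 30301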
Example #5
def testGPMean2(seed):
    ripl = get_ripl(seed=seed)
    prep_ripl(ripl)

    ripl.assume('gp', '(make_gp zero sq_exp)')
    ripl.observe('(gp (array -1 1))', array([-1, 1]))

    ripl.predict("(gp (array 0))", label="pid")

    predictions = collectSamples(ripl, "pid")
    xs = [p[0] for p in predictions]

    # TODO: variance
    return reportKnownMean(0, xs)
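The asserted mean of 0 follows from symmetry: with a zero prior mean and a stationary kernel, the antisymmetric observations at -1 and 1 cancel at the midpoint. A numpy sketch of the GP posterior mean, assuming a unit-lengthscale squared-exponential kernel (the actual sq_exp hyperparameters set up by prep_ripl may differ):

import numpy as np

def sq_exp(x, y):
    # Squared-exponential kernel with unit signal variance and lengthscale.
    return np.exp(-0.5 * (x - y) ** 2)

X = np.array([-1.0, 1.0])
y = np.array([-1.0, 1.0])
K = sq_exp(X[:, None], X[None, :]) + 1e-9 * np.eye(2)
k_star = sq_exp(0.0, X)
print(k_star @ np.linalg.solve(K, y))           # 0 up to numerical noise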
Example #6
def testCMVN2D_mu1(seed):
    if backend_name() != "lite": raise SkipTest("CMVN in lite only")
    ripl = get_ripl(seed=seed)
    ripl.assume("m0", "(array 5.0 5.0)")
    ripl.assume("k0", "7.0")
    ripl.assume("v0", "11.0")
    ripl.assume("S0", "(matrix (array (array 13.0 0.0) (array 0.0 13.0)))")
    ripl.assume("f", "(make_niw_normal m0 k0 v0 S0)")

    ripl.predict("(f)", label="pid")

    predictions = collectSamples(ripl, "pid")

    mu1 = [p[0] for p in predictions]
    return reportKnownMean(5, mu1)
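The expected mean is the NIW location parameter m0 = (5, 5): marginalizing over Sigma ~ InvWishart(v0, S0) and mu ~ N(m0, Sigma / k0) leaves E[x] = m0. A Monte Carlo sketch with scipy, under the assumption that this is the parameterization make_niw_normal uses:

import numpy as np
from scipy.stats import invwishart

rng = np.random.default_rng(0)
m0, k0, v0 = np.array([5.0, 5.0]), 7.0, 11.0
S0 = 13.0 * np.eye(2)
iw = invwishart(df=v0, scale=S0)

xs = []
for _ in range(5000):
    Sigma = iw.rvs(random_state=rng)
    mu = rng.multivariate_normal(m0, Sigma / k0)
    xs.append(rng.multivariate_normal(mu, Sigma))
print(np.mean(xs, axis=0))                      # roughly (5, 5)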
Example #7
def testConstrainWithAPredict2(seed):
    # This test will fail at first, since we previously considered a
    # program like this to be illegal and thus did not handle it
    # correctly (we let the predict go stale). So that we do not continually
    # bewilder our users, I suggest that we handle this case WHEN WE
    # CAN, which means we propagate from a constrain as long as we don't
    # hit an absorbing node or a DRG node with a kernel.
    ripl = get_ripl(seed=seed)
    ripl.assume(
        "f",
        "(if (flip) (lambda () (normal 0.0 1.0)) (mem (lambda () (normal 0.0 1.0))))"
    )
    ripl.observe("(f)", "1.0")
    ripl.predict("(* (f) 100)", label="pid")
    predictions = collectSamples(ripl, "pid")
    # Will divide by 0 if there is no sample variance.
    return reportKnownMean(50, predictions)
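The expected mean of 50 comes from the two branches of the flip: when f is memoized, the predicted (f) is the same application that was constrained to 1.0, so the predict is 100; otherwise it is an independent normal(0, 1) scaled by 100, with mean 0. Both branches give the observation the same likelihood, so the flip stays uniform a posteriori and the posterior mean is 0.5 * 100 + 0.5 * 0 = 50. A likelihood-weighting sketch of that argument, assuming numpy:

import numpy as np

rng = np.random.default_rng(0)
vals, weights = [], []
for _ in range(100_000):
    memoized = rng.random() < 0.5
    # The observed application of f is constrained to 1.0 in either branch,
    # so both contribute the same weight, normal(0, 1).pdf(1.0).
    weights.append(np.exp(-0.5) / np.sqrt(2 * np.pi))
    vals.append(100.0 if memoized else 100.0 * rng.normal(0.0, 1.0))
print(np.average(vals, weights=weights))        # roughly 50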