def testOneSample(seed):
    """Check that _gp_sample at one test input follows the GP posterior.

    With three observations under a scaled squared-exponential kernel,
    the posterior predictive at test_input is Gaussian with the
    precomputed mean expect_mu and variance expect_sig.
    """
    np_rng = npr.RandomState(seed)
    obs_inputs = np.array([1.3, -2.0, 0.0])
    obs_outputs = np.array([5.0, 2.3, 8.0])
    test_input = 1.4
    expect_mu = 4.6307
    expect_sig = 0.0027
    sigma = 2.1
    l = 1.8
    observations = OrderedDict(zip(obs_inputs, obs_outputs))
    mean = gp.mean_const(0.)
    covariance = cov.scale(sigma**2, cov.se(l**2))
    # _gp_sample(..., test_input) should be normally distributed with
    # mean expect_mu.
    n = default_num_samples(4)
    def sample():
        s = gp._gp_sample(mean, covariance, observations, [test_input], np_rng)
        return s[0]
    # Use range rather than xrange, for consistency with the other tests
    # here and forward compatibility with Python 3.
    samples = np.array([sample() for _ in range(n)])
    assert samples.shape == (n, )
    return reportKnownGaussian(expect_mu, np.sqrt(expect_sig), samples)
def checkPGibbsBlockingMHHMM1(operator, seed):
    """Block inference on a 5-step Gaussian random-walk HMM.

    The point of this is that it should give reasonable results in very
    few transitions but with a large number of particles.  The exact
    posterior on x4 is Gaussian with mean 390/89 and variance 55/89.
    """
    ripl = get_ripl(seed=seed)
    # Latent chain x0..x4, each step tagged into scope 0 by time index.
    ripl.assume("x0", "(tag 0 0 (normal 0.0 1.0))")
    for t in range(1, 5):
        ripl.assume("x%d" % t, "(tag 0 %d (normal x%d 1.0))" % (t, t - 1))
    # Noisy observations y0..y4 of the corresponding latents.
    for t in range(5):
        ripl.assume("y%d" % t, "(normal x%d 1.0)" % t)
    for t, datum in enumerate([1.0, 2.0, 3.0, 4.0, 5.0]):
        ripl.observe("y%d" % t, datum)
    ripl.predict("x4", label="pid")
    if ignore_inference_quality():
        infer = "(%s 0 ordered 3 2)" % operator
    else:
        infer = "(%s 0 ordered 20 10)" % operator
    predictions = collectSamples(ripl, "pid", infer=infer)
    return reportKnownGaussian(390.0/89.0, math.sqrt(55/89.0), predictions)
def testNormal4(seed):
    """Applying a function returned through a higher-order wrapper.

    g ignores its arguments and returns f, so the predicted expression
    reduces to (f 5), i.e. a draw from N(5, 1).
    """
    ripl = get_ripl(seed=seed)
    ripl.assume("f", "(lambda (mu) (normal mu 1))")
    ripl.assume("g", "(lambda (x y z) ((lambda () f)))")
    ripl.predict("((g (f (normal 0 1)) (f 5) (f (f 1))) 5)", label="pid")
    return reportKnownGaussian(5, 1, collectSamples(ripl, "pid"))
def checkPGibbsDynamicScope1(operator, seed):
    """PGibbs over a memoized, dynamically-scoped HMM recovers the posterior.

    Same random-walk HMM as checkPGibbsBlockingMHHMM1, expressed with a
    memoized recursion; the exact posterior on (f 4) is Gaussian with
    mean 390/89 and variance 55/89.
    """
    ripl = get_ripl(seed=seed)
    ripl.assume("transition_fn", "(lambda (x) (normal x 1.0))")
    ripl.assume("observation_fn", "(lambda (y) (normal y 1.0))")
    ripl.assume("initial_state_fn", "(lambda () (normal 0.0 1.0))")
    ripl.assume("f", """
(mem (lambda (t)
  (tag 0 t
    (if (= t 0)
        (initial_state_fn)
        (transition_fn (f (- t 1)))))))
""")
    ripl.assume("g", "(mem (lambda (t) (observation_fn (f t))))")
    ripl.observe("(g 0)", 1.0)
    ripl.observe("(g 1)", 2.0)
    ripl.observe("(g 2)", 3.0)
    ripl.observe("(g 3)", 4.0)
    ripl.observe("(g 4)", 5.0)
    # Pass the label by keyword for consistency with the sibling tests;
    # the second positional argument of predict is the label anyway.
    ripl.predict("(f 4)", label="pid")
    if ignore_inference_quality():
        infer = "(%s 0 ordered 3 2)" % operator
    else:
        infer = "(%s 0 ordered 20 10)" % operator
    predictions = collectSamples(ripl, "pid", infer=infer)
    return reportKnownGaussian(390/89.0, math.sqrt(55/89.0), predictions)
def testPGibbsDynamicScopeInterval(seed):
    """PGibbs over overlapping ordered_range block intervals of the HMM.

    Same memoized HMM as checkPGibbsDynamicScope1, but inference runs
    two pgibbs passes over the overlapping time windows [0,3] and [1,4].
    Exact posterior on (f 4): Gaussian, mean 390/89, variance 55/89.
    """
    ripl = get_ripl(seed=seed)
    ripl.assume("transition_fn", "(lambda (x) (normal x 1.0))")
    ripl.assume("observation_fn", "(lambda (y) (normal y 1.0))")
    ripl.assume("initial_state_fn", "(lambda () (normal 0.0 1.0))")
    ripl.assume("f", """
(mem (lambda (t)
  (tag 0 t
    (if (= t 0)
        (initial_state_fn)
        (transition_fn (f (- t 1)))))))
""")
    ripl.assume("g", "(mem (lambda (t) (observation_fn (f t))))")
    ripl.observe("(g 0)", 1.0)
    ripl.observe("(g 1)", 2.0)
    ripl.observe("(g 2)", 3.0)
    ripl.observe("(g 3)", 4.0)
    ripl.observe("(g 4)", 5.0)
    # Pass the label by keyword for consistency with the sibling tests;
    # the second positional argument of predict is the label anyway.
    ripl.predict("(f 4)", label="pid")
    P = 3 if ignore_inference_quality() else 8
    T = 2 if ignore_inference_quality() else 10
    infer = "(do (pgibbs 0 (ordered_range 0 3) %d %d) (pgibbs 0 (ordered_range 1 4) %d %d))" % (P,P,T,T)
    predictions = collectSamples(ripl, "pid", infer=infer)
    return reportKnownGaussian(390/89.0, math.sqrt(55/89.0), predictions)
def checkResamplingSmoke(mode, seed):
    """After (resample n), sample_all over (normal 0 1) yields n iid draws."""
    num = default_num_samples()
    ripl = get_ripl(seed=seed)
    ripl.infer("(resample%s %s)" % (mode, num))
    parsed = ripl._ensure_parsed_expression("(normal 0 1)")
    stack_dicts = ripl.sivm.core_sivm.engine.sample_all(parsed)
    draws = [sd["value"] for sd in stack_dicts]
    return reportKnownGaussian(0, 1, draws)
def testCollectSmoke3(seed):
    """Collecting a labelled model expression into a dataset samples its prior."""
    ripl = get_ripl(seed=seed)
    prog = """
(let ((d (empty)))
  (do (repeat %s
       (bind (collect (labelled (normal 0 1) label)) (curry into d)))
      (return d)))""" % default_num_samples()
    draws = extract_from_dataset(ripl.infer(prog), 'label')
    return reportKnownGaussian(0.0, 1.0, draws)
def testDetachRegenInference(seed):
    """custom_mh over the whole trace recovers the conjugate posterior.

    Prior N(0,1) plus likelihood N(x,1) with datum 2 gives posterior
    N(1, 1/2).
    """
    ripl = custom_mh_ripl(seed=seed)
    ripl.assume("x", "(normal 0 1)")
    ripl.observe("(normal x 1)", 2)
    inference = "(repeat %d (custom_mh default all))" % \
        default_num_transitions_per_sample()
    draws = collectSamples(ripl, "x", infer=inference)
    return reportKnownGaussian(1, math.sqrt(0.5), draws)
def checkSliceBasic1(slice_method, seed):
    """Basic sanity test for slice: with no data it samples the prior N(10,1)."""
    if slice_method == 'slice_doubling' and backend_name() != "lite":
        raise SkipTest(
            "Slice sampling with doubling only implemented in Lite.")
    ripl = get_ripl(seed=seed)
    ripl.assume("a", "(normal 10.0 1.0)", label="pid")
    draws = myCollectSamples(ripl, slice_method)
    return reportKnownGaussian(10, 1, draws)
def testResamplingSmoke4(seed):
    """Capping the worker-process count must not damage inference.

    Resamples n particles across at most n//2 processes and checks the
    pooled samples still look like the prior N(0,1).
    """
    n = default_num_samples()
    r = get_ripl(seed=seed)
    # Limit the number of processes.  Use floor division so the argument
    # stays an integer under both Python 2 and Python 3 (n / 2 would be
    # a float under Python 3).
    r.infer("(resample_multiprocess %s %s)" % (n, n // 2))
    predictions = r.sample_all("(normal 0 1)")
    eq_(n, len(predictions))
    return reportKnownGaussian(0, 1, predictions)
def testCustomProposalInference(seed):
    """A Gaussian-drift MH proposal recovers the conjugate posterior N(1, 1/2)."""
    ripl = gaussian_drift_mh_ripl(seed)
    ripl.assume("x", "(normal 0 1)")
    ripl.observe("(normal x 1)", 2)
    inference = "(repeat %d (gaussian_drift_mh default all 0.5))" % \
        default_num_transitions_per_sample()
    draws = collectSamples(ripl, "x", infer=inference)
    return reportKnownGaussian(1, math.sqrt(0.5), draws)
def testNormalWithObserve1(seed):
    """Posterior of a N(10,1) prior after an unlikely observation.

    Observing (normal a 1.0) = 14 makes the posterior for a normal with
    mean 12 and precision 2.
    """
    ripl = get_ripl(seed=seed)
    ripl.assume("a", "(normal 10.0 1.0)", label="pid")
    ripl.observe("(normal a 1.0)", 14.0)
    draws = collectSamples(ripl, "pid")
    return reportKnownGaussian(12, math.sqrt(0.5), draws)
def testExecuteSmoke(seed):
    """execute_program with embedded comments parses and infers correctly.

    The program text deliberately contains both ;;-style and
    end-of-line comments to exercise the parser.
    """
    ripl = get_ripl(seed=seed)
    # The program is loop-invariant, so build it once.
    source = """[assume x (normal 0 1)]
;; An observation
[observe (normal x 1) 2] ; with an end-of-line comment
[infer (mh default one %s)]""" % default_num_transitions_per_sample()
    draws = []
    for _ in range(default_num_samples()):
        ripl.clear()
        ripl.execute_program(source)
        draws.append(ripl.sample("x"))
    return reportKnownGaussian(1, math.sqrt(0.5), draws)
def testCollectSmoke1(seed):
    """Collecting an assumed variable while running MH samples its prior."""
    ripl = get_ripl(seed=seed)
    ripl.assume("x", "(normal 0 1)")
    prog = """
(let ((d (empty)))
  (do (repeat %s
       (do (mh default one 1)
           (bind (collect x) (curry into d))))
      (return d)))""" % default_num_samples()
    draws = extract_from_dataset(ripl.infer(prog), 'x')
    return reportKnownGaussian(0.0, 1.0, draws)
def checkMVGaussSmoke(infer, seed):
    """Projecting a bivariate Gaussian to one coordinate gives N(1, 1).

    The marginal of the first coordinate of MVN([1,2], [[1,.5],[.5,1]])
    is a univariate Gaussian with mean 1 and unit variance.
    """
    ripl = get_ripl(seed=seed)
    ripl.assume(
        "vec",
        "(multivariate_normal (vector 1 2) (matrix (list (list 1 0.5) (list 0.5 1))))"
    )
    ripl.assume("x", "(lookup vec 0)", label="pid")
    draws = collectSamples(ripl, "pid", infer=infer)
    return reportKnownGaussian(1, 1, draws)
def testNormalWithObserve2a(seed):
    """Like testNormalWithObserve1 plus an extra predict.

    The extra predict apparently has a deleterious effect on mixing,
    hence the "mixes_slowly" inference setting.  Posterior for a is
    normal with mean 12 and precision 2.
    """
    ripl = get_ripl(seed=seed)
    ripl.assume("a", "(normal 10.0 1.0)", label="pid")
    ripl.observe("(normal a 1.0)", 14.0)
    ripl.predict("(normal a 1.0)")
    draws = collectSamples(ripl, "pid", infer="mixes_slowly")
    return reportKnownGaussian(12, math.sqrt(0.5), draws)
def testBlockingExample0(seed):
    """MH restricted to block (1,1) must leave a at its prior N(10, 1)."""
    ripl = get_ripl(seed=seed)
    if not collect_iid_samples():
        raise SkipTest("This test should not pass without reset.")
    ripl.assume("a", "(tag 0 0 (normal 10.0 1.0))", label="pid")
    ripl.assume("b", "(tag 1 1 (normal a 1.0))")
    ripl.observe("(normal b 1.0)", 14.0)
    # Inference only frobnicates b, so the distribution on a remains
    # the prior.
    draws = collectSamples(ripl, "pid", infer="(mh 1 1 10)")
    return reportKnownGaussian(10, 1.0, draws)
def testExtendEnv1(seed):
    """eval in an extended environment sees the innermost binding of x.

    x resolves to the N(10,1) binding in env3; two further unit-variance
    normals on top give a marginal of N(10, 3).
    """
    ripl = get_ripl(seed=seed)
    ripl.assume("env1", "(get_current_environment)")
    ripl.assume("env2", "(extend_environment env1 (quote x) (normal 0.0 1.0))")
    ripl.assume("env3", "(extend_environment env2 (quote x) (normal 10.0 1.0))")
    ripl.assume("expr", "(quote (normal x 1.0))")
    ripl.predict("(normal (eval expr env3) 1.0)", label="pid")
    return reportKnownGaussian(10, math.sqrt(3), collectSamples(ripl, "pid"))
def testGPMean1(seed):
    """A zero-mean squared-exponential GP's prior marginal at 0 is N(0, 1)."""
    ripl = get_ripl(seed=seed)
    prep_ripl(ripl)
    ripl.assume('gp', '(make_gp zero sq_exp)')
    ripl.predict("(gp (array 0))", label="pid")
    draws = collectSamples(ripl, "pid", num_samples=default_num_samples(2))
    firsts = [draw[0] for draw in draws]
    return reportKnownGaussian(0, 1, firsts)
def testInvWishartPrior4(seed):
    """CLT behavior of inverse-Wishart entries at high degrees of freedom.

    As dof increases, an off-diagonal element of the inverse Wishart
    should become approximately Gaussian; here N(0, 0.01).
    """
    if inParallel() and backend_name() == "puma":
        raise SkipTest("The Lite SPs in Puma interface is not thread-safe, and wishart comes from Lite.")
    ripl = get_ripl(seed=seed)
    ripl.assume("s", "(scale_matrix 10000 (id_matrix 3))")
    ripl.assume("m", "(inv_wishart s 10000)")
    ripl.predict("(lookup m (pair 0 1))", label="prediction")
    draws = collectSamples(ripl, "prediction")
    return reportKnownGaussian(0, 0.01, draws)
def testNormalWithObserve1(seed):
    """Likelihood weighting: weights equal the likelihood, samples the prior.

    With prior N(10,1) and the unlikely datum (normal a 1.0) = 14, the
    log-weights must be exactly the log-likelihood of the datum, while
    the sample points themselves are drawn from the prior.
    """
    ripl = get_ripl(seed=seed)
    ripl.assume("a", "(normal 10.0 1.0)", label="pid")
    ripl.observe("(normal a 1.0)", 14.0)
    (samples, weights) = collectLikelihoodWeighted(ripl, "pid")
    likelihood = stats.norm(loc=14, scale=1)
    for sample, log_weight in zip(samples, weights):
        # Each weight should be deterministically given by the likelihood.
        assert_almost_equal(math.exp(log_weight), likelihood.pdf(sample))
    # The test points should be drawn from the prior.
    return reportKnownGaussian(10, 1, samples)
def testResampling1(seed):
    """Resample-to-many then resample-to-one should sample the posterior.

    Each round builds 10 particles of the conjugate model and collapses
    them to one; the surviving x should be distributed as N(1, 1/2).
    """
    num_particles = 10
    ripl = get_ripl(seed=seed)
    def one_posterior_draw():
        # Fresh model per draw so the samples are iid.
        ripl.clear()
        ripl.infer("(resample %d)" % num_particles)
        ripl.assume("x", "(normal 0 1)")
        ripl.observe("(normal x 1)", 2)
        ripl.infer("(resample 1)")
        return ripl.sample("x")
    draws = [one_posterior_draw() for _ in range(default_num_samples())]
    return reportKnownGaussian(1, math.sqrt(0.5), draws)
def checkSliceNormalWithObserve1(slice_method, seed):
    """Slice sampling recovers the conjugate posterior N(12, 1/2).

    Prior N(10,1) with the unlikely datum (normal a 1.0) = 14 yields a
    posterior for a that is normal with mean 12 and precision 2.
    """
    if slice_method == 'slice_doubling' and backend_name() != "lite":
        raise SkipTest(
            "Slice sampling with doubling only implemented in Lite.")
    ripl = get_ripl(seed=seed)
    ripl.assume("a", "(normal 10.0 1.0)", label="pid")
    ripl.observe("(normal a 1.0)", 14.0)
    draws = myCollectSamples(ripl, slice_method)
    return reportKnownGaussian(12, math.sqrt(0.5), draws)
def testCycleKernel(seed):
    """A cycle kernel covering both blocks solves testBlockingExample0's model.

    Same model as testBlockingExample0, but cycling MH over blocks (0,0)
    and (1,1) reaches the true posterior on a: N(34/3, 2/3).
    """
    ripl = get_ripl(seed=seed)
    ripl.assume("a", "(tag 0 0 (normal 10.0 1.0))", label="pid")
    ripl.assume("b", "(tag 1 1 (normal a 1.0))")
    ripl.observe("(normal b 1.0)", 14.0)
    cycle = "(repeat %s (do (mh 0 0 1) (mh 1 1 1)))" % \
        default_num_transitions_per_sample()
    draws = collectSamples(ripl, "pid", infer=cycle)
    return reportKnownGaussian(34.0 / 3.0, math.sqrt(2.0 / 3.0), draws)
def testMVNormalRandomWalkSoundness(seed):
    """Block proposals with delta kernels must not bias this chain.

    This exercises the subtlety involving block proposals and delta
    kernels described in the "joint-delta-kernels" footnote in
    doc/on-latents.md.  The marginal of y is N(0, sqrt(2)).
    """
    ripl = get_ripl(seed=seed)
    ripl.assume("mean", "(multivariate_normal (array 0) (id_matrix 1))")
    ripl.assume("y", "(multivariate_normal mean (id_matrix 1))")
    raw = collectSamples(ripl, "y", infer="(mh default all 50)",
                         num_samples=default_num_samples(10))
    draws = [vec[0] for vec in raw]
    return reportKnownGaussian(0, math.sqrt(2), draws)
def testDict1(seed):
    """Dict lookups inside a model expression behave like ordinary values.

    d maps x -> N(0,1) and y -> N(10,1); the predicted sum x + y + y is
    centered at 20.
    """
    ripl = get_ripl(seed=seed)
    ripl.assume("x", "(bernoulli 1.0)")
    ripl.assume(
        "d",
        """(dict (array (quote x) (normal 0.0 1.0))
                 (array (quote y) (normal 10.0 1.0)))""")
    ripl.predict(
        """(normal (+ (lookup d (quote x))
                      (lookup d (quote y))
                      (lookup d (quote y)))
                   1.0)""",
        label="pid")
    return reportKnownGaussian(20, 2, collectSamples(ripl, "pid"))
def testModelSwitchingSmoke(seed):
    """Running a conjugate model inside in_model gives the right posterior.

    normal_through_model builds a fresh model, infers in it, and samples
    x; with mu=0, sigma=1 the result should be N(0, 1).
    """
    ripl = get_ripl(seed=seed, persistent_inference_trace=True)
    ripl.execute_program("""
[define normal_through_model
  (lambda (mu sigma)
    (do (m <- (new_model))
        (res <- (in_model m
          (do (assume x (normal 0 ,(* (sqrt 2) sigma)))
              (assume y (normal x ,(* (sqrt 2) sigma)))
              (observe y (* 2 mu))
              (mh default one %s)
              (sample x))))
        (return (first res))))]
""" % default_num_transitions_per_sample())
    draws = [ripl.infer("(normal_through_model 0 1)")
             for _ in range(default_num_samples())]
    return reportKnownGaussian(0.0, 1.0, draws)
def test_model_without_compound_assume(seed):
    """Suff-stat normal observations recover the data-generating N(5, 1).

    Draws synthetic data from N(5, 1), observes it through obs_1, and
    checks the posterior predictive.
    """
    inf_test_prog = """
[assume a (tag (quote a_scope) 0 (normal 0 10))]
[assume b (tag (quote b_scope) 0 (normal -10 10))]
[assume obs_1 (make_suff_stat_normal a 1)]
[assume obs_2 (make_suff_stat_normal b 1)]
"""
    ripl = get_ripl(seed=seed)
    ripl.execute_program(inf_test_prog)
    # Draw the synthetic data from a seeded generator rather than the
    # global numpy RNG, so the test is deterministic for a given seed.
    data_rng = np.random.RandomState(seed)
    for _ in range(default_num_data(40)):
        ripl.observe("(obs_1)", data_rng.normal(5, 1))
    ripl.predict("(obs_1)", label="predictive")
    post_samples = collectSamples(ripl, "predictive")
    return reportKnownGaussian(5, 1, post_samples)
def checkForEachParticleCustomMH(mode, seed):
    """for_each_particle with a user-defined drift-MH kernel converges.

    NOTE(review): the drift_mh lambda takes scope/block parameters but
    always targets (default, all) in its body — presumably deliberate;
    confirm against the inference-language docs.
    """
    num_particles = max(2, default_num_samples())
    ripl = get_ripl(seed=seed, persistent_inference_trace=True)
    ripl.define("drift_mh", """\
(lambda (scope block)
  (mh_correct
   (on_subproblem default all
     (symmetric_local_proposal
      (lambda (x) (normal x 1))))))
""")
    ripl.assume("x", "(normal 0 1)")
    ripl.observe("(normal x 1)", 2)
    ripl.infer("(resample%s %s)" % (mode, num_particles))
    for _ in range(default_num_transitions_per_sample()):
        ripl.infer("(for_each_particle (drift_mh default all))")
    draws = ripl.infer("(for_each_particle (sample x))")
    return reportKnownGaussian(1, 0.5**0.5, draws)
def testResampling2(seed):
    """Like testResampling1 with an extra resample round trip.

    This differs from testResampling1 by an extra resample step, which
    is supposed to be harmless.
    """
    num_particles = 20
    ripl = get_ripl(seed=seed)
    def one_posterior_draw():
        ripl.clear()
        ripl.infer("(resample %d)" % num_particles)
        ripl.assume("x", "(normal 0 1)")
        ripl.observe("(normal x 1)", 2)
        ripl.infer("(incorporate)")
        # The redundant resample/incorporate pair is the point of the test.
        ripl.infer("(resample %d)" % num_particles)
        ripl.infer("(incorporate)")
        ripl.infer("(resample 1)")
        return ripl.sample("x")
    draws = [one_posterior_draw() for _ in range(4 * default_num_samples())]
    return reportKnownGaussian(1, math.sqrt(0.5), draws)