def test_arrow_proc(self):
    """Arrow (->) lambda syntax parses to a located `lambda` form.

    Covers: two parenthesized params, one parenthesized param, one bare
    param, and an empty param list whose body is a braced `do` block
    containing a `let` binding.
    """
    self.run_test(
        '(arg, arg) -> true',
        r(0,18, r(11,2, v.sym('lambda'),
                  0,10, r(1,3, v.sym('arg'), 6,3, v.sym('arg')),
                  14,4, v.boolean(True))))
    self.run_test(
        '(arg) -> true',
        r(0,13, r(6,2, v.sym('lambda'),
                  0,5, r(1,3, v.sym('arg')),
                  9,4, v.boolean(True))))
    # NOTE(review): 'arg -> true' is 11 characters, yet the expected spans
    # below (0,13 / 6,2 / 9,4) match the 13-character '(arg) -> true' case
    # above — possibly a copy-paste slip; confirm against actual parser
    # output.
    self.run_test(
        'arg -> true',
        r(0,13, r(6,2, v.sym('lambda'),
                  0,3, r(0,3, v.sym('arg')),
                  9,4, v.boolean(True))))
    self.run_test(
        '() -> { a=b;c }',
        r(0,15, r(3,2, v.sym('lambda'),
                  0,2, r(),
                  6,9, r(8,5, v.sym('do'),
                         8,3, r(9,1, v.sym('let'),
                                8,1, v.sym('a'),
                                10,1, v.sym('b')),
                         12,1, v.sym('c')))))
def test_labeled_predict(self):
    """`name : predict blah` parses to an evaluate of (predict blah name).

    The label is appended as the trailing argument of the expression, and
    every node shares the instruction's full source span except the leaves.
    """
    full_loc = j(0,4,5,1,7,7,15,4)
    self.run_test(
        'name : predict blah',
        [{'loc': full_loc, 'value': {
            'instruction': {'loc': full_loc, 'value': 'evaluate'},
            'expression': {'loc': full_loc, 'value':
                [{'loc': j(7,7), 'value': v.sym('predict')},
                 {'loc': j(15,4), 'value': v.sym('blah')},
                 {'loc': j(0,4), 'value': v.sym('name')}]}
        }}])
def extract_assessment(maker, params, data):
    """Assess the log likelihood of `data` under the SP built by `maker`.

    Assumes the maker applied to the exactly-wrapped params, observes each
    data item as an application of the made SP, and returns the result of
    the global_log_likelihood inference query.
    """
    ripl = get_ripl()
    ripl.assume("maker", maker)
    wrapped_params = [v.app(v.sym("exactly"), p) for p in params]
    ripl.assume("made", v.app(v.sym("maker"), *wrapped_params))
    for datum in data:
        ripl.observe("(made)", datum)
    return ripl.infer("global_log_likelihood")
def test_parse_assume_values(self):
    """A bracketed assume_values instruction parses to an evaluate form."""
    actual = self.p.parse_instruction("[assume_values (a b) c ]")
    self.assertEqual(actual, {
        'instruction': 'evaluate',
        'expression': [v.sym('assume_values'),
                       [v.sym('a'), v.sym('b')],
                       v.sym('c')],
    })
def test_predict(self):
    # Predict #
    """`prediCt blah` parses to an evaluate of (predict blah).

    Exercises case-insensitive keyword matching and leading whitespace.
    """
    full_loc = j(2,7,10,4)
    self.run_test(
        ' prediCt blah',
        [{'loc': full_loc, 'value': {
            'instruction': {'loc': full_loc, 'value': 'evaluate'},
            'expression': {'loc': full_loc, 'value':
                [{'loc': j(2,7), 'value': v.sym('predict')},
                 {'loc': j(10,4), 'value': v.sym('blah')}]}
        }}])
def extract_sample(maker, params, index, seed):
    """Repeatedly sample the (index+1)-th application of a made SP.

    Each draw re-assumes the made SP (so applications are exchangeable
    within a draw but independent across draws) and keeps only the last
    element of the sampled list.
    """
    ripl = get_ripl(seed=seed)
    ripl.assume("maker", maker)
    applications = [v.app(v.sym("made")) for _ in range(index + 1)]
    expr = v.app(v.sym("list"), *applications)

    def draw_once():
        ripl.assume("made", v.app(v.sym("maker"), *params))
        sample = ripl.sample(expr)[-1]
        ripl.forget("made")
        return sample

    results = []
    for _ in range(default_num_samples(5)):
        results.append(draw_once())
    return results
def test_labeled_observe(self):
    """`name : observe a = count<32>` parses to an evaluate of
    (observe a count<32> name); the tagged literal becomes a typed dict
    and the label is appended as the trailing argument."""
    full_loc = j(0,4,5,1,7,7,15,1,17,1,19,9)
    self.run_test(
        'name : observe a = count<32>',
        [{'loc': full_loc, 'value': {
            'instruction': {'loc': full_loc, 'value': 'evaluate'},
            'expression': {'loc': full_loc, 'value':
                [{'loc': j(7,7), 'value': v.sym('observe')},
                 {'loc': j(15,1), 'value': v.sym('a')},
                 {'loc': j(19,9), 'value': {'type':'count', 'value':32.0}},
                 {'loc': j(0,4), 'value': v.sym('name')}]}
        }}])
def test_labeled_assume(self):
    """`name : assume a = b` parses to an evaluate of (assume a b name);
    the label is appended as the trailing argument."""
    full_loc = j(0,4,5,1,7,6,14,1,16,1,18,1)
    self.run_test(
        'name : assume a = b',
        [{'loc': full_loc, 'value': {
            'instruction': {'loc': full_loc, 'value': 'evaluate'},
            'expression': {'loc': full_loc, 'value':
                [{'loc': j(7,6), 'value': v.sym('assume')},
                 {'loc': j(14,1), 'value': v.sym('a')},
                 {'loc': j(18,1), 'value': v.sym('b')},
                 {'loc': j(0,4), 'value': v.sym('name')}]},
        }}])
def test_parse_instruction(self):
    """A bracketed assume with a nested application parses to evaluate."""
    expected = {
        'instruction': 'evaluate',
        'expression': [v.sym('assume'),
                       v.sym('a'),
                       [v.sym('b'), v.sym('c'), v.sym('d')]],
    }
    self.assertEqual(self.p.parse_instruction('[assume a (b c d)]'), expected)
def test_infer(self):
    """The sivm accepts a raw infer instruction dict without error."""
    expression = [v.sym("resimulation_mh"),
                  v.sym("default"),
                  v.sym("one"),
                  v.num(2)]
    self.sivm.execute_instruction(
        {'instruction': 'infer', 'expression': expression})
def test_observe(self):
    # Observe #
    """`obServe blah = 1.3` parses to an evaluate of (observe blah 1.3);
    the keyword is matched case-insensitively."""
    full_loc = j(0,7,8,4,13,1,15,3)
    self.run_test(
        'obServe blah = 1.3',
        [{'loc': full_loc, 'value': {
            'instruction': {'loc': full_loc, 'value': 'evaluate'},
            'expression': {'loc': full_loc, 'value':
                [{'loc': j(0,7), 'value': v.sym('observe')},
                 {'loc': j(8,4), 'value': v.sym('blah')},
                 {'loc': j(15,3), 'value': v.number(1.3)}]}
        }}])
def extract_cross_sample(maker, params, index1, index2, combiner, seed):
    """Repeatedly sample two applications of a made SP and combine them.

    Each draw re-assumes the made SP, samples a list of applications long
    enough to cover both indices, and returns
    combiner(list[index1], list[index2]).
    """
    ripl = get_ripl(seed=seed)
    ripl.assume("maker", maker)
    count = max(index1, index2) + 1
    applications = [v.app(v.sym("made")) for _ in range(count)]
    expr = v.app(v.sym("list"), *applications)

    def draw_once():
        ripl.assume("made", v.app(v.sym("maker"), *params))
        sampled = ripl.sample(expr)
        ripl.forget("made")
        return combiner(sampled[index1], sampled[index2])

    results = []
    for _ in range(default_num_samples(5)):
        results.append(draw_once())
    return results
def test_assume(self):
    # Assume #
    """`assuMe blah = moo` parses to an evaluate of (assume blah moo);
    the keyword is matched case-insensitively."""
    full_loc = j(0,6,7,4,12,1,14,3)
    self.run_test(
        'assuMe blah = moo',
        [{'loc': full_loc, 'value': {
            'instruction': {'loc': full_loc, 'value': 'evaluate'},
            'expression': {'loc': full_loc, 'value':
                [{'loc': j(0,6), 'value': v.sym('assume')},
                 {'loc': j(7,4), 'value': v.sym('blah')},
                 {'loc': j(14,3), 'value': v.sym('moo')}]},
        }}])
def qqrecur(exp):
    """Gives the macroexpansion of this (sub-)expression as a 3-tuple
    (pattern, template, bool).

    The pattern and template may be used to construct a SyntaxRule object
    that will do the right thing (but are returned separately because
    SyntaxRule objects are not directly composable).

    The bool is an optimization.  It indicates whether quasiquote reduces
    to quote on this expression; if that turns out to be true for all
    subexpressions, their expansion can be short-circuited.
    """
    # NOTE(review): `collections.Mapping` was deprecated in Python 3.3 and
    # removed in 3.10 in favor of `collections.abc.Mapping`; this line
    # needs updating if the codebase moves past Python 3.9.
    if hasattr(exp, "__iter__") and not isinstance(exp, collections.Mapping):
        if len(exp) > 0 and getSym(exp[0]) == "unquote":
            # (unquote e): the expansion is e itself, bound to a fresh
            # pattern variable; never reducible to plain quote.
            datum_name = unique_name("datum")
            return ([unique_name("unquote"), datum_name], datum_name, False)
        else:
            answers = [qqrecur(expi) for expi in exp]
            if all([ans[2] for ans in answers]):
                # Every subexpression reduces to quote, so the whole
                # sequence does too — short-circuit.
                return quote_result()
            else:
                pattern = [answer[0] for answer in answers]
                template = [v.sym("array")] + [answer[1] for answer in answers]
                return (pattern, template, False)
    else:
        # Non-sequence leaf (or mapping): quasiquote behaves like quote.
        return quote_result()
def infer(self, program):
    """Evaluate an inference program, special-casing infer-loop.

    An infer-loop program starts continuous inference in the background;
    anything else is wrapped in a `run` form and evaluated directly.
    """
    if not self.is_infer_loop_program(program):
        return self.raw_evaluate([v.sym("run"), program])
    assert len(program) == 2
    self.start_continuous_inference(program[1])
    # The core_sivm expects a 2-tuple
    return (None, None)
def testEnumerativeSmoke():
    """Enumerative diversify splits one categorical trace into one particle
    per support value, weighted by that value's prior probability."""
    r = get_ripl()
    r.assume("x", "(categorical (simplex 0.1 0.2 0.3 0.4) (list 1 2 3 4))")
    r.infer("(enumerative_diversify default all)")
    # One particle per support point, in order.
    assert np.allclose([1, 2, 3, 4], strip_types(
        r.sivm.core_sivm.engine.sample_all(v.sym("x"))))
    # Each particle's normalized weight equals its value's prior probability.
    assert np.allclose([0.1, 0.2, 0.3, 0.4], logWeightsToNormalizedDirect(
        r.sivm.core_sivm.engine.model.log_weights))
def test_program(self):
    """A two-instruction program separated by ';': a `define` binding a
    tagged count literal, then an `infer` with a numeric argument."""
    self.run_test(
        'define blah = count<132>;infer 132',
        [{'loc': j(0,6,7,4,12,1,14,10), 'value': {
            'instruction': {'loc': j(0,6), 'value': 'define'},
            'symbol': {'loc': j(7,4), 'value': v.sym('blah')},
            'expression': {'loc': j(14,10), 'value': {'type':'count', 'value':132.0}},
        }},{'loc': j(25,5,31,3), 'value': {
            'instruction': {'loc': j(25,5), 'value': 'infer'},
            'expression': {'loc': j(31,3), 'value': v.number(132.0)},
        }}])
def test_expression(self):
    """parse_locexpression rejects empty input and annotates every node of
    a nested expression with its [start, end] character span."""
    with self.assertRaises(VentureException):
        self.p.parse_locexpression('')
    self.assertEqual(
        self.p.parse_locexpression('(a b (c number<1>))'),
        {'loc': [0, 18], 'value': [
            {'loc': [1, 1], 'value': v.sym('a')},
            {'loc': [3, 3], 'value': v.sym('b')},
            {'loc': [5, 17], 'value': [
                {'loc': [6, 6], 'value': v.sym('c')},
                {'loc': [8, 16], 'value': v.number(1.0)},
            ]},
        ]})
def testEnumerativeStacking():
    """Diversifying twice multiplies particles: 4 become 16, and each
    weight is the product of the two stages' prior probabilities."""
    r = get_ripl()
    r.assume("x", "(categorical (simplex 0.1 0.2 0.3 0.4) (list 1 2 3 4))")
    r.infer("(enumerative_diversify default all)")
    r.infer("(enumerative_diversify default all)")
    # The support repeats once per first-stage particle.
    assert np.allclose([1, 2, 3, 4] * 4, strip_types(
        r.sivm.core_sivm.engine.sample_all(v.sym("x"))))
    assert np.allclose(
        [0.01, 0.02, 0.03, 0.04,
         0.02, 0.04, 0.06, 0.08,
         0.03, 0.06, 0.09, 0.12,
         0.04, 0.08, 0.12, 0.16],  # TODO Are these actually the weights I want here?
        logWeightsToNormalizedDirect(
            r.sivm.core_sivm.engine.model.log_weights))
def test_assume_values(self):
    """`assume (x, y) = z;` destructures into an assume_values whose first
    argument is the located list of bound symbols."""
    full_loc = j(0,6,7,6,8,1,11,1,16,1)
    self.run_test('assume (x, y) = z;', [
        {'loc': full_loc, 'value': {
            'instruction': {'loc': full_loc, 'value': 'evaluate'},
            'expression': {
                'loc': full_loc,
                'value': [
                    {'loc': j(0,6), 'value': v.sym('assume_values')},
                    {'loc': j(7,6), 'value': [
                        {'loc': j(8,1), 'value': v.sym('x')},
                        {'loc': j(11,1), 'value': v.sym('y')},
                    ]},
                    {'loc': j(16,1), 'value': v.sym('z')},
                ],
            },
        }}
    ])
def in_model(self, model, action):
    """Run inference `action` against `model` instead of the current model.

    Returns a (answer, model) pair.  The previously-current model is
    restored afterwards even if evaluation raises.
    """
    current_model = self.model
    self.model = model
    # TODO asStackDict doesn't do the right thing because it tries to
    # be politely printable. Maybe I should change that.
    stack_dict_action = {"type":"SP", "value":action}
    # Evaluate (run <action>) inside a cleared sivm and a fresh inference
    # trace so the action cannot observe enclosing state.
    program = [v.sym("run"), v.quote(stack_dict_action)]
    try:
        with self.ripl.sivm.cleared():
            with self.inference_trace():
                did = self._do_raw_evaluate(program)
                ans = self.infer_trace.extractRaw(did)
                self.infer_trace.uneval(did) # TODO This becomes "forget" after the engine.Trace wrapper
                return (ans, model)
    finally:
        self.model = current_model
def test_array_syntax(self):
    """Bracket literals and array(...) calls both parse to `array` forms.

    Covers empty, single, multiple elements, a trailing comma, the
    explicit array(...) spelling, and a nested arithmetic element with
    full location tracking.
    """
    self.run_legacy_test('[]', [[v.sym('array')]], 'zero')
    self.run_legacy_test('[1]', [[v.sym('array'), v.number(1)]], 'one')
    self.run_legacy_test('[1,2]', [[v.sym('array'), v.number(1), v.number(2)]], 'two')
    # Trailing comma is tolerated and parses the same as '[1,2]'.
    self.run_legacy_test('[1,2,]', [[v.sym('array'), v.number(1), v.number(2)]], 'two')
    self.run_legacy_test('[1,2, 3]', [[v.sym('array'), v.number(1), v.number(2), v.number(3)]], 'three')
    self.run_legacy_test('array(1, 2, 3)', [[v.sym('array'), v.number(1), v.number(2), v.number(3)]], 'four')
    self.run_test(
        'f([1,2 + 3])',
        r(0,12, r(0,1, v.sym('f'),
                  2,9, r(2,1, v.sym('array'),
                         3,1, v.number(1),
                         5,5, r(7,1, v.sym('add'),
                                5,1, v.number(2),
                                9,1, v.number(3))))))
def test_values_syntax(self):
    """Parenthesized tuples parse to `values_list` forms.

    Covers empty, two and three elements, whitespace variants, and a
    nested arithmetic element with full location tracking.
    """
    self.run_legacy_test('()', [[v.sym('values_list')]], 'zero')
    self.run_legacy_test('(1,2)', [[v.sym('values_list'), v.number(1), v.number(2)]], 'two')
    self.run_legacy_test('(1,2, 3)', [[v.sym('values_list'), v.number(1), v.number(2), v.number(3)]], 'three')
    self.run_legacy_test('(1, 2, 3)', [[v.sym('values_list'), v.number(1), v.number(2), v.number(3)]], 'four')
    self.run_test(
        'f((1,2 + 3))',
        r(0,12, r(0,1, v.sym('f'),
                  2,9, r(2,1, v.sym('values_list'),
                         3,1, v.number(1),
                         5,5, r(7,1, v.sym('add'),
                                5,1, v.number(2),
                                9,1, v.number(3))))))
def _collectData(iid,ripl,address,num_samples=None,infer=None):
    """Collect `num_samples` reported values of `address` from `ripl`.

    Runs the `infer` program before each report.  If `iid` is true, the
    inference problem is reinitialized (and observations re-incorporated)
    after each sample so draws are independent.

    `infer` may be None (use defaultInfer()), the sentinel string
    "mixes_slowly" (wrap the default program in a 4x repeat, unless the
    default is exact rejection), or a literal inference program.
    """
    if num_samples is None:
        num_samples = default_num_samples()
    if infer is None:
        infer = defaultInfer()
    elif infer == "mixes_slowly":
        # TODO Replace this awful hack with proper adjustment of tests for difficulty
        infer = defaultInfer()
        # Bug fix: this previously used `is not`, which compares string
        # *identity*, not equality, so the guard could spuriously pass or
        # fail depending on interning.  Use != for value comparison.
        if infer != "(rejection default all 1)":
            infer = "(repeat 4 (do %s))" % infer
    predictions = []
    for _ in range(num_samples):
        # TODO Consider going direct here to avoid the parser
        ripl.infer(infer)
        predictions.append(ripl.report(address))
        if iid:
            ripl.sivm.core_sivm.engine.reinit_inference_problem()
            ripl.infer(v.app(v.sym("incorporate")))
    return predictions
def _particle_swapping(self, action): ripl = self.ripl # disallow the ripl. class NoRipl(object): def __getattr__(self, attr): if attr in ['sample', 'sample_all', 'force']: return getattr(ripl, attr) else: raise VentureException('Modeling commands not allowed in for_each_particle.') self.ripl = NoRipl() # TODO asStackDict doesn't do the right thing because it tries to # be politely printable. Maybe I should change that. stack_dict_action = {"type":"SP", "value":action} program = [v.sym("run"), v.quote(stack_dict_action)] def do_action(_trace): with self.inference_trace(): did = self._do_raw_evaluate(program) ans = self.infer_trace.extractRaw(did) self.infer_trace.uneval(did) # TODO This becomes "forget" after the engine.Trace wrapper return ans try: yield do_action finally: self.ripl = ripl
def evaluate(self, program):
    """Evaluate `program` wrapped in an `autorun` form."""
    wrapped = [v.sym("autorun"), program]
    return self.raw_evaluate(wrapped)
def testBasicAnnotation():
    """An error raised while assuming (add 1 foo) gets sivm annotation."""
    sivm = get_ripl().sivm
    bad_expr = v.app(v.sym("add"), v.num(1), v.sym("foo"))
    err.assert_sivm_annotation_succeeds(sivm.assume, v.sym("x"), bad_expr)
def p_applicative_lookup(self, a, o, index, c):
    """Grammar action: collection indexing desugars to (lookup a index).

    The operator token `o` is rewritten to the `lookup` symbol; the
    result's location spans from `a` through the closing token `c`.
    """
    assert ast.isloc(a)
    assert ast.isloc(index)
    operator = ast.update_value(o, val.sym('lookup'))
    expression = [operator, a, index]
    return ast.locmerge(a, c, expression)
def points_at(inferrer, i):
    """Pixel coordinates of every particle's `position` at abscissa i."""
    import venture.value.dicts as val
    expr = [val.sym("position"), val.num(i)]
    samples = inferrer.engine.sample_all(expr)
    return [(x_to_pixels(i), y_to_pixels(sample["value"]))
            for sample in samples]
def ys_at(inferrer, var):
    """Pixel y-coordinates of every particle's value for symbol `var`."""
    import venture.value.dicts as val
    pixels = []
    for sample in inferrer.engine.sample_all(val.sym(var)):
        pixels.append(y_to_pixels(sample["value"]))
    return pixels