def test_cloning_available(self):
    """A generator op's graph can be cloned with a shared variable swapped in."""
    gen_op = generator(integers())
    squared = gen_op ** 2
    replacement = theano.shared(floatX(10))
    # Substitute the generator input with the shared value before compiling.
    cloned = theano.clone(squared, {gen_op: replacement})
    compiled = theano.function([], cloned)
    assert compiled() == np.float32(100)
def test_density_scaling_with_genarator(self):
    """total_size-scaled logp of generator-observed models matches the analytic density."""
    # NOTE(review): "genarator" looks like a typo for "generator"; an
    # identically named definition appears later in this source — if both are
    # methods of the same class, the later one shadows this test.

    def reference_logp():
        # Analytic normal log-density, scaled by the same factor the
        # total_size/minibatch ratio implies (presumably 100/10 -> 10).
        for _, batch in enumerate(gen1()):
            yield stats.norm.logpdf(batch).sum() * 10

    expected = reference_logp()
    # Two models observing same-sized streams, scaled to total_size=100.
    with pm.Model() as model1:
        Normal("n", observed=gen1(), total_size=100)
        logp1 = aesara.function([], model1.logpt)
    with pm.Model() as model2:
        Normal("n", observed=generator(gen2()), total_size=100)
        logp2 = aesara.function([], model2.logpt)
    tolerance = select_by_precision(float64=7, float32=2)
    for _ in range(10):
        v1, v2, ref = logp1(), logp2(), next(expected)
        np.testing.assert_almost_equal(v1, ref, decimal=tolerance)  # Value O(-50,000)
        np.testing.assert_almost_equal(v1, v2)
def test_gen_cloning_with_shape_change(self, datagen):
    """Cloning a scan over a generator with a differently sized replacement updates shapes."""
    gen = generator(datagen)
    # Random projection sized from the generator's own (symbolic) shape.
    projection = tt_rng().normal(size=gen.shape).T
    X = gen.dot(projection)
    row_sums, _ = theano.scan(lambda row: row.sum(), X, n_steps=X.shape[0])
    assert row_sums.eval().shape == (50,)
    # Swap the generator for shared data with a different leading dimension;
    # the cloned graph should follow the new shape.
    shared_data = theano.shared(datagen.data.astype(gen.dtype))
    recloned = theano.clone(row_sums, {gen: shared_data ** 2})
    assert recloned.eval().shape == (1000,)
def test_default_value(self):
    """After exhaustion the generator op yields its default; bad defaults are rejected."""

    def two_batches():
        for i in range(2):
            yield floatX(np.ones((10, 10)) * i)

    gen_op = generator(two_batches(), np.ones((10, 10)) * 10)
    draw = theano.function([], gen_op)
    np.testing.assert_equal(np.ones((10, 10)) * 0, draw())
    np.testing.assert_equal(np.ones((10, 10)) * 1, draw())
    # Generator exhausted -> the default value is returned instead of raising.
    np.testing.assert_equal(np.ones((10, 10)) * 10, draw())
    # A scalar default raises — presumably it must match the op's tensor
    # type/shape; confirm against the generator op implementation.
    with pytest.raises(ValueError):
        gen_op.set_default(1)
def test_set_gen_and_exc(self):
    """Exhaustion without a default raises StopIteration; set_gen resets the stream."""

    def two_batches():
        for i in range(2):
            yield floatX(np.ones((10, 10)) * i)

    gen_op = generator(two_batches())
    draw = theano.function([], gen_op)
    np.testing.assert_equal(np.ones((10, 10)) * 0, draw())
    np.testing.assert_equal(np.ones((10, 10)) * 1, draw())
    # No default supplied, so exhaustion propagates StopIteration to the caller.
    with pytest.raises(StopIteration):
        draw()
    # Installing a fresh generator makes the compiled function usable again.
    gen_op.set_gen(two_batches())
    np.testing.assert_equal(np.ones((10, 10)) * 0, draw())
    np.testing.assert_equal(np.ones((10, 10)) * 1, draw())
def test_gradient_with_scaling(self):
    """Gradient under total_size scaling equals the gradient on equivalently sized full data."""
    # NOTE(review): another definition with this exact name appears later in
    # this source; if both are methods of one class, the later one shadows this.
    with pm.Model() as minibatch_model:
        stream = generator(gen1())
        mu = Normal('m')
        Normal('n', observed=stream, total_size=1000)
        scaled_grad = theano.function([mu], tt.grad(minibatch_model.logpt, mu))
    with pm.Model() as full_model:
        mu = Normal('m')
        full_data = theano.shared(np.ones((1000, 100)))
        Normal('n', observed=full_data)
        full_grad = theano.function([mu], tt.grad(full_model.logpt, mu))
    for step in range(10):
        # Feed the full-data model the same values the generator will produce.
        full_data.set_value(np.ones((100, 100)) * step)
        g_scaled = scaled_grad(1)
        g_full = full_grad(1)
        np.testing.assert_almost_equal(g_scaled, g_full)
def test_gradient_with_scaling(self):
    """Gradient under total_size scaling equals the gradient on equivalently sized full data."""
    # NOTE(review): this redefines a test of the same name that appears earlier
    # in this source; if both live in one class, this later one wins.
    with pm.Model() as minibatch_model:
        stream = generator(gen1())
        mu = Normal("m")
        Normal("n", observed=stream, total_size=1000)
        scaled_grad = aesara.function(
            [mu.tag.value_var], at.grad(minibatch_model.logpt, mu.tag.value_var)
        )
    with pm.Model() as full_model:
        mu = Normal("m")
        full_data = aesara.shared(np.ones((1000, 100)))
        Normal("n", observed=full_data)
        full_grad = aesara.function(
            [mu.tag.value_var], at.grad(full_model.logpt, mu.tag.value_var)
        )
    for step in range(10):
        # Feed the full-data model the same values the generator will produce.
        full_data.set_value(np.ones((100, 100)) * step)
        g_scaled = scaled_grad(1)
        g_full = full_grad(1)
        np.testing.assert_almost_equal(g_scaled, g_full)
def test_density_scaling_with_genarator(self):
    """total_size-scaled logp of generator-observed models matches the analytic density."""
    # NOTE(review): "genarator" looks like a typo for "generator"; an
    # identically named definition appears earlier in this source — if both
    # are methods of one class, this later one shadows the earlier.

    def reference_logp():
        # Analytic normal log-density, scaled by the same factor the
        # total_size/minibatch ratio implies (presumably 100/10 -> 10).
        for _, batch in enumerate(gen1()):
            yield stats.norm.logpdf(batch).sum() * 10

    expected = reference_logp()
    # Two models observing same-sized streams, scaled to total_size=100.
    with pm.Model() as model1:
        Normal('n', observed=gen1(), total_size=100)
        logp1 = theano.function([], model1.logpt)
    with pm.Model() as model2:
        Normal('n', observed=generator(gen2()), total_size=100)
        logp2 = theano.function([], model2.logpt)
    tolerance = select_by_precision(float64=7, float32=2)
    for _ in range(10):
        v1, v2, ref = logp1(), logp2(), next(expected)
        np.testing.assert_almost_equal(v1, ref, decimal=tolerance)  # Value O(-50,000)
        np.testing.assert_almost_equal(v1, v2)
def test_pickling(self, datagen):
    """Generator ops over picklable data survive a round-trip; raw generators cannot."""
    round_trippable = generator(datagen)
    pickle.loads(pickle.dumps(round_trippable))
    # A plain Python generator has no picklable state, so pickling the op
    # built from one must fail (any exception type is accepted here).
    unpicklable = generator(integers())
    with pytest.raises(Exception):
        pickle.dumps(unpicklable)