def check_l(m, k=0):
    m_symb = matrix(dtype=m.dtype)
    k_symb = iscalar()
    f = aesara.function([m_symb, k_symb], aet.tril(m_symb, k_symb), mode=mode_with_gpu)
    result = f(m, k)
    assert np.allclose(result, np.tril(m, k))
    assert result.dtype == np.dtype(m.dtype)
    assert any(isinstance(node.op, GpuTri) for node in f.maker.fgraph.toposort())

def test_RandomVariable_bcast():
    rv = RandomVariable("normal", 0, [0, 0], config.floatX, inplace=True)

    mu = tensor(config.floatX, [True, False, False])
    mu.tag.test_value = np.zeros((1, 2, 3)).astype(config.floatX)
    sd = tensor(config.floatX, [False, False])
    sd.tag.test_value = np.ones((2, 3)).astype(config.floatX)

    s1 = iscalar()
    s1.tag.test_value = 1
    s2 = iscalar()
    s2.tag.test_value = 2
    s3 = iscalar()
    s3.tag.test_value = 3
    s3 = Assert("testing")(s3, eq(s1, 1))

    res = rv.compute_bcast([mu, sd], (s1, s2, s3))
    assert res == [False] * 3

    size = aet.as_tensor((1, 2, 3), dtype=np.int32).astype(np.int64)
    res = rv.compute_bcast([mu, sd], size)
    assert res == [True, False, False]

def test_remove_useless_inputs2(self):
    x1 = vector("x1")
    x2 = vector("x2")
    y1 = vector("y1")
    y2 = vector("y2")
    c = iscalar("c")
    z = ifelse(c, (x1, x1, x1, x2, x2), (y1, y1, y2, y2, y2))
    f = function([c, x1, x2, y1, y2], z)

    ifnode = [x for x in f.maker.fgraph.toposort() if isinstance(x.op, IfElse)][0]
    assert len(ifnode.outputs) == 3

def test_broadcast_mismatch(self):
    rng = np.random.RandomState(utt.fetch_seed())
    data = rng.rand(5).astype(self.dtype)
    x = self.shared(data)
    y = row("y", self.dtype)
    cond = iscalar("cond")
    with pytest.raises(TypeError):
        ifelse(cond, x, y)
    with pytest.raises(TypeError):
        ifelse(cond, y, x)

def test_cpu_contiguous():
    a = fmatrix("a")
    i = iscalar("i")
    a_val = np.asarray(np.random.random((4, 5)), dtype="float32")
    f = aesara.function([a, i], cpu_contiguous(a.reshape((5, 4))[::i]))
    topo = f.maker.fgraph.toposort()
    assert any(isinstance(node.op, CpuContiguous) for node in topo)
    assert f(a_val, 1).flags["C_CONTIGUOUS"]
    assert f(a_val, 2).flags["C_CONTIGUOUS"]
    assert f(a_val, 3).flags["C_CONTIGUOUS"]
    # Test the grad:
    utt.verify_grad(cpu_contiguous, [np.random.random((5, 7, 2))])

def test_n_samples_1():
    p = fmatrix()
    u = fvector()
    n = iscalar()
    m = MultinomialFromUniform("auto")(p, u, n)

    f = function([p, u, n], m, allow_input_downcast=True)

    rng = np.random.default_rng(12345)
    for i in [1, 5, 10, 100, 1000, 10000]:
        uni = rng.random(2 * i).astype(config.floatX)
        res = f([[1.0, 0.0], [0.0, 1.0]], uni, i)
        utt.assert_allclose(res, [[i * 1.0, 0.0], [0.0, i * 1.0]])

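# A minimal NumPy-only sketch of the expectation asserted in the test above
# (an illustrative addition, not part of the original suite; the helper name
# is hypothetical). Each row of `p` is a categorical distribution, so with
# one-hot rows every one of the `n` draws lands in the hot column and the
# count there equals `n` exactly.
def test_n_samples_1_numpy_reference():
    def expected_counts(p, n):
        # counts scale linearly with n when each row is one-hot
        return n * np.asarray(p)

    for n in [1, 5, 10]:
        assert np.array_equal(
            expected_counts([[1.0, 0.0], [0.0, 1.0]], n),
            [[n * 1.0, 0.0], [0.0, n * 1.0]],
        )
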
def test_printing_scan():
    def f_pow2(x_tm1):
        return 2 * x_tm1

    state = scalar("state")
    n_steps = iscalar("nsteps")
    output, updates = aesara.scan(
        f_pow2, [], state, [], n_steps=n_steps, truncate_gradient=-1, go_backwards=False
    )
    f = aesara.function(
        [state, n_steps], output, updates=updates, allow_input_downcast=True
    )
    pydotprint(output, scan_graphs=True)
    pydotprint(f, scan_graphs=True)

def test_gpu_contiguous():
    a = fmatrix("a")
    i = iscalar("i")
    a_val = np.asarray(np.random.rand(4, 5), dtype="float32")
    # The reshape is needed; otherwise, the subtensor would be done on the
    # CPU to transfer less data.
    f = aesara.function(
        [a, i], gpu_contiguous(a.reshape((5, 4))[::i]), mode=mode_with_gpu
    )
    topo = f.maker.fgraph.toposort()
    assert any(isinstance(node.op, GpuSubtensor) for node in topo)
    assert any(isinstance(node.op, GpuContiguous) for node in topo)
    assert f(a_val, 1).flags.c_contiguous
    assert f(a_val, 2).flags.c_contiguous
    assert f(a_val, 3).flags.c_contiguous

def test_record_mode_good():
    # Like test_record_good, but some events are recorded by the
    # Aesara RecordMode. We don't attempt to check the
    # exact string value of the record in this case.

    # Record a sequence of events
    output = StringIO()

    recorder = Record(file_object=output, replay=False)
    record_mode = RecordMode(recorder)

    i = iscalar()
    f = function([i], i, mode=record_mode, name="f")

    num_lines = 10

    for i in range(num_lines):
        recorder.handle_line(str(i) + "\n")
        f(i)

    # Make sure that the playback functionality doesn't raise any errors
    # when we repeat them
    output_value = output.getvalue()
    output = StringIO(output_value)
    playback_checker = Record(file_object=output, replay=True)
    playback_mode = RecordMode(playback_checker)

    i = iscalar()
    f = function([i], i, mode=playback_mode, name="f")

    for i in range(num_lines):
        playback_checker.handle_line(str(i) + "\n")
        f(i)

def test_local_alloc_dimshuffle():
    alloc_dimshuffle = out2in(local_alloc_dimshuffle)

    x = vector("x")
    m = iscalar("m")

    y = x.dimshuffle("x", 0)
    out = aet.alloc(y, m, 1, x.shape[0])

    g = FunctionGraph([x, m], [out])
    alloc_dimshuffle(g)

    topo = g.toposort()
    # `toposort` returns `Apply` nodes, so the `Op` must be checked via
    # `node.op` (the original checked `isinstance(node, DimShuffle)`,
    # which is vacuously false).
    assert any(not isinstance(node.op, DimShuffle) for node in topo)

def test_dxdx():
    # Tests that the gradient of a scalar with respect to itself is 1.
    # We use an integer here because people keep changing this gradient
    # to be 0 on integers, but, according to our interpretation of the
    # gradient as defined in the Op contract, it should be 1. If you feel
    # the need to change this unit test you are probably modifying the
    # Op contract and should definitely get the approval of multiple
    # people on aesara-dev.
    x = iscalar()
    g = grad(x, x)
    g = g.eval({x: 12})
    assert np.allclose(g, 1.0)

def test_known_grads():
    # Tests that the grad method with no known_grads
    # matches what happens if you put its own known_grads
    # in for each variable

    full_range = aet.arange(10)
    x = scalar("x")
    t = iscalar("t")
    ft = full_range[t]
    ft.name = "ft"
    coeffs = vector("c")
    ct = coeffs[t]
    ct.name = "ct"
    p = x**ft
    p.name = "p"
    y = ct * p
    y.name = "y"
    cost = sqr(y)
    cost.name = "cost"

    layers = [[cost], [y], [ct, p], [ct, x, ft], [coeffs, t, full_range, x]]

    inputs = [coeffs, t, x]

    rng = np.random.default_rng([2012, 11, 15])
    values = [rng.standard_normal(10), rng.integers(10), rng.standard_normal()]
    values = [np.cast[ipt.dtype](value) for ipt, value in zip(inputs, values)]

    true_grads = grad(cost, inputs, disconnected_inputs="ignore")
    true_grads = aesara.function(inputs, true_grads)
    true_grads = true_grads(*values)

    for layer in layers:
        first = grad(cost, layer, disconnected_inputs="ignore")
        known = OrderedDict(zip(layer, first))
        full = grad(
            cost=None, known_grads=known, wrt=inputs, disconnected_inputs="ignore"
        )
        full = aesara.function(inputs, full)
        full = full(*values)
        assert len(true_grads) == len(full)
        for a, b, var in zip(true_grads, full, inputs):
            assert np.allclose(a, b)

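# A minimal sketch of the identity `test_known_grads` exercises, on a tiny
# graph (an illustrative addition; the test name is hypothetical, and only
# calls already used above are assumed): seeding `grad` with the cost's own
# gradient at an intermediate variable must reproduce the end-to-end
# gradient of the cost.
def test_known_grads_chain_sketch():
    x = scalar("x")
    y = x ** 2
    cost = 3 * y

    direct = aesara.function([x], grad(cost, x))
    # d(cost)/dy == 3; grad() then finishes the chain with d(y)/dx == 2x.
    chained = aesara.function(
        [x], grad(cost=None, known_grads={y: grad(cost, y)}, wrt=x)
    )

    assert np.allclose(direct(2.0), chained(2.0))  # both give 6 * 2 == 12
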
def test_normal_infer_shape():
    M_aet = iscalar("M")
    M_aet.tag.test_value = 3
    sd_aet = scalar("sd")
    sd_aet.tag.test_value = np.array(1.0, dtype=config.floatX)

    test_params = [
        ([aet.as_tensor_variable(np.array(1.0, dtype=config.floatX)), sd_aet], None),
        ([aet.as_tensor_variable(np.array(1.0, dtype=config.floatX)), sd_aet], (M_aet,)),
        ([aet.as_tensor_variable(np.array(1.0, dtype=config.floatX)), sd_aet], (2, M_aet)),
        ([aet.zeros((M_aet,)), sd_aet], None),
        ([aet.zeros((M_aet,)), sd_aet], (M_aet,)),
        ([aet.zeros((M_aet,)), sd_aet], (2, M_aet)),
        ([aet.zeros((M_aet,)), aet.ones((M_aet,))], None),
        ([aet.zeros((M_aet,)), aet.ones((M_aet,))], (2, M_aet)),
        (
            [
                np.array([[-1, 20], [300, -4000]], dtype=config.floatX),
                np.array([[1e-6, 2e-6]], dtype=config.floatX),
            ],
            (3, 2, 2),
        ),
        (
            [np.array([1], dtype=config.floatX), np.array([10], dtype=config.floatX)],
            (1, 2),
        ),
    ]
    for args, size in test_params:
        rv = normal(*args, size=size)
        rv_shape = tuple(normal._infer_shape(size or (), args, None))
        assert tuple(get_test_value(rv_shape)) == tuple(get_test_value(rv).shape)

def test_known_grads_integers():
    # Tests that known_grads works on integers
    x = iscalar()
    g_expected = scalar()
    g_grad = grad(cost=None, known_grads={x: g_expected}, wrt=x)
    f = aesara.function([g_expected], g_grad)

    x = -3
    gv = np.cast[config.floatX](0.6)
    g_actual = f(gv)
    assert np.allclose(g_actual, gv)

def test_infer_shape(self):
    a = dvector()
    self._compile_and_check(
        [a], [self.op(a, 16, 0)], [np.random.rand(12)], self.op_class
    )

    a = dmatrix()
    for var in [
        self.op(a, 16, 1),
        self.op(a, None, 1),
        self.op(a, 16, None),
        self.op(a, None, None),
    ]:
        self._compile_and_check([a], [var], [np.random.rand(12, 4)], self.op_class)

    b = iscalar()
    for var in [self.op(a, 16, b), self.op(a, None, b)]:
        self._compile_and_check(
            [a, b], [var], [np.random.rand(12, 4), 0], self.op_class
        )

def test_bad_shape(self):
    """Test that at run time we raise an exception when the shape
    is not the one specified."""
    specify_shape = SpecifyShape()

    x = vector()
    xval = np.random.random((2,)).astype(config.floatX)
    f = aesara.function([x], specify_shape(x, 2), mode=self.mode)
    assert np.array_equal(f(xval), xval)

    xval = np.random.random((3,)).astype(config.floatX)
    with pytest.raises(AssertionError, match="SpecifyShape:.*"):
        f(xval)

    assert isinstance(
        [n for n in f.maker.fgraph.toposort() if isinstance(n.op, SpecifyShape)][0]
        .inputs[0]
        .type,
        self.input_type,
    )

    x = matrix()
    xval = np.random.random((2, 3)).astype(config.floatX)
    f = aesara.function([x], specify_shape(x, 2, 3), mode=self.mode)
    assert isinstance(
        [n for n in f.maker.fgraph.toposort() if isinstance(n.op, SpecifyShape)][0]
        .inputs[0]
        .type,
        self.input_type,
    )
    assert np.array_equal(f(xval), xval)

    for shape_ in [(4, 3), (2, 8)]:
        xval = np.random.random(shape_).astype(config.floatX)
        with pytest.raises(AssertionError, match="SpecifyShape:.*"):
            f(xval)

    s = iscalar("s")
    f = aesara.function([x, s], specify_shape(x, None, s), mode=self.mode)
    x_val = np.zeros((3, 2), dtype=config.floatX)
    assert f(x_val, 2).shape == (3, 2)
    with pytest.raises(AssertionError, match="SpecifyShape:.*"):
        f(x_val, 3)

def test_perform(self):
    a = scalar()
    a.tag.test_value = 5

    s_1 = iscalar("s_1")
    s_1.tag.test_value = 4

    shape = (s_1, 1)

    bcast_res = broadcast_to(a, shape)
    assert bcast_res.broadcastable == (False, True)

    bcast_np = np.broadcast_to(5, (4, 1))
    bcast_aet = bcast_res.get_test_value()

    assert np.array_equal(bcast_aet, bcast_np)
    assert np.shares_memory(bcast_aet, a.get_test_value())

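# A NumPy-only sketch of the memory-sharing behavior asserted above (an
# illustrative addition, not from the original suite): `np.broadcast_to`
# returns a read-only view, so the broadcasted result shares memory with
# its base rather than copying it.
def test_broadcast_to_numpy_reference():
    base = np.array(5.0)
    view = np.broadcast_to(base, (4, 1))
    assert view.shape == (4, 1)
    assert np.shares_memory(view, base)
    assert not view.flags.writeable
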
def test_python_perform(self):
    """Test the Python `Op.perform` implementation."""
    x = scalar()
    s = as_tensor_variable([], dtype=np.int32)
    y = specify_shape(x, s)
    f = aesara.function([x], y, mode=Mode("py"))
    assert f(12) == 12

    x = vector()
    s1 = iscalar()
    shape = as_tensor_variable([s1])
    y = specify_shape(x, shape)
    f = aesara.function([x, shape], y, mode=Mode("py"))
    assert f([1], (1,)) == [1]

    with pytest.raises(AssertionError, match="SpecifyShape:.*"):
        assert f([1], (2,)) == [1]

def test_infer_shape(self, test_offset):
    rng = np.random.default_rng(43)

    x = dmatrix()
    y = dscalar()
    z = iscalar()

    self._compile_and_check(
        [x, y, z],
        [self.op(x, y, z)],
        [rng.random((8, 5)), rng.random(), test_offset],
        self.op_class,
    )
    self._compile_and_check(
        [x, y, z],
        [self.op(x, y, z)],
        [rng.random((5, 8)), rng.random(), test_offset],
        self.op_class,
    )

def test_dirichlet_infer_shape():
    M_aet = iscalar("M")
    M_aet.tag.test_value = 3

    test_params = [
        ([aet.ones((M_aet,))], None),
        ([aet.ones((M_aet,))], (M_aet + 1,)),
        ([aet.ones((M_aet,))], (2, M_aet)),
        ([aet.ones((M_aet, M_aet + 1))], None),
        ([aet.ones((M_aet, M_aet + 1))], (M_aet + 2,)),
        ([aet.ones((M_aet, M_aet + 1))], (2, M_aet + 2, M_aet + 3)),
    ]
    for args, size in test_params:
        rv = dirichlet(*args, size=size)
        rv_shape = tuple(dirichlet._infer_shape(size or (), args, None))
        assert tuple(get_test_value(rv_shape)) == tuple(get_test_value(rv).shape)

def test_infer_shape(self):
    x = dmatrix()
    y = dscalar()
    z = iscalar()

    for test_offset in (-5, -4, -1, 0, 1, 4, 5):
        self._compile_and_check(
            [x, y, z],
            [self.op(x, y, z)],
            [np.random.random((8, 5)), np.random.random(), test_offset],
            self.op_class,
        )
        self._compile_and_check(
            [x, y, z],
            [self.op(x, y, z)],
            [np.random.random((5, 8)), np.random.random(), test_offset],
            self.op_class,
        )

def test_local_dimshuffle_subtensor():
    dimshuffle_subtensor = out2in(local_dimshuffle_subtensor)

    x = dtensor4("x")
    x = aet.patternbroadcast(x, (False, True, False, False))
    i = iscalar("i")

    out = x[:, :, 10:30, ::i].dimshuffle(0, 2, 3)

    g = FunctionGraph([x, i], [out])
    dimshuffle_subtensor(g)

    topo = g.toposort()
    # `toposort` returns `Apply` nodes, so the `Op` must be checked via
    # `node.op` (the original checked `isinstance(node, DimShuffle)`,
    # which is vacuously false).
    assert any(not isinstance(node.op, DimShuffle) for node in topo)

    # Test that the rewrite handles dimensions the subtensor doesn't "see".
    x = tensor(broadcastable=(False, True, False), dtype="float64")
    out = x[i].dimshuffle(1)

    g = FunctionGraph([x, i], [out])
    dimshuffle_subtensor(g)

    topo = g.toposort()
    assert any(not isinstance(node.op, DimShuffle) for node in topo)

    # Test dimensions the subtensor doesn't "see", with other
    # dimensions in between them.
    x = tensor(broadcastable=(False, True, False, True), dtype="float64")
    out = x[i].dimshuffle(1)

    f = aesara.function([x, i], out)

    topo = f.maker.fgraph.toposort()
    assert any(not isinstance(node.op, DimShuffle) for node in topo)
    assert f(np.random.rand(5, 1, 4, 1), 2).shape == (4,)

    # Test a corner case that previously triggered a bug in Aesara.
    x = dtensor4("x")
    x = aet.patternbroadcast(x, (False, True, False, False))
    assert x[:, :, 0:3, ::-1].dimshuffle(0, 2, 3).eval(
        {x: np.ones((5, 1, 6, 7))}
    ).shape == (5, 3, 7)

def test_perform(self, test_offset, shp):
    rng = np.random.default_rng(43)

    x = matrix()
    y = scalar()
    z = iscalar()

    f = function([x, y, z], fill_diagonal_offset(x, y, z))
    a = rng.random(shp).astype(config.floatX)
    val = np.cast[config.floatX](rng.random())
    out = f(a, val, test_offset)
    # We can't use np.fill_diagonal as it is bugged.
    assert np.allclose(np.diag(out, test_offset), val)
    if test_offset >= 0:
        assert (out == val).sum() == min(min(a.shape), a.shape[1] - test_offset)
    else:
        assert (out == val).sum() == min(min(a.shape), a.shape[0] + test_offset)

def test_perform(self):
    x = matrix()
    y = scalar()
    z = iscalar()

    f = function([x, y, z], fill_diagonal_offset(x, y, z))
    for test_offset in (-5, -4, -1, 0, 1, 4, 5):
        for shp in [(8, 8), (5, 8), (8, 5), (5, 5)]:
            a = np.random.rand(*shp).astype(config.floatX)
            val = np.cast[config.floatX](np.random.rand())
            out = f(a, val, test_offset)
            # We can't use np.fill_diagonal as it is bugged.
            assert np.allclose(np.diag(out, test_offset), val)
            if test_offset >= 0:
                assert (out == val).sum() == min(
                    min(a.shape), a.shape[1] - test_offset
                )
            else:
                assert (out == val).sum() == min(
                    min(a.shape), a.shape[0] + test_offset
                )

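# A hedged, NumPy-only reference for the offset-diagonal fill checked in the
# two tests above (an illustrative addition; `fill_diag_offset_ref` is a
# hypothetical helper, not Aesara API). It makes explicit the entry counts
# that the assertions on `(out == val).sum()` rely on.
def test_fill_diagonal_offset_numpy_reference():
    def fill_diag_offset_ref(a, val, offset):
        out = a.copy()
        if offset >= 0:
            # diagonal starts at (0, offset) and has this many entries
            n = min(a.shape[0], a.shape[1] - offset)
            for i in range(n):
                out[i, i + offset] = val
        else:
            # diagonal starts at (-offset, 0)
            n = min(a.shape[0] + offset, a.shape[1])
            for i in range(n):
                out[i - offset, i] = val
        return out

    a = np.zeros((5, 8))
    out = fill_diag_offset_ref(a, 1.0, 2)
    assert np.allclose(np.diag(out, 2), 1.0)
    assert (out == 1.0).sum() == min(min(a.shape), a.shape[1] - 2)
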
def test_normal_ShapeFeature():
    M_aet = iscalar("M")
    M_aet.tag.test_value = 3
    sd_aet = scalar("sd")
    sd_aet.tag.test_value = np.array(1.0, dtype=config.floatX)

    d_rv = normal(aet.ones((M_aet,)), sd_aet, size=(2, M_aet))
    d_rv.tag.test_value

    fg = FunctionGraph(
        [i for i in graph_inputs([d_rv]) if not isinstance(i, Constant)],
        [d_rv],
        clone=False,
        features=[ShapeFeature()],
    )
    s1, s2 = fg.shape_feature.shape_of[d_rv]

    assert get_test_value(s1) == get_test_value(d_rv).shape[0]
    assert get_test_value(s2) == get_test_value(d_rv).shape[1]

def test_fail_select_alot(self):
    # Tests that multinomial_wo_replacement fails when asked to sample
    # more elements than the actual number of elements.
    th_rng = RandomStream(12345)

    p = fmatrix()
    n = iscalar()
    m = th_rng.multinomial_wo_replacement(pvals=p, n=n)

    f = function([p, n], m, allow_input_downcast=True)

    n_elements = 100
    n_selected = 200
    np.random.seed(12345)
    pvals = np.random.randint(1, 100, (1, n_elements)).astype(config.floatX)
    pvals /= pvals.sum(1)
    with pytest.raises(ValueError):
        f(pvals, n_selected)

def test_fail_select_alot(self):
    # Tests that ChoiceFromUniform fails when asked to sample more
    # elements than the actual number of elements.
    p = fmatrix()
    u = fvector()
    n = iscalar()
    m = multinomial.ChoiceFromUniform(odtype="auto")(p, u, n)

    f = function([p, u, n], m, allow_input_downcast=True)

    n_elements = 100
    n_selected = 200
    np.random.seed(12345)
    uni = np.random.rand(n_selected).astype(config.floatX)
    pvals = np.random.randint(1, 100, (1, n_elements)).astype(config.floatX)
    pvals /= pvals.sum(1)
    with pytest.raises(ValueError):
        f(pvals, uni, n_selected)

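# NumPy analogue of the failure mode exercised by the two tests above (an
# illustrative addition, not from the original suite): sampling more items
# than exist, without replacement, raises a ValueError.
def test_fail_select_alot_numpy_analogue():
    rng = np.random.default_rng(12345)
    with pytest.raises(ValueError):
        rng.choice(100, size=200, replace=False)
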
def test_scan_err1(self):
    # This test should fail when building fx for the first time
    k = iscalar("k")
    A = matrix("A")
    k.tag.test_value = 3
    A.tag.test_value = np.random.random((5, 3)).astype(config.floatX)

    def fx(prior_result, A):
        return dot(prior_result, A)

    with pytest.raises(ValueError) as e:
        aesara.scan(
            fn=fx, outputs_info=aet.ones_like(A), non_sequences=A, n_steps=k
        )

    assert str(e.traceback[0].path).endswith("test_compute_test_value.py")
    # We should be in the "fx" function defined above
    assert e.traceback[2].name == "fx"

def test_lazy_if(self):
    # Tests that lazy if works, even if the two results have different
    # shapes but the same type (i.e. both vectors, or matrices or
    # whatnot of same dtype).
    x = vector("x", dtype=self.dtype)
    y = vector("y", dtype=self.dtype)
    c = iscalar("c")
    f = function([c, x, y], ifelse(c, x, y), mode=self.mode)
    self.assertFunctionContains1(f, self.get_ifelse(1))
    rng = np.random.RandomState(utt.fetch_seed())

    xlen = rng.randint(200)
    ylen = rng.randint(200)

    vx = np.asarray(rng.uniform(size=(xlen,)), self.dtype)
    vy = np.asarray(rng.uniform(size=(ylen,)), self.dtype)

    assert np.allclose(vx, f(1, vx, vy))
    assert np.allclose(vy, f(0, vx, vy))

def test_mvnormal_ShapeFeature():
    M_aet = iscalar("M")
    M_aet.tag.test_value = 2

    d_rv = multivariate_normal(aet.ones((M_aet,)), aet.eye(M_aet), size=2)

    fg = FunctionGraph(
        [i for i in graph_inputs([d_rv]) if not isinstance(i, Constant)],
        [d_rv],
        clone=False,
        features=[ShapeFeature()],
    )

    s1, s2 = fg.shape_feature.shape_of[d_rv]

    assert get_test_value(s1) == 2
    assert M_aet in graph_inputs([s2])

    # Test broadcasted shapes
    mean = tensor(config.floatX, [True, False])
    mean.tag.test_value = np.array([[0, 1, 2]], dtype=config.floatX)

    test_covar = np.diag(np.array([1, 10, 100], dtype=config.floatX))
    test_covar = np.stack([test_covar, test_covar * 10.0])
    cov = aet.as_tensor(test_covar).type()
    cov.tag.test_value = test_covar

    d_rv = multivariate_normal(mean, cov, size=[2, 3])

    fg = FunctionGraph(
        [i for i in graph_inputs([d_rv]) if not isinstance(i, Constant)],
        [d_rv],
        clone=False,
        features=[ShapeFeature()],
    )

    s1, s2, s3, s4 = fg.shape_feature.shape_of[d_rv]

    assert s1.get_test_value() == 2
    assert s2.get_test_value() == 3
    assert s3.get_test_value() == 2
    assert s4.get_test_value() == 3