def test_softmax_optimizations(self):
    x = matrix("x")
    one_of_n = lvector("one_of_n")
    op = crossentropy_categorical_1hot
    # xe = op(x, one_of_n)
    fgraph = FunctionGraph([x, one_of_n], [op(softmax_legacy(x), one_of_n)])
    assert fgraph.outputs[0].owner.op == op

    optdb.query(OPT_FAST_RUN).optimize(fgraph)

    assert fgraph.outputs[0].owner.op == crossentropy_softmax_argmax_1hot_with_bias

def test_neg_idx(self):
    admat = matrix()
    advec = vector()
    alvec = lvector()
    rng = np.random.default_rng(utt.fetch_seed())
    admat_val = rng.random((3, 5)).astype(config.floatX)
    advec_val = rng.random((5,)).astype(config.floatX)
    alvec_val = rng.integers(low=0, high=5, size=3)
    # a negative class index is invalid and should be rejected at runtime
    alvec_val[1] = -1
    out = CrossentropySoftmaxArgmax1HotWithBias()(admat, advec, alvec)
    f = aesara.function([admat, advec, alvec], out)
    with pytest.raises(ValueError):
        f(admat_val, advec_val, alvec_val)

def test_opt_order():
    """
    Verify that scan optimizations are applied before blas optimizations.

    Otherwise the `dot` won't be rewritten as a `Dot22`, so it will be
    slower and won't get transferred to the gpu.
    """
    x = matrix("x")
    A = matrix("A")

    z, updates = scan(dot, sequences=[], non_sequences=[x, A], n_steps=2)
    f = function([x, A], z, mode="FAST_RUN")
    topo = f.maker.fgraph.toposort()
    assert any(isinstance(node.op, Dot22) for node in topo)

    vx = np.array([[1.0, 1.0], [2.0, 2.0]], dtype=config.floatX)
    vA = np.array([[1.0, 1.0], [1.0, 0.0]], dtype=config.floatX)
    vR = np.array([[[2, 1], [4, 2]], [[2, 1], [4, 2]]], dtype=config.floatX)
    utt.assert_allclose(f(vx, vA), vR)

def test_multiple_out_crash(self):
    # This test failed up to commit 2faeb62c38
    p0 = self.shared(np.asarray(np.random.random([4, 8]), dtype=self.dtype))
    p1 = self.shared(np.asarray(np.random.random(8), dtype=self.dtype))
    p2 = self.shared(np.asarray(np.random.random([8, 3]), dtype=self.dtype))
    p3 = self.shared(np.asarray(np.random.random(3), dtype=self.dtype))
    p = [p0, p1, p2, p3]

    # in the original bug report these variables were the result of
    # applying scan
    ften0 = tensor3("ft0", dtype=self.dtype)
    fmat1 = matrix("fm1", dtype=self.dtype)
    ften2 = tensor3("ft2", dtype=self.dtype)
    fmat3 = matrix("fm3", dtype=self.dtype)

    # then only the last iteration is kept
    fsub0 = ften0[-1]
    fsub1 = fmat1[-1]
    fsub2 = ften2[-1]
    fsub3 = fmat3[-1]
    fsub = [fsub0, fsub1, fsub2, fsub3]

    acc = aet.constant(1, "int8") >= 0

    new_positions = ifelse(acc, fsub, p)

    new_updates = [(p[0], new_positions[0])]

    f = function(
        [ften0, fmat1, ften2, fmat3], [], updates=new_updates, mode=self.mode
    )
    self.assertFunctionContains1(f, self.get_ifelse(4))

    i1 = np.asarray(np.random.random([19, 4, 8]), dtype=self.dtype)
    i2 = np.asarray(np.random.random([19, 8]), dtype=self.dtype)
    i3 = np.asarray(np.random.random([19, 8, 3]), dtype=self.dtype)
    i4 = np.asarray(np.random.random([19, 3]), dtype=self.dtype)

    f(i1, i2, i3, i4)

def make_node(self, A):
    A = as_tensor_variable(A)
    assert A.ndim == 2
    expm = matrix(dtype=A.dtype)
    return Apply(self, [A], [expm])

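# A minimal sketch, assuming SciPy is available, of a ``perform`` method
# matching the ``make_node`` above; this is an illustration, not necessarily
# the op's actual implementation. The single output storage cell receives
# the dense matrix exponential from ``scipy.linalg.expm``.
import scipy.linalg

def perform(self, node, inputs, outputs):
    (A,) = inputs
    (expm,) = outputs
    # scipy.linalg.expm computes e**A for a square array
    expm[0] = scipy.linalg.expm(A)
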
def run_gpu_solve(self, A_val, x_val, A_struct=None):
    b_val = np.dot(A_val, x_val)
    b_val_trans = np.dot(A_val.T, x_val)
    A = matrix("A", dtype="float32")
    b = matrix("b", dtype="float32")
    b_trans = matrix("b_trans", dtype="float32")

    if A_struct is None:
        solver = gpu_solve(A, b)
        solver_trans = gpu_solve(A, b_trans, trans="T")
    else:
        solver = gpu_solve(A, b, A_struct)
        solver_trans = gpu_solve(A, b_trans, A_struct, trans="T")

    fn = aesara.function(
        [A, b, b_trans], [solver, solver_trans], mode=mode_with_gpu
    )
    res = fn(A_val, b_val, b_val_trans)
    x_res = np.array(res[0])
    x_res_trans = np.array(res[1])
    utt.assert_allclose(x_val, x_res)
    utt.assert_allclose(x_val, x_res_trans)

def test_op(self, axis, cond, shape):
    cond_var = ivector()
    data = np.random.random(size=shape).astype(config.floatX)
    data_var = matrix()

    f = aesara.function(
        [cond_var, data_var], self.op(cond_var, data_var, axis=axis)
    )

    expected = np.compress(cond, data, axis=axis)
    tested = f(cond, data)

    assert tested.shape == expected.shape
    assert np.allclose(tested, expected)

def test_correctness(self, lower):
    rng = np.random.default_rng(utt.fetch_seed())
    b_val = np.asarray(rng.random((5, 1)), dtype=config.floatX)
    A_val = np.asarray(rng.random((5, 5)), dtype=config.floatX)
    # make A symmetric positive definite so the Cholesky factor exists
    A_val = np.dot(A_val.transpose(), A_val)
    C_val = scipy.linalg.cholesky(A_val, lower=lower)

    A = matrix()
    b = matrix()
    cholesky = Cholesky(lower=lower)
    C = cholesky(A)
    y_lower = solve_triangular(C, b, lower=lower)
    lower_solve_func = aesara.function([C, b], y_lower)

    assert np.allclose(
        scipy.linalg.solve_triangular(C_val, b_val, lower=lower),
        lower_solve_func(C_val, b_val),
    )

def check_l(m, k=0):
    m_symb = matrix(dtype=m.dtype)
    k_symb = iscalar()
    f = aesara.function(
        [m_symb, k_symb], aet.tril(m_symb, k_symb), mode=mode_with_gpu
    )
    result = f(m, k)
    assert np.allclose(result, np.tril(m, k))
    assert result.dtype == np.dtype(dtype)
    assert any(isinstance(node.op, GpuTri) for node in f.maker.fgraph.toposort())

def test_check_inputs(self):
    with pytest.raises(AssertionError, match="must be an integer type"):
        specify_shape([[1, 2, 3], [4, 5, 6]], (2.2, 3))

    specify_shape([[1, 2, 3], [4, 5, 6]], (2, 3))

    # Incompatible dimensionality is detected right away
    with pytest.raises(AssertionError, match="will never match"):
        specify_shape(matrix(), [4])

def test_string_var(self):
    x = matrix("x")
    x.tag.test_value = np.random.random((3, 4)).astype(config.floatX)
    y = matrix("y")
    y.tag.test_value = np.random.random((4, 5)).astype(config.floatX)
    z = aesara.shared(np.random.random((5, 6)).astype(config.floatX))

    # should work
    out = dot(dot(x, y), z)
    assert hasattr(out.tag, "test_value")
    tf = aesara.function([x, y], out)
    assert _allclose(tf(x.tag.test_value, y.tag.test_value), out.tag.test_value)

    def f(x, y, z):
        return dot(dot(x, y), z)

    # this test should fail
    z.set_value(np.random.random((7, 6)).astype(config.floatX))
    with pytest.raises(ValueError):
        f(x, y, z)

def test_pushout_seqs2(self):
    x = matrix()
    outputs, updates = scan(
        lambda x: [x * x, at.constant(0).copy().copy()],
        n_steps=2,
        sequences=[],
        non_sequences=[],
        outputs_info=[x, None],
    )

    # Compile an Aesara function where any optimization error will lead to
    # an exception being raised
    function([x], outputs, updates=updates)

def test_non_square_matrix(self):
    A = matrix("A", dtype=config.floatX)
    Q = matrix_power(A, 3)
    f = function([A], [Q])
    # a non-square (3, 2) input cannot be raised to a matrix power
    a = np.array(
        [
            [0.47497769, 0.81869379],
            [0.74387558, 0.31780172],
            [0.54381007, 0.28153101],
        ]
    ).astype(config.floatX)
    with pytest.raises(ValueError):
        f(a)

def make_node(self, x):
    x = as_tensor_variable(x)
    assert x.ndim == 2, "The input of qr function should be a matrix."

    in_dtype = x.type.numpy_dtype
    out_dtype = np.dtype(f"f{in_dtype.itemsize}")

    if self.mode != "raw":
        r = matrix(dtype=out_dtype)
    else:
        r = vector(dtype=out_dtype)

    if self.mode != "r":
        q = matrix(dtype=out_dtype)
        outputs = [q, r]
    else:
        outputs = [r]

    return Apply(self, [x], outputs)

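# A hedged sketch of the corresponding ``perform`` method, assuming NumPy's
# ``np.linalg.qr``; illustration only, not necessarily the op's actual code.
# For every mode except "r" NumPy returns a pair ((q, r), or (h, tau) in
# "raw" mode, where tau is a vector), which lines up with the one- or
# two-output graph built by ``make_node`` above.
def perform(self, node, inputs, outputs):
    (x,) = inputs
    res = np.linalg.qr(x, self.mode)
    if self.mode != "r":
        # "reduced"/"complete" give (q, r); "raw" gives (h, tau)
        outputs[0][0], outputs[1][0] = res
    else:
        outputs[0][0] = res
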
def test_bad_number_of_shape(self):
    # Test that the number of dimensions provided is good
    specify_shape = SpecifyShape()

    x = vector()
    shape_vec = ivector()
    xval = np.random.random((2,)).astype(config.floatX)
    with pytest.raises(AssertionError, match="will never match"):
        specify_shape(x, [])
    with pytest.raises(AssertionError, match="will never match"):
        specify_shape(x, [2, 2])

    f = aesara.function([x, shape_vec], specify_shape(x, shape_vec), mode=self.mode)
    assert isinstance(
        [n for n in f.maker.fgraph.toposort() if isinstance(n.op, SpecifyShape)][0]
        .inputs[0]
        .type,
        self.input_type,
    )
    expected = r"(Got 1 dimensions \(shape \(2,\)\), expected 0 dimensions with shape \(\).)"
    expected += r"|(Got 1 dimensions, expected 0 dimensions.)"
    with pytest.raises(AssertionError, match=expected):
        f(xval, [])
    expected = r"(Got 1 dimensions \(shape \(2,\)\), expected 2 dimensions with shape \(2, 2\).)"
    expected += r"|(SpecifyShape: Got 1 dimensions, expected 2 dimensions.)"
    with pytest.raises(AssertionError, match=expected):
        f(xval, [2, 2])

    x = matrix()
    xval = np.random.random((2, 3)).astype(config.floatX)
    for shape_ in [(), (1,), (2, 3, 4)]:
        with pytest.raises(AssertionError, match="will never match"):
            specify_shape(x, shape_)

        f = aesara.function(
            [x, shape_vec], specify_shape(x, shape_vec), mode=self.mode
        )
        assert isinstance(
            [
                n
                for n in f.maker.fgraph.toposort()
                if isinstance(n.op, SpecifyShape)
            ][0]
            .inputs[0]
            .type,
            self.input_type,
        )
        s_exp = str(shape_).replace("(", r"\(").replace(")", r"\)")
        expected = rf"(Got 2 dimensions \(shape \(2, 3\)\), expected {len(shape_)} dimensions with shape {s_exp}.)"
        expected += rf"|(SpecifyShape: Got 2 dimensions, expected {len(shape_)} dimensions.)"
        with pytest.raises(AssertionError, match=expected):
            f(xval, shape_)

def test_jax_CAReduce():
    a_aet = vector("a")
    a_aet.tag.test_value = np.r_[1, 2, 3].astype(config.floatX)

    x = aet_sum(a_aet, axis=None)
    x_fg = FunctionGraph([a_aet], [x])

    compare_jax_and_py(x_fg, [np.r_[1, 2, 3].astype(config.floatX)])

    a_aet = matrix("a")
    a_aet.tag.test_value = np.c_[[1, 2, 3], [1, 2, 3]].astype(config.floatX)

    x = aet_sum(a_aet, axis=0)
    x_fg = FunctionGraph([a_aet], [x])

    compare_jax_and_py(x_fg, [np.c_[[1, 2, 3], [1, 2, 3]].astype(config.floatX)])

    x = aet_sum(a_aet, axis=1)
    x_fg = FunctionGraph([a_aet], [x])

    compare_jax_and_py(x_fg, [np.c_[[1, 2, 3], [1, 2, 3]].astype(config.floatX)])

    a_aet = matrix("a")
    a_aet.tag.test_value = np.c_[[1, 2, 3], [1, 2, 3]].astype(config.floatX)

    x = prod(a_aet, axis=0)
    x_fg = FunctionGraph([a_aet], [x])

    compare_jax_and_py(x_fg, [np.c_[[1, 2, 3], [1, 2, 3]].astype(config.floatX)])

    x = aet_all(a_aet)
    x_fg = FunctionGraph([a_aet], [x])

    compare_jax_and_py(x_fg, [np.c_[[1, 2, 3], [1, 2, 3]].astype(config.floatX)])

def test_interface(self):
    mySymbolicMatricesList = TypedListType(
        TensorType(aesara.config.floatX, (False, False))
    )()
    myMatrix = matrix()

    z = mySymbolicMatricesList.count(myMatrix)

    f = aesara.function([mySymbolicMatricesList, myMatrix], z)

    x = rand_ranged_matrix(-1000, 1000, [100, 101])
    y = rand_ranged_matrix(-1000, 1000, [100, 101])

    assert f([x, y], y) == 1

def test_sanity_check(self):
    mySymbolicMatricesList = TypedListType(
        TensorType(aesara.config.floatX, (False, False))
    )()
    myMatrix = matrix()

    z = Count()(mySymbolicMatricesList, myMatrix)

    f = aesara.function([mySymbolicMatricesList, myMatrix], z)

    x = rand_ranged_matrix(-1000, 1000, [100, 101])
    y = rand_ranged_matrix(-1000, 1000, [100, 101])

    assert f([y, y, x, y], y) == 3

def test_sanity_check(self):
    mySymbolicMatricesList = TypedListType(
        TensorType(aesara.config.floatX, (False, False))
    )()
    myMatrix = matrix()

    z = Remove()(mySymbolicMatricesList, myMatrix)

    f = aesara.function([mySymbolicMatricesList, myMatrix], z)

    x = rand_ranged_matrix(-1000, 1000, [100, 101])
    y = rand_ranged_matrix(-1000, 1000, [100, 101])

    assert np.array_equal(f([x, y], y), [x])

def test_interfaces(self):
    mySymbolicMatricesList = TypedListType(
        TensorType(aesara.config.floatX, (False, False))
    )()
    myMatrix = matrix()

    z = mySymbolicMatricesList.append(myMatrix)

    f = aesara.function([mySymbolicMatricesList, myMatrix], z)

    x = rand_ranged_matrix(-1000, 1000, [100, 101])
    y = rand_ranged_matrix(-1000, 1000, [100, 101])

    assert np.array_equal(f([x], y), [x, y])

def test_sort(self):
    x = matrix()
    self._compile_and_check(
        [x],
        [sort(x)],
        [np.random.randn(10, 40).astype(aesara.config.floatX)],
        SortOp,
    )
    self._compile_and_check(
        [x],
        [sort(x, axis=None)],
        [np.random.randn(10, 40).astype(aesara.config.floatX)],
        SortOp,
    )

def make_node(self, x, y, rcond):
    x = as_tensor_variable(x)
    y = as_tensor_variable(y)
    rcond = as_tensor_variable(rcond)
    return Apply(
        self,
        [x, y, rcond],
        [matrix(), dvector(), lscalar(), dvector()],
    )

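# A minimal sketch, assuming NumPy's ``np.linalg.lstsq``, of a ``perform``
# method consistent with the outputs declared above; illustration only, not
# necessarily the op's actual implementation. ``np.linalg.lstsq`` returns
# (solution, residuals, rank, singular values), matching the
# (matrix, dvector, lscalar, dvector) output types.
def perform(self, node, inputs, outputs):
    x, y, rcond = inputs
    solution, residuals, rank, sv = np.linalg.lstsq(x, y, rcond)
    outputs[0][0] = solution
    outputs[1][0] = residuals
    outputs[2][0] = rank
    outputs[3][0] = sv
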
def test_multiple_outputs(self):
    m = matrix("m")
    v = vector("v")
    m_ = matrix("m_")
    v_ = vector("v_")

    mval = self.rng.uniform(size=(3, 7)).astype(aesara.config.floatX)
    vval = self.rng.uniform(size=(7,)).astype(aesara.config.floatX)
    m_val = self.rng.uniform(size=(3, 7)).astype(aesara.config.floatX)
    v_val = self.rng.uniform(size=(7,)).astype(aesara.config.floatX)

    rop_out1 = Rop([m, v, m + v], [m, v], [m_, v_])
    assert isinstance(rop_out1, list)
    assert len(rop_out1) == 3
    rop_out2 = Rop((m, v, m + v), [m, v], [m_, v_])
    assert isinstance(rop_out2, tuple)
    assert len(rop_out2) == 3

    all_outs = []
    for o in rop_out1, rop_out2:
        all_outs.extend(o)
    f = aesara.function([m, v, m_, v_], all_outs)
    f(mval, vval, m_val, v_val)

def test_local_reshape_dimshuffle():
    reshape_dimshuffle = out2in(local_reshape_dimshuffle)

    x = matrix("x")

    y = x.dimshuffle("x", 0, "x", 1)
    out = reshape(y, (1, x.shape[0] * x.shape[1], 1))

    g = FunctionGraph([x], [out])
    reshape_dimshuffle(g)

    # the rewrite should have folded the DimShuffle into the Reshape
    topo = g.toposort()
    assert not any(isinstance(node.op, DimShuffle) for node in topo)

def test_Nin_Nout(self):
    # Test that grad is called correctly for a many-to-many op
    gval0 = matrix()
    gval1 = matrix()

    class TestOp(Op):
        __props__ = ()

        def make_node(self):
            inputs = [matrix(), matrix()]
            outputs = [matrix(), matrix()]
            return Apply(self, inputs, outputs)

        def grad(self, inp, grads):
            return gval0, gval1

        def perform(self, *args, **kwargs):
            raise NotImplementedError()

    a1 = TestOp().make_node()
    g = grad_sources_inputs([(a1.outputs[0], one)], None)
    assert g[a1.inputs[0]] is gval0
    assert g[a1.inputs[1]] is gval1

def test_infer_shape(self):
    admat = matrix()
    advec = vector()
    alvec = lvector()
    rng = np.random.default_rng(utt.fetch_seed())
    admat_val = rng.random((3, 5)).astype(config.floatX)
    advec_val = rng.random((5,)).astype(config.floatX)
    alvec_val = rng.integers(low=0, high=5, size=3)
    self._compile_and_check(
        [admat, advec, alvec],
        CrossentropySoftmaxArgmax1HotWithBias()(admat, advec, alvec),
        [admat_val, advec_val, alvec_val],
        CrossentropySoftmaxArgmax1HotWithBias,
    )

def test_elu():
    x = matrix("x")
    seed = utt.fetch_seed()
    rng = np.random.default_rng(seed)
    X = rng.standard_normal((20, 30)).astype(config.floatX)

    # test the base case, without custom alpha value
    y = elu(x).eval({x: X})
    utt.assert_allclose(y, np.where(X > 0, X, np.exp(X) - 1))

    # test for different constant alpha values
    for alpha in 1.5, 2, -1, -1.5, -2:
        y = elu(x, alpha).eval({x: X})
        utt.assert_allclose(y, np.where(X > 0, X, alpha * (np.exp(X) - 1)))

def test_local_sampling_dot_csr():
    mode = get_default_mode()
    mode = mode.including("specialize", "local_sampling_dot_csr")

    for sp_format in ["csr"]:  # Not implemented for other formats
        inputs = [
            matrix(),
            matrix(),
            getattr(aesara.sparse, sp_format + "_matrix")(),
        ]

        f = aesara.function(inputs, sparse.sampling_dot(*inputs), mode=mode)

        if aesara.config.blas__ldflags:
            assert not any(
                isinstance(node.op, sparse.SamplingDot)
                for node in f.maker.fgraph.toposort()
            )
        else:
            # SamplingDotCSR's C implementation needs blas, so it should not
            # be inserted
            assert not any(
                isinstance(node.op, sparse.opt.SamplingDotCSR)
                for node in f.maker.fgraph.toposort()
            )

def test_infer_shape(self):
    advec = vector()
    admat = matrix()
    alvec = lvector()
    rng = np.random.default_rng(utt.fetch_seed())
    advec_val = rng.random((3,)).astype(config.floatX)
    admat_val = rng.random((3, 2)).astype(config.floatX)
    alvec_val = [0, 1, 0]
    self._compile_and_check(
        [advec, admat, alvec],
        [CrossentropyCategorical1HotGrad()(advec, admat, alvec)],
        [advec_val, admat_val, alvec_val],
        CrossentropyCategorical1HotGrad,
    )

def test_pushout(self):
    W1 = matrix("W1")
    W2 = matrix("W2")
    h0 = vector("h0")

    def lambda_fn(h, W1, W2):
        return dot(h, W1 + W2)

    o, _ = scan(lambda_fn, outputs_info=h0, non_sequences=[W1, W2], n_steps=5)

    f = function([h0, W1, W2], o, mode=self.mode)

    scan_node = [x for x in f.maker.fgraph.toposort() if isinstance(x.op, Scan)][0]
    # the W1 + W2 elemwise should have been pushed out of the scan body
    assert not any(
        isinstance(node.op, Elemwise)
        for node in scan_node.op.fn.maker.fgraph.toposort()
    )