def test_compute_flag(self):
    x = tt.matrix("x")
    y = tt.matrix("y")
    y.tag.test_value = np.random.rand(4, 5).astype(config.floatX)

    # "off": computation of the test value should be skipped
    aesara.config.compute_test_value = "off"
    z = tt.dot(x, y)
    assert not hasattr(z.tag, "test_value")

    # "raise": should fail, because `x` has no test value
    aesara.config.compute_test_value = "raise"
    with pytest.raises(ValueError):
        tt.dot(x, y)

    # "warn": a UserWarning should be raised
    aesara.config.compute_test_value = "warn"
    with warnings.catch_warnings():
        # Turn the warning into an error so `pytest.raises` can catch it;
        # `catch_warnings` restores the previous filters on exit.
        warnings.simplefilter("error", UserWarning)
        with pytest.raises(UserWarning):
            tt.dot(x, y)
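# A minimal sketch (not part of the original test) of the same flag handling
# with a context manager; it assumes `aesara.config.change_flags` is
# available, which restores the previous flag value automatically so no
# manual cleanup is needed:
def _example_compute_test_value_context():
    x = tt.matrix("x")
    y = tt.matrix("y")
    y.tag.test_value = np.random.rand(4, 5).astype(config.floatX)
    with aesara.config.change_flags(compute_test_value="off"):
        # The test value of `tt.dot(x, y)` is not computed here.
        z = tt.dot(x, y)
        assert not hasattr(z.tag, "test_value")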
def test_both_assert_merge_2(self):
    # Merge two nodes, each behind an `Assert` on a different input.
    x1 = tt.matrix("x1")
    x2 = tt.matrix("x2")
    x3 = tt.matrix("x3")
    e = tt.dot(tt.opt.assert_op(x1, (x1 > x3).all()), x2) + tt.dot(
        x1, tt.opt.assert_op(x2, (x2 > x3).all())
    )
    g = FunctionGraph([x1, x2, x3], [e])
    MergeOptimizer().optimize(g)
    strg = aesara.printing.debugprint(g, file="str")
    strref = """Elemwise{add,no_inplace} [id A] ''   7
 |dot [id B] ''   6
 | |Assert{msg='Aesara Assert failed!'} [id C] ''   5
 | | |x1 [id D]
 | | |All [id E] ''   3
 | |   |Elemwise{gt,no_inplace} [id F] ''   1
 | |     |x1 [id D]
 | |     |x3 [id G]
 | |Assert{msg='Aesara Assert failed!'} [id H] ''   4
 |   |x2 [id I]
 |   |All [id J] ''   2
 |     |Elemwise{gt,no_inplace} [id K] ''   0
 |       |x2 [id I]
 |       |x3 [id G]
 |dot [id B] ''   6
"""
    assert strg == strref, (strg, strref)
def test_min_informative_str():
    # Compares against a reference output to make sure the
    # `min_informative_str` function works as intended.
    A = tensor.matrix(name="A")
    B = tensor.matrix(name="B")
    C = A + B
    C.name = "C"
    D = tensor.matrix(name="D")
    E = tensor.matrix(name="E")
    F = D + E
    G = C + F
    mis = min_informative_str(G).replace("\t", " ")

    reference = """A. Elemwise{add,no_inplace}
 B. C
 C. Elemwise{add,no_inplace}
  D. D
  E. E"""

    if mis != reference:
        print("--" + mis + "--")
        print("--" + reference + "--")
    assert mis == reference
def test_solve_dtype(self):
    pytest.importorskip("scipy")

    dtypes = [
        "uint8",
        "uint16",
        "uint32",
        "uint64",
        "int8",
        "int16",
        "int32",
        "int64",
        "float16",
        "float32",
        "float64",
    ]

    A_val = np.eye(2)
    b_val = np.ones((2, 1))

    # Try all dtype combinations.
    for A_dtype, b_dtype in itertools.product(dtypes, dtypes):
        A = tensor.matrix(dtype=A_dtype)
        b = tensor.matrix(dtype=b_dtype)
        x = solve(A, b)
        fn = function([A, b], x)
        x_result = fn(A_val.astype(A_dtype), b_val.astype(b_dtype))
        assert x.dtype == x_result.dtype
def test_bug_josh_reported(self):
    # Regression test for a bug reported by Josh: after a bad merge,
    # these few lines of code failed. See
    # http://groups.google.com/group/aesara-dev/browse_thread/thread/8856e7ca5035eecb
    m1 = tt.matrix()
    m2 = tt.matrix()
    conv.conv2d(m1, m2)
def test_wrong_dims(self):
    a = tt.matrix()
    increment = tt.matrix()
    index = 0

    # `a[0]` is a vector, so a matrix increment has the wrong number
    # of dimensions.
    with pytest.raises(TypeError):
        tt.set_subtensor(a[index], increment)
    with pytest.raises(TypeError):
        tt.inc_subtensor(a[index], increment)
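# For contrast, a minimal sketch (not part of the original test, and
# assuming the module's usual `aesara`/`np` imports) of shapes that do
# work: `a[0]` is a vector row, so the increment must be a vector (or
# broadcastable to one), not a matrix.
def _example_set_subtensor_correct_dims():
    a = tt.matrix()
    increment = tt.vector()
    new_a = tt.set_subtensor(a[0], increment)  # `a` with row 0 replaced
    f = aesara.function([a, increment], new_a)
    f(
        np.zeros((2, 3), dtype=aesara.config.floatX),
        np.ones(3, dtype=aesara.config.floatX),
    )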
def test_overridden_function(self):
    # Operator-overloading methods mess with exceptions, so make sure
    # the original exception is not swallowed or replaced.
    x = tt.matrix()
    x.tag.test_value = np.zeros((2, 3), dtype=config.floatX)
    y = tt.matrix()
    y.tag.test_value = np.zeros((2, 2), dtype=config.floatX)
    with pytest.raises(ValueError):
        x.__mul__(y)
def setup_method(self):
    utt.seed_rng()
    # Using vectors makes things a lot simpler for generating the same
    # computations using scan.
    self.x = tensor.vector("x")
    self.v = tensor.vector("v")
    self.rng = np.random.RandomState(utt.fetch_seed())
    self.in_shape = (5 + self.rng.randint(3),)
    self.mx = tensor.matrix("mx")
    self.mv = tensor.matrix("mv")
    self.mat_in_shape = (5 + self.rng.randint(3), 5 + self.rng.randint(3))
def test_binomial():
    # TODO: test size=None, ndim=X
    # TODO: test size=X, ndim!=X.ndim
    # TODO: test random seed in legal value (!=0 and other)
    # TODO: test sample_size not a multiple of guessed #streams
    # TODO: test size=Var, with shape that changes from call to call
    # We test `size` both as a tuple of ints and as a `tensor.shape`,
    # and the probability parameter `p` as a Python float.
    if config.mode in ["DEBUG_MODE", "DebugMode", "FAST_COMPILE"] or (
        config.mode == "Mode" and config.linker in ["py"]
    ):
        sample_size = (10, 50)
        steps = 50
        rtol = 0.02
    else:
        sample_size = (500, 50)
        steps = int(1e3)
        rtol = 0.01

    x = tensor.matrix()
    for mean in [0.1, 0.5]:
        for size, const_size, var_input, input in [
            (sample_size, sample_size, [], []),
            (
                x.shape,
                sample_size,
                [x],
                [np.zeros(sample_size, dtype=config.floatX)],
            ),
            # Test an empty size (scalar sample).
            ((), (), [], []),
        ]:
            check_binomial(mean, size, const_size, var_input, input, steps, rtol)
def test_deepcopied_type_filter():
    a = copy.deepcopy(tensor.matrix())

    # The following should run cleanly.
    # As of commit 731e2d2fa68487733320d341d08b454a50c90d12
    # it was failing.
    a.type.filter(np.ones((2, 2), dtype=a.dtype), strict=True)
def test_det():
    rng = np.random.RandomState(utt.fetch_seed())

    r = rng.randn(5, 5).astype(config.floatX)
    x = tensor.matrix()
    f = aesara.function([x], det(x))
    assert np.allclose(np.linalg.det(r), f(r))
def test_inverse_singular():
    singular = np.array([[1, 0, 0]] + [[0, 1, 0]] * 2, dtype=aesara.config.floatX)
    a = tensor.matrix()
    f = function([a], matrix_inverse(a))
    with pytest.raises(np.linalg.LinAlgError):
        f(singular)
def test_qr_modes():
    rng = np.random.RandomState(utt.fetch_seed())

    A = tensor.matrix("A", dtype=aesara.config.floatX)
    a = rng.rand(4, 4).astype(aesara.config.floatX)

    f = function([A], qr(A))
    t_qr = f(a)
    n_qr = np.linalg.qr(a)
    assert _allclose(n_qr, t_qr)

    for mode in ["reduced", "r", "raw"]:
        f = function([A], qr(A, mode))
        t_qr = f(a)
        n_qr = np.linalg.qr(a, mode)
        if isinstance(n_qr, (list, tuple)):
            assert _allclose(n_qr[0], t_qr[0])
            assert _allclose(n_qr[1], t_qr[1])
        else:
            assert _allclose(n_qr, t_qr)

    try:
        n_qr = np.linalg.qr(a, "complete")
        f = function([A], qr(A, "complete"))
        t_qr = f(a)
        assert _allclose(n_qr, t_qr)
    except TypeError as e:
        # Older NumPy versions do not support mode="complete" and fail
        # with this message.
        assert "name 'complete' is not defined" in str(e)
def test_wrong_input(self):
    mySymbolicMatricesList = TypedListType(
        tt.TensorType(aesara.config.floatX, (False, False))
    )()
    mySymbolicMatrix = tt.matrix()

    with pytest.raises(TypeError):
        GetItem()(mySymbolicMatricesList, mySymbolicMatrix)
def test_dot_not_output(self):
    # Test the case where the vector input to the dot is not already an
    # output of the inner function.
    v = tt.vector()
    m = tt.matrix()
    output = tt.dot(v, m)

    # Compile the function twice, once with the optimization and once
    # without.
    opt_mode = mode.including("scan")
    f_opt = aesara.function([v, m], tt.jacobian(output, v), mode=opt_mode)

    no_opt_mode = mode.excluding("scanOp_pushout_output")
    f_no_opt = aesara.function([v, m], tt.jacobian(output, v), mode=no_opt_mode)

    # Ensure that the optimization was performed correctly in f_opt.
    # The inner function of scan should have only one output and it should
    # not be the result of a Dot.
    scan_node = [
        node for node in f_opt.maker.fgraph.toposort() if isinstance(node.op, Scan)
    ][0]
    assert len(scan_node.op.outputs) == 1
    # `outputs[0]` is a variable, not an op, so inspect the op that
    # produces it.
    assert not isinstance(scan_node.op.outputs[0].owner.op, tt.Dot)

    # Ensure that the function compiled with the optimization produces
    # the same results as the function compiled without.
    v_value = np.random.random(4).astype(config.floatX)
    m_value = np.random.random((4, 5)).astype(config.floatX)

    output_opt = f_opt(v_value, m_value)
    output_no_opt = f_no_opt(v_value, m_value)

    utt.assert_allclose(output_opt, output_no_opt)
def test_optimization_min(self):
    data = np.asarray(np.random.rand(2, 3), dtype=config.floatX)
    n = tensor.matrix()

    for axis in [0, 1, -1]:
        f = function([n], tensor.min(n, axis), mode=self.mode)
        topo = f.maker.fgraph.toposort()
        assert len(topo) == 1
        assert isinstance(topo[0].op, CAReduce)
        f(data)

        # Test variants with neg to make sure we optimize correctly:
        # min(-n) is rewritten as -max(n) via the identity
        # min(x) == -max(-x).
        f = function([n], tensor.min(-n, axis), mode=self.mode)
        topo = f.maker.fgraph.toposort()
        assert len(topo) == 2
        assert isinstance(topo[0].op, CAReduce)  # max
        assert isinstance(topo[1].op, Elemwise)
        assert isinstance(topo[1].op.scalar_op, scalar.Neg)
        f(data)

        f = function([n], -tensor.min(n, axis), mode=self.mode)
        topo = f.maker.fgraph.toposort()
        assert len(topo) == 2
        assert isinstance(topo[0].op, Elemwise)
        assert isinstance(topo[0].op.scalar_op, scalar.Neg)
        assert isinstance(topo[1].op, CAReduce)  # max
        f(data)

        f = function([n], -tensor.min(-n, axis), mode=self.mode)
        topo = f.maker.fgraph.toposort()
        assert len(topo) == 1
        assert isinstance(topo[0].op, CAReduce)  # max
        f(data)
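# The rewrites exercised above rely on the identity min(x) == -max(-x);
# a minimal numeric check of that identity (sketch, not part of the
# original test):
def _example_min_max_identity():
    data = np.random.rand(2, 3).astype(config.floatX)
    assert np.allclose(data.min(axis=0), -(-data).max(axis=0))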
def test_pydotprint_profile():
    A = tensor.matrix()
    prof = aesara.compile.ProfileStats(atexit_print=False, gpu_checks=False)
    f = aesara.function([A], A + 1, profile=prof)
    aesara.printing.pydotprint(f, print_output_file=False)
    f([[1]])
    aesara.printing.pydotprint(f, print_output_file=False)
def create_test_hmm():
    srng = at.random.RandomStream()

    N_tt = at.iscalar("N")
    N_tt.tag.test_value = 10
    M_tt = at.iscalar("M")
    M_tt.tag.test_value = 2

    mus_tt = at.matrix("mus")
    mus_tt.tag.test_value = np.stack(
        [np.arange(0.0, 10), np.arange(0.0, -10, -1)], axis=-1
    ).astype(aesara.config.floatX)

    sigmas_tt = at.ones((N_tt,))
    sigmas_tt.name = "sigmas"

    pi_0_rv = srng.dirichlet(at.ones((M_tt,)), name="pi_0")
    Gamma_rv = srng.dirichlet(at.ones((M_tt, M_tt)), name="Gamma")

    S_0_rv = srng.categorical(pi_0_rv, name="S_0")

    def scan_fn(mus_t, sigma_t, S_tm1, Gamma_t):
        S_t = srng.categorical(Gamma_t[S_tm1], name="S_t")
        Y_t = srng.normal(mus_t[S_t], sigma_t, name="Y_t")
        return S_t, Y_t

    (S_rv, Y_rv), scan_updates = aesara.scan(
        fn=scan_fn,
        sequences=[mus_tt, sigmas_tt],
        non_sequences=[Gamma_rv],
        outputs_info=[{"initial": S_0_rv, "taps": [-1]}, {}],
        strict=True,
        name="scan_rv",
    )
    Y_rv.name = "Y_rv"

    scan_op = Y_rv.owner.op
    scan_args = ScanArgs.from_node(Y_rv.owner)

    Gamma_in = scan_args.inner_in_non_seqs[0]
    Y_t = scan_args.inner_out_nit_sot[0]
    mus_t = scan_args.inner_in_seqs[0]
    sigmas_t = scan_args.inner_in_seqs[1]
    S_t = scan_args.inner_out_sit_sot[0]
    rng_in = scan_args.inner_out_shared[0]
    mus_in = Y_rv.owner.inputs[1]
    mus_in.name = "mus_in"
    sigmas_in = Y_rv.owner.inputs[2]
    sigmas_in.name = "sigmas_in"

    # The output `S_rv` is really `S_rv[1:]`, so we have to extract the
    # actual `Scan` output: `S_in`.
    S_in = S_rv.owner.inputs[0]
    S_in.name = "S_in"

    return locals()
def test_masked_input(self):
    m = tt.matrix("m")
    mt = m.T
    mt.name = "m.T"
    with pytest.raises(UnusedInputError):
        function([m, mt], mt * 2)
    function([m, mt], mt * 2, on_unused_input="ignore")
def setUp(self):
    extra1 = at.iscalar("extra1")
    extra1_ = np.array(0, dtype=extra1.dtype)
    extra1.dshape = tuple()
    extra1.dsize = 1

    val1 = at.vector("val1")
    val1_ = np.zeros(3, dtype=val1.dtype)
    val1.dshape = (3,)
    val1.dsize = 3

    val2 = at.matrix("val2")
    val2_ = np.zeros((2, 3), dtype=val2.dtype)
    val2.dshape = (2, 3)
    val2.dsize = 6

    self.val1, self.val1_ = val1, val1_
    self.val2, self.val2_ = val2, val2_
    self.extra1, self.extra1_ = extra1, extra1_

    self.cost = extra1 * val1.sum() + val2.sum()

    self.f_grad = ValueGradFunction(
        [self.cost], [val1, val2], {extra1: extra1_}, mode="FAST_COMPILE"
    )
def test__getitem__AdvancedSubtensor():
    # Make sure we get `AdvancedSubtensor`s for advanced indexing operations.
    x = tt.matrix("x")
    i = tt.ivector("i")

    # This is a `__getitem__` call that's redirected to
    # `_tensor_py_operators.take`.
    z = x[i]
    op_types = [type(node.op) for node in aesara.gof.graph.io_toposort([x, i], [z])]
    assert op_types[-1] == AdvancedSubtensor1

    # This should index nothing (i.e. return an empty copy of `x`).
    # We check that the index is empty.
    z = x[[]]
    op_types = [type(node.op) for node in aesara.gof.graph.io_toposort([x, i], [z])]
    assert op_types == [AdvancedSubtensor1]
    assert isinstance(z.owner.inputs[1], TensorConstant)

    # This is also a `__getitem__` call that's redirected to
    # `_tensor_py_operators.take`.
    z = x[:, i]
    op_types = [type(node.op) for node in aesara.gof.graph.io_toposort([x, i], [z])]
    assert op_types == [DimShuffle, AdvancedSubtensor1, DimShuffle]

    z = x[..., i, None]
    op_types = [type(node.op) for node in aesara.gof.graph.io_toposort([x, i], [z])]
    assert op_types == [MakeSlice, AdvancedSubtensor]

    z = x[i, None]
    op_types = [type(node.op) for node in aesara.gof.graph.io_toposort([x, i], [z])]
    assert op_types[-1] == AdvancedSubtensor
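# A minimal sketch (not part of the original test, and assuming the
# module's usual `np` import): evaluating the empty-index case confirms
# that `x[[]]` really is an empty (0, ncol) copy of `x`.
def _example_empty_advanced_index():
    x = tt.matrix("x")
    out = x[[]].eval({x: np.ones((3, 4), dtype=aesara.config.floatX)})
    assert out.shape == (0, 4)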
def test_modes(self):
    # This is a quick test after the LazyLinker branch merge to check
    # that all the current modes can still be used.
    linker_classes_involved = []

    predef_modes = ["FAST_COMPILE", "FAST_RUN", "DEBUG_MODE"]

    # Linkers to use with the regular Mode.
    if aesara.config.cxx:
        linkers = ["py", "c|py", "c|py_nogc", "vm", "vm_nogc", "cvm", "cvm_nogc"]
    else:
        linkers = ["py", "c|py", "c|py_nogc", "vm", "vm_nogc"]
    modes = predef_modes + [Mode(linker, "fast_run") for linker in linkers]

    for mode in modes:
        x = tt.matrix()
        y = tt.vector()
        f = aesara.function([x, y], x + y, mode=mode)
        # Test that it runs something.
        f([[1, 2], [3, 4]], [5, 6])
        linker_classes_involved.append(f.maker.mode.linker.__class__)

    # Regression check: there should be
    # - VM_Linker
    # - OpWiseCLinker (FAST_RUN)
    # - PerformLinker (FAST_COMPILE)
    # - DebugMode's Linker (DEBUG_MODE)
    assert 4 == len(set(linker_classes_involved))
def test_logp(self):
    np.random.seed(42)

    chol_val = floatX(np.array([[1, 0.9], [0, 2]]))
    cov_val = floatX(np.dot(chol_val, chol_val.T))
    cov = at.matrix("cov")
    cov.tag.test_value = cov_val
    delta_val = floatX(np.random.randn(5, 2))
    delta = at.matrix("delta")
    delta.tag.test_value = delta_val

    expect = stats.multivariate_normal(mean=np.zeros(2), cov=cov_val)
    expect = expect.logpdf(delta_val).sum()

    logp = MvNormalLogp()(cov, delta)
    logp_f = aesara.function([cov, delta], logp)
    logp = logp_f(cov_val, delta_val)
    npt.assert_allclose(logp, expect)
def __init__(self, z0=None, dim=None, jitter=0.001, batch_size=None, local=False):
    self.local = local
    self.batch_size = batch_size
    self.__jitter = jitter
    if isinstance(z0, AbstractFlow):
        parent = z0
        dim = parent.dim
        z0 = parent.forward
    else:
        parent = None
    if dim is not None:
        self.dim = dim
    else:
        raise ValueError(
            "Cannot infer dimension of flow, "
            "please provide dim or Flow instance as z0"
        )
    if z0 is None:
        self.z0 = aet.matrix()  # type: TensorVariable
    else:
        self.z0 = aet.as_tensor(z0)
    self.parent = parent
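# Usage sketch for the constructor above (`SomeFlow` is a hypothetical
# concrete subclass, shown for illustration only). A flow is built either
# from an explicit dimension, which creates a fresh symbolic input, or by
# chaining, in which case both `dim` and `z0` are taken from the parent:
#
#     root = SomeFlow(dim=10)  # self.z0 = aet.matrix(), self.parent = None
#     nxt = SomeFlow(z0=root)  # self.z0 = root.forward, self.parent = root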
def test_can_not_infer_nb_dim(self):
    # Regression test for gh-5613: check that we do not crash when the
    # number of dimensions cannot be inferred, and that we raise cleanly
    # in a few other cases found while investigating that issue.
    img = tt.tensor4("img")
    patches = tt.nnet.neighbours.images2neibs(img, [16, 16])
    extractPatches = aesara.function([img], patches, mode=self.mode)

    patsRecovery = tt.matrix("patsRecovery")
    original_size = tt.ivector("original_size")

    for mode in ["valid", "ignore_borders"]:
        out = neibs2images(patsRecovery, (16, 16), original_size, mode=mode)
        f = aesara.function([patsRecovery, original_size], out, mode=self.mode)

        im_val = np.ones((1, 3, 320, 320), dtype=np.float32)
        neibs = extractPatches(im_val)
        f(neibs, im_val.shape)
        # Wrong number of dimensions.
        with pytest.raises(ValueError):
            f(neibs, (1, 1, 3, 320, 320))
        # End up with a step of 0; this can lead to division by zero
        # in DebugMode.
        with pytest.raises((ValueError, ZeroDivisionError)):
            f(neibs, (3, 320, 320, 1))
def test_expand_packed_triangular():
    with pytest.raises(ValueError):
        # The packed values must be a vector, not a matrix.
        x = at.matrix("x")
        x.tag.test_value = np.array([[1.0]], dtype=aesara.config.floatX)
        expand_packed_triangular(5, x)

    N = 5
    packed = at.vector("packed")
    packed.tag.test_value = floatX(np.zeros(N * (N + 1) // 2))
    with pytest.raises(TypeError):
        # `n` must be a Python int, not a symbolic expression.
        expand_packed_triangular(packed.shape[0], packed)

    np.random.seed(42)
    vals = np.random.randn(N, N)
    lower = floatX(np.tril(vals))
    lower_packed = floatX(vals[lower != 0])
    upper = floatX(np.triu(vals))
    upper_packed = floatX(vals[upper != 0])

    expand_lower = expand_packed_triangular(N, packed, lower=True)
    expand_upper = expand_packed_triangular(N, packed, lower=False)
    expand_diag_lower = expand_packed_triangular(
        N, packed, lower=True, diagonal_only=True
    )
    expand_diag_upper = expand_packed_triangular(
        N, packed, lower=False, diagonal_only=True
    )

    assert np.all(expand_lower.eval({packed: lower_packed}) == lower)
    assert np.all(expand_upper.eval({packed: upper_packed}) == upper)
    assert np.all(
        expand_diag_lower.eval({packed: lower_packed}) == floatX(np.diag(vals))
    )
    assert np.all(
        expand_diag_upper.eval({packed: upper_packed}) == floatX(np.diag(vals))
    )
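# A worked example (sketch, not part of the original test) of the packing
# convention checked above: with `lower=True` the packed vector holds the
# lower triangle in row-major order, matching the `vals[lower != 0]`
# extraction, so for N = 2 a packed vector [a, b, c] expands to
# [[a, 0], [b, c]].
def _example_expand_packed_order():
    packed = at.vector("packed")
    expanded = expand_packed_triangular(2, packed, lower=True)
    result = expanded.eval({packed: floatX(np.array([1.0, 2.0, 3.0]))})
    assert np.array_equal(result, floatX(np.array([[1.0, 0.0], [2.0, 3.0]])))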
def test_extractdiag_opt(self):
    x = tensor.matrix()
    fn = aesara.function([x], tensor.ExtractDiag()(x), mode=mode_with_gpu)
    assert any(
        isinstance(node.op, GpuExtractDiag) for node in fn.maker.fgraph.toposort()
    )
def test_insert_inplace(self):
    mySymbolicMatricesList = TypedListType(
        tt.TensorType(aesara.config.floatX, (False, False))
    )()
    mySymbolicIndex = tt.scalar(dtype="int64")
    mySymbolicMatrix = tt.matrix()

    z = Insert()(mySymbolicMatricesList, mySymbolicIndex, mySymbolicMatrix)
    m = aesara.compile.mode.get_default_mode().including("typed_list_inplace_opt")
    f = aesara.function(
        [
            In(mySymbolicMatricesList, borrow=True, mutable=True),
            mySymbolicIndex,
            mySymbolicMatrix,
        ],
        z,
        accept_inplace=True,
        mode=m,
    )
    assert f.maker.fgraph.toposort()[0].op.inplace

    x = rand_ranged(-1000, 1000, [100, 101])
    y = rand_ranged(-1000, 1000, [100, 101])

    assert np.array_equal(f([x], np.asarray(1, dtype="int64"), y), [x, y])
def test_numpy_compare(self, n):
    a = np.array([[0.1231101, 0.72381381], [0.28748201, 0.43036511]]).astype(
        aesara.config.floatX
    )
    A = tensor.matrix("A", dtype=aesara.config.floatX)
    A.tag.test_value = a
    Q = matrix_power(A, n)
    n_p = np.linalg.matrix_power(a, n)
    assert np.allclose(n_p, Q.get_test_value())
def test_infer_shape(self):
    r = self.rng.randn(4, 4).astype(aesara.config.floatX)
    x = tensor.matrix()
    xi = self.op(x)
    self._compile_and_check([x], [xi], [r], self.op_class, warn=False)