def test_multMatVect():
    A1 = tensor.lmatrix("A1")
    s1 = tensor.ivector("s1")
    m1 = tensor.iscalar("m1")
    A2 = tensor.lmatrix("A2")
    s2 = tensor.ivector("s2")
    m2 = tensor.iscalar("m2")

    g0 = rng_mrg.DotModulo()(A1, s1, m1, A2, s2, m2)
    f0 = aesara.function([A1, s1, m1, A2, s2, m2], g0)

    i32max = np.iinfo(np.int32).max
    A1 = np.random.randint(0, i32max, (3, 3)).astype("int64")
    s1 = np.random.randint(0, i32max, 3).astype("int32")
    m1 = np.asarray(np.random.randint(i32max), dtype="int32")
    A2 = np.random.randint(0, i32max, (3, 3)).astype("int64")
    s2 = np.random.randint(0, i32max, 3).astype("int32")
    m2 = np.asarray(np.random.randint(i32max), dtype="int32")

    f0.input_storage[0].storage[0] = A1
    f0.input_storage[1].storage[0] = s1
    f0.input_storage[2].storage[0] = m1
    f0.input_storage[3].storage[0] = A2
    f0.input_storage[4].storage[0] = s2
    f0.input_storage[5].storage[0] = m2

    r_a1 = rng_mrg.matVecModM(A1, s1, m1)
    r_a2 = rng_mrg.matVecModM(A2, s2, m2)
    f0.fn()
    r_b = f0.output_storage[0].value

    assert np.allclose(r_a1, r_b[:3])
    assert np.allclose(r_a2, r_b[3:])

def multMatVect(v, A, m1, B, m2):
    # TODO: needs a description for the parameters and return value
    """
    Multiply the first half of v by A with a modulo of m1 and the second half
    by B with a modulo of m2.

    Notes
    -----
    The parameters of dot_modulo are passed implicitly because passing them
    explicitly takes more time than running the function's C code.

    """
    if multMatVect.dot_modulo is None:
        A_sym = tensor.lmatrix("A")
        s_sym = tensor.ivector("s")
        m_sym = tensor.iscalar("m")
        A2_sym = tensor.lmatrix("A2")
        s2_sym = tensor.ivector("s2")
        m2_sym = tensor.iscalar("m2")
        o = DotModulo()(A_sym, s_sym, m_sym, A2_sym, s2_sym, m2_sym)
        multMatVect.dot_modulo = function(
            [A_sym, s_sym, m_sym, A2_sym, s2_sym, m2_sym], o, profile=False
        )

    # This way of calling the Aesara function bypasses its call overhead.
    f = multMatVect.dot_modulo
    f.input_storage[0].storage[0] = A
    f.input_storage[1].storage[0] = v[:3]
    f.input_storage[2].storage[0] = m1
    f.input_storage[3].storage[0] = B
    f.input_storage[4].storage[0] = v[3:]
    f.input_storage[5].storage[0] = m2
    f.fn()
    r = f.output_storage[0].storage[0]

    return r


# The compiled helper is cached as a function attribute and built lazily on
# the first call; the guard above requires this initialization.
multMatVect.dot_modulo = None

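# A minimal, hedged usage sketch (not part of the original tests): it assumes
# `multMatVect` above and `rng_mrg.matVecModM` are available together, as in
# `test_multMatVect`, and uses illustrative random inputs rather than the
# real MRG matrices and moduli.
def example_multMatVect_usage():
    i32max = np.iinfo(np.int32).max
    A = np.random.randint(0, i32max, (3, 3)).astype("int64")
    B = np.random.randint(0, i32max, (3, 3)).astype("int64")
    v = np.random.randint(0, i32max, 6).astype("int32")
    m1 = np.asarray(np.random.randint(i32max), dtype="int32")
    m2 = np.asarray(np.random.randint(i32max), dtype="int32")

    # One call updates both halves of the state vector.
    r = multMatVect(v, A, m1, B, m2)

    # Each half should match an independent matrix-vector product modulo m.
    assert np.allclose(r[:3], rng_mrg.matVecModM(A, v[:3], m1))
    assert np.allclose(r[3:], rng_mrg.matVecModM(B, v[3:], m2))
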
def create_test_hmm():
    srng = at.random.RandomStream()

    N_tt = at.iscalar("N")
    N_tt.tag.test_value = 10
    M_tt = at.iscalar("M")
    M_tt.tag.test_value = 2

    mus_tt = at.matrix("mus")
    mus_tt.tag.test_value = np.stack(
        [np.arange(0.0, 10), np.arange(0.0, -10, -1)], axis=-1
    ).astype(aesara.config.floatX)

    sigmas_tt = at.ones((N_tt,))
    sigmas_tt.name = "sigmas"

    pi_0_rv = srng.dirichlet(at.ones((M_tt,)), name="pi_0")
    Gamma_rv = srng.dirichlet(at.ones((M_tt, M_tt)), name="Gamma")

    S_0_rv = srng.categorical(pi_0_rv, name="S_0")

    def scan_fn(mus_t, sigma_t, S_tm1, Gamma_t):
        S_t = srng.categorical(Gamma_t[S_tm1], name="S_t")
        Y_t = srng.normal(mus_t[S_t], sigma_t, name="Y_t")
        return S_t, Y_t

    (S_rv, Y_rv), scan_updates = aesara.scan(
        fn=scan_fn,
        sequences=[mus_tt, sigmas_tt],
        non_sequences=[Gamma_rv],
        outputs_info=[{"initial": S_0_rv, "taps": [-1]}, {}],
        strict=True,
        name="scan_rv",
    )
    Y_rv.name = "Y_rv"

    scan_op = Y_rv.owner.op
    scan_args = ScanArgs.from_node(Y_rv.owner)

    Gamma_in = scan_args.inner_in_non_seqs[0]
    Y_t = scan_args.inner_out_nit_sot[0]
    mus_t = scan_args.inner_in_seqs[0]
    sigmas_t = scan_args.inner_in_seqs[1]
    S_t = scan_args.inner_out_sit_sot[0]
    rng_in = scan_args.inner_out_shared[0]

    mus_in = Y_rv.owner.inputs[1]
    mus_in.name = "mus_in"
    sigmas_in = Y_rv.owner.inputs[2]
    sigmas_in.name = "sigmas_in"

    # The output `S_rv` is really `S_rv[1:]`, so we have to extract the actual
    # `Scan` output: `S_rv`.
    S_in = S_rv.owner.inputs[0]
    S_in.name = "S_in"

    return locals()

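# Hedged usage note (an illustration, not from the original file): since
# `create_test_hmm` returns `locals()`, callers pull out whichever graph
# pieces a test needs by name, e.g.:
#
#     hmm = create_test_hmm()
#     Y_rv, S_in, scan_args = hmm["Y_rv"], hmm["S_in"], hmm["scan_args"]
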
def test_swap_SharedVariable_with_given(self):
    # A special test case for logistic_sgd.py in the Deep Learning Tutorial.
    # This test asserts that SharedVariables in different functions share the
    # same storage.
    train_x = aesara.shared(value=np.random.rand(10, 10).astype(config.floatX))
    test_x = aesara.shared(value=np.random.rand(10, 10).astype(config.floatX))

    train_y = aesara.shared(value=np.random.rand(10, 1).astype(config.floatX))
    test_y = aesara.shared(value=np.random.rand(10, 1).astype(config.floatX))

    i = tt.iscalar("index")
    x = tt.vector("x")
    y = tt.vector("y")
    # This formula is meaningless; it exists only for the test.
    out = (tt.sum(x) - y) ** 2

    train = aesara.function(
        [i],
        out,
        givens={x: train_x[i], y: train_y[i]},
        updates={train_x: train_x + 0.1},
    )

    test_def = aesara.function([i], out, givens={x: test_x[i], y: test_y[i]})
    test_cpy = train.copy(
        swap={train_x: test_x, train_y: test_y}, delete_updates=True
    )

    for in1, in2 in zip(test_def.maker.inputs, test_cpy.maker.inputs):
        assert in1.value is in2.value

def test_select_proportional_to_weight(self):
    # Tests that ChoiceFromUniform selects elements, on average,
    # proportional to their probabilities.
    p = tensor.fmatrix()
    u = tensor.fvector()
    n = tensor.iscalar()
    m = multinomial.ChoiceFromUniform(odtype="auto")(p, u, n)

    f = function([p, u, n], m, allow_input_downcast=True)

    n_elements = 100
    n_selected = 10
    mean_rtol = 0.0005
    np.random.seed(12345)
    pvals = np.random.randint(1, 100, (1, n_elements)).astype(config.floatX)
    pvals /= pvals.sum(1)
    avg_pvals = np.zeros((n_elements,), dtype=config.floatX)

    for rep in range(10000):
        uni = np.random.rand(n_selected).astype(config.floatX)
        res = f(pvals, uni, n_selected)
        res = np.squeeze(res)
        avg_pvals[res] += 1
    avg_pvals /= avg_pvals.sum()
    avg_diff = np.mean(abs(avg_pvals - pvals))
    assert avg_diff < mean_rtol, avg_diff

def test_merge_ifs_true_false(self):
    x1 = tensor.scalar("x1")
    x2 = tensor.scalar("x2")
    y1 = tensor.scalar("y1")
    y2 = tensor.scalar("y2")
    w1 = tensor.scalar("w1")
    w2 = tensor.scalar("w2")
    c = tensor.iscalar("c")

    out = ifelse(
        c,
        ifelse(c, x1, x2) + ifelse(c, y1, y2) + w1,
        ifelse(c, x1, x2) + ifelse(c, y1, y2) + w2,
    )
    f = aesara.function(
        [x1, x2, y1, y2, w1, w2, c], out, allow_input_downcast=True
    )
    assert (
        len([x for x in f.maker.fgraph.toposort() if isinstance(x.op, IfElse)])
        == 1
    )

    rng = np.random.RandomState(utt.fetch_seed())
    vx1 = rng.uniform()
    vx2 = rng.uniform()
    vy1 = rng.uniform()
    vy2 = rng.uniform()
    vw1 = rng.uniform()
    vw2 = rng.uniform()
    assert np.allclose(f(vx1, vx2, vy1, vy2, vw1, vw2, 1), vx1 + vy1 + vw1)
    assert np.allclose(f(vx1, vx2, vy1, vy2, vw1, vw2, 0), vx2 + vy2 + vw2)

def test_not_lazy_if_inplace(self):
    # Tests that when the outputs are scalars and the graph is big, we
    # disable the inplace opt to speed up optimization.
    x = tensor.vector("x", dtype=self.dtype)
    y = tensor.vector("y", dtype=self.dtype)
    c = tensor.iscalar("c")
    mode = aesara.compile.get_mode(self.mode).excluding(
        # Disable many opts to keep the graph big enough to trigger
        # the disabling of the inplace opt.
        "fusion",
        "local_add_canonizer",
        "inplace",
        "constant_folding",
    )
    y2 = reduce(lambda x, y: x + y, [y] + list(range(200)))
    f = aesara.function([c, x, y], ifelse(c, x, y2), mode=mode)
    # Check for a non-inplace ifelse.
    ifnode = [n for n in f.maker.fgraph.toposort() if isinstance(n.op, IfElse)]
    assert len(ifnode) == 1
    assert not ifnode[0].op.as_view
    rng = np.random.RandomState(utt.fetch_seed())

    xlen = rng.randint(200)
    ylen = rng.randint(200)

    vx = np.asarray(rng.uniform(size=(xlen,)), self.dtype)
    vy = np.asarray(rng.uniform(size=(ylen,)), self.dtype)

    assert np.allclose(vx, f(1, vx, vy))
    assert np.allclose(vy + sum(range(200)), f(0, vx, vy))

def test_select_proportional_to_weight(self):
    # Tests that multinomial_wo_replacement selects elements, on average,
    # proportional to their probabilities.
    th_rng = RandomStreams(12345)

    p = tensor.fmatrix()
    n = tensor.iscalar()
    m = th_rng.multinomial_wo_replacement(pvals=p, n=n)

    f = function([p, n], m, allow_input_downcast=True)

    n_elements = 100
    n_selected = 10
    mean_rtol = 0.0005
    np.random.seed(12345)
    pvals = np.random.randint(1, 100, (1, n_elements)).astype(config.floatX)
    pvals /= pvals.sum(1)
    avg_pvals = np.zeros((n_elements,), dtype=config.floatX)

    for rep in range(10000):
        res = f(pvals, n_selected)
        res = np.squeeze(res)
        avg_pvals[res] += 1
    avg_pvals /= avg_pvals.sum()
    avg_diff = np.mean(abs(avg_pvals - pvals))
    assert avg_diff < mean_rtol

def test_multiple_out(self):
    x1 = tensor.vector("x1", dtype=self.dtype)
    x2 = tensor.vector("x2", dtype=self.dtype)
    y1 = tensor.vector("y1", dtype=self.dtype)
    y2 = tensor.vector("y2", dtype=self.dtype)
    c = tensor.iscalar("c")
    z = ifelse(c, (x1, x2), (y1, y2))
    f = aesara.function([c, x1, x2, y1, y2], z, mode=self.mode)

    self.assertFunctionContains1(f, self.get_ifelse(2))

    ifnode = [x for x in f.maker.fgraph.toposort() if isinstance(x.op, IfElse)][0]
    assert len(ifnode.outputs) == 2

    rng = np.random.RandomState(utt.fetch_seed())

    x1len = rng.randint(200)
    x2len = rng.randint(200)
    y1len = rng.randint(200)
    y2len = rng.randint(200)

    vx1 = np.asarray(rng.uniform(size=(x1len,)), self.dtype)
    vx2 = np.asarray(rng.uniform(size=(x2len,)), self.dtype)
    vy1 = np.asarray(rng.uniform(size=(y1len,)), self.dtype)
    vy2 = np.asarray(rng.uniform(size=(y2len,)), self.dtype)

    ovx1, ovx2 = f(1, vx1, vx2, vy1, vy2)
    ovy1, ovy2 = f(0, vx1, vx2, vy1, vy2)
    assert np.allclose(vx1, ovx1)
    assert np.allclose(vy1, ovy1)
    assert np.allclose(vx2, ovx2)
    assert np.allclose(vy2, ovy2)

def test_multiple_out_grad(self):
    # Tests that we can compute the gradients through lazy if
    x1 = tensor.vector("x1")
    x2 = tensor.vector("x2")
    y1 = tensor.vector("y1")
    y2 = tensor.vector("y2")
    c = tensor.iscalar("c")
    z = ifelse(c, (x1, x2), (y1, y2))
    grads = tensor.grad(z[0].sum() + z[1].sum(), [x1, x2, y1, y2])

    f = aesara.function([c, x1, x2, y1, y2], grads)

    rng = np.random.RandomState(utt.fetch_seed())

    lens = [rng.randint(200) for i in range(4)]
    values = [
        np.asarray(rng.uniform(size=(l,)), aesara.config.floatX) for l in lens
    ]

    outs_1 = f(1, *values)
    assert all([x.shape[0] == y for x, y in zip(outs_1, lens)])
    assert np.all(outs_1[0] == 1.0)
    assert np.all(outs_1[1] == 1.0)
    assert np.all(outs_1[2] == 0.0)
    assert np.all(outs_1[3] == 0.0)

    outs_0 = f(0, *values)
    assert all([x.shape[0] == y for x, y in zip(outs_0, lens)])
    assert np.all(outs_0[0] == 0.0)
    assert np.all(outs_0[1] == 0.0)
    assert np.all(outs_0[2] == 1.0)
    assert np.all(outs_0[3] == 1.0)

def test_mixed_dtype(self):
    x1 = tensor.vector("x1", dtype="int32")
    x2 = tensor.vector("x2", dtype=self.dtype)
    y1 = tensor.vector("y1", dtype="int32")
    y2 = tensor.vector("y2", dtype=self.dtype)
    c = tensor.iscalar("c")
    f = aesara.function(
        [c, x1, x2, y1, y2], ifelse(c, (x1, x2), (y1, y2)), mode=self.mode
    )

    self.assertFunctionContains1(f, self.get_ifelse(2))

    rng = np.random.RandomState(utt.fetch_seed())

    xlen = rng.randint(200)
    ylen = rng.randint(200)

    vx1 = np.asarray(rng.uniform(size=(xlen,)) * 3, "int32")
    vx2 = np.asarray(rng.uniform(size=(xlen,)), self.dtype)
    vy1 = np.asarray(rng.uniform(size=(ylen,)) * 3, "int32")
    vy2 = np.asarray(rng.uniform(size=(ylen,)), self.dtype)

    o1, o2 = f(1, vx1, vx2, vy1, vy2)
    assert np.allclose(vx1, o1)
    assert np.allclose(vx2, o2)

    o1, o2 = f(0, vx1, vx2, vy1, vy2)
    assert np.allclose(vy1, o1)
    assert np.allclose(vy2, o2)

def test_grad_lazy_if(self):
    # Tests that we can compute the gradients through lazy if
    x = tensor.vector("x", dtype=self.dtype)
    y = tensor.vector("y", dtype=self.dtype)
    c = tensor.iscalar("c")
    z = ifelse(c, x, y)
    gx, gy = tensor.grad(z.sum(), [x, y])

    f = aesara.function(
        [c, x, y], [self.cast_output(gx), self.cast_output(gy)], mode=self.mode
    )
    # Only 2 of the 3 ifelse ops are moved to the GPU; the one that stays
    # on the CPU is for the shape.
    self.assertFunctionContains(f, self.get_ifelse(1), min=2, max=3)

    rng = np.random.RandomState(utt.fetch_seed())

    xlen = rng.randint(200)
    ylen = rng.randint(200)

    vx = np.asarray(rng.uniform(size=(xlen,)), self.dtype)
    vy = np.asarray(rng.uniform(size=(ylen,)), self.dtype)
    gx0, gy0 = f(1, vx, vy)
    assert np.allclose(gx0.shape, vx.shape)
    assert np.allclose(gy0.shape, vy.shape)
    assert np.all(np.asarray(gx0) == 1.0)
    assert np.all(np.asarray(gy0) == 0.0)

    gx0, gy0 = f(0, vx, vy)
    assert np.allclose(gx0.shape, vx.shape)
    assert np.allclose(gy0.shape, vy.shape)
    assert np.all(np.asarray(gx0) == 0.0)
    assert np.all(np.asarray(gy0) == 1.0)

def setUp(self):
    extra1 = at.iscalar("extra1")
    extra1_ = np.array(0, dtype=extra1.dtype)
    extra1.dshape = tuple()
    extra1.dsize = 1

    val1 = at.vector("val1")
    val1_ = np.zeros(3, dtype=val1.dtype)
    val1.dshape = (3,)
    val1.dsize = 3

    val2 = at.matrix("val2")
    val2_ = np.zeros((2, 3), dtype=val2.dtype)
    val2.dshape = (2, 3)
    val2.dsize = 6

    self.val1, self.val1_ = val1, val1_
    self.val2, self.val2_ = val2, val2_
    self.extra1, self.extra1_ = extra1, extra1_

    self.cost = extra1 * val1.sum() + val2.sum()

    self.f_grad = ValueGradFunction(
        [self.cost], [val1, val2], {extra1: extra1_}, mode="FAST_COMPILE"
    )

def test_pushout2(self):
    x1 = tensor.scalar("x1")
    x2 = tensor.scalar("x2")
    y1 = tensor.scalar("y1")
    y2 = tensor.scalar("y2")
    w1 = tensor.scalar("w1")
    w2 = tensor.scalar("w2")
    c = tensor.iscalar("c")
    x, y = ifelse(c, (x1, y1), (x2, y2), name="f1")
    z = ifelse(x > y, w1, w2, name="f2")
    out = x * z * y

    f = aesara.function(
        [x1, x2, y1, y2, w1, w2, c], out, allow_input_downcast=True
    )
    assert isinstance(f.maker.fgraph.toposort()[-1].op, IfElse)
    rng = np.random.RandomState(utt.fetch_seed())
    vx1 = rng.uniform()
    vx2 = rng.uniform()
    vy1 = rng.uniform()
    vy2 = rng.uniform()
    vw1 = rng.uniform()
    vw2 = rng.uniform()

    if vx1 > vy1:
        vw = vw1
    else:
        vw = vw2
    assert np.allclose(f(vx1, vx2, vy1, vy2, vw1, vw2, 1), vx1 * vy1 * vw)

    if vx2 > vy2:
        vw = vw1
    else:
        vw = vw2
    assert np.allclose(f(vx1, vx2, vy1, vy2, vw1, vw2, 0), vx2 * vy2 * vw)

def test_copy_delete_updates(self):
    w = tt.iscalar("w")
    x = tt.fscalar("x")
    # SharedVariables for the test; `z` is the one with an update.
    y = aesara.shared(value=1, name="y")
    z = aesara.shared(value=2, name="z")
    out = x + y + z

    # Test for different linkers
    for mode in ["FAST_RUN", "FAST_COMPILE"]:
        ori = aesara.function([x], out, mode=mode, updates={z: z * 2})
        cpy = ori.copy(delete_updates=True)

        # The update on `z` was deleted, so `z` stays 2 and the result is
        # constant: 1 + 1 + 2 == 4 on every call.
        assert cpy(1)[0] == 4
        assert cpy(1)[0] == 4
        assert cpy(1)[0] == 4

    # Test if unused implicit and explicit inputs from delete_updates
    # are ignored as intended.
    for mode in ["FAST_RUN", "FAST_COMPILE"]:
        ori = aesara.function([x], x, mode=mode, updates={z: z * 2})
        cpy = ori.copy(delete_updates=True)

        ori = aesara.function([x, w], x, mode=mode, updates={z: z + w})
        cpy = ori.copy(delete_updates=True)

def check(dtype, N, M_=None, k=0):
    # Aesara does not accept None as a tensor, so we must use a real value
    # in its place.
    M = M_
    # Currently DebugMode does not support None as an input even though
    # this is allowed.
    if M is None:
        M = N

    N_symb = tt.iscalar()
    M_symb = tt.iscalar()
    k_symb = tt.iscalar()
    out = tt.tri(N_symb, M_symb, k_symb, dtype=dtype) + np.array(1).astype(dtype)
    f = aesara.function([N_symb, M_symb, k_symb], out, mode=mode_with_gpu)
    result = np.asarray(f(N, M, k)) - np.array(1).astype(dtype)
    assert np.allclose(result, np.tri(N, M_, k, dtype=dtype))
    assert result.dtype == np.dtype(dtype)
    assert any([isinstance(node.op, GpuTri) for node in f.maker.fgraph.toposort()])

def test_grad_cast_input(self):
    # Tests the gradient when both inputs are on the GPU.
    x = tensor.vector("x", dtype=self.dtype)
    y = tensor.vector("y", dtype=self.dtype)
    c = tensor.iscalar("c")
    z = ifelse(c, self.cast_output(x), self.cast_output(y))
    gx, gy = tensor.grad(z.sum(), [x, y])

    aesara.function([c, x, y], [gx, gy], mode=self.mode)

def test_record_mode_bad():
    # Like test_record_bad, but some events are recorded by the
    # aesara RecordMode, as is the event that triggers the mismatch
    # error.

    # Record a sequence of events
    output = StringIO()

    recorder = Record(file_object=output, replay=False)
    record_mode = RecordMode(recorder)

    i = iscalar()
    f = function([i], i, mode=record_mode, name="f")

    num_lines = 10

    for i in range(num_lines):
        recorder.handle_line(str(i) + "\n")
        f(i)

    # Make sure that the playback functionality doesn't raise any errors
    # when we repeat them
    output_value = output.getvalue()
    output = StringIO(output_value)

    playback_checker = Record(file_object=output, replay=True)
    playback_mode = RecordMode(playback_checker)

    i = iscalar()
    f = function([i], i, mode=playback_mode, name="f")

    for i in range(num_lines // 2):
        playback_checker.handle_line(str(i) + "\n")
        f(i)

    # Make sure a wrong event causes a MismatchError
    try:
        f(0)
    except MismatchError:
        return
    raise AssertionError("Failed to detect a mismatch.")

def test_lazy_if_on_generics(self):
    x = aesara.generic()
    y = aesara.generic()
    c = tensor.iscalar("c")
    f = aesara.function([c, x, y], ifelse(c, x, y))

    vx = ["testX"]
    vy = ["testY"]
    assert f(1, vx, vy) == vx
    assert f(0, vx, vy) == vy

def check_u(m, k=0):
    m_symb = tt.matrix(dtype=m.dtype)
    k_symb = tt.iscalar()
    f = aesara.function(
        [m_symb, k_symb], tt.triu(m_symb, k_symb), mode=mode_with_gpu
    )
    result = f(m, k)
    assert np.allclose(result, np.triu(m, k))
    assert result.dtype == np.dtype(m.dtype)
    assert any([isinstance(node.op, GpuTri) for node in f.maker.fgraph.toposort()])

def test_remove_useless_inputs1(self):
    x = tensor.vector("x")
    y = tensor.vector("y")
    c = tensor.iscalar("c")
    z = ifelse(c, (x, x), (y, y))
    f = aesara.function([c, x, y], z)

    ifnode = [n for n in f.maker.fgraph.toposort() if isinstance(n.op, IfElse)][0]
    assert len(ifnode.inputs) == 3

def test_merge(self):
    x = tensor.vector("x")
    y = tensor.vector("y")
    c = tensor.iscalar("c")
    z1 = ifelse(c, x + 1, y + 1)
    z2 = ifelse(c, x + 2, y + 2)
    z = z1 + z2
    f = aesara.function([c, x, y], z)
    assert (
        len([n for n in f.maker.fgraph.toposort() if isinstance(n.op, IfElse)])
        == 1
    )

def test_scan_debugprint1():
    k = tensor.iscalar("k")
    A = tensor.dvector("A")

    # Symbolic description of the result
    result, updates = aesara.scan(
        fn=lambda prior_result, A: prior_result * A,
        outputs_info=tensor.ones_like(A),
        non_sequences=A,
        n_steps=k,
    )

    final_result = result[-1]

    output_str = aesara.printing.debugprint(final_result, file="str")
    lines = output_str.split("\n")

    expected_output = """Subtensor{int64} [id A] ''
 |Subtensor{int64::} [id B] ''
 | |for{cpu,scan_fn} [id C] ''
 | | |k [id D]
 | | |IncSubtensor{Set;:int64:} [id E] ''
 | | | |AllocEmpty{dtype='float64'} [id F] ''
 | | | | |Elemwise{add,no_inplace} [id G] ''
 | | | | | |k [id D]
 | | | | | |Subtensor{int64} [id H] ''
 | | | | | |Shape [id I] ''
 | | | | | | |Rebroadcast{0} [id J] ''
 | | | | | | |InplaceDimShuffle{x,0} [id K] ''
 | | | | | | |Elemwise{second,no_inplace} [id L] ''
 | | | | | | |A [id M]
 | | | | | | |InplaceDimShuffle{x} [id N] ''
 | | | | | | |TensorConstant{1.0} [id O]
 | | | | | |Constant{0} [id P]
 | | | | |Subtensor{int64} [id Q] ''
 | | | | |Shape [id R] ''
 | | | | | |Rebroadcast{0} [id J] ''
 | | | | |Constant{1} [id S]
 | | | |Rebroadcast{0} [id J] ''
 | | | |ScalarFromTensor [id T] ''
 | | | |Subtensor{int64} [id H] ''
 | | |A [id M]
 | |Constant{1} [id U]
 |Constant{-1} [id V]

Inner graphs of the scan ops:

for{cpu,scan_fn} [id C] ''
 >Elemwise{mul,no_inplace} [id W] ''
 > |<TensorType(float64, vector)> [id X] -> [id E]
 > |A_copy [id Y] -> [id M]"""

    for truth, out in zip(expected_output.split("\n"), lines):
        assert truth.strip() == out.strip()

def test_n_samples_1():
    p = tensor.fmatrix()
    u = tensor.fvector()
    n = tensor.iscalar()
    m = multinomial.MultinomialFromUniform("auto")(p, u, n)

    f = function([p, u, n], m, allow_input_downcast=True)

    np.random.seed(12345)
    for i in [1, 5, 10, 100, 1000, 10000]:
        uni = np.random.rand(2 * i).astype(config.floatX)
        res = f([[1.0, 0.0], [0.0, 1.0]], uni, i)
        utt.assert_allclose(res, [[i * 1.0, 0.0], [0.0, i * 1.0]])

def test_cpu_contiguous():
    a = tt.fmatrix("a")
    i = tt.iscalar("i")
    a_val = np.asarray(np.random.rand(4, 5), dtype="float32")
    f = aesara.function([a, i], cpu_contiguous(a.reshape((5, 4))[::i]))
    topo = f.maker.fgraph.toposort()
    assert any([isinstance(node.op, CpuContiguous) for node in topo])
    assert f(a_val, 1).flags["C_CONTIGUOUS"]
    assert f(a_val, 2).flags["C_CONTIGUOUS"]
    assert f(a_val, 3).flags["C_CONTIGUOUS"]
    # Test the grad:
    utt.verify_grad(cpu_contiguous, [np.random.rand(5, 7, 2)])

def test_remove_useless_inputs2(self):
    x1 = tensor.vector("x1")
    x2 = tensor.vector("x2")
    y1 = tensor.vector("y1")
    y2 = tensor.vector("y2")
    c = tensor.iscalar("c")
    z = ifelse(c, (x1, x1, x1, x2, x2), (y1, y1, y2, y2, y2))
    f = aesara.function([c, x1, x2, y1, y2], z)

    ifnode = [x for x in f.maker.fgraph.toposort() if isinstance(x.op, IfElse)][0]
    assert len(ifnode.outputs) == 3

def test_gpu_contiguous():
    a = tt.fmatrix("a")
    i = tt.iscalar("i")
    a_val = np.asarray(np.random.rand(4, 5), dtype="float32")
    # The reshape is needed; otherwise, we would make the subtensor on the
    # CPU to transfer less data.
    f = aesara.function(
        [a, i], gpu_contiguous(a.reshape((5, 4))[::i]), mode=mode_with_gpu
    )
    topo = f.maker.fgraph.toposort()
    assert any([isinstance(node.op, GpuSubtensor) for node in topo])
    assert any([isinstance(node.op, GpuContiguous) for node in topo])
    assert f(a_val, 1).flags.c_contiguous
    assert f(a_val, 2).flags.c_contiguous
    assert f(a_val, 3).flags.c_contiguous

def test_local_alloc_dimshuffle():
    alloc_dimshuffle = out2in(local_alloc_dimshuffle)

    x = tensor.vector("x")
    m = tensor.iscalar("m")

    y = x.dimshuffle("x", 0)
    out = tensor.alloc(y, m, 1, x.shape[0])

    g = FunctionGraph([x, m], [out])
    alloc_dimshuffle(g)

    topo = g.toposort()
    assert any([not isinstance(x, DimShuffle) for x in topo])

def test_record_mode_good():
    # Like test_record_good, but some events are recorded by the
    # aesara RecordMode. We don't attempt to check the
    # exact string value of the record in this case.

    # Record a sequence of events
    output = StringIO()

    recorder = Record(file_object=output, replay=False)
    record_mode = RecordMode(recorder)

    i = iscalar()
    f = function([i], i, mode=record_mode, name="f")

    num_lines = 10

    for i in range(num_lines):
        recorder.handle_line(str(i) + "\n")
        f(i)

    # Make sure that the playback functionality doesn't raise any errors
    # when we repeat them
    output_value = output.getvalue()
    output = StringIO(output_value)

    playback_checker = Record(file_object=output, replay=True)
    playback_mode = RecordMode(playback_checker)

    i = iscalar()
    f = function([i], i, mode=playback_mode, name="f")

    for i in range(num_lines):
        playback_checker.handle_line(str(i) + "\n")
        f(i)

def test_functions():
    xvals = list(map(np.atleast_1d, [0.01, 0.1, 2, 100, 10000]))

    x = aet.dvector("x")
    x.tag.test_value = xvals[0]

    p = aet.iscalar("p")
    p.tag.test_value = 1

    gammaln = function([x], ps.gammaln(x))
    psi = function([x], ps.psi(x))
    function([x, p], ps.multigammaln(x, p))

    for x in xvals:
        check_vals(gammaln, ss.gammaln, x)
    for x in xvals[1:]:
        check_vals(psi, ss.psi, x)