def test_default_dtype(self):
    """uniform() must keep float64 from its inputs, honor an explicit dtype,
    and fall back to config.floatX for float32 inputs."""
    rng = RandomStreams(utt.fetch_seed())

    low = tensor.dscalar()
    high = tensor.dscalar()

    # No silent downcast when low/high are float64.
    out0 = rng.uniform(low=low, high=high, size=(42,))
    assert out0.dtype == "float64"
    f0 = function([low, high], out0)
    assert f0(-2.1, 3.1).dtype == "float64"

    # An explicitly requested dtype is honored even if it downcasts.
    out1 = rng.uniform(low=low, high=high, size=(42,), dtype="float32")
    assert out1.dtype == "float32"
    f1 = function([low, high], out1)
    assert f1(-1.1, 1.1).dtype == "float32"

    # With float32 inputs the output dtype follows config.floatX.
    lowf = tensor.fscalar()
    highf = tensor.fscalar()
    outf = rng.uniform(low=lowf, high=highf, size=(42,))
    assert outf.dtype == config.floatX
    ff = function([lowf, highf], outf)
    assert ff(np.float32(-0.1), np.float32(0.3)).dtype == config.floatX
def test_gammau_nan_c():
    """The C implementation of gammau yields NaN whenever an argument is negative."""
    a = at.dscalar()
    x = at.dscalar()
    out = gammau(a, x)
    fgraph = FunctionGraph([a, x], [out])
    compiled = CLinker().accept(fgraph).make_function()
    for args in ((-1, 1), (1, -1), (-1, -1)):
        assert np.isnan(compiled(*args))
def test_infer_shape(self):
    """Assert's inferred shapes must match its runtime output shapes."""
    scal_a = at.dscalar()
    scal_b = at.dscalar()
    val_a = np.random.random()
    # +1 keeps the asserted condition truthy.
    val_b = np.random.random() + 1

    self._compile_and_check(
        [scal_a, scal_b],
        [assert_op(scal_a, scal_b)],
        [val_a, val_b],
        Assert,
    )

    mat = at.dmatrix()
    mat_val = np.random.random((3, 4))
    # Shift the scalar condition above 1 so it stays truthy as well.
    val_a += 1
    self._compile_and_check(
        [mat, scal_a, scal_b],
        [assert_op(mat, scal_a, scal_b)],
        [mat_val, val_a, val_b],
        Assert,
    )
def test_nodiff_params():
    """solve_ivp must accept a `params` dict containing entries that are never
    differentiated (plain numpy values alongside a symbolic initial state)."""

    def rhs(t, y, p):
        # Trivial exponential-growth dynamics; `p` is intentionally unused.
        return {
            'A': y.A,
            'B': y.B,
            'C': y.C,
        }

    A = aet.dscalar("A")
    A.tag.test_value = np.array(0.9)
    time = np.linspace(0, 1)

    y0 = {
        'A': (A, ()),
        'B': np.array(1.),
        'C': np.array(1.),
    }
    params = {
        'alpha': np.array(1.),
        'beta': np.array(1.),
        'extra': np.array([0.]),
    }

    solution, *_ = sunode.wrappers.as_aesara.solve_ivp(
        y0=y0,
        params=params,
        rhs=rhs,
        tvals=time,
        t0=time[0],
        derivatives="forward",
        solver_kwargs=dict(sens_mode="simultaneous"),
    )
    func = aesara.function([A], [solution["A"], solution["B"]])
    assert func(0.2)[0].shape == time.shape
def test_simple_2d(self):
    """set/inc_subtensor on a matrix indexed by a full slice and a slice whose
    end point is a runtime scalar."""
    mat = tt.dmatrix()
    amount = tt.dscalar()
    rows = slice(None)
    col_end = tt.lscalar()
    cols = slice(col_end)

    for do_set in (False, True):
        op = tt.set_subtensor if do_set else tt.inc_subtensor
        result_var = op(mat[rows, cols], amount)
        f = aesara.function([mat, amount, col_end], result_var)

        val_mat = np.ones((5, 5))
        val_amount = 2.3
        val_end = 2
        result = f(val_mat, val_amount, val_end)

        expected = np.copy(val_mat)
        if do_set:
            expected[:, :val_end] = val_amount
        else:
            expected[:, :val_end] += val_amount
        utt.assert_allclose(result, expected)
def test_deepcopy_trust_input(self):
    """deepcopy must preserve `trust_input`, and both the original and the
    copy must reject inputs of the wrong type when trust_input is set.

    Bug fix: the DebugMode guard used ``e[0]`` — exceptions are not
    subscriptable in Python 3 (that raises TypeError and masks the intended
    skip), so it must read ``e.args[0]``.
    """
    a = tt.dscalar()  # the a is for 'anonymous' (un-named).
    x, s = tt.dscalars("xs")

    f = function(
        [
            x,
            In(a, value=1.0, name="a"),
            In(s, value=0.0, update=s + a * x, mutable=True),
        ],
        s + a * x,
    )
    f.trust_input = True

    try:
        g = copy.deepcopy(f)
    except NotImplementedError as e:
        # DebugMode functions cannot be pickled; skip this test in that mode.
        if e.args[0].startswith("DebugMode is not picklable"):
            return
        else:
            raise

    assert f.trust_input is g.trust_input

    # With trust_input=True the inputs are not converted, so passing a plain
    # Python float instead of an ndarray must fail in both functions.
    f(np.asarray(2.0))
    with pytest.raises(
        (ValueError, AttributeError, aesara.compile.debugmode.InvalidValueError)
    ):
        f(2.0)
    g(np.asarray(2.0))
    with pytest.raises(
        (ValueError, AttributeError, aesara.compile.debugmode.InvalidValueError)
    ):
        g(2.0)
def test_hessian(self):
    """Taking a second derivative of a SplineWrapper is unimplemented and must raise."""
    grid = np.linspace(0, 1, 100)
    values = grid * grid
    spline = SplineWrapper(
        interpolate.InterpolatedUnivariateSpline(grid, values, k=1)
    )
    xv = at.dscalar("x")
    (first_deriv,) = at.grad(spline(xv), [xv])
    with pytest.raises(NotImplementedError):
        at.grad(first_deriv, [xv])
def test_givens_replaces_shared_variable(self):
    """Replacing a shared variable via `givens` must remove it (and its
    default update) from the compiled graph entirely."""
    shared_a = shared(1.0, "a")
    shared_a.default_update = shared_a + 3.0
    b = tensor.dscalar("b")
    out = shared_a + 10
    f = pfunc([b], out, givens={shared_a: b})
    # Only `b` remains as an input; the default update contributes no extra output.
    assert len(f.maker.fgraph.inputs) == 1
    assert len(f.maker.fgraph.outputs) == 1
def test_simple_3d(self):
    """set/inc_subtensor on a 3-D tensor mixing a full slice, an integer
    index, and a slice bounded by a runtime scalar; also checks that the
    scalar increment broadcasts when the integer index is dropped.

    Fixes: removed a leftover debug ``print`` that polluted test output and
    renamed the misspelled local ``resut``.
    """
    a = tt.dtensor3()
    increment = tt.dscalar()
    sl1 = slice(None)
    sl2_end = tt.lscalar()
    sl2 = slice(sl2_end)
    sl3 = 2

    val_a = np.ones((5, 3, 4))
    val_inc = 2.3
    val_sl2_end = 2

    for method in (tt.set_subtensor, tt.inc_subtensor):
        # Full slice + integer index + partial slice.
        result_var = method(a[sl1, sl3, sl2], increment)
        f = aesara.function([a, increment, sl2_end], result_var)
        expected = np.copy(val_a)
        result = f(val_a, val_inc, val_sl2_end)
        if method is tt.set_subtensor:
            expected[:, sl3, :val_sl2_end] = val_inc
        else:
            expected[:, sl3, :val_sl2_end] += val_inc
        utt.assert_allclose(result, expected)

        # Broadcast the scalar increment over the trailing dimension.
        result_var = method(a[sl1, sl2], increment)
        f = aesara.function([a, increment, sl2_end], result_var)
        expected = np.copy(val_a)
        result = f(val_a, val_inc, val_sl2_end)
        if method is tt.set_subtensor:
            expected[:, :val_sl2_end] = val_inc
        else:
            expected[:, :val_sl2_end] += val_inc
        utt.assert_allclose(result, expected)
def test_infer_shape(self):
    """Shape inference must hold for tall and wide matrices over a range of offsets."""
    x = tt.dmatrix()
    y = tt.dscalar()
    z = tt.iscalar()
    for offset in (-5, -4, -1, 0, 1, 4, 5):
        for shape in ((8, 5), (5, 8)):
            self._compile_and_check(
                [x, y, z],
                [self.op(x, y, z)],
                [np.random.rand(*shape), np.random.rand(), offset],
                self.op_class,
            )
def test_infer_shape(self):
    """Shape inference for the op on a rectangular matrix and a square 3-D tensor."""
    cube = tt.dtensor3()
    mat = tt.dmatrix()
    scal = tt.dscalar()

    self._compile_and_check(
        [mat, scal],
        [self.op(mat, scal)],
        [np.random.rand(8, 5), np.random.rand()],
        self.op_class,
    )
    # Inputs with ndim > 2 must be square.
    self._compile_and_check(
        [cube, scal],
        [self.op(cube, scal)],
        [np.random.rand(8, 8, 8), np.random.rand()],
        self.op_class,
        warn=False,
    )
def test_gammaincc_python():
    """Python-mode gammaincc must agree with SciPy's reference implementation."""
    k = at.dscalar()
    x = at.dscalar()
    fn = function([k, x], gammaincc(k, x), mode=Mode("py"))
    assert np.isclose(fn(1, 2), sp.gammaincc(1, 2))