def test4(self):
    a = dmatrix()
    axis = scalar()
    l = sort(a, axis, "mergesort")
    f = aesara.function([a, axis], l)
    for axis_val in 0, 1:
        gv = f(self.m_val, axis_val)
        gt = np.sort(self.m_val, axis_val)
        utt.assert_allclose(gv, gt)
def test_empty_givens_updates():
    # Regression test for bug fixed in 8625e03.
    # Empty givens / updates dictionaries were not properly detected before,
    # triggering useless crashes at compile time.
    x = scalar()
    y = x * 2
    function([In(x)], y, givens={})
    function([In(x)], y, updates={})
def test_VMLinker_make_vm_cvm():
    # We don't want this at module level, since CXX might not be present
    from aesara.link.c.cvm import CVM

    a = scalar()
    linker = VMLinker(allow_gc=False, use_cloop=True)

    f = function([a], a, mode=Mode(optimizer=None, linker=linker))
    assert isinstance(f.vm, CVM)
def check_partial_function(linker_name):
    x = scalar("input")
    y = x**2
    f = function(
        [x], [y + 7, y - 9, y / 14.0], mode=Mode(optimizer=None, linker=linker_name)
    )

    assert f(3, output_subset=[0, 1, 2]) == f(3)
    assert f(4, output_subset=[0, 2]) == [f(4)[0], f(4)[2]]
    utt.assert_allclose(f(5), np.array([32.0, 16.0, 1.7857142857142858]))
def test_repeatOp(self):
    for ndim in [1, 3]:
        x = TensorType(config.floatX, [False] * ndim)()
        a = np.random.random((10,) * ndim).astype(config.floatX)

        for axis in self._possible_axis(ndim):
            for dtype in integer_dtypes:
                r_var = scalar(dtype=dtype)
                r = np.asarray(3, dtype=dtype)
                if dtype == "uint64" or (
                    dtype in self.numpy_unsupported_dtypes and r_var.ndim == 1
                ):
                    with pytest.raises(TypeError):
                        repeat(x, r_var, axis=axis)
                else:
                    f = aesara.function([x, r_var], repeat(x, r_var, axis=axis))
                    assert np.allclose(np.repeat(a, r, axis=axis), f(a, r))

                    r_var = vector(dtype=dtype)
                    if axis is None:
                        r = np.random.randint(1, 6, size=a.size).astype(dtype)
                    else:
                        r = np.random.randint(1, 6, size=(10,)).astype(dtype)

                    if dtype in self.numpy_unsupported_dtypes and r_var.ndim == 1:
                        with pytest.raises(TypeError):
                            repeat(x, r_var, axis=axis)
                    else:
                        f = aesara.function([x, r_var], repeat(x, r_var, axis=axis))
                        assert np.allclose(np.repeat(a, r, axis=axis), f(a, r))

                    # check when r is a list of single integer, e.g. [3].
                    r = np.random.randint(1, 11, size=()).astype(dtype) + 2
                    f = aesara.function([x], repeat(x, [r], axis=axis))
                    assert np.allclose(np.repeat(a, r, axis=axis), f(a))
                    assert not np.any(
                        [isinstance(n.op, RepeatOp) for n in f.maker.fgraph.toposort()]
                    )

                    # check when r is aesara tensortype that broadcastable is (True,)
                    r_var = TensorType(broadcastable=(True,), dtype=dtype)()
                    r = np.random.randint(1, 6, size=(1,)).astype(dtype)
                    f = aesara.function([x, r_var], repeat(x, r_var, axis=axis))
                    assert np.allclose(np.repeat(a, r[0], axis=axis), f(a, r))
                    assert not np.any(
                        [isinstance(n.op, RepeatOp) for n in f.maker.fgraph.toposort()]
                    )
def test2(self):
    a = dmatrix()
    axis = scalar()
    w = sort(a, axis)
    f = aesara.function([a, axis], w)
    for axis_val in 0, 1:
        gv = f(self.m_val, axis_val)
        gt = np.sort(self.m_val, axis_val)
        utt.assert_allclose(gv, gt)
def test_VM_exception():
    class SomeVM(VM):
        def __call__(self):
            pass

    a = scalar()
    fg = FunctionGraph(outputs=[SomeOp()(a)])

    with pytest.raises(ValueError, match="`nodes` and `thunks`.*"):
        SomeVM(fg, fg.apply_nodes, [], [])
def test_fgraph_to_python_names():
    import inspect

    x = scalar("1x")
    y = scalar("_")
    z = scalar()
    q = scalar("def")
    r = NoneConst

    out_fg = FunctionGraph([x, y, z, q, r], [x, y, z, q, r], clone=False)
    out_jx = fgraph_to_python(out_fg, to_python)

    sig = inspect.signature(out_jx)
    assert (x.auto_name, "_", z.auto_name, q.auto_name, r.name) == tuple(
        sig.parameters.keys()
    )
    assert (1, 2, 3, 4, 5) == out_jx(1, 2, 3, 4, 5)

    obj = object()
    assert get_name_for_object(obj) == type(obj).__name__
def test_scalar_shapes(self):
    with pytest.raises(AssertionError, match="will never match"):
        specify_shape(vector(), shape=())
    with pytest.raises(AssertionError, match="will never match"):
        specify_shape(matrix(), shape=[])

    x = scalar()
    y = specify_shape(x, shape=())
    f = aesara.function([x], y, mode=self.mode)
    assert f(15) == 15
def t():
    f = function(
        [
            In(a, name={"adsf", ()}, value=1.0),
            In(x, name=(), value=2.0),
            In(s, name=scalar(), value=3.0),
        ],
        a + x + s,
    )
    return f
def test_scalar_input(self):
    x = scalar("x")

    assert self.op(aes.add, axis=(-1,))(x).eval({x: 5}) == 5

    with pytest.raises(
        np.AxisError,
        match=re.escape("axis (-2,) is out of bounds for array of dimension 0"),
    ):
        self.op(aes.add, axis=(-2,))(x)
def check_partial_function_output_keys(linker_name):
    x = scalar("input")
    y = 3 * x
    f = function(
        [x], {"a": y * 5, "b": y - 7}, mode=Mode(optimizer=None, linker=linker_name)
    )

    assert f(5, output_subset=["a"])["a"] == f(5)["a"]
def test_deepcopy(self):
    a = scalar()  # the a is for 'anonymous' (un-named).
    x, s = scalars("xs")

    f = function(
        [
            x,
            In(a, value=1.0, name="a"),
            In(s, value=0.0, update=s + a * x, mutable=True),
        ],
        s + a * x,
    )
    try:
        g = copy.deepcopy(f)
    except NotImplementedError as e:
        if e.args[0].startswith("DebugMode is not picklable"):
            return
        else:
            raise
    # if they both return, assume that they return equivalent things.
    # print [(k,id(k)) for k in f.finder.keys()]
    # print [(k,id(k)) for k in g.finder.keys()]

    assert g.container[0].storage is not f.container[0].storage
    assert g.container[1].storage is not f.container[1].storage
    assert g.container[2].storage is not f.container[2].storage
    assert x not in g.container
    assert x not in g.value
    assert len(f.defaults) == len(g.defaults)
    assert f._check_for_aliased_inputs is g._check_for_aliased_inputs
    assert f.name == g.name
    assert f.maker.fgraph.name == g.maker.fgraph.name
    # print 'f.defaults = %s' % (f.defaults, )
    # print 'g.defaults = %s' % (g.defaults, )
    for (f_req, f_feed, f_val), (g_req, g_feed, g_val) in zip(f.defaults, g.defaults):
        assert f_req == g_req and f_feed == g_feed and f_val == g_val

    assert g.value[1] is not f.value[1]  # should not have been copied
    assert (
        g.value[2] is not f.value[2]
    )  # should have been copied because it is mutable.
    assert not (g.value[2] != f.value[2]).any()  # its contents should be identical

    assert f(2, 1) == g(2)  # they should be in sync, default value should be copied.
    assert f(2, 1) == g(2)  # they should be in sync, default value should be copied.
    f(1, 2)  # put them out of sync
    assert f(1, 2) != g(1, 2)  # they should not be equal anymore.
    g(1, 2)  # put them back in sync
    assert f(3) == g(3)  # They should be in sync again.
def test_jacobian_disconnected_inputs():
    # Test that disconnected inputs are properly handled by jacobian.

    v1 = vector()
    v2 = vector()
    jacobian_v = aesara.gradient.jacobian(1 + v1, v2, disconnected_inputs="ignore")
    func_v = aesara.function([v1, v2], jacobian_v)
    val = np.arange(4.0).astype(aesara.config.floatX)
    assert np.allclose(func_v(val, val), np.zeros((4, 4)))

    s1 = scalar()
    s2 = scalar()
    jacobian_s = aesara.gradient.jacobian(1 + s1, s2, disconnected_inputs="ignore")
    func_s = aesara.function([s2], jacobian_s)
    val = np.array(1.0).astype(aesara.config.floatX)
    assert np.allclose(func_s(val), np.zeros(1))
def test_grad_constant(self):
    # Test that the gradient handles Constants and consider_constant variables
    # consistently

    x = scalar()
    y = scalar()
    z_x = x + y
    z_one = one + y
    g_x = grad(z_x, x, consider_constant=[x])
    g_one = grad(z_one, one)

    f = aesara.function([x, y], [g_x, g_one])

    g_x, g_one = f(1, 0.5)

    if not np.allclose(g_x, g_one):
        raise AssertionError(
            "Gradient using consider constant is "
            + str(g_x)
            + " but gradient with respect to the same Constant is "
            + str(g_one)
        )
def test_NNone_rval(self):
    # grad: Test returning some zero value from grad
    o = TestGrad.Obj1()
    a1 = o.make_node()
    g0, g1, g2 = grad(
        a1.outputs[0], a1.inputs + [scalar("z")], disconnected_inputs="ignore"
    )
    assert o.gval0 is g0
    assert o.gval1 is g1
    assert g2.owner.op == aet.fill
    assert g2.owner.inputs[1].data == 0
def test_perform(self, shp):
    rng = np.random.default_rng(43)
    x = matrix()
    y = scalar()
    f = function([x, y], fill_diagonal(x, y))
    a = rng.random(shp).astype(config.floatX)
    val = np.cast[config.floatX](rng.random())
    out = f(a, val)
    # We can't use np.fill_diagonal as it is bugged.
    assert np.allclose(np.diag(out), val)
    assert (out == val).sum() == min(a.shape)
def test_debug_mode_dict(self):
    # Tests that debug mode works where outputs is a dictionary.

    x = scalar("x")

    f = function([x], outputs={"1": x, "2": 2 * x, "3": 3 * x}, mode="DEBUG_MODE")

    result = f(3.0)

    assert result["1"] == 3.0
    assert result["2"] == 6.0
    assert result["3"] == 9.0
def test_pushout3(self):
    x1 = scalar("x1")
    y1 = scalar("x2")
    y2 = scalar("y2")
    c = iscalar("c")
    two = np.asarray(2, dtype=aesara.config.floatX)
    x, y = ifelse(c, (x1, y1), (two, y2), name="f1")
    o3 = np.asarray(0.3, dtype=aesara.config.floatX)
    o2 = np.asarray(0.2, dtype=aesara.config.floatX)
    z = ifelse(c, o3, o2, name="f2")
    out = x * z * y
    f = function([x1, y1, y2, c], out, allow_input_downcast=True)

    assert isinstance(f.maker.fgraph.toposort()[-1].op, IfElse)

    rng = np.random.RandomState(utt.fetch_seed())
    vx1 = rng.uniform()
    vy1 = rng.uniform()
    vy2 = rng.uniform()

    assert np.allclose(f(vx1, vy1, vy2, 1), vx1 * vy1 * 0.3)
    assert np.allclose(f(vx1, vy1, vy2, 0), 2 * vy2 * 0.2)
def test_output_dictionary(self):
    # Tests that function works when outputs is a dictionary

    x = scalar()
    f = function([x], outputs={"a": x, "c": x * 2, "b": x * 3, "1": x * 4})

    outputs = f(10.0)

    assert outputs["a"] == 10.0
    assert outputs["b"] == 30.0
    assert outputs["1"] == 40.0
    assert outputs["c"] == 20.0
def test_debug_mode_list(self):
    # Tests that debug mode works where the outputs argument is a list.

    x = scalar("x")

    f = function([x], outputs=[x, 2 * x, 3 * x], mode="DEBUG_MODE")

    result = f(5.0)

    assert result[0] == 5.0
    assert result[1] == 10.0
    assert result[2] == 15.0
def test_pushout2(self):
    x1 = scalar("x1")
    x2 = scalar("x2")
    y1 = scalar("y1")
    y2 = scalar("y2")
    w1 = scalar("w1")
    w2 = scalar("w2")
    c = iscalar("c")
    x, y = ifelse(c, (x1, y1), (x2, y2), name="f1")
    z = ifelse(x > y, w1, w2, name="f2")
    out = x * z * y
    f = function([x1, x2, y1, y2, w1, w2, c], out, allow_input_downcast=True)

    assert isinstance(f.maker.fgraph.toposort()[-1].op, IfElse)

    rng = np.random.RandomState(utt.fetch_seed())
    vx1 = rng.uniform()
    vx2 = rng.uniform()
    vy1 = rng.uniform()
    vy2 = rng.uniform()
    vw1 = rng.uniform()
    vw2 = rng.uniform()

    if vx1 > vy1:
        vw = vw1
    else:
        vw = vw2
    assert np.allclose(f(vx1, vx2, vy1, vy2, vw1, vw2, 1), vx1 * vy1 * vw)

    if vx2 > vy2:
        vw = vw1
    else:
        vw = vw2
    assert np.allclose(f(vx1, vx2, vy1, vy2, vw1, vw2, 0), vx2 * vy2 * vw)
def test_merge_ifs_true_false(self):
    x1 = scalar("x1")
    x2 = scalar("x2")
    y1 = scalar("y1")
    y2 = scalar("y2")
    w1 = scalar("w1")
    w2 = scalar("w2")
    c = iscalar("c")

    out = ifelse(
        c,
        ifelse(c, x1, x2) + ifelse(c, y1, y2) + w1,
        ifelse(c, x1, x2) + ifelse(c, y1, y2) + w2,
    )
    f = function([x1, x2, y1, y2, w1, w2, c], out, allow_input_downcast=True)

    assert (
        len([x for x in f.maker.fgraph.toposort() if isinstance(x.op, IfElse)]) == 1
    )

    rng = np.random.RandomState(utt.fetch_seed())
    vx1 = rng.uniform()
    vx2 = rng.uniform()
    vy1 = rng.uniform()
    vy2 = rng.uniform()
    vw1 = rng.uniform()
    vw2 = rng.uniform()
    assert np.allclose(f(vx1, vx2, vy1, vy2, vw1, vw2, 1), vx1 + vy1 + vw1)
    assert np.allclose(f(vx1, vx2, vy1, vy2, vw1, vw2, 0), vx2 + vy2 + vw2)
def test_perform_3d(self):
    rng = np.random.default_rng(43)
    a = rng.random((3, 3, 3)).astype(config.floatX)
    x = tensor3()
    y = scalar()
    f = function([x, y], fill_diagonal(x, y))
    val = np.cast[config.floatX](rng.random() + 10)
    out = f(a, val)
    # We can't use np.fill_diagonal as it is bugged.
    assert out[0, 0, 0] == val
    assert out[1, 1, 1] == val
    assert out[2, 2, 2] == val
    assert (out == val).sum() == min(a.shape)
def test_naming_rule1(self):
    a = scalar()  # the a is for 'anonymous' (un-named).
    x, s = scalars("xs")
    f = function([a, s], a / s)
    assert f(1, 2) == 0.5
    assert f(2, 1) == 2.0
    assert f(2, s=1) == 2.0
    checkfor(
        self, lambda: f(q=2, s=1), TypeError
    )  # got unexpected keyword argument 'q'
    checkfor(
        self, lambda: f(a=2, s=1), TypeError
    )  # got unexpected keyword argument 'a'
def test_output_list_still_works(self):
    # Test that function works if outputs is a list.

    x = scalar("x")

    f = function([x], outputs=[x * 3, x * 2, x * 4, x])

    result = f(5.0)

    assert result[0] == 15.0
    assert result[1] == 10.0
    assert result[2] == 20.0
    assert result[3] == 5.0
def test_key_string_requirement(self):
    # Tests that an exception is thrown if a non-string key is used in
    # the outputs dictionary.
    x = scalar("x")

    with pytest.raises(AssertionError):
        function([x], outputs={1.0: x})

    with pytest.raises(AssertionError):
        function([x], outputs={1.0: x, "a": x**2})

    with pytest.raises(AssertionError):
        function([x], outputs={(1, "b"): x, 1.0: x**2})
def test_grad_int_value(self):
    w = aesara.shared(np.random.rand(10))
    b = aesara.shared(np.random.rand())
    params = [w, b]

    x = vector()
    y = scalar()

    score = w.dot(x) + b
    correct = score * y > 0

    loss = ifelse(correct, 0, 1)
    [(param, param - 0.5 * aesara.grad(cost=loss, wrt=param)) for param in params]
class TestBinomial(utt.InferShapeTester):
    n = scalar(dtype="int64")
    p = scalar()
    shape = lvector()
    _n = 5
    _p = 0.25
    _shape = np.asarray([3, 5], dtype="int64")

    inputs = [n, p, shape]
    _inputs = [_n, _p, _shape]

    def setup_method(self):
        super().setup_method()
        self.op_class = Binomial

    def test_op(self):
        for sp_format in sparse.sparse_formats:
            for o_type in sparse.float_dtypes:
                f = aesara.function(
                    self.inputs, Binomial(sp_format, o_type)(*self.inputs)
                )

                tested = f(*self._inputs)

                assert tested.shape == tuple(self._shape)
                assert tested.format == sp_format
                assert tested.dtype == o_type
                assert np.allclose(np.floor(tested.todense()), tested.todense())

    def test_infer_shape(self):
        for sp_format in sparse.sparse_formats:
            for o_type in sparse.float_dtypes:
                self._compile_and_check(
                    self.inputs,
                    [Binomial(sp_format, o_type)(*self.inputs)],
                    self._inputs,
                    self.op_class,
                )
def test_grad_scale():
    x = scalar()

    z = grad(grad_scale(x, 2) ** 2, x)
    z2 = grad(x**2, x)

    f = aesara.function([x], outputs=[z, z2])

    if config.mode != "FAST_COMPILE":
        topo = f.maker.fgraph.toposort()
        assert not any([isinstance(node.op, GradScale) for node in topo])

    out = f(2.0)
    assert np.allclose(out, (8, 4))