def test_zero_gradient_shape(self):
    # Ensure that a zero gradient has the proper shape.
    x = dmatrix()
    f = aesara.function([x], grad(dscalar(), x, disconnected_inputs="ignore"))
    a = np.ones((3, 7))
    assert (f(a) == 0).all()  # Zero gradient
    assert a.shape == f(a).shape  # With proper shape
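
# A minimal sketch (not part of the test suite) of the other
# `disconnected_inputs` modes: besides "ignore", grad() accepts "warn" and
# "raise"; "raise" turns a disconnected input into an explicit error instead
# of a zero gradient. This assumes DisconnectedInputError is exposed in
# aesara.gradient, as it was in Theano.
import aesara
from aesara.gradient import DisconnectedInputError, grad
from aesara.tensor.type import dmatrix, dscalar

x = dmatrix()
cost = dscalar()  # not a function of x, so x is disconnected
try:
    grad(cost, x, disconnected_inputs="raise")
except DisconnectedInputError:
    pass  # expected: the cost graph does not reach x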
def test_in_update_wrong_dtype(self):
    # Ensure that an error is raised if an In-wrapped variable has
    # an update of a different type
    a = dscalar("a")
    b = dvector("b")
    with pytest.raises(TypeError):
        In(a, update=b)
def test_deepcopy_trust_input(self):
    a = dscalar()  # the a is for 'anonymous' (un-named).
    x, s = dscalars("xs")

    f = function(
        [
            x,
            In(a, value=1.0, name="a"),
            In(s, value=0.0, update=s + a * x, mutable=True),
        ],
        s + a * x,
    )
    f.trust_input = True
    try:
        g = copy.deepcopy(f)
    except NotImplementedError as e:
        if str(e).startswith("DebugMode is not picklable"):
            return
        else:
            raise
    assert f.trust_input is g.trust_input
    # With trust_input enabled, inputs are passed through unchecked, so an
    # ndarray works but a plain Python float does not.
    f(np.asarray(2.0))
    with pytest.raises((ValueError, AttributeError, InvalidValueError)):
        f(2.0)
    g(np.asarray(2.0))
    with pytest.raises((ValueError, AttributeError, InvalidValueError)):
        g(2.0)
def test_detect_nan():
    # Test the code snippet example that detects NaN values.
    nan_detected = [False]

    def detect_nan(fgraph, i, node, fn):
        for output in fn.outputs:
            if np.isnan(output[0]).any():
                print("*** NaN detected ***")
                aesara.printing.debugprint(node)
                print("Inputs : %s" % [input[0] for input in fn.inputs])
                print("Outputs: %s" % [output[0] for output in fn.outputs])
                nan_detected[0] = True
                break

    x = dscalar("x")
    f = aesara.function(
        [x],
        [aesara.tensor.log(x) * x],
        mode=aesara.compile.MonitorMode(post_func=detect_nan),
    )
    try:
        old_stdout = sys.stdout
        sys.stdout = StringIO()
        f(0)  # log(0) * 0 = -inf * 0 = NaN
    finally:
        sys.stdout = old_stdout
    assert nan_detected[0]
def test_optimizer():
    # Test that we can exclude an optimization (here, fusion) from the mode.
    nan_detected = [False]

    def detect_nan(fgraph, i, node, fn):
        for output in fn.outputs:
            if np.isnan(output[0]).any():
                print("*** NaN detected ***")
                aesara.printing.debugprint(node)
                print("Inputs : %s" % [input[0] for input in fn.inputs])
                print("Outputs: %s" % [output[0] for output in fn.outputs])
                nan_detected[0] = True
                break

    x = dscalar("x")
    mode = aesara.compile.MonitorMode(post_func=detect_nan)
    mode = mode.excluding("fusion")
    f = aesara.function([x], [aesara.tensor.log(x) * x], mode=mode)

    # Test that the fusion wasn't done
    assert len(f.maker.fgraph.apply_nodes) == 2
    try:
        old_stdout = sys.stdout
        sys.stdout = StringIO()
        f(0)  # log(0) * 0 = -inf * 0 = NaN
    finally:
        sys.stdout = old_stdout

    # Test that we still detect the nan
    assert nan_detected[0]
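
# A minimal sketch (adapted from the MonitorMode documentation pattern) of
# the companion pre_func hook, which runs before each Apply node executes
# and can be combined with a post_func like detect_nan above. The hook
# signature matches the one used in these tests.
import aesara

def inspect_inputs(fgraph, i, node, fn):
    print(i, node, "input(s) value(s):", [input[0] for input in fn.inputs])

mode = aesara.compile.MonitorMode(pre_func=inspect_inputs)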
def test_givens_replaces_shared_variable(self):
    a = shared(1.0, "a")
    a.default_update = a + 3.0
    b = dscalar("b")
    c = a + 10
    f = pfunc([b], c, givens={a: b})

    assert len(f.maker.fgraph.inputs) == 1
    assert len(f.maker.fgraph.outputs) == 1
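
# A minimal usage sketch of what the test above checks: `givens` substitutes
# one variable for another before compilation, so the shared variable `a`
# (and its default_update) drops out of the compiled graph entirely. This
# uses aesara.function, which accepts the same `givens` argument.
import aesara
from aesara import shared
from aesara.tensor.type import dscalar

a = shared(1.0, "a")
b = dscalar("b")
f = aesara.function([b], a + 10, givens={a: b})
assert f(2.0) == 12.0  # `a`'s stored value (1.0) is never used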
def test_in_update(self):
    a = dscalar("a")
    f = function([In(a, value=0.0, update=a + 1)], a, mode="FAST_RUN")

    # Ensure that, across executions of the function, the state of the
    # input is persistent and is updated as expected. Note that the
    # pre-update value is returned, so the first call yields 0.0.
    assert f() == 0.0
    assert f() == 1.0
    assert f() == 2.0
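
# A minimal comparison sketch: the same persistent counter expressed with a
# shared variable and the `updates` argument to aesara.function, rather
# than an In(..., update=...) wrapper.
import aesara

state = aesara.shared(0.0)
g = aesara.function([], state, updates=[(state, state + 1)])
assert g() == 0.0  # as with In(..., update=...), the pre-update value is returned
assert g() == 1.0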
def test_op_invalid_input_types():
    class TestOp(aesara.graph.op.Op):
        itypes = [dvector, dvector, dvector]
        otypes = [dvector]

        def perform(self, node, inputs, outputs):
            pass

    msg = r"^Invalid input types for Op TestOp:\nInput 2/3: Expected TensorType\(float64, vector\)"
    with pytest.raises(TypeError, match=msg):
        TestOp()(dvector(), dscalar(), dvector())
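
# A minimal sketch of the same itypes/otypes pattern with *matching* input
# types, so no error is raised. `DoubleOp` is hypothetical; its perform()
# just doubles the input, purely for illustration.
import aesara
from aesara.tensor.type import dvector

class DoubleOp(aesara.graph.op.Op):
    itypes = [dvector]
    otypes = [dvector]

    def perform(self, node, inputs, outputs):
        outputs[0][0] = 2 * inputs[0]

y = DoubleOp()(dvector())  # type-checks: the input is a dvector, as declared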
def test_in_update_shared(self):
    # Test that using both In() with updates and shared variables with
    # updates in the same function behaves as expected
    shared_var = aesara.shared(1.0)
    a = dscalar("a")
    a_wrapped = In(a, value=0.0, update=shared_var)
    f = function([a_wrapped], [], updates={shared_var: a}, mode="FAST_RUN")

    # Ensure that, through the executions of the function, the state of
    # the input and the shared variable are appropriate (after N executions,
    # the values have swapped N times). This allows testing that the
    # changes occur at the same time and one doesn't overwrite the other.
    for i in range(5):
        f()
        assert np.allclose(shared_var.get_value(), i % 2)
def test_jax_ifelse():
    true_vals = np.r_[1, 2, 3]
    false_vals = np.r_[-1, -2, -3]

    x = ifelse(np.array(True), true_vals, false_vals)
    x_fg = FunctionGraph([], [x])

    compare_jax_and_py(x_fg, [])

    a = dscalar("a")
    a.tag.test_value = np.array(0.2, dtype=config.floatX)
    x = ifelse(a < 0.5, true_vals, false_vals)
    x_fg = FunctionGraph([a], [x])  # 0.2 < 0.5, i.e. the condition is True

    compare_jax_and_py(x_fg, [get_test_value(i) for i in x_fg.inputs])
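
# A minimal sketch of compiling the same graph directly to JAX, which is
# what compare_jax_and_py exercises internally. This assumes the "JAX" mode
# string is available in the installed Aesara build.
import numpy as np
import aesara
from aesara.ifelse import ifelse
from aesara.tensor.type import dscalar

a = dscalar("a")
out = ifelse(a < 0.5, np.r_[1, 2, 3], np.r_[-1, -2, -3])
f = aesara.function([a], out, mode="JAX")
assert np.array_equal(f(0.2), [1, 2, 3])  # condition is True, true branch taken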
def test_infer_shape(self):
    x = dmatrix()
    y = dscalar()
    z = iscalar()
    for test_offset in (-5, -4, -1, 0, 1, 4, 5):
        self._compile_and_check(
            [x, y, z],
            [self.op(x, y, z)],
            [np.random.random((8, 5)), np.random.random(), test_offset],
            self.op_class,
        )
        self._compile_and_check(
            [x, y, z],
            [self.op(x, y, z)],
            [np.random.random((5, 8)), np.random.random(), test_offset],
            self.op_class,
        )
# The offsets mirror the looped variant above; without the decorator,
# pytest would treat test_offset as an undefined fixture.
@pytest.mark.parametrize("test_offset", (-5, -4, -1, 0, 1, 4, 5))
def test_infer_shape(self, test_offset):
    rng = np.random.default_rng(43)
    x = dmatrix()
    y = dscalar()
    z = iscalar()

    self._compile_and_check(
        [x, y, z],
        [self.op(x, y, z)],
        [rng.random((8, 5)), rng.random(), test_offset],
        self.op_class,
    )
    self._compile_and_check(
        [x, y, z],
        [self.op(x, y, z)],
        [rng.random((5, 8)), rng.random(), test_offset],
        self.op_class,
    )
def test_infer_shape(self):
    z = dtensor3()
    x = dmatrix()
    y = dscalar()

    self._compile_and_check(
        [x, y],
        [self.op(x, y)],
        [np.random.random((8, 5)), np.random.random()],
        self.op_class,
    )
    self._compile_and_check(
        [z, y],
        [self.op(z, y)],
        # must be square when nd>2
        [np.random.random((8, 8, 8)), np.random.random()],
        self.op_class,
        warn=False,
    )
# The original parameter list was lost; the pairs below are illustrative
# NumPy ufuncs that Aesara overloads, not the full original set. Without a
# parametrize decorator, pytest would treat fct and value as fixtures.
@pytest.mark.parametrize("fct, value", [(np.sin, 0.5), (np.log, 0.5), (np.sqrt, 0.5)])
def test_numpy_method(fct, value):
    x = dscalar("x")
    y = fct(x)
    f = aesara.function([x], y)
    utt.assert_allclose(np.nan_to_num(f(value)), np.nan_to_num(fct(value)))