def test_pickle_unpickle_without_reoptimization():
    mode = config.mode
    if mode in ["DEBUG_MODE", "DebugMode"]:
        mode = "FAST_RUN"
    x1 = fmatrix("x1")
    x2 = fmatrix("x2")
    x3 = shared(np.ones((10, 10), dtype=floatX))
    x4 = shared(np.ones((10, 10), dtype=floatX))
    y = at_sum(at_sum(at_sum(x1 ** 2 + x2) + x3) + x4)

    updates = OrderedDict()
    updates[x3] = x3 + 1
    updates[x4] = x4 + 1
    f = function([x1, x2], y, updates=updates, mode=mode)

    # now pickle the compiled aesara fn
    string_pkl = pickle.dumps(f, -1)

    # compute f value
    in1 = np.ones((10, 10), dtype=floatX)
    in2 = np.ones((10, 10), dtype=floatX)

    # test unpickle without optimization
    default = config.reoptimize_unpickled_function
    try:
        # the default is True
        config.reoptimize_unpickled_function = False
        f_ = pickle.loads(string_pkl)
        assert f(in1, in2) == f_(in1, in2)
    finally:
        config.reoptimize_unpickled_function = default
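# Editorial sketch (not part of the original suite): the flag exercised above can
# also be toggled with `config.change_flags`, which restores the previous value
# automatically. The helper name `_sketch_pickle_without_reoptimization` and the
# tiny graph inside it are illustrative assumptions, not library API.
def _sketch_pickle_without_reoptimization():
    import pickle

    import numpy as np

    import aesara
    import aesara.tensor as at

    v = at.vector("v")
    f = aesara.function([v], (v ** 2).sum())

    payload = pickle.dumps(f, protocol=-1)
    # Load the compiled function without re-running graph optimizations.
    with aesara.config.change_flags(reoptimize_unpickled_function=False):
        f_restored = pickle.loads(payload)

    data = np.arange(4, dtype=aesara.config.floatX)
    assert np.allclose(f(data), f_restored(data))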
def test_default_updates_multiple(self):
    x = shared(0)
    y = shared(1)
    x.default_update = x - 1
    y.default_update = y + 1

    f1 = pfunc([], [x, y])
    f1()
    assert x.get_value() == -1
    assert y.get_value() == 2

    f2 = pfunc([], [x, y], updates=[(x, (x - 2))], no_default_updates=[y])
    f2()
    assert x.get_value() == -3
    assert y.get_value() == 2

    f3 = pfunc([], [x, y], updates=[(x, (x - 2))], no_default_updates=True)
    f3()
    assert x.get_value() == -5
    assert y.get_value() == 2

    f4 = pfunc([], [x, y], updates=[(y, (y - 2))])
    f4()
    assert x.get_value() == -6
    assert y.get_value() == 0
def test_FunctionMaker_cache_optimizations():
    opt_db_file = os.path.join(config.compiledir, "optimized_graphs.pkl")
    if os.path.exists(opt_db_file):
        os.remove(opt_db_file)

    floatX = "float32"
    mode = config.mode
    if mode in ["DEBUG_MODE", "DebugMode"]:
        mode = "FAST_RUN"

    graph_db_file = os.path.join(config.compiledir, "optimized_graphs.pkl")
    assert not os.path.exists(graph_db_file)

    with config.change_flags(cache_optimizations=True):
        a = fmatrix("a")
        b = fmatrix("b")
        c = shared(np.ones((10, 10), dtype=floatX))
        d = shared(np.ones((10, 10), dtype=floatX))
        e = aet_sum(aet_sum(aet_sum(a ** 2 + b) + c) + d)
        f1 = function([a, b], e, mode=mode)

        # FIXME: We can do much better about testing this.
        assert os.path.exists(graph_db_file)

        m = fmatrix("x1")
        n = fmatrix("x2")
        p = shared(np.ones((10, 10), dtype=floatX))
        q = shared(np.ones((10, 10), dtype=floatX))
        j = aet_sum(aet_sum(aet_sum(m ** 2 + n) + p) + q)
        f2 = function([m, n], j, mode=mode)

        in1 = np.ones((10, 10), dtype=floatX)
        in2 = np.ones((10, 10), dtype=floatX)
        assert f1(in1, in2) == f2(in1, in2)
def test_swap_SharedVariable_with_given(self):
    # A special test case for logistic_sgd.py in the Deep Learning Tutorial.
    # This test asserts that SharedVariables in different functions share
    # the same storage.
    train_x = shared(value=np.random.rand(10, 10).astype(config.floatX))
    test_x = shared(value=np.random.rand(10, 10).astype(config.floatX))

    train_y = shared(value=np.random.rand(10, 1).astype(config.floatX))
    test_y = shared(value=np.random.rand(10, 1).astype(config.floatX))

    i = iscalar("index")
    x = vector("x")
    y = vector("y")

    # this formula is meaningless; it is only used for the test
    out = (aet_sum(x) - y) ** 2
    train = function(
        [i],
        out,
        givens={x: train_x[i], y: train_y[i]},
        updates={train_x: train_x + 0.1},
    )

    test_def = function([i], out, givens={x: test_x[i], y: test_y[i]})
    test_cpy = train.copy(
        swap={train_x: test_x, train_y: test_y}, delete_updates=True
    )

    for in1, in2 in zip(test_def.maker.inputs, test_cpy.maker.inputs):
        assert in1.value is in2.value
def test_copy_share_memory(self):
    x = fscalar("x")
    # SharedVariable for tests, one of them has an update
    y = shared(value=1)
    z = shared(value=2)
    out = tanh((x + y + 2) / (x + z - 0.2) ** 2)

    # Test for different linkers
    for mode in ["FAST_RUN", "FAST_COMPILE"]:
        ori = function([x], [out], mode=mode, updates={z: z + 1})
        cpy = ori.copy(share_memory=True)

        # Test whether memory is shared
        storage_map_ori = ori.fn.storage_map
        storage_map_cpy = cpy.fn.storage_map
        fgraph_cpy = cpy.maker.fgraph

        # Assert that intermediate and Constant storages are shared,
        # and that output storages are not shared
        i_o_variables = fgraph_cpy.inputs + fgraph_cpy.outputs
        ori_storages = storage_map_ori.values()
        shared_storages = [
            val
            for key, val in storage_map_cpy.items()
            if key not in i_o_variables or isinstance(key, Constant)
        ]
        for storage in shared_storages:
            assert any([storage is s for s in ori_storages])

        # Assert that storages of SharedVariables without updates are shared
        for (input, _1, _2), here, there in zip(
            ori.indices, ori.input_storage, cpy.input_storage
        ):
            assert here.data is there.data
def test_copy_delete_updates(self):
    w = iscalar("w")
    x = fscalar("x")
    # SharedVariable for tests, one of them has an update
    y = shared(value=1, name="y")
    z = shared(value=2, name="z")
    out = x + y + z

    # Test for different linkers
    for mode in ["FAST_RUN", "FAST_COMPILE"]:
        ori = function([x], out, mode=mode, updates={z: z * 2})
        cpy = ori.copy(delete_updates=True)

        assert cpy(1)[0] == 4
        assert cpy(1)[0] == 4
        assert cpy(1)[0] == 4

    # Test if unused implicit and explicit inputs from delete_updates
    # are ignored as intended.
    for mode in ["FAST_RUN", "FAST_COMPILE"]:
        ori = function([x], x, mode=mode, updates={z: z * 2})
        cpy = ori.copy(delete_updates=True)

        ori = function([x, w], x, mode=mode, updates={z: z + w})
        cpy = ori.copy(delete_updates=True)
def test_doc(self):
    # Ensure the code given in pfunc.txt works as expected

    # Example #1.
    a = lscalar()
    b = shared(1)
    f1 = pfunc([a], (a + b))
    f2 = pfunc([In(a, value=44)], a + b, updates={b: b + 1})
    assert b.get_value() == 1
    assert f1(3) == 4
    assert f2(3) == 4
    assert b.get_value() == 2
    assert f1(3) == 5
    b.set_value(0)
    assert f1(3) == 3

    # Example #2.
    a = tensor.lscalar()
    b = shared(7)
    f1 = pfunc([a], a + b)
    f2 = pfunc([a], a * b)
    assert f1(5) == 12
    b.set_value(8)
    assert f1(5) == 13
    assert f2(4) == 32
def test_shared_input_output():
    # Test bug reported on the mailing list by Alberto Orlandi
    # https://groups.google.com/d/topic/theano-users/6dLaEqc2R6g/discussion
    # The shared variable is both an input and an output of the function.
    inc = iscalar("inc")
    state = shared(0)
    state.name = "state"
    linker = CLinker()
    mode = Mode(linker=linker)
    f = function([inc], state, updates=[(state, state + inc)], mode=mode)
    g = function([inc], state, updates=[(state, state + inc)])

    # Initial value
    f0 = f(0)
    g0 = g(0)
    assert f0 == g0 == 0, (f0, g0)

    # Increment state via f, returns the previous value.
    f2 = f(2)
    assert f2 == f0, (f2, f0)
    f0 = f(0)
    g0 = g(0)
    assert f0 == g0 == 2, (f0, g0)

    # Increment state via g, returns the previous value
    g3 = g(3)
    assert g3 == g0, (g3, g0)
    f0 = f(0)
    g0 = g(0)
    assert f0 == g0 == 5, (f0, g0)

    vstate = shared(np.zeros(3, dtype="int32"))
    vstate.name = "vstate"
    fv = function([inc], vstate, updates=[(vstate, vstate + inc)], mode=mode)
    gv = function([inc], vstate, updates=[(vstate, vstate + inc)])

    # Initial value
    fv0 = fv(0)
    gv0 = gv(0)
    assert np.all(fv0 == 0), fv0
    assert np.all(gv0 == 0), gv0

    # Increment state via f, returns the previous value.
    fv2 = fv(2)
    assert np.all(fv2 == fv0), (fv2, fv0)
    fv0 = fv(0)
    gv0 = gv(0)
    assert np.all(fv0 == 2), fv0
    assert np.all(gv0 == 2), gv0

    # Increment state via g, returns the previous value
    gv3 = gv(3)
    assert np.all(gv3 == gv0), (gv3, gv0)
    fv0 = fv(0)
    gv0 = gv(0)
    assert np.all(fv0 == 5), fv0
    assert np.all(gv0 == 5), gv0
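# Minimal sketch of the behavior exercised above (editorial, not from the original
# file): when a shared variable is both the returned output and the target of an
# update, each call returns the value held *before* the update is applied. The
# helper name `_sketch_counter_returns_previous_value` is ours.
def _sketch_counter_returns_previous_value():
    import aesara
    import aesara.tensor as at

    step = at.iscalar("step")
    counter = aesara.shared(0, name="counter")
    bump = aesara.function([step], counter, updates=[(counter, counter + step)])

    assert bump(5) == 0  # the pre-update value is returned
    assert counter.get_value() == 5
    assert bump(1) == 5
    assert counter.get_value() == 6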
def test_clone0(self):
    x = shared(np.asarray([4, 4, 4]))
    y = shared(np.asarray([4, 4, 4]))
    z = shared(np.asarray([2, 2, 2]))
    up = pfunc(
        [],
        [],
        updates={x: (x * 5), y: ((x * 5) + y), z: (((x * 5) + y) ** z)},
    )

    up()
    assert np.all(x.get_value() == 20)
    assert np.all(y.get_value() == 24)
    assert np.all(z.get_value() == (24 ** 2))
def test_shared(self):
    # CHECK: two functions (f1 and f2) can share w
    w = shared(np.random.rand(2, 2), "w")
    wval = w.get_value(borrow=False)

    x = dmatrix()
    out1 = w + x
    out2 = w * x
    f1 = pfunc([x], [out1])
    f2 = pfunc([x], [out2])
    xval = np.random.rand(2, 2)
    assert np.all(f1(xval) == xval + wval)
    assert np.all(f2(xval) == xval * wval)

    # CHECK: updating a shared value
    f3 = pfunc([x], out1, updates=[(w, (w - 1))])
    # f3 changes the value of w
    assert np.all(f3(xval) == xval + wval)
    # this same value is read by f1
    assert np.all(f1(xval) == xval + (wval - 1))

    w.set_value(w.get_value(borrow=True) * 10, borrow=True)
    # this same value is read by f1
    assert np.all(f1(xval) == xval + w.get_value(borrow=True))
def test_shared_mutable(self):
    bval = np.arange(5)
    b = shared(bval)
    b_out = b * 2

    # shared vars copy args.
    assert b.get_value(borrow=True) is not bval
    # so we do this to get at the underlying data
    bval = data_of(b)

    # by default, shared are not mutable unless doing an explicit update
    f = pfunc([], [b_out], mode="FAST_RUN")
    assert (f() == np.arange(5) * 2).all()
    assert np.all(b.get_value(borrow=True) == np.arange(5))

    # using updates, b is now a mutable parameter
    f = pfunc([], [b_out], updates=[(b, b_out)], mode="FAST_RUN")
    assert (f() == (np.arange(5) * 2)).all()
    # because of the update
    assert (b.get_value(borrow=True) == (np.arange(5) * 2)).all()
    assert (bval == (np.arange(5) * 2)).all()  # because of mutable=True

    # do not depend on updates being in-place though!
    bval = np.arange(5)
    b.set_value(bval, borrow=True)
    bval = data_of(b)
    f = pfunc([], [b_out], updates=[(b, (b_out + 3))], mode="FAST_RUN")
    assert (f() == (np.arange(5) * 2)).all()
    # because of the update
    assert (b.get_value(borrow=True) == ((np.arange(5) * 2) + 3)).all()

    # bval got modified to something...
    assert not (bval == np.arange(5)).all()
    # ... but not to b.value !
    assert not (bval == b.get_value(borrow=True)).all()
def test_make_node_shared(self):
    """Make sure we can provide `OpFromGraph.make_node` new shared inputs and get a valid `OpFromGraph`."""

    x = at.scalar("x")
    y = shared(1.0, name="y")

    test_ofg = OpFromGraph([x], [x + y], on_unused_input="ignore")
    assert test_ofg.shared_inputs == [y]

    out = test_ofg(x)

    y_clone = y.clone()
    assert y_clone != y
    y_clone.name = "y_clone"

    out_new = test_ofg.make_node(*(out.owner.inputs[:1] + [y_clone])).outputs[0]

    assert "on_unused_input" in out_new.owner.op.kwargs
    assert out_new.owner.op.shared_inputs == [y_clone]

    out_fn = function([x], out_new)
    assert np.array_equal(out_fn(1.0), 2.0)

    y_clone.set_value(2.0)
    assert np.array_equal(out_fn(1.0), 3.0)
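# Editorial sketch of basic `OpFromGraph` usage, for readers unfamiliar with the
# class being tested here: wrap a small graph into a reusable op and apply it to
# fresh inputs. The helper name and the example graph are illustrative only.
def _sketch_opfromgraph_usage():
    import numpy as np

    import aesara
    import aesara.tensor as at
    from aesara.compile.builders import OpFromGraph

    a, b = at.scalars("a", "b")
    # The wrapped graph computes a * b + a.
    mul_add = OpFromGraph([a, b], [a * b + a])

    x, y = at.scalars("x", "y")
    f = aesara.function([x, y], mul_add(x, y))
    assert np.isclose(f(2.0, 3.0), 8.0)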
def test_in_shared_variable(self):
    # Ensure that an error is raised if In is used to wrap a shared variable
    a = shared(1.0)
    a_wrapped = In(a, update=a + 1)

    with pytest.raises(TypeError):
        function([a_wrapped])
def test_update_equiv(self):
    # Like test_update_same, but the update expression is simplified until
    # it is found to be equal to the original variable
    a = shared(1.0, "a")
    b = shared(np.ones((2, 3)), "b")

    # See comment in test_update_same about why we try both
    # shared variables.
    f = aesara.function([], [], updates=[(a, a), (b, (2 * b - b))])
    g = aesara.function([], [], updates=[(a, (a * 2 - a)), (b, b)])

    f()
    assert a.get_value(borrow=True).shape == (), a.get_value()
    assert b.get_value(borrow=True).shape == (2, 3), b.get_value()

    g()
    assert a.get_value(borrow=True).shape == (), a.get_value()
    assert b.get_value(borrow=True).shape == (2, 3), b.get_value()
def test_default_scalar_container(self):
    # Similar in spirit to test_default_container, but updating a scalar
    # variable. This is a sanity check for non-mutable types.
    x = shared(0.0, "x")
    f = pfunc([], x)
    assert f() == 0
    x.set_value(x.get_value(borrow=True) + 1, borrow=True)
    assert f() == 1
def test_givens_replaces_shared_variable2(self):
    a = shared(1.0, "a")
    a.default_update = a + 3
    c = a + 10
    f = pfunc([], c, givens={a: (a + 10)})

    assert f() == 21
    assert f() == 34
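# Editorial note on the assertions above: the `givens` substitution {a: a + 10}
# is applied to the whole compiled graph, including the implicit
# `a.default_update = a + 3` expression. On the first call the output is
# (1 + 10) + 10 == 21 while `a` is updated to (1 + 10) + 3 == 14; on the second
# call the output is therefore (14 + 10) + 10 == 34.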
def test_NanGuardMode():
    # Tests if NanGuardMode is working by intentionally feeding in np.inf
    # and np.nan values. A working implementation should be able to capture
    # all of these abnormalities.
    rng = np.random.default_rng(2482)
    x = matrix()
    w = shared(rng.standard_normal((5, 7)).astype(config.floatX))
    y = dot(x, w)

    fun = function([x], y, mode=NanGuardMode(nan_is_error=True, inf_is_error=True))
    a = rng.standard_normal((3, 5)).astype(config.floatX)
    with pytest.warns(RuntimeWarning):
        infa = np.tile((np.asarray(100.0) ** 1000000).astype(config.floatX), (3, 5))
    nana = np.tile(np.asarray(np.nan).astype(config.floatX), (3, 5))
    biga = np.tile(np.asarray(1e20).astype(config.floatX), (3, 5))

    fun(a)  # normal values

    # Temporarily silence logger
    _logger = logging.getLogger("aesara.compile.nanguardmode")
    try:
        _logger.propagate = False
        with pytest.raises(AssertionError):
            fun(infa)  # INFs
        with pytest.raises(AssertionError), pytest.warns(RuntimeWarning):
            fun(nana)  # NANs
        with pytest.raises(AssertionError):
            fun(biga)  # big values
    finally:
        _logger.propagate = True

    # slices
    a = rng.standard_normal((3, 4, 5)).astype(config.floatX)
    with pytest.warns(RuntimeWarning):
        infa = np.tile(
            (np.asarray(100.0) ** 1000000).astype(config.floatX), (3, 4, 5)
        )
    nana = np.tile(np.asarray(np.nan).astype(config.floatX), (3, 4, 5))
    biga = np.tile(np.asarray(1e20).astype(config.floatX), (3, 4, 5))

    x = tensor3()
    y = x[:, at.arange(2), at.arange(2), None]
    fun = function([x], y, mode=NanGuardMode(nan_is_error=True, inf_is_error=True))
    fun(a)  # normal values
    try:
        _logger.propagate = False
        with pytest.raises(AssertionError):
            fun(infa)  # INFs
        with pytest.raises(AssertionError), pytest.warns(RuntimeWarning):
            fun(nana)  # NANs
        with pytest.raises(AssertionError):
            fun(biga)  # big values
    finally:
        _logger.propagate = True
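# Quick usage sketch for NanGuardMode (editorial, not part of the original tests):
# compiling a function in this mode makes it raise as soon as a NaN, Inf, or
# overly large value flows through the graph. The helper name is illustrative.
def _sketch_nanguardmode_usage():
    import numpy as np

    import aesara
    import aesara.tensor as at
    from aesara.compile.nanguardmode import NanGuardMode

    x = at.vector("x")
    f = aesara.function(
        [x],
        x * 2,
        mode=NanGuardMode(nan_is_error=True, inf_is_error=True, big_is_error=True),
    )

    f(np.ones(3, dtype=aesara.config.floatX))  # normal values pass through
    try:
        f(np.array([np.nan, 0.0, 1.0], dtype=aesara.config.floatX))
    except AssertionError:
        pass  # NanGuardMode flagged the NaN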
def test_givens_replaces_shared_variable(self):
    a = shared(1.0, "a")
    a.default_update = a + 3.0
    b = tensor.dscalar("b")
    c = a + 10
    f = pfunc([b], c, givens={a: b})

    assert len(f.maker.fgraph.inputs) == 1
    assert len(f.maker.fgraph.outputs) == 1
def test_default_updates_partial_graph(self):
    a = shared(0)
    a.default_update = a + 1  # Increment a each time it is used
    b = 2 * a
    # Use only the tip of the graph, a is not used
    f = pfunc([b], b)
    assert a.get_value() == 0
    f(21)
    assert a.get_value() == 0
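# Editorial note: `a.default_update` does not fire here because the function is
# compiled from the precomputed input `b`; the shared variable `a` never appears
# in the compiled graph, so its default update is not registered.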
def test_no_shared_as_input(self):
    # Test that shared variables cannot be used as function inputs.
    w_init = np.random.rand(2, 2)
    w = shared(w_init.copy(), "w")
    with pytest.raises(
        TypeError, match=r"^Cannot use a shared variable \(w\) as explicit input"
    ):
        pfunc([w], aesara.tensor.sum(w * w))
def test_default_updates_expressions(self):
    x = shared(0)
    y = shared(1)
    a = lscalar("a")

    z = a * x
    x.default_update = x + y

    f1 = pfunc([a], z)
    f1(12)
    assert x.get_value() == 1

    f2 = pfunc([a], z, no_default_updates=True)
    assert f2(7) == 7
    assert x.get_value() == 1

    f3 = pfunc([a], z, no_default_updates=[x])
    assert f3(9) == 9
    assert x.get_value() == 1
def test_check_for_aliased_inputs(self):
    b = np.random.rand(5, 4)
    s1 = shared(b)
    s2 = shared(b)
    x1 = vector()

    # Assert cases we should not check for aliased inputs
    for d in [
        dict(outputs=[s1 + 1]),
        dict(outputs=[s1 + 1, s2 + 3]),
        dict(outputs=[s1 + 1], updates=[(s2, s2 + 3)]),
        dict(inputs=[x1], outputs=[x1 + 1], updates=[(s2, s2 + 3)]),
    ]:
        if "inputs" not in d:
            d["inputs"] = []
        f = function(**d)
        assert not f._check_for_aliased_inputs, d

    # Assert cases we should check for aliased inputs
    for d in [
        dict(
            inputs=[In(x1, borrow=True)],
            outputs=[x1 + 1],
            updates=[(s2, s2 + 3)],
        ),
        dict(
            inputs=[In(x1, borrow=True, mutable=True)],
            outputs=[x1 + 1],
            updates=[(s2, s2 + 3)],
        ),
        dict(
            inputs=[In(x1, mutable=True)],
            outputs=[x1 + 1],
            updates=[(s2, s2 + 3)],
        ),
    ]:
        if "inputs" not in d:
            d["inputs"] = []
        f = function(**d)
        assert f._check_for_aliased_inputs, d
def test_update(self):
    # Test the update mechanism in different settings.

    # Simple value assignment.
    x = shared(0)
    assign = pfunc([], [], updates={x: 3})
    assign()
    assert x.get_value() == 3

    # Basic increment function.
    x.set_value(0)
    inc = pfunc([], [], updates={x: x + 1})
    inc()
    assert x.get_value() == 1

    # Increment by the value of another shared variable.
    x.set_value(-1)
    y = shared(2)
    inc_by_y = pfunc([], [], updates={x: x + y})
    inc_by_y()
    assert x.get_value() == 1
def test_shared_with_constant_input(self):
    """Make sure that a constant input can be given to an `OpFromGraph` instance."""
    x = at.scalar("x")
    y = shared(1.0, name="y")

    test_ofg = OpFromGraph([x], [x + y])
    assert test_ofg.shared_inputs == [y]

    out = test_ofg(at.as_tensor(1.0, dtype=config.floatX))
    out_fn = function([], out)
    assert np.array_equal(out_fn(), 2.0)
def test_update_err_broadcast(self):
    # Test that incompatible broadcastable dimensions raise an error
    data = np.random.rand(10, 10).astype("float32")
    output_var = shared(name="output", value=data)

    # output_var has a (non-broadcastable) matrix type, while the update
    # expression is a broadcasted scalar; that mismatch should be rejected.
    with pytest.raises(TypeError):
        aesara.function(
            inputs=[],
            outputs=[],
            updates={output_var: output_var.sum().dimshuffle("x", "x")},
        )
def test_default_updates_input(self):
    x = shared(0)
    y = shared(1)
    if aesara.configdefaults.python_int_bitwidth() == 32:
        a = iscalar("a")
    else:
        a = lscalar("a")

    x.default_update = y
    y.default_update = y + a

    f1 = pfunc([], x, no_default_updates=True)
    f1()
    assert x.get_value() == 0
    assert y.get_value() == 1

    f2 = pfunc([], x, no_default_updates=[x])
    f2()
    assert x.get_value() == 0
    assert y.get_value() == 1

    f3 = pfunc([], x, no_default_updates=[y])
    f3()
    assert x.get_value() == 1
    assert y.get_value() == 1

    f4 = pfunc([a], x)
    f4(2)
    assert x.get_value() == 1
    assert y.get_value() == 3

    f5 = pfunc([], x, updates={y: (y - 1)})
    f5()
    assert x.get_value() == 3
    assert y.get_value() == 2

    # a is needed as input if y.default_update is used
    with pytest.raises(MissingInputError):
        pfunc([], x)
def test_default_updates_chained(self):
    x = shared(2)
    y = shared(1)
    z = shared(-1)

    x.default_update = x - y
    y.default_update = z
    z.default_update = z - 1

    f1 = pfunc([], [x])
    f1()
    assert x.get_value() == 1
    assert y.get_value() == -1
    assert z.get_value() == -2

    f2 = pfunc([], [x, y])
    f2()
    assert x.get_value() == 2
    assert y.get_value() == -2
    assert z.get_value() == -3

    f3 = pfunc([], [y])
    f3()
    assert x.get_value() == 2
    assert y.get_value() == -3
    assert z.get_value() == -4

    f4 = pfunc([], [x, y], no_default_updates=[x])
    f4()
    assert x.get_value() == 2
    assert y.get_value() == -4
    assert z.get_value() == -5

    f5 = pfunc([], [x, y, z], no_default_updates=[z])
    f5()
    assert x.get_value() == 6
    assert y.get_value() == -5
    assert z.get_value() == -5
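# Editorial note on the chaining above: a default update is registered whenever
# its shared variable ends up in the compiled graph, and the update expressions
# themselves can pull in further shared variables. Computing `x` needs
# `x.default_update = x - y`, which brings in `y` and hence `y.default_update = z`,
# which in turn brings in `z` and `z.default_update = z - 1`; `no_default_updates`
# then prunes individual links from that chain.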
def test_default_container(self):
    # Ensure it is possible to (implicitly) use a shared variable in a
    # function, as a 'state' that can be updated at will.
    rng = np.random.RandomState(1827)
    w_init = rng.rand(5)
    w = shared(w_init.copy(), "w")
    reg = aesara.tensor.sum(w * w)
    f = pfunc([], reg)

    assert f() == np.sum(w_init * w_init)

    # Change the value of w and ensure the output changes accordingly.
    w.set_value(w.get_value(borrow=True) + 1.0, borrow=True)
    assert f() == np.sum((w_init + 1) ** 2)
def test_update_same(self):
    # There was a bug in CVM, triggered when a shared variable
    # was its own update expression.
    a = shared(1.0, "a")
    b = shared(np.ones((2, 3)), "b")

    # The order of the variables is not determined, so we try
    # both shared variables.
    # TODO: explain the above comment. By "not determined" does
    # this mean "not deterministic"?
    # This test originally wrote the updates using dictionaries,
    # and iterating over the dictionary was not deterministic.
    # Is that all the comment above meant, or is the CVM intended
    # to add extra non-determinism? Or is the CVM meant to
    # deterministically but arbitrarily pick an order for the updates?
    f = aesara.function([], [], updates=[(a, a), (b, (2 * b))])
    g = aesara.function([], [], updates=[(a, (a * 2)), (b, b)])

    f()
    assert a.get_value(borrow=True).shape == (), a.get_value()
    assert b.get_value(borrow=True).shape == (2, 3), b.get_value()

    g()
    assert a.get_value(borrow=True).shape == (), a.get_value()
    assert b.get_value(borrow=True).shape == (2, 3), b.get_value()
def test_in_update_shared(self):
    # Test that using both In() with updates and shared variables with
    # updates in the same function behaves as expected
    shared_var = shared(1.0)
    a = dscalar("a")
    a_wrapped = In(a, value=0.0, update=shared_var)
    f = function([a_wrapped], [], updates={shared_var: a}, mode="FAST_RUN")

    # Ensure that, through the executions of the function, the state of
    # the input and the shared variable are appropriate (after N executions,
    # the values have swapped N times). This allows testing that the
    # changes occur at the same time and one doesn't overwrite the other.
    for i in range(5):
        f()
        assert np.allclose(shared_var.get_value(), i % 2)