def test_givens_input_var(self):
    # Ensure error is raised when trying to replace an input variable.
    x = tt.scalar("x")
    y = x * 2
    with pytest.raises(RuntimeError):
        function([x], y, givens={x: x + 1})

def test_no_inplace(self):
    # Test that when not running inplace, the RandomState is not updated
    rf = RandomFunction("uniform", tensor.dvector)
    rng_R = random_state_type()
    post_r, out = rf(rng_R, (3,), 0.0, 1.0)
    f = compile.function([rng_R], [post_r, out])
    rng = np.random.RandomState(utt.fetch_seed())

    rng0, val0 = f(rng)
    rng_ = np.random.RandomState(utt.fetch_seed())
    # rng should still be in a fresh state
    assert rng_R.type.values_eq(rng, rng_)
    # rng0 should be in an updated state
    assert not rng_R.type.values_eq(rng, rng0)

    f2 = compile.function(
        [compile.In(rng_R, value=rng, update=post_r, mutable=False)],
        [post_r, out],
    )
    rng2, val2 = f2()
    # rng should be in a fresh state
    assert rng_R.type.values_eq(rng, rng_)
    # rng2 should be in an updated state
    assert not rng_R.type.values_eq(rng, rng2)
    # The updated state should be the same for both functions
    assert rng_R.type.values_eq(rng2, rng0)

    rng3, val3 = f2()
    # rng2 should not have changed
    assert rng_R.type.values_eq(rng2, rng0)
    # rng3 should be an updated again version of rng2
    assert not rng_R.type.values_eq(rng3, rng2)
    assert not rng_R.type.values_eq(rng3, rng)

def test_disconnected_input(self):
    a = tt.scalar("a")
    v = tt.vector("v")

    with pytest.raises(UnusedInputError):
        function([a, v], v * 2)

    function([a, v], v * 2, on_unused_input="ignore")

def test_masked_input(self):
    m = tt.matrix("m")
    mt = m.T
    mt.name = "m.T"

    with pytest.raises(UnusedInputError):
        function([m, mt], mt * 2)

    function([m, mt], mt * 2, on_unused_input="ignore")

def test_shared_state0(self):
    a = tt.scalar()  # the a is for 'anonymous' (un-named).
    x, s = tt.scalars("xs")

    f = function(
        [
            x,
            In(a, value=1.0, name="a"),
            In(s, value=0.0, update=s + a * x, mutable=True),
        ],
        s + a * x,
    )
    g = function(
        [
            x,
            In(a, value=1.0, name="a"),
            In(s, value=f.container[s], update=s - a * x, mutable=True),
        ],
        s + a * x,
    )

    f(1, 2)
    assert f[s] == 2
    assert g[s] == 2
    g(1, 2)
    assert f[s] == 0
    assert g[s] == 0

def test_borrow_output(self):
    a = tt.dmatrix()
    f = function([a], Out(a, borrow=False))
    o = np.ones((3, 3))
    assert o is not f(o)  # function no longer permits aliasing outputs to inputs

    f = function([a], Out(a * 4, borrow=False))
    o = np.ones((3, 3))
    four = f(o)
    assert np.all(four == 4)
    f(o + 0.1)  # should not clobber the memory used to store four
    assert np.all(four == 4)

    f = function(
        [a], Out(a * 4, borrow=True), mode=aesara.Mode("c|py_nogc", "fast_run")
    )
    o = np.ones((3, 3))
    four = f(o)
    assert np.all(four == 4)
    f(o + 0.1)  # should clobber the memory used to store four
    if aesara.config.cxx:
        assert not np.all(four == 4)
    else:
        # Elemwise.perform does not reuse memory, as some numpy versions
        # do not support that correctly.
        assert np.all(four == 4)

def test_shared_state2(self):
    a = tt.scalar()  # the a is for 'anonymous' (un-named).
    x, s = tt.scalars("xs")

    f = function(
        [
            x,
            In(a, value=1.0, name="a"),
            In(s, value=0.0, update=s + a * x, mutable=False),
        ],
        s + a * x,
    )
    g = function(
        [x, In(a, value=1.0, name="a"), In(s, value=f.container[s])], s + a * x
    )

    f(1, 2)
    assert f[s] == 2
    assert g[s] == 2
    f(1, 2)
    assert f[s] == 4
    assert g[s] == 4
    g(1, 2)  # has no effect on state
    assert f[s] == 4
    assert g[s] == 4

def test_binomial_vector(self):
    rng_R = random_state_type()
    n = tensor.lvector()
    prob = tensor.vector()
    post_r, out = binomial(rng_R, n=n, p=prob)
    assert out.ndim == 1
    f = compile.function([rng_R, n, prob], [post_r, out], accept_inplace=True)

    n_val = [1, 2, 3]
    prob_val = np.asarray([0.1, 0.2, 0.3], dtype=config.floatX)
    rng = np.random.RandomState(utt.fetch_seed())
    numpy_rng = np.random.RandomState(utt.fetch_seed())

    # Arguments of size (3,)
    rng0, val0 = f(rng, n_val, prob_val)
    numpy_val0 = numpy_rng.binomial(n=n_val, p=prob_val)
    assert np.all(val0 == numpy_val0)

    # Arguments of size (2,)
    rng1, val1 = f(rng0, n_val[:-1], prob_val[:-1])
    numpy_val1 = numpy_rng.binomial(n=n_val[:-1], p=prob_val[:-1])
    assert np.all(val1 == numpy_val1)

    # Specifying the size explicitly
    g = compile.function(
        [rng_R, n, prob],
        binomial(rng_R, n=n, p=prob, size=(3,)),
        accept_inplace=True,
    )
    rng2, val2 = g(rng1, n_val, prob_val)
    numpy_val2 = numpy_rng.binomial(n=n_val, p=prob_val, size=(3,))
    assert np.all(val2 == numpy_val2)
    with pytest.raises(ValueError):
        g(rng2, n_val[:-1], prob_val[:-1])

def __init__(self):
    a = tt.scalar()  # the a is for 'anonymous' (un-named).
    x, s = tt.scalars("xs")
    v = tt.vector("v")

    self.s = s
    self.x = x
    self.v = v

    self.e = a * x + s

    self.f1 = function(
        [
            x,
            In(a, value=1.0, name="a"),
            In(s, value=0.0, update=s + a * x, mutable=True),
        ],
        s + a * x,
    )
    self.f2 = function(
        [
            x,
            In(a, value=1.0, name="a"),
            In(s, value=self.f1.container[s], update=s + a * x, mutable=True),
        ],
        s + a * x,
    )

def test_vector_arguments(self):
    rng_R = random_state_type()
    low = tensor.vector()
    post_r, out = uniform(rng_R, low=low, high=1)
    assert out.ndim == 1
    f = compile.function([rng_R, low], [post_r, out], accept_inplace=True)

    def as_floatX(thing):
        return np.asarray(thing, dtype=aesara.config.floatX)

    rng_state0 = np.random.RandomState(utt.fetch_seed())
    numpy_rng = np.random.RandomState(utt.fetch_seed())
    post0, val0 = f(rng_state0, [-5, 0.5, 0, 1])
    post1, val1 = f(post0, as_floatX([0.9]))
    numpy_val0 = as_floatX(numpy_rng.uniform(low=[-5, 0.5, 0, 1], high=1))
    numpy_val1 = as_floatX(numpy_rng.uniform(low=as_floatX([0.9]), high=1))
    assert np.all(val0 == numpy_val0)
    assert np.all(val1 == numpy_val1)

    high = tensor.vector()
    post_rb, outb = uniform(rng_R, low=low, high=high)
    assert outb.ndim == 1
    fb = compile.function([rng_R, low, high], [post_rb, outb], accept_inplace=True)

    post0b, val0b = fb(post1, [-4.0, -2], [-1, 0])
    post1b, val1b = fb(post0b, [-4.0], [-1])
    numpy_val0b = as_floatX(numpy_rng.uniform(low=[-4.0, -2], high=[-1, 0]))
    numpy_val1b = as_floatX(numpy_rng.uniform(low=[-4.0], high=[-1]))
    assert np.all(val0b == numpy_val0b)
    assert np.all(val1b == numpy_val1b)
    with pytest.raises(ValueError):
        fb(post1b, [-4.0, -2], [-1, 0, 1])
    # TODO: do we want that?
    # with pytest.raises(ValueError):
    #     fb(post1b, [-4., -2], [-1])

    size = tensor.lvector()
    post_rc, outc = uniform(rng_R, low=low, high=high, size=size, ndim=1)
    fc = compile.function(
        [rng_R, low, high, size], [post_rc, outc], accept_inplace=True
    )
    post0c, val0c = fc(post1b, [-4.0, -2], [-1, 0], [2])
    post1c, val1c = fc(post0c, [-4.0], [-1], [1])
    numpy_val0c = as_floatX(numpy_rng.uniform(low=[-4.0, -2], high=[-1, 0]))
    numpy_val1c = as_floatX(numpy_rng.uniform(low=[-4.0], high=[-1]))
    assert np.all(val0c == numpy_val0c)
    assert np.all(val1c == numpy_val1c)
    with pytest.raises(ValueError):
        fc(post1c, [-4.0, -2], [-1, 0], [1, 2])
    with pytest.raises(ValueError):
        fc(post1c, [-4.0, -2], [-1, 0], [2, 1])
    with pytest.raises(ValueError):
        fc(post1c, [-4.0, -2], [-1, 0], [1])
    with pytest.raises(ValueError):
        fc(post1c, [-4.0, -2], [-1], [1])

def test_empty_givens_updates():
    # Regression test for bug fixed in 8625e03.
    # Empty givens / updates dictionaries were not properly detected before,
    # triggering useless crashes at compile time.
    x = tt.scalar()
    y = x * 2
    function([aesara.In(x)], y, givens={})
    function([aesara.In(x)], y, updates={})

def test_deepcopy_trust_input(self):
    a = tt.dscalar()  # the a is for 'anonymous' (un-named).
    x, s = tt.dscalars("xs")

    f = function(
        [
            x,
            In(a, value=1.0, name="a"),
            In(s, value=0.0, update=s + a * x, mutable=True),
        ],
        s + a * x,
    )
    f.trust_input = True
    try:
        g = copy.deepcopy(f)
    except NotImplementedError as e:
        # Exceptions are not subscriptable in Python 3; check the message
        # through e.args instead.
        if e.args[0].startswith("DebugMode is not picklable"):
            return
        else:
            raise
    assert f.trust_input is g.trust_input
    f(np.asarray(2.0))
    with pytest.raises(
        (ValueError, AttributeError, aesara.compile.debugmode.InvalidValueError)
    ):
        f(2.0)
    g(np.asarray(2.0))
    with pytest.raises(
        (ValueError, AttributeError, aesara.compile.debugmode.InvalidValueError)
    ):
        g(2.0)

def test_none(self):
    fn = function([], None)  # ok
    rval = fn()
    assert (
        rval != []
    ), "See #254: Using None as function output leads to [] return value"
    assert rval is None

def test_copy(self):
    a = tt.scalar()  # the a is for 'anonymous' (un-named).
    x, s = tt.scalars("xs")

    f = function(
        [
            x,
            In(a, value=1.0, name="a"),
            In(s, value=0.0, update=s + a * x, mutable=True),
        ],
        s + a * x,
    )
    g = copy.copy(f)
    # if they both return, assume that they return equivalent things.

    assert g.container[x].storage is not f.container[x].storage
    assert g.container[a].storage is not f.container[a].storage
    assert g.container[s].storage is not f.container[s].storage

    assert g.value[a] is f.value[a]  # should not have been copied
    assert (
        g.value[s] is not f.value[s]
    )  # should have been copied because it is mutable.
    assert not (g.value[s] != f.value[s]).any()  # its contents should be identical

    assert f(2, 1) == g(2)  # they should be in sync, default value should be copied.
    assert f(2, 1) == g(2)  # they should be in sync, default value should be copied.
    f(1, 2)  # put them out of sync
    assert f(1, 2) != g(1, 2)  # they should not be equal anymore.

def test_lop_override(self, cls_ofg):
    x = tt.vector()
    y = 1.0 / (1.0 + tt.exp(-x))

    def lop_ov(inps, outs, grads):
        (y_,) = outs
        (dedy_,) = grads
        return [2.0 * y_ * (1.0 - y_) * dedy_]

    y_, dedy = tt.vector(), tt.vector()
    op_lop_ov = cls_ofg([x, y_, dedy], [2.0 * y_ * (1.0 - y_) * dedy])

    xx = tt.vector()
    yy1 = tt.sum(tt.nnet.sigmoid(xx))
    gyy1 = 2.0 * tt.grad(yy1, xx)

    for ov in [lop_ov, op_lop_ov]:
        op = cls_ofg([x], [y], lop_overrides=ov)
        yy2 = tt.sum(op(xx))
        gyy2 = tt.grad(yy2, xx)
        fn = function([xx], [gyy1, gyy2])

        xval = np.random.rand(32).astype(config.floatX)
        y1val, y2val = fn(xval)
        assert np.allclose(y1val, y2val)

def test_mixed_shape(self):
    # Test when the provided shape is a tuple of ints and scalar vars
    rng_R = random_state_type()
    shape0 = tensor.lscalar()
    shape = (shape0, 3)
    post_r, u = uniform(rng_R, size=shape, ndim=2)
    f = compile.function([rng_R, shape0], u)
    rng_state0 = np.random.RandomState(utt.fetch_seed())

    assert f(rng_state0, 2).shape == (2, 3)
    assert f(rng_state0, 8).shape == (8, 3)

    post_r, v = uniform(rng_R, size=shape)
    g = compile.function([rng_R, shape0], v)
    assert g(rng_state0, 2).shape == (2, 3)
    assert g(rng_state0, 8).shape == (8, 3)

def test_poisson(self):
    # Test that raw_random.poisson generates the same results as numpy.
    # Check over two calls to see if the random state is correctly updated.
    rng_R = random_state_type()
    # Use non-default parameters, and larger dimensions because of
    # the integer nature of the result
    post_r, out = poisson(rng_R, lam=5, size=(11, 8))

    f = compile.function(
        [
            compile.In(
                rng_R,
                value=np.random.RandomState(utt.fetch_seed()),
                update=post_r,
                mutable=True,
            )
        ],
        [out],
        accept_inplace=True,
    )

    numpy_rng = np.random.RandomState(utt.fetch_seed())
    val0 = f()
    val1 = f()
    numpy_val0 = numpy_rng.poisson(5, size=(11, 8))
    numpy_val1 = numpy_rng.poisson(5, size=(11, 8))
    assert np.allclose(val0, numpy_val0)
    assert np.allclose(val1, numpy_val1)

def test_multinomial(self):
    # Test that raw_random.multinomial generates the same results as numpy.
    # Check over two calls to see if the random state is correctly updated.
    rng_R = random_state_type()
    post_r, out = multinomial(rng_R, (7, 3), 6, [0.2] * 5)

    f = compile.function(
        [
            compile.In(
                rng_R,
                value=np.random.RandomState(utt.fetch_seed()),
                update=post_r,
                mutable=True,
            )
        ],
        [out],
        accept_inplace=True,
    )

    numpy_rng = np.random.RandomState(utt.fetch_seed())
    (val0,) = f()
    (val1,) = f()

    numpy_val0 = numpy_rng.multinomial(6, [0.2] * 5, (7, 3))
    numpy_val1 = numpy_rng.multinomial(6, [0.2] * 5, (7, 3))

    assert np.all(val0 == numpy_val0)
    assert np.all(val1 == numpy_val1)
    assert val0.shape == (7, 3, 5)
    assert val1.shape == (7, 3, 5)

def test_binomial(self):
    # Test that raw_random.binomial generates the same results as numpy.
    # Check over two calls to see if the random state is correctly updated.
    rng_R = random_state_type()
    # Use non-default parameters, and larger dimensions because of
    # the integer nature of the result
    post_r, bin = binomial(rng_R, (7, 12), 5, 0.8)

    f = compile.function(
        [
            compile.In(
                rng_R,
                value=np.random.RandomState(utt.fetch_seed()),
                update=post_r,
                mutable=True,
            )
        ],
        [bin],
        accept_inplace=True,
    )

    numpy_rng = np.random.RandomState(utt.fetch_seed())
    val0 = f()
    val1 = f()
    numpy_val0 = numpy_rng.binomial(5, 0.8, size=(7, 12))
    numpy_val1 = numpy_rng.binomial(5, 0.8, size=(7, 12))
    assert np.all(val0 == numpy_val0)
    assert np.all(val1 == numpy_val1)

def test_normal(self):
    # Test that raw_random.normal generates the same results as numpy.
    # Check over two calls to see if the random state is correctly updated.
    rng_R = random_state_type()
    # Use non-default parameters
    post_r, out = normal(rng_R, (2, 3), 4.0, 2.0)

    f = compile.function(
        [
            compile.In(
                rng_R,
                value=np.random.RandomState(utt.fetch_seed()),
                update=post_r,
                mutable=True,
            )
        ],
        [out],
        accept_inplace=True,
    )

    numpy_rng = np.random.RandomState(utt.fetch_seed())
    val0 = f()
    val1 = f()
    numpy_val0 = numpy_rng.normal(4.0, 2.0, size=(2, 3))
    numpy_val1 = numpy_rng.normal(4.0, 2.0, size=(2, 3))
    assert np.allclose(val0, numpy_val0)
    assert np.allclose(val1, numpy_val1)

def test_random_function_ndim(self):
    # Test that the random_function helper accepts the ndim argument
    rng_R = random_state_type()

    # ndim is an optional argument indicating the length of the 'shape'
    # ndim not specified, OK
    post_out4, out4 = uniform(rng_R, (4,))

    # ndim specified, consistent with shape, OK
    post_out1_4, out1_4 = uniform(rng_R, (4,), ndim=1)
    post_out2_4_4, out2_4_4 = uniform(rng_R, (4, 4), ndim=2)

    # ndim specified, but not compatible with shape
    with pytest.raises(ValueError):
        uniform(rng_R, (4,), ndim=2)

    f_ok = compile.function(
        [
            compile.In(
                rng_R,
                value=np.random.RandomState(utt.fetch_seed()),
                update=post_out2_4_4,
                mutable=True,
            )
        ],
        [out4, out1_4, out2_4_4],
        accept_inplace=True,
    )

    # The correct cases should execute properly
    o4, o1_4, o2_4_4 = f_ok()

    # Check the sanity of the answers
    assert np.allclose(o4, o1_4)
    assert np.allclose(o4, o2_4_4[0])

def test_inplace_optimization(self):
    # Test that FAST_RUN includes the random_make_inplace optimization
    # inplace = False
    rf2 = RandomFunction(np.random.RandomState.uniform, tensor.dvector)
    rng_R = random_state_type()

    # If calling RandomFunction directly, all args have to be specified,
    # because shape will have to be moved to the end
    post_r2, out2 = rf2(rng_R, (4,), 0.0, 1.0)

    f = compile.function(
        [
            compile.In(
                rng_R,
                value=np.random.RandomState(utt.fetch_seed()),
                update=post_r2,
                mutable=True,
            )
        ],
        out2,
        mode="FAST_RUN",
    )  # DEBUG_MODE can't pass the id-based test below

    # Test that the RandomState object stays the same from function call to
    # function call, but that the values returned change from call to call.
    id0 = id(f[rng_R])
    val0 = f()
    assert id0 == id(f[rng_R])
    val1 = f()
    assert id0 == id(f[rng_R])

    assert not np.allclose(val0, val1)

def test_random_function_noshape_args(self):
    # Test if the random_function helper works with args but without shape
    rng_R = random_state_type()

    # No shape, default args -> OK
    post_out, out = uniform(rng_R, size=None, ndim=2)
    f = compile.function(
        [
            compile.In(
                rng_R,
                value=np.random.RandomState(utt.fetch_seed()),
                update=post_out,
                mutable=True,
            )
        ],
        [out],
        accept_inplace=True,
    )
    (o,) = f()

    # No shape, args that have to be broadcasted -> OK
    low = tensor.TensorType(dtype="float64", broadcastable=(False, True, True))()
    high = tensor.TensorType(
        dtype="float64", broadcastable=(True, True, True, False)
    )()
    post_out2, out2 = uniform(rng_R, size=None, ndim=2, low=low, high=high)
    assert out2.ndim == 4
    assert out2.broadcastable == (True, False, True, False)
    g = compile.function(
        [
            low,
            high,
            compile.In(
                rng_R,
                value=np.random.RandomState(utt.fetch_seed()),
                update=post_out2,
                mutable=True,
            ),
        ],
        [out2],
        accept_inplace=True,
    )
    low_v = [[[3]], [[4]], [[-5]]]
    high_v = [[[[5, 8]]]]
    (o2,) = g(low_v, high_v)
    assert o2.shape == (1, 3, 1, 2)

def test_normal_vector(self):
    rng_R = random_state_type()
    avg = tensor.vector()
    std = tensor.vector()
    post_r, out = normal(rng_R, avg=avg, std=std)
    assert out.ndim == 1
    f = compile.function([rng_R, avg, std], [post_r, out], accept_inplace=True)

    def as_floatX(thing):
        return np.asarray(thing, dtype=aesara.config.floatX)

    avg_val = [1, 2, 3]
    std_val = as_floatX([0.1, 0.2, 0.3])
    rng = np.random.RandomState(utt.fetch_seed())
    numpy_rng = np.random.RandomState(utt.fetch_seed())

    # Arguments of size (3,)
    rng0, val0 = f(rng, avg_val, std_val)
    numpy_val0 = as_floatX(
        numpy_rng.normal(loc=as_floatX(avg_val), scale=as_floatX(std_val))
    )
    assert np.all(val0 == numpy_val0)

    # Arguments of size (2,)
    rng1, val1 = f(rng0, avg_val[:-1], std_val[:-1])
    numpy_val1 = np.asarray(
        numpy_rng.normal(loc=avg_val[:-1], scale=std_val[:-1]),
        dtype=aesara.config.floatX,
    )
    assert np.all(val1 == numpy_val1)

    # Specifying the size explicitly
    g = compile.function(
        [rng_R, avg, std],
        normal(rng_R, avg=avg, std=std, size=(3,)),
        accept_inplace=True,
    )
    rng2, val2 = g(rng1, avg_val, std_val)
    numpy_val2 = np.asarray(
        numpy_rng.normal(loc=avg_val, scale=std_val, size=(3,)),
        dtype=aesara.config.floatX,
    )
    assert np.all(val2 == numpy_val2)
    with pytest.raises(ValueError):
        g(rng2, avg_val[:-1], std_val[:-1])

def test_multinomial_vector(self):
    rng_R = random_state_type()
    n = tensor.lvector()
    pvals = tensor.matrix()
    post_r, out = multinomial(rng_R, n=n, pvals=pvals)
    assert out.ndim == 2
    f = compile.function([rng_R, n, pvals], [post_r, out], accept_inplace=True)

    n_val = [1, 2, 3]
    pvals_val = [[0.1, 0.9], [0.2, 0.8], [0.3, 0.7]]
    pvals_val = np.asarray(pvals_val, dtype=config.floatX)
    rng = np.random.RandomState(utt.fetch_seed())
    numpy_rng = np.random.RandomState(utt.fetch_seed())

    # Arguments of size (3,)
    rng0, val0 = f(rng, n_val, pvals_val)
    numpy_val0 = np.asarray(
        [numpy_rng.multinomial(n=nv, pvals=pv) for nv, pv in zip(n_val, pvals_val)]
    )
    assert np.all(val0 == numpy_val0)

    # Arguments of size (2,)
    rng1, val1 = f(rng0, n_val[:-1], pvals_val[:-1])
    numpy_val1 = np.asarray(
        [
            numpy_rng.multinomial(n=nv, pvals=pv)
            for nv, pv in zip(n_val[:-1], pvals_val[:-1])
        ]
    )
    assert np.all(val1 == numpy_val1)

    # Specifying the size explicitly
    g = compile.function(
        [rng_R, n, pvals],
        multinomial(rng_R, n=n, pvals=pvals, size=(3,)),
        accept_inplace=True,
    )
    rng2, val2 = g(rng1, n_val, pvals_val)
    numpy_val2 = np.asarray(
        [numpy_rng.multinomial(n=nv, pvals=pv) for nv, pv in zip(n_val, pvals_val)]
    )
    assert np.all(val2 == numpy_val2)
    with pytest.raises(ValueError):
        g(rng2, n_val[:-1], pvals_val[:-1])

def test_shared_state_not_implicit(self):
    # This test is taken from the documentation in
    # doc/topics/function.txt. If it does not pass anymore and yet the
    # behavior is still intended, the doc and the test should both be
    # updated accordingly.
    x, s = tt.scalars("xs")
    inc = function([x, In(s, update=(s + x), value=10.0)], [])
    dec = function(
        [x, In(s, update=(s - x), value=inc.container[s], implicit=False)], []
    )
    assert dec[s] is inc[s]
    inc[s] = 2
    assert dec[s] == 2
    dec(1)
    assert inc[s] == 1
    dec(1, 0)
    assert inc[s] == -1
    assert dec[s] == -1

def test_shared_grad(self, cls_ofg):
    x, y, z = tt.matrices("xyz")
    s = shared(np.random.rand(2, 2).astype(config.floatX))
    e = x + y * z + s
    op = cls_ofg([x, y, z], [e])
    f = op(x, y, z)
    f = f - tt.grad(tt.sum(f), y)
    fn = function([x, y, z], f)
    xv = np.ones((2, 2), dtype=config.floatX)
    yv = np.ones((2, 2), dtype=config.floatX) * 3
    zv = np.ones((2, 2), dtype=config.floatX) * 5
    assert np.allclose(11.0 + s.get_value(), fn(xv, yv, zv))

    # Take the gradient again, this time with respect to the shared variable
    f = op(x, y, z)
    f = f - tt.grad(tt.sum(f), s)
    fn = function([x, y, z], f)
    assert np.allclose(15.0 + s.get_value(), fn(xv, yv, zv))

def test_random_integers_vector(self):
    rng_R = random_state_type()
    low = tensor.lvector()
    high = tensor.lvector()
    post_r, out = random_integers(rng_R, low=low, high=high)
    assert out.ndim == 1
    f = compile.function([rng_R, low, high], [post_r, out], accept_inplace=True)

    low_val = [100, 200, 300]
    high_val = [110, 220, 330]
    rng = np.random.RandomState(utt.fetch_seed())
    numpy_rng = np.random.RandomState(utt.fetch_seed())

    # Arguments of size (3,)
    rng0, val0 = f(rng, low_val, high_val)
    numpy_val0 = np.asarray(
        [
            numpy_rng.randint(low=lv, high=hv + 1)
            for lv, hv in zip(low_val, high_val)
        ]
    )
    assert np.all(val0 == numpy_val0)

    # Arguments of size (2,)
    rng1, val1 = f(rng0, low_val[:-1], high_val[:-1])
    numpy_val1 = np.asarray(
        [
            numpy_rng.randint(low=lv, high=hv + 1)
            for lv, hv in zip(low_val[:-1], high_val[:-1])
        ]
    )
    assert np.all(val1 == numpy_val1)

    # Specifying the size explicitly
    g = compile.function(
        [rng_R, low, high],
        random_integers(rng_R, low=low, high=high, size=(3,)),
        accept_inplace=True,
    )
    rng2, val2 = g(rng1, low_val, high_val)
    numpy_val2 = np.asarray(
        [
            numpy_rng.randint(low=lv, high=hv + 1)
            for lv, hv in zip(low_val, high_val)
        ]
    )
    assert np.all(val2 == numpy_val2)
    with pytest.raises(ValueError):
        g(rng2, low_val[:-1], high_val[:-1])

def t():
    f = function(
        [
            In(a, name={"adsf", ()}, value=1.0),
            In(x, name=(), value=2.0),
            In(s, name=tt.scalar(), value=3.0),
        ],
        a + x + s,
    )
    return f

def test_deepcopy(self):
    a = tt.scalar()  # the a is for 'anonymous' (un-named).
    x, s = tt.scalars("xs")

    f = function(
        [
            x,
            In(a, value=1.0, name="a"),
            In(s, value=0.0, update=s + a * x, mutable=True),
        ],
        s + a * x,
    )
    try:
        g = copy.deepcopy(f)
    except NotImplementedError as e:
        # Exceptions are not subscriptable in Python 3; check the message
        # through e.args instead.
        if e.args[0].startswith("DebugMode is not picklable"):
            return
        else:
            raise
    # if they both return, assume that they return equivalent things.
    # print [(k, id(k)) for k in f.finder.keys()]
    # print [(k, id(k)) for k in g.finder.keys()]

    assert g.container[0].storage is not f.container[0].storage
    assert g.container[1].storage is not f.container[1].storage
    assert g.container[2].storage is not f.container[2].storage
    assert x not in g.container
    assert x not in g.value
    assert len(f.defaults) == len(g.defaults)
    assert f._check_for_aliased_inputs is g._check_for_aliased_inputs
    assert f.name == g.name
    assert f.maker.fgraph.name == g.maker.fgraph.name
    # print 'f.defaults = %s' % (f.defaults,)
    # print 'g.defaults = %s' % (g.defaults,)
    for (f_req, f_feed, f_val), (g_req, g_feed, g_val) in zip(
        f.defaults, g.defaults
    ):
        assert f_req == g_req and f_feed == g_feed and f_val == g_val

    assert g.value[1] is not f.value[1]  # should not have been copied
    assert (
        g.value[2] is not f.value[2]
    )  # should have been copied because it is mutable.
    assert not (g.value[2] != f.value[2]).any()  # its contents should be identical

    assert f(2, 1) == g(2)  # they should be in sync, default value should be copied.
    assert f(2, 1) == g(2)  # they should be in sync, default value should be copied.
    f(1, 2)  # put them out of sync
    assert f(1, 2) != g(1, 2)  # they should not be equal anymore.
    g(1, 2)  # put them back in sync
    assert f(3) == g(3)  # They should be in sync again.