def test_RandomVariable_bcast():
    rv = RandomVariable("normal", 0, [0, 0], config.floatX, inplace=True)

    mu = tensor(config.floatX, [True, False, False])
    mu.tag.test_value = np.zeros((1, 2, 3)).astype(config.floatX)
    sd = tensor(config.floatX, [False, False])
    sd.tag.test_value = np.ones((2, 3)).astype(config.floatX)

    s1 = iscalar()
    s1.tag.test_value = 1
    s2 = iscalar()
    s2.tag.test_value = 2
    s3 = iscalar()
    s3.tag.test_value = 3
    s3 = Assert("testing")(s3, eq(s1, 1))

    # A fully symbolic `size` provides no static shape information, so no
    # dimension can be marked broadcastable
    res = rv(mu, sd, size=(s1, s2, s3))
    assert res.broadcastable == (False,) * 3

    # A constant `size` of (1, 2, 3) makes only the first dimension broadcastable
    size = aet.as_tensor((1, 2, 3), dtype=np.int32).astype(np.int64)
    res = rv(mu, sd, size=size)
    assert res.broadcastable == (True, False, False)

    # A constant scalar `size` of 1 yields a single broadcastable dimension
    res = rv(0, 1, size=aet.as_tensor(1, dtype=np.int64))
    assert res.broadcastable == (True,)
def test_RandomVariable_floatX():
    test_rv_op = RandomVariable(
        "normal",
        0,
        [0, 0],
        "floatX",
        inplace=True,
    )

    assert test_rv_op.dtype == "floatX"
    assert test_rv_op(0, 1).dtype == config.floatX

    new_floatX = "float64" if config.floatX == "float32" else "float32"

    with config.change_flags(floatX=new_floatX):
        assert test_rv_op(0, 1).dtype == new_floatX
def test_RandomVariable_bcast_specify_shape():
    rv = RandomVariable("normal", 0, [0, 0], config.floatX, inplace=True)

    s1 = aet.as_tensor(1, dtype=np.int64)
    s2 = iscalar()
    s2.tag.test_value = 2
    s3 = iscalar()
    s3.tag.test_value = 3
    s3 = Assert("testing")(s3, eq(s1, 1))

    # `s1` is the constant 1, so the first and last entries of `size` are
    # known to be broadcastable even though `size` is built symbolically
    size = specify_shape(aet.as_tensor([s1, s3, s2, s2, s1]), (5,))

    mu = tensor(config.floatX, [False, False, True])
    mu.tag.test_value = np.random.normal(size=(2, 2, 1)).astype(config.floatX)
    std = tensor(config.floatX, [False, True, True])
    std.tag.test_value = np.ones((2, 1, 1)).astype(config.floatX)

    res = rv(mu, std, size=size)
    assert res.broadcastable == (True, False, False, False, True)
def test_RandomVariable_basics():
    str_res = str(
        RandomVariable(
            "normal",
            0,
            [0, 0],
            "float32",
            inplace=True,
        )
    )

    assert str_res == "normal_rv{0, (0, 0), float32, True}"

    # `ndims_params` should be a `Sequence` type
    with raises(TypeError, match="^Parameter ndims_params*"):
        RandomVariable(
            "normal",
            0,
            0,
            config.floatX,
            inplace=True,
        )

    # `size` should be a `Sequence` type
    with raises(TypeError, match="^Parameter size*"):
        RandomVariable(
            "normal",
            0,
            [0, 0],
            config.floatX,
            inplace=True,
        )(0, 1, size={1, 2})

    # No dtype
    with raises(TypeError, match="^dtype*"):
        RandomVariable(
            "normal",
            0,
            [0, 0],
            inplace=True,
        )(0, 1)

    # Confirm that `inplace` works
    rv = RandomVariable(
        "normal",
        0,
        [0, 0],
        "normal",
        inplace=True,
    )

    assert rv.inplace
    assert rv.destroy_map == {0: [0]}

    # A no-params `RandomVariable`
    rv = RandomVariable(name="test_rv", ndim_supp=0, ndims_params=())

    with raises(TypeError):
        rv.make_node(rng=1)

    # `RandomVariable._infer_shape` should handle no parameters
    rv_shape = rv._infer_shape(aet.constant([]), (), [])
    assert rv_shape.equals(aet.constant([], dtype="int64"))

    # Integer-specified `dtype`
    dtype_1 = all_dtypes[1]
    rv_node = rv.make_node(None, None, 1)
    rv_out = rv_node.outputs[1]
    rv_out.tag.test_value = 1

    assert rv_out.dtype == dtype_1

    with raises(NullTypeGradError):
        grad(rv_out, [rv_node.inputs[0]])

    # The output broadcast pattern can also be computed directly via
    # `RandomVariable.compute_bcast`
    rv = RandomVariable("normal", 0, [0, 0], config.floatX, inplace=True)

    mu = tensor(config.floatX, [True, False, False])
    mu.tag.test_value = np.zeros((1, 2, 3)).astype(config.floatX)
    sd = tensor(config.floatX, [False, False])
    sd.tag.test_value = np.ones((2, 3)).astype(config.floatX)

    s1 = iscalar()
    s1.tag.test_value = 1
    s2 = iscalar()
    s2.tag.test_value = 2
    s3 = iscalar()
    s3.tag.test_value = 3
    s3 = Assert("testing")(s3, eq(s1, 1))

    res = rv.compute_bcast([mu, sd], (s1, s2, s3))
    assert res == [False] * 3