Example #1
def test_RandomVariable_bcast():
    rv = RandomVariable("normal", 0, [0, 0], config.floatX, inplace=True)

    mu = tensor(config.floatX, [True, False, False])
    mu.tag.test_value = np.zeros((1, 2, 3)).astype(config.floatX)
    sd = tensor(config.floatX, [False, False])
    sd.tag.test_value = np.ones((2, 3)).astype(config.floatX)

    s1 = iscalar()
    s1.tag.test_value = 1
    s2 = iscalar()
    s2.tag.test_value = 2
    s3 = iscalar()
    s3.tag.test_value = 3
    s3 = Assert("testing")(s3, eq(s1, 1))

    res = rv(mu, sd, size=(s1, s2, s3))
    assert res.broadcastable == (False, ) * 3

    size = aet.as_tensor((1, 2, 3), dtype=np.int32).astype(np.int64)
    res = rv(mu, sd, size=size)
    assert res.broadcastable == (True, False, False)

    res = rv(0, 1, size=aet.as_tensor(1, dtype=np.int64))
    assert res.broadcastable == (True, )
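
A note on the pattern used throughout these examples: the second argument to tensor is a broadcast pattern, where a True (or 1) entry marks an axis whose length is statically 1. Depending on the Aesara version the keyword is spelled broadcastable= (older releases) or shape= (newer releases), which is why both appear on this page. A minimal sketch, assuming an Aesara version that still accepts the positional form used above:

import numpy as np
from aesara import config
from aesara.tensor.type import tensor

# A `True` entry means that axis always has length 1, so only a (1, n, m)
# test value is compatible with this variable.
mu = tensor(config.floatX, [True, False, False])
assert mu.broadcastable == (True, False, False)
mu.tag.test_value = np.zeros((1, 2, 3)).astype(config.floatX)
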
Example #2
 def make_node(self, x, shp):
     x = aet.as_tensor_variable(x)
     shp_orig = shp
     shp = aet.as_tensor_variable(shp, ndim=1)
     if not (shp.dtype in int_dtypes or
             (isinstance(shp, TensorConstant) and shp.data.size == 0)):
         # It raises an error if shp is not of integer type,
         # except when shp is constant and empty
         # (in this case, shp.dtype does not matter anymore).
         raise TypeError("Shape must be integers", shp, shp.dtype)
     assert shp.ndim == 1
     if isinstance(shp, TensorConstant):
         bcast = [s == 1 for s in shp.data]
         return Apply(self, [x, shp], [tensor(x.type.dtype, bcast)])
     else:
         bcasts = [False] * self.ndim
         shp_list = shp_orig
         if hasattr(shp_orig, "ndim") and shp_orig.ndim == 0:
             shp_list = [shp_orig]
         for index in range(self.ndim):
             y = shp_list[index]
             y = aet.as_tensor_variable(y)
             # Try to see if we can infer that y has a constant value of 1.
             # If so, that dimension should be broadcastable.
             try:
                 bcasts[index] = (hasattr(y, "get_scalar_constant_value")
                                  and y.get_scalar_constant_value() == 1)
             except NotScalarConstantError:
                 pass
         return Apply(self, [x, shp], [tensor(x.type.dtype, bcasts)])
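
A plain-Python illustration (with hypothetical shape values) of the rule the two branches above implement: an output dimension is marked broadcastable only when the corresponding shape entry is known to be the constant 1; with a symbolic shape, anything that cannot be proven equal to 1 defaults to False.

constant_shape = (1, 5, 1)
bcast_from_constant = [s == 1 for s in constant_shape]
assert bcast_from_constant == [True, False, True]

# With a symbolic shape, only entries provably equal to 1 become broadcastable.
known_values = [1, None, None]  # None: value unknown at graph-construction time
bcast_from_symbolic = [v == 1 for v in known_values]
assert bcast_from_symbolic == [True, False, False]
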
Example #3
 def manual_setup_method(self, dtype="float64"):
     # This test can run even when aesara.config.blas__ldflags is empty.
     self.dtype = dtype
     self.mode = aesara.compile.get_default_mode().including("fast_run")
     self.A = tensor(dtype=dtype, broadcastable=(False, False))
     self.a = tensor(dtype=dtype, broadcastable=())
     self.x = tensor(dtype=dtype, broadcastable=(False, ))
     self.y = tensor(dtype=dtype, broadcastable=(False, ))
     self.Aval = np.ones((2, 3), dtype=dtype)
     self.xval = np.asarray([1, 2], dtype=dtype)
     self.yval = np.asarray([1.5, 2.7, 3.9], dtype=dtype)
Example #4
 def test_numpy_2d(self):
     for shp0 in [(2, 3)]:
         x = tensor(dtype="floatX", broadcastable=(False,) * len(shp0))
         a = np.asarray(self.rng.rand(*shp0)).astype(config.floatX)
         for shp1 in [(6, 7)]:
             if len(shp0) + len(shp1) == 2:
                 continue
             y = tensor(dtype="floatX", broadcastable=(False,) * len(shp1))
             f = function([x, y], kron(x, y))
             b = self.rng.rand(*shp1).astype(config.floatX)
             out = f(a, b)
             assert np.allclose(out, np.kron(a, b))
Example #5
 def setup_method(self):
     self.mode = aesara.compile.get_default_mode()
     self.mode = self.mode.including("fast_run")
     self.mode = self.mode.excluding("c_blas")  # c_blas trumps scipy Ops
     dtype = self.dtype = "float64"  # optimization isn't dtype-dependent
     self.A = tensor(dtype=dtype, broadcastable=(False, False))
     self.a = tensor(dtype=dtype, broadcastable=())
     self.x = tensor(dtype=dtype, broadcastable=(False, ))
     self.y = tensor(dtype=dtype, broadcastable=(False, ))
     self.Aval = np.ones((2, 3), dtype=dtype)
     self.xval = np.asarray([1, 2], dtype=dtype)
     self.yval = np.asarray([1.5, 2.7, 3.9], dtype=dtype)
Example #6
    def setup_method(self):
        self.mode = mode_with_gpu
        dtype = self.dtype = "float32"  # optimization isn't dtype-dependent
        self.A = tensor(dtype=dtype, broadcastable=(False, False))
        self.a = tensor(dtype=dtype, broadcastable=())
        self.x = tensor(dtype=dtype, broadcastable=(False, ))
        self.y = tensor(dtype=dtype, broadcastable=(False, ))
        self.ger_destructive = gpuger_inplace

        # Data on the GPU makes the op always inplace.
        self.ger = gpuger_inplace
        self.gemm = gpugemm_inplace
        super().setup_method()
Example #7
    def test_argtopk_nd(self, shp, k_, dtype, sorted, idx_dtype):
        ndim = len(shp)
        for axis in range(-ndim, ndim):
            if isinstance(k_, str):
                k = eval(k_.replace("n", str(shp[axis])))
            else:
                k = k_

            if k == 0:
                continue

            x = tensor(name="x",
                       broadcastable=(False, ) * len(shp),
                       dtype=dtype)
            y = argtopk(x, k, axis=axis, sorted=sorted, idx_dtype=idx_dtype)
            fn = aesara.function([x], y, mode=self.mode)
            assert any([
                isinstance(n.op, self.op_class)
                for n in fn.maker.fgraph.apply_nodes
            ])
            size = reduce(int.__mul__, shp)
            xval = gen_unique_vector(size, dtype).reshape(shp)
            yval = fn(xval)
            idx = slice(-k, None) if k > 0 else slice(-k)
            l = axis % ndim
            r = ndim - l
            idx = (slice(None), ) * l + (idx, ) + (slice(None), ) * (r - 1)
            goal = np.argsort(xval, axis=axis)[idx].astype(idx_dtype)

            assert np.all(np.sort(yval, axis=axis) == np.sort(goal, axis=axis))
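
The goal computed above is just the tail of an argsort, so the argtopk result is only required to contain the same indices in some order. A plain-NumPy sketch of that reference, using hypothetical data and k > 0:

import numpy as np

x = np.array([[5.0, 1.0, 3.0], [2.0, 9.0, 4.0]])
k = 2
# Indices of the k largest entries along the last axis; the order within the
# k is not guaranteed, hence the sorted comparison used in the test.
idx = np.argsort(x, axis=-1)[..., -k:]
assert np.array_equal(np.sort(idx, axis=-1), np.array([[0, 2], [1, 2]]))
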
Example #8
def test_advinc_subtensor1_dtype():
    # Test the mixed dtype case
    shp = (3, 4)
    for dtype1, dtype2 in [
        ("float32", "int8"),
        ("float32", "float64"),
        ("uint64", "int8"),
        ("int64", "uint8"),
        ("float16", "int8"),
        ("float16", "float64"),
        ("float16", "float16"),
    ]:
        shared = gpuarray_shared_constructor
        xval = np.arange(np.prod(shp), dtype=dtype1).reshape(shp) + 1
        yval = np.empty((2,) + shp[1:], dtype=dtype2)
        yval[:] = 10
        x = shared(xval, name="x")
        y = tensor(dtype=yval.dtype, broadcastable=(False,) * len(yval.shape), name="y")
        expr = advanced_inc_subtensor1(x, y, [0, 2])
        f = aesara.function([y], expr, mode=mode_with_gpu)
        assert (
            sum(
                [
                    isinstance(node.op, GpuAdvancedIncSubtensor1_dev20)
                    for node in f.maker.fgraph.toposort()
                ]
            )
            == 1
        )
        rval = f(yval)
        rep = xval.copy()
        np.add.at(rep, [[0, 2]], yval)
        assert np.allclose(rval, rep)
Example #9
    def setup_method(self):
        # This test can run even when aesara.config.blas__ldflags is empty.
        dtype = "float64"
        self.dtype = dtype
        self.mode = aesara.compile.get_default_mode().including("fast_run")
        # matrix
        self.A = tensor(dtype=dtype, shape=(False, False))
        self.Aval = np.ones((2, 3), dtype=dtype)

        # vector
        self.x = tensor(dtype=dtype, shape=(False,))
        self.y = tensor(dtype=dtype, shape=(False,))
        self.xval = np.asarray([1, 2], dtype=dtype)
        self.yval = np.asarray([1.5, 2.7, 3.9], dtype=dtype)

        # scalar
        self.a = tensor(dtype=dtype, shape=())
Example #10
 def test_perform(self):
     for shp0 in [(2,), (2, 3), (2, 3, 4), (2, 3, 4, 5)]:
         x = tensor(dtype="floatX", shape=(False,) * len(shp0))
         a = np.asarray(self.rng.random(shp0)).astype(config.floatX)
         for shp1 in [(6,), (6, 7), (6, 7, 8), (6, 7, 8, 9)]:
             if len(shp0) + len(shp1) == 2:
                 continue
             y = tensor(dtype="floatX", shape=(False,) * len(shp1))
             f = function([x, y], kron(x, y))
             b = self.rng.random(shp1).astype(config.floatX)
             out = f(a, b)
             # Newer versions of scipy want 4 dimensions at least,
             # so we have to add a dimension to a and flatten the result.
             if len(shp0) + len(shp1) == 3:
                 scipy_val = scipy.linalg.kron(a[np.newaxis, :], b).flatten()
             else:
                 scipy_val = scipy.linalg.kron(a, b)
             utt.assert_allclose(out, scipy_val)
Example #11
File: io.py Project: Sayam753/Theano-PyMC
 def make_node(self):
     return Apply(
         self,
         [],
         [
             Variable(Generic()),
             tensor(self.dtype, shape=self.broadcastable),
         ],
     )
Example #12
def test_local_dimshuffle_subtensor():

    dimshuffle_subtensor = out2in(local_dimshuffle_subtensor)

    x = dtensor4("x")
    x = aet.patternbroadcast(x, (False, True, False, False))
    i = iscalar("i")

    out = x[:, :, 10:30, ::i].dimshuffle(0, 2, 3)

    g = FunctionGraph([x, i], [out])
    dimshuffle_subtensor(g)

    topo = g.toposort()
    assert any([not isinstance(x, DimShuffle) for x in topo])

    # Test that the dimshuffle removes dimensions the subtensor doesn't "see".
    x = tensor(broadcastable=(False, True, False), dtype="float64")
    out = x[i].dimshuffle(1)

    g = FunctionGraph([x, i], [out])
    dimshuffle_subtensor(g)

    topo = g.toposort()
    assert any([not isinstance(x, DimShuffle) for x in topo])

    # Test that the dimshuffle removes dimensions the subtensor doesn't
    # "see" but that lie between other dimensions.
    x = tensor(broadcastable=(False, True, False, True), dtype="float64")
    out = x[i].dimshuffle(1)

    f = aesara.function([x, i], out)

    topo = f.maker.fgraph.toposort()
    assert any([not isinstance(x, DimShuffle) for x in topo])
    assert f(np.random.rand(5, 1, 4, 1), 2).shape == (4, )

    # Test a corner case that previously triggered a bug in Aesara.
    x = dtensor4("x")
    x = aet.patternbroadcast(x, (False, True, False, False))

    assert x[:, :, 0:3, ::-1].dimshuffle(0, 2, 3).eval({
        x: np.ones((5, 1, 6, 7))
    }).shape == (5, 3, 7)
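
A plain-NumPy check of that final corner case: slicing, reversing the last axis, and then dropping the broadcastable axis 1 (which is what dimshuffle(0, 2, 3) does here) should give shape (5, 3, 7), matching the eval() assertion above.

import numpy as np

xval = np.ones((5, 1, 6, 7))
ref = xval[:, :, 0:3, ::-1][:, 0]  # drop the length-1 axis, like dimshuffle(0, 2, 3)
assert ref.shape == (5, 3, 7)
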
Example #13
def test_RandomVariable_bcast_specify_shape():
    rv = RandomVariable("normal", 0, [0, 0], config.floatX, inplace=True)

    s1 = aet.as_tensor(1, dtype=np.int64)
    s2 = iscalar()
    s2.tag.test_value = 2
    s3 = iscalar()
    s3.tag.test_value = 3
    s3 = Assert("testing")(s3, eq(s1, 1))

    size = specify_shape(aet.as_tensor([s1, s3, s2, s2, s1]), (5, ))
    mu = tensor(config.floatX, [False, False, True])
    mu.tag.test_value = np.random.normal(size=(2, 2, 1)).astype(config.floatX)

    std = tensor(config.floatX, [False, True, True])
    std.tag.test_value = np.ones((2, 1, 1)).astype(config.floatX)

    res = rv(mu, std, size=size)
    assert res.broadcastable == (True, False, False, False, True)
Example #14
    def test_infer_shape(self):
        a = tensor(config.floatX, [False, True, False])
        shape = list(a.shape)
        out = self.op(a, shape)

        self._compile_and_check(
            [a] + shape,
            [out],
            [np.random.rand(2, 1, 3).astype(config.floatX), 2, 1, 3],
            self.op_class,
        )

        a = tensor(config.floatX, [False, True, False])
        shape = [iscalar() for i in range(4)]
        self._compile_and_check(
            [a] + shape,
            [self.op(a, shape)],
            [np.random.rand(2, 1, 3).astype(config.floatX), 6, 2, 5, 3],
            self.op_class,
        )
Example #15
File: test_jax.py Project: mgorny/aesara
def test_jax_Dimshuffle():
    a_at = matrix("a")

    x = a_at.T
    x_fg = FunctionGraph([a_at], [x])
    compare_jax_and_py(x_fg, [np.c_[[1.0, 2.0], [3.0, 4.0]].astype(config.floatX)])

    x = a_at.dimshuffle([0, 1, "x"])
    x_fg = FunctionGraph([a_at], [x])
    compare_jax_and_py(x_fg, [np.c_[[1.0, 2.0], [3.0, 4.0]].astype(config.floatX)])

    a_at = tensor(dtype=config.floatX, shape=[False, True])
    x = a_at.dimshuffle((0,))
    x_fg = FunctionGraph([a_at], [x])
    compare_jax_and_py(x_fg, [np.c_[[1.0, 2.0, 3.0, 4.0]].astype(config.floatX)])

    a_at = tensor(dtype=config.floatX, shape=[False, True])
    x = at_elemwise.DimShuffle([False, True], (0,))(a_at)
    x_fg = FunctionGraph([a_at], [x])
    compare_jax_and_py(x_fg, [np.c_[[1.0, 2.0, 3.0, 4.0]].astype(config.floatX)])
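
A rough NumPy analogue (not Aesara or JAX code) of the dimshuffle patterns exercised above: integer entries permute existing axes, "x" inserts a new length-1 axis, and omitting a broadcastable axis drops it.

import numpy as np

a = np.c_[[1.0, 2.0], [3.0, 4.0]]               # shape (2, 2)
assert a.T.shape == (2, 2)                       # like a_at.T
assert a[:, :, np.newaxis].shape == (2, 2, 1)    # like dimshuffle([0, 1, "x"])

b = np.c_[[1.0, 2.0, 3.0, 4.0]]                  # shape (4, 1); second axis is broadcastable
assert b[:, 0].shape == (4,)                     # like dimshuffle((0,)), dropping that axis
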
Example #16
def numba_funcify_CAReduce(op, node, **kwargs):
    axes = op.axis
    if axes is None:
        axes = list(range(node.inputs[0].ndim))

    if hasattr(op, "acc_dtype") and op.acc_dtype is not None:
        acc_dtype = op.acc_dtype
    else:
        acc_dtype = node.outputs[0].type.dtype

    np_acc_dtype = np.dtype(acc_dtype)

    scalar_op_identity = np.asarray(op.scalar_op.identity, dtype=np_acc_dtype)

    acc_dtype = numba.np.numpy_support.from_dtype(np_acc_dtype)

    scalar_nfunc_spec = op.scalar_op.nfunc_spec

    # We construct a dummy `Apply` that has the minimum required number of
    # inputs for the scalar `Op`.  Without this, we would get a scalar function
    # with too few arguments.
    dummy_node = Apply(
        op,
        [tensor(acc_dtype, [False]) for i in range(scalar_nfunc_spec[1])],
        [tensor(acc_dtype, [False]) for o in range(scalar_nfunc_spec[2])],
    )
    elemwise_fn = numba_funcify_Elemwise(op,
                                         dummy_node,
                                         use_signature=True,
                                         **kwargs)

    input_name = get_name_for_object(node.inputs[0])
    ndim = node.inputs[0].ndim
    careduce_fn = create_multiaxis_reducer(elemwise_fn,
                                           scalar_op_identity,
                                           axes,
                                           ndim,
                                           acc_dtype,
                                           input_name=input_name)

    return numba.njit(careduce_fn)
Example #17
def test_mvnormal_ShapeFeature():
    M_aet = iscalar("M")
    M_aet.tag.test_value = 2

    d_rv = multivariate_normal(aet.ones((M_aet, )), aet.eye(M_aet), size=2)

    fg = FunctionGraph(
        [i for i in graph_inputs([d_rv]) if not isinstance(i, Constant)],
        [d_rv],
        clone=False,
        features=[ShapeFeature()],
    )

    s1, s2 = fg.shape_feature.shape_of[d_rv]

    assert get_test_value(s1) == 2
    assert M_aet in graph_inputs([s2])

    # Test broadcasted shapes
    mean = tensor(config.floatX, [True, False])
    mean.tag.test_value = np.array([[0, 1, 2]], dtype=config.floatX)

    test_covar = np.diag(np.array([1, 10, 100], dtype=config.floatX))
    test_covar = np.stack([test_covar, test_covar * 10.0])
    cov = aet.as_tensor(test_covar).type()
    cov.tag.test_value = test_covar

    d_rv = multivariate_normal(mean, cov, size=[2, 3])

    fg = FunctionGraph(
        [i for i in graph_inputs([d_rv]) if not isinstance(i, Constant)],
        [d_rv],
        clone=False,
        features=[ShapeFeature()],
    )

    s1, s2, s3, s4 = fg.shape_feature.shape_of[d_rv]

    assert s1.get_test_value() == 2
    assert s2.get_test_value() == 3
    assert s3.get_test_value() == 2
    assert s4.get_test_value() == 3
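
The four entries checked above correspond, in order, to the requested size (2, 3), the broadcast batch shape of the parameters, and the support dimension of the multivariate normal. A plain-Python restatement of that arithmetic (the decomposition is inferred from the assertions themselves):

size = (2, 3)
batch_shape = (2,)     # broadcast of the mean's batch shape (1,) with the cov's (2,)
support_shape = (3,)   # event dimension of the multivariate normal
assert size + batch_shape + support_shape == (2, 3, 2, 3)
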
Example #18
    def test_combined_infer_shape(self, shp, k_):
        ndim = len(shp)
        for axis in range(-ndim, ndim):
            if isinstance(k_, str):
                k = eval(k_.replace("n", str(shp[axis])))
            else:
                k = k_

            if k == 0:
                continue

            x = tensor(name="x",
                       broadcastable=(False, ) * len(shp),
                       dtype=aesara.config.floatX)
            yv, yi = topk_and_argtopk(x,
                                      k,
                                      axis=axis,
                                      sorted=False,
                                      idx_dtype="int32")
            size = reduce(int.__mul__, shp)
            xval = gen_unique_vector(size, aesara.config.floatX).reshape(shp)
            self._compile_and_check([x], [yv, yi], [xval], TopKOp)
Example #19
def test_incsub_f16():
    shp = (3, 3)
    shared = gpuarray_shared_constructor
    xval = np.arange(np.prod(shp), dtype="float16").reshape(shp) + 1
    yval = np.empty((2,) + shp[1:], dtype="float16")
    yval[:] = 2
    x = shared(xval, name="x")
    y = tensor(dtype="float16", broadcastable=(False,) * len(shp), name="y")
    expr = advanced_inc_subtensor1(x, y, [0, 2])
    f = aesara.function([y], expr, mode=mode_with_gpu)
    assert (
        sum(
            [
                isinstance(node.op, GpuAdvancedIncSubtensor1)
                for node in f.maker.fgraph.toposort()
            ]
        )
        == 1
    )
    rval = f(yval)
    rep = xval.copy()
    np.add.at(rep, [[0, 2]], yval)
    assert np.allclose(rval, rep)

    expr = inc_subtensor(x[1:], y)
    f = aesara.function([y], expr, mode=mode_with_gpu)
    assert (
        sum(
            [isinstance(node.op, GpuIncSubtensor) for node in f.maker.fgraph.toposort()]
        )
        == 1
    )
    rval = f(yval)
    rep = xval.copy()
    rep[1:] += yval
    assert np.allclose(rval, rep)
Example #20
def test_advinc_subtensor1_vector_scalar():
    # Test the case where x is a vector and y a scalar
    shp = (3,)
    for dtype1, dtype2 in [
        ("float32", "int8"),
        ("float32", "float64"),
        ("float16", "int8"),
        ("float16", "float64"),
        ("float16", "float16"),
        ("int8", "int8"),
        ("int16", "int16"),
    ]:
        shared = gpuarray_shared_constructor
        xval = np.arange(np.prod(shp), dtype=dtype1).reshape(shp) + 1
        yval = np.asarray(10, dtype=dtype2)
        x = shared(xval, name="x")
        y = tensor(dtype=yval.dtype, broadcastable=(False,) * len(yval.shape), name="y")
        expr = advanced_inc_subtensor1(x, y, [0, 2])
        f = aesara.function([y], expr, mode=mode_with_gpu)

        assert (
            sum(
                [
                    isinstance(
                        node.op,
                        (GpuAdvancedIncSubtensor1_dev20, GpuAdvancedIncSubtensor1),
                    )
                    for node in f.maker.fgraph.toposort()
                ]
            )
            == 1
        )
        rval = f(yval)
        rep = xval.copy()
        rep[[0, 2]] += yval
        assert np.allclose(rval, rep)
Example #21
def test_advinc_subtensor1():
    # Test the second case in the opt local_gpu_advanced_incsubtensor1
    for shp in [(3, 3), (3, 3, 3)]:
        shared = gpuarray_shared_constructor
        xval = np.arange(np.prod(shp), dtype="float32").reshape(shp) + 1
        yval = np.empty((2,) + shp[1:], dtype="float32")
        yval[:] = 10
        x = shared(xval, name="x")
        y = tensor(dtype="float32", broadcastable=(False,) * len(shp), name="y")
        expr = advanced_inc_subtensor1(x, y, [0, 2])
        f = aesara.function([y], expr, mode=mode_with_gpu)
        assert (
            sum(
                [
                    isinstance(node.op, GpuAdvancedIncSubtensor1)
                    for node in f.maker.fgraph.toposort()
                ]
            )
            == 1
        )
        rval = f(yval)
        rep = xval.copy()
        np.add.at(rep, [0, 2], yval)
        assert np.allclose(rval, rep)
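
The reference result is built with np.add.at rather than fancy-indexed "+=". A small NumPy-only illustration of the difference, which only matters when indices repeat (they do not in this test, but accumulating over duplicates is the behaviour the reference encodes):

import numpy as np

a = np.zeros(3)
a[[0, 0, 2]] += 1.0           # buffered: the repeated index 0 is applied only once
b = np.zeros(3)
np.add.at(b, [0, 0, 2], 1.0)  # unbuffered: repeated indices accumulate
assert np.array_equal(a, [1.0, 0.0, 1.0])
assert np.array_equal(b, [2.0, 0.0, 1.0])
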
Example #22
    def test_infer_shape(self):
        image = dtensor4()
        maxout = dtensor4()
        gz = dtensor4()
        rng = np.random.RandomState(utt.fetch_seed())
        maxpoolshps = ((1, 1), (2, 2), (3, 3), (2, 3), (3, 2))

        image_val = rng.rand(4, 6, 7, 9)
        out_shapes = [
            [
                [[4, 6, 7, 9], [4, 6, 7, 9]],
                [[4, 6, 3, 4], [4, 6, 4, 5]],
                [[4, 6, 2, 3], [4, 6, 3, 3]],
                [[4, 6, 3, 3], [4, 6, 4, 3]],
                [[4, 6, 2, 4], [4, 6, 3, 5]],
            ],
            [
                [None, None],
                [[4, 6, 4, 5], None],
                [[4, 6, 3, 3], None],
                [[4, 6, 4, 3], None],
                [[4, 6, 3, 5], None],
            ],
            [
                [None, None],
                [None, None],
                [[4, 6, 3, 4], None],
                [[4, 6, 4, 4], None],
                [None, None],
            ],
        ]

        for i, maxpoolshp in enumerate(maxpoolshps):
            for j, ignore_border in enumerate([True, False]):
                for k, pad in enumerate([(0, 0), (1, 1), (1, 2)]):
                    if out_shapes[k][i][j] is None:
                        continue
                    # checking shapes generated by Pool
                    self._compile_and_check(
                        [image],
                        [
                            Pool(ignore_border=ignore_border)(
                                image, maxpoolshp, pad=pad)
                        ],
                        [image_val],
                        Pool,
                    )

                    # checking shapes generated by MaxPoolGrad
                    maxout_val = rng.rand(*out_shapes[k][i][j])
                    gz_val = rng.rand(*out_shapes[k][i][j])
                    self._compile_and_check(
                        [image, maxout, gz],
                        [
                            MaxPoolGrad(ignore_border=ignore_border)(
                                image, maxout, gz, maxpoolshp, pad=pad)
                        ],
                        [image_val, maxout_val, gz_val],
                        MaxPoolGrad,
                        warn=False,
                    )
        # checking with broadcastable input
        image = tensor(dtype="float64",
                       broadcastable=(False, False, True, True))
        image_val = rng.rand(4, 6, 1, 1)
        self._compile_and_check(
            [image],
            [Pool(ignore_border=True)(image, (2, 2), pad=(0, 0))],
            [image_val],
            Pool,
        )
Example #23
 def test_static_shape(self):
     x = tensor(np.float64, shape=(1, 2), name="x")
     y = x.dimshuffle([0, 1, "x"])
     assert y.type.shape == (1, 2, 1)
Example #24
def test_broadcast_params():

    ndims_params = [0, 0]

    mean = np.array([0, 1, 2])
    cov = np.array(1e-6)
    params = [mean, cov]
    res = broadcast_params(params, ndims_params)
    assert np.array_equal(res[0], mean)
    assert np.array_equal(res[1], np.broadcast_to(cov, (3, )))

    ndims_params = [1, 2]

    mean = np.r_[1, 2, 3]
    cov = np.stack([np.eye(3) * 1e-5, np.eye(3) * 1e-4])
    params = [mean, cov]
    res = broadcast_params(params, ndims_params)
    assert np.array_equal(res[0], np.broadcast_to(mean, (2, 3)))
    assert np.array_equal(res[1], cov)

    mean = np.stack([np.r_[0, 0, 0], np.r_[1, 1, 1]])
    cov = np.arange(3 * 3).reshape((3, 3))
    params = [mean, cov]
    res = broadcast_params(params, ndims_params)
    assert np.array_equal(res[0], mean)
    assert np.array_equal(res[1], np.broadcast_to(cov, (2, 3, 3)))

    mean = np.stack([np.r_[0, 0, 0], np.r_[1, 1, 1]])
    cov = np.stack([
        np.arange(3 * 3).reshape((3, 3)),
        np.arange(3 * 3).reshape((3, 3)) * 10
    ])
    params = [mean, cov]
    res = broadcast_params(params, ndims_params)
    assert np.array_equal(res[0], mean)
    assert np.array_equal(res[1], cov)

    mean = np.array([[1, 2, 3]])
    cov = np.stack([np.eye(3) * 1e-5, np.eye(3) * 1e-4])
    params = [mean, cov]
    res = broadcast_params(params, ndims_params)
    assert np.array_equal(res[0], np.array([[1, 2, 3], [1, 2, 3]]))
    assert np.array_equal(res[1], cov)

    mean = np.array([[0], [10], [100]])
    cov = np.diag(np.array([1e-6]))
    params = [mean, cov]
    res = broadcast_params(params, ndims_params)
    assert np.array_equal(res[0], mean)
    assert np.array_equal(res[1], np.broadcast_to(cov, (3, 1, 1)))

    # Try it in Aesara
    with config.change_flags(compute_test_value="raise"):
        mean = tensor(config.floatX, [False, True])
        mean.tag.test_value = np.array([[0], [10], [100]], dtype=config.floatX)
        cov = matrix()
        cov.tag.test_value = np.diag(np.array([1e-6], dtype=config.floatX))
        params = [mean, cov]
        res = broadcast_params(params, ndims_params)
        assert np.array_equal(res[0].get_test_value(), mean.get_test_value())
        assert np.array_equal(res[1].get_test_value(),
                              np.broadcast_to(cov.get_test_value(), (3, 1, 1)))
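
A NumPy-only sketch of the batch-broadcasting behaviour these assertions exercise (a hypothetical broadcast_params_sketch helper, not Aesara's implementation): the last ndims_params[i] axes of each parameter are treated as core dimensions, and only the leading batch dimensions are broadcast against each other.

import numpy as np

def broadcast_params_sketch(params, ndims_params):
    # Batch shape = broadcast of everything except each parameter's core dims.
    batch_shapes = [np.shape(p)[: np.ndim(p) - nd] for p, nd in zip(params, ndims_params)]
    batch_shape = np.broadcast_shapes(*batch_shapes)
    return [
        np.broadcast_to(p, batch_shape + np.shape(p)[np.ndim(p) - nd :])
        for p, nd in zip(params, ndims_params)
    ]

mean = np.array([0, 1, 2])
cov = np.array(1e-6)
res = broadcast_params_sketch([mean, cov], [0, 0])
assert np.array_equal(res[0], mean)
assert np.array_equal(res[1], np.broadcast_to(cov, (3,)))
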
Example #25
File: io.py Project: Sayam753/Theano-PyMC
 def make_node(self, path):
     if isinstance(path, str):
         path = Constant(Generic(), path)
     return Apply(self, [path],
                  [tensor(self.dtype, shape=self.broadcastable)])
Example #26
File: io.py Project: Sayam753/Theano-PyMC
 def make_node(self, request, data):
     return Apply(
         self,
         [request, data],
         [tensor(data.dtype, shape=data.broadcastable)],
     )
Example #27
def test_RandomVariable_basics():

    str_res = str(
        RandomVariable(
            "normal",
            0,
            [0, 0],
            config.floatX,
            inplace=True,
        ))

    assert str_res == "normal_rv"

    # `ndims_params` should be a `Sequence` type
    with raises(TypeError, match="^Parameter ndims_params*"):
        RandomVariable(
            "normal",
            0,
            0,
            config.floatX,
            inplace=True,
        )

    # `size` should be a `Sequence` type
    with raises(TypeError, match="^Parameter size*"):
        RandomVariable(
            "normal",
            0,
            [0, 0],
            config.floatX,
            inplace=True,
        )(0, 1, size={1, 2})

    # No dtype
    with raises(TypeError, match="^dtype*"):
        RandomVariable(
            "normal",
            0,
            [0, 0],
            inplace=True,
        )(0, 1)

    # Confirm that `inplace` works
    rv = RandomVariable(
        "normal",
        0,
        [0, 0],
        "normal",
        inplace=True,
    )

    assert rv.inplace
    assert rv.destroy_map == {0: [3]}

    # A no-params `RandomVariable`
    rv = RandomVariable(name="test_rv", ndim_supp=0, ndims_params=())

    with raises(TypeError):
        rv.make_node(rng=1)

    # `RandomVariable._infer_shape` should handle no parameters
    rv_shape = rv._infer_shape(aet.constant([]), (), [])
    assert rv_shape.equals(aet.constant([], dtype="int64"))

    # Integer-specified `dtype`
    dtype_1 = all_dtypes[1]
    rv_node = rv.make_node(None, None, 1)
    rv_out = rv_node.outputs[1]
    rv_out.tag.test_value = 1

    assert rv_out.dtype == dtype_1

    with raises(NullTypeGradError):
        grad(rv_out, [rv_node.inputs[0]])

    rv = RandomVariable("normal", 0, [0, 0], config.floatX, inplace=True)

    mu = tensor(config.floatX, [True, False, False])
    mu.tag.test_value = np.zeros((1, 2, 3)).astype(config.floatX)
    sd = tensor(config.floatX, [False, False])
    sd.tag.test_value = np.ones((2, 3)).astype(config.floatX)

    s1 = iscalar()
    s1.tag.test_value = 1
    s2 = iscalar()
    s2.tag.test_value = 2
    s3 = iscalar()
    s3.tag.test_value = 3
    s3 = Assert("testing")(s3, eq(s1, 1))

    res = rv.compute_bcast([mu, sd], (s1, s2, s3))
    assert res == [False] * 3