Example #1
def random_ranged(min, max, shape, rng=None):
    if rng is None:
        rng = np.random.default_rng(seed=utt.fetch_seed())
    return np.asarray(rng.random(shape) * (max - min) + min,
                      dtype=config.floatX)
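
A quick sanity check of the scaling used above, as a standalone sketch (plain NumPy; the seed and bounds are illustrative, not part of the test suite): rng.random(shape) draws from [0, 1), so multiplying by (max - min) and adding min lands every sample in [min, max).

import numpy as np

rng = np.random.default_rng(42)  # fixed seed, for illustration only
sample = rng.random((3, 4)) * (2.0 - (-1.0)) + (-1.0)
assert sample.shape == (3, 4)
assert np.all((sample >= -1.0) & (sample < 2.0))
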
Example #2
    def test_permutation_helper(self):
        # Test that raw_random.permutation_helper generates the same
        # results as numpy,
        # and that the 'ndim_added' keyword behaves correctly.

        # permutation_helper needs "ndim_added=1", because its output
        # is one dimension more than its "shape" argument (and there's
        # no way to determine that automatically).
        # Check the working case, over two calls to see if the random
        # state is correctly updated.
        rf = RandomFunction(permutation_helper,
                            tensor.imatrix,
                            8,
                            ndim_added=1)
        rng_R = random_state_type()
        post_r, out = rf(rng_R, (7, ), 8)

        f = function(
            [
                In(
                    rng_R,
                    value=np.random.RandomState(utt.fetch_seed()),
                    update=post_r,
                    mutable=True,
                )
            ],
            [out],
            accept_inplace=True,
        )

        numpy_rng = np.random.RandomState(utt.fetch_seed())
        val0 = f()
        val1 = f()
        # numpy_rng.permutation outputs one vector at a time,
        # so we call it iteratively to generate all the samples.
        numpy_val0 = np.asarray([numpy_rng.permutation(8) for i in range(7)])
        numpy_val1 = np.asarray([numpy_rng.permutation(8) for i in range(7)])
        assert np.all(val0 == numpy_val0)
        assert np.all(val1 == numpy_val1)

        # This call lacks "ndim_added=1", so ndim_added defaults to 0.
        # A ValueError should be raised.
        rf0 = RandomFunction(permutation_helper, tensor.imatrix, 8)
        post_r0, out0 = rf0(rng_R, (7, ), 8)
        f0 = function(
            [
                In(
                    rng_R,
                    value=np.random.RandomState(utt.fetch_seed()),
                    update=post_r0,
                    mutable=True,
                )
            ],
            [out0],
            accept_inplace=True,
        )
        with pytest.raises(ValueError):
            f0()

        # Here, ndim_added is 2 instead of 1. A ValueError should be raised.
        rf2 = RandomFunction(permutation_helper,
                             tensor.imatrix,
                             8,
                             ndim_added=2)
        post_r2, out2 = rf2(rng_R, (7, ), 8)
        f2 = function(
            [
                In(
                    rng_R,
                    value=np.random.RandomState(utt.fetch_seed()),
                    update=post_r2,
                    mutable=True,
                )
            ],
            [out2],
            accept_inplace=True,
        )
        with pytest.raises(ValueError):
            f2()
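
The ndim_added=1 bookkeeping above can be seen directly in NumPy: with shape (7,), each draw is itself a length-8 permutation vector, so the stacked result is 7 x 8, one dimension more than the shape argument. A minimal standalone illustration (the seed is arbitrary):

import numpy as np

numpy_rng = np.random.RandomState(123)
draws = np.asarray([numpy_rng.permutation(8) for _ in range(7)])
assert draws.shape == (7, 8)  # the (7,) shape argument plus one added dimension
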
Example #3
def test_det_grad():
    rng = np.random.RandomState(utt.fetch_seed())

    r = rng.randn(5, 5).astype(config.floatX)
    tensor.verify_grad(det, [r], rng=np.random)
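
verify_grad compares a symbolic gradient against finite differences. The same idea can be sketched by hand with NumPy, using Jacobi's formula d(det A)/dA = det(A) * inv(A)^T (a standalone sketch, not the library's implementation):

import numpy as np

rng = np.random.default_rng(0)
A = rng.standard_normal((5, 5))
analytic = np.linalg.det(A) * np.linalg.inv(A).T

# central finite differences, one entry at a time
eps = 1e-6
numeric = np.zeros_like(A)
for i in range(5):
    for j in range(5):
        Ap, Am = A.copy(), A.copy()
        Ap[i, j] += eps
        Am[i, j] -= eps
        numeric[i, j] = (np.linalg.det(Ap) - np.linalg.det(Am)) / (2 * eps)

assert np.allclose(analytic, numeric, atol=1e-4)
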
Example #4
        def test_specify_shape_partial(self):
            dtype = self.dtype
            if dtype is None:
                dtype = aesara.config.floatX

            rng = np.random.default_rng(utt.fetch_seed())
            x1_1 = np.asarray(rng.uniform(1, 2, [4, 2]), dtype=dtype)
            x1_1 = self.cast_value(x1_1)
            x1_2 = np.asarray(rng.uniform(1, 2, [4, 2]), dtype=dtype)
            x1_2 = self.cast_value(x1_2)
            x2 = np.asarray(rng.uniform(1, 2, [5, 2]), dtype=dtype)
            x2 = self.cast_value(x2)

            # Test that we can replace with values of the same shape
            x1_shared = self.shared_constructor(x1_1)
            x1_specify_shape = specify_shape(
                x1_shared,
                (aet.as_tensor_variable(x1_1.shape[0]), x1_shared.shape[1]),
            )
            x1_shared.set_value(x1_2)
            assert np.allclose(self.ref_fct(x1_shared.get_value(borrow=True)),
                               self.ref_fct(x1_2))
            shape_op_fct = aesara.function([], x1_shared.shape)
            topo = shape_op_fct.maker.fgraph.toposort()
            shape_op_fct()
            if aesara.config.mode != "FAST_COMPILE":
                assert len(topo) == 3
                assert isinstance(topo[0].op, Shape_i)
                assert isinstance(topo[1].op, Shape_i)
                assert isinstance(topo[2].op, MakeVector)

            # Test that we forward the input
            specify_shape_fct = aesara.function([], x1_specify_shape)
            specify_shape_fct()
            # aesara.printing.debugprint(specify_shape_fct)
            assert np.all(
                self.ref_fct(specify_shape_fct()) == self.ref_fct(x1_2))
            topo_specify = specify_shape_fct.maker.fgraph.toposort()
            if aesara.config.mode != "FAST_COMPILE":
                assert len(topo_specify) == 4

            # Test that we put the shape info into the graph
            shape_constant_fct = aesara.function([], x1_specify_shape.shape)
            # aesara.printing.debugprint(shape_constant_fct)
            assert np.all(shape_constant_fct() == shape_op_fct())
            topo_cst = shape_constant_fct.maker.fgraph.toposort()
            if aesara.config.mode != "FAST_COMPILE":
                assert len(topo_cst) == 2

            # Test that we can replace with values of a different shape;
            # this will raise an error in some cases, but not all
            x1_shared.set_value(x2)
            with pytest.raises(AssertionError):
                specify_shape_fct()

            # No assertion will be raised as the Op is removed from the graph
            if aesara.config.mode not in [
                    "FAST_COMPILE", "DebugMode", "DEBUG_MODE"
            ]:
                shape_constant_fct()
            else:
                with pytest.raises(AssertionError):
                    shape_constant_fct()
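
For reference, the core specify_shape contract exercised by this test fits in a few lines (a minimal sketch, assuming a working Aesara install; names and shapes are illustrative):

import numpy as np
import aesara
import aesara.tensor as aet

x = aet.matrix("x")
y = aet.specify_shape(x, (4, 2))  # promise a fixed (4, 2) shape
f = aesara.function([x], y)

f(np.ones((4, 2), dtype=aesara.config.floatX))    # shape matches: passes through
# f(np.ones((5, 2), dtype=aesara.config.floatX))  # shape mismatch: raises AssertionError
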
Example #5
    def test_infer_shape(self):
        rng_R = random_state_type()
        rng_R_val = np.random.RandomState(utt.fetch_seed())

        # no shape specified, default args
        post_r, out = uniform(rng_R)
        self._compile_and_check([rng_R], [out], [rng_R_val], RandomFunction)

        post_r, out = uniform(rng_R, size=None, ndim=2)
        self._compile_and_check([rng_R], [out], [rng_R_val], RandomFunction)
        """
        #infer_shape don't work for multinomial.
        #The parameter ndim_added is set to 1 and in this case, the infer_shape
        #inplementation don't know how to infer the shape
        post_r, out = multinomial(rng_R)

        self._compile_and_check([rng_R], [out], [rng_R_val],
                                RandomFunction)
        """

        # no shape specified, args have to be broadcasted
        low = tensor.TensorType(dtype="float64",
                                broadcastable=(False, True, True))()
        high = tensor.TensorType(dtype="float64",
                                 broadcastable=(True, True, True, False))()
        post_r, out = uniform(rng_R, size=None, ndim=2, low=low, high=high)
        low_val = [[[3]], [[4]], [[-5]]]
        high_val = [[[[5, 8]]]]
        self._compile_and_check([rng_R, low, high], [out],
                                [rng_R_val, low_val, high_val], RandomFunction)

        # multinomial, specified shape
        """
        # infer_shape doesn't work for multinomial
        n = iscalar()
        pvals = dvector()
        size_val = (7, 3)
        n_val = 6
        pvals_val = [0.2] * 5
        post_r, out = multinomial(rng_R, size=size_val, n=n, pvals=pvals,
                                  ndim=2)

        self._compile_and_check([rng_R, n, pvals], [out],
                                [rng_R_val, n_val, pvals_val],
                                RandomFunction)
        """

        # uniform vector low and high
        low = dvector()
        high = dvector()
        post_r, out = uniform(rng_R, low=low, high=1)
        low_val = [-5, 0.5, 0, 1]
        self._compile_and_check([rng_R, low], [out], [rng_R_val, low_val],
                                RandomFunction)

        low_val = [0.9]
        self._compile_and_check([rng_R, low], [out], [rng_R_val, low_val],
                                RandomFunction)

        post_r, out = uniform(rng_R, low=low, high=high)
        low_val = [-4.0, -2]
        high_val = [-1, 0]
        self._compile_and_check([rng_R, low, high], [out],
                                [rng_R_val, low_val, high_val], RandomFunction)

        low_val = [-4.0]
        high_val = [-1]
        self._compile_and_check([rng_R, low, high], [out],
                                [rng_R_val, low_val, high_val], RandomFunction)

        # uniform broadcasting low and high
        low = dvector()
        high = dcol()
        post_r, out = uniform(rng_R, low=low, high=high)
        low_val = [-5, 0.5, 0, 1]
        high_val = [[1.0]]
        self._compile_and_check([rng_R, low, high], [out],
                                [rng_R_val, low_val, high_val], RandomFunction)

        low_val = [0.9]
        high_val = [[1.0], [1.1], [1.5]]
        self._compile_and_check([rng_R, low, high], [out],
                                [rng_R_val, low_val, high_val], RandomFunction)

        low_val = [-5, 0.5, 0, 1]
        high_val = [[1.0], [1.1], [1.5]]
        self._compile_and_check([rng_R, low, high], [out],
                                [rng_R_val, low_val, high_val], RandomFunction)

        # uniform with vector slice
        low = dvector()
        high = dvector()
        post_r, out = uniform(rng_R, low=low, high=high)
        low_val = [0.1, 0.2, 0.3]
        high_val = [1.1, 2.2, 3.3]
        size_val = (3, )
        self._compile_and_check(
            [rng_R, low, high],
            [out],
            [rng_R_val, low_val[:-1], high_val[:-1]],
            RandomFunction,
        )

        # uniform with explicit size and size implicit in parameters
        # NOTE 1: would it be desirable for size to also be supplied
        # as a Theano variable?
        post_r, out = uniform(rng_R, size=size_val, low=low, high=high)
        self._compile_and_check([rng_R, low, high], [out],
                                [rng_R_val, low_val, high_val], RandomFunction)

        # binomial with vector slice
        n = ivector()
        prob = dvector()
        post_r, out = binomial(rng_R, n=n, p=prob)
        n_val = [1, 2, 3]
        prob_val = [0.1, 0.2, 0.3]
        size_val = (3, )
        self._compile_and_check(
            [rng_R, n, prob],
            [out],
            [rng_R_val, n_val[:-1], prob_val[:-1]],
            RandomFunction,
        )

        # binomial with explicit size and size implicit in parameters
        # cf. NOTE 1
        post_r, out = binomial(rng_R, n=n, p=prob, size=size_val)
        self._compile_and_check([rng_R, n, prob], [out],
                                [rng_R_val, n_val, prob_val], RandomFunction)

        # normal with vector slice
        avg = dvector()
        std = dvector()
        post_r, out = normal(rng_R, avg=avg, std=std)
        avg_val = [1, 2, 3]
        std_val = [0.1, 0.2, 0.3]
        size_val = (3, )
        self._compile_and_check(
            [rng_R, avg, std],
            [out],
            [rng_R_val, avg_val[:-1], std_val[:-1]],
            RandomFunction,
        )

        # normal with explicit size and size implicit in parameters
        # cf. NOTE 1
        post_r, out = normal(rng_R, avg=avg, std=std, size=size_val)
        self._compile_and_check([rng_R, avg, std], [out],
                                [rng_R_val, avg_val, std_val], RandomFunction)

        # multinomial with tensor-3 probabilities
        """
Example #6
    def test_gpu3_mixture_dtype_outputs(self):
        def f_rnn(u_t, x_tm1, W_in, W):
            return (u_t * W_in + x_tm1 * W,
                    theano.tensor.cast(u_t + x_tm1, "int64"))

        u = theano.tensor.fvector("u")
        x0 = theano.tensor.fscalar("x0")
        W_in = theano.tensor.fscalar("win")
        W = theano.tensor.fscalar("w")
        output, updates = theano.scan(
            f_rnn,
            u,
            [x0, None],
            [W_in, W],
            n_steps=None,
            truncate_gradient=-1,
            go_backwards=False,
            mode=mode_with_gpu,
        )

        f2 = theano.function(
            [u, x0, W_in, W],
            output,
            updates=updates,
            allow_input_downcast=True,
            mode=mode_with_gpu,
        )

        # get random initial values
        rng = np.random.RandomState(utt.fetch_seed())
        v_u = rng.uniform(size=(4, ), low=-5.0, high=5.0)
        v_x0 = rng.uniform()
        W = rng.uniform()
        W_in = rng.uniform()

        # compute the output in numpy
        v_out1 = np.zeros((4, ))
        v_out2 = np.zeros((4, ), dtype="int64")
        v_out1[0] = v_u[0] * W_in + v_x0 * W
        v_out2[0] = v_u[0] + v_x0
        for step in range(1, 4):
            v_out1[step] = v_u[step] * W_in + v_out1[step - 1] * W
            v_out2[step] = np.int64(v_u[step] + v_out1[step - 1])

        theano_out1, theano_out2 = f2(v_u, v_x0, W_in, W)
        utt.assert_allclose(theano_out1, v_out1)
        utt.assert_allclose(theano_out2, v_out2)

        topo = f2.maker.fgraph.toposort()
        scan_node = [
            node for node in topo
            if isinstance(node.op, theano.scan_module.scan_op.Scan)
        ]
        assert len(scan_node) == 1
        scan_node = scan_node[0]
        assert scan_node.op.gpua

        scan_node_topo = scan_node.op.fn.maker.fgraph.toposort()

        # check that there is no gpu transfer in the inner loop.
        assert not any(
            [isinstance(node.op, HostFromGpu) for node in scan_node_topo])
        assert not any(
            [isinstance(node.op, GpuFromHost) for node in scan_node_topo])
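
The toposort-and-filter pattern in the last few assertions recurs throughout these GPU tests; it can be factored into a small helper (a hypothetical convenience, not part of the test suite):

def count_ops(compiled_fn, op_type):
    # Count nodes whose Op is an instance of op_type in a compiled
    # function's optimized graph.
    return sum(
        isinstance(node.op, op_type)
        for node in compiled_fn.maker.fgraph.toposort()
    )

# e.g. the transfer checks above become:
#   assert count_ops(scan_fn, HostFromGpu) == 0
#   assert count_ops(scan_fn, GpuFromHost) == 0
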
Example #7
from tests import unittest_tools as utt
from tests.gpuarray.config import mode_with_gpu, mode_without_gpu, test_ctx_name
from tests.tensor.test_basic import (
    TestAlloc,
    TestComparison,
    TestJoinAndSplit,
    TestReshape,
)
from tests.tensor.utils import rand, safe_make_node


pygpu = pytest.importorskip("pygpu")
gpuarray = pygpu.gpuarray

utt.seed_rng()
rng = np.random.RandomState(seed=utt.fetch_seed())


def inplace_func(
    inputs,
    outputs,
    mode=None,
    allow_input_downcast=False,
    on_unused_input="raise",
    name=None,
):
    if mode is None:
        mode = mode_with_gpu
    return aesara.function(
        inputs,
        outputs,
        mode=mode,
        allow_input_downcast=allow_input_downcast,
        on_unused_input=on_unused_input,
        name=name,
    )
Example #8
 def setup_method(self):
     super().setup_method()
     self.rng = np.random.RandomState(utt.fetch_seed())
     self.A = matrix(dtype=self.dtype)
     self.X = np.asarray(self.rng.rand(5, 5), dtype=self.dtype)
     self.S = self.X.dot(self.X.T)
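
self.S = self.X.dot(self.X.T) builds a symmetric positive semi-definite matrix, a common fixture for factorization tests. The property is easy to confirm with plain NumPy (standalone sketch):

import numpy as np

X = np.random.RandomState(0).rand(5, 5)
S = X.dot(X.T)
assert np.allclose(S, S.T)                      # symmetric
assert np.all(np.linalg.eigvalsh(S) >= -1e-12)  # PSD, up to rounding
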
Example #9
 def test_make_node(self, A_func, b_func, error_message):
     np.random.default_rng(utt.fetch_seed())
     with pytest.raises(ValueError, match=error_message):
         A = A_func()
         b = b_func()
         SolveBase()(A, b)
Example #10
    def test_DownsampleFactorMax(self):
        rng = np.random.RandomState(utt.fetch_seed())
        # maxpool, input size
        examples = (
            ((2,), (16,)),
            ((2,), (4, 16)),
            ((2,), (4, 2, 16)),
            ((1, 1), (4, 2, 16, 16)),
            ((2, 2), (4, 2, 16, 16)),
            ((3, 3), (4, 2, 16, 16)),
            ((3, 2), (4, 2, 16, 16)),
            ((3, 2, 2), (3, 2, 16, 16, 16)),
            ((2, 2, 3, 2), (3, 2, 6, 6, 6, 5)),
        )

        for example, ignore_border, mode in product(
            examples,
            [True, False],
            ["max", "sum", "average_inc_pad", "average_exc_pad"],
        ):
            (maxpoolshp, inputsize) = example
            imval = rng.rand(*inputsize)
            images = aesara.shared(imval)

            # Pure NumPy computation
            numpy_output_val = self.numpy_max_pool_nd(imval,
                                                      maxpoolshp,
                                                      ignore_border,
                                                      mode=mode)

            # The pool_2d or pool_3d helper methods
            if len(maxpoolshp) == 2:
                output = pool_2d(images, maxpoolshp, ignore_border, mode=mode)
                f = function(
                    [],
                    [
                        output,
                    ],
                )
                output_val = f()
                utt.assert_allclose(output_val, numpy_output_val)
            elif len(maxpoolshp) == 3:
                output = pool_3d(images, maxpoolshp, ignore_border, mode=mode)
                f = function(
                    [],
                    [
                        output,
                    ],
                )
                output_val = f()
                utt.assert_allclose(output_val, numpy_output_val)

            # Pool op
            maxpool_op = Pool(ndim=len(maxpoolshp),
                              ignore_border=ignore_border,
                              mode=mode)(images, maxpoolshp)

            output_shape = Pool.out_shape(
                imval.shape,
                maxpoolshp,
                ndim=len(maxpoolshp),
                ignore_border=ignore_border,
            )
            utt.assert_allclose(np.asarray(output_shape),
                                numpy_output_val.shape)
            f = function([], maxpool_op)
            output_val = f()
            utt.assert_allclose(output_val, numpy_output_val)
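
For the non-overlapping 2-D max case, the reference helper numpy_max_pool_nd boils down to a reshape-and-reduce; a compact standalone equivalent (illustrative, not the actual test helper):

import numpy as np

x = np.random.RandomState(0).rand(4, 2, 16, 16)
ph, pw = 2, 2  # pool shape; assumed to divide the input evenly
pooled = x.reshape(4, 2, 16 // ph, ph, 16 // pw, pw).max(axis=(3, 5))
assert pooled.shape == (4, 2, 8, 8)
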
Example #11
    def test_DownsampleFactorMaxStride(self):
        rng = np.random.RandomState(utt.fetch_seed())
        # maxpool, stride, ignore_border, input, output sizes
        examples = (
            ((1, 1), (1, 1), True, (4, 10, 16, 16), (4, 10, 16, 16)),
            ((1, 1), (5, 7), True, (4, 10, 16, 16), (4, 10, 4, 3)),
            ((1, 1), (1, 1), False, (4, 10, 16, 16), (4, 10, 16, 16)),
            ((1, 1), (5, 7), False, (4, 10, 16, 16), (4, 10, 4, 3)),
            ((3, 3), (1, 1), True, (4, 10, 16, 16), (4, 10, 14, 14)),
            ((3, 3), (3, 3), True, (4, 10, 16, 16), (4, 10, 5, 5)),
            ((3, 3), (5, 7), True, (4, 10, 16, 16), (4, 10, 3, 2)),
            ((3, 3), (1, 1), False, (4, 10, 16, 16), (4, 10, 14, 14)),
            ((3, 3), (3, 3), False, (4, 10, 16, 16), (4, 10, 6, 6)),
            ((3, 3), (5, 7), False, (4, 10, 16, 16), (4, 10, 4, 3)),
            ((5, 3), (1, 1), True, (4, 10, 16, 16), (4, 10, 12, 14)),
            ((5, 3), (3, 3), True, (4, 10, 16, 16), (4, 10, 4, 5)),
            ((5, 3), (5, 7), True, (4, 10, 16, 16), (4, 10, 3, 2)),
            ((5, 3), (1, 1), False, (4, 10, 16, 16), (4, 10, 12, 14)),
            ((5, 3), (3, 3), False, (4, 10, 16, 16), (4, 10, 5, 6)),
            ((5, 3), (5, 7), False, (4, 10, 16, 16), (4, 10, 4, 3)),
            ((16, 16), (1, 1), True, (4, 10, 16, 16), (4, 10, 1, 1)),
            ((16, 16), (5, 7), True, (4, 10, 16, 16), (4, 10, 1, 1)),
            ((16, 16), (1, 1), False, (4, 10, 16, 16), (4, 10, 1, 1)),
            ((16, 16), (5, 7), False, (4, 10, 16, 16), (4, 10, 1, 1)),
            ((3,), (5,), True, (16,), (3,)),
            ((3,), (5,), True, (2, 16), (2, 3)),
            ((5,), (3,), True, (2, 3, 16), (2, 3, 4)),
            ((5, 1, 3), (3, 3, 3), True, (2, 16, 16, 16), (2, 4, 6, 5)),
            ((5, 1, 3), (3, 3, 3), True, (4, 2, 16, 16, 16), (4, 2, 4, 6, 5)),
        )

        for example, mode in product(
            examples,
            ["max", "sum", "average_inc_pad", "average_exc_pad"],
        ):
            (maxpoolshp, stride, ignore_border, inputshp, outputshp) = example
            # generate random images
            imval = rng.rand(*inputshp)
            images = aesara.shared(imval)
            # Pool op
            numpy_output_val = self.numpy_max_pool_nd_stride(
                imval, maxpoolshp, ignore_border, stride, mode)
            assert (
                numpy_output_val.shape == outputshp
            ), f"outshape is {outputshp}, calculated shape is {numpy_output_val.shape}"
            maxpool_op = Pool(ndim=len(maxpoolshp),
                              ignore_border=ignore_border,
                              mode=mode)(images, maxpoolshp, stride)
            f = function([], maxpool_op)
            output_val = f()
            utt.assert_allclose(output_val, numpy_output_val)
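
With ignore_border=True, the expected output sizes in the table follow the usual pooling formula out = floor((in - pool) / stride) + 1, applied per dimension. A few rows re-derived by hand (plain Python):

def pooled_size(inp, pool, stride):
    # ignore_border=True: only windows that fit entirely inside count
    return (inp - pool) // stride + 1

assert pooled_size(16, 3, 3) == 5  # row ((3, 3), (3, 3), True, ...): output 5 x 5
assert pooled_size(16, 5, 3) == 4  # row ((5, 3), (3, 3), True, ...): first output dim
assert pooled_size(16, 3, 7) == 2  # row ((3, 3), (5, 7), True, ...): second output dim
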
Example #12
    def test_infer_shape(self):
        image = dtensor4()
        maxout = dtensor4()
        gz = dtensor4()
        rng = np.random.RandomState(utt.fetch_seed())
        maxpoolshps = ((1, 1), (2, 2), (3, 3), (2, 3), (3, 2))

        image_val = rng.rand(4, 6, 7, 9)
        out_shapes = [
            [
                [[4, 6, 7, 9], [4, 6, 7, 9]],
                [[4, 6, 3, 4], [4, 6, 4, 5]],
                [[4, 6, 2, 3], [4, 6, 3, 3]],
                [[4, 6, 3, 3], [4, 6, 4, 3]],
                [[4, 6, 2, 4], [4, 6, 3, 5]],
            ],
            [
                [None, None],
                [[4, 6, 4, 5], None],
                [[4, 6, 3, 3], None],
                [[4, 6, 4, 3], None],
                [[4, 6, 3, 5], None],
            ],
            [
                [None, None],
                [None, None],
                [[4, 6, 3, 4], None],
                [[4, 6, 4, 4], None],
                [None, None],
            ],
        ]

        for i, maxpoolshp in enumerate(maxpoolshps):
            for j, ignore_border in enumerate([True, False]):
                for k, pad in enumerate([(0, 0), (1, 1), (1, 2)]):
                    if out_shapes[k][i][j] is None:
                        continue
                    # checking shapes generated by Pool
                    self._compile_and_check(
                        [image],
                        [
                            Pool(ignore_border=ignore_border)(
                                image, maxpoolshp, pad=pad)
                        ],
                        [image_val],
                        Pool,
                    )

                    # checking shapes generated by MaxPoolGrad
                    maxout_val = rng.rand(*out_shapes[k][i][j])
                    gz_val = rng.rand(*out_shapes[k][i][j])
                    self._compile_and_check(
                        [image, maxout, gz],
                        [
                            MaxPoolGrad(ignore_border=ignore_border)(
                                image, maxout, gz, maxpoolshp, pad=pad)
                        ],
                        [image_val, maxout_val, gz_val],
                        MaxPoolGrad,
                        warn=False,
                    )
        # checking with broadcastable input
        image = tensor(dtype="float64",
                       broadcastable=(False, False, True, True))
        image_val = rng.rand(4, 6, 1, 1)
        self._compile_and_check(
            [image],
            [Pool(ignore_border=True)(image, (2, 2), pad=(0, 0))],
            [image_val],
            Pool,
        )
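
The out_shapes table encodes the padded-pooling formula out = floor((in + 2 * pad - pool) / stride) + 1, with the stride defaulting to the pool size. One entry re-derived by hand (plain Python):

def padded_pooled_size(inp, pool, pad, stride):
    return (inp + 2 * pad - pool) // stride + 1

# maxpoolshp=(2, 2), pad=(1, 1) on the (4, 6, 7, 9) image:
assert padded_pooled_size(7, 2, 1, 2) == 4
assert padded_pooled_size(9, 2, 1, 2) == 5  # matches out_shapes[1][1][0] == [4, 6, 4, 5]
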
Example #13
def random_complex128_ranged(min, max, shape, rng=None):
    if rng is None:
        rng = np.random.default_rng(seed=utt.fetch_seed())
    return np.asarray(rng.random(shape) * (max - min) + min,
                      dtype="complex128")
Example #14
def integers_ranged(min, max, shape, rng=None):
    if rng is None:
        rng = np.random.default_rng(seed=utt.fetch_seed())
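    # Generator.integers excludes the upper bound by default, so passing
    # max + 1 makes max inclusive.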
    return rng.integers(min, max + 1, shape)
Example #15
 def test_grad(self):
     rng = np.random.default_rng(utt.fetch_seed())
     utt.verify_grad(self.op, [self.a[self.idx_sorted], self.b], rng=rng)
Example #16
 def test__repr__(self):
     np.random.default_rng(utt.fetch_seed())
     A = matrix()
     b = matrix()
     y = SolveBase()(A, b)
     assert y.__repr__() == "SolveBase{lower=False, check_finite=True}.0"
Example #17
 def setup_method(self):
     self.rng = np.random.default_rng(utt.fetch_seed())
Example #18
 def setup_method(self):
     self.rng = np.random.RandomState(utt.fetch_seed(666))
Example #19
    def test_one_sequence_one_output_weights_gpu2(self):
        def f_rnn(u_t, x_tm1, W_in, W):
            return u_t * W_in + x_tm1 * W

        u = theano.tensor.fvector("u")
        x0 = theano.tensor.fscalar("x0")
        W_in = theano.tensor.fscalar("win")
        W = theano.tensor.fscalar("w")
        output, updates = theano.scan(
            f_rnn,
            u,
            x0,
            [W_in, W],
            n_steps=None,
            truncate_gradient=-1,
            go_backwards=False,
            mode=mode_with_gpu,
        )

        f2 = theano.function(
            [u, x0, W_in, W],
            output,
            updates=updates,
            allow_input_downcast=True,
            mode=mode_with_gpu,
        )

        # get random initial values
        rng = np.random.RandomState(utt.fetch_seed())
        v_u = rng.uniform(size=(4, ), low=-5.0, high=5.0)
        v_x0 = rng.uniform()
        W = rng.uniform()
        W_in = rng.uniform()

        # compute the output in numpy
        v_out = np.zeros((4, ))
        v_out[0] = v_u[0] * W_in + v_x0 * W
        for step in range(1, 4):
            v_out[step] = v_u[step] * W_in + v_out[step - 1] * W

        theano_values = f2(v_u, v_x0, W_in, W)
        utt.assert_allclose(theano_values, v_out)

        topo = f2.maker.fgraph.toposort()
        assert sum([isinstance(node.op, HostFromGpu) for node in topo]) == 1
        assert sum([isinstance(node.op, GpuFromHost) for node in topo]) == 4

        scan_node = [
            node for node in topo
            if isinstance(node.op, theano.scan_module.scan_op.Scan)
        ]
        assert len(scan_node) == 1
        scan_node = scan_node[0]
        scan_node_topo = scan_node.op.fn.maker.fgraph.toposort()

        # check that there is no gpu transfer in the inner loop.
        assert any(
            [isinstance(node.op, GpuElemwise) for node in scan_node_topo])
        assert not any(
            [isinstance(node.op, HostFromGpu) for node in scan_node_topo])
        assert not any(
            [isinstance(node.op, GpuFromHost) for node in scan_node_topo])
Example #20
    def test_one_sequence_one_output_weights_gpu1(self):
        def f_rnn(u_t, x_tm1, W_in, W):
            return u_t * W_in + x_tm1 * W

        u = fvector("u")
        x0 = fscalar("x0")
        W_in = fscalar("win")
        W = fscalar("w")

        mode = mode_with_gpu.excluding("InputToGpuOptimizer")
        output, updates = scan(
            f_rnn,
            u,
            x0,
            [W_in, W],
            n_steps=None,
            truncate_gradient=-1,
            go_backwards=False,
            mode=mode,
        )

        output = GpuFromHost(test_ctx_name)(output)
        f2 = aesara.function(
            [u, x0, W_in, W],
            output,
            updates=updates,
            allow_input_downcast=True,
            mode=mode,
        )

        rng = np.random.RandomState(utt.fetch_seed())
        v_u = rng.uniform(size=(4, ), low=-5.0, high=5.0)
        v_x0 = rng.uniform()
        W = rng.uniform()
        W_in = rng.uniform()

        v_u = np.asarray(v_u, dtype="float32")
        v_x0 = np.asarray(v_x0, dtype="float32")
        W = np.asarray(W, dtype="float32")
        W_in = np.asarray(W_in, dtype="float32")

        # compute the output in numpy
        v_out = np.zeros((4, ))
        v_out[0] = v_u[0] * W_in + v_x0 * W
        for step in range(1, 4):
            v_out[step] = v_u[step] * W_in + v_out[step - 1] * W

        aesara_values = f2(v_u, v_x0, W_in, W)
        utt.assert_allclose(aesara_values, v_out)

        topo = f2.maker.fgraph.toposort()
        assert sum([isinstance(node.op, HostFromGpu) for node in topo]) == 0
        assert sum([isinstance(node.op, GpuFromHost) for node in topo]) == 4

        scan_node = [
            node for node in topo if isinstance(node.op, scan.op.Scan)
        ]
        assert len(scan_node) == 1
        scan_node = scan_node[0]
        scan_node_topo = scan_node.op.fn.maker.fgraph.toposort()

        # check that there is no gpu transfer in the inner loop.
        assert any(
            [isinstance(node.op, GpuElemwise) for node in scan_node_topo])
        assert not any(
            [isinstance(node.op, HostFromGpu) for node in scan_node_topo])
        assert not any(
            [isinstance(node.op, GpuFromHost) for node in scan_node_topo])
Example #21
        def test_specify_shape(self):
            dtype = self.dtype
            if dtype is None:
                dtype = aesara.config.floatX

            rng = np.random.default_rng(utt.fetch_seed())
            x1_1 = np.asarray(rng.uniform(1, 2, [4, 2]), dtype=dtype)
            x1_1 = self.cast_value(x1_1)
            x1_2 = np.asarray(rng.uniform(1, 2, [4, 2]), dtype=dtype)
            x1_2 = self.cast_value(x1_2)
            x2 = np.asarray(rng.uniform(1, 2, [4, 3]), dtype=dtype)
            x2 = self.cast_value(x2)

            # Test that we can replace with values of the same shape
            x1_shared = self.shared_constructor(x1_1)
            x1_specify_shape = specify_shape(x1_shared, x1_1.shape)
            x1_shared.set_value(x1_2)
            assert np.allclose(self.ref_fct(x1_shared.get_value(borrow=True)),
                               self.ref_fct(x1_2))
            shape_op_fct = aesara.function([], x1_shared.shape)
            topo = shape_op_fct.maker.fgraph.toposort()
            if aesara.config.mode != "FAST_COMPILE":
                assert len(topo) == 3
                assert isinstance(topo[0].op, Shape_i)
                assert isinstance(topo[1].op, Shape_i)
                assert isinstance(topo[2].op, MakeVector)

            # Test that we forward the input
            specify_shape_fct = aesara.function([], x1_specify_shape)
            assert np.all(
                self.ref_fct(specify_shape_fct()) == self.ref_fct(x1_2))
            topo_specify = specify_shape_fct.maker.fgraph.toposort()
            assert len(topo_specify) == 2

            # Test that we put the shape info into the graph
            shape_constant_fct = aesara.function([], x1_specify_shape.shape)
            assert np.all(shape_constant_fct() == shape_op_fct())
            topo_cst = shape_constant_fct.maker.fgraph.toposort()
            if aesara.config.mode != "FAST_COMPILE":
                assert len(topo_cst) == 1
                assert topo_cst[0].op == aesara.compile.function.types.deep_copy_op

            # Test that we can take the grad.
            if aesara.sparse.enable_sparse and isinstance(
                    x1_specify_shape.type, aesara.sparse.SparseType):
                # SparseVariable doesn't support sum for now.
                assert not hasattr(x1_specify_shape, "sum")
            else:
                shape_grad = aesara.gradient.grad(x1_specify_shape.sum(),
                                                  x1_shared)
                shape_constant_fct_grad = aesara.function([], shape_grad)
                # aesara.printing.debugprint(shape_constant_fct_grad)
                shape_constant_fct_grad()

            # Test that we can replace with values of a different shape;
            # this will raise an error in some cases, but not all
            specify_shape_fct()
            x1_shared.set_value(x2)
            with pytest.raises(AssertionError):
                specify_shape_fct()

            # No assertion will be raised, as the Op is removed from the
            # graph when optimization is enabled
            if aesara.config.mode not in [
                    "FAST_COMPILE", "DebugMode", "DEBUG_MODE"
            ]:
                shape_constant_fct()
            else:
                with pytest.raises(AssertionError):
                    shape_constant_fct()
Example #22
 def setup_method(self):
     super().setup_method()
     self.rng = np.random.default_rng(utt.fetch_seed())
     self.A = matrix(dtype=self.dtype)
     self.op = svd
Example #23
        def test_specify_shape_inplace(self):
            # Test that specify_shape doesn't break inserting an inplace op

            dtype = self.dtype
            if dtype is None:
                dtype = aesara.config.floatX

            rng = np.random.default_rng(utt.fetch_seed())
            a = np.asarray(rng.uniform(1, 2, [40, 40]), dtype=dtype)
            a = self.cast_value(a)
            a_shared = self.shared_constructor(a)
            b = np.asarray(rng.uniform(1, 2, [40, 40]), dtype=dtype)
            b = self.cast_value(b)
            b_shared = self.shared_constructor(b)
            s = np.zeros((40, 40), dtype=dtype)
            s = self.cast_value(s)
            s_shared = self.shared_constructor(s)
            f = aesara.function(
                [],
                updates=[(s_shared,
                          aesara.tensor.dot(a_shared, b_shared) + s_shared)],
            )
            topo = f.maker.fgraph.toposort()
            f()
            # [Gemm{inplace}(<TensorType(float64, matrix)>, 0.01, <TensorType(float64, matrix)>, <TensorType(float64, matrix)>, 2e-06)]
            if aesara.config.mode != "FAST_COMPILE":
                assert (sum([
                    node.op.__class__.__name__
                    in ["Gemm", "GpuGemm", "StructuredDot"] for node in topo
                ]) == 1)
                assert all(node.op == aesara.tensor.blas.gemm_inplace
                           for node in topo
                           if isinstance(node.op, aesara.tensor.blas.Gemm))
                assert all(node.op.inplace for node in topo
                           if node.op.__class__.__name__ == "GpuGemm")
            # There is no inplace gemm for sparse
            # assert all(node.op.inplace for node in topo if node.op.__class__.__name__ == "StructuredDot")
            s_shared_specify = specify_shape(
                s_shared,
                s_shared.get_value(borrow=True).shape)

            # now test with the specify shape op in the output
            f = aesara.function(
                [],
                s_shared.shape,
                updates=[
                    (s_shared,
                     aesara.tensor.dot(a_shared, b_shared) + s_shared_specify)
                ],
            )
            topo = f.maker.fgraph.toposort()
            shp = f()
            assert np.all(shp == (40, 40))
            if aesara.config.mode != "FAST_COMPILE":
                assert (sum([
                    node.op.__class__.__name__
                    in ["Gemm", "GpuGemm", "StructuredDot"] for node in topo
                ]) == 1)
                assert all(node.op == aesara.tensor.blas.gemm_inplace
                           for node in topo
                           if isinstance(node.op, aesara.tensor.blas.Gemm))
                assert all(node.op.inplace for node in topo
                           if node.op.__class__.__name__ == "GpuGemm")
            # now test with the specify shape op in the inputs and outputs
            a_shared = specify_shape(a_shared,
                                     a_shared.get_value(borrow=True).shape)
            b_shared = specify_shape(b_shared,
                                     b_shared.get_value(borrow=True).shape)

            f = aesara.function(
                [],
                s_shared.shape,
                updates=[
                    (s_shared,
                     aesara.tensor.dot(a_shared, b_shared) + s_shared_specify)
                ],
            )
            topo = f.maker.fgraph.toposort()
            shp = f()
            assert np.all(shp == (40, 40))
            if aesara.config.mode != "FAST_COMPILE":
                assert (sum([
                    node.op.__class__.__name__
                    in ["Gemm", "GpuGemm", "StructuredDot"] for node in topo
                ]) == 1)
                assert all(node.op == aesara.tensor.blas.gemm_inplace
                           for node in topo
                           if isinstance(node.op, aesara.tensor.blas.Gemm))
                assert all(node.op.inplace for node in topo
                           if node.op.__class__.__name__ == "GpuGemm")
Example #24
def test_det_grad():
    rng = np.random.default_rng(utt.fetch_seed())

    r = rng.standard_normal((5, 5)).astype(config.floatX)
    utt.verify_grad(det, [r], rng=np.random)
Example #25
    def test_random_function_ndim_added(self):
        # Test that the random function helper accepts ndim_added as a
        # keyword argument.
        # When using numpy's uniform distribution, ndim_added should be 0,
        # because the shape provided as an argument is the output shape.
        # Specifying a different ndim_added changes the Op's expected output
        # ndim, so numpy's uniform produces a result of the wrong shape,
        # and a ValueError should be raised.
        def ndim_added_deco(ndim_added):
            def randomfunction(random_state,
                               size=(),
                               low=0.0,
                               high=0.0,
                               ndim=None):
                ndim, size, bcast = raw_random._infer_ndim_bcast(ndim, size)
                if ndim_added < 0:
                    bcast = bcast[:ndim_added]
                else:
                    bcast = bcast + ((False, ) * ndim_added)
                assert len(bcast) == ndim + ndim_added
                op = RandomFunction(
                    "uniform",
                    tensor.TensorType(dtype="float64", broadcastable=bcast),
                    ndim_added=ndim_added,
                )
                return op(random_state, size, low, high)

            return randomfunction

        uni_1 = ndim_added_deco(1)
        uni_0 = ndim_added_deco(0)
        uni_m1 = ndim_added_deco(-1)

        rng_R = random_state_type()

        p_uni11, uni11 = uni_1(rng_R, size=(4, ))
        p_uni12, uni12 = uni_1(rng_R, size=(3, 4))
        p_uni01, uni01 = uni_0(rng_R, size=(4, ))
        p_uni02, uni02 = uni_0(rng_R, size=(3, 4))
        p_unim11, unim11 = uni_m1(rng_R, size=(4, ))
        p_unim12, unim12 = uni_m1(rng_R, size=(3, 4))

        assert uni11.ndim == 2
        assert uni12.ndim == 3
        assert uni01.ndim == 1
        assert uni02.ndim == 2
        assert unim11.ndim == 0
        assert unim12.ndim == 1

        f11 = function(
            [
                In(
                    rng_R,
                    value=np.random.RandomState(utt.fetch_seed()),
                    update=p_uni11,
                    mutable=True,
                )
            ],
            [uni11],
            accept_inplace=True,
        )
        f12 = function(
            [
                In(
                    rng_R,
                    value=np.random.RandomState(utt.fetch_seed()),
                    update=p_uni12,
                    mutable=True,
                )
            ],
            [uni12],
            accept_inplace=True,
        )
        fm11 = function(
            [
                In(
                    rng_R,
                    value=np.random.RandomState(utt.fetch_seed()),
                    update=p_unim11,
                    mutable=True,
                )
            ],
            [unim11],
            accept_inplace=True,
        )
        fm12 = function(
            [
                In(
                    rng_R,
                    value=np.random.RandomState(utt.fetch_seed()),
                    update=p_unim12,
                    mutable=True,
                )
            ],
            [unim12],
            accept_inplace=True,
        )
        f0 = function(
            [
                In(
                    rng_R,
                    value=np.random.RandomState(utt.fetch_seed()),
                    update=p_uni02,
                    mutable=True,
                )
            ],
            [uni01, uni02],
            accept_inplace=True,
        )
        with pytest.raises(ValueError):
            f11()
        with pytest.raises(ValueError):
            f12()
        with pytest.raises(ValueError):
            fm11()
        with pytest.raises(ValueError):
            fm12()
        u01, u02 = f0()
        assert np.allclose(u01, u02[0])
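
The six ndim assertions in the middle of this test reduce to simple bookkeeping: the output ndim is len(size) + ndim_added. Spelled out (plain Python, mirroring the cases above):

cases = [
    ((4,), 1, 2), ((3, 4), 1, 3),    # uni_1
    ((4,), 0, 1), ((3, 4), 0, 2),    # uni_0
    ((4,), -1, 0), ((3, 4), -1, 1),  # uni_m1
]
for size, ndim_added, expected_ndim in cases:
    assert len(size) + ndim_added == expected_ndim
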
Example #26
 def setup_method(self):
     super().setup_method()
     self.rng = np.random.default_rng(utt.fetch_seed())
     self.A = matrix(dtype=self.dtype)
     self.X = np.asarray(self.rng.random((5, 5)), dtype=self.dtype)
     self.S = self.X.dot(self.X.T)
Example #27
 def setup_method(self):
     super().setup_method()
     self.rng = np.random.RandomState(utt.fetch_seed())
     self.A = theano.tensor.matrix(dtype=self.dtype)
     self.op = svd
Example #28
 def setup_method(self):
     super().setup_method()
     self.op_class = MatrixInverse
     self.op = matrix_inverse
     self.rng = np.random.default_rng(utt.fetch_seed())
Example #29
 def test_alloc_diag_grad(self):
     rng = np.random.RandomState(utt.fetch_seed())
     x = rng.rand(5)
     tensor.verify_grad(alloc_diag, [x], rng=rng)
Example #30
def integers_nonzero(*shape, rng=None):
    if rng is None:
        rng = np.random.default_rng(seed=utt.fetch_seed())
    r = rng.integers(-5, 5, shape)
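    # Any zero draw is shifted to 5 below, so the result never contains 0.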
    return r + (r == 0) * 5