Example #1
def test_jax_Reshape():
    a = vector("a")
    x = reshape(a, (2, 2))
    x_fg = FunctionGraph([a], [x])
    compare_jax_and_py(x_fg, [np.r_[1.0, 2.0, 3.0, 4.0].astype(config.floatX)])

    # Guard against the breaking "omnistaging" changes in JAX.
    # See https://github.com/tensorflow/probability/commit/782d0c64eb774b9aac54a1c8488e4f1f96fbbc68
    x = reshape(a, (a.shape[0] // 2, a.shape[0] // 2))
    x_fg = FunctionGraph([a], [x])
    compare_jax_and_py(x_fg, [np.r_[1.0, 2.0, 3.0, 4.0].astype(config.floatX)])
Example #2
def test_jacobian_disconnected_inputs():
    # Test that disconnected inputs are properly handled by jacobian.

    v1 = vector()
    v2 = vector()
    jacobian_v = aesara.gradient.jacobian(1 + v1,
                                          v2,
                                          disconnected_inputs="ignore")
    func_v = aesara.function([v1, v2], jacobian_v)
    val = np.arange(4.0).astype(aesara.config.floatX)
    assert np.allclose(func_v(val, val), np.zeros((4, 4)))

    s1 = scalar()
    s2 = scalar()
    jacobian_s = aesara.gradient.jacobian(1 + s1,
                                          s2,
                                          disconnected_inputs="ignore")
    func_s = aesara.function([s2], jacobian_s)
    val = np.array(1.0).astype(aesara.config.floatX)
    assert np.allclose(func_s(val), np.zeros(1))
Example #3
    def test_incorrect_type(self):

        x = vector("x")
        with pytest.raises(TypeError):
            # Incorrect shape for test value
            x.tag.test_value = np.empty((2, 2))

        x = fmatrix("x")
        with pytest.raises(TypeError):
            # Incorrect dtype (float64) for test value
            x.tag.test_value = np.random.random((3, 4))
Example #4
    def setup_method(self):
        super().setup_method()
        self.op_class = SearchsortedOp
        self.op = SearchsortedOp()

        self.x = vector("x")
        self.v = tensor3("v")

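        # Random test data: a 1-D array to search, a 3-D array of query values,
        # and the argsort of `a` (a `sorter`-style index).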
        self.a = 30 * np.random.random(50).astype(config.floatX)
        self.b = 30 * np.random.random((8, 10, 5)).astype(config.floatX)
        self.idx_sorted = np.argsort(self.a).astype("int32")
Example #5
    def test_python_perform(self):
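        # In the pure-Python mode, SpecifyShape must validate both the number
        # of dimensions and the concrete shape at call time.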
        x = scalar()
        s = vector(dtype="int32")
        y = specify_shape(x, s)
        f = aesara.function([x, s], y, mode=Mode("py"))
        assert f(12, ()) == 12
        with pytest.raises(
            AssertionError,
            match=r"Got 0 dimensions \(shape \(\)\), expected 1 dimensions with shape \(2,\).",
        ):
            f(12, (2,))

        x = matrix()
        s = vector(dtype="int32")
        y = specify_shape(x, s)
        f = aesara.function([x, s], y, mode=Mode("py"))
        f(np.ones((2, 3)).astype(config.floatX), (2, 3))
        with pytest.raises(
            AssertionError, match=r"Got shape \(3, 4\), expected \(2, 3\)."
        ):
            f(np.ones((3, 4)).astype(config.floatX), (2, 3))
Example #6
def test_second():
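    # `second(a, b)` (a.k.a. `fill`) returns `b` broadcast to the shape of `a`;
    # exercise both the scalar (`aes`) and tensor (`aet`) variants.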
    a0 = scalar("a0")
    b = scalar("b")

    out = aes.second(a0, b)
    fgraph = FunctionGraph([a0, b], [out])
    compare_jax_and_py(fgraph, [10.0, 5.0])

    a1 = vector("a1")
    out = aet.second(a1, b)
    fgraph = FunctionGraph([a1, b], [out])
    compare_jax_and_py(fgraph, [np.zeros([5], dtype=config.floatX), 5.0])
Example #7
def test_local_mul_s_v():
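    # The "local_mul_s_v" rewrite should replace the sparse MulSV op, so no
    # MulSV node may remain in the compiled graph.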
    mode = get_default_mode()
    mode = mode.including("specialize", "local_mul_s_v")

    for sp_format in ["csr"]:  # Not implemented for other formats
        inputs = [getattr(aesara.sparse, sp_format + "_matrix")(), vector()]

        f = aesara.function(inputs, sparse.mul_s_v(*inputs), mode=mode)

        assert not any(
            isinstance(node.op, sparse.MulSV) for node in f.maker.fgraph.toposort()
        )
Example #8
    def make_node(self, x):
        x = as_tensor_variable(x)
        assert x.ndim == 2
        # NumPy's linalg.eigh may return either double- or single-
        # precision eigenvalues depending on the installed version of
        # LAPACK.  Rather than trying to reproduce the (rather
        # involved) logic, we just probe linalg.eigh with a trivial
        # input.
        w_dtype = self._numop([[np.dtype(x.dtype).type()]])[0].dtype.name
        w = vector(dtype=w_dtype)
        v = matrix(dtype=w_dtype)
        return Apply(self, [x], [w, v])
Example #9
    def test_infer_shape(self):
        admat = matrix()
        advec = vector()
        rng = np.random.default_rng(utt.fetch_seed())
        admat_val = rng.random((3, 4)).astype(config.floatX)
        advec_val = rng.random((4,)).astype(config.floatX)
        # Check that the Op's infer_shape agrees with the shape actually computed.
        self._compile_and_check(
            [admat, advec],
            [SoftmaxWithBias()(admat, advec)],
            [admat_val, advec_val],
            SoftmaxWithBias,
        )
Example #10
    def test_basic(self):
        for ndim in [1, 3]:
            x = TensorType(config.floatX, [False] * ndim)()
            a = np.random.random((10,) * ndim).astype(config.floatX)

            for axis in self._possible_axis(ndim):
                for dtype in integer_dtypes:
                    r_var = scalar(dtype=dtype)
                    r = np.asarray(3, dtype=dtype)
                    if dtype == "uint64" or (
                        dtype in self.numpy_unsupported_dtypes and r_var.ndim == 1
                    ):
                        with pytest.raises(TypeError):
                            repeat(x, r_var, axis=axis)
                    else:
                        f = aesara.function([x, r_var], repeat(x, r_var, axis=axis))
                        assert np.allclose(np.repeat(a, r, axis=axis), f(a, r))

                        r_var = vector(dtype=dtype)
                        if axis is None:
                            r = np.random.randint(1, 6, size=a.size).astype(dtype)
                        else:
                            r = np.random.randint(1, 6, size=(10,)).astype(dtype)

                        if dtype in self.numpy_unsupported_dtypes and r_var.ndim == 1:
                            with pytest.raises(TypeError):
                                repeat(x, r_var, axis=axis)
                        else:
                            f = aesara.function([x, r_var], repeat(x, r_var, axis=axis))
                            assert np.allclose(np.repeat(a, r, axis=axis), f(a, r))

                        # check when r is a list holding a single integer, e.g. [3];
                        # repeat should then avoid creating a Repeat node.
                        r = np.random.randint(1, 11, size=()).astype(dtype) + 2
                        f = aesara.function([x], repeat(x, [r], axis=axis))
                        assert np.allclose(np.repeat(a, r, axis=axis), f(a))
                        assert not np.any(
                            [
                                isinstance(n.op, Repeat)
                                for n in f.maker.fgraph.toposort()
                            ]
                        )

                        # check when r is an Aesara TensorType whose broadcastable pattern is (True,)
                        r_var = TensorType(shape=(True,), dtype=dtype)()
                        r = np.random.randint(1, 6, size=(1,)).astype(dtype)
                        f = aesara.function([x, r_var], repeat(x, r_var, axis=axis))
                        assert np.allclose(np.repeat(a, r[0], axis=axis), f(a, r))
                        assert not np.any(
                            [
                                isinstance(n.op, Repeat)
                                for n in f.maker.fgraph.toposort()
                            ]
                        )
Example #11
def test_incsub_offset():
    # Test for https://github.com/Theano/Theano/issues/5670

    # Build a GPU variable whose value will have an offset (x1)
    x = gpuarray_shared_constructor(np.zeros(5, dtype=aesara.config.floatX))
    x1 = x[1:]
    # Use inc_subtensor on it
    y = vector()
    z = inc_subtensor(x1[2:], y)
    # Use updates so that inc_subtensor can happen in place
    f = aesara.function([y], z, updates={x: z}, mode=mode_with_gpu)
    utt.assert_allclose(f([1, 2]), np.array([0, 0, 1, 2], dtype=aesara.config.floatX))
Example #12
    def test_topk_sanity(self, dtype, axis, sorted):
        x = vector(name="x", dtype=dtype)
        fn = aesara.function(
            [x], topk(x, 1, axis=axis, sorted=sorted), mode=self.mode
        )
        assert any(
            isinstance(n.op, self.op_class) for n in fn.maker.fgraph.apply_nodes
        )
        xval = np.asarray([1]).astype(dtype)
        yval = fn(xval)
        assert yval == xval
        assert yval.dtype == xval.dtype
Example #13
    def test_minimal(self):
        A = matrix()
        b = vector()

        print("building function")
        f = function([A, b], minimal(A, A, b, b, A))
        print("built")

        Aval = self.rng.standard_normal((5, 5))
        bval = np.arange(5, dtype=float)
        f(Aval, bval)
        print("done")
Example #14
    def test_composite_elemwise_float16(self):
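        # A composite Elemwise graph mixing float16 with other dtypes should
        # compile for the GPU without raising.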
        w = bvector()
        x = vector(dtype="float16")
        y = fvector()

        cz = tanh(x + aet.cast(y, "float16"))
        o = (
            cz
            - cz ** 2
            + aet.cast(x, "int16")
            + aet.cast(x, "float32")
            + aet.cast(w, "float16")
            - aet.constant(np.float16(1.0))
        )

        aesara.function([w, x, y], o, mode=mode_with_gpu)

        v = vector(dtype="uint8")
        w = vector(dtype="float16")
        x = vector(dtype="float16")
        y = vector(dtype="float16")
        z = vector(dtype="float16")

        o = aet.switch(v, mul(w, x, y), z)
        aesara.function([v, w, x, y, z], o, mode=mode_with_gpu)
Example #15
    def test_neg_idx(self):
        admat = matrix()
        advec = vector()
        alvec = lvector()
        rng = np.random.default_rng(utt.fetch_seed())
        admat_val = rng.random((3, 5)).astype(config.floatX)
        advec_val = rng.random((5,)).astype(config.floatX)
        alvec_val = rng.integers(low=0, high=5, size=3)
        # A negative target index is invalid and must raise at runtime.
        alvec_val[1] = -1
        out = CrossentropySoftmaxArgmax1HotWithBias()(admat, advec, alvec)
        f = aesara.function([admat, advec, alvec], out)
        with pytest.raises(ValueError):
            f(admat_val, advec_val, alvec_val)
Example #16
    def test_infer_shape(self):
        for ndim in [1, 3]:
            x = TensorType(config.floatX, [False] * ndim)()
            shp = (np.arange(ndim) + 1) * 3
            a = np.random.random(shp).astype(config.floatX)

            for axis in self._possible_axis(ndim):
                for dtype in ["int8", "uint8", "uint64"]:
                    r_var = scalar(dtype=dtype)
                    r = np.asarray(3, dtype=dtype)
                    if dtype in self.numpy_unsupported_dtypes:
                        r_var = vector(dtype=dtype)
                        with pytest.raises(TypeError):
                            repeat(x, r_var)
                    else:
                        self._compile_and_check(
                            [x, r_var],
                            [RepeatOp(axis=axis)(x, r_var)],
                            [a, r],
                            self.op_class,
                        )

                        r_var = vector(dtype=dtype)
                        if axis is None:
                            r = np.random.randint(1, 6, size=a.size).astype(dtype)
                        elif a.size > 0:
                            r = np.random.randint(1, 6, size=a.shape[axis]).astype(dtype)
                        else:
                            r = np.random.randint(1, 6, size=(10,)).astype(dtype)

                        self._compile_and_check(
                            [x, r_var],
                            [RepeatOp(axis=axis)(x, r_var)],
                            [a, r],
                            self.op_class,
                        )
Example #17
    def test_scalar_shapes(self):
        with pytest.raises(ValueError, match="will never match"):
            specify_shape(vector(), shape=())
        with pytest.raises(ValueError, match="will never match"):
            specify_shape(matrix(), shape=[])

        x = scalar()
        y = specify_shape(x, shape=())
        f = aesara.function([x], y, mode=self.mode)
        assert f(15) == 15

        x = vector()
        s = lscalar()
        y = specify_shape(x, shape=s)
        f = aesara.function([x, s], y, mode=self.mode)
        assert f([15], 1) == [15]

        x = vector()
        s = as_tensor_variable(1, dtype=np.int64)
        y = specify_shape(x, shape=s)
        f = aesara.function([x], y, mode=self.mode)
        assert f([15]) == [15]
Example #18
    def test_grad_int_value(self):
        w = aesara.shared(np.random.rand(10))
        b = aesara.shared(np.random.rand())
        params = [w, b]

        x = vector()
        y = scalar()

        score = w.dot(x) + b
        correct = score * y > 0

        loss = ifelse(correct, 0, 1)
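        # Building the SGD-style update pairs is the whole test: grad() must
        # work through an integer-valued ifelse; the list is discarded on purpose.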
        [(param, param - 0.5 * aesara.grad(cost=loss, wrt=param)) for param in params]
Example #19
    def test_leaf_inside_scan(self):
        x = vector("x")
        y = scalar("y")
        z = scalar("z")

        y.tag.replacement = z

        s, _ = aesara.scan(lambda x: x * y, sequences=x)
        (s2, ) = map_variables(self.replacer, [s])

        f = aesara.function([x, y, z], [s, s2])
        rval = f(x=np.array([1, 2, 3], dtype=np.float32), y=1, z=2)
        assert np.array_equal(rval, [[1, 2, 3], [2, 4, 6]])
Example #20
def test_jacobian_vector():
    x = vector()
    y = x * 2
    rng = np.random.RandomState(seed=utt.fetch_seed())

    # test when the jacobian is called with a tensor as wrt
    Jx = jacobian(y, x)
    f = aesara.function([x], Jx)
    vx = rng.uniform(size=(10, )).astype(aesara.config.floatX)
    assert np.allclose(f(vx), np.eye(10) * 2)

    # test when the jacobian is called with a tuple as wrt
    Jx = jacobian(y, (x, ))
    assert isinstance(Jx, tuple)
    f = aesara.function([x], Jx[0])
    vx = rng.uniform(size=(10, )).astype(aesara.config.floatX)
    assert np.allclose(f(vx), np.eye(10) * 2)

    # test when the jacobian is called with a list as wrt
    Jx = jacobian(y, [x])
    assert isinstance(Jx, list)
    f = aesara.function([x], Jx[0])
    vx = rng.uniform(size=(10, )).astype(aesara.config.floatX)
    assert np.allclose(f(vx), np.eye(10) * 2)

    # test when the jacobian is called with a list of two elements
    z = vector()
    y = x * z
    Js = jacobian(y, [x, z])
    f = aesara.function([x, z], Js)
    vx = rng.uniform(size=(10, )).astype(aesara.config.floatX)
    vz = rng.uniform(size=(10, )).astype(aesara.config.floatX)
    vJs = f(vx, vz)
    evx = np.zeros((10, 10))
    evz = np.zeros((10, 10))
    np.fill_diagonal(evx, vx)
    np.fill_diagonal(evz, vz)
    assert np.allclose(vJs[0], evz)
    assert np.allclose(vJs[1], evx)
Example #21
    def test_bad_number_of_shape(self):
        # Test that a wrong number of dimensions in the provided shape is rejected
        specify_shape = SpecifyShape()

        x = vector()
        shape_vec = ivector()
        xval = np.random.random((2,)).astype(config.floatX)
        with pytest.raises(AssertionError, match="will never match"):
            specify_shape(x, [])
        with pytest.raises(AssertionError, match="will never match"):
            specify_shape(x, [2, 2])

        f = aesara.function([x, shape_vec], specify_shape(x, shape_vec), mode=self.mode)
        assert isinstance(
            [n for n in f.maker.fgraph.toposort() if isinstance(n.op, SpecifyShape)][0]
            .inputs[0]
            .type,
            self.input_type,
        )
        expected = r"(Got 1 dimensions \(shape \(2,\)\), expected 0 dimensions with shape \(\).)"
        expected += r"|(Got 1 dimensions, expected 0 dimensions.)"
        with pytest.raises(AssertionError, match=expected):
            f(xval, [])
        expected = r"(Got 1 dimensions \(shape \(2,\)\), expected 2 dimensions with shape \(2, 2\).)"
        expected += r"|(SpecifyShape: Got 1 dimensions, expected 2 dimensions.)"
        with pytest.raises(AssertionError, match=expected):
            f(xval, [2, 2])

        x = matrix()
        xval = np.random.random((2, 3)).astype(config.floatX)
        for shape_ in [(), (1,), (2, 3, 4)]:
            with pytest.raises(AssertionError, match="will never match"):
                specify_shape(x, shape_)
            f = aesara.function(
                [x, shape_vec], specify_shape(x, shape_vec), mode=self.mode
            )
            assert isinstance(
                [
                    n
                    for n in f.maker.fgraph.toposort()
                    if isinstance(n.op, SpecifyShape)
                ][0]
                .inputs[0]
                .type,
                self.input_type,
            )
            s_exp = str(shape_).replace("(", r"\(").replace(")", r"\)")
            expected = rf"(Got 2 dimensions \(shape \(2, 3\)\), expected {len(shape_)} dimensions with shape {s_exp}.)"
            expected += rf"|(SpecifyShape: Got 2 dimensions, expected {len(shape_)} dimensions.)"
            with pytest.raises(AssertionError, match=expected):
                f(xval, shape_)
Example #22
    def test_wrong_rval_len1(self):
        # Test that it is not ok to return the wrong number of gradient terms

        class retOne(Op):
            __props__ = ()

            def make_node(self, *inputs):
                outputs = [vector()]
                return Apply(self, inputs, outputs)

            def grad(self, inputs, grads):
                return [inputs[0].zeros_like()]

            def perform(self, *args, **kwargs):
                raise NotImplementedError()

        i = vector()
        j = vector()
        a1 = retOne().make_node(i)
        grad_sources_inputs([(a1.out, one)], None)
        a2 = retOne().make_node(i, j)
        with pytest.raises(ValueError):
            grad_sources_inputs([(a2.out, one)], None)
Example #23
def test_gradient_scan():
    # Test for a crash when using MRG inside scan and taking the gradient
    # See https://groups.google.com/d/msg/theano-dev/UbcYyU5m-M8/UO9UgXqnQP0J
    aesara_rng = MRG_RandomStream(10)
    w = shared(np.ones(1, dtype="float32"))

    def one_step(x):
        return x + aesara_rng.uniform((1, ), dtype="float32") * w

    x = vector(dtype="float32")
    values, updates = scan(one_step, outputs_info=x, n_steps=10)
    gw = grad(aet_sum(values[-1]), w)
    f = function([x], gw)
    f(np.arange(1, dtype="float32"))
Example #24
    def get_function(self, dtype, transpose_A=False, slice_tensors=False):
        # Build a function computing self.gemv(y, alpha, A, x, beta), i.e.
        # beta * y + alpha * (A @ x), optionally with A transposed and/or
        # negative-step slices taken from all operands.
        alpha = scalar(dtype=dtype)
        beta = scalar(dtype=dtype)
        A = matrix(dtype=dtype)
        x = vector(dtype=dtype)
        y = vector(dtype=dtype)
        if transpose_A:
            A_1 = A.T
        else:
            A_1 = A
        if slice_tensors:
            A_2 = A_1[::-self.slice_step]
            x_2 = x[::-self.slice_step]
            y_2 = y[::-self.slice_step]
        else:
            A_2 = A_1
            x_2 = x
            y_2 = y
        return aesara.function(
            [alpha, A, x, beta, y],
            self.gemv(y_2, alpha, A_2, x_2, beta),
            mode=self.mode,
        )
Example #25
    def test_multiple_outputs(self):
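        # Rop should preserve the container type of its first argument:
        # a list of outputs yields a list, a tuple yields a tuple.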
        m = matrix("m")
        v = vector("v")
        m_ = matrix("m_")
        v_ = vector("v_")

        mval = self.rng.uniform(size=(3, 7)).astype(aesara.config.floatX)
        vval = self.rng.uniform(size=(7, )).astype(aesara.config.floatX)
        m_val = self.rng.uniform(size=(3, 7)).astype(aesara.config.floatX)
        v_val = self.rng.uniform(size=(7, )).astype(aesara.config.floatX)

        rop_out1 = Rop([m, v, m + v], [m, v], [m_, v_])
        assert isinstance(rop_out1, list)
        assert len(rop_out1) == 3
        rop_out2 = Rop((m, v, m + v), [m, v], [m_, v_])
        assert isinstance(rop_out2, tuple)
        assert len(rop_out2) == 3

        all_outs = []
        for o in rop_out1, rop_out2:
            all_outs.extend(o)
        f = aesara.function([m, v, m_, v_], all_outs)
        f(mval, vval, m_val, v_val)
Example #26
    def test_infer_shape(self):
        advec = vector()
        admat = matrix()
        alvec = lvector()
        rng = np.random.default_rng(utt.fetch_seed())
        advec_val = rng.random((3,)).astype(config.floatX)
        admat_val = rng.random((3, 2)).astype(config.floatX)
        alvec_val = [0, 1, 0]
        self._compile_and_check(
            [advec, admat, alvec],
            [CrossentropyCategorical1HotGrad()(advec, admat, alvec)],
            [advec_val, admat_val, alvec_val],
            CrossentropyCategorical1HotGrad,
        )
Example #27
    def test_infer_shape(self):
        admat = matrix()
        advec = vector()
        alvec = lvector()
        rng = np.random.default_rng(utt.fetch_seed())
        admat_val = rng.random((3, 5)).astype(config.floatX)
        advec_val = rng.random((5,)).astype(config.floatX)
        alvec_val = rng.integers(low=0, high=5, size=3)
        self._compile_and_check(
            [admat, advec, alvec],
            CrossentropySoftmaxArgmax1HotWithBias()(admat, advec, alvec),
            [admat_val, advec_val, alvec_val],
            CrossentropySoftmaxArgmax1HotWithBias,
        )
Example #28
    def test_softmax_optimizations_w_bias(self):
        x = matrix("x")
        b = vector("b")
        one_of_n = lvector("one_of_n")
        op = crossentropy_categorical_1hot

        fgraph = FunctionGraph([x, b, one_of_n],
                               [op(softmax_legacy(x + b), one_of_n)])
        assert fgraph.outputs[0].owner.op == op

        optdb.query(OPT_FAST_RUN).optimize(fgraph)

        assert len(fgraph.toposort()) == 1
        assert fgraph.outputs[0].owner.op == crossentropy_softmax_argmax_1hot_with_bias
Example #29
    def make_node(self, x):
        x = as_tensor_variable(x)
        assert x.ndim == 2, "The input of svd function should be a matrix."

        in_dtype = x.type.numpy_dtype
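        # The outputs use a float dtype with the same itemsize as the input.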
        out_dtype = np.dtype(f"f{in_dtype.itemsize}")

        s = vector(dtype=out_dtype)

        if self.compute_uv:
            u = matrix(dtype=out_dtype)
            vt = matrix(dtype=out_dtype)
            return Apply(self, [x], [u, s, vt])
        else:
            return Apply(self, [x], [s])
Example #30
    def test_infer_shape(self):
        admat = matrix()
        advec = vector()
        alvec = lvector()
        rng = np.random.default_rng(utt.fetch_seed())
        admat_val = rng.random((10, 5)).astype(config.floatX)
        admat_val /= admat_val.sum(axis=1).reshape(10, 1)
        advec_val = rng.random((10,)).astype(config.floatX)
        alvec_val = rng.integers(low=0, high=5, size=10)
        self._compile_and_check(
            [advec, admat, alvec],
            [CrossentropySoftmax1HotWithBiasDx()(advec, admat, alvec)],
            [advec_val, admat_val, alvec_val],
            CrossentropySoftmax1HotWithBiasDx,
        )