Example #1
def test_qr_modes():
    rng = np.random.RandomState(utt.fetch_seed())

    A = tensor.matrix("A", dtype=aesara.config.floatX)
    a = rng.rand(4, 4).astype(aesara.config.floatX)

    f = function([A], qr(A))
    t_qr = f(a)
    n_qr = np.linalg.qr(a)
    assert _allclose(n_qr, t_qr)

    for mode in ["reduced", "r", "raw"]:
        f = function([A], qr(A, mode))
        t_qr = f(a)
        n_qr = np.linalg.qr(a, mode)
        if isinstance(n_qr, (list, tuple)):
            assert _allclose(n_qr[0], t_qr[0])
            assert _allclose(n_qr[1], t_qr[1])
        else:
            assert _allclose(n_qr, t_qr)

    try:
        n_qr = np.linalg.qr(a, "complete")
        f = function([A], qr(A, "complete"))
        t_qr = f(a)
        assert _allclose(n_qr, t_qr)
    except TypeError as e:
        assert "name 'complete' is not defined" in str(e)
Example #2
    def test_svd(self):
        A = tensor.matrix("A", dtype=self.dtype)
        U, S, VT = svd(A)
        fn = function([A], [U, S, VT])
        a = self.rng.rand(4, 4).astype(self.dtype)
        n_u, n_s, n_vt = np.linalg.svd(a)
        t_u, t_s, t_vt = fn(a)

        assert _allclose(n_u, t_u)
        assert _allclose(n_s, t_s)
        assert _allclose(n_vt, t_vt)

        fn = function([A], svd(A, compute_uv=False))
        t_s = fn(a)
        assert _allclose(n_s, t_s)
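Beyond the elementwise comparison above, the SVD factors satisfy a reconstruction identity; a minimal NumPy-only sketch (not part of the original test):

import numpy as np

a = np.random.rand(4, 4)
u, s, vt = np.linalg.svd(a)
# the singular values come back as a vector; re-embed them to reconstruct a
assert np.allclose(u @ np.diag(s) @ vt, a)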
Example #3
    def test_inverse_correctness(self):

        r = self.rng.randn(4, 4).astype(aesara.config.floatX)

        x = tensor.matrix()
        xi = self.op(x)

        ri = function([x], xi)(r)
        assert ri.shape == r.shape
        assert ri.dtype == r.dtype

        rir = np.dot(ri, r)
        rri = np.dot(r, ri)

        assert _allclose(np.identity(4), rir), rir
        assert _allclose(np.identity(4), rri), rri
Example #4
    def test_empty_elemwise(self):
        x = aesara.shared(np.random.rand(0, 6).astype(config.floatX), "x")

        # should work
        z = (x + 2) * 3
        assert hasattr(z.tag, "test_value")
        f = aesara.function([], z)
        assert _allclose(f(), z.tag.test_value)
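This and the later test-value examples (test_constant, test_shared, test_variable_only, test_string_var) only attach a `test_value` to intermediate results when test-value computation is enabled; a minimal sketch of the configuration they assume, using the `compute_test_value` flag as exposed by `aesara.config.change_flags`:

import numpy as np
import aesara
import aesara.tensor as tt

# enable eager test-value computation, as these tests assume
with aesara.config.change_flags(compute_test_value="raise"):
    x = tt.matrix("x")
    x.tag.test_value = np.random.rand(3, 4).astype(aesara.config.floatX)
    z = (x + 2) * 3
    print(z.tag.test_value.shape)  # (3, 4), computed eagerly from the test values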
Example #5
def test_tensorsolve():
    rng = np.random.RandomState(utt.fetch_seed())

    A = tensor.tensor4("A", dtype=aesara.config.floatX)
    B = tensor.matrix("B", dtype=aesara.config.floatX)
    X = tensorsolve(A, B)
    fn = function([A, B], [X])

    # slightly modified example from np.linalg.tensorsolve docstring
    a = np.eye(2 * 3 * 4).astype(aesara.config.floatX)
    a.shape = (2 * 3, 4, 2, 3 * 4)
    b = rng.rand(2 * 3, 4).astype(aesara.config.floatX)

    n_x = np.linalg.tensorsolve(a, b)
    t_x = fn(a, b)
    assert _allclose(n_x, t_x)

    # check the type upcast: float32 combined with float64
    C = tensor.tensor4("C", dtype="float32")
    D = tensor.matrix("D", dtype="float64")
    Y = tensorsolve(C, D)
    fn = function([C, D], [Y])

    c = np.eye(2 * 3 * 4, dtype="float32")
    c.shape = (2 * 3, 4, 2, 3 * 4)
    d = rng.rand(2 * 3, 4).astype("float64")
    n_y = np.linalg.tensorsolve(c, d)
    t_y = fn(c, d)
    assert _allclose(n_y, t_y)
    assert n_y.dtype == Y.dtype

    # check the type upcast: int32 combined with float64
    E = tensor.tensor4("E", dtype="int32")
    F = tensor.matrix("F", dtype="float64")
    Z = tensorsolve(E, F)
    fn = function([E, F], [Z])

    e = np.eye(2 * 3 * 4, dtype="int32")
    e.shape = (2 * 3, 4, 2, 3 * 4)
    f = rng.rand(2 * 3, 4).astype("float64")
    n_z = np.linalg.tensorsolve(e, f)
    t_z = fn(e, f)
    assert _allclose(n_z, t_z)
    assert n_z.dtype == Z.dtype
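For reference, the equation that `tensorsolve` solves can be stated with `tensordot`; a NumPy-only sketch (not part of the original test) using the same shapes as above:

import numpy as np

a = np.eye(2 * 3 * 4).reshape(2 * 3, 4, 2, 3 * 4)
b = np.random.rand(2 * 3, 4)
x = np.linalg.tensorsolve(a, b)
# tensorsolve finds x such that contracting a with x over x.ndim axes gives b
assert np.allclose(np.tensordot(a, x, axes=x.ndim), b)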
Example #6
def test_rop_lop():
    mx = tensor.matrix("mx")
    mv = tensor.matrix("mv")
    v = tensor.vector("v")
    y = matrix_inverse(mx).sum(axis=0)

    yv = tensor.Rop(y, mx, mv)
    rop_f = function([mx, mv], yv)

    sy, _ = aesara.scan(
        lambda i, y, x, v: (tensor.grad(y[i], x) * v).sum(),
        sequences=tensor.arange(y.shape[0]),
        non_sequences=[y, mx, mv],
    )
    scan_f = function([mx, mv], sy)

    rng = np.random.RandomState(utt.fetch_seed())
    vx = np.asarray(rng.randn(4, 4), aesara.config.floatX)
    vv = np.asarray(rng.randn(4, 4), aesara.config.floatX)

    v1 = rop_f(vx, vv)
    v2 = scan_f(vx, vv)

    assert _allclose(v1, v2), "ROP mismatch: {} {}".format(v1, v2)

    raised = False
    try:
        tensor.Rop(aesara.clone(y, replace={mx: break_op(mx)}), mx, mv)
    except ValueError:
        raised = True
    if not raised:
        raise Exception("Op did not raised an error even though the function"
                        " is not differentiable")

    vv = np.asarray(rng.uniform(size=(4, )), aesara.config.floatX)
    yv = tensor.Lop(y, mx, v)
    lop_f = function([mx, v], yv)

    sy = tensor.grad((v * y).sum(), mx)
    scan_f = function([mx, v], sy)

    v1 = lop_f(vx, vv)
    v2 = scan_f(vx, vv)
    assert _allclose(v1, v2), "LOP mismatch: {} {}".format(v1, v2)
Example #7
    def test_eval(self):
        A = self.A
        Ai = tensorinv(A)
        n_ainv = np.linalg.tensorinv(self.a)
        tf_a = function([A], [Ai])
        t_ainv = tf_a(self.a)
        assert _allclose(n_ainv, t_ainv)

        B = self.B
        Bi = tensorinv(B)
        Bi1 = tensorinv(B, ind=1)
        n_binv = np.linalg.tensorinv(self.b)
        n_binv1 = np.linalg.tensorinv(self.b1, ind=1)
        tf_b = function([B], [Bi])
        tf_b1 = function([B], [Bi1])
        t_binv = tf_b(self.b)
        t_binv1 = tf_b1(self.b1)
        assert _allclose(t_binv, n_binv)
        assert _allclose(t_binv1, n_binv1)
Example #8
    def test_constant(self):
        x = tt.constant(np.random.rand(2, 3), dtype=config.floatX)
        y = aesara.shared(np.random.rand(3, 6).astype(config.floatX), "y")

        # should work
        z = tt.dot(x, y)
        assert hasattr(z.tag, "test_value")
        f = aesara.function([], z)
        assert _allclose(f(), z.tag.test_value)

        # this should fail with a shape mismatch
        x = tt.constant(np.random.rand(2, 4), dtype=config.floatX)
        with pytest.raises(ValueError):
            tt.dot(x, y)
Example #9
    def test_shared(self):
        x = tt.matrix("x")
        x.tag.test_value = np.random.rand(3, 4).astype(config.floatX)
        y = aesara.shared(np.random.rand(4, 6).astype(config.floatX), "y")

        # should work
        z = tt.dot(x, y)
        assert hasattr(z.tag, "test_value")
        f = aesara.function([x], z)
        assert _allclose(f(x.tag.test_value), z.tag.test_value)

        # this should fail with a shape mismatch
        y.set_value(np.random.rand(5, 6).astype(config.floatX))
        with pytest.raises(ValueError):
            tt.dot(x, y)
Example #10
def test_matrix_dot():
    rng = np.random.RandomState(utt.fetch_seed())
    n = rng.randint(4) + 2
    rs = []
    xs = []
    for k in range(n):
        rs += [rng.randn(4, 4).astype(aesara.config.floatX)]
        xs += [tensor.matrix()]
    sol = matrix_dot(*xs)

    aesara_sol = function(xs, sol)(*rs)
    numpy_sol = rs[0]
    for r in rs[1:]:
        numpy_sol = np.dot(numpy_sol, r)

    assert _allclose(numpy_sol, aesara_sol)
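The NumPy reference loop above is simply a left-fold of `np.dot`; an equivalent one-liner, for comparison (illustrative only):

from functools import reduce

import numpy as np

rs = [np.random.rand(4, 4) for _ in range(3)]
chained = np.dot(np.dot(rs[0], rs[1]), rs[2])   # what the loop above builds
assert np.allclose(reduce(np.dot, rs), chained)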
Example #11
def test_pseudoinverse_correctness():
    rng = np.random.RandomState(utt.fetch_seed())
    d1 = rng.randint(4) + 2
    d2 = rng.randint(4) + 2
    r = rng.randn(d1, d2).astype(aesara.config.floatX)

    x = tensor.matrix()
    xi = pinv(x)

    ri = function([x], xi)(r)
    assert ri.shape[0] == r.shape[1]
    assert ri.shape[1] == r.shape[0]
    assert ri.dtype == r.dtype
    # Note that the pseudoinverse can be quite imprecise, so we compare
    # the result with what np.linalg.pinv returns
    assert _allclose(ri, np.linalg.pinv(r))
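For a non-square input there is no exact inverse, which is why the test checks shapes and compares against `np.linalg.pinv`; the defining Moore-Penrose identities, as a NumPy-only sketch (not part of the original test):

import numpy as np

r = np.random.randn(5, 3)
ri = np.linalg.pinv(r)                # shape (3, 5), the transposed shape asserted above
assert np.allclose(r @ ri @ r, r)     # A A+ A == A
assert np.allclose(ri @ r @ ri, ri)   # A+ A A+ == A+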
Example #12
    def test_variable_only(self):
        x = tt.matrix("x")
        x.tag.test_value = np.random.rand(3, 4).astype(config.floatX)
        y = tt.matrix("y")
        y.tag.test_value = np.random.rand(4, 5).astype(config.floatX)

        # should work
        z = tt.dot(x, y)
        assert hasattr(z.tag, "test_value")
        f = aesara.function([x, y], z)
        assert _allclose(f(x.tag.test_value, y.tag.test_value),
                         z.tag.test_value)

        # this should fail with a shape mismatch
        y.tag.test_value = np.random.rand(6, 5).astype(config.floatX)
        with pytest.raises(ValueError):
            tt.dot(x, y)
Example #13
    def test_string_var(self):
        x = tt.matrix("x")
        x.tag.test_value = np.random.rand(3, 4).astype(config.floatX)
        y = tt.matrix("y")
        y.tag.test_value = np.random.rand(4, 5).astype(config.floatX)

        z = aesara.shared(np.random.rand(5, 6).astype(config.floatX))

        # should work
        out = tt.dot(tt.dot(x, y), z)
        assert hasattr(out.tag, "test_value")
        tf = aesara.function([x, y], out)
        assert _allclose(tf(x.tag.test_value, y.tag.test_value),
                         out.tag.test_value)

        def f(x, y, z):
            return tt.dot(tt.dot(x, y), z)

        # this should fail with a shape mismatch
        z.set_value(np.random.rand(7, 6).astype(config.floatX))
        with pytest.raises(ValueError):
            f(x, y, z)
Example #14
    def test_numpy_compare(self):
        rng = np.random.RandomState(utt.fetch_seed())

        M = tensor.matrix("A", dtype=aesara.config.floatX)
        V = tensor.vector("V", dtype=aesara.config.floatX)

        a = rng.rand(4, 4).astype(aesara.config.floatX)
        b = rng.rand(4).astype(aesara.config.floatX)

        A = (
            [None, "fro", "inf", "-inf", 1, -1, None, "inf", "-inf", 0, 1, -1, 2, -2],
            [M, M, M, M, M, M, V, V, V, V, V, V, V, V],
            [a, a, a, a, a, a, b, b, b, b, b, b, b, b],
            [None, "fro", inf, -inf, 1, -1, None, inf, -inf, 0, 1, -1, 2, -2],
        )

        for i in range(0, 14):
            f = function([A[1][i]], norm(A[1][i], A[0][i]))
            t_n = f(A[2][i])
            n_n = np.linalg.norm(A[2][i], A[3][i])
            assert _allclose(n_n, t_n)
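Note that the first and last lists above differ only in how infinity is spelled: the Aesara `norm` call receives the strings "inf" / "-inf" while NumPy gets the floats `inf` / `-inf`. A NumPy-only reminder of a few of the orders being exercised (illustrative only):

import numpy as np

a = np.random.rand(4, 4)
b = np.random.rand(4)
np.linalg.norm(a)             # Frobenius norm (the matrix default)
np.linalg.norm(a, np.inf)     # maximum absolute row sum
np.linalg.norm(b, 0)          # number of nonzero entries
np.linalg.norm(b, -np.inf)    # smallest absolute value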
Example #15
    def validate(self, image_shape, filter_shape, out_dim, verify_grad=True):

        image_dim = len(image_shape)
        filter_dim = len(filter_shape)
        input = tt.TensorType("float64", [False] * image_dim)()
        filters = tt.TensorType("float64", [False] * filter_dim)()

        bsize = image_shape[0]
        if image_dim != 3:
            bsize = 1
        nkern = filter_shape[0]
        if filter_dim != 3:
            nkern = 1

        # AESARA IMPLEMENTATION ############
        # we create a symbolic function so that verify_grad can work
        def sym_conv2d(input, filters):
            return conv.conv2d(input, filters)

        output = sym_conv2d(input, filters)
        assert output.ndim == out_dim
        aesara_conv = aesara.function([input, filters], output)

        # initialize input and compute result
        image_data = np.random.random(image_shape)
        filter_data = np.random.random(filter_shape)
        aesara_output = aesara_conv(image_data, filter_data)

        # REFERENCE IMPLEMENTATION ############
        out_shape2d = np.array(image_shape[-2:]) - np.array(
            filter_shape[-2:]) + 1
        ref_output = np.zeros(tuple(out_shape2d))

        # reshape as 3D input tensors to make life easier
        image_data3d = image_data.reshape((bsize, ) + image_shape[-2:])
        filter_data3d = filter_data.reshape((nkern, ) + filter_shape[-2:])
        # reshape aesara output as 4D to make life easier
        aesara_output4d = aesara_output.reshape(
            (bsize, nkern) + aesara_output.shape[-2:]
        )

        # loop over mini-batches (if required)
        for b in range(bsize):

            # loop over filters (if required)
            for k in range(nkern):

                image2d = image_data3d[b, :, :]
                filter2d = filter_data3d[k, :, :]
                output2d = np.zeros(ref_output.shape)
                for row in range(ref_output.shape[0]):
                    for col in range(ref_output.shape[1]):
                        output2d[row, col] += (
                            image2d[row:row + filter2d.shape[0],
                                    col:col + filter2d.shape[1]]
                            * filter2d[::-1, ::-1]
                        ).sum()

                assert _allclose(aesara_output4d[b, k, :, :], output2d)

        # TEST GRADIENT ############
        if verify_grad:
            utt.verify_grad(sym_conv2d, [image_data, filter_data])
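The reference loop in this example, each image patch multiplied by the flipped kernel, is plain 2D convolution in "valid" mode; a hedged cross-check using SciPy, which the test itself does not use:

import numpy as np
from scipy.signal import convolve2d

image = np.random.random((8, 8))
kern = np.random.random((5, 5))

ref = np.zeros((4, 4))
for row in range(4):
    for col in range(4):
        ref[row, col] = (image[row:row + 5, col:col + 5] * kern[::-1, ::-1]).sum()

assert np.allclose(ref, convolve2d(image, kern, mode="valid"))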
Example #16
    def validate(
        self,
        image_shape,
        filter_shape,
        border_mode="valid",
        subsample=(1, 1),
        N_image_shape=None,
        N_filter_shape=None,
        input=None,
        filters=None,
        unroll_batch=None,
        unroll_kern=None,
        unroll_patch=None,
        verify_grad=True,
        should_raise=False,
    ):
        """
        :param image_shape: The constant shape info passed to conv2d.
        :param filter_shape: The constant shape info passed to conv2d.

        :param N_image_shape: None(default to image_shape) or tuple of
                              4 elements with the shape of the input image

        :param N_filter_shape: None(default to filter_shape) or tuple
                               of 4 elements with the shape of the
                               input filter

        """
        if N_image_shape is None:
            N_image_shape = [
                tt.get_scalar_constant_value(tt.as_tensor_variable(x))
                for x in image_shape
            ]
        if N_filter_shape is None:
            N_filter_shape = [
                tt.get_scalar_constant_value(tt.as_tensor_variable(x))
                for x in filter_shape
            ]

        if input is None:
            input = self.input
        if filters is None:
            filters = self.filters

        # AESARA IMPLEMENTATION

        # we create a symbolic function so that verify_grad can work
        def sym_conv2d(input, filters):
            # define aesara graph and function
            input.name = "input"
            filters.name = "filters"
            rval = conv.conv2d(
                input,
                filters,
                image_shape,
                filter_shape,
                border_mode,
                subsample,
                unroll_batch=unroll_batch,
                unroll_kern=unroll_kern,
                unroll_patch=unroll_patch,
            )
            rval.name = "conv_output"
            return rval

        output = sym_conv2d(input, filters)
        output.name = "conv2d({},{})".format(input.name, filters.name)
        aesara_conv = aesara.function([input, filters], output, mode=self.mode)

        # initialize input and compute result
        image_data = np.random.random(N_image_shape).astype(self.dtype)
        filter_data = np.random.random(N_filter_shape).astype(self.dtype)
        try:
            aesara_output = aesara_conv(image_data, filter_data)
        except ValueError:
            if not should_raise:
                raise
            return
        else:
            if should_raise:
                raise Exception("ConvOp should have generated an error")

        # REFERENCE IMPLEMENTATION
        s = 1.0
        orig_image_data = image_data
        if border_mode != "full":
            s = -1.0
        out_shape2d = (np.array(N_image_shape[-2:]) +
                       s * np.array(N_filter_shape[-2:]) - s)
        out_shape2d = np.ceil(out_shape2d / np.array(subsample))
        # avoid numpy deprecation
        out_shape2d = out_shape2d.astype("int32")
        out_shape = (N_image_shape[0], N_filter_shape[0]) + tuple(out_shape2d)
        ref_output = np.zeros(out_shape)

        # loop over output feature maps
        ref_output.fill(0)
        if border_mode == "full":
            image_data2 = np.zeros((
                N_image_shape[0],
                N_image_shape[1],
                N_image_shape[2] + 2 * N_filter_shape[2] - 2,
                N_image_shape[3] + 2 * N_filter_shape[3] - 2,
            ))
            image_data2[
                :,
                :,
                N_filter_shape[2] - 1:N_filter_shape[2] - 1 + N_image_shape[2],
                N_filter_shape[3] - 1:N_filter_shape[3] - 1 + N_image_shape[3],
            ] = image_data
            image_data = image_data2
            N_image_shape = image_data.shape
        for bb in range(N_image_shape[0]):
            for nn in range(N_filter_shape[0]):
                for im0 in range(N_image_shape[1]):
                    filter2d = filter_data[nn, im0, :, :]
                    image2d = image_data[bb, im0, :, :]
                    for row in range(ref_output.shape[2]):
                        irow = row * subsample[0]  # image row
                        for col in range(ref_output.shape[3]):
                            icol = col * subsample[1]  # image col
                            ref_output[bb, nn, row, col] += (
                                image2d[irow:irow + N_filter_shape[2],
                                        icol:icol + N_filter_shape[3]]
                                * filter2d[::-1, ::-1]
                            ).sum()

        assert _allclose(aesara_output, ref_output)

        # TEST GRADIENT
        if verify_grad:
            utt.verify_grad(sym_conv2d, [orig_image_data, filter_data])
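For reference, the output-shape arithmetic above (`s` is +1 for "full" and -1 otherwise) reduces to the familiar sizes `img - filt + 1` for "valid" and `img + filt - 1` for "full", divided by the subsample stride and rounded up. A small worked sketch with a hypothetical helper name:

import numpy as np

# hypothetical helper distilled from the shape arithmetic in the test above
def conv_out_shape2d(image_hw, filter_hw, border_mode="valid", subsample=(1, 1)):
    s = 1 if border_mode == "full" else -1
    out = np.array(image_hw) + s * np.array(filter_hw) - s
    return tuple(int(np.ceil(o / ss)) for o, ss in zip(out, subsample))

print(conv_out_shape2d((8, 8), (5, 5)))                      # (4, 4)
print(conv_out_shape2d((8, 8), (5, 5), border_mode="full"))  # (12, 12)
print(conv_out_shape2d((8, 8), (5, 5), subsample=(2, 2)))    # (2, 2)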