Code Example #1
    def test_input_aliasing_affecting_inplace_operations(self):

        # Note: to trigger this bug with aesara rev 4586:2bc6fc7f218b,
        #        you need to make the inputs mutable (so that inplace
        #        operations are used) and to break the elemwise composition
        #        with some non-elemwise op (here dot)
        x = aesara.tensor.dvector()
        y = aesara.tensor.dvector()
        m1 = aesara.tensor.dmatrix()
        m2 = aesara.tensor.dmatrix()
        f = aesara.function(
            [
                aesara.In(x, mutable=True),
                aesara.In(y, mutable=True),
                aesara.In(m1, mutable=True),
                aesara.In(m2, mutable=True),
            ],
            aesara.dot((x * 2), m1) + aesara.dot((y * 3), m2),
        )
        # Test 1. If the same variable is given twice

        # Compute bogus values
        v = np.asarray([1, 2, 3, 4, 5], dtype="float64")
        m = np.asarray(
            [
                [1, 0, 0, 0, 0],
                [0, 1, 0, 0, 0],
                [0, 0, 1, 0, 0],
                [0, 0, 0, 1, 0],
                [0, 0, 0, 0, 1],
            ],
            dtype="float64",
        )
        bogus_vals = f(v, v, m, m)
        # Since we used inplace operations, v and m may be corrupted,
        # so we need to recreate them

        v = np.asarray([1, 2, 3, 4, 5], dtype="float64")
        m = np.asarray(
            [
                [1, 0, 0, 0, 0],
                [0, 1, 0, 0, 0],
                [0, 0, 1, 0, 0],
                [0, 0, 0, 1, 0],
                [0, 0, 0, 0, 1],
            ],
            dtype="float64",
        )
        m_copy = m.copy()
        v_copy = v.copy()
        vals = f(v, v_copy, m, m_copy)

        assert np.allclose(vals, bogus_vals)
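
The test above relies on the compiled function detecting that the same buffers were passed twice and producing the same result as the aliased ("bogus") run. The failure mode it guards against is easiest to see in plain NumPy; the sketch below is illustrative only and does not use Aesara's actual in-place rewrites:

import numpy as np

def unsafe(x, y, m1, m2):
    # Hypothetical implementation that scales its inputs in place,
    # mimicking what an unchecked inplace rewrite would do.
    x *= 2        # when y aliases x, this also corrupts y
    y *= 3
    return x @ m1 + y @ m2

v = np.asarray([1.0, 2.0, 3.0])
m = np.eye(3)
expected = (v * 2) @ m + (v * 3) @ m   # 5 * v, computed non-destructively
wrong = unsafe(v, v, m, m)             # same buffer passed twice
assert not np.allclose(wrong, expected)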
Code Example #2
    def test_partial_input_aliasing_affecting_inplace_operations(self):

        # Note: to trigger this bug with aesara rev 4586:2bc6fc7f218b,
        #        you need to make the inputs mutable (so that inplace
        #        operations are used) and to break the elemwise composition
        #        with some non-elemwise op (here dot)
        x = aesara.tensor.dvector()
        y = aesara.tensor.dvector()
        z = aesara.tensor.dvector()
        m1 = aesara.tensor.dmatrix()
        m2 = aesara.tensor.dmatrix()
        m3 = aesara.tensor.dmatrix()

        # Test 2. If variables only partially overlap
        #   more exactly, we care about the case where we have a, b, c
        #   and a shares memory with b, b shares memory with c, but
        #   c does not share memory with a

        f = aesara.function(
            [
                aesara.In(x, mutable=True),
                aesara.In(y, mutable=True),
                aesara.In(z, mutable=True),
                aesara.In(m1, mutable=True),
                aesara.In(m2, mutable=True),
                aesara.In(m3, mutable=True),
            ],
            (aesara.dot((x * 2), m1) + aesara.dot((y * 3), m2) + aesara.dot(
                (z * 4), m3)),
        )

        # Compute bogus values
        v = np.asarray([1, 2, 3, 4, 5], dtype="float64")
        m = np.asarray([[1, 0], [0, 1]], dtype="float64")
        bogus_vals = f(v[:2], v[1:3], v[2:4], m, m, m)
        # Since we used inplace operations, v and m may be corrupted,
        # so we need to recreate them

        v = np.asarray([1, 2, 3, 4, 5], dtype="float64")
        m = np.asarray([[1, 0], [0, 1]], dtype="float64")
        m_copy1 = m.copy()
        v_copy1 = v.copy()
        m_copy2 = m.copy()
        v_copy2 = v.copy()
        vals = f(v[:2], v_copy1[1:3], v_copy2[2:4], m, m_copy1, m_copy2)

        assert np.allclose(vals, bogus_vals)
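
For reference, the three vector slices passed to f above overlap only pairwise, exactly as described in the comment; a quick NumPy check (using np.shares_memory, available in modern NumPy) makes the aliasing pattern explicit:

import numpy as np

v = np.asarray([1, 2, 3, 4, 5], dtype="float64")
a, b, c = v[:2], v[1:3], v[2:4]
assert np.shares_memory(a, b)        # overlap at v[1]
assert np.shares_memory(b, c)        # overlap at v[2]
assert not np.shares_memory(a, c)    # no common element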
Code Example #3
    def test_nan_beta_0(self):
        mode = self.mode.including()
        mode.check_isfinite = False
        f = aesara.function(
            [self.A, self.x, self.y, self.a],
            self.a * self.y + aesara.dot(self.A, self.x),
            mode=mode,
        )
        Aval = np.ones((3, 1), dtype=self.dtype)
        xval = np.ones((1,), dtype=self.dtype)
        yval = float("NaN") * np.ones((3,), dtype=self.dtype)
        zval = f(Aval, xval, yval, 0)
        assert not np.isnan(zval).any()
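
The final assertion hinges on IEEE-754 arithmetic: 0 * NaN is still NaN, so a GEMV-style update beta * y + A @ x must skip the beta * y term entirely when beta == 0 rather than multiply it out. A small NumPy sketch of the arithmetic (not the actual Aesara kernel):

import numpy as np

y = np.full(3, np.nan)
A = np.ones((3, 1))
x = np.ones(1)
naive = 0.0 * y + A @ x    # NaN propagates through the multiplication
assert np.isnan(naive).all()
safe = A @ x               # dropping the zero-scaled term stays finite
assert not np.isnan(safe).any()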
Code Example #4
def matrix_power(M, n):
    r"""
    Raise a square matrix to the (integer) power n.
    This implementation uses exponentiation by squaring which is
    significantly faster than the naive implementation.
    The time complexity for exponentiation by squaring is
    :math: `\mathcal{O}((n \log M)^k)`

    Parameters
    ----------
    M : Tensor variable
    n : Python int
    """
    if n < 0:
        M = pinv(M)
        n = abs(n)

    # Shortcuts when 0 < n <= 3
    if n == 0:
        return tensor.eye(M.shape[-2])

    elif n == 1:
        return M

    elif n == 2:
        return aesara.dot(M, M)

    elif n == 3:
        return aesara.dot(aesara.dot(M, M), M)

    result = z = None

    while n > 0:
        z = M if z is None else aesara.dot(z, z)
        n, bit = divmod(n, 2)
        if bit:
            result = z if result is None else aesara.dot(result, z)

    return result
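
A quick way to sanity-check the squaring loop is to compile the expression and compare it against NumPy's reference implementation. The sketch below assumes matrix_power as defined above is importable and that the usual aesara.tensor alias is available; adjust the paths to your installation:

import numpy as np
import aesara
import aesara.tensor as at

M = at.dmatrix("M")
f = aesara.function([M], matrix_power(M, 5))   # the function defined above

Mval = np.array([[1.0, 1.0], [0.0, 1.0]])
assert np.allclose(f(Mval), np.linalg.matrix_power(Mval, 5))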
Code Example #5
    def test_optimizations_mv(self):
        """Test matrix dot vector."""
        skip_if_blas_ldflags_empty()
        f = aesara.function(
            [self.A, self.y], aesara.dot(self.A, self.y), mode=self.mode
        )

        # Assert that the dot was optimized somehow
        self.assertFunctionContains0(f, tensor.dot)
        self.assertFunctionContains1(f, CGemv(inplace=True))

        # Assert they produce the same output
        assert np.allclose(f(self.Aval, self.yval), np.dot(self.Aval, self.yval))
        # Test with negative strides on 2 dims
        assert np.allclose(
            f(self.Aval[::-1, ::-1], self.yval),
            np.dot(self.Aval[::-1, ::-1], self.yval),
        )
Code Example #6
    def t_gemv1(self, m_shp):
        """ test vector2 + dot(matrix, vector1) """
        rng = np.random.RandomState(unittest_tools.fetch_seed())
        v1 = aesara.shared(np.array(rng.uniform(size=(m_shp[1],)), dtype="float32"))
        v2_orig = np.array(rng.uniform(size=(m_shp[0],)), dtype="float32")
        v2 = aesara.shared(v2_orig)
        m = aesara.shared(np.array(rng.uniform(size=m_shp), dtype="float32"))

        f = aesara.function([], v2 + tensor.dot(m, v1), mode=self.mode)

        # Assert they produce the same output
        assert np.allclose(f(), np.dot(m.get_value(), v1.get_value()) + v2_orig)
        topo = [n.op for n in f.maker.fgraph.toposort()]
        assert topo == [CGemv(inplace=False)], topo

        # test the inplace version
        g = aesara.function(
            [], [], updates=[(v2, v2 + aesara.dot(m, v1))], mode=self.mode
        )

        # Assert they produce the same output
        g()
        assert np.allclose(
            v2.get_value(), np.dot(m.get_value(), v1.get_value()) + v2_orig
        )
        topo = [n.op for n in g.maker.fgraph.toposort()]
        assert topo == [CGemv(inplace=True)]

        # Do the same tests with a matrix with strides in both dimensions
        m.set_value(m.get_value(borrow=True)[::-1, ::-1], borrow=True)
        v2.set_value(v2_orig)
        assert np.allclose(f(), np.dot(m.get_value(), v1.get_value()) + v2_orig)
        g()
        assert np.allclose(
            v2.get_value(), np.dot(m.get_value(), v1.get_value()) + v2_orig
        )
Code Example #7
        def test_specify_shape_inplace(self):
            # test that specify_shape doesn't break inserting an inplace op

            dtype = self.dtype
            if dtype is None:
                dtype = aesara.config.floatX

            rng = np.random.RandomState(utt.fetch_seed())
            a = np.asarray(rng.uniform(1, 2, [40, 40]), dtype=dtype)
            a = self.cast_value(a)
            a_shared = self.shared_constructor(a)
            b = np.asarray(rng.uniform(1, 2, [40, 40]), dtype=dtype)
            b = self.cast_value(b)
            b_shared = self.shared_constructor(b)
            s = np.zeros((40, 40), dtype=dtype)
            s = self.cast_value(s)
            s_shared = self.shared_constructor(s)
            f = aesara.function([],
                                updates=[
                                    (s_shared,
                                     aesara.dot(a_shared, b_shared) + s_shared)
                                ])
            topo = f.maker.fgraph.toposort()
            f()
            # [Gemm{inplace}(<TensorType(float64, matrix)>, 0.01, <TensorType(float64, matrix)>, <TensorType(float64, matrix)>, 2e-06)]
            if aesara.config.mode != "FAST_COMPILE":
                assert (sum([
                    node.op.__class__.__name__
                    in ["Gemm", "GpuGemm", "StructuredDot"] for node in topo
                ]) == 1)
                assert all(node.op == tensor.blas.gemm_inplace for node in topo
                           if isinstance(node.op, tensor.blas.Gemm))
                assert all(node.op.inplace for node in topo
                           if node.op.__class__.__name__ == "GpuGemm")
            # There is no inplace gemm for sparse
            # assert all(node.op.inplace for node in topo if node.op.__class__.__name__ == "StructuredDot")
            s_shared_specify = tensor.specify_shape(
                s_shared,
                s_shared.get_value(borrow=True).shape)

            # now test with the specify shape op in the output
            f = aesara.function(
                [],
                s_shared.shape,
                updates=[(s_shared,
                          aesara.dot(a_shared, b_shared) + s_shared_specify)],
            )
            topo = f.maker.fgraph.toposort()
            shp = f()
            assert np.all(shp == (40, 40))
            if aesara.config.mode != "FAST_COMPILE":
                assert (sum([
                    node.op.__class__.__name__
                    in ["Gemm", "GpuGemm", "StructuredDot"] for node in topo
                ]) == 1)
                assert all(node.op == tensor.blas.gemm_inplace for node in topo
                           if isinstance(node.op, tensor.blas.Gemm))
                assert all(node.op.inplace for node in topo
                           if node.op.__class__.__name__ == "GpuGemm")
            # now test with the specify shape op in the inputs and outputs
            a_shared = tensor.specify_shape(
                a_shared,
                a_shared.get_value(borrow=True).shape)
            b_shared = tensor.specify_shape(
                b_shared,
                b_shared.get_value(borrow=True).shape)

            f = aesara.function(
                [],
                s_shared.shape,
                updates=[(s_shared,
                          aesara.dot(a_shared, b_shared) + s_shared_specify)],
            )
            topo = f.maker.fgraph.toposort()
            shp = f()
            assert np.all(shp == (40, 40))
            if aesara.config.mode != "FAST_COMPILE":
                assert (sum([
                    node.op.__class__.__name__
                    in ["Gemm", "GpuGemm", "StructuredDot"] for node in topo
                ]) == 1)
                assert all(node.op == tensor.blas.gemm_inplace for node in topo
                           if isinstance(node.op, tensor.blas.Gemm))
                assert all(node.op.inplace for node in topo
                           if node.op.__class__.__name__ == "GpuGemm")