Example no. 1
0
    def t_gemv1(self, m_shp):
        """Check that ``vector2 + dot(matrix, vector1)`` compiles to CGemv."""
        random_gen = np.random.default_rng(unittest_tools.fetch_seed())
        vec_in = aesara.shared(
            np.array(random_gen.uniform(size=(m_shp[1],)), dtype="float32")
        )
        acc_init = np.array(random_gen.uniform(size=(m_shp[0],)), dtype="float32")
        acc = aesara.shared(acc_init)
        mat = aesara.shared(np.array(random_gen.uniform(size=m_shp), dtype="float32"))

        # Out-of-place version: the add+dot should fuse into CGemv(inplace=False).
        fn = aesara.function([], acc + at.dot(mat, vec_in), mode=self.mode)

        # The compiled function must match the NumPy reference result.
        expected = np.dot(mat.get_value(), vec_in.get_value()) + acc_init
        assert np.allclose(fn(), expected)
        ops = [node.op for node in fn.maker.fgraph.toposort()]
        assert ops == [CGemv(inplace=False)], ops

        # In-place version: updating the shared accumulator should yield
        # CGemv(inplace=True).
        fn_inplace = aesara.function(
            [], [], updates=[(acc, acc + at.dot(mat, vec_in))], mode=self.mode
        )
        fn_inplace()
        assert np.allclose(acc.get_value(), expected)
        ops = [node.op for node in fn_inplace.maker.fgraph.toposort()]
        assert ops == [CGemv(inplace=True)]

        # Repeat both checks with a matrix strided (reversed) along both axes.
        mat.set_value(mat.get_value(borrow=True)[::-1, ::-1], borrow=True)
        acc.set_value(acc_init)
        strided_expected = np.dot(mat.get_value(), vec_in.get_value()) + acc_init
        assert np.allclose(fn(), strided_expected)
        fn_inplace()
        assert np.allclose(acc.get_value(), strided_expected)
Example no. 2
0
class TestCGemvFloat64(BaseGemv, OptimizationTestMixin):
    """Run the shared Gemv test-suite against the float64 CGemv kernels."""

    dtype = "float64"
    mode = mode_blas_opt
    # Ops that the BLAS optimizer is expected to introduce into the graphs.
    gemv = CGemv(inplace=False)
    gemv_inplace = CGemv(inplace=True)

    def setup_method(self):
        # The C Gemv implementation is unavailable without BLAS link flags.
        skip_if_blas_ldflags_empty()
Example no. 3
0
    def test_optimizations_mv(self):
        """Test matrix dot vector is optimized into a single CGemv."""
        # NOTE: the docstring used to sit after the skip call, which made it a
        # dead string statement instead of the method's docstring.
        skip_if_blas_ldflags_empty()
        f = aesara.function([self.A, self.y], at.dot(self.A, self.y), mode=self.mode)

        # Assert that the dot was optimized somehow
        self.assertFunctionContains0(f, at.dot)
        self.assertFunctionContains1(f, CGemv(inplace=True))

        # Assert they produce the same output
        assert np.allclose(f(self.Aval, self.yval), np.dot(self.Aval, self.yval))
        # Test with negative strides on 2 dims
        assert np.allclose(
            f(self.Aval[::-1, ::-1], self.yval),
            np.dot(self.Aval[::-1, ::-1], self.yval),
        )
Example no. 4
0
class TestCGemvNoFlags:
    """Check ``CGemv`` numerical results when ``blas__ldflags`` is empty."""

    mode = mode_blas_opt
    gemv = CGemv(inplace=False)
    # Logical problem size: A is (M, N), x is (N,), y is (M,).
    M = 4
    N = 5
    # When slicing, buffers are over-allocated by this factor and then read
    # backwards with stride ``-slice_step`` to exercise non-contiguous inputs.
    slice_step = 3

    def get_function(self, dtype, transpose_A=False, slice_tensors=False):
        """Compile ``gemv(y, alpha, A, x, beta)`` for the given configuration.

        Returns an Aesara function taking ``(alpha, A, x, beta, y)``.
        """
        alpha = scalar(dtype=dtype)
        beta = scalar(dtype=dtype)
        A = matrix(dtype=dtype)
        x = vector(dtype=dtype)
        y = vector(dtype=dtype)
        A_1 = A.T if transpose_A else A
        if slice_tensors:
            # Negative-step slices make all three operands non-contiguous.
            A_2 = A_1[:: -self.slice_step]
            x_2 = x[:: -self.slice_step]
            y_2 = y[:: -self.slice_step]
        else:
            A_2 = A_1
            x_2 = x
            y_2 = y
        return aesara.function(
            [alpha, A, x, beta, y],
            self.gemv(y_2, alpha, A_2, x_2, beta),
            mode=self.mode,
        )

    def get_data(self, dtype, alpha, beta, transpose_A=False, slice_tensors=False):
        """Build random input arrays matching :meth:`get_function`'s graph."""
        if slice_tensors:
            # Over-allocate so slicing with ``-slice_step`` recovers the
            # logical (M, N) / (N,) / (M,) shapes.
            if transpose_A:
                A_shape = (self.N, self.M * self.slice_step)
            else:
                A_shape = (self.M * self.slice_step, self.N)
            x_shape = (self.N * self.slice_step,)
            y_shape = (self.M * self.slice_step,)
        else:
            if transpose_A:
                A_shape = (self.N, self.M)
            else:
                A_shape = (self.M, self.N)
            x_shape = (self.N,)
            y_shape = (self.M,)
        A = np.random.random(A_shape).astype(dtype)
        x = np.random.random(x_shape).astype(dtype)
        y = np.random.random(y_shape).astype(dtype)
        return (alpha, A, x, beta, y)

    def compute_ref(self, alpha, A, x, beta, y, transpose_A, slice_tensors):
        """NumPy reference for ``beta * y + alpha * dot(A, x)`` on the same views."""
        if transpose_A:
            A = A.T
        if slice_tensors:
            A = A[:: -self.slice_step]
            x = x[:: -self.slice_step]
            y = y[:: -self.slice_step]
        ref_val = alpha * np.dot(A, x)
        # BLAS semantics: when beta == 0 the y operand is ignored entirely,
        # so it must not be added to the reference either.
        if beta != 0:
            ref_val += beta * y
        return ref_val

    @aesara.config.change_flags(blas__ldflags="")
    def run_cgemv(self, dtype, ALPHA, BETA, transpose_A, slice_tensors):
        """Compile, run and verify one CGemv configuration against NumPy."""
        f = self.get_function(
            dtype, transpose_A=transpose_A, slice_tensors=slice_tensors
        )
        values = self.get_data(
            dtype, ALPHA, BETA, transpose_A=transpose_A, slice_tensors=slice_tensors
        )
        # The compiled graph must actually contain a CGemv node, otherwise
        # this test would not exercise the no-flags code path at all.
        assert any(
            isinstance(node.op, CGemv) for node in f.maker.fgraph.apply_nodes
        )
        z_val = f(*values)
        assert z_val.dtype == dtype
        assert z_val.ndim == 1
        assert z_val.shape[0] == self.M
        ref_val = self.compute_ref(*(values + (transpose_A, slice_tensors)))
        unittest_tools.assert_allclose(ref_val, z_val)

    def test_cgemv(self):
        """Exercise every combination of dtype, alpha, beta, transpose, slicing."""
        import itertools  # local import: keeps the file-level import block untouched

        configs = itertools.product(
            ("float32", "float64"),  # dtype
            (0, 1, -2),  # alpha
            (0, 1, -2),  # beta
            (False, True),  # transpose_A
            (False, True),  # slice_tensors
        )
        for dtype, alpha, beta, transpose_A, slice_tensors in configs:
            self.run_cgemv(dtype, alpha, beta, transpose_A, slice_tensors)