def t_gemv1(self, m_shp):
    """Check that ``v2 + dot(m, v1)`` is optimized into a single CGemv.

    Parameters
    ----------
    m_shp : tuple
        (rows, cols) shape of the matrix operand.

    Tests both the out-of-place form and the in-place (shared-variable
    update) form, asserting numeric agreement with numpy and that the
    compiled graph contains exactly one CGemv node.
    """
    rng = numpy.random.RandomState(unittest_tools.fetch_seed())
    v1 = theano.shared(
        numpy.array(rng.uniform(size=(m_shp[1], )), dtype='float32'))
    v2_orig = numpy.array(rng.uniform(size=(m_shp[0], )), dtype='float32')
    v2 = theano.shared(v2_orig)
    m = theano.shared(numpy.array(rng.uniform(size=m_shp), dtype='float32'))

    f = theano.function([], v2 + tensor.dot(m, v1), mode=self.mode)

    # Assert they produce the same output
    assert numpy.allclose(
        f(), numpy.dot(m.get_value(), v1.get_value()) + v2_orig)
    # `maker.fgraph` is the current attribute name; `maker.env` is the
    # deprecated spelling.
    topo = [n.op for n in f.maker.fgraph.toposort()]
    assert topo == [CGemv(inplace=False)], topo

    # Test the inplace version.  `updates` must be a list of
    # (shared_variable, new_value) pairs; the dict form is deprecated.
    # Bind to a new name so the first compiled function is not lost.
    g = theano.function([], [],
                        updates=[(v2, v2 + theano.dot(m, v1))],
                        mode=self.mode)

    # Assert they produce the same output
    g()
    assert numpy.allclose(
        v2.get_value(),
        numpy.dot(m.get_value(), v1.get_value()) + v2_orig)
    topo = [n.op for n in g.maker.fgraph.toposort()]
    assert topo == [CGemv(inplace=True)]
class TestCGemvFloat64(TestCase, BaseGemv, TestOptimizationMixin):
    """Instantiate the shared BaseGemv test suite for CGemv in float64."""

    dtype = 'float64'
    mode = mode_blas_opt
    gemv = CGemv(inplace=False)
    gemv_inplace = CGemv(inplace=True)

    def setUp(self):
        # These tests are only meaningful when linked against BLAS.
        skip_if_blas_ldflags_empty()
class TestCGemvFloat64(BaseGemv, OptimizationTestMixin):
    """Float64 configuration of the shared BaseGemv CGemv tests."""

    dtype = "float64"
    mode = mode_blas_opt
    gemv = CGemv(inplace=False)
    gemv_inplace = CGemv(inplace=True)

    def setup_method(self):
        # Skip unless Theano is directly linked against BLAS.
        skip_if_blas_ldflags_empty()
class TestCGemvFloat64(TestCase, BaseGemv, TestOptimizationMixin):
    """Instantiate the shared BaseGemv test suite for CGemv in float64."""

    mode = mode_blas_opt
    dtype = 'float64'
    gemv = CGemv(inplace=False)
    gemv_inplace = CGemv(inplace=True)

    def setUp(self):
        # Use the shared helper instead of re-implementing the
        # ldflags check inline, so the skip condition and message stay
        # consistent with the other CGemv test classes.
        skip_if_blas_ldflags_empty()
def t_gemv1(self, m_shp):
    """Verify that ``v2 + dot(m, v1)`` compiles down to one CGemv node.

    Exercises the out-of-place form, the in-place (shared-variable
    update) form, and then repeats the numeric checks with the matrix
    negatively strided along both dimensions.
    """
    rng = numpy.random.RandomState(unittest_tools.fetch_seed())
    # Same draw order as before: input vector, accumulator, matrix.
    vec = theano.shared(
        numpy.array(rng.uniform(size=(m_shp[1], )), dtype='float32'))
    acc_orig = numpy.array(rng.uniform(size=(m_shp[0], )), dtype='float32')
    acc = theano.shared(acc_orig)
    mat = theano.shared(
        numpy.array(rng.uniform(size=m_shp), dtype='float32'))

    f = theano.function([], acc + tensor.dot(mat, vec), mode=self.mode)

    # Compiled result must agree with the numpy reference.
    assert numpy.allclose(
        f(), numpy.dot(mat.get_value(), vec.get_value()) + acc_orig)
    nodes = [node.op for node in f.maker.fgraph.toposort()]
    assert nodes == [CGemv(inplace=False)], nodes

    # In-place version: the gemv result is written back into `acc`.
    g = theano.function([], [],
                        updates=[(acc, acc + theano.dot(mat, vec))],
                        mode=self.mode)
    g()
    assert numpy.allclose(
        acc.get_value(),
        numpy.dot(mat.get_value(), vec.get_value()) + acc_orig)
    nodes = [node.op for node in g.maker.fgraph.toposort()]
    assert nodes == [CGemv(inplace=True)]

    # Re-run both functions on a matrix strided in both dimensions.
    mat.set_value(mat.get_value(borrow=True)[::-1, ::-1], borrow=True)
    acc.set_value(acc_orig)
    assert numpy.allclose(
        f(), numpy.dot(mat.get_value(), vec.get_value()) + acc_orig)
    g()
    assert numpy.allclose(
        acc.get_value(),
        numpy.dot(mat.get_value(), vec.get_value()) + acc_orig)
def test_optimizations_mv(self):
    """Test that matrix-dot-vector is rewritten into an inplace CGemv."""
    f = theano.function([self.A, self.y],
                        theano.dot(self.A, self.y),
                        mode=self.mode)
    # Assert that the dot was optimized somehow
    self.assertFunctionContains0(f, tensor.dot)
    # Pass `inplace` by keyword: the bare positional `CGemv(True)` is
    # ambiguous and inconsistent with the other CGemv tests.
    self.assertFunctionContains1(f, CGemv(inplace=True))
    # Assert they produce the same output
    assert numpy.allclose(f(self.Aval, self.yval),
                          numpy.dot(self.Aval, self.yval))
def test_optimizations_mv(self):
    """Matrix @ vector must be optimized into an inplace CGemv."""
    fn = theano.function([self.A, self.y],
                         theano.dot(self.A, self.y),
                         mode=self.mode)
    # The raw dot op must be gone from the compiled graph...
    self.assertFunctionContains0(fn, tensor.dot)
    # ...replaced by exactly one inplace, beta-initializing CGemv.
    self.assertFunctionContains1(
        fn, CGemv(inplace=True, force_init_beta=True))
    # Numeric result agrees with numpy.
    assert numpy.allclose(fn(self.Aval, self.yval),
                          numpy.dot(self.Aval, self.yval))
    # Still correct when the matrix is negatively strided on both axes.
    flipped = self.Aval[::-1, ::-1]
    assert numpy.allclose(fn(flipped, self.yval),
                          numpy.dot(flipped, self.yval))
def test_optimizations_vm(self):
    """Test that vector-dot-matrix is rewritten into an inplace CGemv."""
    f = theano.function([self.x, self.A],
                        theano.dot(self.x, self.A),
                        mode=self.mode)
    # Assert that the dot was optimized somehow
    self.assertFunctionContains0(f, tensor.dot)
    # Pass `inplace` by keyword: the bare positional `CGemv(True)` is
    # ambiguous and inconsistent with the other CGemv tests.
    self.assertFunctionContains1(f, CGemv(inplace=True))
    # Assert they produce the same output
    assert numpy.allclose(f(self.xval, self.Aval),
                          numpy.dot(self.xval, self.Aval))
    # Test with negative strides on 2 dims
    assert numpy.allclose(f(self.xval, self.Aval[::-1, ::-1]),
                          numpy.dot(self.xval, self.Aval[::-1, ::-1]))
def test_optimizations_mv(self):
    """Test that matrix dot vector is optimized into an inplace CGemv."""
    # The docstring must be the method's first statement to actually be
    # a docstring; in the original it appeared after the skip call and
    # was a no-op string expression.
    skip_if_blas_ldflags_empty()
    f = theano.function(
        [self.A, self.y], theano.tensor.dot(self.A, self.y), mode=self.mode
    )
    # Assert that the dot was optimized somehow
    self.assertFunctionContains0(f, tensor.dot)
    self.assertFunctionContains1(f, CGemv(inplace=True))
    # Assert they produce the same output
    assert np.allclose(f(self.Aval, self.yval), np.dot(self.Aval, self.yval))
    # Test with negative strides on 2 dims
    assert np.allclose(
        f(self.Aval[::-1, ::-1], self.yval),
        np.dot(self.Aval[::-1, ::-1], self.yval),
    )
class TestCGemvNoFlags:
    """Exercise CGemv directly with ``blas.ldflags`` forced empty.

    Runs a full matrix of cases: both float dtypes, alpha/beta in
    {0, 1, -2}, transposed and non-transposed A, and negatively
    strided (sliced) operands, comparing the op's output against a
    numpy reference.
    """

    mode = mode_blas_opt
    gemv = CGemv(inplace=False)
    # M/N are the output/input vector lengths; slice_step is the
    # negative-step used to build strided views of the operands.
    M = 4
    N = 5
    slice_step = 3

    def setup_method(self):
        unittest_tools.seed_rng()

    def get_function(self, dtype, transpose_A=False, slice_tensors=False):
        # Compile y_2 <- gemv(y_2, alpha, A_2, x_2, beta), optionally
        # transposing A and/or slicing every operand with a negative
        # step so the op receives non-contiguous inputs.
        alpha = theano.tensor.scalar(dtype=dtype)
        beta = theano.tensor.scalar(dtype=dtype)
        A = theano.tensor.matrix(dtype=dtype)
        x = theano.tensor.vector(dtype=dtype)
        y = theano.tensor.vector(dtype=dtype)
        if transpose_A:
            A_1 = A.T
        else:
            A_1 = A
        if slice_tensors:
            A_2 = A_1[::-self.slice_step]
            x_2 = x[::-self.slice_step]
            y_2 = y[::-self.slice_step]
        else:
            A_2 = A_1
            x_2 = x
            y_2 = y
        return theano.function(
            [alpha, A, x, beta, y],
            self.gemv(y_2, alpha, A_2, x_2, beta),
            mode=self.mode,
        )

    def get_data(self, dtype, alpha, beta, transpose_A=False, slice_tensors=False):
        # Build random operands; when slicing, shapes are inflated by
        # slice_step so the strided views in get_function come out at
        # the expected M/N lengths.
        if slice_tensors:
            if transpose_A:
                A_shape = (self.N, self.M * self.slice_step)
            else:
                A_shape = (self.M * self.slice_step, self.N)
            x_shape = (self.N * self.slice_step, )
            y_shape = (self.M * self.slice_step, )
        else:
            if transpose_A:
                A_shape = (self.N, self.M)
            else:
                A_shape = (self.M, self.N)
            x_shape = (self.N, )
            y_shape = (self.M, )
        A = np.random.random(A_shape).astype(dtype)
        x = np.random.random(x_shape).astype(dtype)
        y = np.random.random(y_shape).astype(dtype)
        return (alpha, A, x, beta, y)

    def compute_ref(self, alpha, A, x, beta, y, transpose_A, slice_tensors):
        # Numpy reference: alpha * dot(A, x) + beta * y, after applying
        # the same transpose/slice transformations as get_function.
        if transpose_A:
            A = A.T
        if slice_tensors:
            A = A[::-self.slice_step]
            x = x[::-self.slice_step]
            y = y[::-self.slice_step]
        ref_val = alpha * np.dot(A, x)
        if beta != 0:
            ref_val += beta * y
        return ref_val

    @theano.change_flags({"blas.ldflags": ""})
    def run_cgemv(self, dtype, ALPHA, BETA, transpose_A, slice_tensors):
        # Compile and run one configuration, then check dtype, shape,
        # and numeric agreement with the reference.
        f = self.get_function(dtype,
                              transpose_A=transpose_A,
                              slice_tensors=slice_tensors)
        values = self.get_data(dtype, ALPHA, BETA,
                               transpose_A=transpose_A,
                               slice_tensors=slice_tensors)
        # The graph must contain a CGemv node even with empty ldflags.
        assert any(
            isinstance(node.op, CGemv) for node in f.maker.fgraph.apply_nodes)
        z_val = f(*values)
        assert z_val.dtype == dtype
        assert z_val.ndim == 1
        assert z_val.shape[0] == self.M
        ref_val = self.compute_ref(*(values + (transpose_A, slice_tensors)))
        unittest_tools.assert_allclose(ref_val, z_val)

    def test_cgemv(self):
        # Full cross-product of dtype, alpha/beta (including zero and
        # negative), transposition, and strided slicing.
        for dtype in ("float32", "float64"):
            for alpha in (0, 1, -2):
                for beta in (0, 1, -2):
                    for transpose_A in (False, True):
                        for slice_tensors in (False, True):
                            self.run_cgemv(
                                dtype,
                                alpha,
                                beta,
                                transpose_A,
                                slice_tensors,
                            )
class TestCGemvFloat64(TestCase, BaseGemv, TestOptimizationMixin):
    """Float64 instantiation of the shared BaseGemv CGemv test suite."""

    dtype = 'float64'
    mode = mode_blas_opt
    gemv_inplace = CGemv(inplace=True)
    gemv = CGemv(inplace=False)