def test_sparseblockouter(self):
    o = ftensor4()
    x = ftensor3()
    y = ftensor3()
    xIdx = imatrix()
    yIdx = imatrix()

    out = self.outer_op(o, x, y, xIdx, yIdx)

    f = aesara.function(
        [o, x, y, xIdx, yIdx], out, on_unused_input="warn", mode=self.mode
    )

    (
        o_val,
        x_val,
        y_val,
        xIdx_val,
        yIdx_val,
    ) = self.outer_data()

    th_out = f(o_val, x_val, y_val, xIdx_val, yIdx_val)
    ref_out = self.outer_numpy(o_val, x_val, y_val, xIdx_val, yIdx_val)

    utt.assert_allclose(ref_out, th_out)
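
# The helpers `outer_op`, `outer_data`, and `outer_numpy` are defined
# elsewhere in this suite.  As a reading aid, here is a minimal sketch of
# the reference computation `outer_numpy` is assumed to implement
# (accumulate the outer product of every (x, y) block pair into the blocks
# of `o` selected by `xIdx`/`yIdx`); this sketch is an assumption, not the
# suite's actual helper:
#
#     @staticmethod
#     def outer_numpy(o, x, y, xIdx, yIdx):
#         for b in range(x.shape[0]):
#             for i in range(xIdx.shape[1]):
#                 for j in range(yIdx.shape[1]):
#                     o[xIdx[b, i], yIdx[b, j]] += np.outer(x[b, i], y[b, j])
#         return o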
def test_sparseblockgemvF(self):
    # Test the Fortran order for W (which can happen in the grad for some
    # graphs).
    b = fmatrix()
    W = ftensor4()
    h = ftensor3()
    iIdx = imatrix()
    oIdx = imatrix()

    o = self.gemv_op(
        b.take(oIdx, axis=0),
        DimShuffle((False, False, False, False), (0, 1, 3, 2))(
            at.as_tensor_variable(W)
        ),
        h,
        iIdx,
        oIdx,
    )

    f = aesara.function([W, h, iIdx, b, oIdx], o, mode=self.mode)

    W_val, h_val, iIdx_val, b_val, oIdx_val = self.gemv_data()

    th_out = f(np.swapaxes(W_val, 2, 3), h_val, iIdx_val, b_val, oIdx_val)
    ref_out = self.gemv_numpy(
        b_val.take(oIdx_val, axis=0), W_val, h_val, iIdx_val, oIdx_val
    )

    utt.assert_allclose(ref_out, th_out)
def test_outer_infershape(self):
    o = ftensor4()
    x = ftensor3()
    y = ftensor3()
    xIdx = imatrix()
    yIdx = imatrix()

    self._compile_and_check(
        [o, x, y, xIdx, yIdx],
        [self.outer_op(o, x, y, xIdx, yIdx)],
        self.outer_data(),
        self.outer_class,
    )
def test_gemv_infershape(self):
    b = fmatrix()
    W = ftensor4()
    h = ftensor3()
    iIdx = imatrix()
    oIdx = imatrix()

    self._compile_and_check(
        [W, h, iIdx, b, oIdx],
        [self.gemv_op(b.take(oIdx, axis=0), W, h, iIdx, oIdx)],
        self.gemv_data(),
        self.gemv_class,
    )
def test_dot_infershape(self):
    b = fmatrix()
    W = ftensor4()
    h = ftensor3()
    iIdx = imatrix()
    oIdx = imatrix()

    self._compile_and_check(
        [W, h, iIdx, b, oIdx],
        [sparse_block_dot(W, h, iIdx, b, oIdx)],
        self.gemv_data(),
        self.gemv_class,
    )
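
# For reference, `sparse_block_dot(W, h, iIdx, b, oIdx)` is documented as
# equivalent to a NumPy loop along these lines (shapes inferred from the
# tests: W is (iBlocks, oBlocks, iSize, oSize), h is (batch, iWin, iSize),
# b is (oBlocks, oSize), iIdx is (batch, iWin), oIdx is (batch, oWin)):
#
#     o = b.take(oIdx, axis=0)
#     for n in range(batch):
#         for j in range(oIdx.shape[1]):
#             for i in range(iIdx.shape[1]):
#                 o[n, j, :] += np.dot(h[n, i], W[iIdx[n, i], oIdx[n, j]])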
def test_grad_identity():
    # Check that the grad method of Identity correctly handles int dtypes.
    x = imatrix("x")
    # tensor_copy is Elemwise{Identity}
    y = aesara.tensor.tensor_copy(x)
    l = y.sum(dtype=aesara.config.floatX)

    aesara.gradient.grad(l, x)
def test_rebuild_strict(self):
    # Test fix for the error reported at
    # https://groups.google.com/d/topic/theano-users/BRK0UEB72XA/discussion
    w = imatrix()
    x, y = ivectors("x", "y")
    z = x * y
    f = function([w, y], z, givens=[(x, w)], rebuild_strict=False)
    z_val = f(np.ones((3, 5), dtype="int32"), np.arange(5, dtype="int32"))
    assert z_val.ndim == 2
    assert np.all(z_val == np.ones((3, 5)) * np.arange(5))
def test_sparseblockgemv_grad_shape(self):
    b = fmatrix()
    W = ftensor4()
    h = ftensor3()
    iIdx = imatrix()
    oIdx = imatrix()

    o = self.gemv_op(b.take(oIdx, axis=0), W, h, iIdx, oIdx)
    go = aesara.grad(o.sum(), [b, W, h])

    f = aesara.function([W, h, iIdx, b, oIdx], go, mode=self.mode)

    W_val, h_val, iIdx_val, b_val, oIdx_val = self.gemv_data()

    # Just make sure that it runs correctly and all the shapes are OK.
    b_g, W_g, h_g = f(W_val, h_val, iIdx_val, b_val, oIdx_val)

    assert b_g.shape == b_val.shape
    assert h_g.shape == h_val.shape
    assert W_g.shape == W_val.shape
def test_sparseblockgemv(self):
    # Compares the NumPy and Aesara versions of sparseblockgemv.
    b = fmatrix()
    W = ftensor4()
    h = ftensor3()
    iIdx = imatrix()
    oIdx = imatrix()

    o = self.gemv_op(b.take(oIdx, axis=0), W, h, iIdx, oIdx)

    f = aesara.function([W, h, iIdx, b, oIdx], o, mode=self.mode)

    W_val, h_val, iIdx_val, b_val, oIdx_val = self.gemv_data()

    th_out = f(W_val, h_val, iIdx_val, b_val, oIdx_val)
    ref_out = self.gemv_numpy(
        b_val.take(oIdx_val, axis=0), W_val, h_val, iIdx_val, oIdx_val
    )

    utt.assert_allclose(ref_out, th_out)
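
# `gemv_numpy` is likewise defined elsewhere in this suite; a minimal
# sketch of the reference it is assumed to implement (the bias rows
# selected by `oIdx` are passed in as `o`, and each input block is
# multiplied through the matching weight block).  This sketch is an
# assumption, not the suite's actual helper:
#
#     @staticmethod
#     def gemv_numpy(o, W, h, iIdx, oIdx):
#         for n in range(o.shape[0]):
#             for j in range(o.shape[1]):
#                 for i in range(h.shape[1]):
#                     o[n, j, :] += np.dot(h[n, i], W[iIdx[n, i], oIdx[n, j]])
#         return o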
def test_on_real_input(self):
    x = dvector()
    rng = np.random.default_rng(23)
    xval = rng.standard_normal(10)

    # For a real input, imag(x) is exactly 0 and real(x) is x itself.
    assert np.all(0 == aesara.function([x], imag(x))(xval))
    assert np.all(xval == aesara.function([x], real(x))(xval))

    x = imatrix()
    xval = np.asarray(rng.standard_normal((3, 3)) * 100, dtype="int32")

    assert np.all(0 == aesara.function([x], imag(x))(xval))
    assert np.all(xval == aesara.function([x], real(x))(xval))
def test_grad_int(self):
    # Tests that the gradient with respect to an integer
    # is the same as the gradient with respect to a float.
    W = matrix()
    b = vector()

    def make_grad_func(X):
        Z = dot(X, W) + b
        H = aesara.tensor.nnet.sigmoid(Z)
        cost = H.sum()
        g = grad(cost, X)
        return aesara.function([X, W, b], g, on_unused_input="ignore")

    int_func = make_grad_func(imatrix())
    # We have to use float64 as the float type to get the results to match:
    # using an integer for the input makes all the later functions use
    # float64.
    float_func = make_grad_func(matrix(dtype="float64"))

    m = 5
    d = 3
    n = 4
    rng = np.random.RandomState([2012, 9, 5])
    int_type = imatrix().dtype
    float_type = "float64"

    # np.cast was removed in NumPy 2.0; astype performs the same cast.
    X = (rng.randn(m, d) * 127.0).astype(int_type)
    W = rng.randn(d, n).astype(W.dtype)
    b = rng.randn(n).astype(b.dtype)

    int_result = int_func(X, W, b)
    float_result = float_func(X.astype(float_type), W, b)

    assert np.allclose(int_result, float_result), (int_result, float_result)