def test_sparseblockouter(self):
    """Check the sparse-block outer op against its NumPy reference.

    Builds symbolic inputs, compiles the op with ``self.mode``, and
    compares the compiled result to ``self.outer_numpy`` on the data
    provided by ``self.outer_data``.
    """
    o_sym = tensor.ftensor4()
    x_sym = tensor.ftensor3()
    y_sym = tensor.ftensor3()
    xIdx_sym = tensor.imatrix()
    yIdx_sym = tensor.imatrix()

    result = self.outer_op(o_sym, x_sym, y_sym, xIdx_sym, yIdx_sym)
    fn = aesara.function(
        [o_sym, x_sym, y_sym, xIdx_sym, yIdx_sym],
        result,
        on_unused_input="warn",
        mode=self.mode,
    )

    o_val, x_val, y_val, xIdx_val, yIdx_val = self.outer_data()
    got = fn(o_val, x_val, y_val, xIdx_val, yIdx_val)
    expected = self.outer_numpy(o_val, x_val, y_val, xIdx_val, yIdx_val)
    utt.assert_allclose(expected, got)
def test_sparseblockgemvF(self):
    """Exercise the gemv op with a Fortran-ordered W.

    A Fortran-order (transposed-strides) W can arise in the gradient of
    some graphs, so we feed W through a DimShuffle that swaps its last
    two axes and pass in a value with those axes pre-swapped, then
    compare against the NumPy reference on the original W.
    """
    b = tensor.fmatrix()
    W = tensor.ftensor4()
    h = tensor.ftensor3()
    iIdx = tensor.imatrix()
    oIdx = tensor.imatrix()

    # Swap the last two axes symbolically so the runtime value can be
    # supplied in Fortran order.
    W_shuffled = tensor.DimShuffle((False, False, False, False), (0, 1, 3, 2))(
        tensor.as_tensor_variable(W)
    )
    out = self.gemv_op(b.take(oIdx, axis=0), W_shuffled, h, iIdx, oIdx)
    fn = aesara.function([W, h, iIdx, b, oIdx], out, mode=self.mode)

    W_val, h_val, iIdx_val, b_val, oIdx_val = self.gemv_data()
    got = fn(np.swapaxes(W_val, 2, 3), h_val, iIdx_val, b_val, oIdx_val)
    expected = self.gemv_numpy(
        b_val.take(oIdx_val, axis=0), W_val, h_val, iIdx_val, oIdx_val
    )
    utt.assert_allclose(expected, got)
def test_outer_infershape(self):
    """Verify shape inference for the sparse-block outer op."""
    o_sym = tensor.ftensor4()
    x_sym = tensor.ftensor3()
    y_sym = tensor.ftensor3()
    xIdx_sym = tensor.imatrix()
    yIdx_sym = tensor.imatrix()

    inputs = [o_sym, x_sym, y_sym, xIdx_sym, yIdx_sym]
    self._compile_and_check(
        inputs,
        [self.outer_op(*inputs)],
        self.outer_data(),
        self.outer_class,
    )
def test_gemv_infershape(self):
    """Verify shape inference for the sparse-block gemv op."""
    b = tensor.fmatrix()
    W = tensor.ftensor4()
    h = tensor.ftensor3()
    iIdx = tensor.imatrix()
    oIdx = tensor.imatrix()

    out = self.gemv_op(b.take(oIdx, axis=0), W, h, iIdx, oIdx)
    self._compile_and_check(
        [W, h, iIdx, b, oIdx],
        [out],
        self.gemv_data(),
        self.gemv_class,
    )
def test_dot_infershape(self):
    """Verify shape inference for ``sparse_block_dot``."""
    b = tensor.fmatrix()
    W = tensor.ftensor4()
    h = tensor.ftensor3()
    iIdx = tensor.imatrix()
    oIdx = tensor.imatrix()

    out = sparse_block_dot(W, h, iIdx, b, oIdx)
    self._compile_and_check(
        [W, h, iIdx, b, oIdx],
        [out],
        self.gemv_data(),
        self.gemv_class,
    )
def test_sparseblockgemv_grad_shape(self):
    """Smoke-test the gradient of the gemv op.

    Only checks that the compiled gradient runs and that each gradient
    has the same shape as the value it differentiates with respect to.
    """
    b = tensor.fmatrix()
    W = tensor.ftensor4()
    h = tensor.ftensor3()
    iIdx = tensor.imatrix()
    oIdx = tensor.imatrix()

    out = self.gemv_op(b.take(oIdx, axis=0), W, h, iIdx, oIdx)
    grads = aesara.grad(out.sum(), [b, W, h])
    fn = aesara.function([W, h, iIdx, b, oIdx], grads, mode=self.mode)

    W_val, h_val, iIdx_val, b_val, oIdx_val = self.gemv_data()

    # Just make sure it runs correctly and all the shapes are ok.
    b_g, W_g, h_g = fn(W_val, h_val, iIdx_val, b_val, oIdx_val)
    assert b_g.shape == b_val.shape
    assert h_g.shape == h_val.shape
    assert W_g.shape == W_val.shape
def test_sparseblockgemv(self):
    """Compare the Aesara sparse-block gemv against the NumPy version."""
    b = tensor.fmatrix()
    W = tensor.ftensor4()
    h = tensor.ftensor3()
    iIdx = tensor.imatrix()
    oIdx = tensor.imatrix()

    out = self.gemv_op(b.take(oIdx, axis=0), W, h, iIdx, oIdx)
    fn = aesara.function([W, h, iIdx, b, oIdx], out, mode=self.mode)

    W_val, h_val, iIdx_val, b_val, oIdx_val = self.gemv_data()
    got = fn(W_val, h_val, iIdx_val, b_val, oIdx_val)
    expected = self.gemv_numpy(
        b_val.take(oIdx_val, axis=0), W_val, h_val, iIdx_val, oIdx_val
    )
    utt.assert_allclose(expected, got)
def test_infer_shape(self):
    """Verify shape inference for ``images2neibs`` across modes.

    Runs ``_compile_and_check`` for a range of image shapes,
    neighbourhood shapes, and border modes. The original code repeated
    the same five-line check nine times; the table-driven helper below
    performs the identical sequence of checks (including reusing one
    symbolic input per image shape, as the original did).
    """

    def _check(shape, cases):
        # One fresh symbolic ftensor4 per image shape, shared by all
        # (neib_shape, mode) cases for that shape — mirrors the original.
        images = np.ones(shape).astype("float32")
        x = tt.ftensor4()
        for neib_shape, mode in cases:
            self._compile_and_check(
                [x],
                [images2neibs(x, neib_shape=neib_shape, mode=mode)],
                [images],
                Images2Neibs,
            )

    _check((100, 40, 6, 3), [((2, 1), "valid"), ((2, 3), "valid")])
    _check((100, 40, 5, 4), [((2, 1), "ignore_borders")])
    _check((100, 40, 5, 3), [((2, 3), "ignore_borders")])
    _check((100, 40, 6, 7), [((2, 2), "ignore_borders")])
    _check((100, 40, 5, 10), [((3, 3), "wrap_centered")])
    _check((100, 40, 6, 4), [((2, 1), "half"), ((2, 3), "half")])
    _check((100, 40, 6, 5), [((2, 1), "full"), ((2, 3), "full")])
def test_GpuCumOp4D(self, mode):
    """Check that a 4D cumulative op falls back to the CPU ``CumOp``.

    The GPU version should not be used here, so the compiled graph must
    still contain a ``CumOp`` node.
    """
    make_op = partial(self.op_class, mode=mode)

    inp = tt.ftensor4("x")
    fn = aesara.function([inp], make_op(axis=1)(inp), mode=self.mode)

    cum_nodes = [
        node for node in fn.maker.fgraph.toposort() if isinstance(node.op, CumOp)
    ]
    assert cum_nodes