def test_pooling_with_tensor_vars(self):
    """Pooling with symbolic (tensor) parameters must match pooling with the
    equivalent fixed parameters, for both outputs and gradients."""
    inp = ftensor4()
    win = ivector()
    step = ivector()
    pad_var = ivector()
    data = np.random.normal(0, 1, (1, 1, 5, 5)).astype("float32")

    # checking variable params vs fixed params
    for ignore_border in (True, False):
        for mode in ["max", "sum", "average_inc_pad", "average_exc_pad"]:
            # Function whose pooling parameters are runtime inputs.
            out = pool_2d(inp, win, ignore_border, step, pad_var, mode)
            g = aesara.gradient.grad(out.sum(), inp)
            var_fct = aesara.function([inp, win, step, pad_var], [out, g])

            for w in (4, 2, 5):
                for s in (2, 3):
                    for p in (0, 1):
                        # Skip parameter combinations the pooling op rejects.
                        invalid = (
                            p > s
                            or s > w
                            or (p != 0 and not ignore_border)
                            or (mode == "average_exc_pad" and p != 0)
                        )
                        if invalid:
                            continue

                        # Same pooling, with compile-time constant parameters.
                        out = pool_2d(
                            inp, (w, w), ignore_border, (s, s), (p, p), mode
                        )
                        g = aesara.gradient.grad(out.sum(), inp)
                        fix_fct = aesara.function([inp], [out, g])

                        var_y, var_dx = var_fct(data, (w, w), (s, s), (p, p))
                        fix_y, fix_dx = fix_fct(data)
                        utt.assert_allclose(var_y, fix_y)
                        utt.assert_allclose(var_dx, fix_dx)
def test_sparseblockouter(self):
    """Compare the compiled sparse-block outer op against the numpy reference."""
    o = ftensor4()
    x = ftensor3()
    y = ftensor3()
    xIdx = imatrix()
    yIdx = imatrix()

    out = self.outer_op(o, x, y, xIdx, yIdx)
    f = aesara.function(
        [o, x, y, xIdx, yIdx], out, on_unused_input="warn", mode=self.mode
    )

    o_val, x_val, y_val, xIdx_val, yIdx_val = self.outer_data()
    got = f(o_val, x_val, y_val, xIdx_val, yIdx_val)
    expected = self.outer_numpy(o_val, x_val, y_val, xIdx_val, yIdx_val)
    utt.assert_allclose(expected, got)
def test_sparseblockgemvF(self):
    """Test the fortran order for W (which can happen in the grad for some
    graphs)."""
    b = fmatrix()
    W = ftensor4()
    h = ftensor3()
    iIdx = imatrix()
    oIdx = imatrix()

    # Swap the last two axes of W symbolically so the op receives a
    # Fortran-ordered weight tensor.
    W_t = DimShuffle((False, False, False, False), (0, 1, 3, 2))(
        at.as_tensor_variable(W)
    )
    o = self.gemv_op(b.take(oIdx, axis=0), W_t, h, iIdx, oIdx)
    f = aesara.function([W, h, iIdx, b, oIdx], o, mode=self.mode)

    W_val, h_val, iIdx_val, b_val, oIdx_val = self.gemv_data()
    # Feed W with its last two axes swapped to undo the symbolic DimShuffle.
    got = f(np.swapaxes(W_val, 2, 3), h_val, iIdx_val, b_val, oIdx_val)
    expected = self.gemv_numpy(
        b_val.take(oIdx_val, axis=0), W_val, h_val, iIdx_val, oIdx_val
    )
    utt.assert_allclose(expected, got)
def test_GpuCumOp4D(self, mode):
    """A 4d cumulative op must stay as a plain CumOp in the compiled graph."""
    x = ftensor4("x")
    # Should not use the GPU version.
    f = aesara.function(
        [x], partial(self.op_class, mode=mode)(axis=1)(x), mode=self.mode
    )
    assert any(isinstance(n.op, CumOp) for n in f.maker.fgraph.toposort())
def test_outer_infershape(self):
    """Check the infer_shape implementation of the outer op."""
    o = ftensor4()
    x = ftensor3()
    y = ftensor3()
    xIdx = imatrix()
    yIdx = imatrix()

    inputs = [o, x, y, xIdx, yIdx]
    self._compile_and_check(
        inputs,
        [self.outer_op(o, x, y, xIdx, yIdx)],
        self.outer_data(),
        self.outer_class,
    )
def test_gemv_infershape(self):
    """Check the infer_shape implementation of the gemv op."""
    b = fmatrix()
    W = ftensor4()
    h = ftensor3()
    iIdx = imatrix()
    oIdx = imatrix()

    output = self.gemv_op(b.take(oIdx, axis=0), W, h, iIdx, oIdx)
    self._compile_and_check(
        [W, h, iIdx, b, oIdx],
        [output],
        self.gemv_data(),
        self.gemv_class,
    )
def test_dot_infershape(self):
    """Check infer_shape through the sparse_block_dot convenience wrapper."""
    b = fmatrix()
    W = ftensor4()
    h = ftensor3()
    iIdx = imatrix()
    oIdx = imatrix()

    output = sparse_block_dot(W, h, iIdx, b, oIdx)
    self._compile_and_check(
        [W, h, iIdx, b, oIdx],
        [output],
        self.gemv_data(),
        self.gemv_class,
    )
def test_blocksparse_inplace_gemv_opt():
    """The inplace rewrite of sparse_block_gemv should fire except under
    FAST_COMPILE, and stack traces must be preserved either way."""
    b = fmatrix()
    W = ftensor4()
    h = ftensor3()
    iIdx = lmatrix()
    oIdx = lmatrix()

    o = sparse_block_dot(W, h, iIdx, b, oIdx)
    f = aesara.function([W, h, iIdx, b, oIdx], o)

    last_op = f.maker.fgraph.toposort()[-1].op
    if aesara.config.mode == "FAST_COMPILE":
        assert not last_op.inplace
        assert check_stack_trace(f, ops_to_check=[sparse_block_gemv])
    else:
        assert last_op.inplace
        assert check_stack_trace(f, ops_to_check=[sparse_block_gemv_inplace])
def test_blocksparse_inplace_outer_opt():
    """The inplace rewrite of sparse_block_outer (reached via the W gradient)
    should fire except under FAST_COMPILE, preserving stack traces."""
    b = fmatrix()
    W = ftensor4()
    h = ftensor3()
    iIdx = lmatrix()
    oIdx = lmatrix()

    o = sparse_block_dot(W, h, iIdx, b, oIdx)
    # The gradient w.r.t. W introduces the outer op into the graph.
    f = aesara.function(
        [W, h, iIdx, b, oIdx], [o, aesara.gradient.grad(o.sum(), wrt=W)]
    )

    last_op = f.maker.fgraph.toposort()[-1].op
    if aesara.config.mode == "FAST_COMPILE":
        assert not last_op.inplace
        assert check_stack_trace(f, ops_to_check=sparse_block_outer)
    else:
        assert last_op.inplace
        assert check_stack_trace(f, ops_to_check=sparse_block_outer_inplace)
def test_sparseblockgemv_grad_shape(self):
    """The gradients w.r.t. b, W and h must keep the input shapes."""
    b = fmatrix()
    W = ftensor4()
    h = ftensor3()
    iIdx = imatrix()
    oIdx = imatrix()

    o = self.gemv_op(b.take(oIdx, axis=0), W, h, iIdx, oIdx)
    grads = aesara.grad(o.sum(), [b, W, h])
    f = aesara.function([W, h, iIdx, b, oIdx], grads, mode=self.mode)

    W_val, h_val, iIdx_val, b_val, oIdx_val = self.gemv_data()
    # just make sure that it runs correctly and all the shapes are ok.
    b_g, W_g, h_g = f(W_val, h_val, iIdx_val, b_val, oIdx_val)
    assert b_g.shape == b_val.shape
    assert h_g.shape == h_val.shape
    assert W_g.shape == W_val.shape
def test_sparseblockgemv(self):
    """Compares the numpy and aesara versions of sparseblockgemv."""
    b = fmatrix()
    W = ftensor4()
    h = ftensor3()
    iIdx = imatrix()
    oIdx = imatrix()

    o = self.gemv_op(b.take(oIdx, axis=0), W, h, iIdx, oIdx)
    f = aesara.function([W, h, iIdx, b, oIdx], o, mode=self.mode)

    W_val, h_val, iIdx_val, b_val, oIdx_val = self.gemv_data()
    got = f(W_val, h_val, iIdx_val, b_val, oIdx_val)
    expected = self.gemv_numpy(
        b_val.take(oIdx_val, axis=0), W_val, h_val, iIdx_val, oIdx_val
    )
    utt.assert_allclose(expected, got)
def test_infer_shape(self):
    """Check Images2Neibs.infer_shape for every mode.

    The original body repeated the same ``_compile_and_check`` stanza nine
    times; this rewrite drives the identical checks from a table, covering
    exactly the same (shape, neib_shape, mode) combinations in the same
    order.
    """
    # (input shape, [(neib_shape, mode), ...]) — one fresh symbolic input
    # per shape, reused across that shape's cases, as in the original.
    cases = [
        ((100, 40, 6, 3), [((2, 1), "valid"), ((2, 3), "valid")]),
        ((100, 40, 5, 4), [((2, 1), "ignore_borders")]),
        ((100, 40, 5, 3), [((2, 3), "ignore_borders")]),
        ((100, 40, 6, 7), [((2, 2), "ignore_borders")]),
        ((100, 40, 5, 10), [((3, 3), "wrap_centered")]),
        ((100, 40, 6, 4), [((2, 1), "half"), ((2, 3), "half")]),
        ((100, 40, 6, 5), [((2, 1), "full"), ((2, 3), "full")]),
    ]
    for shape, specs in cases:
        images = np.ones(shape).astype("float32")
        x = ftensor4()
        for neib_shape, mode in specs:
            self._compile_and_check(
                [x],
                [images2neibs(x, neib_shape=neib_shape, mode=mode)],
                [images],
                Images2Neibs,
            )