def test_AveragePoolGrad_grad_stride(self, example, ignore_border, mode):
    # checks the gradient of the gradient for
    # the case that stride is used
    rng = np.random.RandomState(utt.fetch_seed())
    (avgpoolshp, stride, inputsize) = example
    imval = rng.rand(*inputsize)
    grad_shape = Pool.out_shape(
        imval.shape,
        avgpoolshp,
        ndim=len(avgpoolshp),
        ignore_border=ignore_border,
        stride=stride,
    )

    # skip the grad verification when the output is empty
    if np.prod(grad_shape) != 0:
        grad_val = rng.rand(*grad_shape)

        def mp(input, grad):
            grad_op = AveragePoolGrad(
                ndim=len(avgpoolshp), ignore_border=ignore_border, mode=mode
            )
            return grad_op(input, grad, avgpoolshp, stride)

        utt.verify_grad(mp, [imval, grad_val], rng=rng)
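
# Note on the pattern used by the grad-of-grad tests in this file:
# utt.verify_grad numerically differentiates the wrapped function `mp`.
# Because `mp` itself applies a *Grad op (here AveragePoolGrad), verifying
# its gradient amounts to checking the second derivative of the pooling op.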
def test_DownsampleFactorMaxPaddingStride(self):
    ignore_border = True  # padding does not support ignore_border=False
    rng = np.random.RandomState(utt.fetch_seed())
    # maxpool, stride, pad, input sizes
    examples = (
        ((3,), (2,), (2,), (5,)),
        ((3,), (2,), (2,), (4, 5)),
        ((3,), (2,), (2,), (4, 2, 5, 5)),
        ((3, 3), (2, 2), (2, 2), (4, 2, 5, 5)),
        ((4, 4), (2, 2), (1, 2), (4, 2, 5, 5)),
        ((3, 4), (1, 1), (2, 1), (4, 2, 5, 6)),
        ((4, 3), (1, 2), (0, 0), (4, 2, 6, 5)),
        ((2, 2), (2, 2), (1, 1), (4, 2, 5, 5)),
        ((4, 3, 2), (1, 2, 2), (0, 2, 1), (4, 6, 6, 5)),
        ((4, 3, 2), (1, 2, 2), (0, 2, 1), (4, 2, 6, 5, 5)),
    )
    for example, mode in product(
        examples, ["max", "sum", "average_inc_pad", "average_exc_pad"]
    ):
        (maxpoolshp, stridesize, padsize, inputsize) = example
        imval = rng.rand(*inputsize) - 0.5
        images = aesara.shared(imval)

        numpy_output_val = self.numpy_max_pool_nd_stride_pad(
            imval, maxpoolshp, ignore_border, stridesize, padsize, mode
        )
        maxpool_op = Pool(
            ndim=len(maxpoolshp), ignore_border=ignore_border, mode=mode
        )(images, maxpoolshp, stridesize, padsize)
        f = function([], maxpool_op)
        output_val = f()
        utt.assert_allclose(output_val, numpy_output_val)
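
# For orientation, a minimal NumPy sketch of the strided, padded max pooling
# that numpy_max_pool_nd_stride_pad serves as a reference for. This is an
# illustrative reimplementation (2D only, mode="max", ignore_border=True),
# not the test helper itself, and the name is hypothetical.
def _max_pool_2d_reference(x, ws, stride, pad):
    # Pad with -inf so the padding can never win the max.
    padded = np.full(
        (x.shape[0] + 2 * pad[0], x.shape[1] + 2 * pad[1]), -np.inf, dtype=x.dtype
    )
    padded[pad[0]:pad[0] + x.shape[0], pad[1]:pad[1] + x.shape[1]] = x
    # ignore_border=True: only windows that fit entirely inside the padded input
    out_r = (padded.shape[0] - ws[0]) // stride[0] + 1
    out_c = (padded.shape[1] - ws[1]) // stride[1] + 1
    out = np.empty((out_r, out_c), dtype=x.dtype)
    for r in range(out_r):
        for c in range(out_c):
            out[r, c] = padded[
                r * stride[0]:r * stride[0] + ws[0],
                c * stride[1]:c * stride[1] + ws[1],
            ].max()
    return out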
def mp(input1, input2):
    op1 = Pool(ndim=len(maxpoolshp), ignore_border=True)
    pooled_out = op1(input1, maxpoolshp, stridesize, padsize)
    op2 = DownsampleFactorMaxGradGrad(ndim=len(maxpoolshp), ignore_border=True)
    out = op2(input1, pooled_out, input2, maxpoolshp, stridesize, padsize)
    return out
def mp(input, grad):
    out = Pool(ndim=len(maxpoolshp), ignore_border=ignore_border)(
        input, maxpoolshp, stride
    )
    grad_op = MaxPoolGrad(ndim=len(maxpoolshp), ignore_border=ignore_border)
    return grad_op(input, out, grad, maxpoolshp, stride)

utt.verify_grad(mp, [imval, grad_val], rng=rng)
def test_downsample(self):
    rng = np.random.RandomState(utt.fetch_seed())
    # ws, shp
    examples = (
        ((2,), (16,)),
        ((2,), (4, 16)),
        ((2,), (4, 2, 16)),
        ((1, 1), (4, 2, 16, 16)),
        ((2, 2), (4, 2, 16, 16)),
        ((3, 3), (4, 2, 16, 16)),
        ((3, 2), (4, 2, 16, 16)),
        ((3, 2, 2), (3, 2, 16, 16, 16)),
        ((2, 3, 2), (3, 2, 16, 16, 16)),
        ((2, 2, 3), (3, 2, 16, 16, 16)),
        ((2, 2, 3, 2), (3, 2, 6, 6, 6, 5)),
    )

    for example, ignore_border in itertools.product(examples, [True, False]):
        (ws, shp) = example
        vx = rng.rand(*shp)
        vex = rng.rand(*shp)

        x = aesara.shared(vx)
        ex = aesara.shared(vex)

        maxpool_op = Pool(ignore_border, ndim=len(ws))
        a_pooled = maxpool_op(x, ws).flatten()
        yv = Rop(a_pooled, x, ex)
        mode = None
        if aesara.config.mode == "FAST_COMPILE":
            mode = "FAST_RUN"
        rop_f = function([], yv, on_unused_input="ignore", mode=mode)
        sy, _ = aesara.scan(
            lambda i, y, x, v: (grad(y[i], x) * v).sum(),
            sequences=aet.arange(a_pooled.shape[0]),
            non_sequences=[a_pooled, x, ex],
            mode=mode,
        )
        scan_f = function([], sy, on_unused_input="ignore", mode=mode)

        v1 = rop_f()
        v2 = scan_f()
        assert np.allclose(v1, v2), f"Rop mismatch: {v1} {v2}"
def test_DownsampleFactorMaxPaddingStride_grad_grad(self):
    rng = np.random.RandomState(utt.fetch_seed())
    # maxpool, stride, pad, input sizes
    examples = (
        ((3,), (2,), (2,), (10,)),
        ((3,), (2,), (2,), (2, 10)),
        ((3,), (2,), (2,), (2, 1, 10)),
        ((5, 3), (3, 2), (2, 2), (1, 1, 10, 10)),
        ((3, 5), (2, 3), (2, 1), (1, 1, 10, 5)),
        ((5, 3, 3), (3, 2, 2), (2, 2, 2), (1, 1, 10, 5, 5)),
        ((3, 3, 5), (2, 2, 3), (2, 2, 1), (1, 1, 5, 5, 10)),
    )

    for (maxpoolshp, stridesize, padsize, inputsize) in examples:
        imval = rng.rand(*inputsize) * 10.0

        grad_shape = Pool.out_shape(
            imval.shape,
            maxpoolshp,
            ndim=len(maxpoolshp),
            stride=stridesize,
            ignore_border=True,
            pad=padsize,
        )
        grad_val = rng.rand(*grad_shape) * 10.0

        def mp(input, grad):
            out = Pool(
                ndim=len(maxpoolshp),
                ignore_border=True,
            )(input, maxpoolshp, stridesize, padsize)
            grad_op = MaxPoolGrad(ndim=len(maxpoolshp), ignore_border=True)
            return grad_op(input, out, grad, maxpoolshp, stridesize, padsize)

        utt.verify_grad(mp, [imval, grad_val], rng=rng)
def test_AveragePoolPaddingStride_grad_grad(self):
    rng = np.random.RandomState(utt.fetch_seed())
    # avgpool, stride, pad, input sizes
    examples = (
        ((3,), (2,), (2,), (10,)),
        ((3,), (2,), (2,), (2, 10)),
        ((3,), (2,), (2,), (2, 1, 10)),
        ((5, 3), (3, 2), (2, 2), (1, 1, 10, 10)),
        ((3, 5), (2, 3), (2, 1), (1, 1, 10, 5)),
        ((5, 3, 2), (3, 2, 1), (2, 2, 2), (1, 1, 10, 5, 5)),
    )

    for (avgpoolshp, stridesize, padsize, inputsize) in examples:
        imval = rng.rand(*inputsize) * 10.0

        # 'average_exc_pad' with non-zero padding is not implemented
        for mode in ["sum", "average_inc_pad"]:
            grad_shape = Pool.out_shape(
                imval.shape,
                avgpoolshp,
                ndim=len(avgpoolshp),
                stride=stridesize,
                ignore_border=True,
                pad=padsize,
            )
            grad_val = rng.rand(*grad_shape) * 10.0

            def mp(input, grad):
                grad_op = AveragePoolGrad(
                    ndim=len(avgpoolshp), ignore_border=True, mode=mode
                )
                return grad_op(input, grad, avgpoolshp, stridesize, padsize)

            utt.verify_grad(mp, [imval, grad_val], rng=rng)
def test_DownsampleFactorMaxStrideExtra(self):
    rng = np.random.RandomState(utt.fetch_seed())
    maxpoolshps = ((5, 3), (5, 3), (5, 3), (5, 5), (3, 2), (7, 7), (9, 9))
    stridesizes = ((3, 2), (7, 5), (10, 6), (1, 1), (2, 3), (10, 10), (1, 1))
    imvsizes = ((16, 16), (16, 16), (16, 16), (8, 5), (8, 5), (8, 5), (8, 5))
    # expected output shapes, interleaved: entries 2*i and 2*i + 1 hold the
    # shape for example i with ignore_border=True and False, respectively
    outputshps = (
        (4, 10, 4, 7),
        (4, 10, 5, 8),
        (4, 10, 2, 3),
        (4, 10, 3, 4),
        (4, 10, 2, 3),
        (4, 10, 2, 3),
        (4, 10, 4, 1),
        (4, 10, 4, 1),
        (4, 10, 3, 2),
        (4, 10, 4, 2),
        (4, 10, 1, 0),
        (4, 10, 1, 1),
        (4, 10, 0, 0),
        (4, 10, 1, 1),
    )
    images = dtensor4()
    for indx in np.arange(len(maxpoolshps)):
        imvsize = imvsizes[indx]
        imval = rng.rand(4, 10, imvsize[0], imvsize[1])
        stride = stridesizes[indx]
        maxpoolshp = maxpoolshps[indx]
        for ignore_border, mode in product(
            [True, False], ["max", "sum", "average_inc_pad", "average_exc_pad"]
        ):
            indx_out = indx * 2
            if not ignore_border:
                indx_out += 1
            outputshp = outputshps[indx_out]
            # Pool op
            numpy_output_val = self.numpy_max_pool_2d_stride(
                imval, maxpoolshp, ignore_border, stride, mode
            )
            assert numpy_output_val.shape == outputshp, (
                f"outshape is {outputshp}, calculated shape is "
                f"{numpy_output_val.shape}"
            )
            maxpool_op = Pool(
                ignore_border=ignore_border, ndim=len(maxpoolshp), mode=mode
            )(images, maxpoolshp, stride)
            f = function([images], maxpool_op)
            output_val = f(imval)
            utt.assert_allclose(output_val, numpy_output_val)
def test_DownsampleFactorMaxGrad_grad(self):
    rng = np.random.RandomState(utt.fetch_seed())
    # maxpool, input sizes
    examples = (
        ((2,), (2,)),
        ((2,), (2, 3)),
        ((1, 1), (2, 3, 3, 4)),
        ((3, 2), (2, 3, 3, 4)),
        ((2, 3), (2, 3, 3, 4)),
        ((1, 1, 1), (2, 3, 3, 4)),
        ((3, 2, 2), (2, 3, 3, 4)),
        ((2, 3, 2), (2, 3, 3, 4)),
        ((2, 2, 3), (2, 3, 3, 4)),
        ((2, 2, 3), (2, 1, 3, 3, 4)),
    )

    for (maxpoolshp, inputsize) in examples:
        # more variance means the numeric gradient will be more accurate
        imval = rng.rand(*inputsize) * 10.0

        for ignore_border in [True, False]:
            # The shape of the gradient will be the shape of the output
            grad_shape = Pool.out_shape(
                imval.shape,
                maxpoolshp,
                ndim=len(maxpoolshp),
                ignore_border=ignore_border,
            )
            grad_val = rng.rand(*grad_shape) * 10.0

            def mp(input, grad):
                out = Pool(ndim=len(maxpoolshp), ignore_border=ignore_border)(
                    input, maxpoolshp
                )
                grad_op = MaxPoolGrad(
                    ndim=len(maxpoolshp), ignore_border=ignore_border
                )
                return grad_op(input, out, grad, maxpoolshp)

            utt.verify_grad(mp, [imval, grad_val], rng=rng)
def test_AveragePoolGrad_grad(self):
    rng = np.random.RandomState(utt.fetch_seed())
    # avgpool, input sizes
    examples = (
        ((2,), (2,)),
        ((2,), (2, 3)),
        ((1, 1), (2, 3, 3, 4)),
        ((3, 2), (2, 3, 3, 4)),
        ((2, 3), (2, 3, 3, 4)),
        ((3, 2, 2), (2, 3, 3, 4)),
        ((2, 2, 3), (2, 3, 3, 4)),
    )

    for (avgpoolshp, inputsize) in examples:
        # more variance means the numeric gradient will be more accurate
        imval = rng.rand(*inputsize) * 10.0

        for ignore_border in [True, False]:
            for mode in ["sum", "average_inc_pad", "average_exc_pad"]:
                # The shape of the gradient will be the shape of the output
                grad_shape = Pool.out_shape(
                    imval.shape,
                    avgpoolshp,
                    ndim=len(avgpoolshp),
                    ignore_border=ignore_border,
                )
                grad_val = rng.rand(*grad_shape) * 10.0

                def mp(input, grad):
                    grad_op = AveragePoolGrad(
                        ndim=len(avgpoolshp), ignore_border=ignore_border, mode=mode
                    )
                    return grad_op(input, grad, avgpoolshp)

                utt.verify_grad(mp, [imval, grad_val], rng=rng)
def test_pool2d():
    shps = [
        (1, 12),
        (1, 1, 12),
        (1, 1, 1, 12),
        (1, 1, 2, 2),
        (1, 1, 1, 1),
        (1, 1, 4, 4),
        (1, 1, 10, 11),
        (1, 2, 2, 2),
        (3, 5, 4, 4),
        (25, 1, 7, 7),
        (1, 1, 12, 12),
        (1, 1, 2, 14),
        (1, 1, 12, 14),
        (1, 1, 14, 14),
        (1, 1, 16, 16),
        (1, 1, 18, 18),
        (1, 1, 24, 24),
        (1, 6, 24, 24),
        (10, 1, 24, 24),
        (10, 6, 24, 24),
        (30, 6, 12, 12),
        (30, 2, 24, 24),
        (30, 6, 24, 24),
        (10, 10, 10, 11),
        (1, 1, 10, 1025),
        (1, 1, 10, 1023),
        (1, 1, 1025, 10),
        (1, 1, 1023, 10),
        (3, 2, 16, 16, 16),
        (3, 2, 6, 6, 6, 5),
        (3, 2, 6, 6, 6, 5, 7),
    ]
    np.random.RandomState(utt.fetch_seed()).shuffle(shps)
    test_ws = (2, 2), (3, 2), (1, 1)
    test_st = (2, 2), (3, 2), (1, 1)
    test_mode = ["max", "sum", "average_inc_pad", "average_exc_pad"]

    ref_mode = copy.copy(mode_without_gpu)
    ref_mode.check_py_code = False
    gpu_mode = mode_with_gpu.excluding("cudnn")
    gpu_mode.check_py_code = False

    for shp in shps:
        for mode, ws, st in itertools.product(test_mode, test_ws, test_st):
            if ws[0] > shp[-2] or ws[1] > shp[-1]:
                continue
            for ignore_border, pad in zip((True, False), [(1, 1), (0, 0)]):
                if pad[0] >= ws[0] or pad[1] >= ws[1]:
                    continue
                if mode == "average_exc_pad" and (pad[0] > 0 or pad[1] > 0):
                    continue
                ds_op = Pool(ndim=len(ws), mode=mode, ignore_border=ignore_border)

                a = aesara.shared(rand(*shp), "a")
                a_pooled = ds_op(aet.as_tensor_variable(a), ws, st, pad)

                # forward: GPU result must match the reference CPU result
                f = aesara.function([], a_pooled, mode=gpu_mode)
                f2 = aesara.function([], a_pooled, mode=ref_mode)
                assert any(
                    isinstance(node.op, GpuPool) for node in f.maker.fgraph.toposort()
                )
                assert any(
                    isinstance(node.op, Pool) for node in f2.maker.fgraph.toposort()
                )
                assert np.allclose(f(), f2()), (shp, ws, st, pad, mode, ignore_border)

                # gradient: GPU vs reference CPU
                a_pooled_grad = grad(a_pooled.sum(), a)

                g = aesara.function([], a_pooled_grad, mode=gpu_mode)
                g2 = aesara.function([], a_pooled_grad, mode=ref_mode)

                if mode == "max":
                    gop = GpuMaxPoolGrad
                    gop2 = MaxPoolGrad
                else:
                    gop = GpuAveragePoolGrad
                    gop2 = AveragePoolGrad
                assert any(
                    isinstance(node.op, gop) for node in g.maker.fgraph.toposort()
                )
                assert any(
                    isinstance(node.op, gop2) for node in g2.maker.fgraph.toposort()
                )
                assert np.allclose(g(), g2()), (shp, ws, st, pad, mode, ignore_border)

                # test Rop and grad-of-grad for max pooling;
                # for average pooling, grad-of-grad is just average pooling grad
                if mode != "max":
                    continue

                ea = aesara.shared(rand(*shp), "ea")

                gr = aesara.function([], Rop(a_pooled, a, ea), mode=gpu_mode)
                gr2 = aesara.function([], Rop(a_pooled, a, ea), mode=ref_mode)
                assert any(
                    isinstance(node.op, GpuDownsampleFactorMaxGradGrad)
                    for node in gr.maker.fgraph.toposort()
                )
                assert any(
                    isinstance(node.op, DownsampleFactorMaxGradGrad)
                    for node in gr2.maker.fgraph.toposort()
                )
                assert np.allclose(gr(), gr2()), (shp, ws, st, pad, mode, ignore_border)

                # grad-of-grad via the L-operator applied to the first gradient
                ggf = Lop(grad((a_pooled ** 2).sum(), a), a, a)

                gg = aesara.function([], ggf, mode=gpu_mode)
                gg2 = aesara.function([], ggf, mode=ref_mode)
                assert any(
                    isinstance(node.op, GpuDownsampleFactorMaxGradGrad)
                    for node in gg.maker.fgraph.toposort()
                )
                assert any(
                    isinstance(node.op, DownsampleFactorMaxGradGrad)
                    for node in gg2.maker.fgraph.toposort()
                )
                assert np.allclose(gg(), gg2()), (shp, ws, st, pad, mode, ignore_border)
def test_infer_shape(self):
    image = dtensor4()
    maxout = dtensor4()
    gz = dtensor4()
    rng = np.random.RandomState(utt.fetch_seed())
    maxpoolshps = ((1, 1), (2, 2), (3, 3), (2, 3), (3, 2))

    image_val = rng.rand(4, 6, 7, 9)
    # out_shapes[k][i][j]: pad index k, pool-shape index i, ignore_border
    # index j (True, False); None marks unsupported combinations
    out_shapes = [
        [
            [[4, 6, 7, 9], [4, 6, 7, 9]],
            [[4, 6, 3, 4], [4, 6, 4, 5]],
            [[4, 6, 2, 3], [4, 6, 3, 3]],
            [[4, 6, 3, 3], [4, 6, 4, 3]],
            [[4, 6, 2, 4], [4, 6, 3, 5]],
        ],
        [
            [None, None],
            [[4, 6, 4, 5], None],
            [[4, 6, 3, 3], None],
            [[4, 6, 4, 3], None],
            [[4, 6, 3, 5], None],
        ],
        [
            [None, None],
            [None, None],
            [[4, 6, 3, 4], None],
            [[4, 6, 4, 4], None],
            [None, None],
        ],
    ]

    for i, maxpoolshp in enumerate(maxpoolshps):
        for j, ignore_border in enumerate([True, False]):
            for k, pad in enumerate([(0, 0), (1, 1), (1, 2)]):
                if out_shapes[k][i][j] is None:
                    continue
                # checking shapes generated by Pool
                self._compile_and_check(
                    [image],
                    [Pool(ignore_border=ignore_border)(image, maxpoolshp, pad=pad)],
                    [image_val],
                    Pool,
                )
                # checking shapes generated by MaxPoolGrad
                maxout_val = rng.rand(*out_shapes[k][i][j])
                gz_val = rng.rand(*out_shapes[k][i][j])
                self._compile_and_check(
                    [image, maxout, gz],
                    [
                        MaxPoolGrad(ignore_border=ignore_border)(
                            image, maxout, gz, maxpoolshp, pad=pad
                        )
                    ],
                    [image_val, maxout_val, gz_val],
                    MaxPoolGrad,
                    warn=False,
                )

    # checking with broadcastable input
    image = tensor(dtype="float64", broadcastable=(False, False, True, True))
    image_val = rng.rand(4, 6, 1, 1)
    self._compile_and_check(
        [image],
        [Pool(ignore_border=True)(image, (2, 2), pad=(0, 0))],
        [image_val],
        Pool,
    )
def test_out_shape(self):
    assert Pool.out_shape((9, 8, 6), (2, 2)) == [9, 4, 3]
    assert Pool.out_shape((8, 6), (2, 2)) == [4, 3]
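
# A minimal sketch of the arithmetic behind the expected values above,
# assuming no padding and stride defaulting to the window size (for window
# sizes that divide the input evenly, as here, ignore_border does not
# matter). Only the last len(ws) dimensions are pooled; leading dimensions
# pass through. The helper name is illustrative, not part of the Pool API.
def _expected_out_shape(in_shape, ws):
    nd = len(ws)
    pooled = [(d - w) // w + 1 for d, w in zip(in_shape[-nd:], ws)]
    return list(in_shape[:-nd]) + pooled

assert _expected_out_shape((9, 8, 6), (2, 2)) == [9, 4, 3]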
def infer_shape(self, fgraph, node, in_shapes):
    ws, stride, pad = node.inputs[1], node.inputs[2], node.inputs[3]
    shp = Pool.out_shape(
        in_shapes[0], ws, self.ignore_border, stride, pad, self.ndim
    )
    return [shp]
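
# Note: the same Pool.out_shape classmethod serves both worlds here:
# test_out_shape above calls it with plain Python ints, while infer_shape
# hands it symbolic graph inputs (ws, stride, and pad are node inputs), so
# it has to work elementwise on either kind of value.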
def test_DownsampleFactorMaxStride(self):
    rng = np.random.RandomState(utt.fetch_seed())
    # maxpool, stride, ignore_border, input, output sizes
    examples = (
        ((1, 1), (1, 1), True, (4, 10, 16, 16), (4, 10, 16, 16)),
        ((1, 1), (5, 7), True, (4, 10, 16, 16), (4, 10, 4, 3)),
        ((1, 1), (1, 1), False, (4, 10, 16, 16), (4, 10, 16, 16)),
        ((1, 1), (5, 7), False, (4, 10, 16, 16), (4, 10, 4, 3)),
        ((3, 3), (1, 1), True, (4, 10, 16, 16), (4, 10, 14, 14)),
        ((3, 3), (3, 3), True, (4, 10, 16, 16), (4, 10, 5, 5)),
        ((3, 3), (5, 7), True, (4, 10, 16, 16), (4, 10, 3, 2)),
        ((3, 3), (1, 1), False, (4, 10, 16, 16), (4, 10, 14, 14)),
        ((3, 3), (3, 3), False, (4, 10, 16, 16), (4, 10, 6, 6)),
        ((3, 3), (5, 7), False, (4, 10, 16, 16), (4, 10, 4, 3)),
        ((5, 3), (1, 1), True, (4, 10, 16, 16), (4, 10, 12, 14)),
        ((5, 3), (3, 3), True, (4, 10, 16, 16), (4, 10, 4, 5)),
        ((5, 3), (5, 7), True, (4, 10, 16, 16), (4, 10, 3, 2)),
        ((5, 3), (1, 1), False, (4, 10, 16, 16), (4, 10, 12, 14)),
        ((5, 3), (3, 3), False, (4, 10, 16, 16), (4, 10, 5, 6)),
        ((5, 3), (5, 7), False, (4, 10, 16, 16), (4, 10, 4, 3)),
        ((16, 16), (1, 1), True, (4, 10, 16, 16), (4, 10, 1, 1)),
        ((16, 16), (5, 7), True, (4, 10, 16, 16), (4, 10, 1, 1)),
        ((16, 16), (1, 1), False, (4, 10, 16, 16), (4, 10, 1, 1)),
        ((16, 16), (5, 7), False, (4, 10, 16, 16), (4, 10, 1, 1)),
        ((3,), (5,), True, (16,), (3,)),
        ((3,), (5,), True, (2, 16), (2, 3)),
        ((5,), (3,), True, (2, 3, 16), (2, 3, 4)),
        ((5, 1, 3), (3, 3, 3), True, (2, 16, 16, 16), (2, 4, 6, 5)),
        ((5, 1, 3), (3, 3, 3), True, (4, 2, 16, 16, 16), (4, 2, 4, 6, 5)),
    )

    for example, mode in product(
        examples, ["max", "sum", "average_inc_pad", "average_exc_pad"]
    ):
        (maxpoolshp, stride, ignore_border, inputshp, outputshp) = example
        # generate random images
        imval = rng.rand(*inputshp)
        images = aesara.shared(imval)
        # Pool op
        numpy_output_val = self.numpy_max_pool_nd_stride(
            imval, maxpoolshp, ignore_border, stride, mode
        )
        assert (
            numpy_output_val.shape == outputshp
        ), f"outshape is {outputshp}, calculated shape is {numpy_output_val.shape}"
        maxpool_op = Pool(
            ndim=len(maxpoolshp), ignore_border=ignore_border, mode=mode
        )(images, maxpoolshp, stride)
        f = function([], maxpool_op)
        output_val = f()
        utt.assert_allclose(output_val, numpy_output_val)
def mp(input):
    return Pool(
        ndim=len(maxpoolshp),
        ignore_border=True,
        mode=mode,
    )(input, maxpoolshp, stridesize, padsize)
def mp(input):
    return Pool(ndim=len(maxpoolshp), ignore_border=ignore_border, mode=mode)(
        input, maxpoolshp, stridesize
    )
def test_DownsampleFactorMax(self):
    rng = np.random.RandomState(utt.fetch_seed())
    # maxpool, input size
    examples = (
        ((2,), (16,)),
        ((2,), (4, 16)),
        ((2,), (4, 2, 16)),
        ((1, 1), (4, 2, 16, 16)),
        ((2, 2), (4, 2, 16, 16)),
        ((3, 3), (4, 2, 16, 16)),
        ((3, 2), (4, 2, 16, 16)),
        ((3, 2, 2), (3, 2, 16, 16, 16)),
        ((2, 2, 3, 2), (3, 2, 6, 6, 6, 5)),
    )

    for example, ignore_border, mode in product(
        examples,
        [True, False],
        ["max", "sum", "average_inc_pad", "average_exc_pad"],
    ):
        (maxpoolshp, inputsize) = example
        imval = rng.rand(*inputsize)
        images = aesara.shared(imval)

        # Pure NumPy computation
        numpy_output_val = self.numpy_max_pool_nd(
            imval, maxpoolshp, ignore_border, mode=mode
        )

        # The pool_2d or pool_3d helper methods
        if len(maxpoolshp) == 2:
            output = pool_2d(images, maxpoolshp, ignore_border, mode=mode)
            f = function([], [output])
            output_val = f()
            utt.assert_allclose(output_val, numpy_output_val)
        elif len(maxpoolshp) == 3:
            output = pool_3d(images, maxpoolshp, ignore_border, mode=mode)
            f = function([], [output])
            output_val = f()
            utt.assert_allclose(output_val, numpy_output_val)

        # Pool op
        maxpool_op = Pool(
            ndim=len(maxpoolshp), ignore_border=ignore_border, mode=mode
        )(images, maxpoolshp)

        output_shape = Pool.out_shape(
            imval.shape,
            maxpoolshp,
            ndim=len(maxpoolshp),
            ignore_border=ignore_border,
        )
        utt.assert_allclose(np.asarray(output_shape), numpy_output_val.shape)
        f = function([], maxpool_op)
        output_val = f()
        utt.assert_allclose(output_val, numpy_output_val)