def check_forward(self, x_data, use_cudnn='always'):
    dims = self.dims
    ksize = self.ksize
    stride = self.stride
    pad = self.pad
    x = chainer.Variable(x_data)
    with chainer.using_config('use_cudnn', use_cudnn):
        y = functions.average_pooling_nd(
            x, ksize, stride, pad, self.pad_value)
    self.assertEqual(y.data.dtype, self.dtype)
    y_data = cuda.to_cpu(y.data)

    def denom(idx):
        if self.pad_value is None:
            # Average only over the valid (unpadded) elements of the patch.
            s = 1
            for slic in idx:
                s *= slic.stop - slic.start
            return s
        else:
            # Divide by the full kernel size; padded elements are included
            # in the average.
            return functools.reduce(operator.mul, ksize)

    self.assertEqual(self.gy.shape, y_data.shape)
    patches = pooling_nd_helper.pooling_patches(
        dims, ksize, stride, pad, False)
    for k in six.moves.range(2):
        for c in six.moves.range(3):
            x = self.x[k, c]
            expect = numpy.array(
                [x[idx].sum() / denom(idx) for idx in patches])
            expect = expect.reshape(y_data.shape[2:])
            testing.assert_allclose(
                expect, y_data[k, c], **self.check_forward_options)
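# The checks above and below rely on pooling_nd_helper.pooling_patches to
# enumerate one tuple of slices per output position. A minimal sketch of such
# a helper is given here for reference; it illustrates the assumed behaviour
# (window starts stepped by the stride, clipped to the input extent) and is
# not necessarily the exact helper shipped with the test suite.
import itertools


def _pooling_patches_sketch(dims, ksize, stride, pad, cover_all):
    # Upper bound for the window start positions; with cover_all, windows
    # that only partially cover the input are still emitted.
    stops = [d + p - k + (s if cover_all else 1)
             for d, k, s, p in zip(dims, ksize, stride, pad)]
    starts = itertools.product(
        *[range(-p, stop, s)
          for p, stop, s in zip(pad, stops, stride)])
    # Clip each window to the valid input region so that x[idx] never
    # indexes into the (virtual) padded area.
    return [tuple(slice(max(x0, 0), min(x0 + k, d))
                  for x0, k, d in zip(xs, ksize, dims))
            for xs in starts]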
def forward_expected(self, inputs):
    x, = inputs
    patches = pooling_nd_helper.pooling_patches(
        self.dims, self.ksize, self.stride, self.pad, False)

    def denom(idx):
        if self.pad_value is None:
            # Divide by the number of valid (unpadded) elements in the patch.
            s = 1
            for slic in idx:
                s *= slic.stop - slic.start
            return s
        else:
            # Divide by the full kernel size; padded elements are included
            # in the average.
            return functools.reduce(operator.mul, self.ksize)

    y = []
    for k in six.moves.range(2):
        tmp = []
        for c in six.moves.range(3):
            x_ = x[k, c]
            expect = numpy.array(
                [x_[idx].sum() / denom(idx) for idx in patches])
            expect = expect.reshape(self.output_shape[2:])
            tmp.append(expect)
        y.append(tmp)
    return numpy.asarray(y, dtype=self.dtype),
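# A small, self-contained illustration of the two denominators used above.
# The pad_value keyword matches the argument the tests pass to
# average_pooling_nd; the numbers are hand-computed, not taken from the
# test suite.
import numpy
import chainer.functions as F

x = numpy.ones((1, 1, 4, 4), dtype=numpy.float32)

# pad_value=0: padded zeros are averaged in, so a corner output element is
# (4 valid ones + 5 padded zeros) / 9 = 4/9.
y_zero_pad = F.average_pooling_nd(x, 3, stride=1, pad=1, pad_value=0)

# pad_value=None: only the valid elements are averaged, so every output
# element, corners included, is exactly 1.0.
y_ignore_pad = F.average_pooling_nd(x, 3, stride=1, pad=1, pad_value=None)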
def forward_expected(self, inputs):
    in_dims = self.in_dims
    ksize = self.ksize
    stride = self.stride
    pad = self.pad
    cover_all = self.cover_all
    patches = pooling_nd_helper.pooling_patches(
        in_dims, ksize, stride, pad, cover_all)
    x, = inputs
    out_dims = self._get_out_dims(x.shape[2:])
    y_shape = x.shape[:2] + out_dims
    # Compute the reference output in float64 for accuracy, then cast back
    # to the dtype under test.
    x = x.astype(numpy.float64)
    y = numpy.empty(y_shape, numpy.float64)
    for i in six.moves.range(2):
        for c in six.moves.range(3):
            d = numpy.array([x[i, c][idx].max() for idx in patches])
            y[i, c, ...] = d.reshape(out_dims)
    return y.astype(self.dtype),
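# _get_out_dims is defined elsewhere in the test case; a plausible sketch is
# shown below under the assumption that it simply applies the standard
# convolution/pooling output-size formula per axis. get_conv_outsize is a
# real Chainer utility; the method body itself is an illustration and assumes
# self.ksize, self.stride and self.pad are per-axis tuples.
from chainer.utils import conv


def _get_out_dims(self, in_dims):
    return tuple(
        conv.get_conv_outsize(d, k, s, p, cover_all=self.cover_all)
        for d, k, s, p in zip(in_dims, self.ksize, self.stride, self.pad))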
def check_forward(self, x_data, use_cudnn='always'):
    dims = self.dims
    ksize = self.ksize
    stride = self.stride
    pad = self.pad
    x = chainer.Variable(x_data)
    with chainer.using_config('use_cudnn', use_cudnn):
        y = functions.max_pooling_nd(
            x, ksize, stride=stride, pad=pad, cover_all=self.cover_all)
    self.assertEqual(y.data.dtype, self.dtype)
    y_data = cuda.to_cpu(y.data)

    self.assertEqual(self.gy.shape, y_data.shape)
    patches = pooling_nd_helper.pooling_patches(
        dims, ksize, stride, pad, self.cover_all)
    for k in six.moves.range(2):
        for c in six.moves.range(3):
            x = self.x[k, c]
            expect = numpy.array([x[idx].max() for idx in patches])
            expect = expect.reshape(y_data.shape[2:])
            testing.assert_allclose(expect, y_data[k, c])
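# check_forward is typically invoked from thin test wrappers like the ones
# sketched below (assuming self.x is the NumPy input prepared in setUp, and
# that cuda and attr come from chainer.backends and chainer.testing as in the
# rest of these tests); the GPU variants reuse the same data on the device
# and optionally disable cuDNN to exercise the fallback kernels.
def test_forward_cpu(self):
    self.check_forward(self.x)


@attr.gpu
def test_forward_gpu(self):
    self.check_forward(cuda.to_gpu(self.x))


@attr.gpu
def test_forward_gpu_no_cudnn(self):
    self.check_forward(cuda.to_gpu(self.x), 'never')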