Example #1
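This parameterized test checks that chainerx.conv_transpose raises chainerx.DimensionError for inconsistent input, weight, bias, stride, pad, or output-size arguments; the arguments are built by the test module's _create_conv_transpose_args helper.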
def test_conv_transpose_invalid(
        device, x_shape, w_shape, b_shape, stride, pad, outsize, float_dtype):
    with pytest.raises(chainerx.DimensionError):
        chainerx.conv_transpose(
            *_create_conv_transpose_args(
                chainerx, device, x_shape, w_shape, b_shape, stride, pad,
                outsize, float_dtype))
Example #2
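The same invalid-argument check written without the helper: the input, weight, and optional bias arrays are created explicitly with array_utils.create_dummy_ndarray.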
def test_conv_transpose_invalid(
        device, x_shape, w_shape, b_shape, stride, pad, outsize, float_dtype):
    dtype = float_dtype
    x = array_utils.create_dummy_ndarray(chainerx, x_shape, dtype)
    w = array_utils.create_dummy_ndarray(chainerx, w_shape, dtype)
    if b_shape is None:
        b = None
    else:
        b = array_utils.create_dummy_ndarray(chainerx, b_shape, float_dtype)

    with pytest.raises(chainerx.DimensionError):
        chainerx.conv_transpose(x, w, b, stride, pad, outsize)
Example #3
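A forward test that compares chainerx.conv_transpose against chainer.functions.deconvolution_nd on identical inputs, skipping configurations that CUDA/cuDNN does not support.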
def test_conv_transpose(
        device, x_shape, w_shape, b_shape, stride, pad, cover_all,
        float_dtype):
    if device.backend.name == 'cuda' and len(x_shape) <= 3:
        # cuDNN does not support 1 dimensional convolution and throws
        # DimensionError.
        # TODO(sonots): Support 1 dimensional convolution with CUDA.
        return chainerx.testing.ignore()
    if device.backend.name == 'cuda' and cover_all is True:
        # outsize (for cover_all=True) is not supported by CUDA.
        return chainerx.testing.ignore()

    def create_args(xp):
        if cover_all is None:
            outsize = None
        else:
            outsize = _get_conv_transpose_outsize(
                x_shape, w_shape, stride, pad, cover_all)
        return _create_conv_transpose_args(
            xp, device, x_shape, w_shape, b_shape, stride, pad, outsize,
            float_dtype)

    chainerx.testing.assert_allclose_ex(
        chainerx.conv_transpose(
            *create_args(chainerx)),
        chainer.functions.deconvolution_nd(*create_args(numpy)).data,
        rtol=1e-3, float16_rtol=1e-2, float16_atol=1e-2, strides_check=False)
Example #4
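A variant of the previous test that uses chainerx.testing.assert_allclose with a single rtol instead of the float16-specific tolerances of assert_allclose_ex.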
def test_conv_transpose(
        device, x_shape, w_shape, b_shape, stride, pad, cover_all,
        float_dtype):
    if device.backend.name == 'cuda' and len(x_shape) <= 3:
        # cuDNN does not support 1 dimensional convolution and throws
        # DimensionError.
        # TODO(sonots): Support 1 dimensional convolution with CUDA.
        return chainerx.testing.ignore()
    if device.backend.name == 'cuda' and cover_all is True:
        # outsize (for cover_all=True) is not supported by CUDA.
        return chainerx.testing.ignore()

    def create_args(xp):
        if cover_all is None:
            outsize = None
        else:
            outsize = _get_conv_transpose_outsize(
                x_shape, w_shape, stride, pad, cover_all)
        return _create_conv_transpose_args(
            xp, device, x_shape, w_shape, b_shape, stride, pad, outsize,
            float_dtype)

    chainerx.testing.assert_allclose(
        chainerx.conv_transpose(
            *create_args(chainerx)),
        chainer.functions.deconvolution_nd(*create_args(numpy)).data,
        rtol=1e-3)
Example #5
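A forward_chainerx method that unpacks an optional bias from the input tuple and delegates to chainerx.conv_transpose.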
    def forward_chainerx(self, inputs):
        if len(inputs) == 3:
            x, w, b = inputs
        else:
            (x, w), b = inputs, None
        y = chainerx.conv_transpose(
            x, w, b, self.stride, self.pad, self.outsize)
        return y,
Example #6
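A forward_chainerx implementation for N-dimensional deconvolution; it returns chainer.Fallback (deferring to the non-ChainerX implementation) whenever dilation, grouped convolution, mixed dtypes, or 1-dimensional convolution on CUDA is requested.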
    def forward_chainerx(self, inputs):
        # TODO(imanishi): Support it
        if any(d != 1 for d in self.dilate):
            return chainer.Fallback
        # TODO(imanishi): Support it
        if self.groups != 1:
            return chainer.Fallback
        # TODO(imanishi): Support it
        if any(a.dtype != inputs[0].dtype for a in inputs):
            return chainer.Fallback
        # TODO(imanishi): Support it
        if inputs[0].device.backend.name == 'cuda' and self.ndim < 2:
            return chainer.Fallback

        stride = self.stride
        pad = self.pad

        return chainerx.conv_transpose(*inputs, stride=stride, pad=pad),
Example #7
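A 2-dimensional counterpart of the previous example that additionally computes the output size and falls back when cover_all is required.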
    def forward_chainerx(self, inputs):
        # TODO(imanishi): Support it
        if self.dy != 1 or self.dx != 1:
            return chainer.Fallback
        # TODO(imanishi): Support it
        if self.groups != 1:
            return chainer.Fallback
        # TODO(imanishi): Support it
        if any(a.dtype != inputs[0].dtype for a in inputs):
            return chainer.Fallback
        # TODO(imanishi): Support it
        self._calc_out_size(inputs[0], inputs[1])
        self._set_cover_all(inputs[0], inputs[1])
        if self.cover_all:
            return chainer.Fallback

        stride = (self.sy, self.sx)
        pad = (self.ph, self.pw)
        outsize = None if self.outh is None else (self.outh, self.outw)

        return chainerx.conv_transpose(
            *inputs, stride=stride, pad=pad, outsize=outsize),
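For orientation, the snippet below is a minimal self-contained sketch of calling chainerx.conv_transpose directly. The weight layout (C_in, C_out, kH, kW) follows Chainer's deconvolution convention; the concrete shapes and values are illustrative assumptions, not taken from the examples above.

import chainerx

# Minimal sketch, assuming x is (N, C_in, H, W) and w is
# (C_in, C_out, kH, kW), as in Chainer's deconvolution convention.
x = chainerx.ones((1, 3, 4, 4), dtype=chainerx.float32)
w = chainerx.ones((3, 5, 3, 3), dtype=chainerx.float32)

# The bias is optional (None here); stride and pad accept ints or tuples.
y = chainerx.conv_transpose(x, w, None, stride=2, pad=1)

# Without outsize (i.e. cover_all=False), each spatial dimension becomes
# (in - 1) * stride + k - 2 * pad = (4 - 1) * 2 + 3 - 2 * 1 = 7.
print(y.shape)  # (1, 5, 7, 7)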