Example 1
def test_conv_invalid(
        device, x_shape, w_shape, b_shape, stride, pad, cover_all,
        float_dtype):
    with pytest.raises(chainerx.DimensionError):
        chainerx.conv(
            *_create_conv_args(
                chainerx, device, x_shape, w_shape, b_shape, stride, pad,
                cover_all, float_dtype))
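For context, a minimal standalone sketch of the failure this test exercises (hypothetical shapes; `_create_conv_args` is a test helper not shown here): `chainerx.conv` should raise `chainerx.DimensionError` when, for instance, the weight's input-channel axis does not match the input's channel axis.

import pytest
import chainerx

x = chainerx.ones((1, 3, 8, 8), dtype='float32')  # NCHW input, 3 channels
w = chainerx.ones((2, 4, 3, 3), dtype='float32')  # weight expects 4 channels
with pytest.raises(chainerx.DimensionError):
    chainerx.conv(x, w)  # hypothetical channel mismatch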
Example 2
def __call__(self, x):
    if self.b is not None:
        return chx.conv(x,
                        self.W,
                        self.b,
                        stride=self.stride,
                        pad=self.pad)
    else:
        return chx.conv(x, self.W, stride=self.stride, pad=self.pad)
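A hypothetical minimal container this `__call__` could belong to, offered only as a sketch: the attribute names `W`, `b`, `stride`, and `pad` mirror the snippet above, and the weight follows `chainerx.conv`'s (out_channels, in_channels, kh, kw) layout. Constant initialization is used purely to keep the sketch short.

import chainerx as chx

class SimpleConv2D:
    def __init__(self, in_channels, out_channels, ksize,
                 stride=1, pad=0, nobias=False):
        # Toy constant init; a real link would draw random weights.
        self.W = chx.ones(
            (out_channels, in_channels, ksize, ksize), dtype='float32')
        self.b = None if nobias else chx.zeros(
            (out_channels,), dtype='float32')
        self.stride = stride
        self.pad = pad

    def __call__(self, x):
        if self.b is not None:
            return chx.conv(x, self.W, self.b,
                            stride=self.stride, pad=self.pad)
        return chx.conv(x, self.W, stride=self.stride, pad=self.pad)

layer = SimpleConv2D(3, 8, ksize=3, pad=1)
y = layer(chx.ones((1, 3, 32, 32), dtype='float32'))  # shape (1, 8, 32, 32)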
Example 3
def forward_chainerx(self, inputs):
    if len(inputs) == 2:
        (x, w), b = inputs, None
    else:
        x, w, b = inputs
    y = chainerx.conv(x, w, b, self.stride, self.pad, self.cover_all)
    return y,
Example 4
def test_conv(device, x_shape, w_shape, b_shape, stride, pad, cover_all,
              float_dtype):
    if device.backend.name == 'cuda' and len(x_shape) <= 3:
        # cuDNN does not support 1 dimensional convolution and throws
        # DimensionError.
        # TODO(hvy): Support 1 dimensional convolution with CUDA.
        return chainerx.testing.ignore()

    def create_args(xp):
        return _create_conv_args(xp, device, x_shape, w_shape, b_shape, stride,
                                 pad, cover_all, float_dtype)

    chainerx.testing.assert_allclose(
        chainerx.conv(*create_args(chainerx)),
        chainer.functions.convolution_nd(*create_args(numpy)).data)
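The same cross-check written out without the `_create_conv_args` helper, as a hypothetical standalone sketch (shapes and tolerances chosen arbitrarily): both implementations are fed identical NumPy-initialized inputs and should agree numerically.

import numpy
import chainer.functions
import chainerx

x = numpy.random.uniform(-1, 1, (2, 3, 8, 8)).astype('float32')
w = numpy.random.uniform(-1, 1, (4, 3, 3, 3)).astype('float32')
b = numpy.random.uniform(-1, 1, (4,)).astype('float32')

y_chx = chainerx.conv(
    chainerx.array(x), chainerx.array(w), chainerx.array(b),
    stride=1, pad=1)
y_ref = chainer.functions.convolution_nd(x, w, b, stride=1, pad=1).data
numpy.testing.assert_allclose(
    chainerx.to_numpy(y_chx), y_ref, rtol=1e-5, atol=1e-5)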
Example 5
    def forward_chainerx(self, inputs):
        # TODO(hvy): Support mixed precision.
        if any([arr.dtype != inputs[0].dtype for arr in inputs[1:]]):
            return chainer.Fallback
        # TODO(hvy): Support dilate > 1.
        if self.dy > 1 or self.dx > 1:
            return chainer.Fallback
        # TODO(hvy): Support groups > 1.
        if self.groups > 1:
            return chainer.Fallback
        if inputs[0].device.backend.name == 'cuda' and self.cover_all:
            return chainer.Fallback

        return chainerx.conv(
            *inputs, stride=(self.sy, self.sx), pad=(self.ph, self.pw),
            cover_all=self.cover_all),
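Note the trailing comma in the return statement: `forward_chainerx` returns a one-element tuple, following Chainer's convention that forward methods return a tuple of output arrays. Returning `chainer.Fallback` instead instructs Chainer to run its regular NumPy/CuPy implementation for the configurations that ChainerX's `conv` does not yet cover here: mixed precision, dilation greater than 1, grouped convolution, and `cover_all` on CUDA.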
Example 6
    def forward_chainerx(self, inputs):
        # TODO(hvy): Support mixed precision.
        if any([arr.dtype != inputs[0].dtype for arr in inputs[1:]]):
            return chainer.Fallback
        # TODO(hvy): Support dilate > 1.
        if any(d != 1 for d in self.dilate):
            return chainer.Fallback
        # TODO(hvy): Support groups > 1.
        if self.groups > 1:
            return chainer.Fallback
        if inputs[0].device.backend.name == 'cuda' and (
                self.cover_all or self.ndim < 2):
            return chainer.Fallback

        return chainerx.conv(
            *inputs, stride=self.stride, pad=self.pad,
            cover_all=self.cover_all),
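Compared with Example 5, this N-dimensional variant also falls back on CUDA when `self.ndim < 2`, consistent with the limitation noted in Example 4's test: cuDNN does not support one-dimensional convolution.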