Example No. 1
    def setUp(self):
        ndim = self.ndim
        dtype = self.dtype
        batches = 2
        in_channels_a_group = 3
        out_channels_a_group = 2
        in_channels = in_channels_a_group * self.groups
        out_channels = out_channels_a_group * self.groups
        ksize = 3
        stride = 2
        pad = ksize // stride * self.dilate
        self.strides = (stride,) * ndim
        self.pads = (pad,) * ndim
        self.dilations = (self.dilate,) * ndim
        self.x = cupy.zeros(
            (batches, in_channels) + (ksize,) * ndim, dtype)
        self.gy = cupy.zeros((batches, out_channels) + (2,) * ndim, dtype)

        self.gW = cupy.ones(
            (out_channels, in_channels_a_group) + (ksize,) * ndim, dtype)

        version = libcudnn.getVersion()
        deterministic = self.deterministic
        self.err = None
        if ((self.dilate > 1 and version < 6000) or
                (self.groups > 1 and version < 7000)):
            self.err = ValueError
        elif ((self.dilate > 1 and deterministic and version < 7000) or
                (ndim > 2 and deterministic and version < 6000) or
                (ndim > 2 and deterministic and self.dtype == numpy.float64)):
            self.err = libcudnn.CuDNNError
        self._workspace_size = cudnn.get_max_workspace_size()
        cudnn.set_max_workspace_size(self.max_workspace_size)
Example No. 2
    def test_cudnn(self):
        preload_version = self._get_config()['cudnn']['version']
        major, minor, patchlevel = (int(x) for x in preload_version.split('.'))
        expected_version = major * 1000 + minor * 100 + patchlevel
        assert libcudnn.available
        assert libcudnn.get_build_version() == expected_version
        assert libcudnn.getVersion() == expected_version
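
A note on the arithmetic above: cuDNN packs 'major.minor.patchlevel' into a single integer as major * 1000 + minor * 100 + patchlevel, so '7.6.5' becomes 7605. A minimal sketch of that encoding (the helper name is ours, not part of CuPy):

def encode_cudnn_version(version_string):
    # '7.6.5' -> 7 * 1000 + 6 * 100 + 5 = 7605, matching the test above
    major, minor, patchlevel = (int(x) for x in version_string.split('.'))
    return major * 1000 + minor * 100 + patchlevel

assert encode_cudnn_version('7.6.5') == 7605
assert encode_cudnn_version('8.0.5') == 8005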
Example No. 3
    def test_activation_backward_version(self):
        if libcudnn.getVersion() >= 4000:
            patch = 'cupy.cuda.cudnn.activationBackward_v4'
        else:
            patch = 'cupy.cuda.cudnn.activationBackward_v3'
        with mock.patch(patch) as func:
            cupy.cudnn.activation_backward(self.x, self.y, self.g, self.mode)
            self.assertEqual(func.called, True)
Example No. 4
    def test_cudnn(self):
        config = cupy._environment.get_preload_config()
        assert config is not None

        preload_version = config['cudnn']['version']
        major, minor, patchlevel = (int(x) for x in preload_version.split('.'))
        expected_version = major * 1000 + minor * 100 + patchlevel
        assert libcudnn.available
        assert libcudnn.get_build_version() == expected_version
        assert libcudnn.getVersion() == expected_version
Example No. 5
    def setUp(self):
        ndim = self.ndim
        dtype = self.dtype
        batches = 2
        in_channels_a_group = 3
        out_channels_a_group = 2
        in_channels = in_channels_a_group * self.groups
        out_channels = out_channels_a_group * self.groups
        # TODO(anaruse): increase test cases.
        ksize = 3
        stride = 2
        pad = ksize // stride * self.dilate
        self.strides = (stride, ) * ndim
        self.pads = (pad, ) * ndim
        self.dilations = (self.dilate, ) * ndim
        self.W = cupy.zeros(
            (out_channels, in_channels_a_group) + (ksize, ) * ndim, dtype)
        self.gy = cupy.zeros((batches, out_channels) + (2, ) * ndim, dtype)
        self.b = None
        if self.bias:
            self.b = cupy.zeros((in_channels, ), dtype)

        self.gx = cupy.ones((batches, in_channels) + (ksize, ) * ndim, dtype)

        version = libcudnn.getVersion()
        deterministic = self.deterministic
        self.err = None
        if ((self.dilate > 1 and version < 6000)
                or (self.groups > 1 and version < 7000)):
            self.err = ValueError
        elif (sys.platform.startswith('win32') and version == 7605
              and deterministic and self.dtype == numpy.float16
              and self.ndim == 3 and self.dilate == 2 and self.groups == 2):
            # see https://github.com/cupy/cupy/pull/4893
            self.err = RuntimeError
        elif deterministic and (
            (self.dilate > 1 and
             (ndim != 2 and version < 8100 or version < 7300)) or
            (ndim > 2 and version < 6000) or
            (ndim > 2 and self.dtype == numpy.float64 and version < 8100)):
            self.err = libcudnn.CuDNNError
        elif (8000 <= version < 8100
              and int(cupy.cuda.device.get_compute_capability()) < 70
              and self.dilate > 1 and self.groups > 1 and ndim > 2
              and self.dtype == numpy.float16):
            self.err = RuntimeError
        _workspace_size = cudnn.get_max_workspace_size()
        cudnn.set_max_workspace_size(self.max_workspace_size)
        yield
        cudnn.set_max_workspace_size(_workspace_size)
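
Note the yield inside setUp here (and in Example No. 7 below): the code after the yield is teardown that restores the workspace size. That only works when the method is registered as a fixture (for example a pytest autouse fixture); a plain unittest setUp would ignore everything after the yield. A minimal self-contained sketch of the pattern, independent of cuDNN:

import pytest


class TestSaveRestore:

    @pytest.fixture(autouse=True)
    def setUp(self):
        # save state, yield to the test, then restore afterwards
        self.value = 1
        yield
        self.value = None

    def test_value(self):
        assert self.value == 1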
Example No. 6
    def __init__(self):
        self.cupy_version = cupy.__version__

        self.cuda_path = cupy.cuda.get_cuda_path()
        self.cuda_build_version = cupy.cuda.driver.get_build_version()
        self.cuda_driver_version = cupy.cuda.runtime.driverGetVersion()
        self.cuda_runtime_version = cupy.cuda.runtime.runtimeGetVersion()

        if cudnn is not None:
            self.cudnn_build_version = cudnn.get_build_version()
            self.cudnn_version = cudnn.getVersion()

        if nccl is not None:
            self.nccl_build_version = nccl.get_version()
Example No. 7
    def setUp(self):
        ndim = self.ndim
        dtype = self.dtype
        batches = 2
        if self.layout == libcudnn.CUDNN_TENSOR_NHWC:
            # channel size must be multiple of 4
            in_channels_a_group = 4
            out_channels_a_group = 4
        else:
            in_channels_a_group = 3
            out_channels_a_group = 2
        in_channels = in_channels_a_group * self.groups
        out_channels = out_channels_a_group * self.groups
        # TODO(anaruse): increase test cases.
        ksize = 3
        stride = 2
        pad = ksize // stride * self.dilate
        self.strides = (stride, ) * ndim
        self.pads = (pad, ) * ndim
        self.dilations = (self.dilate, ) * ndim
        if self.layout == libcudnn.CUDNN_TENSOR_NHWC:
            self.x = cupy.zeros(
                (batches, ) + (ksize, ) * ndim + (in_channels, ), dtype)
            self.W = cupy.zeros(
                (out_channels, ) + (ksize, ) * ndim + (in_channels_a_group, ),
                dtype)
            self.y = cupy.ones((batches, ) + (2, ) * ndim + (out_channels, ),
                               dtype)
        else:
            self.x = cupy.zeros((batches, in_channels) + (ksize, ) * ndim,
                                dtype)
            self.W = cupy.zeros(
                (out_channels, in_channels_a_group) + (ksize, ) * ndim, dtype)
            self.y = cupy.ones((batches, out_channels) + (2, ) * ndim, dtype)
        self.b = None
        if self.bias:
            self.b = cupy.zeros((out_channels, ), dtype)

        version = libcudnn.getVersion()
        self.err = None
        if ((self.dilate > 1 and version < 6000)
                or (self.groups > 1 and version < 7000)):
            self.err = ValueError
        elif ndim > 2 and self.dilate > 1:
            self.err = libcudnn.CuDNNError
        _workspace_size = cudnn.get_max_workspace_size()
        cudnn.set_max_workspace_size(self.max_workspace_size)
        yield
        cudnn.set_max_workspace_size(_workspace_size)
Example No. 8
    def test_call(self):
        if self.layout == libcudnn.CUDNN_TENSOR_NHWC:
            version = libcudnn.getVersion()
            if self.groups > 1:
                # raise (not return) SkipTest, otherwise the test silently passes
                raise unittest.SkipTest()
            if self.dilate > 1 and version < 7300:
                raise unittest.SkipTest()
            if self.dtype is numpy.float64 and version < 7100:
                raise unittest.SkipTest()
        if self.err is None:
            self.call()
            self.assertTrue((self.y == 0).all())
        else:
            with self.assertRaises(self.err):
                self.call()
Example No. 9
    def test_call(self):
        if self.layout == libcudnn.CUDNN_TENSOR_NHWC:
            version = libcudnn.getVersion()
            if self.groups > 1:
                pytest.skip()
            if self.dilate > 1 and version < 7300:
                pytest.skip()
            if self.dtype is numpy.float64 and version < 7100:
                pytest.skip()
        if self.err is None:
            self.call()
            assert (self.y == 0).all()
        else:
            with pytest.raises(self.err):
                self.call()
Example No. 10
    def setUp(self):
        ndim = self.ndim
        dtype = self.dtype
        batches = 2
        in_channels_a_group = 3
        out_channels_a_group = 2
        in_channels = in_channels_a_group * self.groups
        out_channels = out_channels_a_group * self.groups
        # TODO(anaruse): increase test cases.
        ksize = 3
        stride = 2
        pad = ksize // stride * self.dilate
        self.strides = (stride,) * ndim
        self.pads = (pad,) * ndim
        self.dilations = (self.dilate,) * ndim
        self.W = cupy.zeros(
            (out_channels, in_channels_a_group) + (ksize,) * ndim, dtype)
        self.gy = cupy.zeros((batches, out_channels) + (2,) * ndim, dtype)
        self.b = None
        if self.bias:
            self.b = cupy.zeros((in_channels,), dtype)

        self.gx = cupy.ones(
            (batches, in_channels) + (ksize,) * ndim, dtype)

        version = libcudnn.getVersion()
        deterministic = self.deterministic
        self.err = None
        if ((self.dilate > 1 and version < 6000) or
                (self.groups > 1 and version < 7000)):
            self.err = ValueError
        elif deterministic and (
                (self.dilate > 1 and (ndim != 2 or version < 7300)) or
                (ndim > 2 and version < 6000) or
                (ndim > 2 and self.dtype == numpy.float64)):
            self.err = libcudnn.CuDNNError
        elif (8000 <= version and
              int(cupy.cuda.device.get_compute_capability()) < 70 and
              self.dilate > 1 and self.groups > 1 and ndim > 2 and
              self.dtype == numpy.float16):
            self.err = RuntimeError
        self._workspace_size = cudnn.get_max_workspace_size()
        cudnn.set_max_workspace_size(self.max_workspace_size)
Example No. 11
try:
    import cupy.cuda.cudnn as libcudnn
    cudnn_enabled = True
    modes = [
        libcudnn.CUDNN_ACTIVATION_SIGMOID,
        libcudnn.CUDNN_ACTIVATION_RELU,
        libcudnn.CUDNN_ACTIVATION_TANH,
    ]
    import cupy.cudnn
except ImportError:
    cudnn_enabled = False
    modes = []
from cupy import testing


@testing.parameterize(*testing.product({
    'dtype': [numpy.float32, numpy.float64],
    'mode': modes,
}))
@unittest.skipUnless(cudnn_enabled and libcudnn.getVersion() >= 3000,
                     'cuDNN >= 3.0 is supported')
class TestCudnnActivation(unittest.TestCase):
    def setUp(self):
        self.x = testing.shaped_arange((3, 4), cupy, self.dtype)
        self.y = testing.shaped_arange((3, 4), cupy, self.dtype)
        self.g = testing.shaped_arange((3, 4), cupy, self.dtype)

    def test_activation_forward_version(self):
        if libcudnn.getVersion() >= 4000:
            patch = 'cupy.cuda.cudnn.activationForward_v4'
        else:
            patch = 'cupy.cuda.cudnn.activationForward_v3'
        with mock.patch(patch) as func:
            cupy.cudnn.activation_forward(self.x, self.mode)
            self.assertEqual(func.called, True)
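
The tests above exercise a simple version-gated dispatch: use the _v4 entry point when the runtime cuDNN is new enough, otherwise fall back to _v3. Isolated as a sketch (the helper name is ours, not CuPy's):

def select_activation_forward(version):
    # newer cuDNN runtimes expose activationForward_v4; older ones only _v3
    if version >= 4000:
        return 'cupy.cuda.cudnn.activationForward_v4'
    return 'cupy.cuda.cudnn.activationForward_v3'

assert select_activation_forward(7605) == 'cupy.cuda.cudnn.activationForward_v4'
assert select_activation_forward(3007) == 'cupy.cuda.cudnn.activationForward_v3'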
Example No. 12
import atexit

import functools
import numpy
import operator
import six

import cupy
from cupy import cuda
from cupy.cuda import cudnn

_cudnn_version = cudnn.getVersion()

_handles = {}


def get_handle():
    global _handles
    device = cuda.Device()
    handle = _handles.get(device.id, None)
    if handle is None:
        handle = cudnn.create()
        _handles[device.id] = handle
    return handle


@atexit.register
def reset_handles():
    global _handles
    handles = _handles
    _handles = {}
    # assumption: the truncated tail destroys the saved handles at exit,
    # which is why they were captured into the local above
    for handle in six.itervalues(handles):
        cudnn.destroy(handle)
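
A quick usage sketch for the cache above, assuming a CUDA device and cuDNN are available: repeated calls on the same device return the same handle.

h1 = get_handle()
h2 = get_handle()
assert h1 == h2  # the per-device handle is created once and then reused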
Example No. 13
try:
    import cupy.cuda.cudnn as libcudnn
    cudnn_enabled = True
    modes = [
        libcudnn.CUDNN_ACTIVATION_SIGMOID,
        libcudnn.CUDNN_ACTIVATION_RELU,
        libcudnn.CUDNN_ACTIVATION_TANH,
    ]
    coef_modes = [
        libcudnn.CUDNN_ACTIVATION_CLIPPED_RELU,
    ]
    layouts = [
        libcudnn.CUDNN_TENSOR_NCHW,
        libcudnn.CUDNN_TENSOR_NHWC,
    ]
    if libcudnn.getVersion() >= 6000:
        coef_modes.append(libcudnn.CUDNN_ACTIVATION_ELU)

    from cupy import cudnn
except ImportError:
    cudnn_enabled = False
    modes = []
    coef_modes = []
    layouts = []

from cupy import testing


@testing.parameterize(*testing.product({
    'dtype': [numpy.float32, numpy.float64],
    'mode': modes,
}))
Example No. 14
try:
    import cupy.cuda.cudnn as libcudnn
    cudnn_enabled = True
    modes = [
        libcudnn.CUDNN_ACTIVATION_SIGMOID,
        libcudnn.CUDNN_ACTIVATION_RELU,
        libcudnn.CUDNN_ACTIVATION_TANH,
    ]
    coef_modes = [
        libcudnn.CUDNN_ACTIVATION_CLIPPED_RELU,
    ]
    layouts = [
        libcudnn.CUDNN_TENSOR_NCHW,
        libcudnn.CUDNN_TENSOR_NHWC,
    ]
    cudnn_version = libcudnn.getVersion()
    if cudnn_version >= 6000:
        coef_modes.append(libcudnn.CUDNN_ACTIVATION_ELU)

    from cupy import cudnn
except ImportError:
    cudnn_enabled = False
    cudnn_version = -1
    modes = []
    coef_modes = []
    layouts = []

from cupy import testing


@testing.parameterize(*testing.product({
Example No. 15
    def setUp(self):
        self.x = testing.shaped_arange((3, 4), cupy, self.dtype)
        self.y = testing.shaped_arange((3, 4), cupy, self.dtype)
        self.g = testing.shaped_arange((3, 4), cupy, self.dtype)

    def test_activation_forward(self):
        cupy.cudnn.activation_forward(self.x, self.mode)

    def test_activation_backward(self):
        cupy.cudnn.activation_backward(self.x, self.y, self.g, self.mode)


@testing.parameterize(*testing.product({
    'dtype': [numpy.float32, numpy.float64],
    'ratio': [0.0, 0.1, 0.2, 0.5],
    'seed': [0, 100]
}))
@unittest.skipUnless(
    cudnn_enabled and libcudnn.getVersion() >= 5000,
    'cuDNN >= 5.0 is supported')
class TestCudnnDropout(unittest.TestCase):

    def setUp(self):
        self.x = testing.shaped_arange((3, 4), cupy, self.dtype)
        self.gy = testing.shaped_arange((3, 4), cupy, self.dtype)
        self.states = cupy.cudnn.DropoutStates(
            cupy.cudnn.get_handle(), self.seed)

    def test_dropout_forward(self):
        _, y = self.states.forward(
            cupy.cudnn.get_handle(), self.x, self.ratio)
        if self.ratio == 0:
            self.assertTrue(cupy.all(self.x == y))
        else:
            # assumption: with a nonzero ratio at least one element is dropped
            self.assertTrue(cupy.any(self.x != y))
Example No. 16
import numpy

import cupy
try:
    import cupy.cuda.cudnn as libcudnn
    cudnn_enabled = True
    modes = [
        libcudnn.CUDNN_ACTIVATION_SIGMOID,
        libcudnn.CUDNN_ACTIVATION_RELU,
        libcudnn.CUDNN_ACTIVATION_TANH,
    ]
    coef_modes = [
        libcudnn.CUDNN_ACTIVATION_CLIPPED_RELU,
    ]
    if libcudnn.getVersion() >= 6000:
        coef_modes.append(libcudnn.CUDNN_ACTIVATION_ELU)

    import cupy.cudnn
except ImportError:
    cudnn_enabled = False
    modes = []
    coef_modes = []
from cupy import testing


@testing.parameterize(*testing.product({
    'dtype': [numpy.float32, numpy.float64],
    'mode': modes,
}))
@unittest.skipUnless(cudnn_enabled, 'cuDNN is not available')
Example No. 17
try:
    import cupy.cuda.cudnn as libcudnn
    cudnn_enabled = True
    modes = [
        libcudnn.CUDNN_ACTIVATION_SIGMOID,
        libcudnn.CUDNN_ACTIVATION_RELU,
        libcudnn.CUDNN_ACTIVATION_TANH,
    ]
    import cupy.cudnn
except ImportError:
    cudnn_enabled = False
    modes = []
from cupy import testing


@testing.parameterize(*testing.product({
    'dtype': [numpy.float32, numpy.float64],
    'mode': modes,
}))
@unittest.skipUnless(
    cudnn_enabled and libcudnn.getVersion() >= 3000,
    'cuDNN >= 3.0 is supported')
class TestCudnnActivation(unittest.TestCase):

    def setUp(self):
        self.x = testing.shaped_arange((3, 4), cupy, self.dtype)
        self.y = testing.shaped_arange((3, 4), cupy, self.dtype)
        self.g = testing.shaped_arange((3, 4), cupy, self.dtype)

    def test_activation_forward_version(self):
        if libcudnn.getVersion() >= 4000:
            patch = 'cupy.cuda.cudnn.activationForward_v4'
        else:
            patch = 'cupy.cuda.cudnn.activationForward_v3'
        with mock.patch(patch) as func:
            cupy.cudnn.activation_forward(self.x, self.mode)
            self.assertEqual(func.called, True)
Example No. 18
import atexit

import functools
import numpy
import operator
import six

import cupy
from cupy import cuda
from cupy.cuda import cudnn


_cudnn_version = cudnn.getVersion()

_handles = {}


def get_handle():
    dev = cuda.get_device_id()
    if dev in _handles:
        return _handles[dev]
    handle = cudnn.create()
    _handles[dev] = handle
    return handle


@atexit.register
def reset_handles():
    global _handles
    handles = _handles
    _handles = {}
    # assumption: the truncated tail destroys the saved handles at exit,
    # which is why they were captured into the local above
    for handle in six.itervalues(handles):
        cudnn.destroy(handle)