def __init__(self):
    """Allocate per-GPU buffers used by the collective test.

    Buffers: ``send1`` (ones) on GPU 0, ``send2`` (twos) and the
    zero-filled ``recv`` buffer on GPU 1.
    """
    with Device(0):
        self.send1 = cp.ones((4,), dtype=cp.float32)
    with Device(1):
        self.send2 = cp.ones((4,), dtype=cp.float32) * 2
        # NOTE(review): assumed `recv` is allocated on device 1 alongside
        # `send2` — confirm against the original (collapsed) formatting.
        self.recv = cp.zeros((4,), dtype=cp.float32)
def test_linspace_errors():
    """``linspace`` must reject unsupported ``device``/``dtype`` values."""
    rejected = (
        {"device": "cpu"},  # numpy's pick
        {"device": "gpu"},
        {"dtype": float},
        {"dtype": "f"},
    )
    for kwargs in rejected:
        assert_raises(ValueError, lambda kw=kwargs: linspace(0, 1, 10, **kw))
    # Passing the current device object is allowed.
    linspace(0, 1, 10, device=Device())  # on current device
def set_device(device_id):
    """Select the compute device (CPU or GPU).

    A non-negative ``device_id`` activates the corresponding GPU;
    a negative value keeps the CPU. On success, sets the module-level
    ``GPU_ENABLED`` flag and reloads ``pcanet`` so it picks up the
    new GPU status.
    """
    if device_id < 0:
        # Negative ID means "use CPU" — nothing to configure.
        return

    try:
        from cupy.cuda import Device
        from cupy.cuda.runtime import CUDARuntimeError
    except ImportError:
        print("Failed to import CuPy. Use CPU instead.")
        return

    try:
        Device(device_id).use()
    except CUDARuntimeError as err:
        # The device ID was out of range or CUDA is unusable; fall back to CPU.
        print(err)
        return

    print("Device {} is in use".format(device_id))

    global GPU_ENABLED
    GPU_ENABLED = True

    # Reload the module to reflect the GPU status
    import pcanet
    importlib.reload(pcanet)
def test_empty_like_errors():
    """``empty_like`` must reject unsupported ``device``/``dtype`` values."""
    rejected = (
        {"device": "cpu"},  # numpy's pick
        {"device": "gpu"},
        {"dtype": int},
        {"dtype": "i"},
    )
    for kwargs in rejected:
        assert_raises(ValueError, lambda kw=kwargs: empty_like(asarray(1), **kw))
    # Passing the current device object is allowed.
    empty_like(asarray(1), device=Device())  # on current device
def test_zeros_errors():
    """``zeros`` must reject unsupported ``device``/``dtype`` values."""
    rejected = (
        {"device": "cpu"},  # numpy's pick
        {"device": "gpu"},
        {"dtype": int},
        {"dtype": "i"},
    )
    for kwargs in rejected:
        assert_raises(ValueError, lambda kw=kwargs: zeros((1,), **kw))
    # Passing the current device object is allowed.
    zeros((1,), device=Device())  # on current device
def from_device_id(device_id):
    """Return a ``GpuDevice`` wrapping the CUDA device with this ID.

    Raises ``ValueError`` when ``device_id`` is not a non-negative integer.
    """
    check_cuda_available()
    is_valid_id = isinstance(device_id, _integer_types) and device_id >= 0
    if not is_valid_id:
        raise ValueError('Invalid CUDA device ID: {}'.format(device_id))
    return GpuDevice(Device(device_id))
def test_full_errors():
    """``full`` must reject unsupported ``device``/``dtype`` values."""
    rejected = (
        {"device": "cpu"},  # numpy's pick
        {"device": "gpu"},
        {"dtype": int},
        {"dtype": "i"},
    )
    for kwargs in rejected:
        assert_raises(ValueError, lambda kw=kwargs: full((1,), 0, **kw))
    # Passing the current device object is allowed.
    full((1,), 0, device=Device())  # on current device
def compute(self):
    """Rank 0 sends its buffer to rank 1; other ranks receive into recv2.

    Returns ``self.recv2`` on every rank.
    """
    if self.rank != 0:
        # NOTE(review): receives into ``self.recv2``, but the init shown
        # elsewhere in this file defines ``self.recv`` — confirm the
        # attribute name against the enclosing class.
        # with Device(1):
        collective.recv_multigpu(self.recv2, 0, 0, "8")
        return self.recv2
    with Device(0):
        collective.send_multigpu(self.send1 * 2, 1, 1, "8")
    return self.recv2
def from_device_id(device_id):
    """Returns a :class:`~chainer.backend.GpuDevice` corresponding \
to the CUDA device ID.
    """
    check_cuda_available()
    is_valid_id = isinstance(device_id, _integer_types) and device_id >= 0
    if not is_valid_id:
        raise ValueError('Invalid CUDA device ID: {}'.format(device_id))
    return GpuDevice(Device(device_id))
def get_device_from_id(device_id):
    """Gets the device from an ID integer.

    Args:
        device_id (int or None): The ID of the device which this function
            returns. ``None`` yields the CPU dummy device.
    """
    if device_id is None:
        return DummyDevice
    check_cuda_available()
    return Device(device_id)
def get_device_from_id(device_id: tp.Optional[int]) -> Device:
    """Gets the device from an ID integer.

    Args:
        device_id (int or None): The ID of the device which this function
            returns. ``None`` yields the CPU dummy device.
    """
    if device_id is None:
        return DummyDevice
    # CUDA availability only matters for real (non-negative) device IDs.
    if device_id >= 0:
        check_cuda_available()
    return Device(int(device_id))
def _get_device(*args):
    """Pick a device from the first recognizable argument.

    Integers (excluding bool) select a CUDA device by ID; arrays yield the
    device they live on; existing Device objects pass through. Falls back
    to ``DummyDevice`` when nothing matches.
    """
    for candidate in args:
        is_int = (type(candidate) is not bool
                  and isinstance(candidate, _integer_types))
        if is_int:
            check_cuda_available()
            return Device(candidate)
        if isinstance(candidate, ndarray) and candidate.device is not None:
            return candidate.device
        if available and isinstance(candidate, Device):
            return candidate
    return DummyDevice
def _get_cuda_device(*args):
    """Return a ``cuda.Device`` for the first recognizable argument,
    or ``DummyDevice`` when none matches.
    """
    for candidate in args:
        is_int = (type(candidate) is not bool
                  and isinstance(candidate, _integer_types))
        if is_int:
            check_cuda_available()
            return Device(candidate)
        if isinstance(candidate, ndarray) and candidate.device is not None:
            return candidate.device
        if available and isinstance(candidate, Device):
            return candidate
    # NOTE: This function returns DummyDevice for both NumPy and ChainerX
    return DummyDevice
def test_asarray_errors():
    """Various protections against incorrect ``asarray``/``Array`` usage."""
    rejected = [
        (TypeError, lambda: Array([1])),
        # (TypeError, lambda: asarray(["a"])),  # TODO(leofang): fix this?
        (ValueError, lambda: asarray([1.0], dtype=cp.float16)),
        (OverflowError, lambda: asarray(2**100)),
        # Preferably this would be OverflowError
        # (OverflowError, lambda: asarray([2**100])),
        # (TypeError, lambda: asarray([2**100])),  # TODO(leofang): fix this?
        (ValueError, lambda: asarray([1], device="cpu")),  # numpy's pick
        (ValueError, lambda: asarray([1], device="gpu")),
        (ValueError, lambda: asarray([1], dtype=int)),
        (ValueError, lambda: asarray([1], dtype="i")),
    ]
    for exc, thunk in rejected:
        assert_raises(exc, thunk)
    # Passing the current device object is allowed.
    asarray([1], device=Device())  # on current device
def get_device(*args):
    """Gets the device from a device object, an ID integer or an array object.

    .. note::

        This API is deprecated. Please use
        :method:`cupy.cuda.get_device_from_id`
        or :method:`cupy.cuda.get_device_from_array` instead.

    This is a convenient utility to select a correct device if the type of
    ``arg`` is unknown (i.e., one can use this function on arrays that may be
    on CPU or GPU). The returned device object supports the context management
    protocol of Python for the *with* statement.

    Args:
        args: Values to specify a GPU device. The first device object, integer
            or :class:`cupy.ndarray` object is used to select a device.
            If it is a device object, it is returned. If it is an integer,
            the corresponding device is returned. If it is a CuPy array,
            the device on which this array reside is returned. If any
            arguments are neither integers nor CuPy arrays, a dummy device
            object representing CPU is returned.

    Returns:
        Device object specified by given ``args``.

    .. seealso::
       See :class:`cupy.cuda.Device` for the device selection not by arrays.

    """
    warnings.warn(
        'get_device is deprecated. Please use get_device_from_id or'
        ' get_device_from_array instead.', DeprecationWarning)

    for candidate in args:
        if type(candidate) in _integer_types:
            check_cuda_available()
            return Device(candidate)
        if isinstance(candidate, ndarray):
            # Arrays without an assigned device are skipped.
            if candidate.device is not None:
                return candidate.device
        elif available and isinstance(candidate, Device):
            return candidate
    return DummyDevice
def create_context(self):
    """Return a fresh ``cuda.Device`` for this thread.

    A new instance is created because a single cuda.Device instance
    cannot be used across threads.
    """
    device_id = self.device.id
    return Device(device_id)
# Remaining CLI options (parser is created earlier in the file).
parser.add_argument('--num_sqrt_iters', type=int, default=6)
parser.add_argument('--scale', type=float, default=1.0)
parser.add_argument('--embedding_file', type=str, default=None)
parser.add_argument('--save_each', type=int, default=5)
parser.add_argument('--lbda', type=float, default=1E-2)
parser.add_argument(
    '--cn', type=float, default=1,
    help='the means to Bures coefficient for learning ellipse embeddings')
args = parser.parse_args()

# Verbose logging with timestamps for the whole run.
FORMAT = '%(asctime)-15s %(message)s'
logging.basicConfig(level=logging.DEBUG, format=FORMAT)

# Activate the requested GPU before any CuPy allocation.
Device(args.device).use()
logging.info("Build dataset")

# Create the output tree; an already-existing directory is fine,
# any other OSError is re-raised.
try:
    os.makedirs(args.output)
    os.makedirs(os.path.join(args.output, "embeddings"))
    os.makedirs(os.path.join(args.output, "figs"))
    os.makedirs(os.path.join(args.output, "losses"))
except OSError as exception:
    if exception.errno != errno.EEXIST:
        raise

data = Options(args.data, save_path=os.path.join(args.output, 'embeddings/vocab.txt'))
def test_eye_errors():
    """``eye`` must reject unsupported ``device``/``dtype`` values."""
    rejected = (
        {"device": "cpu"},  # numpy's pick
        {"device": "gpu"},
        {"dtype": int},
        {"dtype": "i"},
    )
    for kwargs in rejected:
        assert_raises(ValueError, lambda kw=kwargs: eye(1, **kw))
    # Passing the current device object is allowed.
    eye(1, device=Device())  # on current device