Code example #1
def get_xp(obj: Union[_NDArray, torch.Tensor]) -> Any:
    """Returns a module of ndarray implementation (`numpy` or `cupy`) for the
    given `obj`.

    The `obj` can be `torch.Tensor`, `torch.device` or NumPy/CuPy `ndarray`.
    """
    if isinstance(obj, torch.Tensor):
        devtype = obj.device.type
    elif isinstance(obj, torch.device):
        devtype = obj.type
    elif isinstance(obj, numpy.ndarray):
        devtype = 'cpu'
    elif isinstance(obj, cupy.ndarray):
        devtype = 'cuda'
    else:
        raise TypeError('expected torch.Tensor, torch.device, numpy.ndarray, '
                        f'or cupy.ndarray (got {type(obj).__name__})')

    if devtype == 'cpu':
        return numpy
    elif devtype == 'cuda':
        ensure_cupy()
        return cupy

    raise ValueError(f'unsupported device type: {devtype}')
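
A minimal usage sketch (hypothetical driver code; it assumes `get_xp` as defined above is in scope):

import numpy
import torch

# CPU-resident inputs resolve to the ``numpy`` module; CUDA tensors,
# devices, and CuPy arrays would resolve to ``cupy`` instead.
t = torch.zeros(3)
assert get_xp(t) is numpy
assert get_xp(numpy.arange(4)) is numpy
assert get_xp(torch.device('cpu')) is numpy

# Device-agnostic code can then dispatch array math through the result:
xp = get_xp(t)
y = xp.linspace(0.0, 1.0, num=5)   # numpy here; cupy for CUDA inputs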
Code example #2
def use_torch_mempool_in_cupy():
    """Use the PyTorch memory pool in CuPy.

    If you want to use PyTorch's memory pool with non-default CUDA streams,
    the streams must be created and managed through PyTorch (i.e., with
    `torch.cuda.Stream()` and `pytorch_pfn_extras.cuda.stream(stream)`).
    """
    global _allocator

    ensure_cupy()
    _allocator = cupy.cuda.memory.PythonFunctionAllocator(
        _torch_alloc, _torch_free)
    cupy.cuda.set_allocator(_allocator.malloc)
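
A minimal driver sketch following the convention stated in the docstring. It assumes a CUDA-capable environment with compatible PyTorch and CuPy builds, and that these helpers are exposed as `pytorch_pfn_extras.cuda.*` (as the docstring's reference to `pytorch_pfn_extras.cuda.stream(stream)` suggests):

import cupy
import torch
import pytorch_pfn_extras as ppe

# Route CuPy allocations through PyTorch's caching allocator.
ppe.cuda.use_torch_mempool_in_cupy()

# Non-default streams are created on the PyTorch side and activated via
# ppe.cuda.stream so that both libraries see the same current stream.
stream = torch.cuda.Stream()
with ppe.cuda.stream(stream):
    a = cupy.arange(16, dtype=cupy.float32)  # allocated from the PyTorch pool
    t = torch.ones(16, device='cuda')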
Code example #3
def as_ndarray(tensor: torch.Tensor) -> _NDArray:
    """Creates a `numpy.ndarray` or `cupy.ndarray` from `torch.Tensor`.

    This method returns a tensor as a NumPy or CuPy ndarray depending on where
    the given `tensor` resides in. The `tensor` and the returned `ndarray`
    share the same underlying storage. Changes to the tensor will be reflected
    in the `ndarray` and vice versa. Note that changes made to `ndarray`
    cannot be tracked in the computational graph.
    """
    devtype = tensor.device.type
    if devtype == 'cpu':
        return tensor.detach().numpy()
    elif devtype == 'cuda':
        ensure_cupy()
        return cupy.fromDlpack(torch.utils.dlpack.to_dlpack(tensor))
    raise ValueError(f'Tensor is on unsupported device: {devtype}')
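
A short sketch of the zero-copy behavior on the CPU path (assuming `as_ndarray` as defined above is in scope):

import torch

t = torch.zeros(3)
arr = as_ndarray(t)          # numpy.ndarray sharing storage with ``t``
arr[0] = 1.0                 # a write through the ndarray...
assert t[0].item() == 1.0    # ...is visible through the tensor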
Code example #4
def as_ndarray(tensor: torch.Tensor) -> _NDArray:
    """Creates a `numpy.ndarray` or `cupy.ndarray` from `torch.Tensor`.

    This method returns a tensor as a NumPy or CuPy ndarray depending on where
    the given `tensor` resides in. The `tensor` and the returned `ndarray`
    share the same underlying storage. Changes to the tensor will be reflected
    in the `ndarray` and vice versa. Note that changes made to `ndarray`
    cannot be tracked in the computational graph.
    """
    devtype = tensor.device.type
    if devtype == 'cpu':
        return tensor.detach().numpy()
    elif devtype == 'cuda':
        ensure_cupy()
        if hasattr(cupy, 'from_dlpack'):
            # TODO: Avoid using ``torch.utils.dlpack.to_dlpack``.
            # => return cupy.from_dlpack(tensor)
            # Blocked by PyTorch 1.10 bug
            # (https://github.com/pytorch/pytorch/pull/67618)
            return cupy.from_dlpack(torch.utils.dlpack.to_dlpack(tensor))
        return cupy.fromDlpack(torch.utils.dlpack.to_dlpack(tensor))
    raise ValueError(f'Tensor is on unsupported device: {devtype}')
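
A hedged sketch of the CUDA path (requires a GPU with compatible PyTorch and CuPy installs); the DLPack exchange keeps both views on the same device memory:

import torch

t = torch.zeros(3, device='cuda')
arr = as_ndarray(t)          # cupy.ndarray backed by the same device memory
arr += 1                     # update through CuPy
torch.cuda.synchronize()
assert bool((t == 1).all())  # the change is visible through the PyTorch tensor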
Code example #5
def use_default_mempool_in_cupy():
    """Use the default memory pool in CuPy."""
    ensure_cupy()
    cupy.cuda.set_allocator(cupy.get_default_memory_pool().malloc)
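
A brief, illustrative sketch combining the two pool switches (assumes a CUDA-capable setup and the functions defined in examples #2 and #5):

import cupy

use_torch_mempool_in_cupy()        # CuPy allocates from the PyTorch pool
try:
    scratch = cupy.ones((1024, 1024), dtype=cupy.float32)
    # ... temporary CuPy work backed by PyTorch-managed memory ...
finally:
    use_default_mempool_in_cupy()  # restore CuPy's own memory pool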