Example #1
    def mps(self):
        """Returns an MPS copy of this storage if it's not already on the MPS device."""
        if self.device.type != 'mps':
            return torch._UntypedStorage(self.size(),
                                         device="mps").copy_(self, False)
        else:
            return self
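
A minimal usage sketch for the method above, assuming a PyTorch build where the MPS backend is available (e.g. on Apple Silicon); the storage size and variable names are illustrative:

import torch

# Create a small CPU storage, then move it to the MPS device.
cpu_storage = torch._UntypedStorage(16)      # 16 uninitialized bytes on CPU
mps_storage = cpu_storage.mps()              # copies to device 'mps'
assert mps_storage.device.type == 'mps'
# Calling mps() on an MPS storage is a no-op that returns the same object.
assert mps_storage.mps() is mps_storage
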
Example #2
def _cuda(self, device=None, non_blocking=False, **kwargs):
    """Returns a copy of this object in CUDA memory.

    If this object is already in CUDA memory and on the correct device, then
    no copy is performed and the original object is returned.

    Args:
        device (int): The destination GPU id. Defaults to the current device.
        non_blocking (bool): If ``True`` and the source is in pinned memory,
            the copy will be asynchronous with respect to the host. Otherwise,
            the argument has no effect.
        **kwargs: For compatibility, may contain the key ``async`` in place of
            the ``non_blocking`` argument.
    """
    non_blocking = _get_async_or_non_blocking('cuda', non_blocking, kwargs)
    if self.is_cuda:
        if device is None:
            device = torch.cuda.current_device()
        if self.get_device() == device:
            return self
    else:
        if device is None:
            device = -1
    with torch.cuda.device(device):
        if self.is_sparse:
            new_type = getattr(torch.cuda.sparse, self.__class__.__name__)
            indices = torch.Tensor._indices(self).cuda(device, non_blocking)
            values = torch.Tensor._values(self).cuda(device, non_blocking)
            return new_type(indices, values, self.size())
        else:
            return torch._UntypedStorage(self.size(),
                                         device=torch.device('cuda')).copy_(
                                             self, non_blocking)
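
_cuda backs the storage .cuda() method (compare Example #3's obj.cuda(device) call). A hedged usage sketch, assuming a CUDA-enabled build; the storage size is illustrative:

import torch

# Copy a CPU storage to the current CUDA device.
storage = torch._UntypedStorage(64)          # 64 bytes on the CPU
cuda_storage = storage.cuda()                # copies to the current device
# Already on the right device: the original object is returned, no copy.
assert cuda_storage.cuda() is cuda_storage
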
Example #3
def _cuda_deserialize(obj, location):
    # Restores a storage serialized from a CUDA device. Returning None for
    # non-CUDA locations lets the next registered deserializer handle them.
    if location.startswith('cuda'):
        device = validate_cuda_device(location)
        if getattr(obj, "_torch_load_uninitialized", False):
            # Uninitialized storage: allocate directly on the target device
            # instead of copying from host memory.
            with torch.cuda.device(device):
                return torch._UntypedStorage(obj.nbytes(), device=torch.device(location))
        else:
            return obj.cuda(device)
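
For context, this deserializer is hooked into the location-based dispatch in torch/serialization.py via register_package; a sketch of that registration (the priority value follows the source of that era, and restore_location tries handlers in priority order until one returns a non-None result):

from torch.serialization import register_package

# _cuda_tag produces the 'cuda:N' location string on save;
# _cuda_deserialize (above) consumes it on load.
register_package(20, _cuda_tag, _cuda_deserialize)
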
Example #4
    def persistent_load(saved_id):
        assert isinstance(saved_id, tuple)
        typename = _maybe_decode_ascii(saved_id[0])
        data = saved_id[1:]

        if typename == 'module':
            # Ignore containers that don't have any sources saved
            if all(data[1:]):
                _check_container_source(*data)
            return data[0]
        elif typename == 'storage':
            storage_type, root_key, location, numel, view_metadata = data
            location = _maybe_decode_ascii(location)
            dtype = storage_type.dtype

            nbytes = numel * torch._utils._element_size(dtype)

            if root_key not in deserialized_objects:
                obj = cast(Storage, torch._UntypedStorage(nbytes))
                obj._torch_load_uninitialized = True
                # TODO: Once we decide to break serialization FC, we can
                # stop wrapping with _TypedStorage
                deserialized_objects[root_key] = torch.storage._TypedStorage(
                    wrap_storage=restore_location(obj, location), dtype=dtype)

            typed_storage = deserialized_objects[root_key]
            if view_metadata is not None:
                view_key, offset, view_size = view_metadata
                offset_bytes = offset * torch._utils._element_size(dtype)
                view_size_bytes = view_size * torch._utils._element_size(dtype)
                if view_key not in deserialized_objects:
                    # TODO: Once we decide to break serialization FC, we can
                    # stop wrapping with _TypedStorage
                    deserialized_objects[view_key] = torch.storage._TypedStorage(
                        wrap_storage=typed_storage._storage[
                            offset_bytes:offset_bytes + view_size_bytes],
                        dtype=dtype)
                res = deserialized_objects[view_key]

            else:
                res = typed_storage
            return res
        else:
            raise RuntimeError("Unknown saved id type: %s" % saved_id[0])
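
For context, persistent_load is attached to the unpickler that reads the legacy serialized file; a minimal sketch of that wiring, where f is the open file object and the other names come from the enclosing load function's scope:

import pickle

# The unpickler calls persistent_load for every persistent ID it
# encounters, which is how storages are materialized from outside
# the pickle stream.
unpickler = pickle.Unpickler(f)
unpickler.persistent_load = persistent_load
result = unpickler.load()
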
Example #5
    def __init__(self, *args, device=None, dtype=None, wrap_storage=None):
        arg_error_msg = (
            '_TypedStorage.__init__ received an invalid combination '
            'of arguments. Expected one of:\n'
            ' * (*, torch.device device, torch.dtype dtype)\n'
            ' * (int size, *, torch.device device, torch.dtype dtype)\n'
            ' * (Sequence data, *, torch.device device, torch.dtype dtype)\n'
            ' * (*, _UntypedStorage wrap_storage, torch.dtype dtype)')

        if wrap_storage is not None:
            if len(args) != 0:
                raise RuntimeError(
                    arg_error_msg +
                    "\nNo positional arguments should be given when using "
                    "'wrap_storage'")

            if dtype is None:
                raise RuntimeError(arg_error_msg +
                                   "\nArgument 'dtype' must be specified")

            if not isinstance(dtype, torch.dtype):
                raise TypeError(
                    arg_error_msg +
                    f"\nArgument 'dtype' must be torch.dtype, not {type(dtype)}"
                )

            if device is not None:
                raise RuntimeError(
                    arg_error_msg +
                    "\nArgument 'device' should not be specified when 'wrap_storage' is given"
                )

            self.dtype = dtype

            if not isinstance(wrap_storage, torch._UntypedStorage):
                raise TypeError(
                    arg_error_msg +
                    f"\nArgument 'wrap_storage' must be _UntypedStorage, but got {type(wrap_storage)}"
                )

            self._storage = wrap_storage

        else:
            self.dtype = torch.get_default_dtype() if dtype is None else dtype
            device = torch.device('cpu' if device is None else device)

            if self.dtype in [
                    torch.quint8, torch.quint4x2, torch.quint2x4, torch.qint32,
                    torch.qint8
            ]:
                if device.type == 'cuda':
                    raise RuntimeError(
                        "Cannot create CUDA storage with quantized dtype")

            if len(args) == 0:
                self._storage = torch._UntypedStorage(device=device)

            elif len(args) == 1:
                if _isint(args[0]):
                    self._storage = torch._UntypedStorage(
                        int(args[0]) * self.element_size(), device=device)
                elif isinstance(args[0], collections.abc.Sequence):
                    self._storage = _get_storage_from_sequence(
                        args[0], self.dtype, device)
                else:
                    raise TypeError(
                        arg_error_msg +
                        f"\nArgument type not recognized: {type(args[0])}")

            else:
                raise RuntimeError(arg_error_msg +
                                   "\nToo many positional arguments")