Example No. 1
def broadcast_all(*values):
    r"""
    Given a list of values (possibly containing numbers), returns a list where each
    value is broadcasted based on the following rules:
      - `torch.*Tensor` instances are broadcasted as per :ref:`_broadcasting-semantics`.
      - numbers.Number instances (scalars) are upcast to tensors having
        the same size and type as the first tensor passed to `values`.  If all the
        values are scalars, then they are upcast to scalar Tensors.

    Args:
        values (list of `numbers.Number`, `torch.*Tensor` or objects implementing __torch_function__)

    Raises:
        ValueError: if any of the values is not a `numbers.Number` instance,
            a `torch.*Tensor` instance, or an instance implementing __torch_function__
    """
    if not all(isinstance(v, torch.Tensor) or has_torch_function((v,)) or isinstance(v, Number)
               for v in values):
        raise ValueError('Input arguments must all be instances of numbers.Number, '
                         'torch.Tensor or objects implementing __torch_function__.')
    if not all([isinstance(v, torch.Tensor) or has_torch_function((v,)) for v in values]):
        options: Dict[str, Any] = dict(dtype=torch.get_default_dtype())
        for value in values:
            if isinstance(value, torch.Tensor):
                options = dict(dtype=value.dtype, device=value.device)
                break
        new_values = [v if isinstance(v, torch.Tensor) or has_torch_function((v,)) else torch.tensor(v, **options)
                      for v in values]
        return torch.broadcast_tensors(*new_values)
    return torch.broadcast_tensors(*values)
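
A minimal usage sketch (not part of the original listing), assuming the helper above is the one PyTorch ships as torch.distributions.utils.broadcast_all:

import torch
from torch.distributions.utils import broadcast_all

mean = torch.zeros(2, 3)
std = 1.0                          # plain Python scalar
mean_b, std_b = broadcast_all(mean, std)
print(mean_b.shape, std_b.shape)   # torch.Size([2, 3]) torch.Size([2, 3])
print(std_b.dtype == mean.dtype)   # True: the scalar was upcast to the first tensor's dtype
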
Example No. 2
def foo(a, b, c=None):
    """A function multiple arguments and an optional argument"""
    if any(type(t) is not Tensor for t in (a, b, c)) and has_torch_function((a, b, c)):
        return handle_torch_function(foo, (a, b, c), a, b, c=c)
    if c:
        return a + b + c
    return a + b
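
The dispatch above (and its variant in Example No. 15) only takes effect when some argument defines __torch_function__. A self-contained sketch; foo is redefined here and the Scalar wrapper is hypothetical:

import torch
from torch.overrides import has_torch_function, handle_torch_function

def foo(a, b, c=None):
    """A function with multiple arguments and an optional argument."""
    if has_torch_function((a, b, c)):
        return handle_torch_function(foo, (a, b, c), a, b, c=c)
    return a + b + c if c is not None else a + b

class Scalar:
    """Hypothetical wrapper that opts into the __torch_function__ protocol."""
    def __init__(self, value):
        self.value = value

    @classmethod
    def __torch_function__(cls, func, types, args=(), kwargs=None):
        kwargs = kwargs or {}

        def unwrap(x):
            return x.value if isinstance(x, Scalar) else x

        # Unwrap our objects and re-invoke the public function on plain values.
        return func(*[unwrap(a) for a in args],
                    **{k: unwrap(v) for k, v in kwargs.items()})

print(foo(Scalar(1.0), torch.ones(2)))   # tensor([2., 2.]) via Scalar.__torch_function__
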
Example No. 3
    def unflatten(self, dim, namedshape):
        r"""Unflattens the named dimension :attr:`dim`, viewing it in the shape
        specified by :attr:`namedshape`.

        Arguments:
            dim: the named dimension to unflatten.
            namedshape: iterable of ``(name, size)`` tuples giving the new dimensions.

        Examples::

            >>> flat_imgs = torch.rand(32, 3 * 128 * 128, names=('N', 'features'))
            >>> imgs = flat_imgs.unflatten('features', (('C', 3), ('H', 128), ('W', 128)))
            >>> imgs.names, imgs.shape
            (('N', 'C', 'H', 'W'), torch.Size([32, 3, 128, 128]))

        .. warning::
            The named tensor API is experimental and subject to change.

        """
        relevant_args = (self, )
        from torch.overrides import has_torch_function, handle_torch_function
        if type(self) is not Tensor and has_torch_function(relevant_args):
            return handle_torch_function(Tensor.unflatten, relevant_args, self,
                                         dim, namedshape)
        names, sizes = unzip_namedshape(namedshape)
        return super(Tensor, self).unflatten(dim, sizes, names)
Example No. 4
 def istft(self,
           n_fft,
           hop_length=None,
           win_length=None,
           window=None,
           center=True,
           normalized=False,
           onesided=True,
           length=None):
     r"""See :func:`torch.istft`"""
     relevant_args = (self, )
     from torch.overrides import has_torch_function, handle_torch_function
     if type(self) is not Tensor and has_torch_function(relevant_args):
         return handle_torch_function(Tensor.istft,
                                      relevant_args,
                                      self,
                                      n_fft,
                                      hop_length=hop_length,
                                      win_length=win_length,
                                      window=window,
                                      center=center,
                                      normalized=normalized,
                                      onesided=onesided,
                                       length=length)
     return torch.istft(self, n_fft, hop_length, win_length, window, center,
                        normalized, onesided, length)
Example No. 5
    def stft(self,
             n_fft,
             hop_length=None,
             win_length=None,
             window=None,
             center=True,
             pad_mode='reflect',
             normalized=False,
             onesided=True):
        r"""See :func:`torch.stft`

        .. warning::
          This function changed signature at version 0.4.1. Calling with
          the previous signature may cause an error or return an incorrect result.
        """
        relevant_args = (self, )
        from torch.overrides import has_torch_function, handle_torch_function
        if type(self) is not Tensor and has_torch_function(relevant_args):
            return handle_torch_function(Tensor.stft,
                                         relevant_args,
                                         self,
                                         n_fft,
                                         hop_length=hop_length,
                                         win_length=win_length,
                                         window=window,
                                         center=center,
                                         pad_mode=pad_mode,
                                         normalized=normalized,
                                         onesided=onesided)
        return torch.stft(self, n_fft, hop_length, win_length, window, center,
                          pad_mode, normalized, onesided)
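
A hedged round-trip sketch covering Examples No. 4 and 5; note that recent PyTorch releases require return_complex=True for torch.stft, which the snippets above predate:

import torch

signal = torch.randn(4000)
window = torch.hann_window(400)
spec = torch.stft(signal, n_fft=400, hop_length=100, window=window,
                  return_complex=True)
recon = torch.istft(spec, n_fft=400, hop_length=100, window=window,
                    length=signal.shape[-1])
print(torch.allclose(signal, recon, atol=1e-4))   # True: near-exact reconstruction
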
Example No. 6
def linear(input, weight, bias=None):
    # type: (Tensor, Tensor, Optional[Tensor]) -> Tensor
    r"""
    Applies a linear transformation to the incoming data: :math:`y = xA^T + b`.
    This operator supports :ref:`TensorFloat32<tf32_on_ampere>`.
    Shape:
        - Input: :math:`(N, *, in\_features)` where `N` is the batch size and `*` means any number of
          additional dimensions
        - Weight: :math:`(out\_features, in\_features)`
        - Bias: :math:`(out\_features)`
        - Output: :math:`(N, *, out\_features)`
    """
    tens_ops = (input, weight)
    if not torch.jit.is_scripting():
        if any([type(t) is not Tensor
                for t in tens_ops]) and has_torch_function(tens_ops):
            return handle_torch_function(linear,
                                         tens_ops,
                                         input,
                                         weight,
                                         bias=bias)
    if input.dim() == 2 and bias is not None:
        # fused op is marginally faster
        ret = torch.addmm(bias, input, weight.t())
    else:
        output = input.matmul(weight.t())
        if bias is not None:
            output += bias
        ret = output
    return ret
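
A quick call sketch through the public torch.nn.functional.linear, checking it against the unfused formula from the docstring:

import torch
import torch.nn.functional as F

x = torch.randn(8, 20)    # (N, in_features)
w = torch.randn(30, 20)   # (out_features, in_features)
b = torch.randn(30)
y = F.linear(x, w, b)
print(y.shape)                                       # torch.Size([8, 30])
print(torch.allclose(y, x @ w.t() + b, atol=1e-4))   # matches y = xA^T + b
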
Example No. 7
def softmax(input, dim=None, _stacklevel=3, dtype=None):
    # type: (Tensor, Optional[int], int, Optional[int]) -> Tensor
    r"""Applies a softmax function.
    Softmax is defined as:
    :math:`\text{Softmax}(x_{i}) = \frac{\exp(x_i)}{\sum_j \exp(x_j)}`
    It is applied to all slices along dim, and will re-scale them so that the elements
    lie in the range `[0, 1]` and sum to 1.
    See :class:`~torch.nn.Softmax` for more details.
    Args:
        input (Tensor): input
        dim (int): A dimension along which softmax will be computed.
        dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor.
          If specified, the input tensor is cast to :attr:`dtype` before the operation
          is performed. This is useful for preventing data type overflows. Default: None.
    .. note::
        This function doesn't work directly with NLLLoss,
        which expects log-probabilities as input (i.e. the log should be applied
        between the softmax and the loss).
        Use log_softmax instead (it's faster and has better numerical properties).
    """
    if not torch.jit.is_scripting():
        if type(input) is not Tensor and has_torch_function((input, )):
            return handle_torch_function(softmax, (input, ),
                                         input,
                                         dim=dim,
                                         _stacklevel=_stacklevel,
                                         dtype=dtype)
    if dim is None:
        dim = _get_softmax_dim('softmax', input.dim(), _stacklevel)
    if dtype is None:
        ret = input.softmax(dim)
    else:
        ret = input.softmax(dim, dtype=dtype)
    return ret
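
A short usage sketch via the public torch.nn.functional.softmax:

import torch
import torch.nn.functional as F

logits = torch.tensor([[1.0, 2.0, 3.0],
                       [0.0, 0.0, 0.0]])
probs = F.softmax(logits, dim=-1)
print(probs)                  # every entry lies in [0, 1]
print(probs.sum(dim=-1))      # tensor([1., 1.]): each slice along dim sums to 1
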
Example No. 8
def dropout(input, p=0.5, training=True, inplace=False):
    # type: (Tensor, float, bool, bool) -> Tensor
    r"""
    During training, randomly zeroes some of the elements of the input
    tensor with probability :attr:`p` using samples from a Bernoulli
    distribution.

    See :class:`~torch.nn.Dropout` for details.

    Args:
        p: probability of an element to be zeroed. Default: 0.5
        training: apply dropout if ``True``. Default: ``True``
        inplace: If set to ``True``, will do this operation in-place. Default: ``False``
    """
    if not torch.jit.is_scripting():
        if type(input) is not Tensor and has_torch_function((input, )):
            return handle_torch_function(dropout, (input, ),
                                         input,
                                         p=p,
                                         training=training,
                                         inplace=inplace)
    if p < 0. or p > 1.:
        raise ValueError("dropout probability has to be between 0 and 1, "
                         "but got {}".format(p))
    return (_VF.dropout_(input, p, training) if inplace else _VF.dropout(
        input, p, training))
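
A short usage sketch via torch.nn.functional.dropout, showing the train/eval difference:

import torch
import torch.nn.functional as F

x = torch.ones(10)
print(F.dropout(x, p=0.5, training=True))    # roughly half the entries zeroed, survivors scaled to 2.0
print(F.dropout(x, p=0.5, training=False))   # identity when training=False
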
Example No. 9
 def __reduce_ex__(self, proto):
     relevant_args = (self,)
     from torch.overrides import has_torch_function, handle_torch_function
     if type(self) is not Tensor and has_torch_function(relevant_args):
         return handle_torch_function(Tensor.__reduce_ex__, relevant_args, self, proto)
     func, args = self._reduce_ex_internal(proto)
     return (_rebuild_from_type, (func, type(self), args, self.__dict__))
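
A minimal sketch of what this hook enables: pickling a tensor round-trips through Tensor.__reduce_ex__:

import pickle
import torch

t = torch.arange(6.).reshape(2, 3)
t2 = pickle.loads(pickle.dumps(t))   # serialization goes through __reduce_ex__
print(torch.equal(t, t2))            # True
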
Example No. 10
    def lu(self, pivot=True, get_infos=False):
        r"""See :func:`torch.lu`"""
        # If get_infos is True, then we don't need to check for errors and vice versa
        relevant_args = (self,)
        from torch.overrides import has_torch_function, handle_torch_function
        if type(self) is not Tensor and has_torch_function(relevant_args):
            return handle_torch_function(Tensor.lu, relevant_args, self, pivot=pivot, get_infos=get_infos)

        if not torch._jit_internal.is_scripting():
            if self.requires_grad:
                if not (self.size(-2) == self.size(-1) and self.dtype.is_floating_point):
                    raise ValueError(
                        'lu.backward works only with batches of square full-rank matrices'
                        ' of floating types.'
                    )

                from torch._autograd_functions import _LU
                LU, pivots, infos = _LU.apply(self, pivot, get_infos)
                if get_infos:
                    return LU, pivots, infos
                else:
                    return LU, pivots
        else:
            if self.requires_grad:
                raise RuntimeError(
                    'Scripting with gradients is not supported at the moment. '
                    'If you just want to do the forward, use .detach() '
                    'on the input before calling the function.'
                )

        LU, pivots, infos = torch._lu_with_info(self, pivot=pivot, check_errors=(not get_infos))
        if get_infos:
            return LU, pivots, infos
        else:
            return LU, pivots
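
A usage sketch matching this snippet's era (Tensor.lu has since been deprecated in favour of torch.linalg.lu_factor, so newer releases may warn):

import torch

A = torch.randn(3, 3)
LU, pivots = A.lu()                      # pivot=True, get_infos=False by default
P, L, U = torch.lu_unpack(LU, pivots)    # recover the explicit factors
print(torch.allclose(P @ L @ U, A, atol=1e-5))   # True: A is reconstructed
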
Example No. 11
 def norm(self, p="fro", dim=None, keepdim=False, dtype=None):
     r"""See :func:`torch.norm`"""
     relevant_args = (self,)
     from torch.overrides import has_torch_function, handle_torch_function
     if type(self) is not Tensor and has_torch_function(relevant_args):
         return handle_torch_function(Tensor.norm, relevant_args, self, p=p, dim=dim, keepdim=keepdim, dtype=dtype)
     return torch.norm(self, p, dim, keepdim, dtype=dtype)
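
A quick call sketch:

import torch

m = torch.tensor([[3.0, 4.0],
                  [0.0, 0.0]])
print(m.norm())              # Frobenius norm by default: tensor(5.)
print(m.norm(p=1, dim=1))    # per-row L1 norms: tensor([7., 0.])
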
Example No. 12
 def wrapped(*args, **kwargs):
     if has_torch_function(args):
         return handle_torch_function(wrapped, args, *args, **kwargs)
     try:
         return f(*args, **kwargs)
     except TypeError:
         return NotImplemented
Example No. 13
 def __repr__(self):
     relevant_args = (self, )
     from torch.overrides import has_torch_function, handle_torch_function
     if type(self) is not Tensor and has_torch_function(relevant_args):
         return handle_torch_function(Tensor.__repr__, relevant_args, self)
     # All strings are unicode in Python 3.
     return torch._tensor_str._str(self)
Example No. 14
    def retain_grad(self):
        r"""Enables .grad attribute for non-leaf Tensors."""
        relevant_args = (self, )
        from torch.overrides import has_torch_function, handle_torch_function
        if type(self) is not Tensor and has_torch_function(relevant_args):
            return handle_torch_function(Tensor.retain_grad, relevant_args,
                                         self)
        if not self.requires_grad:
            raise RuntimeError(
                "can't retain_grad on Tensor that has requires_grad=False")
        if self.is_leaf:  # no-op for leaves
            return
        if hasattr(self, 'retains_grad'):
            return
        weak_self = weakref.ref(self)

        def retain_grad_hook(grad):
            var = weak_self()
            if var is None:
                return
            if var._grad is None:
                if grad.is_sparse:
                    var._grad = grad.clone()
                else:
                    var._grad = grad.clone(
                        memory_format=torch.contiguous_format)
            else:
                var._grad = var._grad + grad

        self.register_hook(retain_grad_hook)
        self.retains_grad = True
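
A minimal usage sketch showing why retain_grad exists: non-leaf tensors normally discard their gradient after backward:

import torch

x = torch.ones(3, requires_grad=True)
y = x * 2            # non-leaf: y.grad would normally stay None
y.retain_grad()
y.sum().backward()
print(y.grad)        # tensor([1., 1., 1.])
print(x.grad)        # tensor([2., 2., 2.])
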
Example No. 15
def foo(a, b, c=None):
    """A function multiple arguments and an optional argument"""
    if has_torch_function((a, b, c)):
        return handle_torch_function(foo, (a, b, c), a, b, c=c)
    if c:
        return a + b + c
    return a + b
Example No. 16
 def grad(self):
     relevant_args = (self, )
     from torch.overrides import has_torch_function, handle_torch_function
     if type(self) is not Tensor and has_torch_function(relevant_args):
         return handle_torch_function(Tensor.grad.__delete__, relevant_args,
                                      self)
     del self._grad
Example No. 17
 def grad(self):
     relevant_args = (self,)
     from torch.overrides import has_torch_function, handle_torch_function
     if type(self) is not Tensor and has_torch_function(relevant_args):
         # TODO mypy doesn't support @property, see: https://github.com/python/mypy/issues/6185
         return handle_torch_function(Tensor.grad.__delete__, relevant_args, self)  # type: ignore[attr-defined]
     del self._grad
Example No. 18
 def __ipow__(self, other):
     relevant_args = (self, other)
     from torch.overrides import has_torch_function, handle_torch_function
      if type(self) is not Tensor and type(other) is not Tensor and has_torch_function(relevant_args):
         return handle_torch_function(Tensor.__ipow__, relevant_args, self,
                                      other)
     return NotImplemented
Example No. 19
 def __rsub__(self, other):
     relevant_args = (self, other)
     from torch.overrides import has_torch_function, handle_torch_function
      if type(self) is not Tensor and type(other) is not Tensor and has_torch_function(relevant_args):
         return handle_torch_function(Tensor.__rsub__, relevant_args, self,
                                      other)
     return _C._VariableFunctions.rsub(self, other)
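
A one-line sketch of when this reflected method fires:

import torch

t = torch.tensor([1.0, 2.0, 3.0])
print(5 - t)    # Python routes this through Tensor.__rsub__: tensor([4., 3., 2.])
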
Example No. 20
 def __format__(self, format_spec):
     relevant_args = (self,)
     from torch.overrides import has_torch_function, handle_torch_function
     if type(self) is not Tensor and has_torch_function(relevant_args):
         return handle_torch_function(Tensor.__format__, relevant_args, self, format_spec)
     if self.dim() == 0:
         return self.item().__format__(format_spec)
     return object.__format__(self, format_spec)
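
A short sketch of the 0-d branch:

import torch

pi = torch.tensor(3.14159)
print(f"{pi:.2f}")   # 0-d tensors defer to the scalar's __format__ and print 3.14
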
Example No. 21
 def resize_as(self, tensor):
     relevant_args = (self, tensor)
     from torch.overrides import has_torch_function, handle_torch_function
     if type(self) is not Tensor and type(tensor) is not Tensor and has_torch_function(relevant_args):
         return handle_torch_function(Tensor.resize_as, relevant_args, self, tensor)
     warnings.warn("non-inplace resize_as is deprecated")
     from torch.autograd._functions import Resize
     return Resize.apply(self, tensor.size())
Example No. 22
 def __len__(self):
     relevant_args = (self, )
     from torch.overrides import has_torch_function, handle_torch_function
     if type(self) is not Tensor and has_torch_function(relevant_args):
         return handle_torch_function(Tensor.__len__, relevant_args, self)
     if self.dim() == 0:
         raise TypeError("len() of a 0-d tensor")
     return self.shape[0]
Example No. 23
 def wrapped(*args, **kwargs):
     from torch.overrides import has_torch_function, handle_torch_function
     if not all(type(t) is Tensor for t in args) and has_torch_function(args):
         return handle_torch_function(wrapped, args, *args, **kwargs)
     try:
         return f(*args, **kwargs)
     except TypeError:
         return NotImplemented
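
Examples No. 12 and 23 are the inner function of a decorator factory. A hedged sketch of how such a factory might be assembled (the name _to_not_implemented is hypothetical, not PyTorch's own):

import functools

from torch.overrides import has_torch_function, handle_torch_function

def _to_not_implemented(f):
    """Add __torch_function__ dispatch to f and turn TypeError into
    NotImplemented so Python can fall back to the reflected operator."""
    @functools.wraps(f)
    def wrapped(*args, **kwargs):
        if has_torch_function(args):
            return handle_torch_function(wrapped, args, *args, **kwargs)
        try:
            return f(*args, **kwargs)
        except TypeError:
            return NotImplemented
    return wrapped
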
Example No. 24
 def __reduce_ex__(self, proto):
     relevant_args = (self, )
     from torch.overrides import has_torch_function, handle_torch_function
     if type(self) is not Tensor and has_torch_function(relevant_args):
         return handle_torch_function(Tensor.__reduce_ex__, relevant_args,
                                      self, proto)
     check_serializing_named_tensor(self)
     # See Note [Don't serialize hooks]
     torch.utils.hooks.warn_if_has_hooks(self)
     # Note: Numpy array is chosen to be the rebuild component for XLA Tensor.
     # We considered a few options:
     # 1. CPU tensor can't be used here.
     #    Otherwise in torch.load CPU storage is reconstructed with randomly
     #    initialized data, moved onto XLA device, and then storage is updated
     #    to the serialized content. This works perfectly for CPU/CUDA but not XLA.
     #    XLA tensor is disconnected with storage so it doesn't get the update.
     # 2. Python list is not a good fit due to performance reason.
     #    `tolist()` converts every single element in the tensor into python objects
     #    and serialize them one by one.
     if self.device.type == 'xla':
         args = (self.cpu().numpy(), self.dtype, str(self.device),
                 self.requires_grad)
         return (torch._utils._rebuild_xla_tensor, args)
     if self.is_quantized:
         if self.qscheme() == torch.per_tensor_affine:
             quantizer_params = (torch.per_tensor_affine, self.q_scale(),
                                 self.q_zero_point())
         elif self.qscheme() in (torch.per_channel_affine,
                                 torch.per_channel_affine_float_qparams):
             # convert scales and zero points to tuple to avoid recursive calls
             # when/if we get multi-axis quantized tensors in the future, the shape
             # is recoverable from the main tensor shape
             quantizer_params = (torch.per_channel_affine,
                                 self.q_per_channel_scales(),
                                 self.q_per_channel_zero_points(),
                                 self.q_per_channel_axis())
         else:
             raise RuntimeError(
                 f"Serialization is not supported for tensors of type {self.qscheme()}"
             )
         args = (self.storage(), self.storage_offset(), tuple(self.size()),
                 self.stride(), quantizer_params, self.requires_grad,
                 OrderedDict())
         return (torch._utils._rebuild_qtensor, args)
     elif self.is_sparse:
         if self.layout == torch.sparse_coo:
             args = (self.layout, (self._indices(), self._values(),
                                   self.size()))
         else:
             raise NotImplementedError(
                 'sparse tensor __reduce_ex__ for layout `%s`' %
                 (self.layout))
         return (torch._utils._rebuild_sparse_tensor, args)
     else:
         args = (self.storage(), self.storage_offset(), tuple(self.size()),
                 self.stride(), self.requires_grad, OrderedDict()
                 )  # previously was self._backward_hooks
         return (torch._utils._rebuild_tensor_v2, args)
Example No. 25
 def __array_wrap__(self, array):
     relevant_args = (self,)
     from torch.overrides import has_torch_function, handle_torch_function
     if type(self) is not Tensor and has_torch_function(relevant_args):
         return handle_torch_function(Tensor.__array_wrap__, relevant_args, self, array=array)
     if array.dtype == bool:
         # Workaround, torch has no built-in bool tensor
         array = array.astype('uint8')
     return torch.from_numpy(array)
Example No. 26
 def __array__(self, dtype=None):
     relevant_args = (self,)
     from torch.overrides import has_torch_function, handle_torch_function
     if type(self) is not Tensor and has_torch_function(relevant_args):
         return handle_torch_function(Tensor.__array__, relevant_args, self, dtype=dtype)
     if dtype is None:
         return self.numpy()
     else:
         return self.numpy().astype(dtype, copy=False)
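
A short interop sketch covering Examples No. 25 and 26; the exact behaviour depends on the NumPy/PyTorch combination, so treat it as an assumption rather than a guarantee:

import numpy as np
import torch

t = torch.arange(3.)
a = np.asarray(t)                 # NumPy obtains the data via Tensor.__array__
print(type(a), a.dtype)           # <class 'numpy.ndarray'> float32

# Ufunc results are handed back through Tensor.__array_wrap__, so for CPU
# tensors the output typically comes back as a torch.Tensor.
print(type(np.negative(t)))
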
Example No. 27
 def __rdiv__(self, other):
     relevant_args = (self, other)
     from torch.overrides import has_torch_function, handle_torch_function
     if type(self) is not Tensor and type(other) is not Tensor and has_torch_function(relevant_args):
         return handle_torch_function(Tensor.__rdiv__, relevant_args, self, other)
     if self.dtype.is_floating_point or self.dtype.is_complex:
         return self.reciprocal() * other
     else:
         return (self.double().reciprocal() * other).type_as(self)
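
A one-line sketch of the reflected-division path:

import torch

t = torch.tensor([1.0, 2.0, 4.0])
print(2 / t)    # falls back to Tensor.__rdiv__/__rtruediv__: tensor([2.0000, 1.0000, 0.5000])
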
Example No. 28
 def __reversed__(self):
     r"""Reverses the tensor along dimension 0."""
     relevant_args = (self,)
     from torch.overrides import has_torch_function, handle_torch_function
     if type(self) is not Tensor and has_torch_function(relevant_args):
         return handle_torch_function(Tensor.__reversed__, relevant_args, self)
     if self.dim() == 0:
         return self
     else:
         return self.flip(0)
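
A quick sketch:

import torch

t = torch.tensor([1, 2, 3])
print(reversed(t))   # tensor([3, 2, 1]): flipped along dimension 0, not an iterator
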
Example No. 29
    def _prim(*args, **kwargs):
        # TODO: allow dispatch to be overridden here
        if has_torch_function(args):
            return handle_torch_function(_prim, args, *args, **kwargs)

        # always run the meta function because aten implementation will
        # typically accept more inputs (e.g., it will do promotion and
        # broadcasting) which we want to reject
        meta(*args, **kwargs)
        return impl_aten(*args, **kwargs)
Example No. 30
    def is_shared(self):
        r"""Checks if tensor is in shared memory.

        This is always ``True`` for CUDA tensors.
        """
        relevant_args = (self, )
        from torch.overrides import has_torch_function, handle_torch_function
        if type(self) is not Tensor and has_torch_function(relevant_args):
            return handle_torch_function(Tensor.is_shared, relevant_args, self)
        return self.storage().is_shared()
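
A short usage sketch on a CPU tensor:

import torch

t = torch.zeros(4)
print(t.is_shared())   # False for a freshly allocated CPU tensor
t.share_memory_()
print(t.is_shared())   # True once the storage has been moved to shared memory
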