Code Example #1
class Function(with_metaclass(FunctionMeta, _C._FunctionBase, _ContextMethodMixin, _HookMixin)):
    """Records operation history and defines formulas for differentiating ops.

    Every operation performed on :class:`Variable` s creates a new function
    object that performs the computation and records that it happened.
    The history is retained in the form of a DAG of functions, with edges
    denoting data dependencies (``input <- output``). Then, when backward is
    called, the graph is processed in topological order, by calling the
    :func:`backward` methods of each :class:`Function` object and passing
    returned gradients on to the next :class:`Function` s.

    Normally, the only way users interact with functions is by creating
    subclasses and defining new operations. This is a recommended way of
    extending torch.autograd.

    Since Function logic is a hotspot in most scripts, almost all of it
    was moved to our C backend, to ensure that the framework overhead is
    minimal.

    Each function is meant to be used only once (in the forward pass).

    Attributes:
        saved_tensors: Tuple of Tensors that were saved in the call to
            :func:`forward`.
        needs_input_grad: Tuple of booleans of length :attr:`num_inputs`,
            indicating whether a given input requires gradient. This can be
            used to optimize the buffers saved for backward, and to skip gradient
            computation in :func:`~Function.backward`.
        num_inputs: Number of inputs given to :func:`forward`.
        num_outputs: Number of tensors returned by :func:`forward`.
        requires_grad: Boolean indicating whether the :func:`backward` will
            ever need to be called.
    """

    # only for backward compatibility
    __call__ = _C._FunctionBase._do_forward

    @staticmethod
    def forward(*args, **kwargs):
        """Performs the operation.

        This function is to be overridden by all subclasses.

        It can take and return an arbitrary number of tensors.
        """
        raise NotImplementedError

    @staticmethod
    def backward(*grad_outputs):
        """Defines a formula for differentiating the operation.

        This function is to be overridden by all subclasses.

        All arguments are tensors. It has to accept exactly as many arguments
        as :func:`forward` returned outputs, and it should return as many
        tensors as there were inputs to :func:`forward`. Each argument is the
        gradient w.r.t. the given output, and each returned value should be
        the gradient w.r.t. the corresponding input.
        """
        raise NotImplementedError
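
As a hedged usage sketch for this legacy API (assuming, as the Attributes section suggests, that ``save_for_backward`` and ``saved_tensors`` come from ``_ContextMethodMixin`` on the instance), a subclass overrides ``forward`` and ``backward`` as instance methods and the instance itself is called:

import torch
from torch.autograd import Function, Variable

class Exp(Function):
    def forward(self, i):
        # In the legacy API, self plays the role of the context object.
        result = i.exp()
        self.save_for_backward(result)
        return result

    def backward(self, grad_output):
        result, = self.saved_tensors
        return grad_output * result

x = Variable(torch.randn(3), requires_grad=True)
y = Exp()(x)          # __call__ dispatches to _C._FunctionBase._do_forward
y.sum().backward()
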
Code Example #2
File: _script.py  Project: zxin1023/pytorch
    class ScriptModule(with_metaclass(ScriptMeta, Module)):  # type: ignore
        """
        ``ScriptModule``s wrap a C++ ``torch::jit::Module``. ``ScriptModule``s
        contain methods, attributes, parameters, and constants. These can be
        accessed the same way as on a normal ``nn.Module``.
        """
        def __init__(self):
            super(ScriptModule, self).__init__()

        forward = _CachedForward()

        def __getattr__(self, attr):
            if "_actual_script_module" not in self.__dict__:
                return super(ScriptModule, self).__getattr__(attr)
            return getattr(self._actual_script_module, attr)

        def __setattr__(self, attr, value):
            if "_actual_script_module" not in self.__dict__:
                # Unwrap torch.jit.Attribute into a regular setattr + recording
                # the provided type in __annotations__.
                #
                # This ensures that if we use the attr again in `__init__`, it
                # will look like the actual value, not an instance of Attribute.
                if isinstance(value, Attribute):
                    # NB: Ensure that we set __annotations__ on the specific
                    # class in question, and not on a superclass (which would
                    # be wrong wrong wrong!).
                    # See also https://github.com/pytorch/pytorch/issues/39463
                    if "__annotations__" not in self.__class__.__dict__:
                        self.__class__.__annotations__ = {}
                    self.__annotations__[attr] = value.type
                    value = value.value
                return super(ScriptModule, self).__setattr__(attr, value)

            setattr(self._actual_script_module, attr, value)

        def define(self, src):
            if "_actual_script_module" in self.__dict__:
                # If we have completed initialization, just defer to the
                # backing RecursiveScriptModule to eagerly compile the provided
                # source.
                return self._actual_script_module.define(src)

            # Otherwise, we are still in the object's __init__.
            # In that case, add `src` as a stub to be compiled.
            #
            # We use frames_up=1 to get to the proper surrounding scope. The stack
            # will look like:
            # 0. createResolutionCallback
            # 1. define()
            # 2. surrounding scope.
            #
            # createResolutionCallback internally adds 1 to get us to our frame, then
            # we add 1 to get to the proper surrounding scope.
            rcb = _jit_internal.createResolutionCallbackFromFrame(frames_up=1)
            ast = torch._C._parse_source_def(src)
            self._methods[ast.name().name] = ScriptMethodStub(rcb, ast, None)

        def _replicate_for_data_parallel(self):
            return self._actual_script_module._replicate_for_data_parallel()
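
As an illustrative sketch (not taken from the file above), ``define`` lets a ``torch.jit.ScriptModule`` subclass compile extra TorchScript methods from source; when called inside ``__init__``, the source is stored as a stub and compiled together with the rest of the module:

import torch

class MyModule(torch.jit.ScriptModule):
    def __init__(self):
        super(MyModule, self).__init__()
        # Called during __init__, so the source is recorded as a stub and
        # compiled once the backing RecursiveScriptModule is created.
        self.define("""
            def scale(self, x):
                return x * 2.0
        """)

m = MyModule()
print(m.scale(torch.ones(3)))  # tensor([2., 2., 2.])
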
Code Example #3
File: __init__.py  Project: zhengsx/pytorch
class ScriptModule(with_metaclass(ScriptMeta, torch._C.ScriptModule)):

    def __setattr__(self, name, value):
        if isinstance(value, Parameter):
            self._register_or_set_parameter(name, value)
        elif isinstance(value, ScriptModule):
            self._register_module(name, value)
            # Note: script modules are subclassed in Python, and the
            # C++ script::Module class will not hold references to them.
            # To ensure that you always get the same Python value here,
            # we store it as a native attribute _in addition to_
            # registering it with the C++ script::Module.
            object.__setattr__(self, name, value)
        else:
            object.__setattr__(self, name, value)

    def __getattr__(self, attr):
        r = self._get_attribute(attr)
        if r is None:
            raise AttributeError("'{}' object has no attribute '{}'".format(self.__class__.__name__, attr))
        return r

    def define(self, lang):
        rcb = createResolutionCallback()
        self._define(lang, rcb, True)
Code Example #4
    class ScriptModule(
            with_metaclass(ScriptMeta, torch._C.ScriptModule, Module)):
        def __init__(self, optimize=True):
            # must run before Module.__init__ since the field is used in __getattr__
            Module.__init__(self)
            self._set_optimized(optimize)
            self._parameters = OrderedParameterDict(self)
            self._buffers = OrderedBufferDict(self)
            self._modules = OrderedModuleDict(self)

        def __getattr__(self, attr):
            if self._has_method(attr):
                if attr in self.__class__._original_methods:
                    original_method = self.__class__._original_methods[attr]
                    script_method = self._get_method(attr)
                    return functools.wraps(original_method)(script_method)
                else:
                    return self._get_method(attr)
            if attr == 'graph' and self._has_method('forward'):
                return self.__getattr__('forward').graph
            return Module.__getattr__(self, attr)

        def __setattr__(self, attr, value):
            if attr not in self._constants_set:
                return super(ScriptModule, self).__setattr__(attr, value)
            if hasattr(self, attr):
                raise RuntimeError(
                    "attempting to re-assign constant '{}'".format(attr))
            if isinstance(value, ModuleList):
                # special case for list of modules. Modules need to be registered with their
                # parent module. To do this, we create a ConstModuleList, which is itself a module, that
                # contains each of these modules as submodules. The ConstModuleList then
                # is set as an attribute of the parent module.
                super(ScriptModule, self).__setattr__(attr,
                                                      _ConstModuleList(value))
            elif isinstance(value, Sequential):
                super(ScriptModule, self).__setattr__(attr,
                                                      _ConstSequential(value))
            else:
                super(ScriptModule,
                      self).__setattr__(attr, _get_valid_constant(value))

        def __dir__(self):
            return sorted(Module.__dir__(self) + self._method_names())

        def define(self, lang):
            # We use frames_up=1 to get to the proper surrounding scope. The stack
            # will look like:
            # 0. createResolutionCallback
            # 1. define()
            # 2. surrounding scope.
            #
            # createResolutionCallback internally adds 1 to get us to our frame, then
            # we add 1 to get to the proper surrounding scope.
            rcb = createResolutionCallback(frames_up=1)
            self._define(lang, rcb, True)
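
The ``__setattr__`` branch above is what backs the ``__constants__`` mechanism in this era of the API; a rough sketch (class and attribute names are illustrative, assuming ``__constants__`` populates ``_constants_set`` and the older ``@torch.jit.script_method`` decorator shown elsewhere on this page):

import torch

class Scaler(torch.jit.ScriptModule):
    # Names listed in __constants__ end up in _constants_set, so assigning to
    # them goes through the constant-handling branch of __setattr__ above.
    __constants__ = ['alpha']

    def __init__(self, alpha):
        super(Scaler, self).__init__()
        self.alpha = alpha          # stored via _get_valid_constant(...)

    @torch.jit.script_method
    def forward(self, x):
        return x * self.alpha

m = Scaler(0.5)
print(m(torch.ones(2)))  # tensor([0.5000, 0.5000])
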
Code Example #5
File: __init__.py  Project: sunwookimiub/torch_bgru
class ScriptModule(with_metaclass(ScriptMeta, Module, torch._C.ScriptModule)):
    def __init__(self, optimize=True):
        # must run before Module.__init__ since the field is used in __getattr__
        Module.__init__(self)
        self._set_optimized(optimize)
        self._parameters = OrderedParameterDict(self)
        self._buffers = OrderedBufferDict(self)
        self._modules = OrderedModuleDict(self)

    def __getattr__(self, attr):
        if self._has_method(attr):
            if attr in self.__class__._original_methods:
                original_method = self.__class__._original_methods[attr]
                script_method = self._get_method(attr)
                return functools.wraps(original_method)(script_method)
            else:
                return self._get_method(attr)
        return Module.__getattr__(self, attr)

    def __setattr__(self, attr, value):
        if attr not in self._constants_set:
            return super(ScriptModule, self).__setattr__(attr, value)
        if hasattr(self, attr):
            raise RuntimeError(
                "attempting to re-assign constant '{}'".format(attr))
        if isinstance(value, ModuleList):
            # special case for list of modules. Modules need to be registered with their
            # parent module. To do this, we create a ConstModuleList, which is itself a module, that
            # contains each of these modules as submodules. The ConstModuleList then
            # is set as an attribute of the parent module.
            super(ScriptModule, self).__setattr__(attr,
                                                  _ConstModuleList(value))
        elif isinstance(value, Sequential):
            super(ScriptModule, self).__setattr__(attr,
                                                  _ConstSequential(value))
        else:
            super(ScriptModule, self).__setattr__(attr,
                                                  _get_valid_constant(value))

    def __dir__(self):
        return sorted(Module.__dir__(self) + self._method_names())

    # Module already has this method defined, so we
    # need to override it and send it through the ScriptModule lookup
    def forward(self, *args, **kwargs):
        return self.__getattr__('forward')(*args, **kwargs)

    def define(self, lang):
        rcb = createResolutionCallback(frames_up=1)
        self._define(lang, rcb, True)
Code Example #6
File: __init__.py  Project: zhushansheng/pytorch
class ScriptModule(with_metaclass(ScriptMeta, Module, torch._C.ScriptModule)):
    def __init__(self, optimize=True):
        Module.__init__(self)
        self._set_optimized(optimize)
        self._parameters = OrderedParameterDict(self)
        self._buffers = OrderedBufferDict(self)
        self._modules = OrderedModuleDict(self)

    def __getattr__(self, attr):
        if self._has_method(attr):
            return self._get_method(attr)
        return Module.__getattr__(self, attr)

    def __dir__(self):
        return sorted(Module.__dir__(self) + self._method_names())

    # Module already has this method defined, so we
    # need to override it and send it through the ScriptModule lookup
    def forward(self, *args, **kwargs):
        return self.__getattr__('forward')(*args, **kwargs)

    def define(self, lang):
        rcb = createResolutionCallback()
        self._define(lang, rcb, True)
Code Example #7
class Function(with_metaclass(FunctionMeta, _C._FunctionBase, _ContextMethodMixin, _HookMixin)):  # type: ignore
    r"""Records operation history and defines formulas for differentiating ops.

    See the Note on extending the autograd engine for more details on how to use
    this class: https://pytorch.org/docs/stable/notes/extending.html#extending-torch-autograd

    Every operation performed on :class:`Tensor` s creates a new function
    object that performs the computation and records that it happened.
    The history is retained in the form of a DAG of functions, with edges
    denoting data dependencies (``input <- output``). Then, when backward is
    called, the graph is processed in topological order, by calling the
    :func:`backward` methods of each :class:`Function` object and passing
    returned gradients on to the next :class:`Function` s.

    Normally, the only way users interact with functions is by creating
    subclasses and defining new operations. This is a recommended way of
    extending torch.autograd.

    Examples::

        >>> class Exp(Function):
        >>>
        >>>     @staticmethod
        >>>     def forward(ctx, i):
        >>>         result = i.exp()
        >>>         ctx.save_for_backward(result)
        >>>         return result
        >>>
        >>>     @staticmethod
        >>>     def backward(ctx, grad_output):
        >>>         result, = ctx.saved_tensors
        >>>         return grad_output * result
        >>>
        >>> #Use it by calling the apply method:
        >>> output = Exp.apply(input)
    """

    def __call__(self, *args, **kwargs):
        raise RuntimeError(
            "Legacy autograd function with non-static forward method is deprecated. "
            "Please use new-style autograd function with static forward method. "
            "(Example: https://pytorch.org/docs/stable/autograd.html#torch.autograd.Function)")

    # for the tracer
    is_traceable = False

    @staticmethod
    def forward(ctx: Any, *args: Any, **kwargs: Any) -> Any:
        r"""Performs the operation.

        This function is to be overridden by all subclasses.

        It must accept a context ctx as the first argument, followed by any
        number of arguments (tensors or other types).

        The context can be used to store tensors that can then be retrieved
        during the backward pass.
        """
        raise NotImplementedError("You must implement the forward function for custom"
                                  " autograd.Function.")

    @staticmethod
    def backward(ctx: Any, *grad_outputs: Any) -> Any:
        r"""Defines a formula for differentiating the operation.

        This function is to be overridden by all subclasses.

        It must accept a context :attr:`ctx` as the first argument, followed by
        as many outputs as :func:`forward` returned, and it should return as many
        tensors as there were inputs to :func:`forward`. Each argument is the
        gradient w.r.t. the given output, and each returned value should be the
        gradient w.r.t. the corresponding input.

        The context can be used to retrieve tensors saved during the forward
        pass. It also has an attribute :attr:`ctx.needs_input_grad` as a tuple
        of booleans representing whether each input needs gradient. E.g.,
        :func:`backward` will have ``ctx.needs_input_grad[0] = True`` if the
        first input to :func:`forward` needs gradient computed w.r.t. the
        output.
        """
        raise NotImplementedError("You must implement the backward function for custom"
                                  " autograd.Function.")
Code Example #8
File: __init__.py  Project: shen-pan/pytorch
    class ScriptModule(with_metaclass(ScriptMeta, torch._C.ScriptModule, Module)):
        r"""
        The core data structure in Torch Script is the ``ScriptModule``. It is an
        analogue of torch's nn.Module and represents an entire model as a tree of
        submodules. Like normal modules, each individual module in a ScriptModule can
        have submodules, parameters, and methods. In nn.Modules, methods are implemented
        as Python functions, but in ScriptModules methods are typically implemented as
        *Torch Script* functions, a statically-typed subset of Python that contains all
        of PyTorch's built-in Tensor operations. This difference allows your
        ScriptModule's code to run without the need for a Python interpreter.

        ScriptModules and the Torch Script functions inside of them can be created in
        two ways:

        **Tracing:**

            Using ``torch.jit.trace``, you can take an existing module or Python
            function, provide example inputs, and we run the function, recording the
            operations performed on all the tensors. We turn the resulting recording
            into a Torch Script method that is installed as the ``forward`` method of a
            ScriptModule. This module also contains any parameters that the original
            module had.

            Example::

                import torch
                def foo(x, y):
                    return 2*x + y
                traced_foo = torch.jit.trace(foo, (torch.rand(3), torch.rand(3)))

            .. note::
                Tracing a *function* will produce a ScriptModule with a single
                ``forward`` method that implements that function, and that contains
                no parameters.

            Example::

                import torch
                import torchvision
                traced_net = torch.jit.trace(torchvision.models.resnet18(),
                                             torch.rand(1, 3, 224, 224))

            .. note::

                Since tracing only records operations on tensors, it will not record any
                control-flow operations like if statements or loops. When this control-flow is
                constant across your module, this is fine and it often just inlines
                configuration decisions. But sometimes the control-flow is actually part of the
                model itself. For instance, a beam search in sequence-to-sequence translation is
                a loop over the (varying) sequence length of inputs. In this case tracing would
                not be appropriate and the beam search should be written using scripting.

        **Scripting:**

            You can write Torch Script code directly using Python syntax. You do this
            using the ``torch.jit.script`` annotation (for functions) or
            ``torch.jit.script_method`` annotation (for methods) on subclasses of
            ScriptModule. With this annotation the body of the annotated function is
            directly translated into Torch Script. Torch Script itself is a subset of
                the Python language, so not all features in Python work, but we provide
            enough functionality to compute on tensors and do control-dependent
            operations.

            Example::

                import torch
                @torch.jit.script
                def foo(x, y):
                    if x.max() > y.max():
                        r = x
                    else:
                        r = y
                    return r

            .. note::
                A script *function* annotation will construct a ScriptModule
                with a single ``forward`` method that implements that function,
                and that contains no parameters.

            Example::

              import torch
              class MyModule(torch.jit.ScriptModule):
                  def __init__(self, N, M):
                      super(MyModule, self).__init__()
                      self.weight = torch.nn.Parameter(torch.rand(N, M))

                  @torch.jit.script_method
                  def forward(self, input):
                      return self.weight.mv(input)

            Example::

                import torch
                import torch.nn as nn
                import torch.nn.functional as F
                from torch.jit import ScriptModule, script_method, trace

                class MyScriptModule(ScriptModule):
                    def __init__(self):
                        super(MyScriptModule, self).__init__()
                        # trace produces ScriptModules for conv1 and conv2
                        self.conv1 = trace(nn.Conv2d(1, 20, 5), torch.rand(1, 1, 16, 16))
                        self.conv2 = trace(nn.Conv2d(20, 20, 5), torch.rand(1, 20, 16, 16))

                    @script_method
                    def forward(self, input):
                        input = F.relu(self.conv1(input))
                        input = F.relu(self.conv2(input))
                        return input
        """

        def __init__(self, optimize=True):
            # must run before Module.__init__ since the field is used in __getattr__
            Module.__init__(self)
            self._set_optimized(optimize)
            self._parameters = OrderedParameterDict(self)
            self._buffers = OrderedBufferDict(self)
            self._modules = OrderedModuleDict(self)

        def __getattr__(self, attr):
            if self._has_method(attr):
                if attr in self.__class__._original_methods:
                    original_method = self.__class__._original_methods[attr]
                    script_method = self._get_method(attr)
                    return functools.wraps(original_method)(script_method)
                else:
                    return self._get_method(attr)
            if attr == 'graph' and self._has_method('forward'):
                return self.__getattr__('forward').graph
            return Module.__getattr__(self, attr)

        def __setattr__(self, attr, value):
            if attr not in self._constants_set:
                return super(ScriptModule, self).__setattr__(attr, value)
            if hasattr(self, attr):
                raise RuntimeError("attempting to re-assign constant '{}'".format(attr))
            if isinstance(value, ModuleList):
                # special case for list of modules. Modules need to be registered with their
                # parent module. To do this, we create a ConstModuleList, which is itself a module, that
                # contains each of these modules as submodules. The ConstModuleList then
                # is set as an attribute of the parent module.
                super(ScriptModule, self).__setattr__(attr, _ConstModuleList(value))
            elif isinstance(value, Sequential):
                super(ScriptModule, self).__setattr__(attr, _ConstSequential(value))
            else:
                super(ScriptModule, self).__setattr__(attr, _get_valid_constant(value))

        def __dir__(self):
            return sorted(Module.__dir__(self) + self._method_names())

        def define(self, lang):
            # We use frames_up=1 to get to the proper surrounding scope. The stack
            # will look like:
            # 0. createResolutionCallback
            # 1. define()
            # 2. surrounding scope.
            #
            # createResolutionCallback internally adds 1 to get us to our frame, then
            # we add 1 to get to the proper surrounding scope.
            rcb = createResolutionCallback(frames_up=1)
            self._define(lang, rcb, True)
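
As a sketch of the control-flow caveat in the tracing note above (illustrative only, not from the file): tracing bakes in whichever branch was taken for the example inputs, whereas scripting preserves the ``if``:

import torch

def f(x):
    if x.sum() > 0:
        return x * 2
    return x + 1

# Tracing warns about the data-dependent branch and records only the x * 2 path.
traced = torch.jit.trace(f, torch.ones(3))
# Scripting keeps the data-dependent if.
scripted = torch.jit.script(f)

neg = -torch.ones(3)
print(traced(neg))    # tensor([-2., -2., -2.])  (traced branch, wrong for neg)
print(scripted(neg))  # tensor([0., 0., 0.])     (correct branch)
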
Code Example #9
File: variable.py  Project: EmilioRivera/pytorch-bugs
class Variable(with_metaclass(VariableMeta,
                              torch._C._LegacyVariableBase)):  # type: ignore
    pass
Code Example #10
File: function.py  Project: vors/pytorch
class Function(
        with_metaclass(FunctionMeta, _C._FunctionBase, FunctionCtx,
                       _HookMixin)):  # type: ignore[misc]
    r"""Base class to create custom `autograd.Function`

    To create a custom `autograd.Function`, subclass this class and implement
    the :meth:`forward` and :meth:`backward` static methods. Then, to use your custom
    op in the forward pass, call the class method ``apply``. Do not call
    :meth:`forward` directly.

    To ensure correctness and best performance, make sure you are calling the
    correct methods on ``ctx`` and validating your backward function using
    :func:`torch.autograd.gradcheck`.

    See :ref:`extending-autograd` for more details on how to use this class.

    Examples::

        >>> class Exp(Function):
        >>>     @staticmethod
        >>>     def forward(ctx, i):
        >>>         result = i.exp()
        >>>         ctx.save_for_backward(result)
        >>>         return result
        >>>
        >>>     @staticmethod
        >>>     def backward(ctx, grad_output):
        >>>         result, = ctx.saved_tensors
        >>>         return grad_output * result
        >>>
        >>> # Use it by calling the apply method:
        >>> output = Exp.apply(input)
    """
    def __init__(self, *args, **kwargs):
        cls = self.__class__
        warnings.warn(
            f"{cls} should not be instantiated. Methods on autograd functions"
            "are all static, so you should invoke them on the class itself. "
            "Instantiating an autograd function will raise an "
            "error in a future version of PyTorch.", DeprecationWarning)

    def __call__(self, *args, **kwargs):
        raise RuntimeError(
            "Legacy autograd function with non-static forward method is deprecated. "
            "Please use new-style autograd function with static forward method. "
            "(Example: https://pytorch.org/docs/stable/autograd.html#torch.autograd.Function)"
        )

    # for the tracer
    is_traceable = False

    @staticmethod
    def forward(ctx: Any, *args: Any, **kwargs: Any) -> Any:
        r"""Performs the operation.

        This function is to be overridden by all subclasses.

        It must accept a context ctx as the first argument, followed by any
        number of arguments (tensors or other types).

        The context can be used to store arbitrary data that can then be
        retrieved during the backward pass.
        """
        raise NotImplementedError(
            "You must implement the forward function for custom"
            " autograd.Function.")

    @staticmethod
    def backward(ctx: Any, *grad_outputs: Any) -> Any:
        r"""Defines a formula for differentiating the operation.

        This function is to be overridden by all subclasses.

        It must accept a context :attr:`ctx` as the first argument, followed by
        as many outputs as :func:`forward` returned (None will be passed in
        for non-tensor outputs of the forward function), and it should return
        as many tensors as there were inputs to :func:`forward`. Each argument
        is the gradient w.r.t. the given output, and each returned value should
        be the gradient w.r.t. the corresponding input. If an input is not a
        Tensor or is a Tensor not requiring grads, you can just pass None as a
        gradient for that input.

        The context can be used to retrieve tensors saved during the forward
        pass. It also has an attribute :attr:`ctx.needs_input_grad` as a tuple
        of booleans representing whether each input needs gradient. E.g.,
        :func:`backward` will have ``ctx.needs_input_grad[0] = True`` if the
        first input to :func:`forward` needs gradient computed w.r.t. the
        output.
        """
        raise NotImplementedError(
            "You must implement the backward function for custom"
            " autograd.Function.")
Code Example #11
class Function(
        with_metaclass(FunctionMeta, _C._FunctionBase, _ContextMethodMixin,
                       _HookMixin)):
    """Records operation history and defines formulas for differentiating ops.

    Every operation performed on :class:`Variable` s creates a new function
    object that performs the computation and records that it happened.
    The history is retained in the form of a DAG of functions, with edges
    denoting data dependencies (``input <- output``). Then, when backward is
    called, the graph is processed in topological order, by calling the
    :func:`backward` methods of each :class:`Function` object and passing
    returned gradients on to the next :class:`Function` s.

    Normally, the only way users interact with functions is by creating
    subclasses and defining new operations. This is a recommended way of
    extending torch.autograd.

    Each function is meant to be used only once (in the forward pass).

    Attributes:
        requires_grad: Boolean indicating whether the :func:`backward` will
            ever need to be called.

    Examples::

        >>> class Exp(Function):
        >>>
        >>>     @staticmethod
        >>>     def forward(ctx, i):
        >>>         result = i.exp()
        >>>         ctx.save_for_backward(result)
        >>>         return result
        >>>
        >>>     @staticmethod
        >>>     def backward(ctx, grad_output):
        >>>         result, = ctx.saved_variables
        >>>         return grad_output * result
    """

    # only for backward compatibility
    __call__ = _C._FunctionBase._do_forward

    # for the tracer
    is_traceable = False

    @staticmethod
    def forward(ctx, *args, **kwargs):
        """Performs the operation.

        This function is to be overridden by all subclasses.

        It must accept a context ctx as the first argument, followed by any
        number of arguments (tensors or other types).

        The context can be used to store variables that can be then retrieved
        during the backward pass.
        """
        raise NotImplementedError

    @staticmethod
    def backward(ctx, *grad_outputs):
        """Defines a formula for differentiating the operation.

        This function is to be overridden by all subclasses.

        It must accept a context ctx as the first argument, followed by as many
        outputs as :func:`forward` returned, and it should return as many
        tensors as there were inputs to :func:`forward`. Each argument is the
        gradient w.r.t. the given output, and each returned value should be the
        gradient w.r.t. the corresponding input.

        The context can be used to retrieve variables saved during the forward
        pass.
        """
        raise NotImplementedError
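
For completeness, a hedged usage note for the ``Exp`` example in this docstring: in this transitional API the subclass is not instantiated but invoked through the class method ``apply``, and ``ctx.saved_variables`` is the Variable-era name for what later became ``ctx.saved_tensors``:

import torch
from torch.autograd import Variable

x = Variable(torch.randn(3), requires_grad=True)
y = Exp.apply(x)       # apply builds the ctx and calls the static forward
y.sum().backward()
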
Code Example #12
File: variable.py  Project: khabya/DeepStack
class Variable(with_metaclass(VariableMeta, torch._C._LegacyVariableBase)):
    pass