    def compute_grad(objective_fn, args, kwargs, grad_fn=None):
        r"""Compute gradient of the objective function at the given point and return it along with
        the objective function forward pass (if available).

        Args:
            objective_fn (function): the objective function for optimization
            args (tuple): tuple of NumPy arrays containing the current parameters for the
                objective function
            kwargs (dict): keyword arguments for the objective function
            grad_fn (function): optional gradient function of the objective function with respect to
                the variables ``args``. If ``None``, the gradient function is computed automatically.
                Must return the same shape of tuple [array] as the autograd derivative.

        Returns:
            tuple [array]: NumPy array containing the gradient :math:`\nabla f(x^{(t)})` and the
            objective function output. If ``grad_fn`` is provided, the objective function
            will not be evaluated and instead ``None`` will be returned.
        """
        g = get_gradient(objective_fn) if grad_fn is None else grad_fn
        grad = g(*args, **kwargs)
        forward = getattr(g, "forward", None)

        if len(args) == 1:
            grad = (grad, )

        return grad, forward
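A minimal usage sketch of the pattern above. ``make_grad_fn`` below is a hypothetical stand-in for ``get_gradient``: it returns a gradient function that records the forward-pass value on a ``forward`` attribute, which is what ``compute_grad`` reads back; an analytic gradient of a toy quadratic replaces real autodifferentiation.

    import numpy as np

    def make_grad_fn(fn):
        # Stand-in for ``get_gradient``: the returned gradient function also
        # stores the forward-pass value on a ``forward`` attribute.
        def g(x):
            g.forward = fn(x)   # record the objective value alongside the gradient
            return 2 * x        # analytic d/dx of the toy quadratic below
        return g

    def cost(x):
        return np.sum(x ** 2)

    x = np.array([0.1, 0.2, 0.3])
    g = make_grad_fn(cost)
    grad = g(x)
    forward = getattr(g, "forward", None)  # cost(x); None if the attribute is missing
    grad = (grad,)                         # single-argument case is wrapped in a tuple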
Example #2
    def compute_grad(self, objective_fn, x, grad_fn=None):
        r"""Compute gradient of the objective_fn at at the shifted point :math:`(x -
        m\times\text{accumulation})` and return it along with the objective function
        forward pass (if available).

        Args:
            objective_fn (function): the objective function for optimization
            x (array): NumPy array containing the current values of the variables to be updated
            grad_fn (function): Optional gradient function of the objective function with respect to
                the variables ``x``. If ``None``, the gradient function is computed automatically.

        Returns:
            tuple: The NumPy array containing the gradient :math:`\nabla f(x^{(t)})` and the
                objective function output. If ``grad_fn`` is provided, the objective function
                will not be evaluated and instead ``None`` will be returned.
        """

        x_flat = _flatten(x)

        if self.accumulation is None:
            shifted_x_flat = list(x_flat)
        else:
            shifted_x_flat = [
                e - self.momentum * a
                for a, e in zip(self.accumulation, x_flat)
            ]

        shifted_x = unflatten(shifted_x_flat, x)

        g = get_gradient(objective_fn) if grad_fn is None else grad_fn
        grad = g(shifted_x)
        forward = getattr(g, "forward", None)

        return grad, forward
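A toy numeric sketch of the shifted evaluation point used above: the gradient is taken at ``x - momentum * accumulation`` rather than at ``x`` (a Nesterov-style look-ahead). The momentum update at the end is the conventional rule and is only an assumption about how ``accumulation`` is maintained elsewhere in the optimizer.

    import numpy as np

    momentum, stepsize = 0.9, 0.1
    x = np.array([0.5, -0.3])
    accumulation = np.array([0.10, 0.05])    # running accumulation from earlier steps

    shifted_x = x - momentum * accumulation  # look-ahead point where the gradient is evaluated
    grad = 2 * shifted_x                     # gradient of a toy quadratic cost sum(v**2)

    # Conventional momentum update (assumed to live in the optimizer's step logic):
    accumulation = momentum * accumulation + stepsize * grad
    x = x - accumulation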
Example #3
    def compute_grad(self, objective_fn, args, kwargs, grad_fn=None):
        r"""Compute gradient of the objective function at at the shifted point :math:`(x -
        m\times\text{accumulation})` and return it along with the objective function forward pass
        (if available).

        Args:
            objective_fn (function): the objective function for optimization.
            args (tuple): tuple of NumPy arrays containing the current values for the
                objective function.
            kwargs (dict): keyword arguments for the objective function.
            grad_fn (function): optional gradient function of the objective function with respect to
                the variables ``args``. If ``None``, the gradient function is computed automatically.
                Must return the same shape of tuple [array] as the autograd derivative.

        Returns:
            tuple [array]: the NumPy array containing the gradient :math:`\nabla f(x^{(t)})` and the
            objective function output. If ``grad_fn`` is provided, the objective function
            will not be evaluated and instead ``None`` will be returned.
        """
        shifted_args = list(args)

        trainable_args = []
        for arg in args:
            if getattr(arg, "requires_grad", True):
                trainable_args.append(arg)

        if self.accumulation:
            for index, arg in enumerate(trainable_args):
                if self.accumulation[index]:
                    x_flat = _flatten(arg)
                    acc = _flatten(self.accumulation[index])

                    shifted_x_flat = [
                        e - self.momentum * a for a, e in zip(acc, x_flat)
                    ]

                    shifted_args[index] = unflatten(shifted_x_flat, arg)

                    if isinstance(shifted_args[index], ndarray):
                        # Due to a bug in unflatten, input PennyLane tensors
                        # are being unwrapped. Here, we cast them back to PennyLane
                        # tensors.
                        # TODO: remove when the following is fixed:
                        # https://github.com/PennyLaneAI/pennylane/issues/966
                        shifted_args[index] = shifted_args[index].view(tensor)
                        shifted_args[index].requires_grad = True

        g = get_gradient(objective_fn) if grad_fn is None else grad_fn
        grad = g(*shifted_args, **kwargs)
        forward = getattr(g, "forward", None)

        if len(trainable_args) == 1:
            grad = (grad, )

        return grad, forward
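A small sketch of the trainable-argument filtering above. ``Param`` is a hypothetical minimal ndarray subclass carrying a ``requires_grad`` flag (standing in for the tensor type used in the snippet); only arguments whose flag is True, or which have no flag at all, are shifted and differentiated.

    import numpy as np

    class Param(np.ndarray):
        # Hypothetical tensor-like array with a ``requires_grad`` flag.
        def __new__(cls, data, requires_grad=True):
            obj = np.asarray(data, dtype=float).view(cls)
            obj.requires_grad = requires_grad
            return obj

    weights = Param([0.1, 0.2, 0.3], requires_grad=True)
    features = Param([1.0, 2.0, 3.0], requires_grad=False)

    args = (weights, features)
    trainable_args = [a for a in args if getattr(a, "requires_grad", True)]
    # -> only ``weights`` is shifted and differentiated; ``features`` passes through unchanged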
Example #4
    def compute_grad(objective_fn, x, grad_fn=None):
        r"""Compute gradient of the objective_fn at the point x and return it along with the
        objective function forward pass (if available).

        Args:
            objective_fn (function): the objective function for optimization
            x (array): NumPy array containing the current values of the variables to be updated
            grad_fn (function): Optional gradient function of the objective function with respect to
                the variables ``x``. If ``None``, the gradient function is computed automatically.

        Returns:
            tuple: The NumPy array containing the gradient :math:`\nabla f(x^{(t)})` and the
                objective function output. If ``grad_fn`` is provided, the objective function
                will not be evaluated and instead ``None`` will be returned.
        """
        g = get_gradient(objective_fn) if grad_fn is None else grad_fn
        grad = g(x)
        forward = getattr(g, "forward", None)

        return grad, forward
Example #5
    def _optimize(self, objective_fn, parameters: ndarray, maxiter, *args,
                  grad_fn, **kwargs) -> Tuple[ndarray, float, ndarray]:
        shape = parameters.shape
        shaped_fn = self._reshaping_objective_fn(objective_fn, shape, **kwargs)
        approx_grad = grad_fn is None
        updated_parameters, cost, info = sciopt.fmin_l_bfgs_b(
            shaped_fn,
            parameters.flatten(),
            fprime=get_gradient(shaped_fn) if grad_fn is None else grad_fn,
            args=args,  # extra positional arguments are forwarded to the objective and gradient
            approx_grad=approx_grad,
            bounds=self.bounds,
            m=self.m,
            factr=self.factr,
            pgtol=self.pgtol,
            epsilon=self.epsilon,
            iprint=self.iprint,
            maxfun=self.maxfun,
            maxiter=maxiter,
            disp=self.disp,
            callback=self.callback,
            maxls=self.maxls,
        )
        return updated_parameters.reshape(shape), cost, info["grad"]
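For context, a self-contained sketch of the same SciPy call on a toy quadratic, showing the two gradient modes the wrapper above switches between: an explicit ``fprime`` versus ``approx_grad=True`` (finite differences). The toy functions and names here are illustrative only; ``scipy.optimize.fmin_l_bfgs_b`` and its keywords come from SciPy.

    import numpy as np
    import scipy.optimize as sciopt

    def cost(x):
        return float(np.sum((x - 1.0) ** 2))

    def grad(x):
        return 2.0 * (x - 1.0)

    x0 = np.zeros(3)

    # Analytic gradient: pass it via ``fprime`` and leave approx_grad off.
    x_opt, f_opt, info = sciopt.fmin_l_bfgs_b(cost, x0, fprime=grad)

    # No gradient available: let L-BFGS-B finite-difference the cost instead.
    x_fd, f_fd, info_fd = sciopt.fmin_l_bfgs_b(cost, x0, approx_grad=True)

    print(x_opt, f_opt, info["grad"])  # info also holds funcalls, nit, warnflag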