Example No. 1
    def step(self, objective_fn, x, generators, **kwargs):
        r"""Update trainable arguments with one step of the optimizer.

        Args:
            objective_fn (function): The objective function for optimization. It must have the
                signature ``objective_fn(x, generators=None)`` with a sequence of the values ``x``
                and a list of the gates ``generators`` as inputs, returning a single value.
            x (Union[Sequence[float], float]): sequence containing the initial values of the
                variables to be optimized over or a single float with the initial value
            generators (list[~.Operation]): list containing the initial ``pennylane.ops.qubit``
                operators to be used in the circuit and optimized over
            **kwargs: variable-length keyword arguments for the objective function.

        Returns:
            tuple: the new variable values :math:`x^{(t+1)}` and the new ``generators``.
        """
        x_flat = np.fromiter(_flatten(x), dtype=float)
        # wrap the objective function so that it accepts the flattened parameter array
        objective_fn_flat = lambda x_flat, gen: objective_fn(
            unflatten(x_flat, x), generators=gen, **kwargs)

        if len(x_flat) != len(generators):
            raise ValueError(
                "Number of parameters {} must be equal to the number of "
                "generators {}.".format(len(x_flat), len(generators)))

        for d, _ in enumerate(x_flat):
            x_flat[d], generators[d] = self._find_optimal_generators(
                objective_fn_flat, x_flat, generators, d)

        return unflatten(x_flat, x), generators
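
A hedged usage sketch for the step above, assuming a ``RotoselectOptimizer``-style
interface; the device, circuit, and starting values are illustrative, not taken
from the source.

import pennylane as qml
from pennylane import numpy as np

dev = qml.device("default.qubit", wires=2)

@qml.qnode(dev)
def cost(x, generators=None):
    # generators[d] is the rotation gate currently chosen for parameter d
    generators[0](x[0], wires=0)
    generators[1](x[1], wires=1)
    return qml.expval(qml.PauliZ(0) @ qml.PauliZ(1))

opt = qml.RotoselectOptimizer()
params = np.array([0.3, 0.25])
generators = [qml.RX, qml.RY]

# one step jointly updates the parameter values and the gate choices
params, generators = opt.step(cost, params, generators)
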
Example No. 2
    def test_unflatten_error_too_many_elements(self):
        """Tests that unflatten raises an error if the given iterable has
        more elements than the model"""

        reshaped = np.reshape(flat_dummy_array, (16, 2, 2))

        with pytest.raises(ValueError, match="Flattened iterable has more elements than the model"):
            pu.unflatten(np.concatenate([flat_dummy_array, flat_dummy_array]), reshaped)
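
A minimal sketch of the flatten/unflatten contract these tests exercise:
``_flatten`` yields the leaves of a nested structure depth-first, and
``unflatten`` rebuilds the model's nesting from a flat iterable. The helpers
below are simplified stand-ins, not the actual ``pennylane.utils``
implementations.

import numpy as np

def _flatten_sketch(x):
    """Yield the leaves of an arbitrarily nested iterable, depth-first."""
    if isinstance(x, np.ndarray):
        yield from _flatten_sketch(x.flat)
    elif hasattr(x, "__iter__") and not isinstance(x, (str, bytes)):
        for item in x:
            yield from _flatten_sketch(item)
    else:
        yield x

def _unflatten_sketch(flat, model):
    """Consume the list ``flat`` to rebuild ``model``'s nesting; return (result, leftover)."""
    if isinstance(model, np.ndarray):
        n = model.size
        return np.array(flat[:n]).reshape(model.shape), flat[n:]
    if hasattr(model, "__iter__"):
        result = []
        for item in model:
            value, flat = _unflatten_sketch(flat, item)
            result.append(value)
        return result, flat
    return flat[0], flat[1:]

nested = [np.arange(4.0).reshape(2, 2), [5.0, 6.0]]
flat = list(_flatten_sketch(nested))             # [0.0, 1.0, 2.0, 3.0, 5.0, 6.0]
rebuilt, leftover = _unflatten_sketch(flat, nested)
assert leftover == [] and np.array_equal(rebuilt[0], nested[0])
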
Example No. 3
    def step(self, objective_fn, *args, **kwargs):
        r"""Update args with one step of the optimizer.

        Args:
            objective_fn (function): the objective function for optimization. It should take
                the sequence of positional arguments ``*args`` as inputs and return a single
                value.
            *args: variable-length sequence containing the initial values of the variables to
                be optimized over, or a single float with the initial value.
            **kwargs: variable-length keyword arguments for the objective function.

        Returns:
            list[array]: the new variable values :math:`x^{(t+1)}`. If a single argument is
            provided, a single array is returned instead of a list.
        """
        # will single out one variable to change at a time
        # these hold the arguments not getting updated
        before_args = []
        after_args = list(args)

        # mutable version of args to get updated
        args_new = list(args)

        for index, arg in enumerate(args):
            # removing current arg from after_args
            del after_args[0]

            if getattr(arg, "requires_grad", True):
                x_flat = np.fromiter(_flatten(arg), dtype=float)

                # version of objective function that depends on a flattened version of
                # just the one argument.  All others held constant.
                objective_fn_flat = lambda x_flat, arg_kw=arg: objective_fn(
                    *before_args, unflatten(x_flat, arg_kw), *after_args, **kwargs)

                # updating each parameter in current arg
                for d, _ in enumerate(x_flat):
                    x_flat = self._rotosolve(objective_fn_flat, x_flat, d)

                args_new[index] = unflatten(x_flat, arg)

            # updating before_args for next loop
            before_args.append(args_new[index])

        # unwrap arguments if only one, backward compatible and cleaner
        if len(args_new) == 1:
            return args_new[0]
        return args_new
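
A hedged usage sketch for the step above, matching the legacy
``RotosolveOptimizer`` interface shown in this snippet (newer releases may
require frequency information); the device and circuit are illustrative.

import pennylane as qml
from pennylane import numpy as np

dev = qml.device("default.qubit", wires=1)

@qml.qnode(dev)
def cost(x):
    qml.RX(x[0], wires=0)
    qml.RY(x[1], wires=0)
    return qml.expval(qml.PauliZ(0))

opt = qml.RotosolveOptimizer()
x = np.array([0.3, 0.25], requires_grad=True)
x = opt.step(cost, x)  # one full pass of closed-form coordinate updates
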
Example No. 4
    def apply_grad(self, grad, args):
        r"""Update the variables to take a single optimization step. Flattens and unflattens
        the inputs to maintain nested iterables as the parameters of the optimization.

        Args:
            grad (tuple [array]): the gradient of the objective
                function at point :math:`x^{(t)}`: :math:`\nabla f(x^{(t)})`
            args (tuple): the current value of the variables :math:`x^{(t)}`

        Returns:
            list [array]: the new values :math:`x^{(t+1)}`
        """
        args_new = list(args)

        trained_index = 0
        for index, arg in enumerate(args):
            if getattr(arg, "requires_grad", True):
                x_flat = _flatten(arg)
                grad_flat = _flatten(grad[trained_index])
                trained_index += 1

                x_new_flat = [e - self._stepsize * g for g, e in zip(grad_flat, x_flat)]

                args_new[index] = unflatten(x_new_flat, args[index])

                if isinstance(arg, ndarray):
                    # Due to a bug in unflatten, input PennyLane tensors
                    # are being unwrapped. Here, we cast them back to PennyLane
                    # tensors.
                    # TODO: remove when the following is fixed:
                    # https://github.com/PennyLaneAI/pennylane/issues/966
                    args_new[index] = args_new[index].view(tensor)
                    args_new[index].requires_grad = True

        return args_new
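
A small sketch of the trainability filter above, assuming PennyLane's
``requires_grad`` convention: only trainable arguments are updated, and the
gradient tuple is indexed with a separate ``trained_index``. The numbers are
illustrative.

from pennylane import numpy as np

stepsize = 0.1
a = np.array([0.5, -0.2], requires_grad=True)
b = np.array([1.0], requires_grad=False)  # skipped by apply_grad
grad = (np.array([0.3, 0.1]),)            # one entry per *trainable* argument

a_new = a - stepsize * grad[0]            # only ``a`` moves; ``b`` is returned unchanged
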
Example No. 5
        def backward(ctx, grad_output):  # pragma: no cover
            """Implements the backwards pass QNode vector-Jacobian product"""
            # NOTE: This method is definitely tested by the `test_torch.py` test suite,
            # however does not show up in the coverage. This is likely due to
            # subtleties in the torch.autograd.FunctionMeta metaclass, specifically
            # the way in which the backward class is created on the fly

            # evaluate the Jacobian matrix of the QNode
            jacobian = qnode.jacobian(ctx.args, ctx.kwargs)

            if grad_output.is_cuda: # pragma: no cover
                grad_output_np = grad_output.cpu().detach().numpy()
            else:
                grad_output_np = grad_output.detach().numpy()

            # perform the vector-Jacobian product
            if not grad_output_np.shape:
                temp = grad_output_np * jacobian
            else:
                temp = grad_output_np.T @ jacobian

            # restore the nested structure of the input args
            temp = [np.array(i) if not isinstance(i, np.ndarray) else i
                    for i in unflatten(temp.flat, ctx.args)]

            # convert the result to torch tensors, matching
            # the type of the input tensors
            grad_input = []
            for i, j in zip(temp, ctx.saved_tensors):
                res = torch.as_tensor(torch.from_numpy(i), dtype=j.dtype)
                if j.is_cuda: # pragma: no cover
                    cuda_device = j.get_device()
                    res = torch.as_tensor(res, device=cuda_device)
                grad_input.append(res)

            return (None,) + tuple(grad_input)
Example No. 6
    def compute_grad(self, objective_fn, x, grad_fn=None):
        r"""Compute gradient of the objective_fn at at
        the shifted point :math:`(x - m\times\text{accumulation})`.

        Args:
            objective_fn (function): the objective function for optimization
            x (array): NumPy array containing the current values of the variables to be updated
            grad_fn (function): Optional gradient function of the
                objective function with respect to the variables ``x``.
                If ``None``, the gradient function is computed automatically.

        Returns:
            array: NumPy array containing the gradient :math:`\nabla f(x^{(t)})`
        """

        x_flat = _flatten(x)

        if self.accumulation is None:
            shifted_x_flat = list(x_flat)
        else:
            shifted_x_flat = [
                e - self.momentum * a
                for a, e in zip(self.accumulation, x_flat)
            ]

        shifted_x = unflatten(shifted_x_flat, x)

        if grad_fn is not None:
            g = grad_fn(shifted_x)  # just call the supplied grad function
        else:
            # default is autograd
            g = autograd.grad(objective_fn)(shifted_x)  # pylint: disable=no-value-for-parameter
        return g
Example No. 7
    def compute_grad(self, objective_fn, x, grad_fn=None):
        r"""Compute gradient of the objective_fn at at the shifted point :math:`(x -
        m\times\text{accumulation})` and return it along with the objective function
        forward pass (if available).

        Args:
            objective_fn (function): the objective function for optimization
            x (array): NumPy array containing the current values of the variables to be updated
            grad_fn (function): Optional gradient function of the objective function with respect to
                the variables ``x``. If ``None``, the gradient function is computed automatically.

        Returns:
            tuple: The NumPy array containing the gradient :math:`\nabla f(x^{(t)})` and the
                objective function output. If ``grad_fn`` is provided, the objective function
                will not be evaluated and ``None`` will be returned instead.
        """

        x_flat = _flatten(x)

        if self.accumulation is None:
            shifted_x_flat = list(x_flat)
        else:
            shifted_x_flat = [
                e - self.momentum * a
                for a, e in zip(self.accumulation, x_flat)
            ]

        shifted_x = unflatten(shifted_x_flat, x)

        g = get_gradient(objective_fn) if grad_fn is None else grad_fn
        grad = g(shifted_x)
        forward = getattr(g, "forward", None)

        return grad, forward
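
A numpy-only sketch of the shifted evaluation point used above: with momentum,
the gradient is taken at :math:`x - m\times\text{accumulation}` rather than at
:math:`x` itself. The values are illustrative.

import numpy as np

momentum = 0.9
x = np.array([0.5, -0.2])
accumulation = np.array([0.05, 0.01])    # running accumulation term
shifted_x = x - momentum * accumulation  # the gradient is evaluated here
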
Example No. 8
    def apply_grad(self, grad, x):
        r"""Update the variables x to take a single optimization step. Flattens and unflattens
        the inputs to maintain nested iterables as the parameters of the optimization.

        Args:
            grad (array): The gradient of the objective
                function at point :math:`x^{(t)}`: :math:`\nabla f(x^{(t)})`
            x (array): the current value of the variables :math:`x^{(t)}`

        Returns:
            array: the new values :math:`x^{(t+1)}`
        """

        grad_flat = _flatten(grad)
        x_flat = _flatten(x)

        if self.accumulation is None:
            self.accumulation = [self._stepsize * g for g in grad_flat]
        else:
            self.accumulation = [
                self.momentum * a + self._stepsize * g
                for a, g in zip(self.accumulation, grad_flat)
            ]

        x_new_flat = [e - a for a, e in zip(self.accumulation, x_flat)]

        return unflatten(x_new_flat, x)
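
A numpy-only sketch of the accumulation recursion above,
:math:`a^{(t+1)} = m\, a^{(t)} + \eta\, g^{(t)}` followed by
:math:`x^{(t+1)} = x^{(t)} - a^{(t+1)}`; the gradients are illustrative.

import numpy as np

eta, m = 0.1, 0.9
x = np.array([0.5, -0.2])
accumulation = None

for g in [np.array([0.3, 0.1]), np.array([0.2, -0.1])]:
    if accumulation is None:
        accumulation = eta * g  # first step: no history yet
    else:
        accumulation = m * accumulation + eta * g
    x = x - accumulation
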
Example No. 9
        def grad(grad_output, **tfkwargs):
            """Returns the vector-Jacobian product"""
            # evaluate the Jacobian matrix of the QNode
            variables = tfkwargs.get("variables", None)

            jacobian = qnode.jacobian(args, kwargs)
            grad_output_np = grad_output.numpy()

            # perform the vector-Jacobian product
            if not grad_output_np.shape:
                temp = grad_output_np * jacobian
            else:
                temp = grad_output_np.T @ jacobian

            # restore the nested structure of the input args
            grad_input = unflatten(temp.flat, args)

            if isinstance(grad_input, list):
                grad_input = [
                    tf.convert_to_tensor(i, dtype=dtype) for i in grad_input
                ]
            elif isinstance(grad_input, tuple):
                grad_input = tuple(
                    tf.convert_to_tensor(i, dtype=dtype) for i in grad_input)
            else:
                grad_input = tf.convert_to_tensor(grad_input, dtype=dtype)

            if variables is not None:
                return grad_input, variables

            return grad_input
Example No. 10
        def grad(grad_output, **tfkwargs):
            """Returns the vector-Jacobian product"""
            # evaluate the Jacobian matrix of the QNode
            variables = tfkwargs.get('variables', None)

            if hasattr(qnode, "to_autograd"):
                # new style QNode.jacobian has a different signature
                jacobian = qnode.jacobian(args, kwargs)
            else:
                jacobian = qnode.jacobian(args, **kwargs)

            grad_output_np = grad_output.numpy()

            # perform the vector-Jacobian product
            if not grad_output_np.shape:
                temp = grad_output_np * jacobian
            else:
                temp = grad_output_np.T @ jacobian

            # restore the nested structure of the input args
            grad_input = unflatten(temp.flat, args)

            if isinstance(grad_input, list):
                grad_input = [tf.convert_to_tensor(i) for i in grad_input]
            elif isinstance(grad_input, tuple):
                grad_input = tuple(tf.convert_to_tensor(i) for i in grad_input)
            else:
                grad_input = tf.convert_to_tensor(grad_input)

            if variables is not None:
                return grad_input, variables

            return grad_input
Example No. 11
    def test_unflatten(self, shape):
        """Tests that _unflatten successfully unflattens multidimensional arrays."""

        reshaped = np.reshape(flat_dummy_array, shape)
        unflattened = np.array([x for x in pu.unflatten(flat_dummy_array, reshaped)])

        assert unflattened.shape == reshaped.shape
        assert np.array_equal(unflattened, reshaped)
Example No. 12
def successive_params(par1, par2):
    """Return a list of parameter configurations, successively walking from
    par1 to par2 coordinate-wise."""
    par1_flat = np.fromiter(_flatten(par1), dtype=float)
    par2_flat = np.fromiter(_flatten(par2), dtype=float)
    walking_param = []
    for i in range(len(par1_flat) + 1):
        walking_param.append(unflatten(np.append(par2_flat[:i], par1_flat[i:]), par1))
    return walking_param
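
A usage sketch for ``successive_params`` with flat lists; each configuration
swaps in one more coordinate of ``par2`` (element types depend on
``unflatten``, so the output below is approximate):

>>> successive_params([0.0, 0.0], [1.0, 2.0])
[[0.0, 0.0], [1.0, 0.0], [1.0, 2.0]]
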
Example No. 13
    def compute_grad(self, objective_fn, args, kwargs, grad_fn=None):
        r"""Compute gradient of the objective function at at the shifted point :math:`(x -
        m\times\text{accumulation})` and return it along with the objective function forward pass
        (if available).

        Args:
            objective_fn (function): the objective function for optimization.
            args (tuple): tuple of NumPy arrays containing the current values of the
                arguments to the objective function.
            kwargs (dict): keyword arguments for the objective function.
            grad_fn (function): optional gradient function of the objective function with respect to
                the variables ``x``. If ``None``, the gradient function is computed automatically.
                Must return the same shape of tuple [array] as the autograd derivative.

        Returns:
            tuple [array]: the NumPy array containing the gradient :math:`\nabla f(x^{(t)})` and the
            objective function output. If ``grad_fn`` is provided, the objective function
            will not be evaluated and ``None`` will be returned instead.
        """
        shifted_args = list(args)

        trainable_args = []
        for arg in args:
            if getattr(arg, "requires_grad", True):
                trainable_args.append(arg)

        if self.accumulation:
            for index, arg in enumerate(trainable_args):
                if self.accumulation[index]:
                    x_flat = _flatten(arg)
                    acc = _flatten(self.accumulation[index])

                    shifted_x_flat = [
                        e - self.momentum * a for a, e in zip(acc, x_flat)
                    ]

                    shifted_args[index] = unflatten(shifted_x_flat, arg)

                    if isinstance(shifted_args[index], ndarray):
                        # Due to a bug in unflatten, input PennyLane tensors
                        # are being unwrapped. Here, we cast them back to PennyLane
                        # tensors.
                        # TODO: remove when the following is fixed:
                        # https://github.com/PennyLaneAI/pennylane/issues/966
                        shifted_args[index] = shifted_args[index].view(tensor)
                        shifted_args[index].requires_grad = True

        g = get_gradient(objective_fn) if grad_fn is None else grad_fn
        grad = g(*shifted_args, **kwargs)
        forward = getattr(g, "forward", None)

        if len(trainable_args) == 1:
            grad = (grad,)

        return grad, forward
Example No. 14
    def step(self, objective_fn, x):
        r"""Update x with one step of the optimizer.

        Args:
            objective_fn (function): the objective function for optimization. It should take
                a sequence of the values ``x`` as input and return a single value.
            x (Union[Sequence[float], float]): sequence containing the initial values of the
                variables to be optimized over or a single float with the initial value

        Returns:
            array: the new variable values :math:`x^{(t+1)}`
        """
        x_flat = np.fromiter(_flatten(x), dtype=float)
        objective_fn_flat = lambda x_flat: objective_fn(unflatten(x_flat, x))

        for d, _ in enumerate(x_flat):
            x_flat = self._rotosolve(objective_fn_flat, x_flat, d)

        return unflatten(x_flat, x)
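
A hedged sketch of the closed-form coordinate update that ``_rotosolve``
performs, assuming the cost is a sinusoid :math:`a + b\cos(\theta - c)` in
each single parameter (the Rotosolve update of Ostaszewski et al.);
``rotosolve_sketch`` is illustrative, not the library's implementation.

import numpy as np

def rotosolve_sketch(cost, x, d, phi=0.0):
    """Minimize ``cost`` along coordinate ``d`` in closed form."""
    x = np.array(x, dtype=float)

    def at(theta):
        shifted = x.copy()
        shifted[d] = theta
        return cost(shifted)

    h0 = at(phi)
    hp = at(phi + np.pi / 2)
    hm = at(phi - np.pi / 2)
    # minimizer of a + b*cos(theta - c), reconstructed from three evaluations
    x[d] = phi - np.pi / 2 - np.arctan2(2 * h0 - hp - hm, hp - hm)
    return x

# example: cos(x0) is minimized at x0 = -pi
new_x = rotosolve_sketch(lambda v: np.cos(v[0]), [0.3, 0.25], d=0)
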
Example No. 15
    def apply_grad(self, grad, args):
        r"""Update the variables args to take a single optimization step. Flattens and unflattens
        the inputs to maintain nested iterables as the parameters of the optimization.

        Args:
            grad (tuple[array]): the gradient of the objective
                function at point :math:`x^{(t)}`: :math:`\nabla f(x^{(t)})`
            args (tuple): the current value of the variables :math:`x^{(t)}`

        Returns:
            list: the new values :math:`x^{(t+1)}`
        """
        args_new = list(args)

        if self.accumulation is None:
            self.accumulation = {
                "fm": [None] * len(args),
                "sm": [None] * len(args),
                "t": 0
            }

        self.accumulation["t"] += 1

        # Update step size (instead of correcting for bias)
        new_stepsize = (self.stepsize *
                        math.sqrt(1 - self.beta2**self.accumulation["t"]) /
                        (1 - self.beta1**self.accumulation["t"]))

        trained_index = 0
        for index, arg in enumerate(args):
            if getattr(arg, "requires_grad", True):
                x_flat = _flatten(arg)
                grad_flat = list(_flatten(grad[trained_index]))
                trained_index += 1

                self._update_moments(index, grad_flat)

                x_new_flat = [
                    e - new_stepsize * f / (math.sqrt(s) + self.eps)
                    for f, s, e in zip(self.accumulation["fm"][index],
                                       self.accumulation["sm"][index], x_flat)
                ]
                args_new[index] = unflatten(x_new_flat, arg)

                if isinstance(arg, ndarray):
                    # Due to a bug in unflatten, input PennyLane tensors
                    # are being unwrapped. Here, we cast them back to PennyLane
                    # tensors.
                    # TODO: remove when the following is fixed:
                    # https://github.com/PennyLaneAI/pennylane/issues/966
                    args_new[index] = args_new[index].view(tensor)
                    args_new[index].requires_grad = True

        return args_new
Example No. 16
    def apply_grad(self, grad, args):
        r"""Update the parameter array :math:`x` for a single optimization step. Flattens and
        unflattens the inputs to maintain nested iterables as the parameters of the optimization.

        Args:
            grad (array): The gradient of the objective
                function at point :math:`x^{(t)}`: :math:`\nabla f(x^{(t)})`
            args (array): the current value of the variables :math:`x^{(t)}`

        Returns:
            array: the new values :math:`x^{(t+1)}`
        """
        grad_flat = np.array(list(_flatten(grad)))
        x_flat = np.array(list(_flatten(args)))
        x_new_flat = x_flat - self.stepsize * np.linalg.solve(self.metric_tensor, grad_flat)
        return unflatten(x_new_flat, args)
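
A numpy-only sketch of the update above: the linear system
:math:`g \, \Delta = \nabla f` is solved with ``np.linalg.solve`` instead of
inverting the metric tensor explicitly; the tensor and gradient values are
illustrative.

import numpy as np

stepsize = 0.05
metric_tensor = np.array([[0.25, 0.0],
                          [0.0, 0.21]])
grad = np.array([0.3, -0.1])
x = np.array([0.5, 0.2])

x_new = x - stepsize * np.linalg.solve(metric_tensor, grad)
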
Example No. 17
        def grad(grad_output):
            """Returns the vector-Jacobian product"""
            # evaluate the Jacobian matrix of the QNode
            jacobian = qnode.jacobian(args, **kwargs)

            grad_output_np = grad_output.numpy()

            # perform the vector-Jacobian product
            if not grad_output_np.shape:
                temp = grad_output_np * jacobian
            else:
                temp = grad_output_np.T @ jacobian

            # restore the nested structure of the input args
            grad_input = unflatten(temp.flat, args)
            return tuple(grad_input)
Example No. 18
    def apply_grad(self, grad, x):
        r"""Update the variables x to take a single optimization step. Flattens and unflattens
        the inputs to maintain nested iterables as the parameters of the optimization.

        Args:
            grad (array): The gradient of the objective
                function at point :math:`x^{(t)}`: :math:`\nabla f(x^{(t)})`
            x (array): the current value of the variables :math:`x^{(t)}`

        Returns:
            array: the new values :math:`x^{(t+1)}`
        """
        grad_flat = np.array(list(_flatten(grad)))
        x_flat = np.array(list(_flatten(x)))
        x_new_flat = x_flat - self._stepsize * self.metric_tensor_inv @ grad_flat
        return unflatten(x_new_flat, x)
Example No. 19
    def apply_grad(self, grad, x):
        r"""Update the variables x to take a single optimization step. Flattens and unflattens
        the inputs to maintain nested iterables as the parameters of the optimization.

        Args:
            grad (array): The gradient of the objective
                function at point :math:`x^{(t)}`: :math:`\nabla f(x^{(t)})`
            x (array): the current value of the variables :math:`x^{(t)}`

        Returns:
            array: the new values :math:`x^{(t+1)}`
        """

        self.t += 1

        grad_flat = list(_flatten(grad))
        x_flat = _flatten(x)

        # Update first moment
        if self.fm is None:
            self.fm = grad_flat
        else:
            self.fm = [
                self.beta1 * f + (1 - self.beta1) * g
                for f, g in zip(self.fm, grad_flat)
            ]

        # Update second moment
        if self.sm is None:
            self.sm = [g * g for g in grad_flat]
        else:
            self.sm = [
                self.beta2 * f + (1 - self.beta2) * g * g
                for f, g in zip(self.sm, grad_flat)
            ]

        # Update step size (instead of correcting for bias)
        new_stepsize = (self._stepsize * np.sqrt(1 - self.beta2**self.t) /
                        (1 - self.beta1**self.t))

        x_new_flat = [
            e - new_stepsize * f / (np.sqrt(s) + self.eps)
            for f, s, e in zip(self.fm, self.sm, x_flat)
        ]

        return unflatten(x_new_flat, x)
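
A numpy-only sketch of the Adam recursion above, with the bias correction
folded into the step size exactly as in the source; hyperparameters and
gradients are illustrative.

import numpy as np

eta, beta1, beta2, eps = 0.1, 0.9, 0.99, 1e-8
x = np.array([0.5, -0.2])
fm = sm = None
t = 0

for g in [np.array([0.3, 0.1]), np.array([0.2, -0.1])]:
    t += 1
    fm = g if fm is None else beta1 * fm + (1 - beta1) * g          # first moment
    sm = g * g if sm is None else beta2 * sm + (1 - beta2) * g * g  # second moment
    new_stepsize = eta * np.sqrt(1 - beta2**t) / (1 - beta1**t)     # bias correction
    x = x - new_stepsize * fm / (np.sqrt(sm) + eps)
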
Example No. 20
    def apply_grad(self, grad, x):
        r"""Update the variables x to take a single optimization step. Flattens and unflattens
        the inputs to maintain nested iterables as the parameters of the optimization.

        Args:
            grad (array): The gradient of the objective
                function at point :math:`x^{(t)}`: :math:`\nabla f(x^{(t)})`
            x (array): the current value of the variables :math:`x^{(t)}`

        Returns:
            array: the new values :math:`x^{(t+1)}`
        """

        x_flat = _flatten(x)
        grad_flat = _flatten(grad)

        x_new_flat = [e - self.stepsize * g for g, e in zip(grad_flat, x_flat)]

        return unflatten(x_new_flat, x)
Example No. 21
            def gradient_product(g):
                """Vector-Jacobian product operator.

                Args:
                    g (array[float]): scalar or vector multiplying the Jacobian
                        from the left (output side)

                Returns:
                    nested Sequence[float]: vector-Jacobian product, arranged
                    into the nested structure of the input arguments in ``args``
                """
                diff_indices = None
                non_diff_indices = set()

                for arg, arg_variable in zip(args, self.arg_vars):
                    if not getattr(arg, "requires_grad", True):
                        indices = [i.idx for i in _flatten(arg_variable)]
                        non_diff_indices.update(indices)

                if non_diff_indices:
                    diff_indices = set(range(
                        self.num_variables)) - non_diff_indices

                # Jacobian matrix of the circuit
                jac = self.jacobian(args, kwargs, wrt=diff_indices)

                if not g.shape:
                    vjp = g * jac  # numpy treats 0d arrays as scalars, hence @ cannot be used
                else:
                    vjp = g @ jac

                if non_diff_indices:
                    # Autograd requires we return a gradient of size (num_variables,)
                    res = zeros([self.num_variables])
                    indices = fromiter(diff_indices, dtype=int64)
                    res[indices] = vjp
                    vjp = res

                # Restore the nested structure of the input args.
                vjp = unflatten(vjp.flat, args)
                return vjp
Example No. 22
    def gradient_product(g):
        """Vector Jacobian product operator.

        Args:
            g (array): scalar or vector multiplying the Jacobian
                from the left (output side).

        Returns:
            nested Sequence[float]: vector-Jacobian product, arranged
            into the nested structure of the QNode input arguments.
        """
        # Jacobian matrix of the circuit
        jac = self.jacobian(args, **kwargs)
        if not g.shape:
            temp = g * jac  # numpy treats 0d arrays as scalars, hence @ cannot be used
        else:
            temp = g @ jac

        # restore the nested structure of the input args
        temp = unflatten(temp.flat, args)
        return temp
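
A numpy-only illustration of the two branches above: a 0-d ``g`` (scalar
output) scales the Jacobian elementwise, while a vector ``g`` contracts
against the output axis. The Jacobian values are illustrative.

import numpy as np

jac = np.array([[0.1, 0.2, 0.3],
                [0.4, 0.5, 0.6]])  # shape (outputs, parameters)

g_scalar = np.array(0.5)           # 0-d array: g.shape is ()
vjp_scalar = g_scalar * jac        # shape (2, 3); @ would fail on a 0-d input

g_vector = np.array([1.0, -1.0])   # one weight per output
vjp_vector = g_vector @ jac        # shape (3,): the vector-Jacobian product
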
Example No. 23
            def gradient_product(g):
                """Vector-Jacobian product operator.

                Args:
                    g (array[float]): scalar or vector multiplying the Jacobian
                        from the left (output side)

                Returns:
                    nested Sequence[float]: vector-Jacobian product, arranged
                    into the nested structure of the input arguments in ``args``
                """
                # Jacobian matrix of the circuit
                self.set_trainable(args)
                jac = self.jacobian(args, kwargs)

                if not g.shape:
                    vjp = g * jac  # numpy treats 0d arrays as scalars, hence @ cannot be used
                else:
                    vjp = g @ jac

                # Restore the nested structure of the input args.
                vjp = unflatten(vjp.flat, args)
                return vjp
Example No. 24
        def grad(grad_output):
            """Returns the vector-Jacobian product"""
            # evaluate the Jacobian matrix of the QNode
            jacobian = qnode.jacobian(args, **kwargs)

            grad_output_np = grad_output.numpy()

            # perform the vector-Jacobian product
            if not grad_output_np.shape:
                temp = grad_output_np * jacobian
            else:
                temp = grad_output_np.T @ jacobian

            # restore the nested structure of the input args
            grad_input = unflatten(temp.flat, args)

            if isinstance(grad_input, list):
                grad_input = [tf.convert_to_tensor(i) for i in grad_input]
            elif isinstance(grad_input, tuple):
                grad_input = tuple(tf.convert_to_tensor(i) for i in grad_input)
            else:
                grad_input = tf.convert_to_tensor(grad_input)

            return grad_input
Example No. 25
    def test_unflatten_error_unsupported_model(self):
        """Tests that unflatten raises an error if the given model is not supported"""

        with pytest.raises(TypeError, match="Unsupported type in the model"):
            model = lambda x: x  # not a valid model for unflatten
            pu.unflatten(flat_dummy_array, model)
Example No. 26
    def _make_variables(self, args, kwargs):
        """Create the :class:`~.variable.Variable` instances representing the QNode's arguments.

        The created :class:`~.variable.Variable` instances are given in the same nested structure
        as the original arguments. The :class:`~.variable.Variable` instances are named according
        to the argument names given in the QNode definition. Consider the following example:

        .. code-block:: python3

            @qml.qnode(dev)
            def qfunc(a, w):
                qml.Hadamard(0)
                qml.CRX(a, wires=[0, 1])
                qml.Rot(w[0], w[1], w[2], wires=[1])
                qml.CRX(-a, wires=[0, 1])

                return qml.expval(qml.PauliZ(0) @ qml.PauliZ(1))

        In this example, ``_make_variables`` will return the following :class:`~.variable.Variable` instances

        .. code-block:: python3

            >>> qfunc(3.4, [1.2, 3.4, 5.6])
            -0.031664133410566786
            >>> qfunc._make_variables([3.4, [1.2, 3.4, 5.6]], {})
            ["a", ["w[0]", "w[1]", "w[2]"]], {}

        where the Variable instances are replaced with their name for readability.

        Args:
            args (tuple[Any]): Positional arguments passed to the quantum function.
                During the construction we are not concerned with the numerical values, but with
                the nesting structure.
                Each positional argument is replaced with a :class:`~.variable.Variable` instance.
            kwargs (dict[str, Any]): Auxiliary arguments passed to the quantum function.
        """
        # Get the name of the qfunc's arguments
        full_argspec = inspect.getfullargspec(self.func)

        # args
        variable_name_strings = []
        for variable_name, variable_value in zip(full_argspec.args, args):
            variable_name_strings.append(
                self._determine_structured_variable_name(
                    variable_value, variable_name))

        # varargs
        len_diff = len(args) - len(full_argspec.args)
        if len_diff > 0:
            for idx, variable_value in enumerate(args[-len_diff:]):
                variable_name = "{}[{}]".format(full_argspec.varargs, idx)

                variable_name_strings.append(
                    self._determine_structured_variable_name(
                        variable_value, variable_name))

        arg_vars = [
            Variable(idx, name)
            for idx, name in enumerate(_flatten(variable_name_strings))
        ]
        self.num_variables = len(arg_vars)

        # Arrange the newly created Variables in the nested structure of args.
        # Make sure that NumPy scalars are converted into Python scalars.
        arg_vars = [
            i.item() if isinstance(i, np.ndarray) and i.ndim == 0 else i
            for i in unflatten(arg_vars, args)
        ]

        if self._trainable_args is not None and len(
                self._trainable_args) != len(args):
            # If some of the input arguments are marked as non-differentiable,
            # then replace the variable instances in arg_vars back with the
            # original objects.
            for a, _ in enumerate(args):
                if a not in self._trainable_args:
                    arg_vars[a] = args[a]

        # kwargs
        # if not mutable: must convert auxiliary arguments to named Variables so they can be updated without re-constructing the circuit
        # kwarg_vars = {}
        # for key, val in kwargs.items():
        #    temp = [Variable(idx, name=key) for idx, _ in enumerate(_flatten(val))]
        #    kwarg_vars[key] = unflatten(temp, val)

        variable_name_strings = {}
        kwarg_vars = {}
        for variable_name in full_argspec.kwonlyargs:
            if variable_name in kwargs:
                variable_value = kwargs[variable_name]
            else:
                variable_value = full_argspec.kwonlydefaults[variable_name]

            if isinstance(variable_value, np.ndarray):
                variable_name_string = np.empty_like(variable_value,
                                                     dtype=object)

                for index in np.ndindex(*variable_name_string.shape):
                    variable_name_string[index] = "{}[{}]".format(
                        variable_name, ",".join([str(i) for i in index]))

                kwarg_variable = [
                    Variable(idx, name=name, is_kwarg=True)
                    for idx, name in enumerate(_flatten(variable_name_string))
                ]
            else:
                kwarg_variable = Variable(0, name=variable_name, is_kwarg=True)

            kwarg_vars[variable_name] = kwarg_variable

        return arg_vars, kwarg_vars
Example No. 27
    def evaluate(self, args, **kwargs):
        """Evaluates the quantum function on the specified device.

        Args:
            args (tuple): input parameters to the quantum function

        Returns:
            float, array[float]: output measured value(s)
        """
        if not self.ops or not self.cache:
            if self.num_variables is not None:
                # circuit construction has previously been called
                if len(list(_flatten(args))) == self.num_variables:
                    # only construct the circuit if the number
                    # of arguments matches the allowed number
                    # of variables.
                    # This avoids construction happening
                    # via self._pd_analytic, where temporary
                    # variables are appended to the argument list.

                    # flatten and unflatten arguments
                    flat_args = list(_flatten(args))
                    shaped_args = unflatten(flat_args, self.model)

                    # construct the circuit
                    self.construct(shaped_args, kwargs)
            else:
                # circuit has not yet been constructed
                # construct the circuit
                self.construct(args, kwargs)

        # temporarily store keyword arguments
        keyword_values = {}
        keyword_values.update({k: np.array(list(_flatten(v))) for k, v in self.keyword_defaults.items()})
        keyword_values.update({k: np.array(list(_flatten(v))) for k, v in kwargs.items()})

        # Try and insert kwargs-as-positional back into the kwargs dictionary.
        # NOTE: this works, but the creation of new, temporary arguments
        # by pd_analytic breaks this.
        # positional = []
        # kwargs_as_position = {}
        # for idx, v in enumerate(args):
        #     if idx not in self.keyword_positions:
        #     positional.append(v)
        #     else:
        #         kwargs_as_position[self.keyword_positions[idx]] = np.array(list(_flatten(v)))
        # keyword_values.update(kwargs_as_position)

        # temporarily store the free parameter values in the Variable class
        Variable.free_param_values = np.array(list(_flatten(args)))
        Variable.kwarg_values = keyword_values

        self.device.reset()

        # check that no wires are measured more than once
        m_wires = list(w for ex in self.ev for w in ex.wires)
        if len(m_wires) != len(set(m_wires)):
            raise QuantumFunctionError('Each wire in the quantum circuit can only be measured once.')

        def check_op(op):
            """Make sure only existing wires are referenced."""
            for w in op.wires:
                if w < 0 or w >= self.num_wires:
                    raise QuantumFunctionError("Operation {} applied to invalid wire {} "
                                               "on device with {} wires.".format(op.name, w, self.num_wires))

        # check every gate/preparation and ev measurement
        for op in self.ops:
            check_op(op)

        ret = self.device.execute(self.queue, self.ev)
        return self.output_type(ret)
Example No. 28
    def construct(self, args, kwargs=None):
        """Constructs a representation of the quantum circuit.

        The user should never have to call this method.

        This method is called automatically the first time :meth:`QNode.evaluate`
        or :meth:`QNode.jacobian` is called. It executes the quantum function,
        stores the resulting sequence of :class:`~.operation.Operation` instances,
        and creates the variable mapping.

        Args:
            args (tuple): Represent the free parameters passed to the circuit.
                Here we are not concerned with their values, but with their structure.
                Each free param is replaced with a :class:`~.variable.Variable` instance.
            kwargs (dict): Additional keyword arguments may be passed to the quantum circuit function,
                however PennyLane does not support differentiating with respect to keyword arguments.
                Instead, keyword arguments are useful for providing data or 'placeholders'
                to the quantum circuit function.
        """
        # pylint: disable=too-many-branches,too-many-statements
        self.queue = []
        self.ev = []  # temporary queue for EVs

        if kwargs is None:
            kwargs = {}

        # flatten the args, replace each with a Variable instance with a unique index
        temp = [Variable(idx) for idx, _ in enumerate(_flatten(args))]
        self.num_variables = len(temp)

        # store the nested shape of the arguments for later unflattening
        self.model = args

        # arrange the newly created Variables in the nested structure of args
        variables = unflatten(temp, args)

        # get default kwargs that weren't passed
        keyword_sig = _get_default_args(self.func)
        self.keyword_defaults = {k: v[1] for k, v in keyword_sig.items()}
        self.keyword_positions = {v[0]: k for k, v in keyword_sig.items()}

        keyword_values = {}
        keyword_values.update(self.keyword_defaults)
        keyword_values.update(kwargs)

        if self.cache:
            # caching mode, must use variables for kwargs
            # wrap each keyword argument as a Variable
            kwarg_variables = {}
            for key, val in keyword_values.items():
                temp = [Variable(idx, name=key) for idx, _ in enumerate(_flatten(val))]
                kwarg_variables[key] = unflatten(temp, val)

        Variable.free_param_values = np.array(list(_flatten(args)))
        Variable.kwarg_values = {k: np.array(list(_flatten(v))) for k, v in keyword_values.items()}

        # set up the context for Operation entry
        if QNode._current_context is None:
            QNode._current_context = self
        else:
            raise QuantumFunctionError('QNode._current_context must not be modified outside this method.')
        # generate the program queue by executing the quantum circuit function
        try:
            if self.cache:
                # caching mode, must use variables for kwargs
                # so they can be updated without reconstructing
                res = self.func(*variables, **kwarg_variables)
            else:
                # no caching, fine to directly pass kwarg values
                res = self.func(*variables, **keyword_values)
        finally:
            # remove the context
            QNode._current_context = None

        #----------------------------------------------------------
        # check the validity of the circuit

        # quantum circuit function return validation
        if isinstance(res, pennylane.operation.Observable):
            self.output_type = float
            self.output_dim = 1
            res = (res,)
        elif isinstance(res, Sequence) and res and all(isinstance(x, pennylane.operation.Observable) for x in res):
            # for multiple observables values, any valid Python sequence of observables
            # (i.e., lists, tuples, etc) are supported in the QNode return statement.
            self.output_dim = len(res)
            self.output_type = np.asarray
            res = tuple(res)
        else:
            raise QuantumFunctionError("A quantum function must return either a single measured observable "
                                       "or a nonempty sequence of measured observables.")

        # check that all returned observables have a return_type specified
        for x in res:
            if x.return_type is None:
                raise QuantumFunctionError("Observable '{}' does not have the measurement "
                                           "type specified.")

        # check that all ev's are returned, in the correct order
        if res != tuple(self.ev):
            raise QuantumFunctionError("All measured observables must be returned in the "
                                       "order they are measured.")

        self.ev = list(res)  #: list[Observable]: returned observables
        self.ops = self.queue + self.ev  #: list[Operation]: combined list of circuit operations

        # classify the circuit contents
        temp = [isinstance(op, pennylane.operation.CV) for op in self.ops if not isinstance(op, pennylane.ops.Identity)]
        if all(temp):
            self.type = 'CV'
        elif True not in temp:
            self.type = 'qubit'
        else:
            raise QuantumFunctionError("Continuous and discrete operations are not "
                                       "allowed in the same quantum circuit.")

        #----------------------------------------------------------

        # map each free variable to the operations which depend on it
        self.variable_ops = {}
        for k, op in enumerate(self.ops):
            for idx, p in enumerate(_flatten(op.params)):
                if isinstance(p, Variable):
                    if p.name is None: # ignore keyword arguments
                        self.variable_ops.setdefault(p.idx, []).append((k, idx))

        #: dict[int->str]: map from free parameter index to the gradient method to be used with that parameter
        self.grad_method_for_par = {k: self._best_method(k) for k in self.variable_ops}
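
A minimal sketch of the Variable mapping built here: each leaf of the nested
positional arguments receives a unique flat index, and the Variables are then
rearranged back into the original nesting. ``VariableSketch`` is a stand-in,
not :class:`~.variable.Variable`.

class VariableSketch:
    def __init__(self, idx, name=None):
        self.idx, self.name = idx, name

    def __repr__(self):
        return "Var({})".format(self.idx)

args = (0.1, [0.2, 0.3])       # nested positional arguments
flat_leaves = [0.1, 0.2, 0.3]  # what _flatten(args) would yield
variables = [VariableSketch(i) for i, _ in enumerate(flat_leaves)]
# unflatten(variables, args) rebuilds the nesting: (Var(0), [Var(1), Var(2)])
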
Example No. 29
def apply_grad(grad, x, stepsize, metric_tensor):
    """Single natural-gradient step: solve the metric-tensor linear system
    instead of inverting it, then restore the nested structure of ``x``."""
    grad_flat = np.array(list(_flatten(grad)))
    x_flat = np.array(list(_flatten(x)))
    x_new_flat = x_flat - stepsize * np.linalg.solve(metric_tensor, grad_flat)
    return unflatten(x_new_flat, x)
Example No. 30
    def _construct(self, args, kwargs):
        """Construct the quantum circuit graph by calling the quantum function.

        For immutable nodes this method is called the first time :meth:`QNode.evaluate`
        or :meth:`QNode.jacobian` is called, and for mutable nodes *each time*
        they are called. It executes the quantum function,
        stores the resulting sequence of :class:`.Operator` instances,
        converts it into a circuit graph, and creates the Variable mapping.

        .. note::
           The Variables are only required for analytic differentiation,
           for evaluation we could simply reconstruct the circuit each time.

        Args:
            args (tuple[Any]): Positional arguments passed to the quantum function.
                During the construction we are not concerned with the numerical values, but with
                the nesting structure.
                Each positional argument is replaced with a :class:`~.variable.Variable` instance.
            kwargs (dict[str, Any]): Auxiliary arguments passed to the quantum function.

        Raises:
            QuantumFunctionError: if ``_current_context`` is modified inside this method,
                if the quantum function returns invalid values, or if both continuous and
                discrete operations are specified in the same quantum circuit
        """
        # pylint: disable=attribute-defined-outside-init, too-many-branches

        # flatten the args, replace each argument with a Variable instance carrying a unique index
        arg_vars = [Variable(idx) for idx, _ in enumerate(_flatten(args))]
        self.num_variables = len(arg_vars)
        # arrange the newly created Variables in the nested structure of args
        arg_vars = unflatten(arg_vars, args)

        # temporary queues for operations and observables
        self.queue = []  #: list[Operation]: applied operations
        self.obs_queue = []  #: list[Observable]: applied observables

        # set up the context for Operator entry
        if qml._current_context is None:
            qml._current_context = self
        else:
            raise QuantumFunctionError(
                "qml._current_context must not be modified outside this method."
            )
        try:
            # generate the program queue by executing the quantum circuit function
            if self.mutable:
                # it's ok to directly pass auxiliary arguments since the circuit is re-constructed each time
                # (positional args must be replaced because parameter-shift differentiation requires Variables)
                res = self.func(*arg_vars, **kwargs)
            else:
                # must convert auxiliary arguments to named Variables so they can be updated without re-constructing the circuit
                kwarg_vars = {}
                for key, val in kwargs.items():
                    temp = [Variable(idx, name=key) for idx, _ in enumerate(_flatten(val))]
                    kwarg_vars[key] = unflatten(temp, val)

                res = self.func(*arg_vars, **kwarg_vars)
        finally:
            qml._current_context = None

        # check the validity of the circuit
        self._check_circuit(res)

        # map each free variable to the operators which depend on it
        self.variable_deps = {k: [] for k in range(self.num_variables)}
        for k, op in enumerate(self.ops):
            for j, p in enumerate(_flatten(op.params)):
                if isinstance(p, Variable):
                    if p.name is None:  # ignore auxiliary arguments
                        self.variable_deps[p.idx].append(ParameterDependency(op, j))

        # generate the DAG
        self.circuit = CircuitGraph(self.ops, self.variable_deps)

        # check for unused positional params
        if self.properties.get("par_check", False):
            unused = [k for k, v in self.variable_deps.items() if not v]
            if unused:
                raise QuantumFunctionError(
                    "The positional parameters {} are unused.".format(unused)
                )

        # check for operations that cannot affect the output
        if self.properties.get("vis_check", False):
            visible = self.circuit.ancestors(self.circuit.observables)
            invisible = set(self.circuit.operations) - visible
            if invisible:
                raise QuantumFunctionError(
                    "The operations {} cannot affect the output of the circuit.".format(invisible)
                )