Example #1
    def test_single_step(self, qnode, param, nums_frequency, spectra,
                         substep_optimizer, substep_kwargs):
        """Test executing a single step of the RotosolveOptimizer on a QNode."""
        param = tuple(np.array(p, requires_grad=True) for p in param)
        opt = RotosolveOptimizer(substep_optimizer, substep_kwargs)

        repack_param = len(param) == 1
        new_param_step = opt.step(
            qnode,
            *param,
            nums_frequency=nums_frequency,
            spectra=spectra,
        )
        if repack_param:
            new_param_step = (new_param_step, )

        assert (np.isscalar(new_param_step)
                and np.isscalar(param)) or len(new_param_step) == len(param)
        new_param_step_and_cost, old_cost = opt.step_and_cost(
            qnode,
            *param,
            nums_frequency=nums_frequency,
            spectra=spectra,
        )
        if repack_param:
            new_param_step_and_cost = (new_param_step_and_cost, )

        assert np.allclose(
            np.fromiter(_flatten(new_param_step_and_cost), dtype=float),
            np.fromiter(_flatten(new_param_step), dtype=float),
        )
        assert np.isclose(qnode(*param), old_cost)
Example #2
    def test_single_step(self, qnode, param, num_freq, optimizer, optimizer_kwargs):
        opt = RotosolveOptimizer()

        repack_param = len(param) == 1
        new_param_step = opt.step(
            qnode,
            *param,
            num_freqs=num_freq,
            optimizer=optimizer,
            optimizer_kwargs=optimizer_kwargs,
        )
        if repack_param:
            new_param_step = (new_param_step,)

        assert (np.isscalar(new_param_step) and np.isscalar(param)) or len(new_param_step) == len(
            param
        )
        new_param_step_and_cost, old_cost = opt.step_and_cost(
            qnode,
            *param,
            num_freqs=num_freq,
            optimizer=optimizer,
            optimizer_kwargs=optimizer_kwargs,
        )
        if repack_param:
            new_param_step_and_cost = (new_param_step_and_cost,)

        assert np.allclose(
            np.fromiter(_flatten(new_param_step_and_cost), dtype=float),
            np.fromiter(_flatten(new_param_step), dtype=float),
        )
        assert np.isclose(qnode(*param), old_cost)
Example #3
    def apply_grad(self, grad, args):
        r"""Update the variables to take a single optimization step. Flattens and unflattens
        the inputs to maintain nested iterables as the parameters of the optimization.

        Args:
            grad (tuple [array]): the gradient of the objective
                function at point :math:`x^{(t)}`: :math:`\nabla f(x^{(t)})`
            args (tuple): the current value of the variables :math:`x^{(t)}`

        Returns:
            list [array]: the new values :math:`x^{(t+1)}`
        """
        args_new = list(args)

        trained_index = 0
        for index, arg in enumerate(args):
            if getattr(arg, "requires_grad", True):
                x_flat = _flatten(arg)
                grad_flat = _flatten(grad[trained_index])
                trained_index += 1

                x_new_flat = [e - self._stepsize * g for g, e in zip(grad_flat, x_flat)]

                args_new[index] = unflatten(x_new_flat, args[index])

                if isinstance(arg, ndarray):
                    # Due to a bug in unflatten, input PennyLane tensors
                    # are being unwrapped. Here, we cast them back to PennyLane
                    # tensors.
                    # TODO: remove when the following is fixed:
                    # https://github.com/PennyLaneAI/pennylane/issues/966
                    args_new[index] = args_new[index].view(tensor)
                    args_new[index].requires_grad = True

        return args_new
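
A note on the flatten/unflatten round-trip this update relies on: a minimal, self-contained sketch is shown below. The flatten and unflatten helpers here are illustrative stand-ins for pennylane.utils._flatten and pennylane.utils.unflatten, not the library's actual implementations.

import numpy as np

def flatten(x):
    """Stand-in for _flatten: yield the leaves of a nested iterable."""
    if isinstance(x, np.ndarray):
        yield from x.flat
    elif isinstance(x, (list, tuple)):
        for item in x:
            yield from flatten(item)
    else:
        yield x

def unflatten(flat, model):
    """Stand-in for unflatten: rebuild model's nesting from a flat iterable."""
    flat = iter(flat)

    def rebuild(m):
        if isinstance(m, np.ndarray):
            return np.array([next(flat) for _ in range(m.size)]).reshape(m.shape)
        if isinstance(m, (list, tuple)):
            return type(m)(rebuild(item) for item in m)
        return next(flat)

    return rebuild(model)

x = [np.array([1.0, 2.0]), 3.0]
grad = [np.array([0.1, 0.2]), 0.3]
x_new_flat = [e - 0.5 * g for g, e in zip(flatten(grad), flatten(x))]
x_new = unflatten(x_new_flat, x)  # [array([0.95, 1.9]), 2.85] -- nesting preserved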
Example #4
    def evaluate_obs(self, obs, args, **kwargs):
        """Evaluate the value of the given observables.

        Assumes :meth:`construct` has already been called.

        Args:
            obs  (Iterable[Observable]): observables to measure
            args (array[float]): circuit input parameters

        Returns:
            array[float]: measured values
        """
        # temporarily store keyword arguments
        keyword_values = {}
        keyword_values.update({
            k: np.array(list(_flatten(v)))
            for k, v in self.keyword_defaults.items()
        })
        keyword_values.update(
            {k: np.array(list(_flatten(v)))
             for k, v in kwargs.items()})

        # temporarily store the free parameter values in the Variable class
        Variable.free_param_values = args
        Variable.kwarg_values = keyword_values

        self.device.reset()
        ret = self.device.execute(self.circuit.operations, obs,
                                  self.circuit.variable_deps)
        return ret
Example #5
    def apply_grad(self, grad, x):
        r"""Update the variables x to take a single optimization step. Flattens and unflattens
        the inputs to maintain nested iterables as the parameters of the optimization.

        Args:
            grad (array): The gradient of the objective
                function at point :math:`x^{(t)}`: :math:`\nabla f(x^{(t)})`
            x (array): the current value of the variables :math:`x^{(t)}`

        Returns:
            array: the new values :math:`x^{(t+1)}`
        """

        grad_flat = _flatten(grad)
        x_flat = _flatten(x)

        if self.accumulation is None:
            self.accumulation = [self._stepsize * g for g in grad_flat]
        else:
            self.accumulation = [
                self.momentum * a + self._stepsize * g
                for a, g in zip(self.accumulation, grad_flat)
            ]

        x_new_flat = [e - a for a, e in zip(self.accumulation, x_flat)]

        return unflatten(x_new_flat, x)
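
A scalar illustration of the accumulation rule above (a_t = momentum * a_{t-1} + stepsize * g_t, then x <- x - a_t); the objective f(x) = x**2 and the numbers are arbitrary:

stepsize, momentum = 0.1, 0.9
x, a = 1.0, 0.0
for _ in range(3):
    g = 2 * x                        # gradient of f(x) = x**2 at the current x
    a = momentum * a + stepsize * g  # accumulation grows while gradients align
    x = x - a
# x is approximately 0.8, 0.46, 0.062 after the three steps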
Example #6
    def test_single_step_convergence(self, fun, x_min, param, nums_freq,
                                     exp_num_calls, substep_optimizer,
                                     substep_kwargs):
        """Tests convergence for easy classical functions in a single Rotosolve step.
        Includes testing of the parameter output shape and the old cost when using step_and_cost."""
        opt = RotosolveOptimizer(substep_optimizer, substep_kwargs)

        # Make only the first argument trainable
        param = (np.array(param[0], requires_grad=True), ) + param[1:]
        # Only one argument is marked as trainable -> All other arguments have to stay fixed
        new_param_step = opt.step(
            fun,
            *param,
            nums_frequency=nums_freq,
        )
        # The following accounts for the unpacking functionality for length-1 param
        if len(param) == 1:
            new_param_step = (new_param_step, )

        assert all(
            np.allclose(p, new_p)
            for p, new_p in zip(param[1:], new_param_step[1:]))

        # With trainable parameters, training should happen
        param = tuple(np.array(p, requires_grad=True) for p in param)
        new_param_step = opt.step(
            fun,
            *param,
            nums_frequency=nums_freq,
        )
        # The following accounts for the unpacking functionality for length-1 param
        if len(param) == 1:
            new_param_step = (new_param_step, )

        assert len(x_min) == len(new_param_step)
        assert np.allclose(
            np.fromiter(_flatten(x_min), dtype=float),
            np.fromiter(_flatten(new_param_step), dtype=float),
            atol=1e-5,
        )

        # Now with step_and_cost and trainable params
        new_param_step_and_cost, old_cost = opt.step_and_cost(
            fun,
            *param,
            nums_frequency=nums_freq,
        )
        # The following accounts for the unpacking functionality for length-1 param
        if len(param) == 1:
            new_param_step_and_cost = (new_param_step_and_cost, )

        assert len(x_min) == len(new_param_step_and_cost)
        assert np.allclose(
            np.fromiter(_flatten(new_param_step_and_cost), dtype=float),
            np.fromiter(_flatten(new_param_step), dtype=float),
            atol=1e-5,
        )
        assert np.isclose(old_cost, fun(*param))
Example #7
def successive_params(par1, par2):
    """Return a list of parameter configurations, successively walking from
    par1 to par2 coordinate-wise."""
    par1_flat = np.fromiter(_flatten(par1), dtype=float)
    par2_flat = np.fromiter(_flatten(par2), dtype=float)
    walking_param = []
    for i in range(len(par1_flat) + 1):
        walking_param.append(unflatten(np.append(par2_flat[:i], par1_flat[i:]), par1))
    return walking_param
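
For flat arrays the loop above reduces to prefix replacement; a small worked example of the configurations it produces:

import numpy as np

par1_flat = np.array([0.0, 0.0, 0.0])
par2_flat = np.array([1.0, 2.0, 3.0])
walk = [np.append(par2_flat[:i], par1_flat[i:]) for i in range(len(par1_flat) + 1)]
# walk == [0,0,0], [1,0,0], [1,2,0], [1,2,3]: par1 morphs into par2 one coordinate at a time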
Example #8
    def compute_grad(self, objective_fn, args, kwargs, grad_fn=None):
        r"""Compute gradient of the objective function at at the shifted point :math:`(x -
        m\times\text{accumulation})` and return it along with the objective function forward pass
        (if available).

        Args:
            objective_fn (function): the objective function for optimization.
            args (tuple): tuple of NumPy arrays containing the current values for the
                objective function.
            kwargs (dict): keyword arguments for the objective function.
            grad_fn (function): optional gradient function of the objective function with respect to
                the variables ``x``. If ``None``, the gradient function is computed automatically.
                Must return the same shape of tuple [array] as the autograd derivative.

        Returns:
            tuple [array]: the NumPy array containing the gradient :math:`\nabla f(x^{(t)})` and the
            objective function output. If ``grad_fn`` is provided, the objective function
            will not be evaluated and instead ``None`` will be returned.
        """
        shifted_args = list(args)

        trainable_args = []
        for arg in args:
            if getattr(arg, "requires_grad", True):
                trainable_args.append(arg)

        if self.accumulation:
            for index, arg in enumerate(trainable_args):
                if self.accumulation[index]:
                    x_flat = _flatten(arg)
                    acc = _flatten(self.accumulation[index])

                    shifted_x_flat = [
                        e - self.momentum * a for a, e in zip(acc, x_flat)
                    ]

                    shifted_args[index] = unflatten(shifted_x_flat, arg)

                    if isinstance(shifted_args[index], ndarray):
                        # Due to a bug in unflatten, input PennyLane tensors
                        # are being unwrapped. Here, we cast them back to PennyLane
                        # tensors.
                        # TODO: remove when the following is fixed:
                        # https://github.com/PennyLaneAI/pennylane/issues/966
                        shifted_args[index] = shifted_args[index].view(tensor)
                        shifted_args[index].requires_grad = True

        g = get_gradient(objective_fn) if grad_fn is None else grad_fn
        grad = g(*shifted_args, **kwargs)
        forward = getattr(g, "forward", None)

        if len(trainable_args) == 1:
            grad = (grad, )

        return grad, forward
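
The shift in compute_grad is the Nesterov-style lookahead: the gradient is evaluated at x - momentum * accumulation rather than at x itself. A flat-array sketch with a stand-in quadratic objective:

import numpy as np

momentum = 0.9
x = np.array([1.0, -0.5])
accumulation = np.array([0.2, 0.1])

shifted_x = x - momentum * accumulation  # lookahead point
grad = 2 * shifted_x                     # gradient of f(x) = sum(x**2), taken at the shift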
Example #9
    def apply_grad(self, grad, args):
        r"""Update the variables args to take a single optimization step. Flattens and unflattens
        the inputs to maintain nested iterables as the parameters of the optimization.

        Args:
            grad (tuple[array]): the gradient of the objective
                function at point :math:`x^{(t)}`: :math:`\nabla f(x^{(t)})`
            args (tuple): the current value of the variables :math:`x^{(t)}`

        Returns:
            list: the new values :math:`x^{(t+1)}`
        """
        args_new = list(args)

        if self.accumulation is None:
            self.accumulation = {
                "fm": [None] * len(args),
                "sm": [None] * len(args),
                "t": 0
            }

        self.accumulation["t"] += 1

        # Update step size (instead of correcting for bias)
        new_stepsize = (self.stepsize *
                        math.sqrt(1 - self.beta2**self.accumulation["t"]) /
                        (1 - self.beta1**self.accumulation["t"]))

        trained_index = 0
        for index, arg in enumerate(args):
            if getattr(arg, "requires_grad", True):
                x_flat = _flatten(arg)
                grad_flat = list(_flatten(grad[trained_index]))
                trained_index += 1

                self._update_moments(index, grad_flat)

                x_new_flat = [
                    e - new_stepsize * f / (math.sqrt(s) + self.eps)
                    for f, s, e in zip(self.accumulation["fm"][index],
                                       self.accumulation["sm"][index], x_flat)
                ]
                args_new[index] = unflatten(x_new_flat, arg)

                if isinstance(arg, ndarray):
                    # Due to a bug in unflatten, input PennyLane tensors
                    # are being unwrapped. Here, we cast them back to PennyLane
                    # tensors.
                    # TODO: remove when the following is fixed:
                    # https://github.com/PennyLaneAI/pennylane/issues/966
                    args_new[index] = args_new[index].view(tensor)
                    args_new[index].requires_grad = True

        return args_new
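
The "update step size (instead of correcting for bias)" comment is the usual Adam identity: dividing the moments by their bias corrections equals (up to the eps term) rescaling the stepsize by sqrt(1 - beta2**t) / (1 - beta1**t). A quick numeric check:

import math

beta1, beta2, eta, t = 0.9, 0.999, 0.1, 5
f, s = 0.4, 0.02  # uncorrected first and second moments (illustrative values)

# textbook Adam: correct the moments for initialization bias ...
step_a = eta * (f / (1 - beta1**t)) / math.sqrt(s / (1 - beta2**t))
# ... which equals folding the corrections into the stepsize, as apply_grad does
step_b = eta * math.sqrt(1 - beta2**t) / (1 - beta1**t) * f / math.sqrt(s)
assert math.isclose(step_a, step_b)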
Example #10
    def evaluate(self, args, **kwargs):
        """Evaluates the quantum function on the specified device.

        Args:
            args (tuple): input parameters to the quantum function

        Returns:
            float, array[float]: output expectation value(s)
        """
        if not self.ops:
            # construct the circuit
            self.construct(args, **kwargs)

        # temporarily store keyword arguments
        keyword_values = {}
        keyword_values.update({k: np.array(list(_flatten(v))) for k, v in self.keyword_defaults.items()})
        keyword_values.update({k: np.array(list(_flatten(v))) for k, v in kwargs.items()})

        # Try and insert kwargs-as-positional back into the kwargs dictionary.
        # NOTE: this works, but the creation of new, temporary arguments
        # by pd_analytic breaks this.
        # positional = []
        # kwargs_as_position = {}
        # for idx, v in enumerate(args):
        #     if idx not in self.keyword_positions:
        #         positional.append(v)
        #     else:
        #         kwargs_as_position[self.keyword_positions[idx]] = np.array(list(_flatten(v)))
        # keyword_values.update(kwargs_as_position)

        # temporarily store the free parameter values in the Variable class
        Variable.free_param_values = np.array(list(_flatten(args)))
        Variable.kwarg_values = keyword_values

        self.device.reset()

        # check that no wires are measured more than once
        m_wires = list(w for ex in self.ev for w in ex.wires)
        if len(m_wires) != len(set(m_wires)):
            raise QuantumFunctionError('Each wire in the quantum circuit can only be measured once.')

        def check_op(op):
            """Make sure only existing wires are referenced."""
            for w in op.wires:
                if w < 0 or w >= self.num_wires:
                    raise QuantumFunctionError("Operation {} applied to invalid wire {} "
                                               "on device with {} wires.".format(op.name, w, self.num_wires))

        # check every gate/preparation and ev measurement
        for op in self.ops:
            check_op(op)

        ret = self.device.execute(self.queue, self.ev)
        return self.output_type(ret)
Example #11
    def _set_variables(self, args, kwargs):
        """Store the current values of the quantum function parameters in the Variable class
        so the Operators may access them.

        Args:
            args (tuple[Any]): positional (differentiable) arguments
            kwargs (dict[str, Any]): auxiliary arguments
        """
        Variable.free_param_values = np.array(list(_flatten(args)))
        if not self.mutable:
            # only immutable circuits access auxiliary arguments through Variables
            Variable.kwarg_values = {k: np.array(list(_flatten(v))) for k, v in kwargs.items()}
Example #12
    def step(self, objective_fn, x, generators, **kwargs):
        r"""Update trainable arguments with one step of the optimizer.

        Args:
            objective_fn (function): The objective function for optimization. It must have the
                signature ``objective_fn(x, generators=None)`` with a sequence of the values ``x``
                and a list of the gates ``generators`` as inputs, returning a single value.
            x (Union[Sequence[float], float]): sequence containing the initial values of the
                variables to be optimized over or a single float with the initial value
            generators (list[~.Operation]): list containing the initial ``pennylane.ops.qubit``
                operators to be used in the circuit and optimized over
            **kwargs : variable length keyword arguments for the objective function.

        Returns:
            array: The new variable values :math:`x^{(t+1)}` as well as the new generators.
        """
        x_flat = np.fromiter(_flatten(x), dtype=float)
        # wrap the objective function so that it accepts the flattened parameter array
        objective_fn_flat = lambda x_flat, gen: objective_fn(
            unflatten(x_flat, x), generators=gen, **kwargs)

        try:
            assert len(x_flat) == len(generators)
        except AssertionError as e:
            raise ValueError(
                "Number of parameters {} must be equal to the number of generators {}."
                .format(len(x_flat), len(generators))) from e

        for d, _ in enumerate(x_flat):
            x_flat[d], generators[d] = self._find_optimal_generators(
                objective_fn_flat, x_flat, generators, d)

        return unflatten(x_flat, x), generators
Example #13
    def apply_grad(self, grad, x):
        r"""Update the variables x to take a single optimization step. Flattens and unflattens
        the inputs to maintain nested iterables as the parameters of the optimization.

        Args:
            grad (array): The gradient of the objective
                function at point :math:`x^{(t)}`: :math:`\nabla f(x^{(t)})`
            x (array): the current value of the variables :math:`x^{(t)}`

        Returns:
            array: the new values :math:`x^{(t+1)}`
        """
        grad_flat = np.array(list(_flatten(grad)))
        x_flat = np.array(list(_flatten(x)))
        x_new_flat = x_flat - self._stepsize * self.metric_tensor_inv @ grad_flat
        return unflatten(x_new_flat, x)
Example #14
    def compute_grad(self, objective_fn, x, grad_fn=None):
        r"""Compute gradient of the objective_fn at the shifted point :math:`(x -
        m\times\text{accumulation})` and return it along with the objective function
        forward pass (if available).

        Args:
            objective_fn (function): the objective function for optimization
            x (array): NumPy array containing the current values of the variables to be updated
            grad_fn (function): Optional gradient function of the objective function with respect to
                the variables ``x``. If ``None``, the gradient function is computed automatically.

        Returns:
            tuple: The NumPy array containing the gradient :math:`\nabla f(x^{(t)})` and the
                objective function output. If ``grad_fn`` is provided, the objective function
                will not be evaluated and instead ``None`` will be returned.
        """

        x_flat = _flatten(x)

        if self.accumulation is None:
            shifted_x_flat = list(x_flat)
        else:
            shifted_x_flat = [
                e - self.momentum * a
                for a, e in zip(self.accumulation, x_flat)
            ]

        shifted_x = unflatten(shifted_x_flat, x)

        g = get_gradient(objective_fn) if grad_fn is None else grad_fn
        grad = g(shifted_x)
        forward = getattr(g, "forward", None)

        return grad, forward
Example #15
    def apply_grad(self, grad, args):
        r"""Update the parameter array :math:`x` for a single optimization step. Flattens and
        unflattens the inputs to maintain nested iterables as the parameters of the optimization.

        Args:
            grad (array): The gradient of the objective
                function at point :math:`x^{(t)}`: :math:`\nabla f(x^{(t)})`
            args (array): the current value of the variables :math:`x^{(t)}`

        Returns:
            array: the new values :math:`x^{(t+1)}`
        """
        grad_flat = np.array(list(_flatten(grad)))
        x_flat = np.array(list(_flatten(args)))
        x_new_flat = x_flat - self.stepsize * np.linalg.solve(self.metric_tensor, grad_flat)
        return unflatten(x_new_flat, args)
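
Examples #13 and #15 compute the same natural-gradient update; #13 applies a precomputed inverse metric tensor, while #15 solves the linear system instead, which avoids forming the inverse. A sketch with made-up numbers:

import numpy as np

grad_flat = np.array([0.1, -0.2])
metric = np.array([[0.25, 0.05], [0.05, 0.25]])
eta = 0.4

step_solve = eta * np.linalg.solve(metric, grad_flat)  # Example #15's approach
step_inv = eta * (np.linalg.inv(metric) @ grad_flat)   # Example #13's approach
assert np.allclose(step_solve, step_inv)               # same update; solve is cheaper and stabler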
Example #16
    def _append_operator(self, operator):
        if operator.num_wires == ActsOn.AllWires:
            if set(operator.wires) != set(range(self.num_wires)):
                raise QuantumFunctionError(
                    "Operator {} must act on all wires".format(operator.name)
                )

        # Make sure only existing wires are used.
        for w in _flatten(operator.wires):
            if w < 0 or w >= self.num_wires:
                raise QuantumFunctionError(
                    "Operation {} applied to invalid wire {} "
                    "on device with {} wires.".format(operator.name, w, self.num_wires)
                )

        # observables go to their own, temporary queue
        if isinstance(operator, Observable):
            if operator.return_type is None:
                self.queue.append(operator)
            else:
                self.obs_queue.append(operator)
        else:
            if self.obs_queue:
                raise QuantumFunctionError(
                    "State preparations and gates must precede measured observables."
                )
            self.queue.append(operator)
Example #17
    def test_number_of_function_calls(
        self, fun, x_min, param, num_freq, optimizer, optimizer_kwargs
    ):
        """Tests that per parameter 2R+1 function calls are used for an update step."""
        global num_calls
        num_calls = 0

        def _fun(*args, **kwargs):
            global num_calls
            num_calls += 1
            return fun(*args, **kwargs)

        opt = RotosolveOptimizer()
        new_param = opt.step(
            _fun,
            *param,
            num_freqs=num_freq,
            optimizer=optimizer,
            optimizer_kwargs=optimizer_kwargs,
        )

        expected_num_calls = np.sum(
            np.fromiter(_flatten(expand_num_freq(num_freq, param)), dtype=int) * 2 + 1
        )
        assert num_calls == expected_num_calls
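
The expected count follows because a trigonometric polynomial of degree R has 2R + 1 free coefficients, so reconstructing each parameter's univariate restriction of the cost takes 2R + 1 evaluations. With hypothetical frequency counts:

num_freq = {"x": 1, "y": 3}                           # illustrative per-parameter R values
expected = sum(2 * R + 1 for R in num_freq.values())  # 3 + 7 = 10 function calls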
Example #18
    def compute_grad(self, objective_fn, x, grad_fn=None):
        r"""Compute gradient of the objective_fn at
        the shifted point :math:`(x - m\times\text{accumulation})`.

        Args:
            objective_fn (function): the objective function for optimization
            x (array): NumPy array containing the current values of the variables to be updated
            grad_fn (function): Optional gradient function of the
                objective function with respect to the variables ``x``.
                If ``None``, the gradient function is computed automatically.

        Returns:
            array: NumPy array containing the gradient :math:`\nabla f(x^{(t)})`
        """

        x_flat = _flatten(x)

        if self.accumulation is None:
            shifted_x_flat = list(x_flat)
        else:
            shifted_x_flat = [
                e - self.momentum * a
                for a, e in zip(self.accumulation, x_flat)
            ]

        shifted_x = unflatten(shifted_x_flat, x)

        if grad_fn is not None:
            g = grad_fn(shifted_x)  # just call the supplied grad function
        else:
            # default is autograd
            g = autograd.grad(objective_fn)(shifted_x)  # pylint: disable=no-value-for-parameter
        return g
Example #19
    def _append_op(self, op):
        """Append a quantum operation into the circuit queue.

        Args:
            op (:class:`~.operation.Operation`): quantum operation to be added to the circuit

        Raises:
            QuantumFunctionError: if ``op`` does not act on all wires, is applied to an
                invalid wire, or if state preparations and gates do not precede measured observables
        """
        if op.num_wires == Wires.All:
            if set(op.wires) != set(range(self.num_wires)):
                raise QuantumFunctionError("Operator {} must act on all wires".format(op.name))

        # Make sure only existing wires are used.
        for w in _flatten(op.wires):
            if w < 0 or w >= self.num_wires:
                raise QuantumFunctionError(
                    "Operation {} applied to invalid wire {} "
                    "on device with {} wires.".format(op.name, w, self.num_wires)
                )

        # observables go to their own, temporary queue
        if isinstance(op, Observable):
            if op.return_type is None:
                self.queue.append(op)
            else:
                self.obs_queue.append(op)
        else:
            if self.obs_queue:
                raise QuantumFunctionError(
                    "State preparations and gates must precede measured observables."
                )
            self.queue.append(op)  # TODO rename self.queue to self.op_queue
Example #20
    def apply_grad(self, grad, x):
        r"""Update the variables x to take a single optimization step. Flattens and unflattens
        the inputs to maintain nested iterables as the parameters of the optimization.

        Args:
            grad (array): The gradient of the objective
                function at point :math:`x^{(t)}`: :math:`\nabla f(x^{(t)})`
            x (array): the current value of the variables :math:`x^{(t)}`

        Returns:
            array: the new values :math:`x^{(t+1)}`
        """

        self.t += 1

        grad_flat = list(_flatten(grad))
        x_flat = _flatten(x)

        # Update first moment
        if self.fm is None:
            self.fm = grad_flat
        else:
            self.fm = [
                self.beta1 * f + (1 - self.beta1) * g
                for f, g in zip(self.fm, grad_flat)
            ]

        # Update second moment
        if self.sm is None:
            self.sm = [g * g for g in grad_flat]
        else:
            self.sm = [
                self.beta2 * f + (1 - self.beta2) * g * g
                for f, g in zip(self.sm, grad_flat)
            ]

        # Update step size (instead of correcting for bias)
        new_stepsize = (self._stepsize * np.sqrt(1 - self.beta2**self.t) /
                        (1 - self.beta1**self.t))

        x_new_flat = [
            e - new_stepsize * f / (np.sqrt(s) + self.eps)
            for f, s, e in zip(self.fm, self.sm, x_flat)
        ]

        return unflatten(x_new_flat, x)
Example #21
 def check_op(op):
     """Make sure only existing wires are referenced."""
     for w in _flatten(op.wires):
         if w < 0 or w >= self.num_wires:
             raise QuantumFunctionError(
                 "Operation {} applied to invalid wire {} "
                 "on device with {} wires.".format(
                     op.name, w, self.num_wires))
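
The same wire check recurs in Examples #16, #19, and #21; extracted as a standalone helper (the name and signature are hypothetical):

def check_wires(wires, num_wires, name="op"):
    """Raise if any wire index falls outside the device's wire range."""
    for w in wires:
        if w < 0 or w >= num_wires:
            raise ValueError(
                "Operation {} applied to invalid wire {} "
                "on device with {} wires.".format(name, w, num_wires)
            )

check_wires([0, 2], 3, "CNOT")   # passes
# check_wires([3], 3, "PauliX")  # would raise: wire 3 on a 3-wire device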
Example #22
    def test_flatten(self, shape):
        """Tests that _flatten successfully flattens multidimensional arrays."""

        reshaped = np.reshape(flat_dummy_array, shape)
        flattened = np.array([x for x in pu._flatten(reshaped)])

        assert flattened.shape == flat_dummy_array.shape
        assert np.array_equal(flattened, flat_dummy_array)
Example #23
    def test_nested_and_flat_returns_same_update(self):
        """Tests that gradient descent optimizer has the same output for
         nested and flat lists."""
        self.logTestName()

        nested = self.sgd_opt.step(self.hybrid_fun_nested, self.nested_list)
        flat = self.sgd_opt.step(self.hybrid_fun_flat, self.flat_list)

        self.assertAllAlmostEqual(flat, list(_flatten(nested)), delta=self.tol)
Example #24
    def apply_grad(self, grad, x):
        r"""Update the variables x to take a single optimization step. Flattens and unflattens
        the inputs to maintain nested iterables as the parameters of the optimization.

        Args:
            grad (array): The gradient of the objective
                function at point :math:`x^{(t)}`: :math:`\nabla f(x^{(t)})`
            x (array): the current value of the variables :math:`x^{(t)}`

        Returns:
            array: the new values :math:`x^{(t+1)}`
        """

        x_flat = _flatten(x)
        grad_flat = _flatten(grad)

        x_new_flat = [e - self.stepsize * g for g, e in zip(grad_flat, x_flat)]

        return unflatten(x_new_flat, x)
Example #25
    def test_single_step_convergence(
        self, fun, x_min, param, num_freq, optimizer, optimizer_kwargs
    ):
        """Tests convergence for easy classical functions in a single Rotosolve step.
        Includes testing of the parameter output shape and the old cost when using step_and_cost."""
        opt = RotosolveOptimizer()

        new_param_step = opt.step(
            fun,
            *param,
            num_freqs=num_freq,
            optimizer=optimizer,
            optimizer_kwargs=optimizer_kwargs,
        )
        # The following accounts for the unpacking functionality for length-1 param
        if len(param) == 1:
            new_param_step = (new_param_step,)

        assert len(x_min) == len(new_param_step)
        assert np.allclose(
            np.fromiter(_flatten(x_min), dtype=float),
            np.fromiter(_flatten(new_param_step), dtype=float),
            atol=1e-5,
        )

        new_param_step_and_cost, old_cost = opt.step_and_cost(
            fun,
            *param,
            num_freqs=num_freq,
            optimizer=optimizer,
            optimizer_kwargs=optimizer_kwargs,
        )
        # The following accounts for the unpacking functionality for length-1 param
        if len(param) == 1:
            new_param_step_and_cost = (new_param_step_and_cost,)

        assert len(x_min) == len(new_param_step_and_cost)
        assert np.allclose(
            np.fromiter(_flatten(new_param_step_and_cost), dtype=float),
            np.fromiter(_flatten(new_param_step), dtype=float),
            atol=1e-5,
        )
        assert np.isclose(old_cost, fun(*param))
Example #26
    def test_single_step(self, fun, x_min, param, num_freq):
        """Tests convergence for easy classical functions in a single Rotosolve step
        with some arguments deactivated for training.
        Includes testing of the parameter output shape and the old cost when using step_and_cost."""
        substep_optimizer = "brute"
        substep_kwargs = None
        opt = RotosolveOptimizer(substep_optimizer, substep_kwargs)

        new_param_step = opt.step(
            fun,
            *param,
            nums_frequency=num_freq,
        )
        # The following accounts for the unpacking functionality for length-1 param
        if len(param) == 1:
            new_param_step = (new_param_step, )

        assert len(x_min) == len(new_param_step)
        assert np.allclose(
            np.fromiter(_flatten(x_min), dtype=float),
            np.fromiter(_flatten(new_param_step), dtype=float),
            atol=1e-5,
        )

        new_param_step_and_cost, old_cost = opt.step_and_cost(
            fun,
            *param,
            nums_frequency=num_freq,
        )
        # The following accounts for the unpacking functionality for length-1 param
        if len(param) == 1:
            new_param_step_and_cost = (new_param_step_and_cost, )

        assert len(x_min) == len(new_param_step_and_cost)
        assert np.allclose(
            np.fromiter(_flatten(new_param_step_and_cost), dtype=float),
            np.fromiter(_flatten(new_param_step), dtype=float),
            atol=1e-5,
        )
        assert np.isclose(old_cost, fun(*param))
Example #27
    def test_nested_and_flat_returns_same_update(self, bunch, tol):
        """Tests that gradient descent optimizer has the same output for
         nested and flat lists."""
        def hybrid_fun_flat(var):
            return quant_fun_flat(var) + var[4]

        def hybrid_fun_nested(var):
            return quant_fun_nested(var) + var[2]

        nested = bunch.sgd_opt.step(hybrid_fun_nested, nested_list)
        flat = bunch.sgd_opt.step(hybrid_fun_flat, flat_list)

        assert flat == pytest.approx(list(_flatten(nested)), abs=tol)
Example #28
    def step(self, objective_fn, *args, **kwargs):
        r"""Update args with one step of the optimizer.

        Args:
            objective_fn (function): the objective function for optimization. It should take a
                sequence of the values ``*args`` as inputs and return a single value.
            *args : variable length sequence containing the initial
                values of the variables to be optimized over or a single float with the initial
                value.
            **kwargs : variable length keyword arguments for the objective function.

        Returns:
            list [array]: the new variable values :math:`x^{(t+1)}`.
            If a single arg is provided, ``list [array]`` is replaced by ``array``.
        """
        # will single out one variable to change at a time
        # these hold the arguments not getting updated
        before_args = []
        after_args = list(args)

        # mutable version of args to get updated
        args_new = list(args)

        for index, arg in enumerate(args):
            # removing current arg from after_args
            del after_args[0]

            if getattr(arg, "requires_grad", True):
                x_flat = np.fromiter(_flatten(arg), dtype=float)

                # version of objective function that depends on a flattened version of
                # just the one argument.  All others held constant.
                objective_fn_flat = lambda x_flat, arg_kw=arg: objective_fn(
                    *before_args, unflatten(x_flat, arg_kw), *after_args, **kwargs)

                # updating each parameter in current arg
                for d, _ in enumerate(x_flat):
                    x_flat = self._rotosolve(objective_fn_flat, x_flat, d)

                args_new[index] = unflatten(x_flat, arg)

            # updating before_args for next loop
            before_args.append(args_new[index])

        # unwrap arguments if only one, backward compatible and cleaner
        if len(args_new) == 1:
            return args_new[0]
        return args_new
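
The per-coordinate _rotosolve update is not shown here. For a cost that is sinusoidal in a single parameter, f(theta) = a*cos(theta) + b*sin(theta) + c, the standard closed-form minimizer (Ostaszewski et al.) can be sketched from three evaluations; this is a generic reconstruction, not PennyLane's internal code:

import numpy as np

def rotosolve_1d(f, phi=0.0):
    """Closed-form minimizer of a single-frequency sinusoidal cost."""
    f_phi, f_plus, f_minus = f(phi), f(phi + np.pi / 2), f(phi - np.pi / 2)
    return phi - np.pi / 2 - np.arctan2(2 * f_phi - f_plus - f_minus, f_plus - f_minus)

theta_opt = rotosolve_1d(lambda t: 2 + np.cos(t - 0.3))
assert np.isclose((theta_opt - 0.3) % (2 * np.pi), np.pi)  # minimum sits at psi + pi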
Example #29
def test_multiple_steps(fun, x_min, param, num_freq):
    """Tests that repeated steps execute as expected."""
    param = tuple(np.array(p, requires_grad=True) for p in param)
    substep_optimizer = "brute"
    substep_kwargs = None
    opt = RotosolveOptimizer(substep_optimizer, substep_kwargs)

    for _ in range(3):
        param = opt.step(
            fun,
            *param,
            nums_frequency=num_freq,
        )
        # The following accounts for the unpacking functionality for length-one param
        if len(x_min) == 1:
            param = (param, )

    assert (np.isscalar(x_min)
            and np.isscalar(param)) or len(x_min) == len(param)
    assert np.allclose(
        np.fromiter(_flatten(x_min), dtype=float),
        np.fromiter(_flatten(param), dtype=float),
        atol=1e-5,
    )
Example #30
def test_multiple_steps(fun, x_min, param, num_freq):
    """Tests that repeated steps execute as expected."""
    opt = RotosolveOptimizer()

    optimizer = "brute"
    optimizer_kwargs = None
    for _ in range(3):
        param = opt.step(
            fun,
            *param,
            num_freqs=num_freq,
            optimizer=optimizer,
            optimizer_kwargs=optimizer_kwargs,
        )
        # The following accounts for the unpacking functionality for length-1 param
        if len(x_min) == 1:
            param = (param,)

    assert (np.isscalar(x_min) and np.isscalar(param)) or len(x_min) == len(param)
    assert np.allclose(
        np.fromiter(_flatten(x_min), dtype=float),
        np.fromiter(_flatten(param), dtype=float),
        atol=1e-5,
    )