def minimize(
        self, cost_function: CallableWithGradient, initial_params: np.ndarray
    ) -> OptimizeResult:
        """Minimize using the Covariance Matrix Adaptation Evolution Strategy
        (CMA-ES).

        Args:
            cost_function: object representing cost function we want to minimize
            initial_params: initial guess for the ansatz parameters.

        Returns:
            An OptimizeResult containing the optimal value, the optimal parameters,
            and the recorded history of cost function evaluations.
        """

        # Wrap the cost function so that every evaluation is recorded in its history.
        cost_function = recorder(cost_function)

        strategy = cma.CMAEvolutionStrategy(initial_params, self.sigma_0, self.options)
        result = strategy.optimize(cost_function).result

        return optimization_result(
            opt_value=result.fbest,
            opt_params=result.xbest,
            history=cost_function.history,
            nfev=result.evaluations,
            nit=result.iterations,
            cma_xfavorite=list(result.xfavorite),
        )
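For context, the fields read off result above (fbest, xbest, evaluations, iterations, xfavorite) come straight from the cma package. A minimal standalone sketch, with an illustrative objective and settings that are not part of the snippet above:

import cma
import numpy as np

def sphere(x):
    # Simple convex objective used only for illustration.
    return float(np.sum(np.asarray(x) ** 2))

es = cma.CMAEvolutionStrategy([1.0, -0.5, 2.0], 0.3, {"verbose": -9})
es.optimize(sphere)

print(es.result.fbest)        # best objective value found
print(es.result.xbest)        # parameters achieving fbest
print(es.result.evaluations)  # total number of objective evaluations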
    def _minimize(
        self,
        cost_function: CallableWithGradient,
        initial_params: Optional[np.ndarray] = None,
        keep_history: bool = False,
    ) -> OptimizeResult:
        """
        Finds the parameters which minimize given cost function, by trying all the parameters from the provided list of points.

        Args:
            cost_function: object representing cost function we want to minimize
            inital_params: initial parameters for the cost function
            keep_history: flag indicating whether history of cost function
                evaluations should be recorded.

        """
        assert hasattr(cost_function, "gradient")

        current_parameters = copy.deepcopy(initial_params)
        for _ in range(self.number_of_iterations):
            gradients = cost_function.gradient(current_parameters)
            current_parameters = current_parameters - (self.learning_rate *
                                                       gradients)

        final_value = cost_function(current_parameters)

        return optimization_result(
            opt_value=final_value,
            opt_params=current_parameters,
            nit=self.number_of_iterations,
            nfev=None,
            **construct_history_info(cost_function, keep_history),
        )
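The update rule above is plain gradient descent. A self-contained sketch on a toy quadratic, with an illustrative learning rate and iteration count that are not taken from the snippet:

import numpy as np

def cost(params):
    return float(np.sum((params - 1.0) ** 2))

def gradient(params):
    return 2.0 * (params - 1.0)

params = np.array([5.0, -3.0])
learning_rate = 0.1
for _ in range(100):
    # Same step as above: move against the gradient scaled by the learning rate.
    params = params - learning_rate * gradient(params)

print(cost(params))  # close to 0, since params approach [1, 1]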
Example #3
    def _minimize(
        self,
        cost_function: CallableWithGradient,
        initial_params: np.ndarray,
        keep_history: bool = False,
    ) -> OptimizeResult:
        """Minimize using the Covariance Matrix Adaptation Evolution Strategy
        (CMA-ES).

        Note:
            The original CMA-ES implementation stores optimization history by default.
            This is a separate mechanism from the one controlled by the recorder and
            is therefore active even if keep_history is set to False, which might
            lead to memory issues in some extreme cases.
            However, we expose only the recording performed using the provided recorder.

        Args:
            cost_function: object representing cost function we want to minimize
            initial_params: initial guess for the ansatz parameters.
            keep_history: flag indicating whether history of cost function
                evaluations should be recorded.
        """
        strategy = cma.CMAEvolutionStrategy(initial_params, self.sigma_0, self.options)
        result = strategy.optimize(cost_function).result

        return optimization_result(
            opt_value=result.fbest,
            opt_params=result.xbest,
            nfev=result.evaluations,
            nit=result.iterations,
            cma_xfavorite=list(result.xfavorite),
            **construct_history_info(cost_function, keep_history)
        )
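The helper construct_history_info used above is assumed here to simply expose the history gathered by the recorder; the sketch below is one plausible shape, not the library's actual implementation:

def construct_history_info(cost_function, keep_history):
    # Assumption: the recorder wrapper attaches a history list of
    # evaluation entries to the cost function it wraps.
    return {"history": cost_function.history if keep_history else []}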
def test_optimization_result_contains_opt_value_and_opt_params():
    opt_value = 2.0
    opt_params = [-1, 0, 3.2]

    result = optimization_result(opt_value=opt_value, opt_params=opt_params)

    assert result.opt_value == opt_value
    assert result.opt_params == opt_params
def test_optimization_result_contains_other_attributes_passed_as_kwargs():
    opt_value = 0.0
    opt_params = [1, 2, 3]
    kwargs = {"bitstring": "01010", "foo": 3.0}

    result = optimization_result(opt_value=opt_value,
                                 opt_params=opt_params,
                                 **kwargs)

    assert all(getattr(result, key) == value for key, value in kwargs.items())
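Consistent with these two tests, optimization_result can be thought of as packing its keyword arguments into a scipy.optimize.OptimizeResult, which is a dict with attribute access. The sketch below is an assumption, not the library source:

from scipy.optimize import OptimizeResult

def optimization_result(*, opt_value, opt_params, **kwargs):
    # Every keyword argument becomes readable as result.<name>,
    # which is exactly what the tests above check.
    return OptimizeResult(opt_value=opt_value, opt_params=opt_params, **kwargs)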
Example #6
    def minimize(self, cost_function, initial_params=None):
        """
        Minimizes given cost function using optimizers from Qiskit Aqua.

        Args:
            cost_function(): python method which takes numpy.ndarray as input
            initial_params(np.ndarray): initial parameters to be used for optimization

        Returns:
            optimization_results(scipy.optimize.OptimizeResults): results of the optimization.
        """
        history = []

        if self.method == "SPSA":
            optimizer = SPSA(**self.options)
        elif self.method == "ADAM" or self.method == "AMSGRAD":
            if self.method == "AMSGRAD":
                self.options["amsgrad"] = True
            optimizer = ADAM(**self.options)

        number_of_variables = len(initial_params)

        if self.keep_value_history:
            cost_function_wrapper = recorder(cost_function)
        else:
            cost_function_wrapper = _CostFunctionWrapper(cost_function)

        gradient_function = None
        if hasattr(cost_function, "gradient") and callable(
                getattr(cost_function, "gradient")):
            gradient_function = cost_function.gradient

        solution, value, nit = optimizer.optimize(
            num_vars=number_of_variables,
            objective_function=cost_function_wrapper,
            initial_point=initial_params,
            gradient_function=gradient_function,
        )

        if self.keep_value_history:
            nfev = len(cost_function_wrapper.history)
            history = cost_function_wrapper.history
        else:
            nfev = cost_function_wrapper.number_of_calls
            history = []

        return optimization_result(
            opt_value=value,
            opt_params=solution,
            nit=nit,
            history=history,
            nfev=nfev,
        )
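The _CostFunctionWrapper used when keep_value_history is off only needs to expose number_of_calls. A hypothetical minimal version, assuming nothing beyond what the snippet reads from it:

class _CostFunctionWrapper:
    # Hypothetical sketch: counts evaluations without storing their values.
    def __init__(self, cost_function):
        self._cost_function = cost_function
        self.number_of_calls = 0

    def __call__(self, params):
        self.number_of_calls += 1
        return self._cost_function(params)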
Example #7
    def _minimize(
        self,
        cost_function: CallableWithGradient,
        initial_params: np.ndarray = None,
        keep_history: bool = False,
    ):
        """
        Minimizes given cost function using functions from scipy.optimize.basinhopping.

        Args:
            cost_function(): python method which takes numpy.ndarray as input
            initial_params(np.ndarray): initial parameters to be used for optimization
            keep_history: flag indicating whether history of cost function
                evaluations should be recorded.
        """
        jacobian = None
        if hasattr(cost_function, "gradient") and callable(
                getattr(cost_function, "gradient")):
            jacobian = cost_function.gradient
        if self.minimizer_kwargs is not None and jacobian is not None:
            # scipy's local minimizer expects the gradient under the "jac" key of
            # minimizer_kwargs, not inside its "options" dict.
            self.minimizer_kwargs["jac"] = jacobian

        result = scipy.optimize.basinhopping(
            cost_function,
            initial_params,
            niter=self.niter,
            T=self.T,
            stepsize=self.stepsize,
            minimizer_kwargs=self.minimizer_kwargs,
            take_step=self.take_step,
            accept_test=self.accept_test,
            interval=self.interval,
            disp=self.disp,
            niter_success=self.niter_success,
        )

        opt_value = result.fun
        opt_params = result.x

        nit = result.get("nit", None)
        nfev = result.get("nfev", None)

        return optimization_result(opt_value=opt_value,
                                   opt_params=opt_params,
                                   nit=nit,
                                   nfev=nfev,
                                   **construct_history_info(
                                       cost_function, keep_history))
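For reference, scipy.optimize.basinhopping takes the gradient of the local minimizer through the "jac" entry of minimizer_kwargs. A minimal direct call with illustrative settings:

import numpy as np
import scipy.optimize

result = scipy.optimize.basinhopping(
    scipy.optimize.rosen,          # objective
    np.zeros(3),                   # starting point
    niter=10,
    minimizer_kwargs={"method": "L-BFGS-B", "jac": scipy.optimize.rosen_der},
)
print(result.fun, result.x)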
Example #8
    def _minimize(
        self,
        cost_function: CallableWithGradient,
        initial_params: np.ndarray = None,
        keep_history: bool = False,
    ):
        """
        Minimizes given cost function using optimizers from Qiskit Aqua.

        Args:
            cost_function: python method which takes numpy.ndarray as input
            initial_params(np.ndarray): initial parameters to be used for optimization

        Returns:
            optimization_results(scipy.optimize.OptimizeResults): results of the optimization.
        """
        number_of_variables = len(initial_params)

        gradient_function = None
        if hasattr(cost_function, "gradient") and callable(
            getattr(cost_function, "gradient")
        ):
            gradient_function = cost_function.gradient

        solution, value, nfev = self.optimizer.optimize(
            num_vars=number_of_variables,
            objective_function=cost_function,
            initial_point=initial_params,
            gradient_function=gradient_function,
        )

        if self.method == "ADAM" or self.method == "AMSGRAD":
            nit = self.optimizer._t
        else:
            nit = self.optimizer.maxiter

        return optimization_result(
            opt_value=value,
            opt_params=solution,
            nit=nit,
            nfev=nfev,
            **construct_history_info(cost_function, keep_history)
        )
Example #9
    def minimize(self, cost_function, initial_params=None, callback=None):
        """
        Minimizes given cost function using functions from scipy.minimize.

        Args:
            cost_function(): python method which takes numpy.ndarray as input
            initial_params(np.ndarray): initial parameters to be used for optimization
            callback(): callback function. If none is provided, a default one will be used.

        Returns:
            optimization_results(scipy.optimize.OptimizeResults): results of the optimization.
        """

        if self.keep_value_history:
            cost_function = recorder(cost_function)

        jacobian = None
        if hasattr(cost_function, "gradient") and callable(
                getattr(cost_function, "gradient")):
            jacobian = cost_function.gradient

        result = scipy.optimize.minimize(
            cost_function,
            initial_params,
            method=self.method,
            options=self.options,
            constraints=self.constraints,
            callback=callback,
            jac=jacobian,
        )
        opt_value = result.fun
        opt_params = result.x

        nit = result.get("nit", None)
        nfev = result.get("nfev", None)

        return optimization_result(
            opt_value=opt_value,
            opt_params=opt_params,
            nit=nit,
            nfev=nfev,
            history=cost_function.history if self.keep_value_history else [],
        )
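scipy.optimize.minimize returns an OptimizeResult that behaves like a dict, which is why result.get("nit", None) above is safe even for methods that do not report an iteration count. A minimal illustration:

import numpy as np
import scipy.optimize

result = scipy.optimize.minimize(
    lambda x: float(np.sum(x ** 2)),
    np.array([1.0, 2.0]),
    method="L-BFGS-B",
)
print(result.fun, result.x)                    # attribute-style access
print(result.get("nit"), result.get("njev"))   # dict-style access also works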
Example #10
    def minimize(
            self,
            cost_function: CallableWithGradient,
            initial_params: Optional[np.ndarray] = None) -> OptimizeResult:
        """
        Finds the parameters which minimize given cost function, by trying all the parameters from the grid.

        Args:
            cost_function(zquantum.core.interfaces.cost_function.CostFunction): object representing cost function we want to minimize
            inital_params (np.ndarray): initial parameters for the cost function

        Returns:
            OptimizeResults
        """
        if initial_params is not None and len(initial_params) != 0:
            warnings.warn(
                "Grid search doesn't use initial parameters, they will be ignored."
            )

        min_value = None
        nfev = 0

        if self.keep_value_history:
            cost_function = recorder(cost_function)

        for params in self.grid.params_list:
            value = cost_function(params)
            nfev += 1
            if min_value is None or value < min_value:
                min_value = value
                optimal_params = params

        return optimization_result(
            opt_value=min_value,
            opt_params=optimal_params,
            nfev=nfev,
            nit=None,
            history=cost_function.history if self.keep_value_history else [])
Example #11
    def _minimize(
        self,
        cost_function: CallableWithGradient,
        initial_params: Optional[np.ndarray] = None,
        keep_history: bool = False,
    ) -> OptimizeResult:
        """
        Finds the parameters which minimize given cost function, by trying all the parameters from the grid.

        Args:
            cost_function: object representing cost function we want to minimize
            inital_params: initial parameters for the cost function
            keep_history: flag indicating whether history of cost function
                evaluations should be recorded.

        """
        if initial_params is not None and len(initial_params) != 0:
            warnings.warn(
                "Grid search doesn't use initial parameters, they will be ignored."
            )

        min_value = None
        nfev = 0

        for params in self.grid.params_list:
            value = cost_function(params)
            nfev += 1
            if min_value is None or value < min_value:
                min_value = value
                optimal_params = params

        return optimization_result(opt_value=min_value,
                                   opt_params=optimal_params,
                                   nfev=nfev,
                                   nit=None,
                                   **construct_history_info(
                                       cost_function, keep_history))
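The grid.params_list iterated over above is assumed to be a plain list of parameter vectors. A hypothetical 2-D grid of angles, built with itertools.product, shows the kind of input grid search expects:

import itertools
import numpy as np

values = np.linspace(0, np.pi, 5)
params_list = [np.array(point) for point in itertools.product(values, values)]

# Each entry is one candidate parameter vector for the cost function.
print(len(params_list), params_list[0])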
    def _recursive_minimize(
        self,
        cost_function_factory,
        initial_params,
        keep_history,
        cost_hamiltonian,
        qubit_map,
        nit,
        nfev,
        histories,
    ):
        """A method that recursively calls itself with each recursion reducing 1 term
        of the cost hamiltonian
        """

        # Set up QAOA circuit
        ansatz = copy(self._ansatz)

        ansatz.cost_hamiltonian = cost_hamiltonian

        cost_function = cost_function_factory(
            cost_hamiltonian,
            ansatz,
        )

        if keep_history:
            cost_function = self.recorder(cost_function)

        # Run & optimize QAOA
        opt_results = self.inner_optimizer.minimize(cost_function, initial_params)
        nit += opt_results.nit
        nfev += opt_results.nfev
        if keep_history:
            histories = extend_histories(cost_function, histories)

        # Reduce the cost hamiltonian
        (
            term_with_largest_expval,
            largest_expval,
        ) = _find_term_with_strongest_correlation(
            cost_hamiltonian,
            ansatz,
            opt_results.opt_params,
            cost_function_factory,
        )

        new_qubit_map = _update_qubit_map(
            qubit_map, term_with_largest_expval, largest_expval
        )

        reduced_cost_hamiltonian = _create_reduced_hamiltonian(
            cost_hamiltonian,
            term_with_largest_expval,
            largest_expval,
        )

        # Check that the new cost hamiltonian has the correct number of qubits
        assert (
            count_qubits(change_operator_type(reduced_cost_hamiltonian, QubitOperator))
            == count_qubits(change_operator_type(cost_hamiltonian, QubitOperator)) - 1
            # If we have 1 qubit, the reduced cost hamiltonian would be empty and say it has
            # 0 qubits.
            or count_qubits(
                change_operator_type(reduced_cost_hamiltonian, QubitOperator)
            )
            == 0
            and count_qubits(change_operator_type(cost_hamiltonian, QubitOperator)) == 2
            and self._n_c == 1
        )

        # Check that the qubit map has the correct number of qubits
        assert (
            count_qubits(change_operator_type(cost_hamiltonian, QubitOperator)) - 1
            == max([l[0] for l in new_qubit_map.values()]) + 1
        )

        if (
            count_qubits(change_operator_type(reduced_cost_hamiltonian, QubitOperator))
            > self._n_c
        ):
            # If we haven't reached the threshold `n_c`, we repeat the above with the
            # reduced cost hamiltonian.
            return self._recursive_minimize(
                cost_function_factory,
                initial_params,
                keep_history,
                cost_hamiltonian=reduced_cost_hamiltonian,
                qubit_map=new_qubit_map,
                nit=nit,
                nfev=nfev,
                histories=histories,
            )

        else:
            best_value, reduced_solutions = solve_problem_by_exhaustive_search(
                change_operator_type(reduced_cost_hamiltonian, QubitOperator)
            )

            solutions = _map_reduced_solutions_to_original_solutions(
                reduced_solutions, new_qubit_map
            )

            opt_result = optimization_result(
                opt_solutions=solutions,
                opt_value=best_value,
                opt_params=None,
                nit=nit,
                nfev=nfev,
                **histories,
            )

            return opt_result
# It does not matter though, as we are only testing serialization and it contains a
# variety of data to be serialized.
EXAMPLE_OPTIMIZATION_RESULT = optimization_result(
    opt_value=0.5,
    opt_params=np.array([0, 0.5, 2.5]),
    nit=3,
    fev=10,
    history=[
        HistoryEntry(
            call_number=0,
            params=np.array([0.1, 0.2, 0.3j]),
            value=ValueEstimate(0.5, precision=6),
        ),
        HistoryEntry(call_number=1, params=np.array([1, 2, 3]), value=-10.0),
        HistoryEntryWithArtifacts(
            call_number=2,
            params=np.array([-1, -0.5, -0.6]),
            value=-20.0,
            artifacts={
                "bitstring": "0111",
                "bitstring_distribution": BitstringDistribution(
                    {"111": 0.25, "010": 0.75}
                ),
            },
        ),
    ],
)

EXPECTED_DESERIALIZED_RESULT = {