Example #1
def QCBMCostFunction(
    ansatz: Ansatz,
    backend: QuantumBackend,
    distance_measure: Callable,
    distance_measure_parameters: dict,
    target_bitstring_distribution: BitstringDistribution,
    gradient_type: str = "finite_difference",
):
    """Cost function used for evaluating QCBM.

    Args:
        ansatz (zquantum.core.interfaces.ansatz.Ansatz): the ansatz used to construct the variational circuits
        backend (zquantum.core.interfaces.backend.QuantumBackend): backend used for QCBM evaluation
        distance_measure (callable): function used to calculate the distance measure
        distance_measure_parameters (dict): dictionary containing the relevant parameters for the chosen distance measure
        target_bitstring_distribution (zquantum.core.bitstring_distribution.BitstringDistribution): bitstring distribution which QCBM aims to learn
        gradient_type (str): parameter indicating which type of gradient should be used.

    Returns:
        Callable that evaluates the parametrized circuit produced by the ansatz with the given parameters and returns
            the distance between the produced bitstring distribution and the target distribution
    """

    assert (int(target_bitstring_distribution.get_qubits_number()) ==
            ansatz.number_of_qubits)

    def cost_function(parameters: np.ndarray,
                      store_artifact: StoreArtifact = None) -> ValueEstimate:
        """
        Evaluates the value of the cost function for given parameters.

        Args:
            parameters: parameters for which the evaluation should occur.
            store_artifact: optional callable used to store the obtained bitstring distribution as an artifact.

        Returns:
            ValueEstimate: cost function value for the given parameters. When store_artifact is
                provided, the obtained bitstring distribution is stored as the
                "bitstring_distribution" artifact.
        """
        circuit = ansatz.get_executable_circuit(parameters)
        distribution = backend.get_bitstring_distribution(circuit)
        value = evaluate_distribution_distance(
            target_bitstring_distribution,
            distribution,
            distance_measure,
            distance_measure_parameters=distance_measure_parameters,
        )

        if store_artifact:
            store_artifact("bitstring_distribution", distribution)

        return ValueEstimate(value)

    if gradient_type == "finite_difference":
        cost_function = function_with_gradient(
            cost_function, finite_differences_gradient(cost_function))
    else:
        raise RuntimeError("Unsupported gradient type: ", gradient_type)

    return cost_function
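A rough sketch of how the cost function returned by Example #1 might be driven by an optimizer. The ansatz, backend, distance measure, and target distribution are placeholders, and the ScipyOptimizer import path is an assumption; the minimize call and result attributes follow the SLSQP examples further below.

import numpy as np
from zquantum.optimizers.scipy_optimizer import ScipyOptimizer  # import path assumed

# Placeholders: my_ansatz, my_backend, my_distance_measure, and target_distribution
# stand in for concrete zquantum objects built elsewhere.
cost_function = QCBMCostFunction(
    ansatz=my_ansatz,
    backend=my_backend,
    distance_measure=my_distance_measure,
    distance_measure_parameters={"epsilon": 1e-6},
    target_bitstring_distribution=target_distribution,
)

optimizer = ScipyOptimizer(method="L-BFGS-B")
initial_params = np.random.uniform(-np.pi, np.pi, my_ansatz.number_of_params)
results = optimizer.minimize(cost_function, initial_params=initial_params)
print(results.opt_value, results.opt_params)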
Example #2
def test_adding_gradient_to_function_not_storing_artifacts_makes_a_callable_not_storing_artifacts(
):
    def _test_function(params):
        return (params**2).sum()

    function = function_with_gradient(
        _test_function, finite_differences_gradient(_test_function))
    assert not has_store_artifact_param(function)
Example #3
def test_adding_gradient_to_function_storing_artifacts_makes_a_callable_that_stores_artifacts(
):
    def _test_function(params, store_artifact=None):
        if store_artifact:
            store_artifact("x", params[0])
        return (params**2).sum()

    function = function_with_gradient(
        _test_function, finite_differences_gradient(_test_function))
    assert has_store_artifact_param(function)
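Both tests above turn on whether the wrapped callable declares a store_artifact parameter. The library's has_store_artifact_param is not shown here; as a rough illustration of the idea only, the check could be written with inspect.signature:

import inspect

def _has_store_artifact_param(function) -> bool:
    # Illustrative stand-in, not the zquantum implementation: report whether the
    # callable's signature declares a `store_artifact` parameter.
    return "store_artifact" in inspect.signature(function).parameters

def f(params):
    return (params ** 2).sum()

def g(params, store_artifact=None):
    if store_artifact:
        store_artifact("x", params[0])
    return (params ** 2).sum()

assert not _has_store_artifact_param(f)
assert _has_store_artifact_param(g)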
Example #4
def QCBMCostFunction(
    ansatz: Ansatz,
    backend: QuantumBackend,
    n_samples: int,
    distance_measure: DistanceMeasure,
    distance_measure_parameters: dict,
    target_bitstring_distribution: BitstringDistribution,
    gradient_type: str = "finite_difference",
    gradient_kwargs: dict = None,
) -> CostFunction:
    """Cost function used for evaluating QCBM.

    Args:
        ansatz: the ansatz used to construct the variational circuits
        backend: backend used for QCBM evaluation
        n_samples: number of measurement samples used to estimate the bitstring distribution
        distance_measure: function used to calculate the distance measure
        distance_measure_parameters: dictionary containing the relevant parameters for the chosen distance measure
        target_bitstring_distribution: bitstring distribution which QCBM aims to learn
        gradient_type: parameter indicating which type of gradient should be used.
        gradient_kwargs: additional keyword arguments passed to the finite-difference gradient

    Returns:
        Callable CostFunction object that evaluates the parametrized circuit produced by the ansatz with the given
        parameters and returns the distance between the produced bitstring distribution and the target distribution
    """

    warnings.warn(
        "QCBMCostFunction is deprecated in favour of create_QCBM_cost_function.",
        DeprecationWarning,
    )

    cost_function = _create_QCBM_cost_function(
        ansatz,
        backend,
        n_samples,
        distance_measure,
        distance_measure_parameters,
        target_bitstring_distribution,
    )

    if gradient_kwargs is None:
        gradient_kwargs = {}

    if gradient_type == "finite_difference":
        cost_function = function_with_gradient(
            cost_function,
            finite_differences_gradient(cost_function, **gradient_kwargs))
    else:
        raise RuntimeError("Unsupported gradient type: ", gradient_type)

    return cost_function
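Because Example #4 only deprecates QCBMCostFunction, new code would call create_QCBM_cost_function instead. A hedged sketch, assuming the public function takes the same arguments that the wrapper above forwards to _create_QCBM_cost_function (the ansatz, backend, distance measure, and target distribution are placeholders):

cost_function = create_QCBM_cost_function(
    ansatz=my_ansatz,
    backend=my_backend,
    n_samples=10000,
    distance_measure=my_distance_measure,
    distance_measure_parameters={"epsilon": 1e-6},
    target_bitstring_distribution=target_distribution,
)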
Example #5
def test_finite_differences_gradient_uses_supplied_epsilon_to_compute_gradient_estimate(
    epsilon, parameters
):
    gradient = finite_differences_gradient(sum_x_squared, epsilon)
    eps_vectors = np.eye(len(parameters)) * epsilon

    expected_gradient_value = np.array(
        [
            sum_x_squared(parameters + vector) - sum_x_squared(parameters - vector)
            for vector in eps_vectors
        ]
    ) / (2 * epsilon)

    assert np.array_equal(expected_gradient_value, gradient(parameters))
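The expected value in Example #5 is the symmetric (central) difference quotient, (f(x + ε·e_i) − f(x − ε·e_i)) / (2ε), per coordinate. A self-contained sketch of that estimator, following the convention the test encodes rather than the library's actual implementation:

import numpy as np

def central_difference_gradient(function, epsilon: float = 1e-5):
    # Return a callable that estimates the gradient of `function` by central
    # differences: (f(x + eps*e_i) - f(x - eps*e_i)) / (2 * eps) for each i.
    def gradient(parameters: np.ndarray) -> np.ndarray:
        eps_vectors = np.eye(len(parameters)) * epsilon
        return np.array(
            [
                (function(parameters + v) - function(parameters - v)) / (2 * epsilon)
                for v in eps_vectors
            ]
        )

    return gradient

# For f(x) = sum(x**2) the exact gradient is 2x, so the estimate at [1.0, 2.0]
# should be close to [2.0, 4.0].
grad = central_difference_gradient(lambda x: np.sum(x ** 2))
print(grad(np.array([1.0, 2.0])))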
Example #6
    def test_SLSQP_with_inequality_constraints(self):
        # Given
        cost_function = FunctionWithGradient(
            rosenbrock_function,
            finite_differences_gradient(rosenbrock_function))
        constraints = {"type": "ineq", "fun": lambda x: x[0] + x[1] - 3}
        optimizer = ScipyOptimizer(method="SLSQP")
        initial_params = np.array([0, 0])

        # When
        results_without_constraints = optimizer.minimize(
            cost_function, initial_params=initial_params)
        optimizer.constraints = constraints
        results_with_constraints = optimizer.minimize(
            cost_function, initial_params=initial_params)

        # Then
        assert results_without_constraints.opt_value == pytest.approx(
            results_with_constraints.opt_value, abs=1e-1)
        assert results_with_constraints.opt_params.sum() >= 3
Example #7
    def test_SLSQP_with_equality_constraints(self):
        # Given
        cost_function = FunctionWithGradient(
            rosenbrock_function,
            finite_differences_gradient(rosenbrock_function))
        constraint_cost_function = sum_x_squared

        constraints = ({"type": "eq", "fun": constraint_cost_function}, )
        optimizer = ScipyOptimizer(method="SLSQP", constraints=constraints)
        initial_params = np.array([1, 1])
        target_params = np.array([0, 0])
        target_value = 1

        # When
        results = optimizer.minimize(cost_function,
                                     initial_params=initial_params)

        # Then
        assert results.opt_value == pytest.approx(target_value, abs=1e-3)
        assert results.opt_params == pytest.approx(target_params, abs=1e-3)
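The constraint dictionaries in Examples #6 and #7 use SciPy's native format, so the same constraint could be handed straight to scipy.optimize.minimize. A small sketch against plain SciPy, with sum_x_squared written out explicitly:

import numpy as np
from scipy.optimize import minimize

def sum_x_squared(x):
    return np.sum(x ** 2)

# Same inequality constraint as in Example #6: x[0] + x[1] - 3 >= 0.
constraints = {"type": "ineq", "fun": lambda x: x[0] + x[1] - 3}
result = minimize(sum_x_squared, x0=np.array([0.0, 0.0]), method="SLSQP",
                  constraints=constraints)
print(result.x)  # expected to land near [1.5, 1.5]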
Example #8
    def test_history_info_contains_gradient_history_for_function_with_gradient(
            self):
        cost_function = recorder(
            FunctionWithGradient(sum_x_squared,
                                 finite_differences_gradient(sum_x_squared)))

        cost_function(np.array([1, 2, 3]))
        cost_function.gradient(np.array([0, -1, 1]))

        history_info = construct_history_info(cost_function, True)

        assert len(history_info["history"]) == 1
        assert len(history_info["gradient_history"]) == 1

        history_entry = history_info["history"][0]
        assert history_entry.call_number == 0
        np.testing.assert_array_equal(history_entry.params, [1, 2, 3])
        assert history_entry.value == cost_function(np.array([1, 2, 3]))

        history_entry = history_info["gradient_history"][0]
        assert history_entry.call_number == 0
        np.testing.assert_array_equal(history_entry.params, [0, -1, 1])
        np.testing.assert_array_equal(
            history_entry.value, cost_function.gradient(np.array([0, -1, 1])))
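Going only by the attributes exercised in Example #8, the recorded calls could be inspected along these lines (a sketch; no recorder API beyond what the test shows is assumed):

# Assumes cost_function was wrapped with recorder(...) and called, as in the test above.
history_info = construct_history_info(cost_function, True)
for entry in history_info["history"]:
    print("call", entry.call_number, entry.params, entry.value)
for entry in history_info["gradient_history"]:
    print("gradient call", entry.call_number, entry.params, entry.value)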
Example #9
    def __init__(self, client, epsilon: float = 1e-5):
        self.client = client
        self.current_iteration = 0
        self.epsilon = epsilon
        self.gradient = finite_differences_gradient(self.__call__)
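Example #9 shows a common pattern: an object whose __call__ evaluates the cost (via its client) and whose gradient is built from that same __call__ with finite differences. A hedged, self-contained sketch of the pattern, with the client replaced by a plain local callable; it uses the same finite_differences_gradient as the other examples (import omitted, as in the originals):

import numpy as np

class ProxyCostFunction:
    # Illustrative analogue of the class above: the remote client is replaced
    # by a plain callable, and the gradient is derived from __call__ itself.

    def __init__(self, evaluate, epsilon: float = 1e-5):
        self.evaluate = evaluate
        self.current_iteration = 0
        self.epsilon = epsilon
        # Example #5 shows epsilon accepted as the second argument.
        self.gradient = finite_differences_gradient(self.__call__, self.epsilon)

    def __call__(self, parameters: np.ndarray) -> float:
        self.current_iteration += 1
        return self.evaluate(parameters)

cost = ProxyCostFunction(lambda x: float(np.sum(x ** 2)))
print(cost(np.array([1.0, 2.0])))           # 5.0
print(cost.gradient(np.array([1.0, 2.0])))  # approximately [2. 4.]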
Example #10
    def sum_x_squared(self):
        def _sum_x_squared(x):
            return sum(x**2)

        return function_with_gradient(
            _sum_x_squared, finite_differences_gradient(_sum_x_squared))
Example #11
def test_finite_differences_gradient_returns_vectors_with_correct_length(parameters):
    gradient = finite_differences_gradient(sum_x_squared)

    assert len(parameters) == len(gradient(parameters))