def test_optimize_trial_result_data_methods():
    """Check the aggregate statistics exposed by an OptimizationTrialResult."""
    specs = [
        dict(optimal_value=5.7,
             optimal_parameters=numpy.array([1.3, 8.7]),
             num_evaluations=59,
             cost_spent=3.1,
             seed=60,
             status=54,
             message='ZibVTBNe8'),
        dict(optimal_value=4.7,
             optimal_parameters=numpy.array([1.7, 2.1]),
             num_evaluations=57,
             cost_spent=9.3,
             seed=51,
             status=32,
             message='cicCZ8iCg0D'),
    ]
    trial = OptimizationTrialResult(
        [OptimizationResult(**spec) for spec in specs],
        params=OptimizationParams(ExampleAlgorithm()))

    # Two repetitions; the reported optimum is the better (smaller) run.
    assert trial.repetitions == 2
    assert trial.optimal_value == 4.7
    numpy.testing.assert_allclose(trial.optimal_parameters,
                                  numpy.array([1.7, 2.1]))
    # Default quantile is the median over the two repetitions.
    assert trial.optimal_value_quantile() == 5.2
    assert trial.num_evaluations_quantile() == 58
    assert trial.cost_spent_quantile() == 6.2
Example #2
0
def test_optimization_trial_result_extend():
    """Verify that extend() appends results and updates the repetition count."""
    initial = OptimizationResult(optimal_value=4.7,
                                 optimal_parameters=numpy.array([2.3, 2.7]),
                                 num_evaluations=39,
                                 cost_spent=3.9,
                                 seed=63,
                                 status=44,
                                 message='di382j2f')
    extra = OptimizationResult(optimal_value=3.7,
                               optimal_parameters=numpy.array([1.2, 3.1]),
                               num_evaluations=47,
                               cost_spent=9.9,
                               seed=21,
                               status=22,
                               message='i328d8ie3')

    trial = OptimizationTrialResult(
        [initial], params=OptimizationParams(ExampleAlgorithm()))

    # Starts with the single seeded result.
    assert len(trial.results) == 1
    assert trial.repetitions == 1

    trial.extend([extra])

    # The appended result counts as an additional repetition.
    assert len(trial.results) == 2
    assert trial.repetitions == 2
    def _get_trial_result_list(
            self, param_sweep: Iterable[OptimizationParams],
            identifiers: Optional[Iterable[Hashable]],
            reevaluate_final_params: bool, save_x_vals: bool,
            seeds: Optional[Sequence[int]],
            num_processes: Optional[int]) -> List[OptimizationTrialResult]:
        """Run one optimization per parameter set using a process pool.

        Args:
            param_sweep: The optimization parameter sets; one worker run is
                launched per set.
            identifiers: Unused here; accepted for interface parity with the
                caller.
            reevaluate_final_params: Forwarded to each worker run.
            save_x_vals: Forwarded to each worker run.
            seeds: Optional RNG seeds. Only ``seeds[0]`` is consulted; with
                no seeds, each run draws its own random seed.
            num_processes: Pool size; defaults to
                ``multiprocessing.cpu_count()``.

        Returns:
            One OptimizationTrialResult (containing a single repetition) per
            entry of param_sweep, in order.
        """
        # Materialize the sweep: it is iterated twice below (once to build
        # the worker argument tuples, once to pair params with results), so
        # a one-shot iterator would otherwise be exhausted after the first
        # pass and silently produce an empty result list.
        param_sweep = list(param_sweep)

        if num_processes is None:
            # coverage: ignore
            num_processes = multiprocessing.cpu_count()
        pool = multiprocessing.Pool(num_processes)
        try:
            # NOTE(review): only seeds[0] is ever used, even when the sweep
            # has several parameter sets — confirm that reusing one seed for
            # every run is intended.
            arg_tuples = ((self.ansatz, self.objective,
                           self._preparation_circuit, self.initial_state,
                           optimization_params, reevaluate_final_params,
                           save_x_vals, seeds[0] if seeds is not None else
                           numpy.random.randint(4294967296),
                           self.ansatz.default_initial_params(),
                           self._black_box_type)
                          for optimization_params in param_sweep)
            result_list = pool.map(_run_optimization, arg_tuples)
            trial_results = [
                OptimizationTrialResult([result], optimization_params) for
                optimization_params, result in zip(param_sweep, result_list)
            ]
        finally:
            # Terminate (rather than close/join) so workers are reaped even
            # when pool.map raises.
            pool.terminate()

        return trial_results
Example #4
0
def test_optimize_trial_result_init():
    """Verify the per-repetition data frame built by the constructor."""
    specs = [
        dict(optimal_value=5.7,
             optimal_parameters=numpy.array([1.3, 8.7]),
             num_evaluations=59,
             cost_spent=3.1,
             seed=60,
             status=54,
             message='ZibVTBNe8'),
        dict(optimal_value=4.7,
             optimal_parameters=numpy.array([1.7, 2.1]),
             num_evaluations=57,
             cost_spent=9.3,
             seed=51,
             status=32,
             message='cicCZ8iCg0D'),
    ]
    trial = OptimizationTrialResult(
        [OptimizationResult(**spec) for spec in specs],
        params=OptimizationParams(ExampleAlgorithm()))

    frame = trial.data_frame
    # Scalar columns keep the per-result values in construction order.
    assert all(frame['optimal_value'] == [5.7, 4.7])
    numpy.testing.assert_allclose(frame['optimal_parameters'][0],
                                  numpy.array([1.3, 8.7]))
    numpy.testing.assert_allclose(frame['optimal_parameters'][1],
                                  numpy.array([1.7, 2.1]))
    assert all(frame['num_evaluations'] == [59, 57])
    assert all(frame['cost_spent'] == [3.1, 9.3])
    assert all(frame['seed'] == [60, 51])
    assert all(frame['status'] == [54, 32])
    assert all(frame['message'] == ['ZibVTBNe8', 'cicCZ8iCg0D'])
Example #5
0
    def optimize_sweep(
            self,
            param_sweep: Iterable[OptimizationParams],
            identifiers: Optional[Iterable[Hashable]] = None,
            reevaluate_final_params: bool = False,
            save_x_vals: bool = False,
            repetitions: int = 1,
            seeds: Optional[Sequence[int]] = None,
            use_multiprocessing: bool = False,
            num_processes: Optional[int] = None
    ) -> List[OptimizationTrialResult]:
        """Run and record one optimization trial per parameter set.

        Like `optimize`, but accepts a sweep of OptimizationParams and
        performs one trial (of `repetitions` runs) for each.

        Args:
            param_sweep: The parameters for the optimization runs.
            identifiers: Optional keys under which trials are stored in
                `self.trial_results`, one per OptimizationParams object.
                Defaults to consecutive non-negative integers that are not
                already present as integer keys.
            reevaluate_final_params: Whether to reevaluate the optimizer's
                returned parameters with the study's `evaluate` method and
                adjust the optimal value accordingly — useful when the
                optimizer only saw the noisy `evaluate_with_cost` (because
                `cost_of_evaluate` is set) but the true noiseless value of
                the returned parameters is wanted.
            save_x_vals: Whether to save every point (x value) the black box
                was queried at. Only used when the black box type subclasses
                StatefulBlackBox.
            repetitions: How many times to run the algorithm for each set of
                optimization parameters.
            seeds: RNG seeds for the repetitions. By default an independent
                seed is generated for each repetition.
            use_multiprocessing: Whether repetitions should run in separate
                processes.
            num_processes: Number of processes for multiprocessing; defaults
                to `multiprocessing.cpu_count()`.

        Side effects:
            Each returned OptimizationTrialResult is stored in the
            `trial_results` dictionary under its identifier.
        """
        if seeds is not None and len(seeds) < repetitions:
            raise ValueError(
                "Provided fewer RNG seeds than the number of repetitions.")

        if identifiers is None:
            # Generate integer keys that cannot collide with existing
            # integer keys in trial_results.
            taken = [key for key in self.trial_results if isinstance(key, int)]
            first_free = max(taken) + 1 if taken else 0
            identifiers = itertools.count(cast(int, first_free))  # type: ignore

        trial_results = []

        for identifier, optimization_params in zip(identifiers, param_sweep):
            result_list = self._get_result_list(optimization_params,
                                                reevaluate_final_params,
                                                save_x_vals, repetitions,
                                                seeds, use_multiprocessing,
                                                num_processes)
            trial_result = OptimizationTrialResult(result_list,
                                                   optimization_params)
            trial_results.append(trial_result)
            # Record the trial under its identifier (the documented side
            # effect of this method).
            self.trial_results[identifier] = trial_result

        return trial_results