Example #1
def test_variational_study_initial_state():
    preparation_circuit = cirq.Circuit.from_ops(cirq.X(test_ansatz.qubits[0]))
    initial_state = numpy.array([0.0, 0.0, 1.0, 0.0])

    class TestObjective(VariationalObjective):
        def value(self, circuit_output):
            return circuit_output[0].real

    study1 = VariationalStudy(
        'study1',
        test_ansatz,
        TestObjective(),
        preparation_circuit=preparation_circuit,
        black_box_type=variational_black_box.UNITARY_SIMULATE_STATEFUL)
    study2 = VariationalStudy(
        'study2',
        test_ansatz,
        TestObjective(),
        initial_state=initial_state,
        black_box_type=variational_black_box.UNITARY_SIMULATE_STATEFUL)

    initial_guess = numpy.random.randn(2)
    result1 = study1.optimize(
        OptimizationParams(LazyAlgorithm(), initial_guess=initial_guess))
    result2 = study2.optimize(
        OptimizationParams(LazyAlgorithm(), initial_guess=initial_guess))

    numpy.testing.assert_allclose(result1.optimal_value, result2.optimal_value)
Example #2
def test_variational_study_optimize_and_extend_and_summary():
    numpy.random.seed(63351)

    study = VariationalStudy(
        'study',
        test_ansatz,
        test_objective,
        black_box_type=variational_black_box.XMON_SIMULATE_STATEFUL,
        target=-10.5)
    assert len(study.trial_results) == 0
    assert study.target == -10.5

    # Optimization run 1
    result = study.optimize(OptimizationParams(test_algorithm), 'run1')
    assert len(study.trial_results) == 1
    assert isinstance(result, OptimizationTrialResult)
    assert result.repetitions == 1

    # Extend optimization run 1
    study.extend_result('run1', repetitions=2)
    assert study.trial_results['run1'].repetitions == 3

    # Optimization run 2
    study.optimize(OptimizationParams(test_algorithm),
                   repetitions=2,
                   use_multiprocessing=True)
    result = study.trial_results[0]
    assert len(study.trial_results) == 2
    assert isinstance(result, OptimizationTrialResult)
    assert result.repetitions == 2

    # Optimization run 3
    study.optimize(OptimizationParams(test_algorithm,
                                      initial_guess=numpy.array([4.5, 8.8]),
                                      initial_guess_array=numpy.array(
                                          [[7.2, 6.3], [3.6, 9.8]]),
                                      cost_of_evaluate=1.0),
                   reevaluate_final_params=True,
                   save_x_vals=True)
    result = study.trial_results[1]
    assert len(study.trial_results) == 3
    assert isinstance(result, OptimizationTrialResult)
    assert result.repetitions == 1
    assert all(result.data_frame['optimal_parameters'].apply(
        lambda x: XmonSimulateVariationalBlackBox(test_ansatz, test_objective).
        evaluate(x)) == result.data_frame['optimal_value'])
    assert isinstance(result.results[0].cost_spent, float)

    # Try extending non-existent run
    with pytest.raises(KeyError):
        study.extend_result('run100')

    # Check that getting a summary works
    assert str(study).startswith('This study contains')
Example #3
def test_variational_study_optimize_and_extend_and_summary():
    numpy.random.seed(63351)

    study = ExampleStudy('study', test_ansatz)
    assert len(study.results) == 0

    # Optimization run 1
    result = study.optimize(
            OptimizationParams(test_algorithm),
            'run1')
    assert len(study.results) == 1
    assert isinstance(result, OptimizationTrialResult)
    assert result.repetitions == 1

    # Extend optimization run 1
    study.extend_result('run1',
                        repetitions=2)
    assert study.results['run1'].repetitions == 3

    # Optimization run 2
    study.optimize(OptimizationParams(test_algorithm),
                   repetitions=2,
                   use_multiprocessing=True)
    result = study.results[0]
    assert len(study.results) == 2
    assert isinstance(result, OptimizationTrialResult)
    assert result.repetitions == 2

    # Optimization run 3
    study.optimize(
            OptimizationParams(
                test_algorithm,
                initial_guess=numpy.array([4.5, 8.8]),
                initial_guess_array=numpy.array([[7.2, 6.3],
                                                 [3.6, 9.8]]),
                cost_of_evaluate=1.0),
            reevaluate_final_params=True,
            stateful=True,
            save_x_vals=True)
    result = study.results[1]
    assert len(study.results) == 3
    assert isinstance(result, OptimizationTrialResult)
    assert result.repetitions == 1
    assert all(result.data_frame['optimal_parameters'].apply(study.evaluate) ==
               result.data_frame['optimal_value'])
    assert isinstance(result.results[0].black_box, StatefulBlackBox)

    # Try extending non-existent run
    with pytest.raises(KeyError):
        study.extend_result('run100')

    # Check that getting a summary works
    assert isinstance(study.summary, str)
Example #4
def test_hamiltonian_variational_study_save_load():
    datadir = 'tmp_ffETr2rB49RGP8WE8jer'
    study_name = 'test_hamiltonian_study'

    ansatz = SwapNetworkTrotterAnsatz(test_hamiltonian)
    study = HamiltonianVariationalStudy(study_name,
                                        ansatz,
                                        test_fermion_op,
                                        datadir=datadir)
    study.optimize(
        OptimizationParams(ExampleAlgorithm(), cost_of_evaluate=1.0),
        'example')
    study.save()
    loaded_study = HamiltonianVariationalStudy.load(study_name,
                                                    datadir=datadir)

    assert loaded_study.name == study.name
    assert str(loaded_study.circuit) == str(study.circuit)
    assert loaded_study.datadir == datadir
    assert loaded_study.hamiltonian == test_fermion_op
    assert len(loaded_study.results) == 1

    result = loaded_study.results['example']
    assert isinstance(result, OptimizationTrialResult)
    assert result.repetitions == 1
    assert result.params.cost_of_evaluate == 1.0

    # Clean up
    os.remove('{}/{}.study'.format(datadir, study_name))
    os.rmdir(datadir)
Example #5
def test_optimize_trial_result_data_methods():
    result1 = OptimizationResult(optimal_value=5.7,
                                 optimal_parameters=numpy.array([1.3, 8.7]),
                                 num_evaluations=59,
                                 cost_spent=3.1,
                                 seed=60,
                                 status=54,
                                 message='ZibVTBNe8',
                                 time=0.1)
    result2 = OptimizationResult(optimal_value=4.7,
                                 optimal_parameters=numpy.array([1.7, 2.1]),
                                 num_evaluations=57,
                                 cost_spent=9.3,
                                 seed=51,
                                 status=32,
                                 message='cicCZ8iCg0D',
                                 time=0.2)
    trial = OptimizationTrialResult([result1, result2],
                                    params=OptimizationParams(
                                        ExampleAlgorithm()))

    assert trial.repetitions == 2
    assert trial.optimal_value == 4.7
    numpy.testing.assert_allclose(trial.optimal_parameters,
                                  numpy.array([1.7, 2.1]))
    assert trial.num_evaluations_quantile() == 58
    numpy.testing.assert_allclose(trial.optimal_value_quantile(), 5.2)
    numpy.testing.assert_allclose(trial.cost_spent_quantile(), 6.2)
    numpy.testing.assert_allclose(trial.time_spent_quantile(), 0.15)
Example #6
def test_optimize_trial_result_init():
    result1 = OptimizationResult(optimal_value=5.7,
                                 optimal_parameters=numpy.array([1.3, 8.7]),
                                 num_evaluations=59,
                                 cost_spent=3.1,
                                 seed=60,
                                 status=54,
                                 message='ZibVTBNe8')
    result2 = OptimizationResult(optimal_value=4.7,
                                 optimal_parameters=numpy.array([1.7, 2.1]),
                                 num_evaluations=57,
                                 cost_spent=9.3,
                                 seed=51,
                                 status=32,
                                 message='cicCZ8iCg0D')
    trial = OptimizationTrialResult([result1, result2],
                                    params=OptimizationParams(
                                        ExampleAlgorithm()))

    assert all(trial.data_frame['optimal_value'] == [5.7, 4.7])
    numpy.testing.assert_allclose(trial.data_frame['optimal_parameters'][0],
                                  numpy.array([1.3, 8.7]))
    numpy.testing.assert_allclose(trial.data_frame['optimal_parameters'][1],
                                  numpy.array([1.7, 2.1]))
    assert all(trial.data_frame['num_evaluations'] == [59, 57])
    assert all(trial.data_frame['cost_spent'] == [3.1, 9.3])
    assert all(trial.data_frame['seed'] == [60, 51])
    assert all(trial.data_frame['status'] == [54, 32])
    assert all(trial.data_frame['message'] == ['ZibVTBNe8', 'cicCZ8iCg0D'])
Example #7
def test_optimization_trial_result_extend():
    result1 = OptimizationResult(optimal_value=4.7,
                                 optimal_parameters=numpy.array([2.3, 2.7]),
                                 num_evaluations=39,
                                 cost_spent=3.9,
                                 seed=63,
                                 status=44,
                                 message='di382j2f')
    result2 = OptimizationResult(optimal_value=3.7,
                                 optimal_parameters=numpy.array([1.2, 3.1]),
                                 num_evaluations=47,
                                 cost_spent=9.9,
                                 seed=21,
                                 status=22,
                                 message='i328d8ie3')

    trial = OptimizationTrialResult([result1],
                                    params=OptimizationParams(
                                        ExampleAlgorithm()))

    assert len(trial.results) == 1
    assert trial.repetitions == 1

    trial.extend([result2])

    assert len(trial.results) == 2
    assert trial.repetitions == 2
Example #8
def test_hamiltonian_variational_study_optimize():
    ansatz = SwapNetworkTrotterAnsatz(test_hamiltonian)
    study = HamiltonianVariationalStudy('study', ansatz, test_fermion_op)
    study.optimize(OptimizationParams(ExampleAlgorithm(),
                                      cost_of_evaluate=1.0),
                   'run',
                   reevaluate_final_params=True)
    result = study.results['run']
    assert all(result.data_frame['optimal_parameters'].apply(study.evaluate) ==
               result.data_frame['optimal_value'])
    assert result.params.cost_of_evaluate == 1.0
Example #9
def test_variational_study_save_load():
    datadir = 'tmp_yulXPXnMBrxeUVt7kYVw'
    study_name = 'test_study'

    study = VariationalStudy(
        study_name,
        test_ansatz,
        test_objective,
        initial_state=numpy.array([0.0, 1.0, 0.0,
                                   0.0]).astype(numpy.complex64),
        datadir=datadir,
        black_box_type=variational_black_box.XMON_SIMULATE_STATEFUL)
    study.optimize(
        OptimizationParams(ScipyOptimizationAlgorithm(
            kwargs={'method': 'COBYLA'}, options={'maxiter': 2}),
                           initial_guess=numpy.array([7.9, 3.9]),
                           initial_guess_array=numpy.array([[7.5, 7.6],
                                                            [8.8, 1.1]]),
                           cost_of_evaluate=1.0), 'example')
    study.save()

    loaded_study = VariationalStudy.load(study_name, datadir=datadir)

    assert loaded_study.name == study.name
    assert str(loaded_study.circuit) == str(study.circuit)
    assert loaded_study.datadir == datadir
    assert len(loaded_study.trial_results) == 1
    numpy.testing.assert_allclose(loaded_study.initial_state,
                                  numpy.array([0.0, 1.0, 0.0, 0.0]))

    result = loaded_study.trial_results['example']
    assert isinstance(result, OptimizationTrialResult)
    assert result.repetitions == 1
    assert isinstance(result.params.algorithm, ScipyOptimizationAlgorithm)
    assert result.params.algorithm.kwargs == {'method': 'COBYLA'}
    assert result.params.algorithm.options == {'maxiter': 2}
    assert result.params.cost_of_evaluate == 1.0

    loaded_study = VariationalStudy.load('{}.study'.format(study_name),
                                         datadir=datadir)

    assert loaded_study.name == study.name

    # Clean up
    os.remove(os.path.join(datadir, '{}.study'.format(study_name)))
    os.rmdir(datadir)
Example #10
def OptimizeVQEOpenFermion(u, v, t):
    '''
    Optimize a VQE for the Hamiltonian and ansatz under study, using the
    optimizer interface built into OpenFermion-Cirq's VariationalStudy.

    Parameters
    ----------
    u : float
        One of the parameters in the Hamiltonian; see the report for a description.
    v : float
        One of the parameters in the Hamiltonian; see the report for a description.
    t : float
        One of the parameters in the Hamiltonian; see the report for a description.

    Returns
    -------
    list
        List containing 4 values: 1) theta at the minimum, 2) phi at the
        minimum, 3) energy at the minimum, 4) error in the energy w.r.t. the
        exact ground-state energy.

    '''
    ansatz = Ansatz()
    hamiltonian = Hamiltonian(u, v, t)
    
    objective = ofc.HamiltonianObjective(hamiltonian)
    
    study = ofc.VariationalStudy(
        name='TritonToy',
        ansatz=ansatz,
        objective=objective)
        
    optimization_params = OptimizationParams(
        algorithm=COBYLA,
        initial_guess=[1, 1])
    result = study.optimize(optimization_params)

    # Add back the constant energy offset (see report) to recover the total energy.
    minimum_energy = result.optimal_value + (8*t + 3*u/4 + v/16)
    minimum_energy_error = result.optimal_value - np.amin(of.eigenspectrum(hamiltonian))
    
    minimum_theta = result.optimal_parameters[0]
    minimum_phi = result.optimal_parameters[1]
    
    text = "Minimum E = {}, at theta = {} and phi = {} error = {}.".format(minimum_energy, minimum_theta, minimum_phi, minimum_energy_error)   
    print(text)
    
    return [minimum_theta, minimum_phi, minimum_energy, minimum_energy_error]
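A minimal usage sketch for the function above, assuming the Ansatz and Hamiltonian helpers it relies on are importable from the same module; the (u, v, t) values here are purely illustrative.

# Hypothetical driver: run the optimizer for a couple of illustrative
# (u, v, t) settings and print the returned energies and errors.
if __name__ == '__main__':
    for u, v, t in [(1.0, 0.5, 0.2), (2.0, 0.5, 0.2)]:
        theta, phi, energy, error = OptimizeVQEOpenFermion(u, v, t)
        print("u={}, v={}, t={}: E={}, error={}".format(u, v, t, energy, error))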
Example #11
def test_variational_study_save_load():
    datadir = 'tmp_yulXPXnMBrxeUVt7kYVw'
    study_name = 'test_study'

    study = ExampleStudy(
            study_name,
            test_ansatz,
            datadir=datadir)
    study.optimize(
            OptimizationParams(
                ScipyOptimizationAlgorithm(
                    kwargs={'method': 'COBYLA'},
                    options={'maxiter': 2}),
                initial_guess=numpy.array([7.9, 3.9]),
                initial_guess_array=numpy.array([[7.5, 7.6],
                                                 [8.8, 1.1]]),
                cost_of_evaluate=1.0),
            'example')
    study.save()

    loaded_study = VariationalStudy.load(study_name, datadir=datadir)

    assert loaded_study.name == study.name
    assert str(loaded_study.circuit) == str(study.circuit)
    assert loaded_study.datadir == datadir
    assert len(loaded_study.results) == 1

    result = loaded_study.results['example']
    assert isinstance(result, OptimizationTrialResult)
    assert result.repetitions == 1
    assert isinstance(result.params.algorithm, ScipyOptimizationAlgorithm)
    assert result.params.algorithm.kwargs == {'method': 'COBYLA'}
    assert result.params.algorithm.options == {'maxiter': 2}
    assert result.params.cost_of_evaluate == 1.0

    loaded_study = VariationalStudy.load('{}.study'.format(study_name),
                                         datadir=datadir)

    assert loaded_study.name == study.name

    # Clean up
    os.remove(os.path.join(datadir, '{}.study'.format(study_name)))
    os.rmdir(datadir)
Example #12
def test_variational_study_run_too_few_seeds_raises_error():
    with pytest.raises(ValueError):
        test_study.optimize(OptimizationParams(test_algorithm),
                            'run',
                            repetitions=2,
                            seeds=[0])
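For contrast, a hedged sketch of the matching valid call, which the ValueError check above implies would be accepted; this reuses the same test fixtures and is an assumption, not part of the original test.

# Hypothetical counterpart: one seed per repetition, so no error is expected.
test_study.optimize(OptimizationParams(test_algorithm),
                    'run_with_enough_seeds',
                    repetitions=2,
                    seeds=[0, 1])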
Example #13
def test_variational_study_optimize_and_summary():
    numpy.random.seed(63351)

    study = ExampleStudy('study', test_ansatz)
    assert len(study.results) == 0

    result = study.optimize(OptimizationParams(test_algorithm), 'run1')
    assert len(study.results) == 1
    assert isinstance(result, OptimizationTrialResult)
    assert result.repetitions == 1

    study.optimize(OptimizationParams(test_algorithm),
                   repetitions=2,
                   use_multiprocessing=True)
    result = study.results[0]
    assert len(study.results) == 2
    assert isinstance(result, OptimizationTrialResult)
    assert result.repetitions == 2

    study.optimize(OptimizationParams(test_algorithm,
                                      initial_guess=numpy.array([4.5, 8.8]),
                                      initial_guess_array=numpy.array(
                                          [[7.2, 6.3], [3.6, 9.8]]),
                                      cost_of_evaluate=1.0),
                   reevaluate_final_params=True)
    result = study.results[1]
    assert len(study.results) == 3
    assert isinstance(result, OptimizationTrialResult)
    assert result.repetitions == 1
    assert all(result.data_frame['optimal_parameters'].apply(study.evaluate) ==
               result.data_frame['optimal_value'])
    numpy.testing.assert_allclose(result.params.initial_guess,
                                  numpy.array([4.5, 8.8]))
    numpy.testing.assert_allclose(result.params.initial_guess_array,
                                  numpy.array([[7.2, 6.3], [3.6, 9.8]]))
    assert result.params.cost_of_evaluate == 1.0

    assert study.summary.replace("u'", "'").strip() == """
This study contains 3 results.
The optimal value found among all results is 0.
It was found by the run with identifier 'run1'.
Result details:
    Identifier: run1
        Optimal value: 0
        Number of repetitions: 1
        Optimal value 1st, 2nd, 3rd quartiles:
            [0.0, 0.0, 0.0]
        Num evaluations 1st, 2nd, 3rd quartiles:
            [2.0, 2.0, 2.0]
        Cost spent 1st, 2nd, 3rd quartiles:
            [1.0, 1.0, 1.0]
    Identifier: 0
        Optimal value: 0
        Number of repetitions: 2
        Optimal value 1st, 2nd, 3rd quartiles:
            [0.0, 0.0, 0.0]
        Num evaluations 1st, 2nd, 3rd quartiles:
            [2.0, 2.0, 2.0]
        Cost spent 1st, 2nd, 3rd quartiles:
            [1.0, 1.0, 1.0]
    Identifier: 1
        Optimal value: 0
        Number of repetitions: 1
        Optimal value 1st, 2nd, 3rd quartiles:
            [0.0, 0.0, 0.0]
        Num evaluations 1st, 2nd, 3rd quartiles:
            [2.0, 2.0, 2.0]
        Cost spent 1st, 2nd, 3rd quartiles:
            [2.0, 2.0, 2.0]
""".strip()
Example #14
# The study construction here mirrors Example #15; the name and objective are
# placeholders, since the opening lines of this example are cut off.
study = openfermioncirq.VariationalStudy(
    name='my_study',
    ansatz=ansatz,
    objective=objective,
    preparation_circuit=preparation_circuit)

print("Created a variational study with {} qubits and {} parameters".format(
    len(study.ansatz.qubits), study.num_params))

print(
    "The value of the objective with default initial parameters is {}".format(
        study.value_of(kc_simulator, ansatz.default_initial_params())))

print("The circuit of the study is")
print(study.circuit.to_text_diagram(transpose=True))

# Perform an optimization run.
from openfermioncirq.optimization import ScipyOptimizationAlgorithm, OptimizationParams
algorithm = ScipyOptimizationAlgorithm(kwargs={'method': 'COBYLA'},
                                       options={'maxiter': 100},
                                       uses_bounds=False)
optimization_params = OptimizationParams(algorithm=algorithm)
result = study.optimize(optimization_params, kc_simulator)
print(result.optimal_value)

optimization_params = OptimizationParams(algorithm=algorithm,
                                         cost_of_evaluate=1e6)
study.optimize(optimization_params,
               kc_simulator,
               identifier='COBYLA with maxiter=100, noisy',
               repetitions=3,
               reevaluate_final_params=True,
               use_multiprocessing=False)
print(study)
Example #15
kc_simulator = cirq.KnowledgeCompilationSimulator(preparation_circuit +
                                                  ansatz.circuit,
                                                  initial_state=0)

study = openfermioncirq.VariationalStudy(
    name='my_hydrogen_study',
    ansatz=ansatz,
    objective=objective,
    preparation_circuit=preparation_circuit)
print(study.circuit)

# Perform optimization.
import numpy
from openfermioncirq.optimization import COBYLA, OptimizationParams
optimization_params = OptimizationParams(algorithm=COBYLA,
                                         initial_guess=[0.01],
                                         cost_of_evaluate=4096)
result = study.optimize(optimization_params, kc_simulator)
print("Initial state energy in Hartrees: {}".format(molecule.hf_energy))
print("Optimized energy result in Hartree: {}".format(result.optimal_value))
print("Exact energy result in Hartees for reference: {}".format(
    molecule.fci_energy))

bond_lengths = ['{0:.1f}'.format(0.3 + 0.1 * x) for x in range(23)]
hartree_fock_energies = []
optimized_energies = []
exact_energies = []

for diatomic_bond_length in bond_lengths:
    geometry = [('H', (0., 0., 0.)), ('H', (0., 0., diatomic_bond_length))]
Example #16

#%%

# Molecule Properties
bond_lengths = [float('{0:.1f}'.format(0.3 + 0.1 * x)) for x in range(23)]
basis = 'sto-3g'
multiplicity = 1
charge = 0

# Optimization Parameters
algorithm = ScipyOptimizationAlgorithm(kwargs={'method': 'COBYLA'},
                                       options={'maxiter': 100},
                                       uses_bounds=False)

optimization_params = OptimizationParams(algorithm=algorithm)

# Wave Function Ansatz (parameterized guess) U(theta)
ansatz = MyAnsatz()
q0, q1, _, _ = ansatz.qubits

# Initialize to |11> state
##  U(theta)|11> --> best guess ground state
preparation_circuit = cq.Circuit.from_ops(cq.X(q0), cq.X(q1))

#%%

## Run a study on a single bond length
molecule = of.MolecularData([('H', (0., 0., 0.)), ('H', (0., 0., 0.7414))],
                            'sto-3g', 1, 0)
molecule.load()