# CRAB-ansatz optimization of the LMG (N=50) ramp: one Nelder-Mead run per
# evolution time tf.  Each row stores [tf, fidelity, *nuk, *optimal_params].
# NOTE(review): the original allocated `2 + num_CRAB_pars` columns, but each
# row packs 2 + num_frequencies (nuk) + 2 * num_frequencies (A/B amplitudes)
# values, and the column labels below are built from num_frequencies.  Using
# the consistent width guarantees the row assignment and the DataFrame
# construction cannot disagree.
results = np.zeros(shape=[len(times_to_try), 2 + 3 * num_frequencies])

for idx, tf in enumerate(times_to_try):
    # crab hyperparameters: random frequencies sampled afresh for every tf
    nuk = np.random.rand(num_frequencies)

    parametrized_model = functools.partial(
        optimization.make_CRAB_final_ramp_fun,
        nuk=nuk, tf=tf, y0=0, y1=1.
    )
    logging.info('Starting optimization for tf={}'.format(tf))
    result = optimization.optimize_model_parameters(
        hamiltonians=[optim_objs['H0'], optim_objs['H1']],
        initial_state=optim_objs['initial_state'],
        target_state=optim_objs['target_state'],
        evolution_time=tf,
        parametrized_model=parametrized_model,
        # two free amplitudes (A_k, B_k) per CRAB frequency
        initial_parameters=np.random.randn(2 * nuk.shape[0]),
        optimization_method='Nelder-Mead', stfu=True
    )
    # the optimizer minimizes 1 - overlap; the stored fidelity is its square
    results[idx] = [tf, (1 - result.fun)**2, *nuk, *result.x]
    logging.info('    Result: {}'.format(1 - result.fun))

columns_strings = ['tf', 'fid'] + ['nu' + str(k + 1) for k in range(num_frequencies)]
columns_strings += ['A' + str(k + 1) for k in range(num_frequencies)]
columns_strings += ['B' + str(k + 1) for k in range(num_frequencies)]

results = pd.DataFrame(results, columns=columns_strings)
results.to_csv('lmg_N50_crab_2freq_neldermead.csv')

import src.optimization as optimization
from src.utils import ground_state

# LMG model (N=50): bang-ramp pulse optimized with Nelder-Mead over a grid
# of evolution times; results saved as [tf, fid, y0, t1, y1, y2] per row.
num_spins = 50
optim_objs = lmg_model.prepare_hamiltonian_and_states_for_optimization(
    num_spins=num_spins)
# run optimization
times_to_try = np.linspace(0.01, 1, 100)
results = np.zeros(shape=[len(times_to_try), 6])

for row_idx, tf in enumerate(times_to_try):
    # bang-ramp pulse with the total time tf baked in
    pulse_model = functools.partial(
        optimization.make_bangramp_pulse_fun, tf=tf)
    logging.info('Starting optimization for tf={}'.format(tf))
    opt_result = optimization.optimize_model_parameters(
        hamiltonians=[optim_objs['H0'], optim_objs['H1']],
        initial_state=optim_objs['initial_state'],
        target_state=optim_objs['target_state'],
        evolution_time=tf,
        parametrized_model=pulse_model,
        initial_parameters=[1, tf / 2, 1, 1],
        optimization_method='Nelder-Mead',
        stfu=True)
    fidelity = (1 - opt_result.fun)**2
    results[row_idx] = [tf, fidelity, *opt_result.x]
    logging.info('    Result: {}'.format(fidelity))
results = pd.DataFrame(results, columns=['tf', 'fid', 'y0', 't1', 'y1', 'y2'])

results.to_csv('lmg_N50_bangramp_neldermead.csv')
# CRAB ansatz for the quantum Rabi model, one Nelder-Mead run per tf with
# freshly sampled random frequencies nuk at each time.
for i, tf in enumerate(times_to_try):
    # crab hyperparameters
    nuk = np.random.rand(num_frequencies)

    crab_pulse = functools.partial(
        optimization.make_CRAB_final_ramp_fun,
        nuk=nuk,
        tf=tf,
        y0=0,
        y1=lambda_c)
    logging.info('Starting optimization for tf={}'.format(tf))
    res = optimization.optimize_model_parameters(
        hamiltonians=[H0, H1],
        initial_state=initial_state,
        target_state=target_state,
        evolution_time=tf,
        parametrized_model=crab_pulse,
        initial_parameters=np.random.randn(2 * nuk.shape[0]),
        optimization_method='Nelder-Mead',
        stfu=True)
    results[i] = [tf, (1 - res.fun)**2, *nuk, *res.x]
    logging.info('    Result: {}'.format(1 - res.fun))

# column labels: tf, fidelity, then one nu/A/B triple per frequency
columns_strings = ['tf', 'fid']
for prefix in ('nu', 'A', 'B'):
    columns_strings += [prefix + str(k + 1) for k in range(num_frequencies)]

results = pd.DataFrame(results, columns=columns_strings)
results.to_csv('rabi_Omega100_crab_2freq_neldermead.csv')
# Exemplo n.º 4
# 0
# Quantum Rabi model (Omega=100): double-bang pulse optimized with Powell.
Omega = 100
omega_0 = 1
lambda_c = np.sqrt(Omega * omega_0) / 2.
# build Hamiltonians
H0 = rabi_model.QRM_free_term(N, Omega, omega_0)
H1 = rabi_model.QRM_interaction_term(N)
# compute initial and target states
initial_state = ground_state(H0)
target_state = ground_state(H0 + lambda_c * H1)
# run optimization
times_to_try = np.linspace(0.1, 4, 100)
results = np.zeros(shape=[len(times_to_try), 5])

for i, tf in enumerate(times_to_try):
    def doublebang_model(pars):
        # the fixed total evolution time is appended to the free parameters
        return optimization.make_doublebang_pulse_fun(
            [pars[0], pars[1], pars[2], tf])
    print('Starting optimization for tf={}... result='.format(tf), end='')
    res = optimization.optimize_model_parameters(
        hamiltonians=[H0, H1],
        initial_state=initial_state, target_state=target_state,
        evolution_time=tf,
        parametrized_model=doublebang_model,
        initial_parameters=[1, tf / 2, 1],
        optimization_method='Powell', stfu=True
    )
    results[i] = [tf, (1 - res.fun)**2, *res.x]
    print('{}'.format(1 - res.fun))
results = pd.DataFrame(results, columns=['tf', 'fid', 'y0', 't1', 'y1'])

results.to_csv('rabi_Omega100_doublebang_powell.csv')
# CRAB ansatz for the Rabi model optimized with Powell (3-frequency run).
for i, tf in enumerate(times_to_try):
    # crab hyperparameters
    nuk = np.random.rand(num_frequencies)

    crab_model = functools.partial(
        optimization.make_CRAB_final_ramp_fun,
        nuk=nuk,
        tf=tf,
        y0=0,
        y1=lambda_c)
    logging.info('Starting optimization for tf={}'.format(tf))
    res = optimization.optimize_model_parameters(
        hamiltonians=[H0, H1],
        initial_state=initial_state,
        target_state=target_state,
        evolution_time=tf,
        parametrized_model=crab_model,
        initial_parameters=np.random.randn(2 * num_frequencies),
        optimization_method='Powell',
        stfu=True)
    results[i] = [tf, (1 - res.fun)**2, *nuk, *res.x]
    logging.info('    Result: {}'.format(1 - res.fun))

# column labels are zero-based here, unlike the other CRAB runs in this file
columns_strings = ['tf', 'fid']
for prefix in ('nu', 'A', 'B'):
    columns_strings += [prefix + str(k) for k in range(num_frequencies)]

results = pd.DataFrame(results, columns=columns_strings)
results.to_csv('rabi_Omega100_crab_3freq_powell.csv')
# Rabi model: bang-ramp pulse with Nelder-Mead under +/-50 box constraints
# on the three amplitude parameters (the switch time is confined to [0, tf]).
H0 = rabi_model.QRM_free_term(N, Omega, omega_0)
H1 = rabi_model.QRM_interaction_term(N)
# compute initial and target states
initial_state = ground_state(H0)
target_state = ground_state(H0 + lambda_c * H1)
# run optimization
times_to_try = np.linspace(0.1, 4, 100)
results = np.zeros(shape=[len(times_to_try), 6])

for i, tf in enumerate(times_to_try):
    pulse_model = functools.partial(
        optimization.make_bangramp_pulse_fun, tf=tf)
    logging.info('Starting optimization for tf={}'.format(tf))
    res = optimization.optimize_model_parameters(
        hamiltonians=[H0, H1],
        initial_state=initial_state,
        target_state=target_state,
        evolution_time=tf,
        parametrized_model=pulse_model,
        initial_parameters=[1, tf / 2, 1, 1],
        optimization_method='Nelder-Mead',
        stfu=True,
        parameters_constraints=[[-50, 50], [0, tf], [-50, 50], [-50, 50]])
    fidelity = (1 - res.fun)**2
    results[i] = [tf, fidelity, *res.x]
    logging.info('    Result: {}'.format(fidelity))
results = pd.DataFrame(results, columns=['tf', 'fid', 'y0', 't1', 'y1', 'y2'])

results.to_csv('rabi_Omega100_bangramp_neldermead.csv')
# LMG model (N=50): bang-ramp pulse with Powell under +/-100 box constraints
# on the amplitude parameters.
num_spins = 50
optim_objs = lmg_model.prepare_hamiltonian_and_states_for_optimization(
    num_spins=num_spins)
# run optimization
times_to_try = np.linspace(0.01, 1, 100)
results = np.zeros(shape=[len(times_to_try), 6])

for i, tf in enumerate(times_to_try):
    pulse_model = functools.partial(
        optimization.make_bangramp_pulse_fun, tf=tf)
    logging.info('Starting optimization for tf={}'.format(tf))
    res = optimization.optimize_model_parameters(
        hamiltonians=[optim_objs['H0'], optim_objs['H1']],
        initial_state=optim_objs['initial_state'],
        target_state=optim_objs['target_state'],
        evolution_time=tf,
        parametrized_model=pulse_model,
        initial_parameters=[1, tf / 2, 1, 1],
        optimization_method='Powell',
        stfu=True,
        parameters_constraints=[[-100, 100], [0, tf],
                                [-100, 100], [-100, 100]])
    fidelity = (1 - res.fun)**2
    results[i] = [tf, fidelity, *res.x]
    logging.info('    Result: {}'.format(fidelity))
results = pd.DataFrame(results, columns=['tf', 'fid', 'y0', 't1', 'y1', 'y2'])

results.to_csv('lmg_N50_bangramp_constrained100_powell.csv')
# Exemplo n.º 8
# 0
# LMG bang-ramp rerun: same setup as before but with randomized starting
# parameters for each Powell run (unconstrained).
num_spins = 50
optim_objs = lmg_model.prepare_hamiltonian_and_states_for_optimization(
    num_spins=num_spins)
# run optimization
times_to_try = np.linspace(0.01, 1, 100)
results = np.zeros(shape=[len(times_to_try), 6])

for i, tf in enumerate(times_to_try):
    pulse_model = functools.partial(
        optimization.make_bangramp_pulse_fun, tf=tf)
    # let's try here using random initial parameters
    rnd_initial_parameters = np.insert(np.random.rand(3) + 0.1, 1, tf / 2)
    logging.info('Starting optimization for tf={}'.format(tf))
    logging.info('Randomized parameters: {}'.format(rnd_initial_parameters))
    res = optimization.optimize_model_parameters(
        hamiltonians=[optim_objs['H0'], optim_objs['H1']],
        initial_state=optim_objs['initial_state'],
        target_state=optim_objs['target_state'],
        evolution_time=tf,
        parametrized_model=pulse_model,
        initial_parameters=rnd_initial_parameters,
        optimization_method='Powell',
        stfu=True,
    )
    fidelity = (1 - res.fun)**2
    results[i] = [tf, fidelity, *res.x]
    logging.info('    Result: {}'.format(fidelity))
results = pd.DataFrame(results, columns=['tf', 'fid', 'y0', 't1', 'y1', 'y2'])

results.to_csv('lmg_N50_bangramp_powell_rerun2.csv')