Example #1
# Build a test model and reuse the control function from the optimized model
dico_test['fom'] = fom_test
dico_test['track_learning'] = False
model_test = bh1d.BH1D(**dico_test)
model_test.control_fun = model.control_fun

# Re-evaluate the figure of merit at the optimal parameters
optim_params = res['params']
res_test = model_test(optim_params)

# Plot the optimal control function over a window slightly wider than [0, T]
import numpy as np

func_used = model_test.control_fun
x_to_plot = np.linspace(-0.2, T + 0.1, 500)
func_used.plot_function(x_to_plot)

## Benchmarking results vs a linear ramp
import copy

# Overwrite the control to 0 on [-100, 0] and to 1 on [T, T + 100];
# in between, a linear ramp of slope 1/T
ow = pFunc_base.OwriterYWrap(input_min=[-100, T],
                             input_max=[0, 100 + T],
                             output_ow=[0, 1])
linear = ow * pFunc_base.LinearFunc(bias=0, w=1 / T)
dico_test_linear = copy.copy(dico_test)
dico_test_linear['control_obj'] = linear
model_test_linear = bh1d.BH1D(**dico_test_linear)
res_test_linear = model_test_linear([])
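
# A quick side-by-side of the two figures of merit; this sketch assumes both
# model calls return plain numeric FOM values (the snippet does not show
# their exact structure).
print('optimized FOM:  ', res_test)
print('linear-ramp FOM:', res_test_linear)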

## Looking at convergence
if optim_type == 'BO2':
    import matplotlib.pyplot as plt

    def dist(x, y):
        # Squared Euclidean distance between two parameter vectors
        d = np.squeeze(x) - np.squeeze(y)
        return np.dot(d, d)
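
    # A sketch of how dist could be used: plot the squared step size between
    # successive parameter iterates. 'params_hist' is a hypothetical key; the
    # snippet only shows that res behaves like a dict with a 'params' entry.
    params_hist = res.get('params_hist', [])
    steps = [dist(params_hist[i], params_hist[i - 1])
             for i in range(1, len(params_hist))]
    plt.plot(steps)
    plt.xlabel('iteration')
    plt.ylabel('squared parameter step')
    plt.show()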
Example #2
# Simulation configuration for the BH1D model (the opening of this dict is
# restored from its use as dico_simul below)
dico_simul = {
    'T': T,
    'dt': 0.01,
    'flag_intermediate': False,
    'setup': '1',
    'state_init': 'GS_i',
    'state_tgt': 'GS_inf',
    'fom': fom,
    'fom_print': True,
    'track_learning': True,
    'ctl_shortcut': 'owbds01_pwl15',
    'kblock': 0,
    'pblock': 1
}

import copy

import numpy as np

# Clamp the control to 0 for t < 0 and to 1 for t > T_long;
# in between, a linear ramp of slope 1/T_long
ow = pFunc_base.OwriterYWrap(input_min=[-np.inf, T_long],
                             input_max=[0, np.inf],
                             output_ow=[0, 1])
linear = ow * pFunc_base.LinearFunc(bias=0, w=1 / T_long)

dico_linear = copy.copy(dico_simul)
dico_linear['control_obj'] = linear
dico_linear['T'] = T_long
model_linear = bh1d.BH1D(**dico_linear)
res_test_linear = model_linear([], trunc_res=False)

# Evolve the state and plot the adiabatic populations of the nb_ev lowest
# eigenstates, together with the energy gap
state_tmp = model_linear.EvolutionPopAdiab(nb_ev=2)
model_linear.plot_pop_adiab(plot_gap=True)
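
# Sanity-check sketch for the wrapped ramp, assuming pFunc_base objects are
# callable on scalars (their use as control functions suggests they are).
# Expected values: 0 before t = 0, 1 after t = T_long, t/T_long in between.
for t in [-1.0, 0.5 * T_long, 2.0 * T_long]:
    print(t, linear(t))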

# Optimizer configuration for the 'BO2' (Bayesian optimization) backend
optim_args = {
    'algo': 'BO2',
    'maxiter': 100,
    'num_cores': 4,
    'init_obj': 75,