def gen_ramped_h(basis, v=1, L=5, mu=0.0):
    """Build a linearly ramped 1D Bose-Hubbard Hamiltonian.

    The interaction amplitude U(t) ramps linearly from 0 at t=0 to 1 at
    t=T=1/v, while the hopping amplitude J(t) = 1 - U(t) ramps down in
    step. A static chemical-potential term of strength ``mu`` is added on
    every site. Hopping uses periodic boundary conditions.

    Parameters
    ----------
    basis : QuSpin basis object passed through to ``hamiltonian``.
    v : ramp speed; the ramp duration is T = 1/v.
    L : number of lattice sites.
    mu : chemical potential (static 'n' term coefficient).

    Returns
    -------
    (H, T) : the time-dependent ``hamiltonian`` and the ramp duration.
    """
    T = 1 / v
    U = pf.LinearFunc(w=v, bias=0)  # U(t) = v*t, i.e. 0 -> 1 over [0, T]
    J = lambda t: 1 - U(t)          # hopping ramps down as interaction ramps up
    args_U = []
    args_J = []

    sites = range(L)
    # Hopping couples each site to its right neighbour, wrapping at the edge (PBC).
    hop = [[-1, s, (s + 1) % L] for s in sites]
    # On-site interaction written as 0.5*n_i*n_i - 0.5*n_i = 0.5*n_i*(n_i - 1).
    inter_nn = [[0.5, s, s] for s in sites]
    inter_n = [[-0.5, s] for s in sites]

    dynamic = [
        ['nn', inter_nn, U, args_U],
        ['n', inter_n, U, args_U],
        ['+-', hop, J, args_J],
        ['-+', hop, J, args_J],
    ]
    static = [['n', [[mu, s] for s in sites]]]

    H = hamiltonian(static, dynamic, basis=basis, dtype=np.float64)
    return H, T
# Re-evaluate the optimized control on a fresh test model, then benchmark it
# against a plain linear ramp.
# NOTE(review): this is a mid-script fragment — it relies on names defined
# earlier (dico_test, model, res, T, optim_type); verify against the full file.
model_test = bh1d.BH1D(**dico_test)
model_test.control_fun = model.control_fun  # reuse the optimized control function
optim_params = res['params']                # best parameters found by the optimizer
res_test = model_test(optim_params)

#plot func optimal
func_used = model_test.control_fun
import numpy as np
# Plot slightly beyond [0, T] so the clamped regions are visible.
x_to_plot = np.linspace(-0.2, T + 0.1, 500)
func_used.plot_function(x_to_plot)

## Benchmarking results vs Linear
# Linear ramp overwritten to 0 for t in [-100, 0] and to 1 for t in [T, 100+T].
ow = pFunc_base.OwriterYWrap(input_min=[-100, T], input_max=[0, 100 + T], output_ow=[0, 1])
linear = ow * pFunc_base.LinearFunc(bias=0, w=1 / T)
dico_test_linear = copy.copy(dico_test)
dico_test_linear['control_obj'] = linear
model_test_linear = bh1d.BH1D(**dico_test_linear)
res_test_linear = model_test_linear([])  # linear ramp has no free parameters

## Looking at convergence
if (optim_type == 'BO2'):
    import matplotlib.pylab as plt

    def dist(x, y):
        # Squared Euclidean distance between two parameter vectors.
        d = np.squeeze(x) - np.squeeze(y)
        return np.dot(d, d)

    def get_dist_successive(X, n_ev=None):
        # Distances between successive entries of X.
        # NOTE(review): enumerate(X[1:]) starts n at 0, so X[n - 1] is X[-1]
        # for the first pair — looks like an off-by-one (expected X[n]).
        # The function body is truncated in this fragment; confirm upstream
        # before changing.
        distance = [dist(x_n, X[n - 1]) for n, x_n in enumerate(X[1:])]
# Tail of a simulation-config dict whose opening brace lies outside this
# fragment; the entries below configure state preparation and symmetry sectors.
    'flag_intermediate': False,
    'setup': '1',
    'state_init': 'GS_i',      # start in the ground state of H(t=0)
    'state_tgt': 'GS_inf',     # target the ground state of the final Hamiltonian
    'fom': fom,
    'fom_print': True,
    'track_learning': True,
    'ctl_shortcut': 'owbds01_pwl15',
    'kblock': 0,               # presumably momentum symmetry block — TODO confirm
    'pblock': 1                # presumably parity symmetry block — TODO confirm
}

# Benchmark: linear ramp over the long horizon, clamped to 0 before t=0 and
# to 1 after t=T_long.
ow = pFunc_base.OwriterYWrap(input_min=[-np.inf, T_long], input_max=[0, np.inf], output_ow=[0, 1])
linear = ow * pFunc_base.LinearFunc(bias=0, w=1 / T_long)
dico_linear = copy.copy(dico_simul)
dico_linear['control_obj'] = linear
dico_linear['T'] = T_long
model_linear = bh1d.BH1D(**dico_linear)
res_test_linear = model_linear([], trunc_res=False)  # linear ramp: no free parameters
state_tmp = model_linear.EvolutionPopAdiab(nb_ev=2)  # nb_ev=2: presumably the 2 lowest states — verify
model_linear.plot_pop_adiab(plot_gap=True)

# Bayesian-optimization settings (dict continues past this fragment).
optim_args = {
    'algo': 'BO2',
    'maxiter': 100,
    'num_cores': 4,
    'init_obj': 75,        # presumably number of initial evaluations — verify
    'exploit_steps': 49,
    'acq': 'EI',           # expected-improvement acquisition
# Fallback branch of a conditional whose opening clause is outside this fragment.
else:
    raise NotImplementedError()

### ======================= ###
# TESTING
### ======================= ###
if (__name__ == '__main__'):
    BH1D.info()
    # Create a 1D BH chain linearly driven
    # evolve the GS of H(t=0) to T
    # observe psi(T) avg(Var(n_i)) F(psi(T), GS_MI) etc..
    T = 3
    # Linear ramp: 0 at t=0 up to 1 at t=T.
    linear_ramp = pf.LinearFunc(w=1 / T, bias=0)
    # Figure-of-merit specification strings parsed by the model; the 'last:'
    # variants below evaluate the same quantities at final time only.
    fom_name = [
        'f2t2:neg_fluence:0.0001_smooth:0.0005', 'f2t2', 'varN:sqrt',
        'fluence', 'smooth', 'varN'
    ]
    fom_name_last = ['last:' + f for f in fom_name]
    # Simulation config (dict continues past this fragment).
    dico_simul = {
        'control_obj': linear_ramp,
        'L': 6,      # chain length
        'Nb': 6,     # boson number — unit filling for L=6
        'mu': 0,
        'T': T,
        'dt': 0.01,  # integration time step
        'flag_intermediate': False,
        'setup': '1',
# Tail of a simulation-config dict whose opening brace lies outside this
# fragment; `func` is the parametrized control defined earlier in the script.
    'fom_print': True,
    'track_learning': True,
    'control_obj': func
}
model = bh1d.BH1D(**dico_simul)
optim_params = func.theta  # current parameters of the parametrized control
res_model = model(optim_params, trunc_res=False)

#plot func optimal
# Plot slightly beyond [0, T] so the clamped regions are visible.
x_to_plot = np.linspace(-0.2, T + 0.1, 500)

#Benchmarking Linear
# Decreasing ramp (bias=1, w=-1/T): overwritten to 1 for t in [-100, 0] and
# to 0 for t in [T, 100+T] — the reverse direction of the increasing ramps
# used elsewhere in this file.
ow = pFunc_base.OwriterYWrap(input_min=[-100, T], input_max=[0, 100 + T], output_ow=[1, 0])
linear = ow * pFunc_base.LinearFunc(bias=1, w=-1 / T)
dico_linear = copy.copy(dico_simul)
dico_linear['control_obj'] = linear
model_test_linear = bh1d.BH1D(**dico_linear)
res_linear = model_test_linear([], trunc_res=False)  # linear ramp: no free parameters
print("MI->SF: with optimized control: {}".format(res_model))
print("MI->SF: with linear ramp: {}".format(res_linear))
func.plot_function(x_to_plot)
linear.plot_function(x_to_plot)

# Adiabatic-population analysis over the full spectrum.
basis = model._ss  # NOTE(review): reaches into a private attribute of BH1D
optim_state_t = model.EvolutionPopAdiab(nb_ev=basis.Ns)  # basis.Ns: full basis dimension
plot_pop_adiab(model, save_fig='MI2SF_adiabpop.pdf')