import numpy as np

# Ground-state target with a piecewise-linear control ('owbds01_pwl15'),
# restricted to the kblock/pblock symmetry sector
dico_GS = {
    'mu': 0, 'T': T, 'dt': 0.01, 'flag_intermediate': False, 'setup': '1',
    'state_init': 'GS_i', 'state_tgt': 'GS_inf', 'fom': fom_GS,
    'fom_print': True, 'track_learning': True,
    'ctl_shortcut': 'owbds01_pwl15', 'kblock': 0, 'pblock': 1}
dico_GS = learner1DBH._process_controler(dico_GS)
dico_GS['control_obj'] = learner1DBH._build_control_from_string(
    dico_GS['control_obj'], None, context_dico=dico_GS)
model_GS = bh1d.BH1D(**dico_GS)

# Optimize, then inspect the resulting control and the adiabatic populations
optim_GS = Learner.learner_Opt(model=model_GS, **optim_args)
res_GS = optim_GS(track_learning=True)
model_GS.control_fun.plot_function(np.arange(-0.01, T + 0.01, 0.01))
state_tmp = model_GS.EvolutionPopAdiab(nb_ev=2)
model_GS.plot_pop_adiab(plot_gap=True)

#==============================================================================
# Try to reach ES at the end
#==============================================================================
fom_firstE = ['projSS:neg_fluence:0.0001_smooth:0.05']
dico_firstE = {
dico_simul = {'L': 5, 'Nb': 5, 'mu': 0, 'T': T, 'dt': 0.01,
              'flag_intermediate': False, 'setup': '1', 'state_init': 'GS_i',
              'state_tgt': 'GS_inf', 'fom': fom, 'fom_print': True,
              'track_learning': True, 'ctl_shortcut': 'owbds01r_pwc10'}
dico_simul = learner1DBH._process_controler(dico_simul)
dico_simul['control_obj'] = learner1DBH._build_control_from_string(
    dico_simul['control_obj'], None, context_dico=dico_simul)
model = bh1d.BH1D(**dico_simul)

optim_main = {'algo': 'BO2', 'maxiter': 50, 'num_cores': 4, 'init_obj': 30,
              'exploit_steps': 30, 'acq': 'EI', 'optim_num_anchor': 15,
              'optim_num_samples': 10000, 'ARD': False, 'model_type': 'GP',
dico_model = {
    'L': L, 'Nb': L, 'mu': 0, 'T': T, 'dt': 0.01, 'flag_intermediate': False,
    'setup': '1', 'state_init': 'GS_i', 'state_tgt': 'GS_inf', 'fom': fom,
    'sps': sps, 'fom_print': True, 'track_learning': True,
    'ctl_shortcut': 'owbds01_linear'}
dico_model = learner1DBH._process_controler(dico_model)
dico_model['control_obj'] = learner1DBH._build_control_from_string(
    dico_model['control_obj'], None, context_dico=dico_model)

### Run it
model = bh1d.BH1D(**dico_model)
res = model([], trunc_res=False)
basis = model._ss
state_t = model.EvolutionPopAdiab(nb_ev=basis.Ns)

### Get data
t = model.t_array        # simulation times
st = state_t             # evolved state at each time
en = model.adiab_en      # instantaneous (adiabatic) energies
cf = model.adiab_cf      # control-field values along the evolution
EV = model.adiab_evect   # instantaneous eigenvectors
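# --- Illustration (not in the original script): visualize the adiabatic data.
# A minimal sketch assuming `en` is an array of instantaneous energies of
# shape (len(t), basis.Ns); only numpy and matplotlib are used, no extra
# BH1D methods.
import numpy as np
import matplotlib.pyplot as plt

en_arr = np.asarray(en)
fig, (ax_en, ax_gap) = plt.subplots(2, 1, sharex=True)
ax_en.plot(t, en_arr[:, 0], label='E0')
ax_en.plot(t, en_arr[:, 1], label='E1')
ax_en.set_ylabel('instantaneous energy')
ax_en.legend()
ax_gap.plot(t, en_arr[:, 1] - en_arr[:, 0])  # gap between the two lowest levels
ax_gap.set_xlabel('t')
ax_gap.set_ylabel('E1 - E0')
plt.show()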
dico_linear['control_obj'] = linear
dico_linear['T'] = T_long
model_linear = bh1d.BH1D(**dico_linear)
print(model_linear._ss.Ns)   # Hilbert-space dimension
res_test_linear = model_linear([], trunc_res=False)

# Minimum gap along the linear ramp and adiabatic populations
min_gap = model_linear.FindMinDelta()
print(min_gap)
state_tmp = model_linear.EvolutionPopAdiab(nb_ev=10)
model_linear.plot_pop_adiab(plot_gap=True)
#plot_pop_adiab(model_linear)

#==============================================================================
# Try to reach GS at the end
#==============================================================================
dico_simul = learner1DBH._process_controler(dico_simul)
dico_simul['control_obj'] = learner1DBH._build_control_from_string(
    dico_simul['control_obj'], None, context_dico=dico_simul)
model = bh1d.BH1D(**dico_simul)

optim_args = {'algo': 'BO2', 'maxiter': 100, 'num_cores': 4, 'init_obj': 75,
              'exploit_steps': 49, 'acq': 'EI', 'optim_num_anchor': 25,
              'optim_num_samples': 10000}
optim = Learner.learner_Opt(model=model, **optim_args)
resBO2 = optim(track_learning=True)
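# --- Illustration (not in the original script): inspect the optimized ramp.
# A minimal sketch reusing calls that appear elsewhere in this script
# (plot_function, EvolutionPopAdiab, plot_pop_adiab); it assumes that after
# optim() returns, model.control_fun holds the parameters picked by the
# optimizer, and that T is the ramp time used when building dico_simul.
import numpy as np

model.control_fun.plot_function(np.arange(-0.01, T + 0.01, 0.01))
state_opt = model.EvolutionPopAdiab(nb_ev=5)
model.plot_pop_adiab(plot_gap=True)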
import sys
sys.path.append("../../../QuantumSimulation")
import copy

from QuantumSimulation.ToyModels.BH.learn_1DBH import learner1DBH
from QuantumSimulation.ToyModels.BH import BH1D as bh1d
from QuantumSimulation.Utility.Optim import Learner, pFunc_base

optim_type = 'BO2'

# Create a model
fom = ['f2t2:neg_fluence:0.0001_smooth:0.05']
T = 4.889428431607287
dico_simul = {'L': 5, 'Nb': 5, 'mu': 0, 'sps': 5, 'T': T, 'dt': 0.01,
              'flag_intermediate': False, 'setup': '1', 'state_init': 'GS_i',
              'state_tgt': 'GS_inf', 'fom': fom, 'fom_print': True,
              'track_learning': True, 'ctl_shortcut': 'owbds01r_pwlr15'}
dico_simul = learner1DBH._process_controler(dico_simul)
dico_simul['control_obj'] = learner1DBH._build_control_from_string(
    dico_simul['control_obj'], None, context_dico=dico_simul)
model = bh1d.BH1D(**dico_simul)

#==============================================================================
# ***OPTIMIZATION***
#==============================================================================
optim_args = {'algo': 'BO2', 'maxiter': 500, 'num_cores': 4, 'init_obj': 75,
              'exploit_steps': 49, 'acq': 'EI', 'optim_num_anchor': 25,
              'optim_num_samples': 10000}
optim = Learner.learner_Opt(model=model, **optim_args)
resBO2 = optim(track_learning=True)
resBO2['last_func'] = model.control_fun
res = resBO2
print(res.keys())
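# --- Illustration (not in the original script): persist the result for later
# analysis. A minimal sketch using only the standard library; it drops
# 'last_func' on the assumption that the control object may not be picklable,
# and the output filename is arbitrary.
import pickle

to_save = {k: v for k, v in res.items() if k != 'last_func'}
with open('resBO2_owbds01r_pwlr15.pkl', 'wb') as f:
    pickle.dump(to_save, f)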