def _optimize_pulse(system):
    """
    Unpack the `system` record type, optimise the result and assert that it
    succeeded.
    """
    result = cpo.optimize_pulse(system.system, system.controls,
                                system.initial, system.target,
                                **system.kwargs)
    error = " ".join(["Infidelity: {:7.4e}".format(result.fid_err),
                      "reason:", result.termination_reason])
    assert result.goal_achieved, error
    return result
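# A minimal sketch of the `system` record the helper above unpacks.  The field
# names follow the attribute accesses in `_optimize_pulse`; the namedtuple and
# the concrete operators/targets below are illustrative assumptions (not part
# of the original suite), and the usual qutip helpers used in the surrounding
# examples (sigmax, sigmaz, identity, hadamard_transform) are assumed imported.
import collections

System = collections.namedtuple(
    "System", ["system", "controls", "initial", "target", "kwargs"])

example_system = System(
    system=sigmaz(),                # drift operator (assumed example)
    controls=[sigmax()],            # control operators
    initial=identity(2),            # initial operator
    target=hadamard_transform(1),   # target operator
    kwargs={"num_tslots": 10, "evo_time": 10, "fid_err_targ": 1e-10,
            "init_pulse_type": "LIN", "gen_stats": True},
)
# result = _optimize_pulse(example_system)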
def test_symplectic(self):
    """
    Optimise pulse for coupled oscillators with symplectic dynamics,
    assert that the fidelity error is below threshold
    """
    g1 = 1.0
    g2 = 0.2
    A0 = Qobj(np.array([[1, 0, g1, 0],
                        [0, 1, 0, g2],
                        [g1, 0, 1, 0],
                        [0, g2, 0, 1]]))
    A_rot = Qobj(np.array([[1, 0, 0, 0],
                           [0, 1, 0, 0],
                           [0, 0, 0, 0],
                           [0, 0, 0, 0]]))
    A_sqz = Qobj(0.4*np.array([[1, 0, 0, 0],
                               [0, -1, 0, 0],
                               [0, 0, 0, 0],
                               [0, 0, 0, 0]]))
    A_c = [A_rot, A_sqz]
    n_ctrls = len(A_c)
    initial = identity(4)
    A_targ = Qobj(np.array([[0, 0, 1, 0],
                            [0, 0, 0, 1],
                            [1, 0, 0, 0],
                            [0, 1, 0, 0]]))
    Omg = Qobj(sympl.calc_omega(2))
    S_targ = (-A_targ*Omg*np.pi/2.0).expm()

    n_ts = 20
    evo_time = 10

    result = cpo.optimize_pulse(A0, A_c, initial, S_targ,
                                n_ts, evo_time,
                                fid_err_targ=1e-3, max_iter=200,
                                dyn_type='SYMPL',
                                init_pulse_type='ZERO',
                                gen_stats=True)
    assert_(result.goal_achieved, msg="Symplectic goal not achieved")
    assert_almost_equal(result.fid_err, 0.0, decimal=2,
                        err_msg="Symplectic infidelity too high")

    # Repeat with Qobj integration
    resultq = cpo.optimize_pulse(A0, A_c, initial, S_targ,
                                 n_ts, evo_time,
                                 fid_err_targ=1e-3, max_iter=200,
                                 dyn_type='SYMPL',
                                 init_pulse_type='ZERO',
                                 dyn_params={'oper_dtype': Qobj},
                                 gen_stats=True)
    assert_(resultq.goal_achieved, msg="Symplectic goal not achieved "
                                       "(Qobj integration)")

    # Check the same result is achieved using the create objects method
    optim = cpo.create_pulse_optimizer(A0, list(A_c), initial, S_targ,
                                       n_ts, evo_time,
                                       fid_err_targ=1e-3,
                                       dyn_type='SYMPL',
                                       init_pulse_type='ZERO',
                                       gen_stats=True)
    dyn = optim.dynamics

    p_gen = optim.pulse_generator
    init_amps = np.zeros([n_ts, n_ctrls])
    for j in range(n_ctrls):
        init_amps[:, j] = p_gen.gen_pulse()
    dyn.initialize_controls(init_amps)

    # Check the exact gradient
    func = optim.fid_err_func_wrapper
    grad = optim.fid_err_grad_wrapper
    x0 = dyn.ctrl_amps.flatten()
    grad_diff = check_grad(func, grad, x0)
    assert_almost_equal(grad_diff, 0.0, decimal=5,
                        err_msg="Frechet gradient outside tolerance "
                                "(SYMPL)")

    result2 = optim.run_optimization()
    assert_almost_equal(result.fid_err, result2.fid_err, decimal=6,
                        err_msg="Direct and indirect methods produce "
                                "different results for Symplectic")
def test_07_symplectic(self):
    """
    control.pulseoptim: coupled oscillators (symplectic dynamics)
    assert that the fidelity error is below threshold
    """
    g1 = 1.0
    g2 = 0.2
    A0 = Qobj(np.array([[1, 0, g1, 0],
                        [0, 1, 0, g2],
                        [g1, 0, 1, 0],
                        [0, g2, 0, 1]]))
    A_rot = Qobj(np.array([[1, 0, 0, 0],
                           [0, 1, 0, 0],
                           [0, 0, 0, 0],
                           [0, 0, 0, 0]]))
    A_sqz = Qobj(0.4 * np.array([[1, 0, 0, 0],
                                 [0, -1, 0, 0],
                                 [0, 0, 0, 0],
                                 [0, 0, 0, 0]]))
    A_c = [A_rot, A_sqz]
    n_ctrls = len(A_c)
    initial = identity(4)
    A_targ = Qobj(np.array([[0, 0, 1, 0],
                            [0, 0, 0, 1],
                            [1, 0, 0, 0],
                            [0, 1, 0, 0]]))
    Omg = Qobj(sympl.calc_omega(2))
    S_targ = (-A_targ * Omg * np.pi / 2.0).expm()

    n_ts = 20
    evo_time = 10

    result = cpo.optimize_pulse(A0, A_c, initial, S_targ,
                                n_ts, evo_time,
                                fid_err_targ=1e-3, max_iter=200,
                                dyn_type='SYMPL',
                                init_pulse_type='ZERO',
                                gen_stats=True)
    assert_(result.goal_achieved,
            msg="Symplectic goal not achieved. "
                "Terminated due to: {}, with infidelity: {}".format(
                    result.termination_reason, result.fid_err))
    assert_almost_equal(result.fid_err, 0.0, decimal=2,
                        err_msg="Symplectic infidelity too high")

    # Repeat with Qobj integration
    resultq = cpo.optimize_pulse(A0, A_c, initial, S_targ,
                                 n_ts, evo_time,
                                 fid_err_targ=1e-3, max_iter=200,
                                 dyn_type='SYMPL',
                                 init_pulse_type='ZERO',
                                 dyn_params={'oper_dtype': Qobj},
                                 gen_stats=True)
    assert_(resultq.goal_achieved,
            msg="Symplectic goal not achieved (Qobj integration). "
                "Terminated due to: {}, with infidelity: {}".format(
                    resultq.termination_reason, resultq.fid_err))

    # Check the same result is achieved using the create objects method
    optim = cpo.create_pulse_optimizer(A0, list(A_c), initial, S_targ,
                                       n_ts, evo_time,
                                       fid_err_targ=1e-3,
                                       dyn_type='SYMPL',
                                       init_pulse_type='ZERO',
                                       gen_stats=True)
    dyn = optim.dynamics

    p_gen = optim.pulse_generator
    init_amps = np.zeros([n_ts, n_ctrls])
    for j in range(n_ctrls):
        init_amps[:, j] = p_gen.gen_pulse()
    dyn.initialize_controls(init_amps)

    # Check the exact gradient
    func = optim.fid_err_func_wrapper
    grad = optim.fid_err_grad_wrapper
    x0 = dyn.ctrl_amps.flatten()
    grad_diff = check_grad(func, grad, x0)
    assert_almost_equal(grad_diff, 0.0, decimal=5,
                        err_msg="Frechet gradient outside tolerance "
                                "(SYMPL)")

    result2 = optim.run_optimization()
    assert_almost_equal(result.fid_err, result2.fid_err, decimal=6,
                        err_msg="Direct and indirect methods produce "
                                "different results for Symplectic")
def test_lindbladian(self):
    """
    Optimise pulse for amplitude damping channel with Lindbladian dynamics,
    assert that the fidelity error is below threshold
    """
    Sx = sigmax()
    Sz = sigmaz()
    Si = identity(2)

    Sd = Qobj(np.array([[0, 1], [0, 0]]))
    Sm = Qobj(np.array([[0, 0], [1, 0]]))
    Sd_m = Qobj(np.array([[1, 0], [0, 0]]))

    gamma = 0.1
    L0_Ad = gamma*(2*tensor(Sm, Sd.trans()) -
                   (tensor(Sd_m, Si) + tensor(Si, Sd_m.trans())))
    LC_x = -1j*(tensor(Sx, Si) - tensor(Si, Sx))
    LC_z = -1j*(tensor(Sz, Si) - tensor(Si, Sz))

    drift = L0_Ad
    ctrls = [LC_z, LC_x]
    n_ctrls = len(ctrls)
    initial = tensor(Si, Si)
    had_gate = hadamard_transform(1)
    target_DP = tensor(had_gate, had_gate)

    n_ts = 10
    evo_time = 5

    result = cpo.optimize_pulse(drift, ctrls, initial, target_DP,
                                n_ts, evo_time,
                                fid_err_targ=1e-3, max_iter=200,
                                init_pulse_type='LIN',
                                gen_stats=True)
    assert_(result.fid_err < 0.1,
            msg="Fidelity higher than expected")

    # Repeat with Qobj propagation
    result = cpo.optimize_pulse(drift, ctrls, initial, target_DP,
                                n_ts, evo_time,
                                fid_err_targ=1e-3, max_iter=200,
                                init_pulse_type='LIN',
                                dyn_params={'oper_dtype': Qobj},
                                gen_stats=True)
    assert_(result.fid_err < 0.1,
            msg="Fidelity higher than expected (Qobj propagation)")

    # Check the same result is achieved using the create objects method
    optim = cpo.create_pulse_optimizer(drift, ctrls, initial, target_DP,
                                       n_ts, evo_time,
                                       fid_err_targ=1e-3,
                                       init_pulse_type='LIN',
                                       gen_stats=True)
    dyn = optim.dynamics

    p_gen = optim.pulse_generator
    init_amps = np.zeros([n_ts, n_ctrls])
    for j in range(n_ctrls):
        init_amps[:, j] = p_gen.gen_pulse()
    dyn.initialize_controls(init_amps)

    # Check the exact gradient
    func = optim.fid_err_func_wrapper
    grad = optim.fid_err_grad_wrapper
    x0 = dyn.ctrl_amps.flatten()
    grad_diff = check_grad(func, grad, x0)
    assert_almost_equal(grad_diff, 0.0, decimal=7,
                        err_msg="Frechet gradient outside tolerance")

    result2 = optim.run_optimization()
    assert_almost_equal(result.fid_err, result2.fid_err, decimal=3,
                        err_msg="Direct and indirect methods produce "
                                "different results for ADC")
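# One way to read the superoperator construction in the test above (an
# interpretation, assuming row-stacking vectorisation, i.e.
# vec(A rho B) = (A (x) B^T) vec(rho)): the amplitude-damping dissipator
# D[C]rho = C rho C^dag - (1/2){C^dag C, rho} with C = Sm becomes
# Sm (x) Sm - (1/2)(Sd_m (x) I + I (x) Sd_m), so
# L0_Ad = gamma*(2*tensor(Sm, Sd.trans()) - ...) is that dissipator with an
# effective damping rate 2*gamma (note Sd.trans() == Sm and Sd_m is diagonal).
# Likewise -1j*[H, rho] maps to -1j*(H (x) I - I (x) H^T), which is exactly
# LC_x and LC_z since Sx.trans() == Sx and Sz.trans() == Sz.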
def main():
    H0 = setH0()
    Hq_I = q + qd
    Hq_Q = 1j*(q - qd)
    H = [H0, [Hq_I, qI], [Hq_Q, qQ]]
    psitarg = settarg()

    # Time-independent Hamiltonian H0
    H_d = H0
    # Time-dependent Hamiltonians of the drives,
    # i.e. H = H_d + u1(t)*Hq_I + u2(t)*Hq_Q
    H_c = [Hq_I, Hq_Q]

    # Fidelity error target
    fid_err_targ = 1e-10
    # Maximum iterations for the optimisation algorithm
    max_iter = 200
    # Maximum (elapsed) time allowed in seconds
    max_wall_time = 120
    # Minimum gradient (sum of gradients squared)
    # as this tends to 0 -> a local minimum has been found
    min_grad = 1e-20
    # Type of initial guess for the drives
    p_type = 'SAW'
    # Save the results to a text file
    f_ext = "{}_n_ts{}_ptype{}.txt".format("Hi", Ns, p_type)

    # Run the GRAPE pulse optimisation
    result = cpo.optimize_pulse(H_d, H_c, psi0, psitarg, Ns, T,
                                fid_err_targ=fid_err_targ, min_grad=min_grad,
                                max_iter=max_iter,
                                max_wall_time=max_wall_time,
                                out_file_ext=f_ext, init_pulse_type=p_type,
                                log_level=log_level, gen_stats=True)

    # Plot
    fig1 = plt.figure()
    # Initial amplitudes
    ax1 = fig1.add_subplot(2, 1, 1)
    ax1.set_title("Initial control amps")
    ax1.set_xlabel("Time")
    ax1.set_ylabel("Control amplitude")
    ax1.step(result.time,
             np.hstack((result.initial_amps[:, 0],
                        result.initial_amps[-1, 0])),
             where='post')
    ax1.step(result.time,
             np.hstack((result.initial_amps[:, 1],
                        result.initial_amps[-1, 1])),
             where='post')
    # Final amplitudes
    ax2 = fig1.add_subplot(2, 1, 2)
    ax2.set_title("Optimised Control Sequences")
    ax2.set_xlabel("Time")
    ax2.set_ylabel("Control amplitude")
    ax2.step(result.time,
             np.hstack((result.final_amps[:, 0], result.final_amps[-1, 0])),
             where='post')
    ax2.step(result.time,
             np.hstack((result.final_amps[:, 1], result.final_amps[-1, 1])),
             where='post')
    plt.tight_layout()
    plt.show()

    # Report the results
    result.stats.report()
    print("Final evolution\n{}\n".format(result.evo_full_final))
    print("********* Summary *****************")
    print("Final fidelity error {}".format(result.fid_err))
    print("Final gradient norm {}".format(result.grad_norm_final))
    print("Terminated due to {}".format(result.termination_reason))
    print("Number of iterations {}".format(result.num_iter))

    # Display what the QuTiP built-in GRAPE algorithm thinks it managed to
    # achieve with the drives
    b = Bloch()
    b.add_states(result.evo_full_final)
    b.show()

    # Put the results into variables
    QI = result.final_amps[:, 0]
    QQ = result.final_amps[:, 1]

    # Call our own simulation of the qubit with the drives produced by the
    # built-in GRAPE algorithm
    fidelitytarg(H, psi0, psitarg, QI, QQ)
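# A sketch (not part of the example above) of how the optimised
# piecewise-constant amplitudes could be replayed with qutip's sesolve as an
# independent check.  `replay_with_sesolve` and its argument list are
# illustrative assumptions; it would be called from main() with the objects
# defined there (H0, Hq_I, Hq_Q, psi0, psitarg, result, T).
from qutip import sesolve

def replay_with_sesolve(H0, Hq_I, Hq_Q, psi0, psitarg, result, T):
    def make_step(times, amps):
        # Piecewise-constant coefficient: amps[k] on [times[k], times[k+1])
        def coeff(t, args=None):
            idx = int(np.clip(np.searchsorted(times, t, side='right') - 1,
                              0, len(amps) - 1))
            return amps[idx]
        return coeff

    uI = make_step(result.time, result.final_amps[:, 0])
    uQ = make_step(result.time, result.final_amps[:, 1])
    H_td = [H0, [Hq_I, uI], [Hq_Q, uQ]]
    tlist = np.linspace(0, T, 501)
    final_state = sesolve(H_td, psi0, tlist).states[-1]
    overlap = abs((psitarg.dag() * final_state).full()[0, 0])
    print("Overlap with target state: {:.6f}".format(overlap))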
def test_06_lindbladian(self):
    """
    control.pulseoptim: amplitude damping channel Lindbladian dynamics
    assert that the fidelity error is below threshold
    """
    Sx = sigmax()
    Sz = sigmaz()
    Si = identity(2)

    Sd = Qobj(np.array([[0, 1], [0, 0]]))
    Sm = Qobj(np.array([[0, 0], [1, 0]]))
    Sd_m = Qobj(np.array([[1, 0], [0, 0]]))

    gamma = 0.1
    L0_Ad = gamma * (2 * tensor(Sm, Sd.trans()) -
                     (tensor(Sd_m, Si) + tensor(Si, Sd_m.trans())))
    LC_x = -1j * (tensor(Sx, Si) - tensor(Si, Sx))
    LC_z = -1j * (tensor(Sz, Si) - tensor(Si, Sz))

    drift = L0_Ad
    ctrls = [LC_z, LC_x]
    n_ctrls = len(ctrls)
    initial = tensor(Si, Si)
    had_gate = hadamard_transform(1)
    target_DP = tensor(had_gate, had_gate)

    n_ts = 10
    evo_time = 5

    result = cpo.optimize_pulse(drift, ctrls, initial, target_DP,
                                n_ts, evo_time,
                                fid_err_targ=1e-3, max_iter=200,
                                init_pulse_type='LIN',
                                gen_stats=True)
    assert_(result.fid_err < 0.1,
            msg="Fidelity higher than expected")

    # Repeat with Qobj propagation
    result = cpo.optimize_pulse(drift, ctrls, initial, target_DP,
                                n_ts, evo_time,
                                fid_err_targ=1e-3, max_iter=200,
                                init_pulse_type='LIN',
                                dyn_params={'oper_dtype': Qobj},
                                gen_stats=True)
    assert_(result.fid_err < 0.1,
            msg="Fidelity higher than expected (Qobj propagation)")

    # Check the same result is achieved using the create objects method
    optim = cpo.create_pulse_optimizer(drift, ctrls, initial, target_DP,
                                       n_ts, evo_time,
                                       fid_err_targ=1e-3,
                                       init_pulse_type='LIN',
                                       gen_stats=True)
    dyn = optim.dynamics

    p_gen = optim.pulse_generator
    init_amps = np.zeros([n_ts, n_ctrls])
    for j in range(n_ctrls):
        init_amps[:, j] = p_gen.gen_pulse()
    dyn.initialize_controls(init_amps)

    # Check the exact gradient
    func = optim.fid_err_func_wrapper
    grad = optim.fid_err_grad_wrapper
    x0 = dyn.ctrl_amps.flatten()
    grad_diff = check_grad(func, grad, x0)
    assert_almost_equal(grad_diff, 0.0, decimal=7,
                        err_msg="Frechet gradient outside tolerance")

    result2 = optim.run_optimization()
    assert_almost_equal(result.fid_err, result2.fid_err, decimal=3,
                        err_msg="Direct and indirect methods produce "
                                "different results for ADC")
# dyn_type='GEN_MAT'
# This means that the matrices that describe the dynamics are assumed to be
# general, i.e. the propagator can be calculated using:
# expm(combined_dynamics*dt)
# prop_type='FRECHET'
# and the propagators and their gradients will be calculated using the
# Frechet method, i.e. an exact gradient
# fid_type='TRACEDIFF'
# and that the fidelity error, i.e. distance from the target, is given
# by the trace of the difference between the target and evolved operators
result = cpo.optimize_pulse(drift, ctrls, initial, target_DP, n_ts, evo_time,
                            fid_err_targ=fid_err_targ, min_grad=min_grad,
                            max_iter=max_iter, max_wall_time=max_wall_time,
                            amp_lbound=-10.0, amp_ubound=10.0,
                            # dyn_params={'oper_dtype': Qobj},
                            # prop_type='AUG_MAT',
                            # fid_type='UNIT',
                            out_file_ext=f_ext, init_pulse_type=p_type,
                            log_level=log_level, gen_stats=True)

print("***********************************")
print("\nOptimising complete. Stats follow:")
result.stats.report()
print("Final evolution\n{}\n".format(result.evo_full_final))

print("********* Summary *****************")
print("Initial fidelity error {}".format(result.initial_fid_err))
print("Final fidelity error {}".format(result.fid_err))
print("Terminated due to {}".format(result.termination_reason))
print("Number of iterations {}".format(result.num_iter))
# print("wall time: ", result.wall_time)
# and the propagators and their gradients will be calculated using the
# Frechet method, i.e. an exact gradient
# fid_type='TRACEDIFF'
# and that the fidelity error, i.e. distance from the target, is given
# by the trace of the difference between the target and evolved operators
result = cpo.optimize_pulse(
    drift, ctrls, initial, target_DP, n_ts, evo_time,
    fid_err_targ=fid_err_targ, min_grad=min_grad,
    max_iter=max_iter, max_wall_time=max_wall_time,
    amp_lbound=-10.0, amp_ubound=10.0,
    # dyn_params={'oper_dtype': Qobj},
    # prop_type='AUG_MAT',
    # fid_type='UNIT',
    out_file_ext=f_ext, init_pulse_type=p_type,
    log_level=log_level, gen_stats=True)

print("***********************************")
print("\nOptimising complete. Stats follow:")
result.stats.report()
print("Final evolution\n{}\n".format(result.evo_full_final))

print("********* Summary *****************")
# Fidelity error target
fid_err_targ = 1e-3
# Maximum iterations for the optimisation algorithm
max_iter = 3500
# Maximum (elapsed) time allowed in seconds
max_wall_time = 1600
# Minimum gradient (sum of gradients squared)
# as this tends to 0 -> a local minimum has been found
min_grad = 1e-20

p_type = 'CUSTOM'

for evo_time in evo_times:
    n_ts = int(float(evo_time / 0.222))
    result = cpo.optimize_pulse(drift, ctrls, E0, E_targ, n_ts, evo_time,
                                amp_lbound=0, amp_ubound=1,
                                fid_err_targ=fid_err_targ, min_grad=min_grad,
                                max_iter=max_iter,
                                max_wall_time=max_wall_time,
                                out_file_ext=None, init_pulse_type=p_type,
                                log_level=log_level, gen_stats=True)
    result.stats.report()
    print("Final evolution\n{}\n".format(result.evo_full_final))
    print("********* Summary *****************")
    print("Initial fidelity error {}".format(result.initial_fid_err))
    print("Final fidelity error {}".format(result.fid_err))
    print("Final gradient norm {}".format(result.grad_norm_final))
    print("Terminated due to {}".format(result.termination_reason))
    print("Number of iterations {}".format(result.num_iter))
    print("Completed in {} HH:MM:SS.US".format(
        datetime.timedelta(seconds=result.wall_time)))
    print("Results for evolution time {}".format(evo_time))
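# The choice n_ts = int(evo_time / 0.222) in the loop above keeps each time
# slot at a fixed width of roughly 0.222 time units, so longer evolution times
# simply get proportionally more control parameters per optimisation run.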
def testQutipGrape():
    # sys.stdout = open('error file.txt', 'w')
    H_d = Qobj(0 * np.kron(np.kron(I, I), I))
    H_c, labels = get_hks(3)
    U_0 = identity(8)
    # U_targ = hadamard_transform(1)
    # U_targ = Qobj([[np.cos(-10000), 1j*np.sin(-10000)],
    #                [1j*np.sin(-10000), np.cos(-10000)]])
    w = np.exp(1j * (math.pi / 4))
    U_targ = Qobj((1 / np.sqrt(8)) * np.array(
        [[1, 1, 1, 1, 1, 1, 1, 1],
         [1, w, np.power(w, 2), np.power(w, 3), np.power(w, 4),
          np.power(w, 5), np.power(w, 6), np.power(w, 7)],
         [1, np.power(w, 2), np.power(w, 4), np.power(w, 6), 1,
          np.power(w, 2), np.power(w, 4), np.power(w, 6)],
         [1, np.power(w, 3), np.power(w, 6), w, np.power(w, 4),
          np.power(w, 7), np.power(w, 2), np.power(w, 5)],
         [1, np.power(w, 4), 1, np.power(w, 4), 1, np.power(w, 4), 1,
          np.power(w, 4)],
         [1, np.power(w, 5), np.power(w, 2), np.power(w, 7), np.power(w, 4),
          w, np.power(w, 6), np.power(w, 3)],
         [1, np.power(w, 6), np.power(w, 4), np.power(w, 2), 1,
          np.power(w, 6), np.power(w, 4), np.power(w, 2)],
         [1, np.power(w, 7), np.power(w, 6), np.power(w, 5), np.power(w, 4),
          np.power(w, 3), np.power(w, 2), w]], dtype=complex))

    n_ts = 1000
    tTotal = 1000
    amps_low = None
    amps_high = None
    tolerance = 1e-1
    max_iterations = 1e500  # overflows to float('inf'): effectively no cap
    max_runtime = 1e100
    init_options = ["RND", "LIN", "ZERO", "SINE", "SQUARE", "TRIANGLE", "SAW"]
    init_type = init_options[3]

    result = cpo.optimize_pulse(H_d, H_c, U_0, U_targ,
                                num_tslots=n_ts, evo_time=tTotal,
                                amp_lbound=amps_low, amp_ubound=amps_high,
                                fid_err_targ=tolerance,
                                max_iter=max_iterations,
                                max_wall_time=max_runtime,
                                init_pulse_type=init_type, gen_stats=True,
                                log_level=qutip.logging_utils.DEBUG)
    amps = []
    for i in range(len(labels)):
        amps.append(result.final_amps[:, i])
    print(len(amps))
    print(len(amps[0]))
    printResults(amps, n_ts, tTotal / (1.0 * n_ts), labels)
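# The 8x8 target written out element-by-element in testQutipGrape above is the
# discrete Fourier transform matrix with w = exp(1j*pi/4): entry (j, k) is
# w**(j*k) / sqrt(8).  A compact equivalent construction, shown here only as a
# sketch for comparison (U_targ_compact is not used by the original code):
w_dft = np.exp(1j * np.pi / 4)
jk = np.outer(np.arange(8), np.arange(8))
U_targ_compact = Qobj(np.power(w_dft, jk) / np.sqrt(8))
# np.allclose(U_targ_compact.full(), U_targ.full()) should hold up to rounding.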
def generate_training_sample(unit_nb, ctrl_init, initial, params, n_ts,
                             evo_time, noise_name, model_dim):
    f_ext = None
    path_template = "training/dim_{}/mtx/idx_{}"
    fid_err_targ = 1e-12
    max_iter = 200000
    max_wall_time = 5 * 60
    min_grad = 1e-20
    # target_DP = 0

    if model_dim == "2x1":
        current_path = path_template.format(model_dim, unit_nb)
        if pathlib.Path(current_path + ".npz").exists():
            rnd_unit = Qobj(np.load(current_path + ".npz")["arr_0"])
            rnd_unitC = rnd_unit.conj()
            target_DP = tensor(rnd_unit, rnd_unitC)
        else:
            rnd_unit = tensor(rand_unitary(2), identity(2))
            rnd_unitC = rnd_unit.conj()
            np.savez(current_path, rnd_unit.full())
            target_DP = tensor(rnd_unit, rnd_unitC)
    elif model_dim == "2" or model_dim == "4":
        current_path = path_template.format(model_dim, unit_nb)
        if pathlib.Path(current_path + ".npz").exists():
            rnd_unit = Qobj(np.load(current_path + ".npz")["arr_0"])
            rnd_unitC = rnd_unit.conj()
            target_DP = tensor(rnd_unit, rnd_unitC)
        else:
            rnd_unit = rand_unitary(int(model_dim))
            rnd_unitC = rnd_unit.conj()
            np.savez(current_path, rnd_unit.full())
            target_DP = tensor(rnd_unit, rnd_unitC)

    if noise_name == 'id_aSxbSy_spinChain_2x1':
        ctrls, drift = id_aSxbSy_spinChain_2x1(params)
    elif noise_name == "aSxbSy_id_spinChain_dim_2x1":
        ctrls, drift = aSxbSy_id_spinChain_dim_2x1(params)

    ctrls = [Qobj(ctrls[i]) for i in range(len(ctrls))]
    drift = Qobj(drift)

    result = cpo.optimize_pulse(drift, ctrls, initial, target_DP,
                                n_ts, evo_time,
                                amp_lbound=-1, amp_ubound=1,
                                fid_err_targ=fid_err_targ, min_grad=min_grad,
                                max_iter=max_iter,
                                max_wall_time=max_wall_time,
                                out_file_ext=f_ext,
                                init_pulse_type=ctrl_init,
                                log_level=log_level, gen_stats=True)
    print("Sample number ", unit_nb, " has error ", result.fid_err)
    np.savez("training/dim_{}/NCP_data/idx_{}".format(model_dim, unit_nb),
             result.final_amps)
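# One way to read the target construction above (an interpretation, assuming
# the row-stacking vectorisation convention and the behaviour of the unseen
# spin-chain helper functions): target_DP = tensor(rnd_unit, rnd_unit.conj())
# is U (x) U*, the superoperator of the unitary channel rho -> U rho U^dag,
# so each training sample optimises pulses towards the full channel rather
# than the unitary itself.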
# Frechet method, i.e. an exact gradient
# fid_type='TRACEDIFF'
# and that the fidelity error, i.e. distance from the target, is given
# by the trace of the difference between the target and evolved operators
result_s = cpo.optimize_pulse(
    drift, ctrls, rho0_vec, rho_targ_vec, n_ts, evo_time,
    fid_err_targ=fid_err_targ, min_grad=min_grad,
    max_iter=max_iter, max_wall_time=max_wall_time,
    amp_lbound=-0.5, amp_ubound=0.5,
    # dyn_params={'oper_dtype': Qobj},
    # prop_type='AUG_MAT',
    # fid_type='UNIT',
    accuracy_factor=1,
    out_file_ext=f_ext, init_pulse_type=p_type,
    log_level=log_level, gen_stats=True)

print("***********************************")
print("\nOptimising complete.")
if REPORT_STATS:
def generate_training_sample(unit_nb, params, argv_number):
    # (previously: ctrl_init, noise_params, n_ts, evo_time, noise_name,
    #  model_dim, supeop_size)
    f_ext = None
    path_template = "training/dim_{}/mtx/idx_{}"
    fid_err_targ = 1e-12
    max_iter = 200000
    max_wall_time = 5 * 60
    min_grad = 1e-20
    # target_DP = 0

    if params.model_dim == "2x1":
        current_path = path_template.format(params.model_dim, unit_nb)
        if pathlib.Path(current_path + ".npz").exists():
            rnd_unit = Qobj(np.load(current_path + ".npz")["arr_0"])
            rnd_unitC = rnd_unit.conj()
            target_DP = tensor(rnd_unit, rnd_unitC)
        else:
            rnd_unit = tensor(rand_unitary(2), identity(2))
            rnd_unitC = rnd_unit.conj()
            np.savez(current_path, rnd_unit.full())
            target_DP = tensor(rnd_unit, rnd_unitC)
    elif params.model_dim == "2" or params.model_dim == "4":
        current_path = path_template.format(params.model_dim, unit_nb)
        if pathlib.Path(current_path + ".npz").exists():
            rnd_unit = Qobj(np.load(current_path + ".npz")["arr_0"])
            rnd_unitC = rnd_unit.conj()
            target_DP = tensor(rnd_unit, rnd_unitC)
        else:
            rnd_unit = rand_unitary(int(params.model_dim))
            rnd_unitC = rnd_unit.conj()
            np.savez(current_path, rnd_unit.full())
            target_DP = tensor(rnd_unit, rnd_unitC)

    if params.noise_name == 'id_aSxbSy_spinChain_2x1':
        ctrls, drift = id_aSxbSy_spinChain_2x1(params.noise_params)
    elif params.noise_name == "aSxbSy_id_spinChain_dim_2x1":
        ctrls, drift = aSxbSy_id_spinChain_dim_2x1(params.noise_params)
    elif params.noise_name == "spinChainDrift_spinChain_dim_2x1":
        ctrls, drift = spinChainDrift_spinChain_dim_2x1(params.noise_params)
    elif params.noise_name == "Sz_id_and_ketbra01_id_Lindbald_spinChain_drift":
        ctrls, drift = Sz_id_and_ketbra01_id_Lindbald_spinChain_drift(
            params.noise_params)
    elif params.noise_name == "ketbra01_id_Lindbald_spinChain_drift":
        ctrls, drift = ketbra01_id_Lindbald_spinChain_drift(
            params.noise_params)
    elif params.noise_name == "Sz_id_and_ketbra01_id_and_reverse_Lindbald_spinChain_drift":
        ctrls, drift = Sz_id_and_ketbra01_id_and_reverse_Lindbald_spinChain_drift(
            params.noise_params)
    elif params.noise_name == "Sz_id_id_Sz_Lindbald_spinChain_drift":
        ctrls, drift = Sz_id_id_Sz_Lindbald_spinChain_drift(
            params.noise_params)

    ctrls = [Qobj(ctrls[i]) for i in range(len(ctrls))]
    drift = Qobj(drift)
    initial = identity(params.supeop_size)

    if argv_number == 0.:
        result = cpo.optimize_pulse(drift, ctrls, initial, target_DP,
                                    params.n_ts, params.evo_time,
                                    amp_lbound=-1, amp_ubound=1,
                                    fid_err_targ=fid_err_targ,
                                    min_grad=min_grad,
                                    max_iter=max_iter,
                                    max_wall_time=max_wall_time,
                                    out_file_ext=f_ext,
                                    init_pulse_type=params.ctrl_init,
                                    log_level=log_level, gen_stats=True)
        print("Sample number ", unit_nb, " has error ", result.fid_err)
        np.savez("training/dim_{}/NCP_data_unbounded/idx_{}".format(
            params.model_dim, unit_nb), result.final_amps)
    else:
        ampsy = np.load("training/dim_{}/NCP_data/idx_{}.npz".format(
            params.model_dim, unit_nb))['arr_0']
        result = my_opt(drift, ctrls, initial, target_DP,
                        params.n_ts, params.evo_time,
                        amp_lbound=-1, amp_ubound=1,
                        fid_err_targ=fid_err_targ, min_grad=min_grad,
                        max_iter=max_iter, max_wall_time=max_wall_time,
                        out_file_ext=f_ext,
                        init_pulse_type=params.ctrl_init,
                        log_level=log_level, gen_stats=True,
                        init_pulse=ampsy)
        print("Sample number ", unit_nb, " has error ", 1 - result.fid_err)
        np.savez("training/dim_{}/DCP_data/DCP_config{}/idx_{}".format(
            params.model_dim, argv_number, unit_nb), result.final_amps)