def probabilities(self, out_state, sort=False):
    """Return outcome probabilities of ``out_state``, merging amplitudes of
    indistinguishable basis states into single entries.

    Parameters
    ----------
    out_state : Qobj
        Output state; its amplitudes are read via ``.full()`` as a column
        vector indexed by the dimensions stored in
        ``self.state.reduced_label_map``.
    sort : bool, optional
        If True, return a list of ``(label, probability)`` pairs sorted by
        ascending probability; otherwise return a ``{label: probability}``
        dict (default).

    Returns
    -------
    dict or list
        Probabilities keyed by label (or ``(label, reversed_label)`` tuple
        for merged indistinguishable pairs).  Asserts they sum to 1 within
        5 decimal places.
    """
    # BUG FIX: the original used dict.iteritems(), which does not exist on
    # Python 3 (the rest of this file is Python 3), and wrapped a dict view
    # directly in np.array(), which yields a useless 0-d object array.
    state_copy = out_state.full().copy()
    merged = {}
    for label, dim in self.state.reduced_label_map.items():
        reverse_label = "_".join(label.split('_')[::-1])
        pair = (label, reverse_label)
        if pair in self.state.indistinguishable_states:
            # Indistinguishable pair: sum the amplitudes of all component
            # dimensions into one entry, exactly once per pair.
            if pair not in merged:
                merged[pair] = np.complex128(0)
                for component_dim in self.state.indistinguishable_states[pair]:
                    merged[pair] += state_copy[component_dim]
        else:
            # Keep the amplitude as-is, but skip labels already covered
            # (directly, reversed, or via an indistinguishable pair in
            # either orientation).
            already_covered = (
                label in merged
                or reverse_label in merged
                or (reverse_label, label) in self.state.indistinguishable_states
            )
            if not already_covered:
                merged[label] = state_copy[dim]

    # Renormalise the reduced amplitude vector, then take |amplitude|^2.
    new_qstate = Qobj(np.array(list(merged.values()))).unit()
    prob_array = new_qstate.conj().full() * new_qstate.full()
    # prob_array rows are length-1 (column vector); .tolist()[0] unwraps them.
    probs = {lab: row.real.tolist()[0]
             for lab, row in zip(merged.keys(), prob_array)}

    # Check that probabilities add to one
    np.testing.assert_almost_equal(sum(probs.values()), 1.0, decimal=5)

    # Return list if sort=True, else return dict
    if sort:
        return sorted(probs.items(), key=lambda x: x[1])
    return probs
def generate_training_sample(unit_nb, ctrl_init, initial, params, n_ts,
                             evo_time, noise_name, model_dim):
    """Optimise a control pulse toward a random target superoperator and save
    the resulting amplitudes for training sample ``unit_nb``.

    The random unitary defining the target is cached on disk under
    ``training/dim_<model_dim>/mtx/idx_<unit_nb>.npz`` so reruns reuse the
    same target.  Optimised amplitudes go to
    ``training/dim_<model_dim>/NCP_data/idx_<unit_nb>``.

    Parameters
    ----------
    unit_nb : int
        Sample index; selects the cached random unitary and output file.
    ctrl_init : str
        Initial pulse type passed to ``cpo.optimize_pulse``.
    initial : Qobj
        Initial (super)operator for the optimisation.
    params
        Model parameters forwarded to the noise-model constructor.
    n_ts : int
        Number of time slots.
    evo_time : float
        Total evolution time.
    noise_name : str
        Which noise-model constructor to use.
    model_dim : str
        One of ``"2x1"``, ``"2"``, ``"4"``.

    Raises
    ------
    ValueError
        If ``model_dim`` or ``noise_name`` is not recognised (the original
        silently fell through and crashed later with NameError).
    """
    f_ext = None
    path_template = "training/dim_{}/mtx/idx_{}"
    fid_err_targ = 1e-12
    max_iter = 200000
    max_wall_time = 5 * 60
    min_grad = 1e-20

    # Load the cached random target unitary if present, otherwise draw a new
    # one and cache it.  (Both model_dim branches shared the load path; the
    # duplication is collapsed here.)
    current_path = path_template.format(model_dim, unit_nb)
    if pathlib.Path(current_path + ".npz").exists():
        rnd_unit = Qobj(np.load(current_path + ".npz")["arr_0"])
    elif model_dim == "2x1":
        rnd_unit = tensor(rand_unitary(2), identity(2))
        np.savez(current_path, rnd_unit.full())
    elif model_dim in ("2", "4"):
        rnd_unit = rand_unitary(int(model_dim))
        np.savez(current_path, rnd_unit.full())
    else:
        raise ValueError("unsupported model_dim: {}".format(model_dim))
    # Target in superoperator (double) representation: U (x) conj(U).
    rnd_unitC = rnd_unit.conj()
    target_DP = tensor(rnd_unit, rnd_unitC)

    if noise_name == 'id_aSxbSy_spinChain_2x1':
        ctrls, drift = id_aSxbSy_spinChain_2x1(params)
    elif noise_name == "aSxbSy_id_spinChain_dim_2x1":
        ctrls, drift = aSxbSy_id_spinChain_dim_2x1(params)
    else:
        raise ValueError("unknown noise_name: {}".format(noise_name))

    ctrls = [Qobj(ctrl) for ctrl in ctrls]
    drift = Qobj(drift)

    # NOTE(review): log_level is read from module scope — confirm it is
    # defined wherever this function is imported.
    result = cpo.optimize_pulse(drift, ctrls, initial, target_DP, n_ts,
                                evo_time,
                                amp_lbound=-1, amp_ubound=1,
                                fid_err_targ=fid_err_targ, min_grad=min_grad,
                                max_iter=max_iter,
                                max_wall_time=max_wall_time,
                                out_file_ext=f_ext,
                                init_pulse_type=ctrl_init,
                                log_level=log_level, gen_stats=True)
    print("Sample number ", unit_nb, " have error ", result.fid_err)
    np.savez("training/dim_{}/NCP_data/idx_{}".format(model_dim, unit_nb),
             result.final_amps)
def generate_training_sample(unit_nb, params, argv_number):
    """Optimise a control pulse toward a cached random target superoperator
    and save the amplitudes for training sample ``unit_nb``.

    NOTE(review): this redefines ``generate_training_sample`` and shadows the
    earlier 8-argument version if both live in the same module — consider
    renaming one of them.

    Parameters
    ----------
    unit_nb : int
        Sample index; selects the cached random unitary and output files.
    params
        Configuration object providing ``model_dim``, ``noise_name``,
        ``noise_params``, ``n_ts``, ``evo_time``, ``ctrl_init`` and
        ``supeop_size``.
    argv_number : float
        ``0.`` runs the unbounded NCP optimisation via ``cpo.optimize_pulse``;
        any other value warm-starts ``my_opt`` from the saved NCP amplitudes
        and writes to the corresponding ``DCP_config`` directory.

    Raises
    ------
    ValueError
        If ``params.model_dim`` or ``params.noise_name`` is not recognised.
    """
    f_ext = None
    path_template = "training/dim_{}/mtx/idx_{}"
    fid_err_targ = 1e-12
    max_iter = 200000
    max_wall_time = 5 * 60
    min_grad = 1e-20

    # Load the cached random target unitary if present, otherwise draw a new
    # one and cache it.
    current_path = path_template.format(params.model_dim, unit_nb)
    if pathlib.Path(current_path + ".npz").exists():
        rnd_unit = Qobj(np.load(current_path + ".npz")["arr_0"])
    elif params.model_dim == "2x1":
        rnd_unit = tensor(rand_unitary(2), identity(2))
        np.savez(current_path, rnd_unit.full())
    elif params.model_dim in ("2", "4"):
        # BUG FIX: the original tested the undefined name `model_dim` and
        # called rand_unitary(int(dim)) with the undefined name `dim`; both
        # raised NameError.  params.model_dim is the intended value.
        rnd_unit = rand_unitary(int(params.model_dim))
        np.savez(current_path, rnd_unit.full())
    else:
        raise ValueError("unsupported model_dim: {}".format(params.model_dim))
    # Target in superoperator (double) representation: U (x) conj(U).
    rnd_unitC = rnd_unit.conj()
    target_DP = tensor(rnd_unit, rnd_unitC)

    if params.noise_name == 'id_aSxbSy_spinChain_2x1':
        ctrls, drift = id_aSxbSy_spinChain_2x1(params.noise_params)
    elif params.noise_name == "aSxbSy_id_spinChain_dim_2x1":
        ctrls, drift = aSxbSy_id_spinChain_dim_2x1(params.noise_params)
    elif params.noise_name == "spinChainDrift_spinChain_dim_2x1":
        ctrls, drift = spinChainDrift_spinChain_dim_2x1(params.noise_params)
    elif params.noise_name == "Sz_id_and_ketbra01_id_Lindbald_spinChain_drift":
        ctrls, drift = Sz_id_and_ketbra01_id_Lindbald_spinChain_drift(
            params.noise_params)
    elif params.noise_name == "ketbra01_id_Lindbald_spinChain_drift":
        ctrls, drift = ketbra01_id_Lindbald_spinChain_drift(
            params.noise_params)
    elif params.noise_name == "Sz_id_and_ketbra01_id_and_reverse_Lindbald_spinChain_drift":
        ctrls, drift = Sz_id_and_ketbra01_id_and_reverse_Lindbald_spinChain_drift(
            params.noise_params)
    elif params.noise_name == "Sz_id_id_Sz_Lindbald_spinChain_drift":
        ctrls, drift = Sz_id_id_Sz_Lindbald_spinChain_drift(
            params.noise_params)
    else:
        raise ValueError("unknown noise_name: {}".format(params.noise_name))

    ctrls = [Qobj(ctrl) for ctrl in ctrls]
    drift = Qobj(drift)
    initial = identity(params.supeop_size)

    # NOTE(review): log_level is read from module scope — confirm it is
    # defined wherever this function is imported.
    if argv_number == 0.:
        # Fresh (NCP) optimisation from the configured initial pulse type.
        result = cpo.optimize_pulse(drift, ctrls, initial, target_DP,
                                    params.n_ts, params.evo_time,
                                    amp_lbound=-1, amp_ubound=1,
                                    fid_err_targ=fid_err_targ,
                                    min_grad=min_grad,
                                    max_iter=max_iter,
                                    max_wall_time=max_wall_time,
                                    out_file_ext=f_ext,
                                    init_pulse_type=params.ctrl_init,
                                    log_level=log_level, gen_stats=True)
        print("Sample number ", unit_nb, " have error ", result.fid_err)
        np.savez(
            "training/dim_{}/NCP_data_unbounded/idx_{}".format(
                params.model_dim, unit_nb), result.final_amps)
    else:
        # Warm-start (DCP) optimisation from the previously saved NCP pulse.
        ampsy = np.load("training/dim_{}/NCP_data/idx_{}.npz".format(
            params.model_dim, unit_nb))['arr_0']
        result = my_opt(drift, ctrls, initial, target_DP,
                        params.n_ts, params.evo_time,
                        amp_lbound=-1, amp_ubound=1,
                        fid_err_targ=fid_err_targ, min_grad=min_grad,
                        max_iter=max_iter, max_wall_time=max_wall_time,
                        out_file_ext=f_ext,
                        init_pulse_type=params.ctrl_init,
                        log_level=log_level, gen_stats=True,
                        init_pulse=ampsy)
        # NOTE(review): the NCP branch prints result.fid_err directly but this
        # branch prints 1 - result.fid_err — presumably my_opt reports
        # fidelity rather than error; confirm.
        print("Sample number ", unit_nb, " have error ", 1 - result.fid_err)
        np.savez(
            "training/dim_{}/DCP_data/DCP_config{}/idx_{}".format(
                params.model_dim, argv_number, unit_nb), result.final_amps)