def get_inter_vecs(self):
    # get propagated states at each time step
    if not self.sys_para.use_inter_vecs:
        return None

    state_num = self.sys_para.state_num

    inter_vecs_mag_squared = []
    inter_vecs_real = []
    inter_vecs_imag = []

    if self.sys_para.is_dressed:
        v_sorted = sort_ev(self.sys_para.v_c, self.sys_para.dressed_id)

    inter_vecs = tf.stack(self.tf_inter_vecs).eval()

    if self.sys_para.save:
        with H5File(self.sys_para.file_path) as hf:
            hf.append('inter_vecs_raw_real',
                      np.array(inter_vecs[:, 0:state_num, :]))
            hf.append('inter_vecs_raw_imag',
                      np.array(inter_vecs[:, state_num:2 * state_num, :]))

    for inter_vec in inter_vecs:
        inter_vec_real = inter_vec[0:state_num, :]
        inter_vec_imag = inter_vec[state_num:2 * state_num, :]
        inter_vec_c = inter_vec_real + 1j * inter_vec_imag

        if self.sys_para.is_dressed:
            dressed_vec_c = np.dot(np.transpose(v_sorted), inter_vec_c)
            inter_vec_mag_squared = np.square(np.abs(dressed_vec_c))
            inter_vec_real = np.real(dressed_vec_c)
            inter_vec_imag = np.imag(dressed_vec_c)
        else:
            inter_vec_mag_squared = np.square(np.abs(inter_vec_c))
            inter_vec_real = np.real(inter_vec_c)
            inter_vec_imag = np.imag(inter_vec_c)

        inter_vecs_mag_squared.append(inter_vec_mag_squared)
        inter_vecs_real.append(inter_vec_real)
        inter_vecs_imag.append(inter_vec_imag)

    if self.sys_para.save:
        with H5File(self.sys_para.file_path) as hf:
            hf.append('inter_vecs_mag_squared', np.array(inter_vecs_mag_squared))
            hf.append('inter_vecs_real', np.array(inter_vecs_real))
            hf.append('inter_vecs_imag', np.array(inter_vecs_imag))

    return inter_vecs_mag_squared
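# The slicing in get_inter_vecs assumes each propagated state is stored as a
# real vector of length 2 * state_num, with the real part stacked on top of the
# imaginary part. A minimal sketch of recovering the complex state from that
# layout (the helper name below is hypothetical, not part of this codebase):
def _r_to_c_vec_sketch(r_vec):
    import numpy as np  # local import keeps the sketch self-contained
    r_vec = np.asarray(r_vec)
    n = len(r_vec) // 2
    return r_vec[:n] + 1j * r_vec[n:]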
def init_vectors(self):
    # initialize the vectors used for propagation
    self.initial_vectors = []
    self.initial_vectors_c = []

    for state in self.states_concerned_list:
        if self.state_transfer:
            self.initial_vector_c = np.array(state)
        else:
            if self.is_dressed:
                self.initial_vector_c = self.v_c[:, get_state_index(state, self.dressed_id)]
            else:
                self.initial_vector_c = np.zeros(self.state_num)
                self.initial_vector_c[state] = 1

        self.initial_vectors_c.append(self.initial_vector_c)
        self.initial_vector = c_to_r_vec(self.initial_vector_c)
        self.initial_vectors.append(self.initial_vector)

    if self.save:
        with H5File(self.file_path) as hf:
            hf.add('initial_vectors_c', data=np.array(self.initial_vectors_c))
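# init_vectors relies on c_to_r_vec to map each complex initial state into the
# doubled real representation used by the propagator. A minimal sketch, assuming
# the helper simply stacks real and imaginary parts (the sketch name is
# hypothetical and not the library's implementation):
def _c_to_r_vec_sketch(c_vec):
    import numpy as np  # local import keeps the sketch self-contained
    c_vec = np.asarray(c_vec, dtype=complex)
    return np.concatenate([np.real(c_vec), np.imag(c_vec)])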
def init_operators(self):
    # Create the operator matrices as numpy arrays
    self.ops = []
    for op_c in self.ops_c:
        op = c_to_r_mat(-1j * self.dt * op_c)
        self.ops.append(op)

    self.ops_len = len(self.ops)

    self.H0 = c_to_r_mat(-1j * self.dt * self.H0_c)
    self.identity_c = np.identity(self.state_num)
    self.identity = c_to_r_mat(self.identity_c)

    if self.Taylor_terms is None:
        self.exps = []
        self.scalings = []
        if self.state_transfer or self.no_scaling:
            comparisons = 1
        else:
            comparisons = 6
        d = 0
        while comparisons > 0:
            self.exp_terms = self.Choose_exp_terms(d)
            self.exps.append(self.exp_terms)
            self.scalings.append(self.scaling)
            comparisons = comparisons - 1
            d = d + 1
        self.complexities = np.add(self.exps, self.scalings)
        a = np.argmin(self.complexities)

        self.exp_terms = self.exps[a]
        self.scaling = self.scalings[a]
    else:
        self.exp_terms = self.Taylor_terms[0]
        self.scaling = self.Taylor_terms[1]

    if self.save:
        with H5File(self.file_path) as hf:
            hf.add('taylor_terms', data=self.exp_terms)
            hf.add('taylor_scaling', data=self.scaling)

    print("Using " + str(self.exp_terms) + " Taylor terms and "
          + str(self.scaling) + " Scaling & Squaring terms")

    i_array = np.eye(2 * self.state_num)
    op_matrix_I = i_array.tolist()

    self.H_ops = []
    for op in self.ops:
        self.H_ops.append(op)

    self.matrix_list = [self.H0]
    for ii in range(self.ops_len):
        self.matrix_list = self.matrix_list + [self.H_ops[ii]]
    self.matrix_list = self.matrix_list + [op_matrix_I]

    self.matrix_list = np.array(self.matrix_list)
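# init_operators converts each complex generator -1j * dt * H into a doubled
# real matrix via c_to_r_mat before it is handed to the TensorFlow propagator.
# A minimal sketch, assuming the standard block encoding [[Re, -Im], [Im, Re]]
# that matches the [real; imag] vector layout above (the sketch name is
# hypothetical):
def _c_to_r_mat_sketch(c_mat):
    import numpy as np  # local import keeps the sketch self-contained
    re, im = np.real(c_mat), np.imag(c_mat)
    return np.block([[re, -im], [im, re]])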
def save_data(self):
    if self.sys_para.save:
        self.elapsed = time.time() - self.start_time
        with H5File(self.sys_para.file_path) as hf:
            hf.append('error', np.array(self.l))
            hf.append('reg_error', np.array(self.rl))
            hf.append('uks', np.array(self.Get_uks()))
            hf.append('iteration', np.array(self.iterations))
            hf.append('run_time', np.array(self.elapsed))
            hf.append('unitary_scale', np.array(self.metric))
def get_final_state(self, save=True):
    # get final evolved unitary state
    M = self.tf_final_state.eval()
    CMat = self.RtoCMat(M)

    if self.sys_para.save and save:
        with H5File(self.sys_para.file_path) as hf:
            hf.append('final_state', np.array(M))

    return CMat
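# RtoCMat is assumed to invert the doubled-real encoding, recovering the complex
# final state from the real matrix returned by the graph. A minimal sketch under
# that assumption (the sketch name is hypothetical):
def _r_to_c_mat_sketch(r_mat):
    import numpy as np  # local import keeps the sketch self-contained
    n = r_mat.shape[0] // 2
    return r_mat[:n, :n] + 1j * r_mat[n:2 * n, :n]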
def Grape(H0, Hops, Hnames, U, total_time, steps, states_concerned_list,
          convergence=None, U0=None, reg_coeffs=None, dressed_info=None,
          maxA=None, use_gpu=True, sparse_H=True, sparse_U=False,
          sparse_K=False, draw=None, initial_guess=None, show_plots=True,
          unitary_error=1e-4, method='Adam', state_transfer=False,
          no_scaling=False, freq_unit='GHz', file_name=None, save=False,
          data_path=None, Taylor_terms=None, use_inter_vecs=True):

    # start time
    grape_start_time = time.time()

    # set the timing unit used for plotting
    freq_time_unit_dict = {"GHz": "ns", "MHz": "us", "KHz": "ms", "Hz": "s"}
    time_unit = freq_time_unit_dict[freq_unit]

    # force sparse_{H,U,K} to False when use_gpu is True,
    # since GPU sparse matmul is not supported yet
    if use_gpu:
        sparse_H = False
        sparse_U = False
        sparse_K = False

    file_path = None

    if save:
        # save all the input values
        if file_name is None:
            raise ValueError('Grape function input "file_name" is not specified.')
        if data_path is None:
            raise ValueError('Grape function input "data_path" is not specified.')

        file_num = 0
        while os.path.exists(os.path.join(
                data_path, str(file_num).zfill(5) + "_" + file_name + ".h5")):
            file_num += 1

        file_name = str(file_num).zfill(5) + "_" + file_name + ".h5"
        file_path = os.path.join(data_path, file_name)

        print("data saved at: " + str(file_path))

        with H5File(file_path) as hf:
            hf.add('H0', data=H0)
            hf.add('Hops', data=Hops)
            hf.add('Hnames', data=Hnames)
            hf.add('U', data=U)
            hf.add('total_time', data=total_time)
            hf.add('steps', data=steps)
            hf.add('states_concerned_list', data=states_concerned_list)
            hf.add('use_gpu', data=use_gpu)
            hf.add('sparse_H', data=sparse_H)
            hf.add('sparse_U', data=sparse_U)
            hf.add('sparse_K', data=sparse_K)

            if maxA is not None:
                hf.add('maxA', data=maxA)

            if initial_guess is not None:
                hf.add('initial_guess', data=initial_guess)

            hf.add('method', data=method)

            if convergence is not None:
                # guard against convergence=None; defaults are filled in below
                g1 = hf.create_group('convergence')
                for k, v in convergence.items():
                    g1.create_dataset(k, data=v)

            if reg_coeffs is not None:
                g2 = hf.create_group('reg_coeffs')
                for k, v in reg_coeffs.items():
                    g2.create_dataset(k, data=v)

            if dressed_info is not None:
                g3 = hf.create_group('dressed_info')
                for k, v in dressed_info.items():
                    g3.create_dataset(k, data=v)

    if U0 is None:
        U0 = np.identity(len(H0))

    if convergence is None:
        convergence = {'rate': 0.01, 'update_step': 100, 'max_iterations': 5000,
                       'conv_target': 1e-8, 'learning_rate_decay': 2500}

    if maxA is None:
        if initial_guess is None:
            maxAmp = 4 * np.ones(len(Hops))
        else:
            maxAmp = 1.5 * np.max(np.abs(initial_guess)) * np.ones(len(Hops))
    else:
        maxAmp = maxA

    # pass in the system parameters
    sys_para = SystemParameters(H0, Hops, Hnames, U, U0, total_time, steps,
                                states_concerned_list, dressed_info, maxAmp,
                                draw, initial_guess, show_plots, unitary_error,
                                state_transfer, no_scaling, reg_coeffs, save,
                                file_path, Taylor_terms, use_gpu,
                                use_inter_vecs, sparse_H, sparse_U, sparse_K)

    if use_gpu:
        dev = '/gpu:0'
    else:
        dev = '/cpu:0'

    with tf.device(dev):
        tfs = TensorflowState(sys_para)  # create tensorflow graph
        graph = tfs.build_graph()

    conv = Convergence(sys_para, time_unit, convergence)

    # run the optimization
    try:
        SS = run_session(tfs, graph, conv, sys_para, method,
                         show_plots=sys_para.show_plots, use_gpu=use_gpu)

        # save wall clock time
        if save:
            wall_clock_time = time.time() - grape_start_time
            with H5File(file_path) as hf:
                hf.add('wall_clock_time', data=np.array(wall_clock_time))
            print("data saved at: " + str(file_path))

        return SS.uks, SS.Uf

    except KeyboardInterrupt:
        # save wall clock time even if the run is interrupted
        if save:
            wall_clock_time = time.time() - grape_start_time
            with H5File(file_path) as hf:
                hf.add('wall_clock_time', data=np.array(wall_clock_time))
            print("data saved at: " + str(file_path))

        display.clear_output()
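# A minimal usage sketch of the Grape entry point: a single qubit with a
# sigma_x control driven toward an X gate. The physical parameters below are
# illustrative assumptions, not values taken from this codebase, and the run
# keeps saving and plotting disabled.
if __name__ == '__main__':
    import numpy as np  # local import keeps the sketch self-contained

    sigma_x = np.array([[0., 1.], [1., 0.]])
    sigma_z = np.array([[1., 0.], [0., -1.]])

    H0_demo = 0.0 * sigma_z      # drift Hamiltonian (assumed GHz angular units)
    Hops_demo = [sigma_x]        # control operators
    Hnames_demo = ['x']          # one name per control
    U_demo = sigma_x             # target unitary: X gate
    total_time_demo = 10.0       # ns, matching the default freq_unit='GHz'
    steps_demo = 500             # number of time steps

    uks, Uf = Grape(H0_demo, Hops_demo, Hnames_demo, U_demo,
                    total_time_demo, steps_demo,
                    states_concerned_list=[0, 1],
                    maxA=[2 * np.pi * 0.05],
                    use_gpu=False, show_plots=False, save=False)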