def func(t, b):
    if not isinstance(t, ADF) and not isinstance(b, ADF):
        return expm_multiply(t * A, b)
    t, b = to_auto_diff(t), to_auto_diff(b)
    if not isinstance(t.x, Number) and len(t) > 1:
        raise Exception("t must be a scalar")
    At = t.x * A
    x = expm_multiply(At, b.x)
    variables = _get_variables([t, b])
    if not variables:
        return constant(x)
    Ax = A.dot(x)
    AAx = A.dot(Ax)
    lc, qc, cp = _make_derivs_dicts()
    b_derivs = {}  # stores expm_multiply(At, b.d(v))
    for i, v in enumerate(variables):
        b_derivs[v] = expm_multiply(At, b.d(v))
        lc[v] = Ax * t.d(v) + b_derivs[v]
        # replace with A * exp(At) * b.dv
        b_derivs[v] = A.dot(b_derivs[v])
        qc[v] = (AAx * t.d(v) * t.d(v) + Ax * t.d2(v)
                 + 2 * t.d(v) * b_derivs[v] + expm_multiply(At, b.d2(v)))
    if get_order() == 2:
        for i, v in enumerate(variables):
            for j, u in enumerate(variables):
                if i < j:
                    cp[(v, u)] = (AAx * t.d(u) * t.d(v) + Ax * t.d2c(u, v)
                                  + t.d(u) * b_derivs[v] + t.d(v) * b_derivs[u]
                                  + expm_multiply(At, b.d2c(u, v)))
    return ADF(x, lc, qc, cp)
def get_relevant_nodes(self, pct_heat_threshold):
    """Return a list of the relevant nodes in the prior.

    Heat diffusion is applied to the prior network based on initial
    heat on nodes that are mutated according to patient statistics.
    """
    logger.info('Setting heat for relevant nodes in prior network')
    heats = np.zeros(len(self.prior_graph))
    mut_nodes = {}
    for gene_name, muts in self.norm_mutations.items():
        if muts:
            hgnc_id = get_hgnc_id(gene_name)
            node_key = 'HGNC:%s' % hgnc_id
            mut_nodes[node_key] = muts

    for idx, node in enumerate(self.prior_graph.nodes()):
        if node in mut_nodes:
            heats[idx] = mut_nodes[node]

    gamma = -0.1
    logger.info('Calculating Laplacian matrix')
    lp_mx = nx.normalized_laplacian_matrix(self.prior_graph, weight='weight')
    logger.info('Diffusing heat')
    Df = expm_multiply(gamma * lp_mx, heats)
    heat_thresh = np.percentile(Df, pct_heat_threshold)
    logger.info('Filtering to relevant nodes with heat threshold %.2f '
                '(%s percentile)' % (heat_thresh, pct_heat_threshold))
    # Zip the nodes with their heats and sort
    node_heats = sorted(list(zip(self.prior_graph.nodes(), Df)),
                        key=lambda x: x[1], reverse=True)
    relevant_nodes = [n for n, heat in node_heats if heat >= heat_thresh]
    return relevant_nodes
def propagate_interval(self, initial, tf, Nsteps=None, dt=None,
                       normalize=True):
    """Propagate an initial probability distribution over a time interval,
    return time and the probability distribution at each time-step

    Arguments:
        initial      initial probability density function
        tf           stop time (inclusive)
        Nsteps       number of time-steps (specify Nsteps or dt)
        dt           length of time-steps (specify Nsteps or dt)
        normalize    if True, normalize the initial probability
    """
    p0 = initial(*self.grid)
    if normalize:
        p0 /= np.sum(p0)

    if Nsteps is not None:
        dt = tf / Nsteps
    elif dt is not None:
        Nsteps = np.ceil(tf / dt).astype(int)
    else:
        raise ValueError('specify either Nsteps or dt')

    time = np.linspace(0, tf, Nsteps)
    pf = expm_multiply(self.master_matrix, p0.flatten(),
                       start=0, stop=tf, num=Nsteps, endpoint=True)
    return time, pf.reshape((pf.shape[0], ) + tuple(self.Ngrid))
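# Illustrative sketch (not part of the original snippet): how the
# start/stop/num form of expm_multiply used above behaves. Row k of the
# result equals expm(t_k * M) @ p0 with t_k = np.linspace(start, stop, num)[k].
# M and p0 below are made-up toy data standing in for the master matrix and
# the flattened initial distribution.
import numpy as np
from scipy.sparse import random as sparse_random
from scipy.sparse.linalg import expm_multiply
from scipy.linalg import expm

M = sparse_random(50, 50, density=0.1, format='csc', random_state=0)
p0 = np.ones(50) / 50.0
tf, Nsteps = 1.0, 5
pf = expm_multiply(M, p0, start=0, stop=tf, num=Nsteps, endpoint=True)
assert pf.shape == (Nsteps, p0.size)
# each row matches a single-time evaluation (up to numerical tolerance)
t2 = np.linspace(0, tf, Nsteps)[2]
assert np.allclose(pf[2], expm(M.toarray() * t2) @ p0)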
def _help_bench_expm_multiply(self, A, i, j):
    n = A.shape[0]

    print('converting the sparse matrix to a dense array...')
    tm_start = time.clock()
    A_dense = A.toarray()
    tm_end = time.clock()
    print(tm_end - tm_start, ' seconds')
    print()

    print('computing full expm of the dense array...')
    tm_start = time.clock()
    A_expm = scipy.linalg.expm(A_dense)
    full_expm_entry = A_expm[i, j]
    tm_end = time.clock()
    print('expm(A)[%d, %d]:' % (i, j), full_expm_entry)
    print(tm_end - tm_start, ' seconds')
    print()

    print('computing only column', j, 'of expm of the sparse matrix...')
    tm_start = time.clock()
    v = np.zeros(n, dtype=float)
    v[j] = 1
    A_expm_col_j = expm_multiply(A, v)
    expm_col_entry = A_expm_col_j[i]
    tm_end = time.clock()
    print('expm(A)[%d, %d]:' % (i, j), expm_col_entry)
    print(tm_end - tm_start, ' seconds')
    print()

    if np.allclose(full_expm_entry, expm_col_entry):
        print('The two methods give the same answer.')
    else:
        print('!!! The two methods give different answers. !!!')
    print()
def diag_ops_dynamics(psi_0, ham, tsteps, dt, ops):
    ops_t = []
    psi_t = psi_0
    ops_t.append(_expec_diag_ops(psi_t, ops))
    for _ in range(tsteps - 1):
        t1 = time.time()
        psi_t = expm_multiply(-1j * dt * ham, psi_t)
        t2 = time.time()
        print(t2 - t1)
        ops_t.append(_expec_diag_ops(psi_t, ops))
        t3 = time.time()
        print(t3 - t2)
    psi_t = expm_multiply(-1j * dt * ham, psi_t)
    return (np.array(ops_t).transpose(), psi_t)
def test_random_int_matrix(N=3500, ntest=10, seed=0):
    np.random.seed(seed)
    i = 0
    while (i < ntest):
        print("testing random integer matrix {}".format(i + 1))
        data_rvs = lambda n: np.random.randint(
            -100, 100, size=n, dtype=np.int8)
        A = random(N, N, density=np.log(N) / N, data_rvs=data_rvs,
                   dtype=np.int8)
        A = A.tocsr()
        v = (np.random.normal(0, 1, size=(N, 10))
             + 1j * np.random.normal(0, 1, size=(N, 10)))
        v /= np.linalg.norm(v)
        v1 = expm_multiply(-0.01j * A, v)
        v2 = expm_multiply_parallel(A, a=-0.01j, dtype=np.complex128).dot(v)
        np.testing.assert_allclose(
            v1, v2, rtol=0, atol=5e-15,
            err_msg='random matrix test failed, seed {:d}'.format(seed))
        i += 1
def indice_scintillation(self, pc1d):
    assert len(pc1d.shape) == 1, "sensors are not 1d"
    # print("start scintillation index")

    # compute E[ |a_j|^2 |a_l|^2 ]
    # print("Nm:", self.MV.Nm)
    # print("start computing matrix B")
    matB = self.MV.matriceB()
    # print("finished computing matrix B")
    A0, AA0 = npy.meshgrid(self.aj0, self.aj0)
    P0 = (npy.abs(A0**2) * npy.abs(AA0**2)).flatten()
    # print("start computing mo4")
    mo4 = slinalg.expm_multiply(self.xa * matB, P0)
    # print("finished computing mo4")
    # end of computation of E[ |a_j|^2 |a_l|^2 ]

    # compute E[I]^2
    # print("start computing E[I]^2")
    phiJ2 = 0
    for pc in pc1d:
        phiJ2 += self.MV.modesPropagatifs(pc)**2
    espI2 = npy.sum(1 / self.MV.Kxj * phiJ2 * self.mo2)**2
    # print("finished computing E[I]^2")
    # espI2 = npy.sum(self.mo2)**2
    # end of computation of E[I]^2

    # compute E[I^2]
    # print("start computing E[I^2]")
    BJ, BBJ = npy.meshgrid(npy.abs(self.MV.Kxj), npy.abs(self.MV.Kxj))
    BJL = (BJ * BBJ).flatten()
    PJ, PPJ = npy.meshgrid(phiJ2, phiJ2)
    termesNonCroisees = npy.eye(PJ.shape[0]).flatten()
    PJL2 = (PJ * PPJ).flatten()
    EI2 = (2 * (1 / BJL) * PJL2 * mo4
           - termesNonCroisees * (1 / BJL) * PJL2 * mo4)

    return (npy.sum(EI2) - espI2) / espI2
def init_matrices(self):
    'initialize the one-step basis and input effects matrices'

    dims = self.dims

    Timers.tic('expm')
    self.one_step_matrix_exp = expm(self.a_csc * self.time_elapser.step_size)
    Timers.toc('expm')

    Timers.tic('toarray')
    self.one_step_matrix_exp = self.one_step_matrix_exp.toarray()
    Timers.toc('toarray')

    if self.b_csc is not None:
        self.one_step_input_effects_matrix = np.zeros(self.b_csc.shape,
                                                      dtype=float)

        for c in range(self.time_elapser.inputs):
            # create the a_matrix augmented with a column of the b_matrix as an affine term
            indptr = self.b_csc.indptr

            data = np.concatenate((self.a_csc.data,
                                   self.b_csc.data[indptr[c]:indptr[c+1]]))
            indices = np.concatenate((self.a_csc.indices,
                                      self.b_csc.indices[indptr[c]:indptr[c+1]]))
            indptr = np.concatenate((self.a_csc.indptr, [len(data)]))

            aug_a_csc = csc_matrix((data, indices, indptr),
                                   shape=(dims + 1, dims + 1))
            mat = aug_a_csc * self.time_elapser.step_size

            # the last column of matrix_exp is the same as multiplying it
            # by the initial state [0, 0, ..., 1]
            init_state = np.zeros(dims + 1, dtype=float)
            init_state[dims] = 1.0
            col = expm_multiply(mat, init_state)

            self.one_step_input_effects_matrix[:, c] = col[:dims]
def start(self):
    """Diffuses the selected nodes against the network"""
    logging.info('Diffuser: Starting diffusion')
    if self.calculate_kernel:
        logging.info('Diffuser: Calculating kernel')
        self.calculateKernel(self.L)
        logging.info('Diffuser: Calculating kernel')
    if self.input_vector is not None:
        if self.calculate_kernel:
            self.out_vector = self.kernel.dot(self.input_vector)
        else:
            self.out_vector = expm_multiply(-self.L, self.input_vector,
                                            start=0, stop=0.1,
                                            endpoint=True)[-1]
        self.node_dict = dict([
            (self.network.node.keys()[i], self.out_vector[i])
            for i in range(len(self.network.node.keys()))
        ])
        sorted_diffused = sorted(self.node_dict.items(),
                                 key=operator.itemgetter(1), reverse=True)
        self.node_dict_rank = dict([(sorted_diffused[i][0], i)
                                    for i in range(len(sorted_diffused))])
        nx.set_node_attributes(self.network, 'diffused_output',
                               self.node_dict)
        nx.set_node_attributes(self.network, 'diffused_output_rank',
                               self.node_dict_rank)
    logging.info('Diffuser: Diffusion completed')
    return self.network
def quantum_walk_hypercube(N, timesteps, normalise=True):
    P = 2**N  # number of positions
    gamma = 1/N  # hopping rate

    A = hypercube(N)
    H = gamma * (A - N * np.eye(2 ** N))

    posn0 = np.zeros(P)
    posn0[0] = 1
    psi0 = posn0

    psiN = expm_multiply(-(1j) * timesteps * H, psi0)

    prob = np.real(np.conj(psiN) * psiN)

    result = np.zeros(N + 1)
    normalise_array = np.zeros(N + 1)

    for i, probability in enumerate(prob):
        binary_i = bin(i)
        i_ones = [ones for ones in binary_i[2:] if ones == '1']
        num_ones = len(i_ones)
        result[num_ones] += probability
        if normalise:
            normalise_array[num_ones] += 1

    if normalise:
        result = result/normalise_array

    return result
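# The snippet above relies on a hypercube(N) helper that is not shown here.
# A minimal sketch, assuming it should return the 2**N x 2**N adjacency
# matrix of the N-dimensional hypercube (vertices are bit strings, edges
# connect strings at Hamming distance 1); the real implementation may differ.
import numpy as np

def hypercube_adjacency_sketch(N):
    """Dense adjacency matrix of the N-dimensional hypercube graph."""
    P = 2 ** N
    A = np.zeros((P, P))
    for i in range(P):
        for d in range(N):
            j = i ^ (1 << d)  # flip one bit -> neighbouring vertex
            A[i, j] = 1.0
    return A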
def adiabatic(n, T, M, H_driver, H_problem, normalise=True):
    N = 2**n
    psiN = np.ones(N) * (1 / np.sqrt(N))

    H = H_driver

    prob_ground_H = np.zeros(M + 1)
    prob_ground_H[0] = np.abs(np.dot(first_eigv(H), psiN))**2

    prob_ground_H_driv = np.zeros(M + 1)
    ground_state_driv = first_eigv(H_driver)
    prob_ground_H_driv[0] = np.abs(
        np.dot(np.conjugate(ground_state_driv), psiN))**2

    prob_ground_H_prob = np.zeros(M + 1)
    ground_state_prob = first_eigv(H_problem)
    prob_ground_H_prob[0] = np.abs(
        np.dot(np.conjugate(ground_state_prob), psiN))**2

    for i in range(1, M + 1):
        t = i * (T / M)
        H = hamiltonian(t, T, H_driver, H_problem)

        # U = expm(-1j * (T / M) * H)
        # psiN = np.dot(U, psiN)
        A = -1j * (T / M) * H
        psiN = expm_multiply(A, psiN)

        prob_ground_H[i] = np.abs(np.dot(np.conjugate(first_eigv(H)), psiN))**2
        prob_ground_H_driv[i] = np.abs(
            np.dot(np.conjugate(ground_state_driv), psiN))**2
        prob_ground_H_prob[i] = np.abs(
            np.dot(np.conjugate(ground_state_prob), psiN))**2

    return prob_ground_H, prob_ground_H_driv, prob_ground_H_prob
def _evolve_step_2(i, H, t_list, dt_list):
    psi0 = _np.zeros((H.Ns,), dtype=_np.complex128)
    psi0[i] = 1.0

    for t, dt in zip(t_list, dt_list):
        psi0 = _sla.expm_multiply(-1j*dt*H.tocsr(t), psi0)

    return psi0
def heat_diffusion(network, diffusion_input, t=0.1):
    network_nodes = sorted(network.nodes())
    sparse_laplacian = csc_matrix(nx.laplacian_matrix(network))
    diffused_matrix = expm_multiply(-sparse_laplacian, diffusion_input,
                                    start=0, stop=t, endpoint=True)[-1]
    return diffused_matrix
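# A minimal usage sketch for the heat_diffusion helper above (toy data, not
# from the original project), assuming its own imports (networkx as nx,
# scipy's csc_matrix and expm_multiply) are already in scope: seed unit heat
# on one node of a small graph and diffuse it for the default time t=0.1.
# diffusion_input must be ordered consistently with nx.laplacian_matrix,
# i.e. with network.nodes().
import networkx as nx
import numpy as np

network = nx.karate_club_graph()
diffusion_input = np.zeros(network.number_of_nodes())
diffusion_input[0] = 1.0  # all initial heat on node 0
diffused = heat_diffusion(network, diffusion_input, t=0.1)
# total heat is conserved by graph-Laplacian diffusion exp(-L t)
assert np.isclose(diffused.sum(), diffusion_input.sum())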
def qaoa_step(state, H, n_qubits, params):
    """Returns the result of one QAOA step
    $e^{-1j*params[1]*B}e^{1j*params[0]*H}|state>$

    Args:
    ----------
        state (array): state
        H (array): Hamiltonian of interest
        n_qubits (int): number of qubits
        params: parameters of the step

    Returns:
    ----------
        scipy sparse array: state after application of
        $e^{-1j*params[1]*B}e^{1j*params[0]*H}|state>$
    """
    B = B_operator(n_qubits)
    state = lasp.expm_multiply(1j*params[0]*H, state)
    return lasp.expm_multiply(-1j*params[1]*B, state)
def evolve(self, state: State, time):
    if state.is_ket:
        return State(expm_multiply(-1j * time * self.hamiltonian, state),
                     is_ket=state.is_ket, IS_subspace=state.IS_subspace,
                     code=state.code, graph=self.graph)
    else:
        exp_hamiltonian = expm(-1j * time * self.hamiltonian)
        return State(exp_hamiltonian @ state @ exp_hamiltonian.conj().T,
                     is_ket=state.is_ket, IS_subspace=state.IS_subspace,
                     code=state.code, graph=self.graph)
def nh_evolve(self, state: State, time: float):
    """Non-hermitian time evolution."""
    if state.is_ket:
        return State(expm_multiply(-1j * time * self.nh_hamiltonian, state),
                     is_ket=state.is_ket, IS_subspace=state.IS_subspace,
                     code=state.code, graph=self.graph)
    else:
        temp = expm(-1j * time * self.nh_hamiltonian)
        return State(temp @ state @ temp.conj().T,
                     is_ket=state.is_ket, IS_subspace=state.IS_subspace,
                     code=state.code, graph=self.graph)
def to_discrete_time_mat(a_mat, b_mat, dt, quick=False):
    'convert an a and b matrix to a discrete time version'

    rv_a = None
    rv_b = None

    if quick:
        if not isinstance(a_mat, np.ndarray):
            a_mat = np.array(a_mat, dtype=float)

        rv_a = np.identity(a_mat.shape[0], dtype=float) + a_mat * dt

        if b_mat is not None:
            if not isinstance(b_mat, np.ndarray):
                b_mat = np.array(b_mat, dtype=float)

            rv_b = b_mat * dt
    else:
        # first convert both to csc matrices
        a_mat = csc_matrix(a_mat, dtype=float)
        dims = a_mat.shape[0]

        rv_a = expm(a_mat * dt)
        rv_a = rv_a.toarray()

        if b_mat is not None:
            b_mat = csc_matrix(b_mat, dtype=float)
            rv_b = np.zeros(b_mat.shape, dtype=float)
            inputs = b_mat.shape[1]

            for c in range(inputs):
                # create the a_matrix augmented with a column of the b_matrix as an affine term
                indptr = b_mat.indptr

                data = np.concatenate(
                    (a_mat.data, b_mat.data[indptr[c]:indptr[c + 1]]))
                indices = np.concatenate(
                    (a_mat.indices, b_mat.indices[indptr[c]:indptr[c + 1]]))
                indptr = np.concatenate((a_mat.indptr, [len(data)]))

                aug_a_csc = csc_matrix((data, indices, indptr),
                                       shape=(dims + 1, dims + 1))
                mat = aug_a_csc * dt

                # the last column of matrix_exp is the same as multiplying it
                # by the initial state [0, 0, ..., 1]
                init_state = np.zeros(dims + 1, dtype=float)
                init_state[dims] = 1.0
                col = expm_multiply(mat, init_state)

                rv_b[:, c] = col[:dims]

    return rv_a, rv_b
def plot_entropy_time_evo_lin(spin, N, h, c, phi, start_time, end_start,
                              points):
    """
    This function plots the time evolution of von Neumann entropy
    over a linear time axis.

    Args: "spin" is the spin of the individual particles
          "N" is the system size
          "h" is the strength of the pseudo-random field
          "c" is the angular frequency of the field
          "phi" is the phase shift
          "start_time" is the first point in the plot, in time
          "end_start" is the last point in the plot, in time
          "points" is the number of points to plot
    Returns: "entropy_plot" is a list of values to be plotted.
             "error" is the status of the state choosing function that is
             called from this function. If "error" is True, then no state
             with zero total <Sz> at the target energy density could be
             found for the current configuration.
    """
    D = int(2 * spin + 1) ** N
    Sx, Sy, Sz = qm.init(spin)
    entropy_plot = np.zeros(points)
    delta_t = (end_start - start_time) / (points - 1)

    # The spin 0 block of H
    H = aubryH.blk_full(N, h, c, 0, phi).tocsc()
    E, V = np.linalg.eigh(H.toarray())
    psi, error = aubryC.get_state_blk(H, N)
    # psi = psi.toarray()

    if not error:
        # Plot the first point, which requires a special kind of time evolution.
        psi = expm_multiply(-1j * H * start_time, psi)
        # psi = aubryC.time_evo_exact_diag(E, V, psi, start_time)
        # psi = lil_matrix(psi)
        # psi in the full spin basis
        psi_long = aubryC.recast(N, psi)
        psi_tz = aubryC.spin2z(D, N, psi_long)  # psi in the total Sz basis
        entropy_plot[0] += qm.get_vn_entropy(psi_tz, spin, N, mode='eqsplit')

        U = expm(-1j * H * delta_t)
        psi_time_evolved = psi
        # Plot the rest of the points.
        for plot_point in range(1, points):
            psi_time_evolved = U * psi_time_evolved
            # psi_time_evolved = aubryC.time_evo_exact_diag(E, V, psi_time_evolved, delta_t)
            # psi_time_evolved = lil_matrix(psi_time_evolved)

            # Rewrite the time evolved state in the total Sz basis
            # before passing it onto the entropy function.
            psi_tevo_long = aubryC.recast(N, psi_time_evolved)
            psi_time_evolved_tz = aubryC.spin2z(D, N, psi_tevo_long)
            entropy_plot[plot_point] = qm.get_vn_entropy(
                psi_time_evolved_tz, spin, N, mode='eqsplit')
    return entropy_plot, error
def apply_time_evolution_op(qc, Hcsc, tn, nstates):
    qc_vec = np.array(qc.get_coeff_vec())
    return expm_multiply(Hcsc, qc_vec, start=0.0, stop=tn, num=nstates,
                         endpoint=True)
def time_expm_multiply(self, format):
    if format == 'full':
        # computing full expm of the dense array
        A_expm = scipy.linalg.expm(self.A_dense)
        A_expm[self.i, self.j]
    else:
        # computing only column j of expm of the sparse matrix
        v = np.zeros(self.n, dtype=float)
        v[self.j] = 1
        A_expm_col_j = expm_multiply(self.A, v)
        A_expm_col_j[self.i]
def _evolve_step_2(i, H, t_list, dt_list):
    """This function calculates the evolved state for Periodic Step
    (point 2. in def of 'evo_dict').
    """
    psi0 = _np.zeros((H.Ns,), dtype=_np.complex128)
    psi0[i] = 1.0

    for t, dt in zip(t_list, dt_list):
        psi0 = _sla.expm_multiply(-1j*dt*H.tocsr(t), psi0)

    return psi0
def _evolve_step_1(i, H_list, dt_list):
    """This function calculates the evolved state."""
    psi0 = _np.zeros((H_list[0].Ns,), dtype=_np.complex128)
    psi0[i] = 1.0

    for dt, H in zip(dt_list, H_list):
        psi0 = _sla.expm_multiply(-1j*dt*H.tocsr(), psi0)

    return psi0
def _evolve_step_3(i, H_list, dt_list):
    """This function calculates the evolved state for Periodic Step
    (point 3. in def of 'evo_dict').
    """
    psi0 = _np.zeros((H_list[0].Ns,), dtype=_np.complex128)
    psi0[i] = 1.0

    for dt, H in zip(dt_list, H_list):
        psi0 = _sla.expm_multiply(-1j*dt*H.tocsr(), psi0)

    return psi0
def compare_with_scipy(self, A, v, t):
    start = timer()
    result = expmv(t, A, v)
    end = timer()
    print("Expokit: {:.4f}".format(end - start))

    start = timer()
    scipy_result = expm_multiply(t * A, v)
    end = timer()
    print("expm_multiply: {:.4f}".format(end - start))

    np.testing.assert_allclose(result, scipy_result)
def heat_diffusion(heat, laplacian, start=0, end=0.1):
    """Heat diffusion

    Applies the matrix exponential of the negated graph Laplacian to the
    heat vector and returns the diffused heat at the end time.
    """
    out_vector = expm_multiply(-laplacian, heat, start=start, stop=end,
                               endpoint=True)[-1]
    return out_vector
def _diffuse(self, matrix, heat_array, time):
    """
    :param matrix: matrix to diffuse against (e.g. a graph Laplacian)
    :param heat_array: initial heat values on the nodes
    :param time: diffusion stop time
    :return: diffused heat values at the stop time
    """
    return expm_multiply(-matrix, heat_array, start=0, stop=time,
                         endpoint=True)[-1]
def plot_entropy():
    entropy_plot = np.zeros(sample_size)
    init_delta_t, r = get_init_delta_t(time_range_lower_lim,
                                       time_range_upper_lim, sample_size)
    H, E, psi = get_random_state(Sx, Sy, Sz, spin, N, h, mode='expm')

    # Plot the first point which does not require time evolution.
    entropy_plot[0] += get_vn_entropy(psi, spin, N, mode='eqsplit')

    # Plot the second point which requires the first time evolution.
    current_delta_t = init_delta_t
    psi_time_evolved = expm_multiply(-1j*H*current_delta_t, psi)
    entropy_plot[1] += get_vn_entropy(psi_time_evolved, spin, N,
                                      mode='eqsplit')

    # Plot the rest of the points with time evolution.
    for plot_point in range(2, sample_size):
        delta_delta_t = get_delta_delta_t(time_range_lower_lim, plot_point, r)
        current_delta_t += delta_delta_t
        psi_time_evolved = expm_multiply(-1j*H*current_delta_t,
                                         psi_time_evolved)
        entropy_plot[plot_point] += get_vn_entropy(psi_time_evolved, spin, N,
                                                   mode='eqsplit')
    return entropy_plot
def adiabatic(n, T, M, H_driver, H_problem, ground_state_prob,
              normalise=True, sprs=True):
    N = 2**n
    psiN = np.ones(N) * (1 / np.sqrt(N))

    H = H_driver

    for i in range(1, M + 1):
        t = i * (T / M)
        H = hamiltonian(t, T, H_driver, H_problem)

        if sprs:
            A = -1j * (T / M) * H
            psiN = expm_multiply(A, psiN)
        else:
            U = expm(-1j * (T / M) * H)
            psiN = np.dot(U, psiN)

    return np.abs(np.dot(np.conjugate(ground_state_prob), psiN)) ** 2
def both_ops_dynamics(psi_0, ham, tsteps, dt, dops, mops):
    dops_t = []
    mops_t = []
    psi_t = psi_0
    dops_t.append(_expec_diag_ops(psi_t, dops))
    mops_t.append(_expec_ops(psi_t, mops))
    for _ in range(tsteps - 1):
        t1 = time.time()
        psi_t = expm_multiply(-1j * dt * ham, psi_t)
        t2 = time.time()
        print(t2 - t1)
        dops_t.append(_expec_diag_ops(psi_t, dops))
        mops_t.append(_expec_ops(psi_t, mops))
        t3 = time.time()
        print(t3 - t2)
    return (np.array(dops_t).transpose(), np.array(mops_t).transpose())
def propagate(self, initial, time, normalize=True, dense=False):
    """Propagate an initial probability distribution in time

    Arguments:
        initial      initial probability density function
        time         amount of time to propagate
        normalize    if True, normalize the initial probability
        dense        if True, use dense method of expm
                     (might be faster, at memory cost)
    """
    p0 = initial(*self.grid)
    if normalize:
        p0 /= np.sum(p0)

    if dense:
        pf = expm(self.master_matrix * time) @ p0.flatten()
    else:
        pf = expm_multiply(self.master_matrix * time, p0.flatten())

    return pf.reshape(self.Ngrid)
def edint(self, T):
    """edint: exact diagonalisation
    """
    H = self.H
    psi_0 = self.mps.recombine().reshape(-1)
    H = sum([n_body(a, i, len(H), d=2)
             for i, a in enumerate(H)], axis=0) if not self.fullH else H
    psi_n = psi_0
    self.ed_history = [psi_0]
    dt = T[1]-T[0]
    for t in tqdm(T[:-1]):
        psi_n = expm_multiply(-1j * H*dt, psi_n)
        self.ed_history.append(psi_n)

    self.ed_history = array(self.ed_history)
    self.psi = self.ed_history[-1]
    self.mps = fMPS().left_from_state(
        self.psi.reshape([self.mps.d]*self.mps.L))
    return self
def quantum_walk_hypercube(N, H, psi0, timesteps, normalise):
    psiN = expm_multiply(-(1j) * timesteps * H, psi0)

    prob = np.real(np.conj(psiN) * psiN)

    result = np.zeros(N + 1)
    normalise_array = np.zeros(N + 1)

    for i, probability in enumerate(prob):
        binary_i = bin(i)
        i_ones = [ones for ones in binary_i[2:] if ones == '1']
        num_ones = len(i_ones)
        result[num_ones] += probability
        if normalise:
            normalise_array[num_ones] += 1

    if normalise:
        result = result / normalise_array

    return result
def time_evolution(psi_0, H_ev, **args):
    print('evolution')

    DIM_H = args.get("DIM_H")
    dt = args.get("dt")
    step_num = args.get("step_num")
    t_start = args.get("t_start")

    psi0 = psi_0[:, 0]

    if isinstance(H_ev, sp.sparse.csc.csc_matrix):
        HT = -1j * dt * H_ev
        psit = linalgS.expm_multiply(HT, psi0, start=0, stop=dt * step_num,
                                     num=step_num + 1, endpoint=True)
    else:
        print('dense')
        psit = np.zeros((step_num, DIM_H), dtype=np.complex128)

        HT = np.asarray(-t_start * 1j * H_ev)
        mat_exp = sp.linalg.expm(HT)
        phi = psi0.dot(mat_exp.T)

        HT = np.asarray(-1j * dt * H_ev)
        mat_exp = sp.linalg.expm(HT)

        for tt in range(0, step_num):
            psit[tt] = phi
            phi = phi.dot(mat_exp.T)

    return psit
def test_imag_time(L=20, seed=0):
    np.random.seed(seed)

    basis = spin_basis_1d(L, m=0, kblock=0, pblock=1, zblock=1)

    J = [[1.0, i, (i + 1) % L] for i in range(L)]
    static = [["xx", J], ["yy", J], ["zz", J]]
    H = hamiltonian(static, [], basis=basis, dtype=np.float64)

    (E, ), psi_gs = H.eigsh(k=1, which="SA")
    psi_gs = psi_gs.ravel()

    A = -(H.tocsr() - E * eye(H.Ns, format="csr", dtype=np.float64))
    U = expm_multiply_parallel(A)

    v1 = np.random.normal(0, 1, size=(H.Ns, 10))
    v1 /= np.linalg.norm(v1, axis=0)
    v2 = v1.copy()

    for i in range(100):
        v2 = U.dot(v2)
        v2 /= np.linalg.norm(v2)

        v1 = expm_multiply(A, v1)
        v1 /= np.linalg.norm(v1)

        if (np.all(np.abs(H.expt_value(v2) - E) < 1e-15)):
            break

        # i += 1

    np.testing.assert_allclose(
        v1, v2, rtol=0, atol=5e-15,
        err_msg='imaginary time test failed, seed {:d}'.format(seed))
def timeseries(self, ions=None, start=None, stop=None, num=None,
               endpoint=None, **kwargs):
    # this is TOO inefficient except for small isotope vectors
    # use of identity matrix is not a good choice.
    silent = kwargs.get('silent', False)
    self.setup_logger(silent=silent)
    kwm = dict(start=start, stop=stop, num=num, endpoint=endpoint)
    kw = kwargs.copy()
    from scipy.sparse.linalg import expm_multiply
    _a = self.map
    x = expm_multiply(
        self.a0,
        np.identity(self.a0.shape[0]),
        **kwm)
    out = []
    for a in x:
        self._project(a)
        out += [self.__call__(ions)]
    self.map = _a
    self.close_logger(timing='time series completed in {}.')
    return out
def ExpPert(nQubits, hz, hzz, hx, Psi, T, dt, errchk, eps, outinfo):
    """ Solve using exponential perturbation theory (i.e. Magnus expansion). """

    if outinfo['eigdat'] or outinfo['eigplot']:
        eigspec = []
    if outinfo['overlapdat'] or outinfo['overlapplot']:
        overlap = []

    N = T/dt  # steps
    mingap = None

    # Loop over time
    for i in range(0, int(sp.floor(N))):
        t = i*dt
        t0 = (i-1)*dt

        # Approximate Hamiltonian to first term in Magnus expansion.
        # cz and cx are the integrals of s/T and (1 - s/T) over [t0, t],
        # i.e. the time-averaged coefficients of the linear interpolation.
        cz = (t**2 - t0**2)/(2*T)
        cx = (2*T*(t - t0) + t0**2 - t**2)/(2*T)

        Psi = sla.expm_multiply(-1j*(cx*hx + cz*(hz + hzz)), Psi)
        # This is a HUGE performance loss -- requires sparse to dense
        # A = sla.expm(-1j*(cx*hx + cz*(hz + hzz)))
        # Psi = A*Psi

        # Get eigendecomposition of true Hamiltonian if necessary
        if (errchk or outinfo['mingap'] or outinfo['eigdat'] or
                outinfo['eigplot'] or outinfo['fiddat'] or outinfo['fidplot']):
            # Unfortunately we cannot compute all eigenpairs
            if outinfo['eignum'] == 2**nQubits:
                # This is very expensive!!
                Hvals, Hvecs = sp.linalg.eigh((cx*hx + cz*(hz + hzz)).todense())
            else:
                Hvals, Hvecs = sla.eigsh(cx*hx + cz*(hz + hzz),
                                         k=outinfo['eignum'], which='SA')

            # Sort by eigenvalues
            idx = Hvals.argsort()
            Hvals = Hvals[idx]/dt
            Hvecs = Hvecs[:, idx]

            if mingap is None:
                mingap = [sp.absolute(Hvals[1] - Hvals[0]), t/T]
            elif mingap[0] > sp.absolute(Hvals[1] - Hvals[0]):
                mingap = [sp.absolute(Hvals[1] - Hvals[0]), t/T]

        # Check for numerical error
        if (errchk):
            CheckNorm(t, nQubits, Psi, Hvecs, eps)

        # Construct eigenspectrum datapoint = [t, eigval 1, ... , eigval n]
        if (outinfo['eigdat'] or outinfo['eigplot']):
            eigspec.append(output.ConstructEigData(t, Hvals, outinfo['eignum']))

        if (outinfo['overlapdat'] or outinfo['overlapplot']):
            overlap.append(output.ConstructOverlapData(t, Psi, Hvecs[:, 0]))

        # Output our progress, if specified
        if outinfo['progressout']:
            output.ProgressOutput(t, T, outinfo['outdir'])

        # Output the overlap with pattern vectors
        if outinfo['stateoverlap'] is not None:
            output.StateOverlapOutput(t, outinfo, Psi)

    # Output stuff as needed
    if (outinfo['eigdat']):
        output.RecordEigSpec(eigspec, outinfo['outdir'], outinfo['binary'])
    if (outinfo['eigplot']):
        output.PlotEigSpec(eigspec, outinfo['outdir'], T)

    if (outinfo['overlapdat']):
        output.RecordOverlap(overlap, outinfo['outdir'], T, outinfo['binary'])
    if (outinfo['overlapplot']):
        output.PlotOverlap(overlap, outinfo['outdir'], T)

    if outinfo['stateoverlap'] is not None:
        output.StateOverlapLabelsOutput(t, outinfo)

    return Psi, mingap
def entropy_exp(spin, N, psi_0, H, t):
    '''Using exponentiated Hamiltonian.'''
    psi = expm_multiply(-1j*H*t, psi_0)
    entropy = get_vn_entropy(psi, spin, N, mode='eqsplit')
    return entropy
f.write(str(err)+'\n')

print("Done messing around, now for time-dependence:\n")

#groundState = vecs[:, 0]
#position = 32-4-(L/2-1)
#groundState = groundState[:position]+'1'+state[(position+1):]
#for i in range(0, N):
#    state = format(i, '032b')
#    if state[32-4-(L/2-1)]=='0':
#        groundState[i] = 0

groundState = np.full((N), 1.0)
groundState = groundState/(np.linalg.norm(groundState, 1))

stateTimeSeries = la.expm_multiply(cscRateMatrix, groundState, start=0.0,
                                   num=numTimeSlices, stop=totTime,
                                   endpoint=True)
densTimeSeries = cscDensityMatrix.dot(np.transpose(stateTimeSeries))

entropySeries = []

with open(resultsPlace+'timeSeries.dat', 'w') as f:
    for i in range(0, numTimeSlices):
        # entropy = 0.0
        # entropySeries.append(entropy)
        for j in range(0, L+4):
            f.write(str(totTime*float(i)/(numTimeSlices-1.0))+" "+str(j)+" "
                    + str(np.real(densTimeSeries[j][i]))+"\n")
        entropySeries.append(st.entropy(pk=stateTimeSeries[:, i], base=2.0))
        print("Done step "+str(i+1))

with open(resultsPlace+'entropySeries.dat', 'w') as f:
    for i in entropySeries:
        f.write(str(i)+'\n')
def time_expm_multiply(self):
    # computing only column j of expm of the sparse matrix
    v = np.zeros(self.n, dtype=float)
    v[self.j] = 1
    A_expm_col_j = expm_multiply(self.A, v)
    A_expm_col_j[self.i]
def plot_entropy_time_evo_log(spin, N, h, c, phi, start_time, end_start,
                              points):
    """
    This function plots the time evolution of von Neumann entropy
    over a logarithmic time axis.

    Args: "spin" is the spin of the individual particles
          "N" is the system size
          "h" is the strength of the pseudo-random field
          "c" is the angular frequency of the field
          "phi" is the phase shift
          "start_time" is the first point in the plot, in time
          "end_start" is the last point in the plot, in time
          "points" is the number of points to plot
    Returns: "entropy_plot" is a list of values to be plotted.
             "error" is the status of the state choosing function that is
             called from this function. If "error" is True, then no state
             with zero total <Sz> at the target energy density could be
             found for the current configuration.
    """
    D = int(2 * spin + 1) ** N
    Sx, Sy, Sz = qm.init(spin)
    entropy_plot = np.zeros(points)
    init_delta_t, r = qm.get_init_delta_t(start_time, end_start, points)

    # The spin 0 block of H
    H = aubryH.blk_full(N, h, c, 0, phi).tocsc()

    # Use exact diagonalization for small systems.
    psi, error = aubryC.get_state_blk(H, N)
    dense = False
    if H.get_shape()[0] <= 16:
        dense = True
        H = H.toarray()
        E, V = np.linalg.eigh(H)
        tm = aubryC.time_machine(E, V, psi)

    if not error:
        # Plot the first point, which requires a different kind of time
        # evolution.
        if dense:
            psi_tevo_short = tm.evolve(start_time)
        else:
            psi_tevo_short = expm_multiply(-1j * H * start_time, psi)
        psi_long = aubryC.recast(N, psi_tevo_short)
        psi_tz = aubryC.spin2z(D, N, psi_long)  # psi in the total Sz basis
        entropy_plot[0] += qm.get_vn_entropy(psi_tz, spin, N, mode='eqsplit')

        # Plot the rest of the points with time evolution.
        for plot_point in range(1, points):
            if plot_point == 1:
                current_delta_t, r = qm.get_init_delta_t(start_time,
                                                         end_start, points)
            elif plot_point > 1:
                delta_delta_t = qm.get_delta_delta_t(start_time, plot_point, r)
                current_delta_t += delta_delta_t

            if dense:
                psi_tevo_short = tm.evolve(current_delta_t)
            else:
                psi_tevo_short = expm_multiply(-1j * H * current_delta_t,
                                               psi_tevo_short)
            psi_tevo_long = aubryC.recast(N, psi_tevo_short)
            psi_tevo_tz = aubryC.spin2z(D, N, psi_tevo_long)
            entropy_plot[plot_point] += qm.get_vn_entropy(psi_tevo_tz, spin, N,
                                                          mode='eqsplit')
    return entropy_plot, error
def moran_action(t, v):
    return expm_multiply(rate_matrix(len(v) - 1) * t, v)