def var_gate_exact(top_state, site, bottom_state):
    '''
    Goal:
        Find argmax_{gate} <top_state | gate | bottom_state>,
        where the gate acts on sites (site, site+1).
    Input:
        top_state: (not conjugated yet!)
        site: the gate is applied on (site, site+1)
        bottom_state
    Return:
        new_gate
    '''
    total_dim = top_state.size
    L = int(np.log2(total_dim))

    top_theta = np.reshape(top_state, [(2**site), 4, 2**(L - (site + 2))])
    bottom_theta = np.reshape(bottom_state, [(2**site), 4, 2**(L - (site + 2))])

    # [..., upper_p, ...], [..., lower_p, ...] --> upper_p, lower_p
    M = np.tensordot(top_theta.conj(), bottom_theta, axes=([0, 2], [0, 2]))
    ## If the convention is lower_p, upper_p,
    ## uncomment the following line.
    # M = M.T  # the convention is lower_p, upper_p

    ### For a detailed explanation of the formula, see function var_gate.
    U, _, Vd = misc.svd(M, full_matrices=False)
    new_gate = np.dot(U, Vd).conj()  # [TODO:remove]
    new_gate = new_gate.reshape([2, 2, 2, 2])
    return new_gate
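# Illustrative usage sketch for var_gate_exact (not part of the original
# module): the random vectors below are hypothetical stand-ins for the top
# and bottom states of an optimization sweep, and the call assumes the
# module-level `np` and `misc.svd` helpers are available.
def _demo_var_gate_exact():
    import numpy as np
    L = 6
    top = np.random.randn(2**L) + 1j * np.random.randn(2**L)
    bottom = np.random.randn(2**L) + 1j * np.random.randn(2**L)
    top /= np.linalg.norm(top)
    bottom /= np.linalg.norm(bottom)

    # Gate acting on sites (2, 3); the result comes back as a rank-4 tensor.
    gate = var_gate_exact(top, 2, bottom)
    assert gate.shape == (2, 2, 2, 2)
    return gate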
def get_renyi_n_entanglement(A_list, n):
    '''
    Goal:
        Compute the Renyi-n entanglement entropy at each cut.
    Input:
        A_list: mps in left canonical form
        n: Renyi index (n != 1)
    Output:
        list of bipartite entanglement entropies
        [(0, 1...), (01, 2...), (012, ...)]
    '''
    L = len(A_list)
    copy_A_list = [A.copy() for A in A_list]

    ent_list = [None] * (L - 1)
    for i in range(L - 1, 0, -1):
        d1, chi1, chi2 = copy_A_list[i].shape
        X, Y, Z = misc.svd(np.reshape(np.transpose(copy_A_list[i], [1, 0, 2]),
                                      [chi1, d1 * chi2]),
                           full_matrices=0)
        chi1 = np.sum(Y > 1e-14)
        arg_sorted_idx = (np.argsort(Y)[::-1])[:chi1]
        Y = Y[arg_sorted_idx]
        X = X[:, arg_sorted_idx]
        Z = Z[arg_sorted_idx, :]

        copy_A_list[i] = np.transpose(Z.reshape([chi1, d1, chi2]), [1, 0, 2])
        R = np.dot(X, np.diag(Y))
        # [p, 1l, (1r)] [(2l), 2r]
        new_A = np.tensordot(copy_A_list[i - 1], R, axes=([2], [0]))
        copy_A_list[i - 1] = new_A

        # Renyi-n entropy from the Schmidt values Y at this cut:
        # S_n = log(sum_i Y_i^(2n)) / (1 - n)
        bi_ent = np.log(np.sum(Y**(2 * n))) / (1 - n)
        ent_list[i - 1] = bi_ent

    return ent_list
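# Illustrative usage sketch for get_renyi_n_entanglement (not part of the
# original module): assumes state_2_MPS below returns a left-canonical MPS
# in [p, l, r] form, as this function requires. n=1 is excluded since the
# formula divides by (1 - n).
def _demo_get_renyi_n_entanglement():
    import numpy as np
    L = 6
    psi = np.random.randn(2**L)
    psi /= np.linalg.norm(psi)

    A_list = state_2_MPS(psi, L, chimax=2**(L // 2))   # no truncation at this size
    renyi_2 = get_renyi_n_entanglement(A_list, n=2)
    assert len(renyi_2) == L - 1   # one value per bipartite cut
    return renyi_2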
def operator_2_MPO(op, L, chimax):
    '''
    Input:
        op: the operator in matrix form, of size (2**L, 2**L)
        L: system size
        chimax: the maximum bond dimension
    Return:
        MPO in [p, l, r, q] form
    '''
    op_aR = np.reshape(op, (1, 2**L, 2**L))
    Ms = []
    for n in range(1, L + 1):
        chi_n, dim_R1, dim_R2 = op_aR.shape
        assert dim_R1 == 2**(L - (n - 1))
        op_LR = np.reshape(op_aR, [chi_n * 2, dim_R1 // 2, 2, dim_R2 // 2])
        op_LR = np.transpose(op_LR, [0, 2, 1, 3]).reshape(
            [chi_n * 4, (dim_R1 // 2) * (dim_R2 // 2)])
        M_n, lambda_n, op_tilde = misc.svd(op_LR, full_matrices=False)
        if len(lambda_n) > chimax:
            keep = np.argsort(lambda_n)[::-1][:chimax]
            M_n = M_n[:, keep]
            lambda_n = lambda_n[keep]
            op_tilde = op_tilde[keep, :]

        chi_np1 = len(lambda_n)
        M_n = np.reshape(M_n, (chi_n, 2, 2, chi_np1))
        Ms.append(M_n.transpose([1, 0, 3, 2]))
        op_aR = lambda_n[:, np.newaxis] * op_tilde[:, :]
        op_aR = op_aR.reshape([chi_np1, (dim_R1 // 2), (dim_R2 // 2)])

    assert op_aR.shape == (1, 1, 1)
    Ms[-1] = Ms[-1] * op_aR[0, 0, 0]
    return Ms
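# Illustrative usage sketch for operator_2_MPO (not part of the original
# module): converts a small random operator and inspects the tensor shapes.
# With chimax = 4**(L // 2) no truncation occurs for this system size, so
# the MPO is an exact representation.
def _demo_operator_2_MPO():
    import numpy as np
    L = 4
    op = np.random.randn(2**L, 2**L)
    mpo = operator_2_MPO(op, L, chimax=4**(L // 2))
    for W in mpo:
        p, l, r, q = W.shape   # [p, l, r, q] with p = q = 2
        assert p == 2 and q == 2
    return mpo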
def state_2_MPS(psi, L, chimax):
    '''
    Input:
        psi: the state
        L: the system size
        chimax: the maximum bond dimension
    Return:
        MPS in [p, l, r] form (left canonical)
    '''
    psi_aR = np.reshape(psi, (1, 2**L))
    Ms = []
    for n in range(1, L + 1):
        chi_n, dim_R = psi_aR.shape
        assert dim_R == 2**(L - (n - 1))
        psi_LR = np.reshape(psi_aR, (chi_n * 2, dim_R // 2))
        M_n, lambda_n, psi_tilde = misc.svd(psi_LR, full_matrices=False)
        if len(lambda_n) > chimax:
            keep = np.argsort(lambda_n)[::-1][:chimax]
            M_n = M_n[:, keep]
            lambda_n = lambda_n[keep]
            psi_tilde = psi_tilde[keep, :]

        chi_np1 = len(lambda_n)
        M_n = np.reshape(M_n, (chi_n, 2, chi_np1))
        Ms.append(M_n)
        psi_aR = lambda_n[:, np.newaxis] * psi_tilde[:, :]

    assert psi_aR.shape == (1, 1)
    return lpr_2_plr(Ms)
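# Illustrative check for state_2_MPS (not part of the original module):
# verifies the left-isometry condition sum_{l, p} A*[p, l, r'] A[p, l, r]
# = delta_{r', r}, assuming the output is in [p, l, r] form (i.e. lpr_2_plr
# permutes the SVD tensors from (l, p, r) to (p, l, r)).
def _demo_state_2_MPS():
    import numpy as np
    L = 5
    psi = np.random.randn(2**L) + 1j * np.random.randn(2**L)
    psi /= np.linalg.norm(psi)

    A_list = state_2_MPS(psi, L, chimax=2**(L // 2))
    for A in A_list:
        p, l, r = A.shape
        A_mat = np.transpose(A, [1, 0, 2]).reshape(l * p, r)   # rows = (l, p)
        assert np.allclose(A_mat.conj().T @ A_mat, np.eye(r))
    return A_list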
def right_canonicalize(A_list, no_trunc=False, chi=None, normalized=True):
    '''
    Bring the mps into right canonical form, assuming the input mps is
    already in left canonical form.
    Modifies A_list in place; returns (A_list, total truncation error).
    '''
    L = len(A_list)
    tot_trunc_err = 0.
    for i in range(L - 1, 0, -1):
        d1, chi1, chi2 = A_list[i].shape
        X, Y, Z = misc.svd(np.reshape(np.transpose(A_list[i], [1, 0, 2]),
                                      [chi1, d1 * chi2]),
                           full_matrices=0)
        if no_trunc:
            chi1 = np.size(Y)
        else:
            chi1 = np.sum(Y > 1e-14)

        if chi is not None:
            chi1 = np.amin([chi1, chi])

        trunc_idx = (np.argsort(Y)[::-1])[chi1:]
        trunc_error = np.sum(Y[trunc_idx]**2) / np.sum(Y**2)
        tot_trunc_err = tot_trunc_err + trunc_error

        arg_sorted_idx = (np.argsort(Y)[::-1])[:chi1]
        Y = Y[arg_sorted_idx]
        if normalized:
            Y = Y / np.linalg.norm(Y)

        X = X[:, arg_sorted_idx]
        Z = Z[arg_sorted_idx, :]

        A_list[i] = np.transpose(Z.reshape([chi1, d1, chi2]), [1, 0, 2])
        R = np.dot(X, np.diag(Y))
        # [p, 1l, (1r)] [(2l), 2r]
        new_A = np.tensordot(A_list[i - 1], R, axes=([2], [0]))
        A_list[i - 1] = new_A

    if normalized:
        A_list[0] = A_list[0] / np.linalg.norm(A_list[0])

    return A_list, tot_trunc_err
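# Illustrative usage sketch for right_canonicalize (not part of the original
# module): starts from the left-canonical MPS produced by state_2_MPS above,
# truncates to bond dimension 2 to exercise the truncation-error bookkeeping,
# then checks the right-isometry condition
# sum_{p, r} B[p, l, r] B*[p, l', r] = delta_{l, l'} on every tensor except
# the first, which only carries the overall norm.
def _demo_right_canonicalize():
    import numpy as np
    L = 5
    psi = np.random.randn(2**L)
    psi /= np.linalg.norm(psi)

    B_list, trunc_err = right_canonicalize(state_2_MPS(psi, L, chimax=2**(L // 2)),
                                           chi=2)
    for B in B_list[1:]:
        p, l, r = B.shape
        B_mat = np.transpose(B, [1, 0, 2]).reshape(l, p * r)   # columns = (p, r)
        assert np.allclose(B_mat @ B_mat.conj().T, np.eye(l))
    return B_list, trunc_err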