def check_P_symmetry(config, gap_singlet, gap_triplet):
    def norm_sc(b, a):
        return np.sum(a * b.conj()) / np.sum(np.abs(b ** 2))

    df = config.total_dof
    P = np.ones(df)
    P[np.arange(df // 2, df, 2) + 1] = -1
    P = np.diag(P)
    assert np.allclose(np.eye(df), P.dot(P))

    gs = combine_product_terms(config, gap_singlet)
    gt = combine_product_terms(config, gap_triplet)
    gs = models.xy_to_chiral(gs, 'pairing', config, chiral=True)
    gt = models.xy_to_chiral(gt, 'pairing', config, chiral=True)
    print(np.unique(gs), np.unique(gt))

    delta_s_extended = np.zeros((df, df), dtype=np.complex128)
    delta_s_extended[df // 2:, :df // 2] = gs
    delta_s_extended[:df // 2, df // 2:] = -gs.T

    delta_t_extended = np.zeros((df, df), dtype=np.complex128)
    delta_t_extended[df // 2:, :df // 2] = gt
    delta_t_extended[:df // 2, df // 2:] = -gt.T

    delta_s_extended = P.T.dot(delta_s_extended).dot(P)
    coeff = norm_sc(delta_s_extended.flatten(), delta_t_extended.flatten())
    print(coeff)
    assert np.isclose(np.abs(coeff), 1.0)
    print('P^T {:s} P = {:s}'.format(gap_singlet[-1], gap_triplet[-1]))
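
# Illustrative sketch (not part of the original module): a toy version of the projection
# logic used in check_P_symmetry above. norm_sc(b, a) projects a onto b, so if one gap
# equals the P-conjugated other gap up to a unit-modulus factor, |coeff| = 1. The helper
# name below is hypothetical.
def _sketch_norm_sc_projection(df=8, seed=0):
    rng = np.random.default_rng(seed)

    def norm_sc(b, a):
        return np.sum(a * b.conj()) / np.sum(np.abs(b ** 2))

    P = np.diag(rng.choice([1.0, -1.0], size=df))            # diagonal +/-1 matrix, P^2 = 1
    delta_s = rng.normal(size=(df, df)) + 1.0j * rng.normal(size=(df, df))
    phase = np.exp(1.0j * rng.uniform(0, 2 * np.pi))
    delta_t = phase * P.T.dot(delta_s).dot(P)                 # related by P-conjugation up to a phase
    coeff = norm_sc(P.T.dot(delta_s).dot(P).flatten(), delta_t.flatten())
    assert np.isclose(np.abs(coeff), 1.0)                     # same criterion as in check_P_symmetry
    return coeff
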
def plot_DOS(config):
    model = config.model(config, 0.0, spin=+1.0)[0]
    model = models.xy_to_chiral(model, 'K_matrix', config, config.chiral_basis)  # this option is only valid for the Koshino model

    K_0_plus = model[:, np.arange(0, config.total_dof // 2, 2)]
    K_0_plus = K_0_plus[np.arange(0, config.total_dof // 2, 2), :]
    K_0_minus = model[:, np.arange(1, config.total_dof // 2, 2)]
    K_0_minus = K_0_minus[np.arange(1, config.total_dof // 2, 2), :]

    Ep, _ = np.linalg.eigh(K_0_plus)
    Em, _ = np.linalg.eigh(K_0_minus)

    set_style()
    fig = plt.figure()
    plt.hist(Ep, bins=np.linspace(Ep.min() - 0.1, Ep.max() + 0.1, 300), color='red')
    plt.hist(Em + 0.01, bins=np.linspace(Ep.min() - 0.1, Ep.max() + 0.1, 300), color='blue')  # shifted slightly so both valleys stay visible
    for ep, em in zip(Ep, Em):
        print(ep - em)

    plt.xlabel('$E$')
    plt.ylabel('$dn(E) / dE$')
    plt.grid(True)
    plt.savefig('DoS.pdf')
    plt.show()
    plt.clf()
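
# Illustrative sketch (not part of the original module): plot_DOS above slices the
# chiral-basis K-matrix into its two valley blocks via even/odd indices. The toy below
# (hypothetical helper name) interleaves two valley blocks with K_minus = conj(K_plus)
# and checks that the even/even and odd/odd sub-blocks have identical spectra, which is
# what the ep - em printout above probes.
def _sketch_valley_blocks(n=6, seed=1):
    rng = np.random.default_rng(seed)
    A = rng.normal(size=(n, n)) + 1.0j * rng.normal(size=(n, n))
    K_plus = A + A.conj().T                      # Hermitian valley-plus block
    K_minus = K_plus.conj()                      # valley-minus block, complex conjugate of the plus one
    K = np.zeros((2 * n, 2 * n), dtype=np.complex128)
    even, odd = np.arange(0, 2 * n, 2), np.arange(1, 2 * n, 2)
    K[np.ix_(even, even)] = K_plus
    K[np.ix_(odd, odd)] = K_minus
    Ep = np.linalg.eigvalsh(K[np.ix_(even, even)])
    Em = np.linalg.eigvalsh(K[np.ix_(odd, odd)])
    assert np.allclose(Ep, Em)                   # conj() of a Hermitian matrix has the same spectrum
    return Ep, Em
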
def get_kinetic_orbitals(config, filling):
    Ls = config.Ls
    K0 = config.K_0
    assert config.twist_mesh == 'PBC'

    Tx, Ty = pairings.Tx_symmetry_map, pairings.Ty_symmetry_map
    C3z = np.argmax(np.abs(pairings.C3z_symmetry_map_chiral), axis=0)
    C2y = np.argmax(np.abs(pairings.C2y_symmetry_map_chiral), axis=0)

    # extract the index permutations realising the elementary translations Tx, Ty
    tx, ty = [], []
    for i in range(Tx.shape[0]):
        assert len(np.where(Tx[i, :] == 1)[0]) == 1
        assert len(np.where(Ty[i, :] == 1)[0]) == 1
        tx.append(np.where(Tx[i, :] == 1)[0][0])
        ty.append(np.where(Ty[i, :] == 1)[0][0])
    tx, ty = np.array(tx), np.array(ty)
    np.save('tx.npy', tx)
    np.save('ty.npy', ty)
    assert np.allclose(tx[ty], ty[tx])

    tx_valley = tx[::2] // 2
    ty_valley = ty[::2] // 2
    assert np.allclose(tx_valley[ty_valley], ty_valley[tx_valley])

    valley = np.concatenate([np.array([2 * i + 1, 2 * i]) for i in range(config.Ls ** 2 * 2)])

    path = '/home/astronaut/Documents/all_Imada_formats/'

    ########### writing the spin locations (none) ##########
    f = open(os.path.join(path, 'locspn_{:d}.def'.format(Ls)), 'w')
    f.write('================================\n')
    f.write('NlocalSpin 0\n')
    f.write('================================\n')
    f.write('========i_0LocSpn_1IteElc ======\n')
    f.write('================================\n')
    for i in range(Ls ** 2 * 4):
        f.write(' {:d} 0\n'.format(i))
    f.close()

    symmetries = [np.arange(Ls ** 2 * 4)]

    ########### writing the translational symmetries ##########
    f = open(os.path.join(path, 'qptransidx_{:d}.def'.format(Ls)), 'w')
    f.write('=============================================\n')
    f.write('NQPTrans {:d}\n'.format(len(symmetries)))
    f.write('=============================================\n')
    f.write('======== TrIdx_TrWeight_and_TrIdx_i_xi ======\n')
    f.write('=============================================\n')
    for i in range(len(symmetries)):
        f.write('{:d} 1.00000\n'.format(i))
    for i, symm in enumerate(symmetries):
        for i_from in range(symm.shape[0]):
            f.write(' {:d} {:d} {:d}\n'.format(i, i_from, symm[i_from]))
    f.close()

    from copy import deepcopy

    ########### writing the jastrows ##########
    all_translations = [np.arange(Ls ** 2 * 4)]
    curr_trans = tx.copy()
    all_new_translations = []
    for kx in range(config.Ls - 1):
        new_translations = [symm[curr_trans] for symm in all_translations]
        all_new_translations.append(deepcopy(new_translations))
        curr_trans = curr_trans[tx]
    for d in all_new_translations:
        all_translations += d

    curr_trans = ty.copy()
    all_new_translations = []
    for kx in range(config.Ls - 1):
        new_translations = [symm[curr_trans] for symm in all_translations]
        all_new_translations.append(deepcopy(new_translations))
        curr_trans = curr_trans[ty]
    for d in all_new_translations:
        all_translations += d

    f = open(os.path.join(path, 'jastrowidx_TRSbroken_{:d}.def'.format(Ls)), 'w')
    jastrow_ij, jastrow_k, n_jastrows, matrix_jastrows = get_jastrow_fromshift(
        all_translations, config.Ls,
        np.around(np.array(config.all_distances), decimals=5), dist_threshold=5.)
    np.save('check.npy', matrix_jastrows)  # G)
    assert np.allclose(matrix_jastrows, matrix_jastrows.T)

    matrix_jastrows_trans = matrix_jastrows.copy()
    matrix_jastrows_trans = matrix_jastrows_trans[:, tx]
    matrix_jastrows_trans = matrix_jastrows_trans[tx, :]
    assert np.allclose(matrix_jastrows_trans, matrix_jastrows)

    matrix_jastrows_trans = matrix_jastrows.copy()
    matrix_jastrows_trans = matrix_jastrows_trans[:, ty]
    matrix_jastrows_trans = matrix_jastrows_trans[ty, :]
    assert np.allclose(matrix_jastrows_trans, matrix_jastrows)

    f.write('=============================================\n')
    f.write('NJastrowIdx {:d}\n'.format(n_jastrows + 1))
    f.write('ComplexType {:d}\n'.format(0))
    f.write('=============================================\n')
    f.write('=============================================\n')
    uniques = []
    for i in range(config.Ls ** 2 * 4):
        for j in range(config.Ls ** 2 * 4):
            if i == j:
                continue
            f.write(' {:d} {:d} {:d}\n'.format(i, j, matrix_jastrows[i, j]))
    for i in range(n_jastrows + 1):
        f.write(' {:d} 1\n'.format(i))
    f.close()

    f = open(os.path.join(path, 'InJastrow_TRSbroken_{:d}.def'.format(Ls)), 'w')
    f.write('======================\n')
    f.write('NJastrowIdx {:d}\n'.format(n_jastrows + 1))
    f.write('======================\n')
    f.write('== i_j_JastrowIdx ===\n')
    f.write('======================\n')
    for i in range(n_jastrows):
        f.write('{:d} {:.10f} {:.10f}\n'.format(
            i, np.random.uniform(0.0, 1.0) * 0, np.random.uniform(0.0, 1.0) * 0))
    f.write('{:d} {:.10f} {:.10f}\n'.format(n_jastrows, 0, 0))
    f.close()

    f = open(os.path.join(path, 'gutzwilleridx_{:d}.def'.format(Ls)), 'w')
    f.write('=============================================\n')
    f.write('NGutzwillerIdx {:d}\n'.format(1))
    f.write('ComplexType {:d}\n'.format(0))
    f.write('=============================================\n')
    f.write('=============================================\n')
    for i in range(4 * Ls ** 2):
        f.write(' {:d} {:d}\n'.format(i, 0))  # idx))
    for i in range(1):
        f.write(' {:d} 1\n'.format(i))
    f.close()

    f = open(os.path.join(path, 'InGutzwiller.def'), 'w')
    f.write('======================\n')
    f.write('NGutzwillerIdx {:d}\n'.format(1))
    f.write('======================\n')
    f.write('== i_j_GutzwillerIdx ===\n')
    f.write('======================\n')
    for i in range(1):
        f.write('{:d} {:.10f} {:.10f}\n'.format(
            i, np.random.uniform(0.0, 1.0) * 0, np.random.uniform(0.0, 1.0) * 0))
    f.close()

    ########### writing the modpara ##########
    f = open(os.path.join(path, 'modpara_{:d}_{:d}.def'.format(Ls, filling)), 'w')
    f.write('--------------------\n')
    f.write('Model_Parameters 0\n')
    f.write('--------------------\n')
    f.write('VMC_Cal_Parameters\n')
    f.write('--------------------\n')
    f.write('CDataFileHead zvo\n')
    f.write('CParaFileHead zqp\n')
    f.write('--------------------\n')
    f.write('NVMCCalMode 0\n')
    f.write('--------------------\n')
    f.write('NDataIdxStart 1\n')
    f.write('NDataQtySmp 1\n')
    f.write('--------------------\n')
    f.write('Nsite {:d}\n'.format(Ls ** 2 * 4))
    f.write('Ncond {:d}\n'.format(filling))
    f.write('2Sz 0\n')
    f.write('NSPGaussLeg 8\n')
    f.write('NSPStot 0\n')
    f.write('NMPTrans {:d}\n'.format(1))
    f.write('NSROptItrStep 400\n')
    f.write('NSROptItrSmp 40\n')
    f.write('DSROptRedCut 0.0000001000\n')
    f.write('DSROptStaDel 0.0200000000\n')
    f.write('DSROptStepDt 0.0000020000\n')
    f.write('NVMCWarmUp 400\n')
    f.write('NVMCInterval 1\n')
    f.write('NVMCSample 4000\n')
    f.write('NExUpdatePath 0\n')
    f.write('RndSeed 1\n')
    f.write('NSplitSize 1\n')
    f.write('NStore 0\n')
    f.write('NSRCG 1\n')
    f.close()

    twist = (0, 0.5)
    twist_exp = [np.exp(2 * np.pi * 1.0j * twist[0]),
                 np.exp(2 * np.pi * 1.0j * twist[1])]
    fft = get_fft_APBCy(config.Ls)

    for gap_idx in [40, 9, 36, 14]:  # [0, 32, 43, 17, 11, 19]:  # 40, 43, 9, 36[!!]
        print('gap_idx = ', gap_idx)
        #if config.idx_map[gap_idx] != 13:
        #    continue
        for gap_val in [0.0, 0.003, 0.02]:
            #g = gap_val * np.load('the_wave_extended_{:d}.npy'.format(config.Ls))
            g = gap_val * np.load('/home/astronaut/Documents/DQMC_TBG/gaps_8x8/gap_{:d}.npy'.format(gap_idx))
            # = gap_val * np.load('/home/astronaut/Documents/XQMC/gaps_6x6_extended/twist_0/gap_{:d}.npy'.format(gap_idx))
            #gap_val * np.load('/home/astronaut/Documents/DQMC_TBG/gaps_8x8/gap_{:d}.npy'.format(gap_idx))
            if config.Ls == 6:
                g = models.xy_to_chiral(g, 'pairing', config, True)

            TRS = np.concatenate([np.array([2 * i + 1, 2 * i]) for i in range(g.shape[0] // 2)])
            g_TRS = g[:, TRS]
            g_TRS = g_TRS[TRS, :]
            g = g + g_TRS  # * (0.1 if gap_val == 0.02 else 1.)
            #np.save('sheck.npy', g)
            if gap_idx in [11, 19]:
                g = g / 1.0j
            # g = (g + g.conj()) / np.sqrt(2)
            print(g[0], 'g[0]')
            print(g[1], 'g[1]')
            assert np.allclose(g, g.T)

            swave = 1e-5 * models.xy_to_chiral(
                pairings.combine_product_terms(config, pairings.twoorb_hex_all[1]),
                'pairing', config, True)
            assert np.allclose(swave, swave.T)
            print(swave[0], swave[1], 'swave in chiral basis')
            g = g + swave

            gap = models.apply_TBC(config, twist_exp, deepcopy(g), inverse=False)
            #np.save('the_wave_extended_twisted_{:d}.npy'.format(config.Ls), models.apply_TBC(config, twist_exp, deepcopy(np.load('the_wave_extended_{:d}.npy'.format(config.Ls))), inverse=False))
            #exit(-1)
            gapT = models.apply_TBC(config, twist_exp, deepcopy(g).T, inverse=True)

            gap_fft = fft.T.conj().dot(gap).dot(fft)
            gap_check = gap_fft.copy()
            for i in range(gap_check.shape[0] // 4):
                #print(i % 4, i // 4)
                #(np.abs(np.linalg.eig(gap_check[i * 4:i * 4 + 4, i * 4:i * 4 + 4])[0]), i)
                #print(gap_check[i * 4:i * 4 + 4, i * 4:i * 4 + 4])
                #assert np.allclose(gap_check[i * 4:i * 4 + 4, i * 4:i * 4 + 4], gap_check[i * 4:i * 4 + 4, i * 4:i * 4 + 4].conj().T)
                gap_check[i * 4:i * 4 + 4, i * 4:i * 4 + 4] = 0.0
            assert np.isclose(np.sum(np.abs(gap_check)), 0.0)

            ############ determine required mu_BCS to start ################
            K0_up = models.apply_TBC(config, twist_exp, deepcopy(K0), inverse=False)
            K0_down = models.apply_TBC(config, twist_exp, deepcopy(K0), inverse=True)
            K0_downT = models.apply_TBC(config, twist_exp, deepcopy(K0), inverse=True).T
            K0_upT = models.apply_TBC(config, twist_exp, deepcopy(K0), inverse=False).T
            #print('energies {:d}'.format(config.Ls), np.linalg.eigh(K0_up)[0])
            #exit(-1)

            #### check twist is correct ###
            K0_fft_plus = fft.conj().T.dot(K0_up).dot(fft)
            K0_fft_minus = fft.T.dot(K0_up).dot(fft.conj())

            K0_check = K0_fft_plus.copy()
            for i in range(K0_check.shape[0] // 4):
                K0_check[i * 4:i * 4 + 4, i * 4:i * 4 + 4] = 0.0
            assert np.isclose(np.sum(np.abs(K0_check)), 0.0)

            K0_check = K0_fft_minus.copy()
            for i in range(K0_check.shape[0] // 4):
                K0_check[i * 4:i * 4 + 4, i * 4:i * 4 + 4] = 0.0
            assert np.isclose(np.sum(np.abs(K0_check)), 0.0)

            assert np.allclose(K0_up, K0_up.conj().T)
            assert np.allclose(K0_down, K0_down.conj().T)

            L = K0.shape[0]
            totalM = np.zeros((4 * L, 4 * L), dtype=np.complex128)
            totalM[:L, :L] = K0_up
            totalM[L:2 * L, L:2 * L] = K0_down
            totalM[2 * L:3 * L, 2 * L:3 * L] = -K0_upT
            totalM[3 * L:, 3 * L:] = -K0_downT
            totalM[:L, 3 * L:] = gap
            totalM[L:2 * L, 2 * L:3 * L] = -gapT
            totalM[2 * L:3 * L, L:2 * L] = -gapT.conj().T
            totalM[3 * L:, :L] = gap.conj().T

            energies = np.linalg.eigh(totalM)[0]  # energies with BC twist and gap
            #print(energies)
            mu_BCS = (energies[filling * 2] + energies[filling * 2 - 1]) / 2.
            print(energies[filling * 2], energies[filling * 2 - 1])
            #assert not np.isclose(energies[filling * 2], energies[filling * 2 - 1])
            print('mu_BCS = ', mu_BCS)

            K0_up = K0_up - np.eye(K0_up.shape[0]) * mu_BCS
            K0_upT = K0_upT - np.eye(K0_upT.shape[0]) * mu_BCS
            K0_down = K0_down - np.eye(K0_down.shape[0]) * mu_BCS
            K0_downT = K0_downT - np.eye(K0_downT.shape[0]) * mu_BCS

            L = K0.shape[0]
            totalM = np.zeros((4 * L, 4 * L), dtype=np.complex128)
            totalM[:L, :L] = K0_up
            totalM[L:2 * L, L:2 * L] = K0_down
            totalM[2 * L:3 * L, 2 * L:3 * L] = -K0_upT
            totalM[3 * L:, 3 * L:] = -K0_downT
            totalM[:L, 3 * L:] = gap
            totalM[L:2 * L, 2 * L:3 * L] = -gapT
            totalM[2 * L:3 * L, L:2 * L] = -gapT.conj().T
            totalM[3 * L:, :L] = gap.conj().T

            selected_idxs = np.concatenate([np.arange(0, L), np.arange(3 * L, 4 * L)])
            totalM_updown = totalM[:, selected_idxs]
            totalM_updown = totalM_updown[selected_idxs, ...]
            #totalM_updown = np.zeros((2 * L, 2 * L), dtype=np.complex128)
            #totalM_updown[:L, :L] = K0; totalM_updown[L:, L:] = -K0.T
            #totalM_updown[:L, L:] = gap; totalM_updown[L:, :L] = gap.conj().T

            selected_idxs = np.arange(L, 3 * L)
            totalM_downup = totalM[:, selected_idxs]
            totalM_downup = totalM_downup[selected_idxs, ...]

            TRS = np.concatenate([np.array([2 * i + 1, 2 * i], dtype=np.int64) for i in range(L)])
            #totalM_downup = np.zeros((2 * L, 2 * L), dtype=np.complex128)
            #totalM_downup[:L, :L] = K0; totalM_downup[L:, L:] = -K0.T
            #totalM_downup[:L, L:] = -gap.T; totalM_downup[L:, :L] = -gap.conj()

            en_updown, W_updown = np.linalg.eigh(totalM_updown)
            totalM_updown_TRS = totalM_updown[TRS, ...]
            totalM_updown_TRS = totalM_updown_TRS[..., TRS]
            totalM_updown_TRS = totalM_updown_TRS.conj()
            print('after TRS, discrepancy', np.sum(np.abs(totalM_updown_TRS - totalM_updown)))

            en_downup, W_downup = np.linalg.eigh(totalM_downup)
            assert np.allclose(en_updown, np.sort(-en_updown))
            assert np.allclose(en_downup, np.sort(-en_downup))

            en_total, W = np.linalg.eigh(totalM)
            #print('energies with gap', gap_val, en_total)
            #print('updown energies', en_updown)
            #print('downup energies', en_downup)
            #for en, state in zip(en_downup, W_downup.T):
            #    if np.abs(en + 0.03085819) < 1e-6:
            #        print(en, state)
            #exit(-1)

            for i in range(W_updown.shape[1] // 2):
                v = W_updown[:, i]
                en = en_updown[i]
                v_conj = v * 0.0
                v_conj[:len(v) // 2] = v[len(v) // 2:].conj()
                v_conj[len(v) // 2:] = v[:len(v) // 2].conj()
                en_conj = np.dot(v_conj.conj(), totalM_updown.dot(v_conj)) / np.dot(v_conj.conj(), v_conj)
                #print(en_conj, en, np.dot(v_conj.conj(), v_conj), np.dot(v.conj(), totalM_updown.dot(v)))
                #W_conj.append(v_conj)
                #assert np.isclose(en_conj, -en)
            #exit(-1)

            W_conj = []
            for i in range(W.shape[1] // 2):
                v = W[:, i]
                en = en_total[i]
                v_conj = v * 0.0
                v_conj[:len(v) // 2] = v[len(v) // 2:].conj()
                v_conj[len(v) // 2:] = v[:len(v) // 2].conj()
                en_conj = np.dot(v_conj.conj(), totalM.dot(v_conj))
                #print(en_conj, en)
                W_conj.append(v_conj)
                assert np.isclose(en_conj, -en)
            W_conj = np.array(W_conj).T

            W[:, W.shape[1] // 2:] = W_conj  # make the right form -- but this makes no difference:
            # this is only a rearrangement of the 2nd part of the array, while we only use the 1st part
            # why does W not protect that block form? -- or do we even need this form?
            assert np.allclose(np.diag(W.conj().T.dot(totalM).dot(W)).real,
                               np.diag(W.conj().T.dot(totalM).dot(W)))
            assert np.allclose(np.sort(np.diag(W.conj().T.dot(totalM).dot(W)).real),
                               np.linalg.eigh(totalM)[0])

            # with gap 6, W_pieces does not diagonalize totalM! why?
            W_pieces = np.zeros((4 * L, 4 * L), dtype=np.complex128)
            W_pieces[:L, :L] = W_updown[:L, :L]
            W_pieces[3 * L:, 3 * L:] = W_updown[L:, L:]
            W_pieces[3 * L:, :L] = W_updown[L:, :L]
            W_pieces[:L, 3 * L:] = W_updown[:L, L:]

            W_pieces[L:2 * L, L:2 * L] = W_downup[:L, :L]
            W_pieces[2 * L:3 * L, 2 * L:3 * L] = W_downup[L:, L:]
            W_pieces[2 * L:3 * L, L:2 * L] = W_downup[L:, :L]
            W_pieces[L:2 * L, 2 * L:3 * L] = W_downup[:L, L:]

            #assert np.allclose(np.sort(np.diag(W_pieces.conj().T.dot(totalM).dot(W_pieces)).real), np.sort(np.diag(W_pieces.conj().T.dot(totalM).dot(W_pieces))))
            #assert np.isclose(np.sum(np.abs(W_pieces.conj().T.dot(totalM).dot(W_pieces) - np.diag(np.diag(W_pieces.conj().T.dot(totalM).dot(W_pieces))))), 0.0)
            assert np.isclose(np.sum(np.abs(W.conj().T.dot(totalM).dot(W) -
                                            np.diag(np.diag(W.conj().T.dot(totalM).dot(W))))), 0.0)
            #print(np.sort(np.diag(W.conj().T.dot(totalM).dot(W)).real) - np.sort(np.diag(W_pieces.conj().T.dot(totalM).dot(W_pieces)).real))
            #assert np.allclose(np.sort(np.diag(W.conj().T.dot(totalM).dot(W)).real), \
            #                   np.sort(np.diag(W_pieces.conj().T.dot(totalM).dot(W_pieces)).real))
            #print(np.linalg.det(W_updown), np.linalg.det(W_downup), np.linalg.det(W_updown) * np.linalg.det(W_downup))
            #print(np.linalg.det(W_pieces))
            #print(np.linalg.det(W))

            for i in range(W_updown.shape[1]):
                v = W_updown[:, i]
                en = en_updown[i]
                v_conj = v * 0.0
                v_conj[:len(v) // 2] = v[len(v) // 2:].conj()
                v_conj[len(v) // 2:] = v[:len(v) // 2].conj()
                en_conj = np.dot(v_conj.conj(), totalM_updown.dot(v_conj))
                #print(en_conj, en)
                #assert en_conj == -en

            mask = np.zeros((4 * L, 4 * L), dtype=np.complex128)
            mask[:L, :L] = np.ones((L, L))
            mask[L:2 * L, L:2 * L] = np.ones((L, L))
            mask[2 * L:3 * L, 2 * L:3 * L] = np.ones((L, L))
            mask[3 * L:, 3 * L:] = np.ones((L, L))
            mask[3 * L:, :L] = np.ones((L, L))
            mask[2 * L:3 * L, L:2 * L] = np.ones((L, L))
            mask[L:2 * L, 2 * L:3 * L] = np.ones((L, L))
            mask[:L, 3 * L:] = np.ones((L, L))

            #totalM[:L, :L] = K0; totalM[L:2 * L, L:2 * L] = K0; totalM[2 * L:3 * L, 2 * L:3 * L] = -K0.T; totalM[3 * L:, 3 * L:] = -K0.T
            #totalM[:L, 2 * L:3 * L] = gap; totalM[L:2 * L, 3 * L:] = -gap.T; totalM[3 * L:, L:2 * L] = -gap.conj(); totalM[2 * L:3 * L, :L] = gap.conj().T

            assert np.allclose(totalM, totalM.conj().T)
            # W = np.linalg.eigh(totalM / 2.)[1]
            #print(np.linalg.eigh(totalM / 2.)[0])
            #assert np.sum(np.abs(W - W * mask)) == 0
            #assert np.allclose(W[:W.shape[0] // 2, :W.shape[0] // 2], W[W.shape[0] // 2:, W.shape[0] // 2:].conj())
            #assert np.allclose(W[W.shape[0] // 2:, :W.shape[0] // 2], W[:W.shape[0] // 2, W.shape[0] // 2:].conj())

            Q, V = W[:W.shape[0] // 2, :W.shape[0] // 2], W[W.shape[0] // 2:, :W.shape[0] // 2]
            Z = Q.dot(np.linalg.inv(V))
            print('max U^{-1} = ', np.max(np.abs(np.linalg.inv(Q))), gap_val, gap_idx)
            np.save('Z_fast.npy', Z)
            result = Z[Z.shape[0] // 2:, :Z.shape[0] // 2]

            Z = Z / np.max(np.abs(Z))
            print(np.sum(np.abs(Z[Z.shape[0] // 2:, :Z.shape[0] // 2] +
                                Z[:Z.shape[0] // 2, Z.shape[0] // 2:].T)))
            print(np.sum(np.abs(np.real(Z[Z.shape[0] // 2:, :Z.shape[0] // 2] +
                                        Z[:Z.shape[0] // 2, Z.shape[0] // 2:].T))))
            print(np.sum(np.abs(np.imag(Z[Z.shape[0] // 2:, :Z.shape[0] // 2] +
                                        Z[:Z.shape[0] // 2, Z.shape[0] // 2:].T))))
            assert np.allclose(Z[Z.shape[0] // 2:, :Z.shape[0] // 2],
                               -Z[:Z.shape[0] // 2, Z.shape[0] // 2:].T)
            assert np.allclose(Z[Z.shape[0] // 2:, Z.shape[0] // 2:],
                               Z[Z.shape[0] // 2:, Z.shape[0] // 2:] * 0.0)
            assert np.allclose(Z[:Z.shape[0] // 2, :Z.shape[0] // 2],
                               Z[:Z.shape[0] // 2, :Z.shape[0] // 2] * 0.0)

            ##### preparing orbital idxs and their initial values ####
            vol = 4 * Ls ** 2
            orbital_idxs = -np.ones((vol, vol), dtype=np.int64)

            f_ij = result
            f_ij = f_ij / np.abs(np.max(f_ij))
            np.save('f_ij_fast.npy', f_ij)

            current_orb_idx = 0
            for xshift in range(Ls):
                for yshift in range(Ls):
                    for iorb in range(4):
                        for jorb in range(4):
                            if yshift > 0:
                                for ipos in range(Ls ** 2):
                                    i = ipos * 4 + iorb
                                    oi, si, xi, yi = models.from_linearized_index(
                                        i, config.Ls, config.n_orbitals, config.n_sublattices)
                                    j = models.to_linearized_index(
                                        (xi + xshift) % Ls, (yi + yshift) % Ls, jorb % 2, jorb // 2, Ls, 2, 2)
                                    if yi + yshift > Ls - 1:
                                        orbital_idxs[i, j] = current_orb_idx
                                current_orb_idx += 1
                            for ipos in range(Ls ** 2):
                                i = ipos * 4 + iorb
                                oi, si, xi, yi = models.from_linearized_index(
                                    i, config.Ls, config.n_orbitals, config.n_sublattices)
                                j = models.to_linearized_index(
                                    (xi + xshift) % Ls, (yi + yshift) % Ls, jorb % 2, jorb // 2, Ls, 2, 2)
                                if yi + yshift <= Ls - 1:
                                    orbital_idxs[i, j] = current_orb_idx
                            current_orb_idx += 1
            print('FAST: orbitals after enforcing APBCy remaining:', current_orb_idx)

            for i in range(current_orb_idx):
                values = f_ij.flatten()[orbital_idxs.flatten() == i]
                assert np.isclose(np.std(values - values.mean()), 0.0)

            if np.allclose(f_ij, f_ij.T):
                print('FAST: symmetric f_ij = f_ji (singlet): restricting su(2) parameters')
                for i in range(vol):
                    for j in range(vol):
                        orb_ij = orbital_idxs[i, j]
                        orb_ji = orbital_idxs[j, i]
                        orbital_idxs[i, j] = np.min([orb_ij, orb_ji])
                        orbital_idxs[j, i] = np.min([orb_ij, orb_ji])
                new_orbitals = np.unique(orbital_idxs.flatten())
                mapping = list(np.sort(new_orbitals))
                for i in range(vol):
                    for j in range(vol):
                        orbital_idxs[i, j] = mapping.index(orbital_idxs[i, j])
                for i in range(len(mapping)):
                    values = f_ij.flatten()[orbital_idxs.flatten() == i]
                    assert np.isclose(np.std(values - values.mean()), 0.0)
                print('FAST: total orbitals su(2) with APBCy', len(mapping))
                current_orb_idx = len(mapping)

            TRS = np.concatenate([[2 * i + 1, 2 * i] for i in range(vol // 2)])
            f_trs = f_ij[:, TRS]
            f_trs = f_trs[TRS, :]
            if np.allclose(f_trs, f_ij):
                print('FAST: f_ij = TRS f_ij: restricting TRS parameters')
                for i in range(vol):
                    for j in range(vol):
                        orb_ij = orbital_idxs[i, j]
                        i_trs = ((i // 2) * 2) + (((i % 2) + 1) % 2)
                        j_trs = ((j // 2) * 2) + (((j % 2) + 1) % 2)
                        orb_ij_trs = orbital_idxs[i_trs, j_trs]
                        #print(f_ij[i, j], f_ij[i_trs, j_trs])
                        assert np.isclose(f_ij[i, j], f_ij[i_trs, j_trs])
                        orbital_idxs[i, j] = np.min([orb_ij, orb_ij_trs])
                        orbital_idxs[i_trs, j_trs] = np.min([orb_ij, orb_ij_trs])
                #for i in range(current_orb_idx):
                #    if np.sum(orbital_idxs.flatten() == i) == 0:
                #        print('orbital', i, 'is missing')
                new_orbitals = np.unique(orbital_idxs.flatten())
                mapping = list(np.sort(new_orbitals))
                for i in range(vol):
                    for j in range(vol):
                        orbital_idxs[i, j] = mapping.index(orbital_idxs[i, j])
                for i in range(len(mapping)):
                    values = f_ij.flatten()[orbital_idxs.flatten() == i]
                    assert np.isclose(np.std(values - values.mean()), 0.0)
                print('FAST: total orbitals su(2) with APBCy and TRS!', len(mapping) + 1)
                current_orb_idx = len(mapping)

            np.save('orbital_idxs_fast.npy', orbital_idxs)

            f = open(os.path.join(path, 'InOrbital_extended_{:d}_{:d}_{:d}_{:.4f}.def'.format(
                Ls, gap_idx, filling, gap_val)), 'w')
            f.write('======================\n')
            f.write('NOrbitalIdx {:d}\n'.format(current_orb_idx))
            f.write('======================\n')
            f.write('== i_j_OrbitalIdx ===\n')
            f.write('======================\n')
            for k in range(current_orb_idx):
                mask = (orbital_idxs == k)
                val = np.sum(f_ij * mask) / np.sum(mask)
                f.write('{:d} {:.20f} {:.20f}\n'.format(k, val.real, val.imag))
            f.close()

            ########### writing the orbital indexes ##########
            f = open(os.path.join(path, 'orbitalidx_extended_{:d}_{:d}_{:d}.def'.format(
                Ls, gap_idx, filling)), 'w')
            f.write('=============================================\n')
            f.write('NOrbitalIdx {:d}\n'.format(current_orb_idx))
            f.write('ComplexType {:d}\n'.format(1))
            f.write('=============================================\n')
            f.write('=============================================\n')
            for i in range(config.Ls ** 2 * 4):
                for j in range(config.Ls ** 2 * 4):
                    f.write(' {:d} {:d} {:d}\n'.format(i, j, orbital_idxs[i, j]))
            for i in range(current_orb_idx):
                f.write(' {:d} 1\n'.format(i))
            f.close()

            twist_exp = [np.exp(2.0j * np.pi * twist[0]), np.exp(2.0j * np.pi * twist[1])]
            K_0_twisted = models.apply_TBC(config, twist_exp, deepcopy(K0), inverse=False)

            ########### writing the K-matrix ##########
            K0_up_int = K0_up - np.diag(np.diag(K0_up))
            K0_down_int = K0_down - np.diag(np.diag(K0_down))

            f = open(os.path.join(path, 'trans_{:d}_{:.3f}_{:.3f}.def'.format(Ls, *twist)), 'w')
            f.write('========================\n')
            f.write('NTransfer {:d}\n'.format(2 * np.sum(np.abs(K0_up_int) > 1e-7)))
            f.write('========================\n')
            f.write('========i_j_s_tijs======\n')
            f.write('========================\n')
            for i in range(K_0_twisted.shape[0]):
                for j in range(K_0_twisted.shape[1]):
                    if np.abs(K0_up_int[i, j]) > 1e-7:
                        f.write(' {:d} 0 {:d} 0 {:.6f} {:.6f}\n'.format(
                            i, j, np.real(-K0_up_int[i, j]), np.imag(-K0_up_int[i, j])))
                        f.write(' {:d} 1 {:d} 1 {:.6f} {:.6f}\n'.format(
                            i, j, np.real(-K0_down_int[i, j]), np.imag(-K0_down_int[i, j])))
            f.close()
    return
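
# Illustrative sketch (not part of the original module): the 4L x 4L matrix totalM
# assembled in get_kinetic_orbitals pairs an (up, down-hole) sector with a
# (down, up-hole) sector whose spectrum is its mirror image, so the full BdG spectrum
# is symmetric around zero. The toy below (hypothetical helper name) rebuilds the same
# block pattern with random Hermitian kinetic blocks and a random gap and checks this.
def _sketch_bdg_spectrum_symmetry(L=4, seed=2):
    rng = np.random.default_rng(seed)

    def herm():
        A = rng.normal(size=(L, L)) + 1.0j * rng.normal(size=(L, L))
        return A + A.conj().T

    K_up, K_down = herm(), herm()
    gap = rng.normal(size=(L, L)) + 1.0j * rng.normal(size=(L, L))
    gapT = gap.T
    M = np.zeros((4 * L, 4 * L), dtype=np.complex128)
    M[:L, :L] = K_up
    M[L:2 * L, L:2 * L] = K_down
    M[2 * L:3 * L, 2 * L:3 * L] = -K_up.T
    M[3 * L:, 3 * L:] = -K_down.T
    M[:L, 3 * L:] = gap
    M[L:2 * L, 2 * L:3 * L] = -gapT
    M[2 * L:3 * L, L:2 * L] = -gapT.conj().T
    M[3 * L:, :L] = gap.conj().T
    energies = np.linalg.eigvalsh(M)
    assert np.allclose(energies, np.sort(-energies))   # eigenvalues come in +/- pairs
    return energies
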
def __init__(self, Ls, Ne, irrep_idx):
    ### geometry and general settings ###
    self.Ls = Ls  # spatial size, the lattice will be of size Ls x Ls
    self.Ne = Ne  # Ls ** 2 * 4 - 4 * 7 * 2
    self.BC_twist = True  # apply BC-twist
    self.twist_mesh = 'PBC'
    self.BC = 'PBC'
    self.L_twists_uniform = 6
    self.rank = irrep_idx
    assert self.BC_twist  # this is always true
    self.twist = np.array([1, 1])
    self.n_chains = 6
    assert self.twist[0] == 1 and self.twist[1] == 1  # twist MUST be set to [1, 1] here

    self.model = models.model_hex_2orb_Koshino
    self.chiral_basis = True
    self.K_0, self.n_orbitals, self.n_sublattices = self.model(
        self, 0.0, spin=+1.0, BC=self.BC
    )  # K_0 is the tb-matrix, which before twist and particle-hole is the same for spin-up and spin-down
    self.K_0 = models.xy_to_chiral(self.K_0, 'K_matrix', self, self.chiral_basis)
    # (debug) inspect the K_0 spectrum
    #print(repr(np.linalg.eigh(self.K_0)[0]))
    #exit(-1)
    #print(np.sum(self.K_0) / 0.331)

    check_chirality(self.K_0, self.chiral_basis)
    self.total_dof = self.Ls ** 2 * 2 * self.n_sublattices * self.n_orbitals
    self.far_indices = models._jit_get_far_indices(self.Ls, self.total_dof,
                                                   self.n_sublattices, self.n_orbitals)

    Efull, _ = np.linalg.eigh(self.K_0)
    K_0_plus = self.K_0[:, np.arange(0, self.total_dof // 2, 2)]
    K_0_plus = K_0_plus[np.arange(0, self.total_dof // 2, 2), :]
    K_0_minus = self.K_0[:, np.arange(1, self.total_dof // 2, 2)]
    K_0_minus = K_0_minus[np.arange(1, self.total_dof // 2, 2), :]
    assert np.allclose(K_0_plus, np.conj(K_0_minus))
    Eplus, _ = np.linalg.eigh(K_0_plus)
    Eminus, _ = np.linalg.eigh(K_0_minus)
    assert np.allclose(Efull, np.sort(np.concatenate([Eplus, Eminus])))

    self.adjacency_list, self.longest_distance = models.get_adjacency_list(self, BC=self.BC)

    ### interaction parameters ###
    self.epsilon = 5
    self.xi = 0.10
    self.hamiltonian = hamiltonians_vmc.hamiltonian_Koshino
    self.U = 1.0

    ### density VQMC parameters ###
    self.valley_imbalance = 0
    self.enforce_particle_hole_orbitals = False
    self.use_preassigned_orbitals = False
    # self.preassigned_orbitals_path = '/home/astronaut/Documents/DQMC_TBG/logs/x11/saved_orbital_indexes.npy'
    self.valley_projection = True  # project onto valley imbalance = ...
    self.PN_projection = True  # False  # if PN_projection = False, work in the Grand Canonical approach, otherwise the Canonical approach

    ### other parameters ###
    self.visualisation = False
    self.workdir = '/users/nastrakh/DQMC_TBG/logs/9x9_regnew/'
    self.tests = False
    self.test_gaps = False
    self.n_cpus = self.n_chains  # the number of processors to use | -1 -- take as many as available
    self.load_parameters = True
    self.load_parameters_path = None
    self.offset = 0
    self.all_distances = models.get_distances_list(self, BC=self.BC)

    ### hoppings parameters setting ###
    all_Koshino_hoppings_real = []  # hoppings.obtain_all_hoppings_Koshino_real(self, pairings)[1:]  # exclude the mu_BCS term
    all_Koshino_hoppings_complex = []  # hoppings.obtain_all_hoppings_Koshino_complex(self, pairings)
    self.hoppings = []  # [h[-1] + 0.0j for h in all_Koshino_hoppings_real + all_Koshino_hoppings_complex]
    self.hopping_names = []  # [h[0] for h in all_Koshino_hoppings_real + all_Koshino_hoppings_complex]
    for h, name in zip(self.hoppings, self.hopping_names):
        projection = np.trace(np.dot(self.K_0.conj().T, h)) / np.trace(np.dot(h.conj().T, h))
        print(projection, name)

    ### variational parameters settings ###
    pairings.obtain_all_pairings(self)  # the pairings are constructed without twist
    self.idx_map = []
    self.pairings_list = pairings.Koshino_united[irrep_idx]
    self.pairings_list_names = [p[-1] for p in self.pairings_list]
    self.pairings_list_unwrapped = [pairings.combine_product_terms(self, gap)
                                    for gap in self.pairings_list]
    self.pairings_list_unwrapped = [models.xy_to_chiral(g, 'pairing', self, self.chiral_basis)
                                    for g in self.pairings_list_unwrapped]

    ### SDW/CDW parameters setting ###
    waves.obtain_all_waves(self)
    self.waves_list = waves.hex_2orb
    self.waves_list_unwrapped = [w[0] for w in self.waves_list]
    self.waves_list_names = [w[-1] for w in self.waves_list]

    self.enforce_valley_orbitals = False
    self.adjacency_transition_matrix = models.get_transition_matrix(
        self.PN_projection, self.K_0, self.n_orbitals,
        valley_conservation_K=self.valley_projection,
        valley_conservation_Delta=self.enforce_valley_orbitals)
    print(self.adjacency_transition_matrix)

    ### jastrow parameters setting ###
    jastrow.obtain_all_jastrows(self)
    #self.jastrows_list = jastrow.jastrow_Koshino_Gutzwiller
    self.jastrows_list = jastrow.jastrow_Koshino_simple  # [:20]
    print(len(self.jastrows_list))
    self.jastrows_list_names = [j[-1] for j in self.jastrows_list]
    print(self.jastrows_list_names)

    ### optimisation parameters ###
    self.MC_chain = 400000
    self.MC_thermalisation = 100000
    self.opt_raw = 1500
    self.optimisation_steps = 10000
    self.thermalization = 100000
    self.obs_calc_frequency = 20
    # thermalisation = steps without observables measurement | obs_calc_frequency -- how often to calculate observables (in opt steps)
    self.correlation = (self.total_dof // 2) * 5
    self.observables_frequency = self.MC_chain // 3  # how often to compute observables
    self.opt_parameters = [1e-1, 0.001, 1.00]
    # regularizer for the S_stoch matrix | learning rate | MC_chain increasement rate
    self.n_delayed_updates = 1
    self.generator_mode = True
    self.condensation_energy_check_regime = False  # True

    ### regularisation ###
    if not self.enforce_valley_orbitals:
        reg_valley = np.array([[-0.41341, -0.64892], [-0.9587, 0.64063]])
        self.reg_gap_term = np.kron(np.eye(self.total_dof // 2 // 2), reg_valley)
        #models.xy_to_chiral(pairings.combine_product_terms(self, pairings.twoorb_hex_all[1][0]), 'pairing', \
        #                    self, self.chiral_basis) \
        #+ models.xy_to_chiral(pairings.combine_product_terms(self, pairings.twoorb_hex_all[9][0]), 'pairing', \
        #                      self, self.chiral_basis)
    else:
        self.reg_gap_term = models.xy_to_chiral(
            pairings.combine_product_terms(self, pairings.twoorb_hex_all[9][0]),
            'pairing', self, self.chiral_basis)
    self.reg_gap_val = 0.0005  # if not self.condensation_energy_check_regime else 0.

    ### initial values definition and layout ###
    self.layout = np.array([1, 0, len(self.waves_list_names),
                            len(self.pairings_list_names), len(self.jastrows_list)])
    self.initial_parameters = np.concatenate([
        np.array([0.0]),  # mu_BCS
        #np.array([0.0] if not self.PN_projection else []),  # fugacity
        np.array([]),  # no fugacity
        np.random.uniform(2.5e-2, 2.5e-2, size=self.layout[2]),  # waves
        np.random.uniform(1.3e-2, 1.3e-2, size=self.layout[3]),  # gaps
        np.random.uniform(0.0, 0.0, size=self.layout[4]),  # jastrows
    ])
    #self.initial_parameters[-self.layout[4]:-self.layout[4] + 16] = np.array([7.3187606e-02, 6.0324221e-01, 1.3468416e-01, 6.1753768e-01, 2.0022604e-01, 2.9362039e-01, 2.2077280e-01, 2.6178142e-01, 2.8158927e-02, 7.2660561e-02, 1.5578811e-02, 6.2218900e-02, 8.2826054e-03, 4.8288188e-02, -4.7837225e-03, 5.2365285e-02])
    '''
    self.parameter_fixing = np.concatenate([
        np.array([None]),  # mu_BCS
        np.array([] if not self.PN_projection else []),  # fugacity
        np.array([False] * self.layout[2]),  # waves
        np.array([False] * self.layout[3]),  # gaps
        np.array([False] * self.layout[4]),  # jastrows
    ])
    '''
    self.all_names = np.concatenate([
        np.array(['mu_BCS']),  # mu_BCS
        np.array(self.waves_list_names),  # hopping matrices
        np.array(self.pairings_list_names),  # gaps
        np.array(self.jastrows_list_names),  # jastrows
    ])
    self.all_clips = np.concatenate([
        np.ones(self.layout[0]) * 3e+4,  # mu_BCS
        #np.array([3e+4] if not self.PN_projection else []),  # fugacity
        np.ones(self.layout[2]) * 3e+4,  # waves
        np.ones(self.layout[3]) * 3e+4,  # gaps
        np.ones(self.layout[4]) * 3e+4,  # jastrows
    ])

    self.initial_parameters[:self.layout[0]], self.mu_BCS_min, self.mu_BCS_max = \
        self.select_initial_muBCS_Koshino(self.Ne)
    print(self.initial_parameters[:self.layout[0]], self.mu_BCS_min, self.mu_BCS_max)
    self.mu = 0.0  # -1.2  # self.initial_parameters[0]
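
# Illustrative sketch (not part of the original module): the variational parameter
# vector built in __init__ is laid out as [mu_BCS | fugacity | waves | gaps | jastrows]
# with block sizes given by self.layout. The hypothetical helper below shows the slicing
# convention such a layout implies; the real unpacking lives in config.unpack_parameters
# elsewhere in the code base.
def _sketch_unpack_by_layout(parameters, layout):
    bounds = np.concatenate([[0], np.cumsum(layout)])
    mu_BCS = parameters[bounds[0]:bounds[1]]
    fugacity = parameters[bounds[1]:bounds[2]]   # empty when layout[1] == 0
    waves = parameters[bounds[2]:bounds[3]]
    gaps = parameters[bounds[3]:bounds[4]]
    jastrows = parameters[bounds[4]:bounds[5]]
    return mu_BCS, fugacity, waves, gaps, jastrows
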
n_copy = config.n_copy
#config.nu_V = np.sqrt(V * config.dt / 2)  # np.arccosh(np.exp(V / 2. * config.dt)); this is almost sqrt(V t)
#config.nu_U = np.arccosh(np.exp((U / 2. + V / 2.) * config.dt))
#assert V == U
config.nu_U = np.sqrt(config.dt / 2 * U)
config.nu_V = 0.  # np.sqrt(config.dt / 2 * V)

K_matrix = config.model(config, 0.0)[0]
K_matrix -= np.eye(K_matrix.shape[0]) * config.mu

### application of real TBCs ###
real_twists = [[1., 1.], [-1., 1.], [1., -1.], [-1., -1.]]
twist = real_twists[0]  # [(rank + config.offset) % len(real_twists)]  # each rank knows its twist

K_matrix = models.xy_to_chiral(K_matrix, 'K_matrix', config, config.chiral_basis)
#np.save('K_matrix.npy', K_matrix)
#exit(-1)
#K_matrix = K_matrix.real + 1.0j * Kim.imag  # FIXME FIXME FIXME
#print(repr(K_matrix))
#assert config.mu == 0
#assert np.allclose(K_matrix.imag, K_matrix * 0.)
assert np.allclose(K_matrix, K_matrix.conj().T)
K_matrix = -K_matrix  # this is to agree with ED; it is just a unitary transform in terms of the particle-hole transformation
print(repr(K_matrix))
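
# Illustrative sketch (not part of the original module): the commented-out lines above
# relate the discrete Hubbard-Stratonovich coupling arccosh(exp(U dt / 2)) to a
# square-root form for small dt, since arccosh(exp(x)) ~ sqrt(2x) as x -> 0. The
# hypothetical check below makes that expansion explicit.
def _sketch_hs_coupling_expansion(U=1.0, dt=1e-3):
    x = U * dt / 2.
    nu_exact = np.arccosh(np.exp(x))
    nu_approx = np.sqrt(2. * x)          # = sqrt(U * dt)
    assert np.isclose(nu_exact, nu_approx, rtol=1e-2)
    return nu_exact, nu_approx
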
def check_irrep_properties(config, irrep, term_type='pairing', chiral=False):
    if not config.test_gaps:
        return
    #if term_type == 'pairing':
    #    return
    global C2y_symmetry_map, C3z_symmetry_map, C4z_symmetry_map
    global C2y_symmetry_map_chiral, C3z_symmetry_map_chiral
    global name_group_dict

    if not config.tests:
        return

    if chiral:
        reflection = C2y_symmetry_map_chiral
    else:
        reflection = C2y_symmetry_map

    if config.n_sublattices == 2:
        if chiral:
            rotation = C3z_symmetry_map_chiral
        else:
            rotation = C3z_symmetry_map
    else:
        rotation = C4z_symmetry_map

    def norm_sc(b, a):
        return np.sum(a * b.conj()) / np.sum(np.abs(b ** 2))

    def in_close(a, array):
        return np.any(np.abs(array - a) < 1e-5)

    for irr in irrep:
        print(irr[-1])
        pairing_group = 0
        if term_type == 'pairing':
            print('λ_spin = {:d}'.format(check_parity(config, irr)))
            pairing_group += (check_parity(config, irr) + 1) // 2

        gap = combine_product_terms(config, irr) if type(irr[0]) == tuple else irr[0]
        gap = models.xy_to_chiral(gap, term_type, config, chiral=chiral)  # can do nothing or make the chiral transform

        if term_type != 'pairing':
            gap_image = reflection.dot(gap).dot(reflection.conj().T)
        else:
            gap_image = reflection.dot(gap).dot(reflection.T)
        norm = np.sum(np.abs(gap_image ** 2))
        gap_image = gap_image.flatten()

        for irr_decompose in irrep:
            gap_decompose = combine_product_terms(config, irr_decompose) if type(irr_decompose[0]) == tuple else irr_decompose[0]
            gap_decompose = models.xy_to_chiral(gap_decompose, term_type, config, chiral=chiral)  # can do nothing or make the chiral transform
            coeff = norm_sc(gap_decompose.flatten(), gap_image)
            # print('<{:s}|M|{:s}> = '.format(irr[-1], irr_decompose[-1]) + str(coeff))
            if np.abs(coeff) > 1e-5:
                if not (np.isclose(coeff, 1) or np.isclose(coeff, -1)):
                    print('Strange eigenvalue M')
                    exit(-1)
                print('λ_M = {:d}'.format(int(np.sign(coeff))))
                pairing_group += (int(np.sign(coeff)) + 1) // 2 * 2
            gap_image = gap_image - gap_decompose.flatten() * coeff
        norm = np.sum(np.abs(gap_image ** 2))
        assert norm < 1e-5

        if term_type != 'pairing':
            gap_image = rotation.dot(gap).dot(rotation.conj().T)
        else:
            gap_image = rotation.dot(gap).dot(rotation.T)
        norm = np.sum(np.abs(gap_image ** 2))
        gap_image = gap_image.flatten()

        for irr_decompose in irrep:
            gap_decompose = combine_product_terms(config, irr_decompose) if type(irr_decompose[0]) == tuple else irr_decompose[0]
            gap_decompose = models.xy_to_chiral(gap_decompose, term_type, config, chiral=chiral)  # can do nothing or make the chiral transform
            coeff = norm_sc(gap_decompose.flatten(), gap_image.flatten())
            if np.abs(coeff) > 1e-5:
                if np.isclose(coeff, np.exp(2.0j * np.pi / 3)):
                    print('λ_R = ω')
                    pairing_group += 4
                elif np.isclose(coeff, np.exp(-2.0j * np.pi / 3)):
                    print('λ_R = ω*')
                    pairing_group += 8
                elif np.isclose(coeff, 1):
                    print('λ_R = 1')
                    pairing_group += 0
                elif np.isclose(coeff, -1. / 2.):
                    print('<{:s}|R|{:s}> = -1/2'.format(irr_decompose[-1], irr[-1]))
                elif np.isclose(coeff, np.sqrt(3) / 2.):
                    print('<{:s}|R|{:s}> = sqrt(3)/2'.format(irr_decompose[-1], irr[-1]))
                elif np.isclose(coeff, -np.sqrt(3) / 2.):
                    print('<{:s}|R|{:s}> = -sqrt(3)/2'.format(irr_decompose[-1], irr[-1]))
                else:
                    print('Strange eigenvalue λ_R, chiral = ', chiral, coeff)
                    exit(-1)
            gap_image = gap_image - gap_decompose.flatten() * coeff
        norm = np.sum(np.abs(gap_image ** 2))
        assert norm < 1e-5

        print('group = {:d}'.format(pairing_group))
        if len(irr) > 2:
            name_group_dict[irr[-1]] = pairing_group  # only if not waves
    print('test passed')
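
# Illustrative sketch (not part of the original module): check_irrep_properties packs
# three symmetry eigenvalues into the integer pairing_group -- bit 0 from the spin
# parity, bit 1 from the reflection eigenvalue, and the remaining contribution
# (0, 4 or 8) from the C3 rotation eigenvalue (1, ω, ω*). The hypothetical decoder
# below just inverts that packing.
def _sketch_decode_pairing_group(group):
    spin_parity = +1 if (group % 2) == 1 else -1
    reflection = +1 if ((group // 2) % 2) == 1 else -1
    rotation = {0: '1', 1: 'ω', 2: 'ω*'}[group // 4]
    return spin_parity, reflection, rotation
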
def get_MFH(config, mu_given=None, gap_given=None, only_free=False):
    K_up = config.model(config, config.mu, spin=+1.0)[0]
    K_up = models.xy_to_chiral(K_up, 'K_matrix', config, config.chiral_basis)
    K_up = models.apply_TBC(config, config.twist, deepcopy(K_up), inverse=False)

    K_down = config.model(config, config.mu, spin=-1.0)[0].T
    K_down = models.xy_to_chiral(K_down, 'K_matrix', config, config.chiral_basis)
    K_down = models.apply_TBC(config, config.twist, deepcopy(K_down), inverse=True)

    mu, fugacity, waves, gap, jastrow = config.unpack_parameters(config.initial_parameters)

    C2y = pairings.C2y_symmetry_map_chiral
    C2y = np.kron(np.eye(2), C2y)
    C3z = pairings.C3z_symmetry_map_chiral
    C3z = np.kron(np.eye(2), C3z)

    if mu_given is not None:
        Deltas_twisted = [models.apply_TBC(config, config.twist, deepcopy(gap), inverse=False)
                          for gap in config.pairings_list_unwrapped]
        Delta = pairings.get_total_pairing_upwrapped(config, Deltas_twisted, gap)
        reg = models.apply_TBC(config, config.twist, deepcopy(config.reg_gap_term),
                               inverse=False) * config.reg_gap_val
        Ts = []
        for dmu in mu_given:
            T = scipy.linalg.block_diag(K_up - np.eye(K_up.shape[0]) * (mu + dmu),
                                        -(K_down - np.eye(K_down.shape[0]) * (mu + dmu))) + 0.0j
            T[:config.total_dof // 2, config.total_dof // 2:] = Delta + reg
            T[config.total_dof // 2:, :config.total_dof // 2] = Delta.conj().T + reg.conj().T
            Ts.append(T.copy() * 1.)

            Deltaonly = T * 0.0
            Deltaonly[:config.total_dof // 2, config.total_dof // 2:] = Delta
            Deltaonly[config.total_dof // 2:, :config.total_dof // 2] = Delta.conj().T

            eigenvalues, eigenvectors = np.linalg.eigh(Ts[-1])
            idxs = np.argsort(np.abs(eigenvalues))[:2]
            # print(eigenvalues[idxs])
            vplus = eigenvectors[:, idxs[0]]
            vminus = eigenvectors[:, idxs[1]]
            print(np.dot(vplus.conj(), C3z.dot(vplus)))
            print(np.dot(vminus.conj(), C3z.dot(vminus)))
            #print(C3z.shape)
            #print(Deltaonly.shape)
            print(np.trace(Delta.T.conj().dot(pairings.C3z_symmetry_map_chiral.dot(
                      Delta.dot(pairings.C3z_symmetry_map_chiral.T))))
                  / np.trace(np.dot(Delta, Delta.conj().T)))
            print(np.abs(np.dot(vplus.conj(), np.dot(Deltaonly, vminus))),
                  np.abs(np.dot(vplus.conj(), vminus)), eigenvalues[idxs], idxs, dmu,
                  np.trace(np.dot(Deltaonly, Deltaonly.T.conj())))
            #exit(-1)
        return Ts

    K_up -= np.eye(K_up.shape[0]) * mu
    K_down -= np.eye(K_down.shape[0]) * mu
    T = scipy.linalg.block_diag(K_up, -K_down) + 0.0j
    if only_free:
        return T

    if gap_given is None:
        Delta = pairings.get_total_pairing_upwrapped(config, config.pairings_list_unwrapped, gap)
        T[:config.total_dof // 2, config.total_dof // 2:] = Delta
        T[config.total_dof // 2:, :config.total_dof // 2] = Delta.conj().T
        return T
    else:
        Ts = []
        Delta = pairings.get_total_pairing_upwrapped(config, config.pairings_list_unwrapped, [1])
        for gap in gap_given:
            T_gap = T.copy() * 1.0
            T_gap[:config.total_dof // 2, config.total_dof // 2:] = Delta * gap
            T_gap[config.total_dof // 2:, :config.total_dof // 2] = Delta.conj().T * gap
            Ts.append(T_gap.copy() * 1.0)
        return Ts
def plot_all_waves(config):
    for wave, name in zip(config.waves_list, config.waves_list_names):
        wave_chiral = models.xy_to_chiral(wave[0], 'wave', config, config.chiral_basis)
        plot_wave(config, wave_chiral, name)
def plot_all_pairings(config):
    for gap, name in zip(config.pairings_list, config.pairings_list_names):
        gap_expanded = models.xy_to_chiral(
            pairings.combine_product_terms(config, gap), 'pairing', config, config.chiral_basis)
        plot_pairing(config, gap_expanded, name)