def __init__(self, inputfile):
    # load data from the HDF5 result file
    self.nup = assy_vec(
        pyalps.loadEigenstateMeasurements([inputfile], what='Nup')[0][0])
    self.ndown = assy_vec(
        pyalps.loadEigenstateMeasurements([inputfile], what='Ndown')[0][0])
    self.dmup = assy_hc(
        self.nup,
        pyalps.loadEigenstateMeasurements([inputfile], what='dm_up')[0][0])
    self.dmdown = assy_hc(
        self.ndown,
        pyalps.loadEigenstateMeasurements([inputfile], what='dm_down')[0][0])
def load_2rdm(inputfile):
    # load data from the HDF5 result file
    rdm = pyalps.loadEigenstateMeasurements([inputfile], what='twoptdm')[0][0]
    rdm.y[0] = 0.5 * rdm.y[0]
    # uncomment for CASPT2 comparison
    # rdm.y[0] = rdm.y[0]
    return rdm
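# Minimal usage sketch for load_2rdm(); the archive name 'dmrg.task1.out.h5'
# is an assumption -- any ALPS DMRG result file containing a 'twoptdm'
# measurement would be handled the same way.
if __name__ == '__main__':
    rdm2 = load_2rdm('dmrg.task1.out.h5')
    # each entry pairs an index tuple rdm2.x[k] with a value rdm2.y[0][k]
    for idx, val in zip(rdm2.x, rdm2.y[0]):
        print(idx, val)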
def run_dmrg(nsite, J2):
    # prepare the input parameters
    parms = [{
        'LATTICE_LIBRARY': 'j1j2_%d.xml' % nsite,
        'LATTICE': 'J1J2',
        'MODEL': 'spin',
        'local_S0': '0.5',  # local_S0 sets the spin of type-0 sites
        'CONSERVED_QUANTUMNUMBERS': 'N,Sz',
        'Sz_total': 0,
        'J0': 1,
        'J1': J2,
        'SWEEPS': 4,
        'NUMBER_EIGENVALUES': 1,
        'MAXSTATES': 400
    }]

    # write the input file and run the simulation
    prefix = 'data/j1j2_%dJ2%s' % (nsite, J2)
    input_file = pyalps.writeInputFiles(prefix, parms)
    res = pyalps.runApplication('dmrg', input_file, writexml=True)

    # load all measurements for all states
    data = pyalps.loadEigenstateMeasurements(
        pyalps.getResultFiles(prefix=prefix))

    # print properties of the eigenvector for each run:
    for run in data:
        for s in run:
            print('%s : %s' % (s.props['observable'], s.y[0]))
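# Hedged usage sketch: run_dmrg() assumes that a lattice file 'j1j2_<nsite>.xml'
# and a 'data/' output directory already exist; both are conventions of the
# surrounding project, not something run_dmrg creates itself.
if __name__ == '__main__':
    run_dmrg(16, 0.5)  # 16-site J1-J2 chain with J2/J1 = 0.5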
def __init__(self, inputfile):
    self.loc_n = pyalps.loadEigenstateMeasurements([inputfile],
                                                   what='N')[0][0].y[0]
    self.norb = len(self.loc_n)

    DMRG_Parms = pyalps.getParameters([inputfile])
    # list() keeps this working under Python 3, where map() returns an iterator
    orbital_order = list(map(int, DMRG_Parms[0]['orbital_order'].split(',')))
    inv_order = []
    for i in range(self.norb):
        inv_order.append(orbital_order.index(i + 1))
    self.orb_order = inv_order

    empty_diag = np.zeros(self.norb)
    self.corr_cdag_c = assy_hc(
        empty_diag,
        pyalps.loadEigenstateMeasurements([inputfile], what='dm')[0][0])
    self.corr_docc = assy_hc(
        empty_diag,
        pyalps.loadEigenstateMeasurements([inputfile], what='doccdocc')[0][0])
def load_1spdm(inputfile):
    """Construct the one-particle spin-density matrix.

    The full symmetric matrix is assembled from the measured diagonal
    (Nup/Ndown) and the upper triangle (dm_up/dm_down, stored as
    sequential reversed rows).
    """
    diagup = pyalps.loadEigenstateMeasurements([inputfile], what='Nup')[0][0]
    diagdown = pyalps.loadEigenstateMeasurements([inputfile], what='Ndown')[0][0]
    triangup = pyalps.loadEigenstateMeasurements([inputfile], what='dm_up')[0][0]
    triangdown = pyalps.loadEigenstateMeasurements([inputfile], what='dm_down')[0][0]

    # build the full matrices from the diagonal and the upper triangle
    dmu = assemble_halfcorr(diagup.y[0], triangup)
    dmd = assemble_halfcorr(diagdown.y[0], triangdown)

    # the spin-density matrix is the difference of the spin-resolved matrices
    ds = dmu - dmd
    return ds
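# Minimal usage sketch for load_1spdm(); the file name 'dmrg.task1.out.h5' is
# an assumption, and assemble_halfcorr is assumed to return a NumPy array.
# Since the result is dm_up - dm_down, its trace is <n_up - n_down>, i.e. 2<Sz>.
if __name__ == '__main__':
    spin_dm = load_1spdm('dmrg.task1.out.h5')
    print('Tr(1-particle spin-density matrix) =', spin_dm.trace())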
def __init__(self, inputfile):
    self.loc_nup = pyalps.loadEigenstateMeasurements([inputfile],
                                                     what='Nup')[0][0].y[0]
    self.loc_ndown = pyalps.loadEigenstateMeasurements(
        [inputfile], what='Ndown')[0][0].y[0]
    self.loc_nupdown = pyalps.loadEigenstateMeasurements(
        [inputfile], what='Nupdown')[0][0].y[0]
    self.loc_nup_nup = pyalps.loadEigenstateMeasurements(
        [inputfile], what='nupnup')[0][0].y[0]
    self.loc_ndown_nup = pyalps.loadEigenstateMeasurements(
        [inputfile], what='nupndown')[0][0].y[0]
    self.loc_nup_ndown = pyalps.loadEigenstateMeasurements(
        [inputfile], what='ndownnup')[0][0].y[0]
    self.loc_ndown_ndown = pyalps.loadEigenstateMeasurements(
        [inputfile], what='ndownndown')[0][0].y[0]
    self.loc_splus_sminus = pyalps.loadEigenstateMeasurements(
        [inputfile], what='splus_sminus')[0][0].y[0]
def load_spectrum_observable(fname, observable, remove_equal_indexes=False):
    if not os.path.exists(fname):
        raise IOError('Archive `%s` not found.' % fname)
    data = pyalps.loadEigenstateMeasurements([fname], [observable])
    data = pyalps.flatten(data)
    if len(data) != 1:
        raise ObservableNotFound(fname, observable)
    d = data[0]

    if len(d.x) > 1 and d.props['observable'] != 'Entropy':
        # removing observables with repeated indexes
        if remove_equal_indexes:
            x = np.array(d.x)
            if len(x.shape) > 1:
                sel = np.array([True] * len(x))
                for i in range(len(x)):
                    for j in range(x.shape[1]):
                        for k in range(x.shape[1]):
                            if k != j and np.all(x[i, j, ...] == x[i, k, ...]):
                                sel[i] = False
                                break
                        else:
                            continue
                        break
                d.x = d.x[sel]
                y = []
                for i in range(len(d.y)):
                    y.append(d.y[i][sel])
                d.y = y

        # sorting observables
        x = np.array(d.x)
        if len(x.shape) > 1:
            x = x.reshape(x.shape[0], np.prod(x.shape[1:]))
            keys = []
            for i in reversed(range(x.shape[1])):
                keys.append(x[:, i])
            ind = np.lexsort(keys)
        else:
            ind = np.argsort(x)
        d.x = d.x[ind]
        for i in range(len(d.y)):
            d.y[i] = d.y[i][ind]

    return d
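# Usage sketch for load_spectrum_observable(); the archive name and the
# observable 'Local density' are assumptions. The helper returns a single
# pyalps dataset with sorted x indices (and, optionally, entries with
# repeated indices removed).
if __name__ == '__main__':
    d = load_spectrum_observable('sim.task1.out.h5', 'Local density',
                                 remove_equal_indexes=True)
    print(d.props['observable'], d.y[0])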
def detectDataType(fname):
    fname = pyalps.make_list(fname)
    # Monte Carlo results
    try:
        data = pyalps.loadMeasurements(fname)
        if len(data[0]) == 0:
            raise RuntimeError
        for task in data:
            for obs in task:
                tmp = obs.y[0].error
    except (RuntimeError, AttributeError, IndexError):
        pass
    else:
        return compareMC
    # mixed type (QWL)
    try:
        data = pyalps.loadMeasurements(fname)
        if len(data[0]) == 0:
            raise RuntimeError
    except (RuntimeError, AttributeError, IndexError):
        pass
    else:
        return compareMixed
    # epsilon-precise results
    try:
        data = pyalps.loadEigenstateMeasurements(fname)
    except RuntimeError:
        pass
    else:
        return compareEpsilon
    raise Exception("Measurement data type couldn't be detected")
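# Sketch of how detectDataType() is meant to be used: it returns one of the
# compare* callables, which can then be applied to the test and reference
# files. The wrapper name compare_against_reference is hypothetical.
def compare_against_reference(testfiles, reffiles):
    compare = detectDataType(testfiles)
    return compare(testfiles, reffiles)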
def print_rdm1(inputfile, tag):
    tag1 = tag
    tag2 = tag
    f = open('oneparticle.rdm.%s.%s' % (tag1, tag2), 'w')
    b = open('extDMRG_%s_%s.rdm1' % (tag1, tag2), 'w')

    # load data from the HDF5 result file
    dm = pyalps.loadEigenstateMeasurements([inputfile], what='oneptdm')[0][0]

    # create the full matrix from the upper triangle (dm)
    (dm_real, dm_imag) = assemble_complex_dm(dm)

    spinors = int(dm.props["L"])
    for j in range(spinors):
        for i in range(spinors):
            dump_element(f, dm_real[i, j], dm_imag[i, j], i, j)
            dump_element(b, dm_real[i, j], dm_imag[i, j], i + 1, j + 1)

    f.close()
    b.close()
def main():
    Ws = np.linspace(2e11, 3.2e11, 10)  # [2e10]
    nW = len(Ws)
    Ns = range(0, 2 * L + 1)  # range(23,27)
    nN = len(Ns)
    WNs = zip(range(nW * nN),
              [[i, j] for i in range(nW) for j in range(nN)],
              [[Wi, Ni] for Wi in Ws for Ni in Ns])
    ntasks = len(WNs)

    start = datetime.datetime.now()

    pbar = progressbar.ProgressBar(widgets=['Res: ' + str(resi) + ' ',
                                            progressbar.Percentage(), ' ',
                                            progressbar.Bar(), ' ',
                                            progressbar.Timer()],
                                   maxval=ntasks).start()
    with concurrent.futures.ProcessPoolExecutor(max_workers=numthreads) as executor:
        futures = [executor.submit(runmps, task, iW, iN, Wi, N)
                   for (task, [iW, iN], [Wi, N]) in WNs]
        for future in pbar(concurrent.futures.as_completed(futures)):
            future.result()

    end = datetime.datetime.now()

    # load all measurements for all states
    data = pyalps.loadEigenstateMeasurements(pyalps.getResultFiles(prefix=basename))

    solved = makeres(nW, nN)
    Es = makeres(nW, nN)
    ns = makeres(nW, nN)
    n2s = makeres(nW, nN)
    corrs = makeres(nW, nN)
    ncorrs = makeres(nW, nN)

    for d in data:
        for s in d:
            iW = int(s.props['iW'])
            iN = int(s.props['iN'])
            solved[iW][iN] = s.props['solved']
            if s.props['observable'] == 'Energy':
                Es[iW][iN] = s.y[0]
            if s.props['observable'] == 'Local density':
                ns[iW][iN] = s.y[0]
            if s.props['observable'] == 'Local density squared':
                n2s[iW][iN] = s.y[0]
            if s.props['observable'] == 'One body density matrix':
                corrs[iW][iN] = sparse.coo_matrix((s.y[0], (s.x[:, 0], s.x[:, 1]))).toarray()
            if s.props['observable'] == 'Density density':
                ncorrs[iW][iN] = sparse.coo_matrix((s.y[0], (s.x[:, 0], s.x[:, 1]))).toarray()

    resultsfile = open(resdir + resifile(resi), 'w')
    resultsstr = ''
    resultsstr += 'seed[' + str(resi) + ']=' + str(seed) + ';\n'
    resultsstr += 'L[' + str(resi) + ']=' + str(L) + ';\n'
    resultsstr += 'nmax[' + str(resi) + ']=' + str(nmax) + ';\n'
    resultsstr += 'sweeps[' + str(resi) + ']=' + str(sweeps) + ';\n'
    resultsstr += 'maxstates[' + str(resi) + ']=' + str(maxstates) + ';\n'
    resultsstr += 'periodic[' + str(resi) + ']=' + str(periodic) + ';\n'
    resultsstr += 'twisted[' + str(resi) + ']=' + str(twist) + ';\n'
    resultsstr += 'xi[' + str(resi) + ']=' + mathematica(xi) + ';\n'
    resultsstr += 'Ws[' + str(resi) + ']=' + mathematica(Ws) + ';\n'
    resultsstr += 'ts[' + str(resi) + ']=' + mathematica([JWi(Wi) for Wi in Ws]) + ';\n'
    resultsstr += 'Us[' + str(resi) + ']=' + mathematica([UW(Wi) for Wi in Ws]) + ';\n'
    resultsstr += 'Ns[' + str(resi) + ']=' + mathematica(Ns) + ';\n'
    resultsstr += 'solved[' + str(resi) + ']=' + mathematica(solved) + ';\n'
    resultsstr += 'Eres[' + str(resi) + ']=' + mathematica(Es) + ';\n'
    resultsstr += 'nres[' + str(resi) + ']=' + mathematica(ns) + ';\n'
    resultsstr += 'n2res[' + str(resi) + ']=' + mathematica(n2s) + ';\n'
    resultsstr += 'corrres[' + str(resi) + ']=' + mathematica(corrs) + ';\n'
    resultsstr += 'ncorrres[' + str(resi) + ']=' + mathematica(ncorrs) + ';\n'
    resultsstr += 'runtime[' + str(resi) + ']="' + str(end - start) + '";\n'
    resultsfile.write(resultsstr)
    'Nup_total': 6,
    'Ndown_total': 6,
    't': 1,
    'U': 8.,
    'mu': '0.8 * (x-L/2)^2',
    'SWEEPS': 5,
    'NUMBER_EIGENVALUES': 1,
    'MAXSTATES': 100,
    'MEASURE_LOCAL[Local density]': 'n'
})

# write the input file and run the simulation
input_file = pyalps.writeInputFiles(basename, parms)
res = pyalps.runApplication('mps_optim', input_file, writexml=True)

# load all measurements for all states
data = pyalps.loadEigenstateMeasurements(
    pyalps.getResultFiles(prefix=basename), ['Local density'])

for d in pyalps.flatten(data):
    d.y = d.y[0]
    d.props['line'] = '-o'

plt.figure()
pyalps.plot.plot(data)
plt.legend()
plt.ylabel('local density')
plt.xlabel('site')
plt.show()
    'hz': hz,
    'cg': cg,
    'sg': sg,
    'SWEEPS': sweeps,
    'NUM_WARMUP_STATES': warmup_states,
    'NUMBER_EIGENVALUES': 1,
    'MAXSTATES': max_states,
    'MEASURE_LOCAL[nUP]': 'nUP',
    'MEASURE_LOCAL[nDO]': 'nDO',
    'MEASURE_CORRELATIONS[One-body Correlation UP]': "bdagUP:bUP",
    'MEASURE_CORRELATIONS[One-body Correlation DO]': "bdagDO:bDO",
    'MEASURE_CORRELATIONS[One-body Correlation UPDO]': "bdagUP:bDO",
    'MEASURE_CORRELATIONS[Two-body Correlation UP]': "nUP:nUP",
    'MEASURE_CORRELATIONS[Two-body Correlation DO]': "nDO:nDO",
    'MEASURE_CORRELATIONS[Two-body Correlation UPDO]': "nUP:nDO"
}]

# Write the input file and run the simulation
input_file = pyalps.writeInputFiles(filename, parms)
res = pyalps.runApplication('dmrg', input_file, writexml=False, MPI=None)

# Load measurements for the ground state
data = pyalps.loadEigenstateMeasurements(
    pyalps.getResultFiles(prefix=filename))

# Print the properties of the ground state
if __name__ == '__main__':
    for s in data[0]:
        print s.props['observable'], ' : ', s.y[0]
def runmain(pipe):
    # ts = np.linspace(0.05, 0.3, 15).tolist()
    ts = np.linspace(5e10, 2.5e11, 5).tolist()
    ts = [ts[0]]
    # ti = int(sys.argv[4])
    # if ti >= 0:
    #     ts = [ts[ti]]
    # ts = [11e10]
    # ts = [2.5e11]
    # ts = [8e10]
    # ts = [np.linspace(0.01, 0.3, 10).tolist()[2]]
    # ts = [0.3]
    # ts = np.linspace(0.3, 0.3, 1).tolist()

    [speckle(t) for t in ts]

    Ns = range(1, 2 * L + 1, 1)
    Ns = range(30, 101, 1)
    Ns = range(50, 80, 1)
    # Ns = [70]
    # Ns = range(2*L-5,2*L+1,1)
    # Ns = [1]
    # Ns = range(1,15,1)
    # Ns = range(1,16,1)
    # Ns = range(1, L, 1)
    # Ns = range(L+1, 2*L+1, 1)
    # Ns = [ 16 ]
    # Ns = range(3,17,1)
    # Ns = range(1, 16, 1)
    # Ns = range(1,7,1)
    # Ns = [7]
    # Ns = [1,2,3,4,5,6]
    # Ns = [1]
    # Ns = range(7,13,1)
    # Ns = range(1,13,1)
    # Ns = [6,7,8,9]
    # Ns = range(1, L+1, 1)
    # Ns = range(L+1,2*L+1,1)
    # Ns = [L+1,L+2]
    # Ns = [L+1]
    # Do L+2 at some point

    dims = [len(ts), len(Ns), neigen]
    ndims = dims + [L]
    Cdims = dims + [L, L]
    trunc = np.zeros(dims)
    E0res = np.zeros(dims)
    nres = np.zeros(ndims)
    n2res = np.zeros(ndims)
    Cres = np.zeros(Cdims)
    cres = np.zeros(Cdims)
    trunc.fill(np.NaN)
    E0res.fill(np.NaN)
    nres.fill(np.NaN)
    n2res.fill(np.NaN)
    Cres.fill(np.NaN)
    cres.fill(np.NaN)

    mindims = [len(ts), len(Ns)]
    nmindims = mindims + [L]
    Cmindims = mindims + [L, L]
    truncmin = np.zeros(mindims)
    E0minres = np.zeros(mindims)
    nminres = np.zeros(nmindims)
    n2minres = np.zeros(nmindims)
    Cminres = np.zeros(Cmindims)
    cminres = np.zeros(Cmindims)
    truncmin.fill(np.NaN)
    E0minres.fill(np.NaN)
    nminres.fill(np.NaN)
    n2minres.fill(np.NaN)
    Cminres.fill(np.NaN)
    cminres.fill(np.NaN)

    # E0res = [[[np.NaN for i in range(reps)] for j in range(len(Ns))] for k in range(len(ts))]
    # E0res = [[[] for j in range(len(Ns))] for k in range(len(ts))]

    start = datetime.datetime.now()

    with concurrent.futures.ThreadPoolExecutor(
            max_workers=numthreads) as executor:
        futures = [
            executor.submit(rundmrg, i, tN[0][0], tN[0][1], tN[1][0], tN[1][1])
            for i, tN in enumerate(
                zip(itertools.product(ts, Ns),
                    itertools.product(range(0, len(ts)), range(0, len(Ns)))))
        ]
        pickle.dump(len(futures), pipe)
        for future in concurrent.futures.as_completed(futures):
            future.result()
            pickle.dump(1, pipe)

    ip = np.zeros([len(ts), len(Ns)])

    res = ''
    res += 'Wres[{0}]={1};\n'.format(resi, mathformat([speckle(Wi) for Wi in ts]))
    res += 'Jres[{0}]={1};\n'.format(resi, mathformat([JW(speckle(Wi)) for Wi in ts]))
    res += 'Ures[{0}]={1};\n'.format(resi, mathformat([UW(speckle(Wi)) for Wi in ts]))
    res += 'Wmres[{0}]={1};\n'.format(resi, mathformat([Wi for Wi in ts]))
    res += 'Jmres[{0}]={1};\n'.format(resi, mathformat([JW(np.array([Wi, Wi]))[0] for Wi in ts]))
    res += 'Umres[{0}]={1};\n'.format(resi, mathformat([UW(np.array([Wi]))[0] for Wi in ts]))
    res += 'neigen[{0}]={1};\n'.format(resi, neigen)
    res += 'delta[{0}]={1};\n'.format(resi, delta)
    res += 'trunc[{0}]={1};\n'.format(resi, mathformat(trunc))
    res += 'Lres[{0}]={1};\n'.format(resi, L)
    res += 'sweeps[{0}]={1};\n'.format(resi, sweeps)
    res += 'maxstates[{0}]={1};\n'.format(resi, maxstates)
    res += 'warmup[{0}]={1};\n'.format(resi, warmup)
    res += 'truncerror[{0}]={1};\n'.format(resi, truncerror)
    res += 'nmax[{0}]={1};\n'.format(resi, nmax)
    res += 'Nres[{0}]={1};\n'.format(resi, mathformat(Ns))
    res += 'tres[{0}]={1};\n'.format(resi, mathformat(ts))
    res += 'mures[{0}]={1};\n'.format(resi, mathformat(mu))

    data = pyalps.loadEigenstateMeasurements(
        pyalps.getResultFiles(prefix=filenameprefix))
    for d in data:
        try:
            it = int(d[0].props['it'])
            iN = int(d[0].props['iN'])
            # ip = int(d[0].props['ip'])
            for s in d:
                for case in switch(s.props['observable']):
                    if case('Truncation error'):
                        # trunc[it][iN][ip] = s.y[0]
                        # trunc[it][iN] = s.y
                        break
                    if case('Energy'):
                        for i, sy in enumerate(s.y):
                            E0res[it][iN][i] = sy
                        # E0res[it][iN][ip] = s.y[0]
                        # E0res[it][iN] = s.y
                        break
                    if case('Local density'):
                        for i, sy in enumerate(make2d(s.y)):
                            nres[it][iN][i] = sy
                        # nres[it][iN][ip] = s.y[0]
                        # nres[it][iN] = s.y
                        break
                    if case('Local density squared'):
                        for i, sy in enumerate(make2d(s.y)):
                            n2res[it][iN][i] = sy
                        # n2res[it][iN][ip] = s.y[0]
                        # n2res[it][iN] = s.y
                        break
                    if case('Onebody density matrix'):
                        for i, sy in enumerate(s.y):
                            for x, y in zip(s.x, sy):
                                Cres[it][iN][i][tuple(x)] = y
                        # for x, y in zip(s.x, s.y[0]):
                        #     Cres[it][iN][ip][tuple(x)] = y
                        # for x, y in zip(s.x, s.y[0]):
                        #     Cres[it][iN][tuple(x)] = y
                        break
            for i in range(neigen):
                Cres[it][iN][i][range(L), range(L)] = nres[it][iN][i]
                cres[it][iN][i] = Cres[it][iN][i] / np.sqrt(
                    np.outer(nres[it][iN][i], nres[it][iN][i]))
            # for ieig in range(neigen):
            #     Cres[it][iN][ieig][range(L), range(L)] = nres[it][iN][ieig]
            #     cres[it][iN][ieig] = Cres[it][iN][ieig] / np.sqrt(np.outer(nres[it][iN][ieig], nres[it][iN][ieig]))
        # except Exception as e:
        except BufferError as e:
            print(e.message)

    # for it in range(len(ts)):
    #     for iN in range(len(Ns)):
    #         try:
    #             m = min(E0res[it][iN])
    #             ieig = np.where(E0res[it][iN] == m)[0][0]
    #             truncmin[it][iN] = trunc[it][iN][ieig]
    #             E0minres[it][iN] = E0res[it][iN][ieig]
    #             nminres[it][iN] = nres[it][iN][ieig]
    #             n2minres[it][iN] = n2res[it][iN][ieig]
    #             Cminres[it][iN] = Cres[it][iN][ieig]
    #             cminres[it][iN] = cres[it][iN][ieig]
    #         except Exception as e:
    #             print(e.message)

    end = datetime.datetime.now()

    res += 'E0res[{0}]={1};\n'.format(resi, mathformat(E0res))
    res += 'nres[{0}]={1};\n'.format(resi, mathformat(nres))
    res += 'n2res[{0}]={1};\n'.format(resi, mathformat(n2res))
    res += 'Cres[{0}]={1};\n'.format(resi, mathformat(Cres))
    res += 'cres[{0}]={1};\n'.format(resi, mathformat(cres))
    # res += 'truncmin[{0}]={1};\n'.format(resi, mathformat(truncmin))
    # res += 'E0minres[{0}]={1};\n'.format(resi, mathformat(E0minres))
    # res += 'nminres[{0}]={1};\n'.format(resi, mathformat(nminres))
    # res += 'n2minres[{0}]={1};\n'.format(resi, mathformat(n2minres))
    # res += 'Cminres[{0}]={1};\n'.format(resi, mathformat(Cminres))
    # res += 'cminres[{0}]={1};\n'.format(resi, mathformat(cminres))
    res += 'runtime[{0}]=\"{1}\";\n'.format(resi, end - start)

    resf.write(res)
    resf.flush()
    os.fsync(resf.fileno())

    if sys.platform == 'linux2':
        shutil.copy(resfile,
                    '/home/ubuntu/Dropbox/Amazon EC2/Simulation Results/BH-MPS')
parms = []
parms.append({
    'LATTICE': "open ladder",
    'L': 10,
    'MODEL_LIBRARY': "mymodels.xml",
    'MODEL': "fermion Hubbard",
    'CONSERVED_QUANTUMNUMBERS': 'Nup,Ndown',
    'Nup_total': 10,
    'Ndown_total': 10,
    't0': "1+0.6*I",
    'ct0': "1-0.6*I",
    't1': 0.1,
    'U': 0.,
    'SWEEPS': 6,
    'MAXSTATES': 400,
    'COMPLEX': 1,
})

# write the input file and run the simulation
input_file = pyalps.writeInputFiles(basename, parms)
res = pyalps.runApplication('mps_optim', input_file, writexml=True)

# load all measurements for all states
data = pyalps.loadEigenstateMeasurements(
    pyalps.getResultFiles(prefix=basename), ['Energy'])

en_exact = -28.1129977
print('Exact energy for MAXSTATES=inf ::', en_exact)
for d in pyalps.flatten(data):
    print(d.props['observable'], '=', d.y)
    'U': 8.,
    'SWEEPS': 5,
    'NUMBER_EIGENVALUES': 1,
    'MAXSTATES': 100,
    'MEASURE_LOCAL[Local density]': 'n',
    'MEASURE_LOCAL_AT[String order 2]': 'st:st|(4,5),(5,6),(6,7)',
    'MEASURE_LOCAL_AT[String order 4]': 'st:st:st:st|((4,5,6,7),(3,4,5,6),(5,6,7,8))',
})

# write the input file and run the simulation
input_file = pyalps.writeInputFiles(basename, parms)
res = pyalps.runApplication('mps_optim', input_file, writexml=True)

# load all measurements for all states
data = pyalps.loadEigenstateMeasurements(
    pyalps.getResultFiles(prefix=basename), ['String order 2', 'String order 4'])

for d in pyalps.flatten(data):
    print('##', d.props['observable'])
    for x, y in zip(d.x, d.y[0]):
        print('Sites:', x)
        print('Val: ', y)
def load_1rdm(inputfile):
    # load data from the HDF5 result file
    rdm = pyalps.loadEigenstateMeasurements([inputfile], what='oneptdm')[0][0]
    return rdm
def main():
    # earlier choices: [1e11], [7.9e10], np.linspace(2e11,3.2e11,10), [2e10]
    Ws = np.linspace(7.9e10, 1.1e12, 17)
    nW = len(Ws)
    # Ns = [L]  # earlier choices: range(0,2*L+1), range(30,86), range(40,70), range(24,2*L+1), range(23,27)
    # nN = len(Ns)
    sigmas = [0, 2, 5, 10]  # range(0, 11)
    nsigma = len(sigmas)
    Wsigmas = zip(range(nW * nsigma),
                  [[i, j] for i in range(nW) for j in range(nsigma)],
                  [[Wi, sigmai] for Wi in Ws for sigmai in sigmas])
    ntasks = len(Wsigmas)

    start = datetime.datetime.now()

    pbar = progressbar.ProgressBar(widgets=['Res: ' + str(resi) + ' ',
                                            progressbar.Percentage(), ' ',
                                            progressbar.Bar(), ' ',
                                            progressbar.Timer()],
                                   maxval=ntasks).start()
    with concurrent.futures.ProcessPoolExecutor(max_workers=numthreads) as executor:
        futures = [executor.submit(runmps, task, iW, isigma, Wi, sigma)
                   for (task, [iW, isigma], [Wi, sigma]) in Wsigmas]
        for future in pbar(concurrent.futures.as_completed(futures)):
            future.result()
        sys.stderr.flush()

    end = datetime.datetime.now()

    # load all measurements for all states
    data = pyalps.loadEigenstateMeasurements(pyalps.getResultFiles(prefix=basename))

    Es = makeres(nW, nsigma)
    ns = makeres(nW, nsigma)
    n2s = makeres(nW, nsigma)
    corrs = makeres(nW, nsigma)
    ncorrs = makeres(nW, nsigma)
    entropy = makeres(nW, nsigma)
    es = makeres(nW, nsigma)

    for d in data:
        for s in d:
            iW = int(s.props['iW'])
            isigma = int(s.props['is'])
            if s.props['observable'] == 'Energy':
                Es[iW][isigma] = s.y[0]
            if s.props['observable'] == 'Local density':
                ns[iW][isigma] = s.y[0]
            if s.props['observable'] == 'Local density squared':
                n2s[iW][isigma] = s.y[0]
            if s.props['observable'] == 'One body density matrix':
                corrs[iW][isigma] = sparse.coo_matrix((s.y[0], (s.x[:, 0], s.x[:, 1]))).toarray()
            if s.props['observable'] == 'Density density':
                ncorrs[iW][isigma] = sparse.coo_matrix((s.y[0], (s.x[:, 0], s.x[:, 1]))).toarray()
            if s.props['observable'] == 'Entropy':
                entropy[iW][isigma] = s.y[0]
            if s.props['observable'] == 'Entanglement Spectra':
                # keep the four largest values of each entanglement spectrum
                es[iW][isigma] = [[v for v in reversed(sorted(esi[1]))][0:4]
                                  for esi in s.y[0]]

    resultsfile = open(resdir + resifile(resi), 'w')
    resultsstr = ''
    resultsstr += 'seed[' + str(resi) + ']=' + str(seed) + ';\n'
    resultsstr += 'L[' + str(resi) + ']=' + str(L) + ';\n'
    resultsstr += 'nmax[' + str(resi) + ']=' + str(nmax) + ';\n'
    resultsstr += 'sweeps[' + str(resi) + ']=' + str(sweeps) + ';\n'
    resultsstr += 'maxstates[' + str(resi) + ']=' + str(maxstates) + ';\n'
    resultsstr += 'Ws[' + str(resi) + ']=' + mathematica(Ws) + ';\n'
    resultsstr += 'ts[' + str(resi) + ']=' + mathematica([JWi(Wi) for Wi in Ws]) + ';\n'
    resultsstr += 'Us[' + str(resi) + ']=' + mathematica([UW(Wi) for Wi in Ws]) + ';\n'
    resultsstr += 'sigmas[' + str(resi) + ']=' + mathematica(sigmas) + ';\n'
    resultsstr += 'Eres[' + str(resi) + ']=' + mathematica(Es) + ';\n'
    resultsstr += 'nres[' + str(resi) + ']=' + mathematica(ns) + ';\n'
    resultsstr += 'n2res[' + str(resi) + ']=' + mathematica(n2s) + ';\n'
    resultsstr += 'corrres[' + str(resi) + ']=' + mathematica(corrs) + ';\n'
    resultsstr += 'ncorrres[' + str(resi) + ']=' + mathematica(ncorrs) + ';\n'
    resultsstr += 'entropy[' + str(resi) + ']=' + mathematica(entropy) + ';\n'
    resultsstr += 'es[' + str(resi) + ']=' + mathematica(es) + ';\n'
    resultsstr += 'runtime[' + str(resi) + ']="' + str(end - start) + '";\n'
    resultsfile.write(resultsstr)
def __init__(self, inputfile):
    self.loc_nup = assy_vec(
        pyalps.loadEigenstateMeasurements([inputfile], what='Nup')[0][0])
    self.loc_ndown = assy_vec(
        pyalps.loadEigenstateMeasurements([inputfile], what='Ndown')[0][0])
    self.loc_docc = assy_vec(
        pyalps.loadEigenstateMeasurements([inputfile], what='Nupdown')[0][0])

    self.norb = len(self.loc_nup)
    empty_diag = np.zeros(self.norb)

    self.corr_cdag_up_c_up = assy_hc(
        empty_diag,
        pyalps.loadEigenstateMeasurements([inputfile], what='dm_up')[0][0])
    self.corr_cdag_down_c_down = assy_hc(
        empty_diag,
        pyalps.loadEigenstateMeasurements([inputfile], what='dm_down')[0][0])
    self.corr_nupnup = assy_hc(
        empty_diag,
        pyalps.loadEigenstateMeasurements([inputfile], what='nupnup')[0][0])
    self.corr_nupndown = assy_hc(
        empty_diag,
        pyalps.loadEigenstateMeasurements([inputfile], what='nupndown')[0][0])
    self.corr_ndownnup = assy_hc(
        empty_diag,
        pyalps.loadEigenstateMeasurements([inputfile], what='ndownnup')[0][0])
    self.corr_ndownndown = assy_hc(
        empty_diag,
        pyalps.loadEigenstateMeasurements([inputfile], what='ndownndown')[0][0])
    self.corr_docc = assy_hc(
        empty_diag,
        pyalps.loadEigenstateMeasurements([inputfile], what='doccdocc')[0][0])
    self.corr_trans_up = assy_hc(
        empty_diag,
        pyalps.loadEigenstateMeasurements(
            [inputfile], what='transfer_up_while_down')[0][0])
    self.corr_trans_down = assy_hc(
        empty_diag,
        pyalps.loadEigenstateMeasurements(
            [inputfile], what='transfer_down_while_up')[0][0])

    u1 = pyalps.loadEigenstateMeasurements(
        [inputfile], what='transfer_up_while_down_at_2')[0][0]
    u2 = pyalps.loadEigenstateMeasurements(
        [inputfile], what='transfer_up_while_down_at_1')[0][0]
    d1 = pyalps.loadEigenstateMeasurements(
        [inputfile], what='transfer_down_while_up_at_2')[0][0]
    d2 = pyalps.loadEigenstateMeasurements(
        [inputfile], what='transfer_down_while_up_at_1')[0][0]
    self.corr_trans_up_down2 = assy_c(empty_diag, u1, u2)
    self.corr_trans_up_down1 = assy_c(empty_diag, u2, u1)
    self.corr_trans_down_up2 = assy_c(empty_diag, d1, d2)
    self.corr_trans_down_up1 = assy_c(empty_diag, d2, d1)

    self.corr_trans_pair = assy_hc(
        empty_diag,
        pyalps.loadEigenstateMeasurements([inputfile], what='transfer_pair')[0][0])
    self.corr_spinflip = assy_hc(
        empty_diag,
        pyalps.loadEigenstateMeasurements([inputfile], what='spinflip')[0][0])

    u1 = pyalps.loadEigenstateMeasurements([inputfile], what='nupdocc')[0][0]
    u2 = pyalps.loadEigenstateMeasurements([inputfile], what='doccnup')[0][0]
    self.corr_nupdocc = assy_c(empty_diag, u1, u2)
    self.corr_doccnup = assy_c(empty_diag, u2, u1)

    u1 = pyalps.loadEigenstateMeasurements([inputfile], what='ndowndocc')[0][0]
    u2 = pyalps.loadEigenstateMeasurements([inputfile], what='doccndown')[0][0]
    self.corr_ndowndocc = assy_c(empty_diag, u1, u2)
    self.corr_doccndown = assy_c(empty_diag, u2, u1)
def compareMixed(testfiles, reffiles, tol_factor='auto', whatlist=None):
    """Compare results of QWL and DMRG (ALPS).

    Returns True if the test succeeded."""
    if tol_factor == 'auto':
        tol_factor = 2.0

    testdata = pyalps.loadMeasurements(testfiles)
    refdata = pyalps.loadMeasurements(reffiles)
    if len(testdata) != len(refdata):
        raise Exception(
            "Comparison Error: test and reference data differ in number of tasks")

    # This is needed by the dmrg example
    try:
        testeig = pyalps.loadEigenstateMeasurements(testfiles)
        refeig = pyalps.loadEigenstateMeasurements(reffiles)
        for ttask, rtask, teig, reig in zip(testdata, refdata, testeig, refeig):
            ttask += teig
            rtask += reig
    except RuntimeError:
        pass

    # File level
    compare_list = []
    for testtask, reftask in zip(testdata, refdata):
        testfile = testtask[0].props['filename']
        reffile = reftask[0].props['filename']

        # Ensure we compare equivalent tasks
        if len(testtask) != len(reftask):
            raise Exception("Comparison Error: test and reference data have "
                            "different number of observables\n")

        # Observables
        # Select only observables from whatlist if specified
        if whatlist:
            notfoundtest = [
                w for w in whatlist
                if w not in [o.props['observable'] for o in testtask]
            ]
            if notfoundtest:
                print("The following observables specified for comparison\n"
                      "have not been found in test results:")
                print("File:", testfile)
                print(notfoundtest)
                sys.exit(1)
            notfoundref = [
                w for w in whatlist
                if w not in [o.props['observable'] for o in reftask]
            ]
            if notfoundref:
                print("The following observables specified for comparison\n"
                      "have not been found in reference results:")
                print("File:", reffile)
                print(notfoundref)
                sys.exit(1)
            testtask = [o for o in testtask if o.props['observable'] in whatlist]
            reftask = [o for o in reftask if o.props['observable'] in whatlist]

        # print("\ncomparing file " + testfile + " against file " + reffile)
        compare_obs = []
        for testobs, refobs in zip(testtask, reftask):
            # MC if it succeeds
            try:
                # Scalar observables
                if pyalps.size(testobs.y) == 1:
                    testerr = testobs.y[0].error
                    referr = refobs.y[0].error
                    tol = np.sqrt(testerr**2 + referr**2) * tol_factor
                    diff = np.abs(testobs.y[0].mean - refobs.y[0].mean)
                    compare_obs.append(obsdict(tol, diff, testobs.props))
                # Array valued observables
                else:
                    tol_list = []
                    diff_list = []
                    for (ty, ry) in zip(testobs.y, refobs.y):
                        tol_list.append(np.sqrt(ty.error**2 + ry.error**2) * tol_factor)
                        diff_list.append(np.abs(ty - ry))
                    maxdiff = max(diff_list)
                    tol = tol_list[diff_list.index(maxdiff)] * tol_factor
                    compare_obs.append(obsdict(tol, maxdiff, testobs.props))
            # Epsilon otherwise
            except AttributeError:
                # Scalar observables
                if pyalps.size(testobs.y) == 1:
                    tol = max(10e-12, np.abs(refobs.y[0]) * 10e-12) * tol_factor
                    diff = np.abs(testobs.y[0] - refobs.y[0])
                    compare_obs.append(obsdict(tol, diff, testobs.props))
                # Array valued observables
                else:
                    tol_list = []
                    diff_list = []
                    for (ty, ry) in zip(testobs.y, refobs.y):
                        tol_list.append(max(10e-12, ry * 10e-12))
                        diff_list.append(np.abs(ty - ry))
                    maxdiff = max(diff_list)
                    tol = tol_list[diff_list.index(maxdiff)] * tol_factor
                    compare_obs.append(obsdict(tol, maxdiff, testobs.props))
        compare_list.append(compare_obs)

    # writeTest2stdout(compare_list)  # or a file, if that has been specified
    succeed_list = [obs['passed'] for obs_list in compare_list for obs in obs_list]
    return False not in succeed_list, compare_list
import pyalps

fileheader = 'heisenberg'

data = pyalps.loadEigenstateMeasurements(
    pyalps.getResultFiles(prefix=fileheader), 'Energy')
# print data, len(data)

J_En = pyalps.collectXY(data, x='J', y='Energy')
print data
for x, y in zip(J_En[0].x, J_En[0].y):
    print x, y
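# A possible follow-up (a sketch, not part of the original script): plot the
# collected (J, Energy) curve with pyalps.plot; assumes matplotlib is installed.
import matplotlib.pyplot as plt
import pyalps.plot

plt.figure()
pyalps.plot.plot(J_En)
plt.xlabel('$J$')
plt.ylabel('Energy')
plt.show()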
def main():
    # earlier choices: [0.01]*10, [1e-10,1e-9,1e-8,1e-7,1e-6,1e-5,1e-4,1e-3,1e-2,1e-1],
    # [0.01]*8, [0.01,0.1], np.linspace(0.01, 0.05, 5).tolist()
    ts = [0.01]
    nt = len(ts)
    Us = [1] * nt
    # earlier choices: range(23,27), range(25,2*L+1), [35,36,37], [32]*12,
    # range(32,40), range(38,46), [40,41,42,43], range(51,70), [66]*9,
    # [66,67,68], [66,67,68,69,70]
    Ns = range(0, 2 * L + 1)
    nN = len(Ns)
    tUNs = zip(range(nt * nN),
               [[i, j] for i in range(nt) for j in range(nN)],
               [[Ui, ti, Ni] for (Ui, ti) in zip(Us, ts) for Ni in Ns])
    ntasks = len(tUNs)

    start = datetime.datetime.now()

    pbar = progressbar.ProgressBar(widgets=[progressbar.Percentage(), ' ',
                                            progressbar.Bar(), ' ',
                                            progressbar.Timer()],
                                   maxval=ntasks).start()
    # with concurrent.futures.ThreadPoolExecutor(max_workers=numthreads) as executor:
    with concurrent.futures.ProcessPoolExecutor(max_workers=numthreads) as executor:
        futures = [executor.submit(runmps, task, it, iN, Ui, ti, N)
                   for (task, [it, iN], [Ui, ti, N]) in tUNs]
        for future in pbar(concurrent.futures.as_completed(futures)):
            future.result()

    end = datetime.datetime.now()

    # load all measurements for all states
    data = pyalps.loadEigenstateMeasurements(pyalps.getResultFiles(prefix=basename))

    solved = makeres(nt, nN)
    Es = makeres(nt, nN)
    ns = makeres(nt, nN)
    n2s = makeres(nt, nN)
    corrs = makeres(nt, nN)
    ncorrs = makeres(nt, nN)

    for d in data:
        for s in d:
            it = int(s.props['it'])
            iN = int(s.props['iN'])
            solved[it][iN] = s.props['solved']
            if s.props['observable'] == 'Energy':
                Es[it][iN] = s.y[0]
            if s.props['observable'] == 'Local density':
                ns[it][iN] = s.y[0]
            if s.props['observable'] == 'Local density squared':
                n2s[it][iN] = s.y[0]
            if s.props['observable'] == 'One body density matrix':
                corrs[it][iN] = sparse.coo_matrix((s.y[0], (s.x[:, 0], s.x[:, 1]))).toarray()
            if s.props['observable'] == 'Density density':
                ncorrs[it][iN] = sparse.coo_matrix((s.y[0], (s.x[:, 0], s.x[:, 1]))).toarray()

    resultsfile = open(resdir + 'res.' + str(resi) + '.txt', 'w')
    resultsstr = ''
    resultsstr += 'seed[' + str(resi) + ']=' + str(seed) + ';\n'
    resultsstr += 'L[' + str(resi) + ']=' + str(L) + ';\n'
    resultsstr += 'nmax[' + str(resi) + ']=' + str(nmax) + ';\n'
    resultsstr += 'sweeps[' + str(resi) + ']=' + str(sweeps) + ';\n'
    resultsstr += 'maxstates[' + str(resi) + ']=' + str(maxstates) + ';\n'
    resultsstr += 'periodic[' + str(resi) + ']=' + str(periodic) + ';\n'
    resultsstr += 'twisted[' + str(resi) + ']=' + str(twist) + ';\n'
    resultsstr += 'xi[' + str(resi) + ']=' + mathematica(xi) + ';\n'
    resultsstr += 'ts[' + str(resi) + ']=' + mathematica(ts) + ';\n'
    resultsstr += 'Us[' + str(resi) + ']=' + mathematica(Us) + ';\n'
    resultsstr += 'Ns[' + str(resi) + ']=' + mathematica(Ns) + ';\n'
    resultsstr += 'solved[' + str(resi) + ']=' + mathematica(solved) + ';\n'
    resultsstr += 'Eres[' + str(resi) + ']=' + mathematica(Es) + ';\n'
    resultsstr += 'nres[' + str(resi) + ']=' + mathematica(ns) + ';\n'
    resultsstr += 'n2res[' + str(resi) + ']=' + mathematica(n2s) + ';\n'
    resultsstr += 'corrres[' + str(resi) + ']=' + mathematica(corrs) + ';\n'
    resultsstr += 'ncorrres[' + str(resi) + ']=' + mathematica(ncorrs) + ';\n'
    resultsstr += 'runtime[' + str(resi) + ']="' + str(end - start) + '";\n'
    resultsfile.write(resultsstr)

    print 'Res: ' + str(resi)
def load_3rdm(inputfile):
    # load data from the HDF5 result file
    rdm = pyalps.loadEigenstateMeasurements([inputfile],
                                            what='transition_threeptdm')[0][0]
    rdm.y[0] = rdm.y[0]
    return rdm
parms = [{
    'optimization': 'singlesite',
    'LATTICE': 'open chain lattice',
    'L': 20,
    'MODEL': 'spin',
    'local_S0': '0.5',
    'local_S1': '1',
    'CONSERVED_QUANTUMNUMBERS': 'N,Sz',
    'Sz_total': 9,
    'J': 1,
    'SWEEPS': 4,
    'NUMBER_EIGENVALUES': 1,
    'MAXSTATES': 50,
    'MEASURE_LOCAL[Spin]': 'Sz',
    # 'init_state': 'local_quantumnumbers',
    # 'initial_local_Sz': ','.join(['0.5']*10+['-0.5']*1+['0.5']*9),  # '0.5,...,-0.5', '1,0,0,...'
    # 'initial_local_S': ','.join(['0.5']*20+['-0.5']*0),  # '0.5,...,-0.5', '1,0,0,...'
}]

# write the input file and run the simulation
input_file = pyalps.writeInputFiles('SingleSite3/parm_spin_one', parms)
res = pyalps.runApplication('mps_optim', input_file, writexml=True)

# load all measurements for all states
data = pyalps.loadEigenstateMeasurements(
    pyalps.getResultFiles(prefix='SingleSite3/parm_spin_one'))

# print properties of the eigenvector:
for s in data[0]:
    print s.props['observable'], ' : ', s.y[0]
# prepare the input parameters
parms = [{
    'LATTICE': "chain lattice",
    'MODEL': "spin",
    'local_S': 1,
    'J': 1,
    'L': 4,
    'CONSERVED_QUANTUMNUMBERS': 'Sz',
    'MEASURE_STRUCTURE_FACTOR[Structure Factor S]': 'Sz',
    'MEASURE_CORRELATIONS[Diagonal spin correlations]=': 'Sz',
    'MEASURE_CORRELATIONS[Offdiagonal spin correlations]': 'Splus:Sminus'
}]

# write the input file and run the simulation
input_file = pyalps.writeInputFiles('ed01a', parms)
res = pyalps.runApplication('sparsediag', input_file)

# load all measurements for all states
data = pyalps.loadEigenstateMeasurements(pyalps.getResultFiles(prefix='ed01a'))

# print properties of ground states in all sectors:
for sector in data[0]:
    print '\nSector with Sz =', sector[0].props['Sz'],
    print 'and k =', sector[0].props['TOTAL_MOMENTUM']
    for s in sector:
        if pyalps.size(s.y[0]) == 1:
            print s.props['observable'], ' : ', s.y[0]
        else:
            for (x, y) in zip(s.x, s.y[0]):
                print s.props['observable'], '(', x, ') : ', y
e4 = list()
e5 = list()
e6 = list()

file_name1 = 'e4-' + str(num)
file_name2 = 'e5-' + str(num)
file_name3 = 'e6-' + str(num)

# e4
parms[0]['Nup_total'] = Nup
parms[0]['Ndown_total'] = 0
input_file = pyalps.writeInputFiles(file_name1, parms)
res = pyalps.runApplication('dmrg', input_file, writexml=True)
data = pyalps.loadEigenstateMeasurements(
    pyalps.getResultFiles(prefix=file_name1))
for s in data[0]:
    f.append(s.y[0])
for m in range(0, len(f), 2):
    e4.append(f[m])
print(f)

# e5
parms[0]['Nup_total'] = Nup + 1
parms[0]['Ndown_total'] = 1
input_file = pyalps.writeInputFiles(file_name2, parms)
res = pyalps.runApplication('dmrg', input_file, writexml=True)
data = pyalps.loadEigenstateMeasurements(
    pyalps.getResultFiles(prefix=file_name2))
def runmain():
    ts = np.linspace(0.01, 0.3, 1).tolist()
    # ts = [np.linspace(0.01, 0.3, 10).tolist()[2]]
    # ts = [0.3]
    # ts = np.linspace(0.3, 0.3, 1).tolist()

    Ns = range(1, 2 * L + 1, 1)
    # Ns = range(1,15,1)
    # Ns = range(1,16,1)
    # Ns = range(1, L, 1)
    # Ns = range(L+1, 2*L+1, 1)
    # Ns = [ 16 ]
    # Ns = range(3,17,1)
    Ns = range(1, 16, 1)

    dims = [len(ts), len(Ns), reps]
    ndims = dims + [L]
    Cdims = dims + [L, L]
    trunc = np.zeros(dims)
    E0res = np.zeros(dims)
    nres = np.zeros(ndims)
    n2res = np.zeros(ndims)
    Cres = np.zeros(Cdims)
    cres = np.zeros(Cdims)
    E0res.fill(np.NaN)
    nres.fill(np.NaN)
    n2res.fill(np.NaN)
    Cres.fill(np.NaN)
    cres.fill(np.NaN)
    # E0res = [[[np.NaN for i in range(reps)] for j in range(len(Ns))] for k in range(len(ts))]
    # E0res = [[[] for j in range(len(Ns))] for k in range(len(ts))]

    start = datetime.datetime.now()

    with concurrent.futures.ThreadPoolExecutor(max_workers=numthreads) as executor:
        futures = [executor.submit(rundmrg, i, tN[0][0], tN[0][1], tN[1][0], tN[1][1])
                   for i, tN in enumerate(
                       zip(itertools.product(ts, Ns),
                           itertools.product(range(0, len(ts)), range(0, len(Ns)))))]
        for future in gprogress(concurrent.futures.as_completed(futures),
                                size=len(futures)):
            pass

    ip = np.zeros([len(ts), len(Ns)])

    data = pyalps.loadEigenstateMeasurements(
        pyalps.getResultFiles(prefix=filenameprefix))
    for d in data:
        for s in d:
            it = int(s.props['it'])
            iN = int(s.props['iN'])
            ip = int(s.props['ip'])
            for case in switch(s.props['observable']):
                if case('Truncation error'):
                    trunc[it][iN][ip] = s.y[0]
                    break
                if case('Energy'):
                    E0res[it][iN][ip] = s.y[0]
                    # E0res[it][iN].append(s.y[0])
                    break
                if case('Local density'):
                    nres[it][iN][ip] = s.y[0]
                    break
                if case('Local density squared'):
                    n2res[it][iN][ip] = s.y[0]
                    break
                if case('Correlation function'):
                    Cres[it][iN][ip] = np.split(s.y[0], L)
                    break
            cres[it][iN][ip] = Cres[it][iN][ip] / np.sqrt(
                np.outer(nres[it][iN][ip], nres[it][iN][ip]))

    end = datetime.datetime.now()

    resi = sys.argv[1]
    if sys.platform == 'darwin':
        resfile = '/Users/Abuenameh/Documents/Simulation Results/BH-DMRG/res.' + str(resi) + '.txt'
    elif sys.platform == 'linux2':
        resfile = '/home/ubuntu/Dropbox/Amazon EC2/Simulation Results/BH-DMRG/res.' + str(resi) + '.txt'
    resf = open(resfile, 'w')

    res = ''
    res += 'delta[{0}]={1};\n'.format(resi, delta)
    res += 'trunc[{0}]={1};\n'.format(resi, mathformat(trunc))
    res += 'Lres[{0}]={1};\n'.format(resi, L)
    res += 'sweeps[{0}]={1};\n'.format(resi, sweeps)
    res += 'maxstates[{0}]={1};\n'.format(resi, maxstates)
    res += 'warmup[{0}]={1};\n'.format(resi, warmup)
    res += 'truncerror[{0}]={1};\n'.format(resi, truncerror)
    res += 'perturb[{0}]={1};\n'.format(resi, perturb)
    res += 'nmax[{0}]={1};\n'.format(resi, nmax)
    res += 'Nres[{0}]={1};\n'.format(resi, mathformat(Ns))
    res += 'tres[{0}]={1};\n'.format(resi, mathformat(ts))
    res += 'mures[{0}]={1};\n'.format(resi, mathformat(mu))
    res += 'E0res[{0}]={1};\n'.format(resi, mathformat(E0res))
    res += 'nres[{0}]={1};\n'.format(resi, mathformat(nres))
    res += 'n2res[{0}]={1};\n'.format(resi, mathformat(n2res))
    res += 'Cres[{0}]={1};\n'.format(resi, mathformat(Cres))
    res += 'cres[{0}]={1};\n'.format(resi, mathformat(cres))
    res += 'runtime[{0}]=\"{1}\";\n'.format(resi, end - start)
    resf.write(res)

    gtk.main_quit()
def load_4rdm(inputfile):
    # load data from the HDF5 result file
    return pyalps.loadEigenstateMeasurements([inputfile], what='fourptdm')[0][0]
# List measurements performed on the ground state
meas_list = [
    'Energy', 'Truncation error', 'One-body Correlation UP',
    'One-body Correlation DO', 'Two-body Correlation UP',
    'Two-body Correlation DO', 'nUP', 'nDO', 'One-body Correlation UPDO',
    'Two-body Correlation UPDO'
]

for j in indx_list:
    fname = '{}'.format(j)
    try:
        # Load all measurements on the ground state
        data = pyalps.loadEigenstateMeasurements(
            pyalps.getResultFiles(prefix=fname), what=meas_list)

        # Save eigenstate properties as a dictionary
        prop = data[0][0].props
        Nmax, L = prop['NMax'], prop['L']

        # Extract properties
        E0 = data[0][0].y[0]
        trunc = data[0][1].y[0]
        obUP = data[0][2].y[0]
        obDO = data[0][3].y[0]
        tbUP = data[0][4].y[0]
        tbDO = data[0][5].y[0]
        nUP = data[0][6].y[0]
        nDO = data[0][7].y[0]
        obUPDO = data[0][8].y[0]
    'MODEL': "spin",
    'CONSERVED_QUANTUMNUMBERS': 'N,Sz',
    'Sz_total': 0,
    'J': 1,
    'SWEEPS': 4,
    'NUMBER_EIGENVALUES': 1,
    'L': 32,
    'MAXSTATES': 100
}]

# write the input file and run the simulation
input_file = pyalps.writeInputFiles('parm_spin_one_half', parms)
res = pyalps.runApplication('dmrg', input_file, writexml=True)

# load all measurements for all states
data = pyalps.loadEigenstateMeasurements(
    pyalps.getResultFiles(prefix='parm_spin_one_half'))

# print properties of the eigenvector:
for s in data[0]:
    print(s.props['observable'], ' : ', s.y[0])

# load and plot iteration history
iter = pyalps.loadMeasurements(
    pyalps.getResultFiles(prefix='parm_spin_one_half'),
    what=['Iteration Energy', 'Iteration Truncation Error'])

plt.figure()
pyalps.plot.plot(iter[0][0])
plt.title('Iteration history of ground state energy (S=1/2)')
plt.ylim(-15, 0)
plt.ylabel('$E_0$')
plt.xlabel('iteration')
def compareEpsilon(testfiles, reffiles, tol_factor='auto', whatlist=None):
    """Compare results from diagonalization applications.

    Returns True if the test succeeded."""
    if tol_factor == 'auto':
        tol_factor = 1.0

    testdata = pyalps.loadEigenstateMeasurements(testfiles)
    refdata = pyalps.loadEigenstateMeasurements(reffiles)
    if not testdata or not refdata:
        if not testdata:
            print("loadEigenstateMeasurements of file %s returned an empty list"
                  % testfiles)
        if not refdata:
            print("loadEigenstateMeasurements of file %s returned an empty list"
                  % reffiles)
        return

    # File level
    compare_list = []
    for testtask, reftask in zip(testdata, refdata):
        try:
            # ALPS applications
            testfile = testtask[0][0].props['filename']
            reffile = reftask[0][0].props['filename']
        except AttributeError:
            # workaround for MAQUIS DMRG which doesn't have sectors
            testtask = [testtask]
            reftask = [reftask]
            testfile = testtask[0][0].props['filename']
            reffile = reftask[0][0].props['filename']

        # Ensure we compare equivalent tasks
        if len(testtask) != len(reftask):
            raise Exception("Comparison Error: test and reference data have "
                            "different number of sectors\n"
                            "(Have both reference and test data been pyalps.evaluate'd?)")

        # Sector level
        # print("\ncomparing file " + testfile + " against file " + reffile)
        compare_sector = []
        for testsector, refsector in zip(testtask, reftask):
            # Observables
            # Select only observables from whatlist if specified
            if whatlist:
                notfoundtest = [
                    w for w in whatlist
                    if w not in [o.props['observable'] for o in testsector]
                ]
                if notfoundtest:
                    print("The following observables specified for comparison\n"
                          "have not been found in test results:")
                    print("File:", testfile)
                    print(notfoundtest)
                    sys.exit(1)
                notfoundref = [
                    w for w in whatlist
                    if w not in [o.props['observable'] for o in refsector]
                ]
                if notfoundref:
                    print("The following observables specified for comparison\n"
                          "have not been found in reference results:")
                    print("File:", reffile)
                    print(notfoundref)
                    sys.exit(1)
                testsector = [o for o in testsector if o.props['observable'] in whatlist]
                refsector = [o for o in refsector if o.props['observable'] in whatlist]

            for testobs, refobs in zip(testsector, refsector):
                # Scalar observables
                if pyalps.size(testobs.y[0]) == 1:
                    tol = max(10e-12, np.abs(refobs.y[0]) * 10e-12) * tol_factor
                    diff = np.abs(testobs.y[0] - refobs.y[0])
                    compare_sector.append(obsdict(tol, diff, testobs.props))
                # Array valued observables
                else:
                    tol_list = []
                    diff_list = []
                    for (ty, ry) in zip(testobs.y[0], refobs.y[0]):
                        tol_list.append(max(10e-12, ry * 10e-12))
                        diff_list.append(np.abs(ty - ry))
                    maxdiff = max(diff_list)
                    tol = tol_list[diff_list.index(maxdiff)] * tol_factor
                    compare_sector.append(obsdict(tol, maxdiff, testobs.props))
        compare_list.append(compare_sector)

    # writeTest2stdout(compare_list)  # or a file, if that has been specified
    succeed_list = [obs['passed'] for obs_list in compare_list for obs in obs_list]
    return False not in succeed_list, compare_list
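# Hedged usage sketch for compareEpsilon(): the file names below are
# placeholders, and restricting the comparison to 'Energy' via whatlist is
# just one option. The function returns (all_passed, per_observable_report).
if __name__ == '__main__':
    passed, report = compareEpsilon(['test.task1.out.h5'],
                                    ['ref/test.task1.out.h5'],
                                    tol_factor='auto', whatlist=['Energy'])
    print(passed)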
basename = 'Tasks/bhstestts3'
# basename = 'Tasks/bhq1'

parmslist = []
for N in range(L + 1, 2 * L + 1):
    parmsi = deepcopy(parms)
    parmsi['N_total'] = N
    parmslist.append(parmsi)

# write the input file and run the simulation
input_file = pyalps.writeInputFiles(basename, parmslist)
res = pyalps.runApplication('mps_optim', input_file, writexml=True)

# load all measurements for all states
data = pyalps.loadEigenstateMeasurements(pyalps.getResultFiles(prefix=basename))

results = []
for d in data:
    for s in d:
        if s.props['observable'] == 'Energy':
            results += [(s.props['N_total'], s.y[0])]

Ns = [res[0] for res in sorted(results)]
energies = [res[1] for res in sorted(results)]
# print(energies)

resultsfile = open('/home/ubuntu/Dropbox/Amazon EC2/Simulation Results/ALPS-MPS/Results/'
                   + basename.split('/')[-1] + '.txt', 'w')
resultsstr = ('{' + str(L) + ',{'
              + ','.join(["{:d}".format(int(N)) for N in Ns]) + '},{'
              + ','.join(["{:.20f}".format(en) for en in energies]) + '}}')
print(resultsstr)
resultsfile.write(resultsstr)
def createTest(script, inputs=None, outputs=None, prefix=None, refdir='./ref'):
    """Create reference data, the .testin.xml file and an execute_test.py script.

    Arguments:
    ----------
    script:  computes the results to be tested
    inputs:  optional list of input files, needed if the application(s)
             called in 'script' rely on them; the input files must be in
             the same directory as 'script' (relative paths to another
             directory will not work)
    outputs or prefix:  the outputs of 'script', given either as a complete
             list of output files or as a file-name prefix

    Creates a script called apptest_name_of_script.py, which can be used to
    execute the test.
    """
    if outputs is not None and prefix is not None:
        raise Exception("Cannot both define outputs and prefix")
    elif outputs is None and prefix is None:
        raise Exception("Script output has to be specified")

    script = os.path.expandvars(script)
    scriptdir = os.path.dirname(script)
    if not os.path.exists(refdir):
        recursive_mkdir(refdir)

    # Copy input files to refdir to allow execution of script there
    if inputs is not None:
        for f in inputs:
            if not os.path.expandvars(os.path.dirname(f)) == scriptdir:
                print("Input files to %s should be in the same directory as %s"
                      % (script, script))
                sys.exit(1)
            shutil.copy(f, refdir)

    # execute given script in refdir (creates reference data)
    pardir = os.getcwd()
    os.chdir(refdir)
    cmdline = [sys.executable, os.path.join(pardir, script)]
    pyalps.executeCommand(cmdline)
    if inputs is not None:
        for f in inputs:
            os.remove(f)
    os.chdir(pardir)

    if prefix is None:
        reffiles = [os.path.join(refdir, os.path.basename(f)) for f in outputs]
    else:
        reffiles = pyalps.getResultFiles(prefix=prefix, dirname=refdir)
    if not reffiles:
        print("Reference files not found. (If you use 'loop' or 'dmrg', "
              "try to delete old result files.)")
        sys.exit(1)

    # acquire a list of all observables
    allobs = []
    try:
        eigenstatedata = pyalps.loadEigenstateMeasurements(reffiles)
    except RuntimeError:
        pass
    else:
        try:
            allobs += [o.props['observable'] for o in eigenstatedata[0][0]]
        # DMRG eigenstate data has one level of nesting less
        except TypeError:
            allobs += [o.props['observable'] for o in eigenstatedata[0]]

    try:
        mcdata = pyalps.loadMeasurements(reffiles)
    except RuntimeError:
        pass
    else:
        allobs += [o.props['observable'] for o in mcdata[0]]

    allobs = list(set(allobs))

    scriptname = os.path.basename(script)
    scriptname = os.path.splitext(scriptname)[0]
    scriptname_prefixed = 'apptest_%s.py' % scriptname

    # Write .xml test-input file
    refparms = {
        "TESTNAME": scriptname,
        "TOLERANCE": "auto",
        "WRITE_RESULTS_TO_FILE": "yes",
        "SAVE_OUT_IF_FAIL": "yes"
    }
    testinputfile = writeTestInputFile(script, inputs, refparms, reffiles, allobs)
    pyalps.tools.copyStylesheet(pardir)

    # Write .py test-start script
    f = open(scriptname_prefixed, 'w')
    f.write('#!/usr/bin/env python\n\n')
    f.write('import sys\n')
    f.write('from pyalps import apptest\n')
    f.write('# Explicitly specify "compMethod=..." and "outputs=..." if needed\n')
    f.write("ret = apptest.runTest( '%s', outputs='auto', compMethod='auto', pyexec='auto' )\n"
            % testinputfile)
    f.write('if not ret: sys.exit(1)\n')
    f.close()
    os.chmod(scriptname_prefixed, 0o755)
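# Hedged usage sketch for createTest(): the script name and prefix below are
# illustrative assumptions. This would run the given script once inside
# './ref' to produce reference data and emit an apptest_*.py driver script.
if __name__ == '__main__':
    createTest('spin_one_half.py', prefix='parm_spin_one_half', refdir='./ref')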
# Copyright (C) 2015 Institute for Theoretical Physics, ETH Zurich
#               2015 by Michele Dolfi <*****@*****.**>
# Distributed under the Boost Software License, Version 1.0.
# (See accompanying file LICENSE_1_0.txt or copy at
# http://www.boost.org/LICENSE_1_0.txt)

import numpy as np


def load_variance_for_dset(ss):
    try:
        import pyalps
    except ImportError, e:
        print 'ERROR: To extract new observables from the raw data you need the ALPS.Python library.'
        raise e
    variance = pyalps.loadEigenstateMeasurements([ss.props['filename']],
                                                 what=['EnergyVariance'])
    if len(variance) < 1 or len(variance[0]) < 1:
        raise Exception('EnergyVariance not found in', ss.props['filename'])
    return variance[0][0].y[0]


def load_truncated_weight_for_dset(ss):
    try:
        import pyalps
    except ImportError, e:
        print 'ERROR: To extract new observables from the raw data you need the ALPS.Python library.'
        raise e
    ar = pyalps.hdf5.archive(ss.props['filename'])
    try:
        if 'simulation' in ar.list_children('/'):
            iteration_path = '/simulation/iteration'