def LoadLastTruncation(sets, pname='TruncatedWeight'):
    last_sweep_selector = lambda props: props['sweep'] == props['nsweeps'] - 1
    for d in pyalps.flatten(sets):
        truncs = LoadDMRGSweeps([d.props['filename']], what=pname,
                                selector=last_sweep_selector)
        if truncs:
            d.props[pname] = max(pyalps.flatten(truncs)[0].y)
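# A minimal usage sketch for LoadLastTruncation, assuming pyalps is importable
# and LoadDMRGSweeps comes from this same module; 'results.h5' is a hypothetical
# DMRG result archive, not a file shipped with the code.
import pyalps

sets = LoadDMRGSweeps(['results.h5'], what='TruncatedWeight')
LoadLastTruncation(sets)  # annotates each dataset's props in place
for d in pyalps.flatten(sets):
    print d.props['TruncatedWeight']  # largest truncated weight of the last sweep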
def mergeXY(sets, foreach=[]):
    foreach_sets = {}
    for iset in pyalps.flatten(sets):
        fe_par_set = tuple((iset.props[m] for m in foreach))
        if fe_par_set in foreach_sets:
            foreach_sets[fe_par_set].append(iset)
        else:
            foreach_sets[fe_par_set] = [iset]
    for k, v in foreach_sets.items():
        common_props = pyalps.dict_intersect([q.props for q in v])
        res = pyalps.DataSet()
        res.props = common_props
        for im in range(0, len(foreach)):
            m = foreach[im]
            res.props[m] = k[im]
        for data in v:
            if len(res.x) > 0 and len(res.y) > 0:
                res.x = np.concatenate((res.x, data.x))
                res.y = np.concatenate((res.y, data.y))
            else:
                res.x = data.x
                res.y = data.y
        order = np.argsort(res.x, kind='mergesort')
        res.x = res.x[order]
        res.y = res.y[order]
        res.props['label'] = ''
        for im in range(0, len(foreach)):
            res.props['label'] += '%s = %s ' % (foreach[im], k[im])
        foreach_sets[k] = res
    return foreach_sets.values()
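# Usage sketch for mergeXY (hedged): 'data' stands for any list of pyalps.DataSet
# objects, e.g. the output of pyalps.loadMeasurements; all datasets sharing the
# same value of the property 'L' are merged into a single x-sorted DataSet.
merged = mergeXY(data, foreach=['L'])
for res in merged:
    print res.props['label'], len(res.x)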
def load_iterations_observable(fname, observable, remove_equal_indexes=False):
    if not os.path.exists(fname):
        raise IOError('Archive `%s` not found.' % fname)
    if remove_equal_indexes:
        print 'WARNING:', 'removing indexes is not implemented for iteration measurements.'
    data = pydmrg.LoadDMRGSweeps([fname], [observable])
    obs = pyalps.collectXY(data, 'sweep', observable)
    obs = pyalps.flatten(obs)
    if len(obs) == 0:
        raise ObservableNotFound(fname, observable)
    return obs[0]
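# Usage sketch (hedged): 'results.h5' and 'Energy' are placeholder names; the
# call returns one DataSet with the observable collected against sweep number.
obs = load_iterations_observable('results.h5', 'Energy')
print obs.x  # sweep indices
print obs.y  # observable value at each sweep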
def __init__(self, sets, x, obs):
    self.props = pyalps.dict_intersect(
        [d.props for d in pyalps.flatten(sets)]
    )
    self.props['observable'] = str(obs)
    self.xname = str(x)
    self.obsx = []
    self.ydata = np.empty((0, 0))
    self.xdata = np.empty(0)
    self.bonddims = np.empty(0)
    self.init_data(sets)
    self.xdata = np.array(self.xdata)
    self.ydata = np.array(self.ydata)
    order = np.argsort(self.xdata)
    self.bonddims = self.bonddims[order]
    self.xdata = self.xdata[order]
    for i in range(len(self.ydata)):
        self.ydata[i] = self.ydata[i][order]
def load_spectrum_observable(fname, observable, remove_equal_indexes=False):
    if not os.path.exists(fname):
        raise IOError('Archive `%s` not found.' % fname)
    data = pyalps.loadEigenstateMeasurements([fname], [observable])
    data = pyalps.flatten(data)
    if len(data) != 1:
        raise ObservableNotFound(fname, observable)
    d = data[0]
    if len(d.x) > 1 and d.props['observable'] != 'Entropy':
        # removing observables with repeated indexes
        if remove_equal_indexes:
            x = np.array(d.x)
            if len(x.shape) > 1:
                sel = np.array([True] * len(x))
                for i in range(len(x)):
                    for j in range(x.shape[1]):
                        for k in range(x.shape[1]):
                            if k != j and np.all(x[i, j, ...] == x[i, k, ...]):
                                sel[i] = False
                                break
                        else:
                            continue
                        break
                d.x = d.x[sel]
                y = []
                for i in range(len(d.y)):
                    y.append(d.y[i][sel])
                d.y = y
        # sorting observables
        x = np.array(d.x)
        if len(x.shape) > 1:
            x = x.reshape(x.shape[0], np.prod(x.shape[1:]))
            keys = []
            for i in reversed(range(x.shape[1])):
                keys.append(x[:, i])
            ind = np.lexsort(keys)
        else:
            ind = np.argsort(x)
        d.x = d.x[ind]
        for i in range(len(d.y)):
            d.y[i] = d.y[i][ind]
    return d
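# Usage sketch (hedged): placeholder archive and observable names; with
# remove_equal_indexes=True, entries whose site indexes repeat are dropped
# before the data is sorted by index.
try:
    d = load_spectrum_observable('results.h5', 'Local magnetization',
                                 remove_equal_indexes=True)
    print d.x
    print d.y[0]
except ObservableNotFound:
    print 'observable not found in archive'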
def init_data(self, sets):
    for i, q in enumerate(pyalps.flatten(sets)):
        # sorting according to q.x
        if len(q.x) > 0:
            order = tools.labels_argsort(q.x)
            qx = q.x[order]
            qy = q.y[order]
        else:
            qx = []
            qy = np.array(q.y)
        if i == 0:
            self.obsx = q.x
            self.ydata = np.empty((len(qy), 0))
        elif not np.all(abs(self.obsx - q.x) < 1e-8):
            raise Exception("Observable `x` values don't match!")
        self.bonddims = np.concatenate((self.bonddims,
                                        [q.props['max_bond_dimension']]))
        self.xdata = np.concatenate((self.xdata, [q.props[self.xname]]))
        self.ydata = np.column_stack((self.ydata, qy))
import pyalps
import numpy as np
import matplotlib.pyplot as plt
import pyalps.plot

## Please run the tutorial5a.py before this one

listobs = ['0', '2']  # we look at the convergence of flavors 0 and 2

## load all results
data = pyalps.loadDMFTIterations(pyalps.getResultFiles(pattern='parm_u_*.h5'),
                                 measurements=listobs, verbose=True)

## create a figure for each U and flavor
grouped = pyalps.groupSets(pyalps.flatten(data), ['U', 'observable'])
for sim in grouped:
    common_props = pyalps.dict_intersect([d.props for d in sim])

    ## rescale x-axis and set label
    for d in sim:
        d.x = d.x * d.props['BETA'] / float(d.props['N'])
        d.y *= -1.
        d.props['label'] = 'it' + d.props['iteration']

    ## plot all iterations for this U and flavor
    plt.figure()
    plt.xlabel(r'$\tau$')
    plt.ylabel(r'$-G_{flavor=%8s}(\tau)$' % common_props['observable'])
    plt.title('DMFT-05: Orbitally Selective Mott Transition on the Bethe lattice: ' +
              r'$U = %.4s$' % common_props['U'])
    pyalps.plot.plot(sim)
# Plot the binning analysis from all runs of the C++ program pimc
# in the current directory.
import numpy as np
import matplotlib.pyplot as plt
import pyalps
import pyalps.plot
import pyalps.load

runfiles = pyalps.getResultFiles(prefix='*.run')
loader = pyalps.load.Hdf5Loader()
respath = '/simulation/realizations/0/clones/0/results'
ebinning = pyalps.flatten(loader.ReadBinningAnalysis(
    runfiles, measurements=['Energy'], respath=respath))
tbinning = pyalps.flatten(loader.ReadBinningAnalysis(
    runfiles, measurements=['KineticEnergy'], respath=respath))
vbinning = pyalps.flatten(loader.ReadBinningAnalysis(
    runfiles, measurements=['PotentialEnergy'], respath=respath))

# one figure per observable, one curve per number of sweeps
for o in (ebinning, tbinning, vbinning):
    for d in o:
        d.props['label'] = str(d.props['SWEEPS']) + ' sweeps'
    plt.figure()
    plt.title(o[0].props['observable'])
    plt.xlabel('binning level')
    plt.ylabel('error estimate')
    pyalps.plot.plot(o)
    plt.legend()
plt.show()
cmd.write('x_val is ' + x_val + '\n')
cmd.write('y_val is ' + y_val + '\n')
if x_val not in val_list:
    cmd.write('The X-value you entered does not exist.\n')
    cmd.write('Please choose a value from this list: ' + str(val_list) + '\n')
    sys.exit(1)
elif y_val not in val_list:
    cmd.write('The Y-value you entered does not exist.\n')
    cmd.write('Please choose a value from this list: ' + str(val_list) + '\n')
    sys.exit(1)

# XML data file -> gnuplot-form text
cmd.write('Starting conversion from XML to gnuplot form.\n')
data = pyalps.loadMeasurements(read_file, y_val)
data = pyalps.flatten(data)
xy_data = pyalps.collectXY(data, x_val, y_val)
gnu_xy_data = pyalps.plot.makeGnuplotPlot(xy_data)
if args.debug:
    cmd.write(str(gnu_xy_data))
cmd.write('Finished conversion from XML to gnuplot form.\n')

# gnuplot-form text -> csv-form text
cmd.write('Starting conversion from gnuplot form to CSV.\n')
temp_file = '__tmp_replace__.dat'
f = open(temp_file, 'w')
f.write(gnu_xy_data)
f.close()
head_x = x_val
head_y = y_val
name.append(['Specific Heat', 'specheat'])
name.append(['Specific Heat Conventional', 'specheatconv'])
name.append(['Specific Heat by FT', 'specheatft'])
name.append(['Magnetic Susceptibility connected', 'magsuscon'])
name.append(['Magnetic Susceptibility connected for Scaling', 'magsusconsca'])
name.append(['Magnetic Susceptibility disconnected', 'magsusdis'])
name.append(['Magnetic Susceptibility disconnected for Scaling', 'magsusdissca'])
name.append(['Binder Ratio of Magnetization connected', 'bindercon'])
name.append(['Binder Ratio of Magnetization disconnected', 'binderdis'])
name.append(['Binder Ratio of Magnetization 1 connected', 'binder1con'])
name.append(['Binder Ratio of Magnetization 1 disconnected', 'binder1dis'])
name.append(['Specific Heat connected', 'specheatcon'])

for i in range(0, len(name)):
    data = pyalps.loadMeasurements(pyalps.getResultFiles(prefix='LRSW_params'),
                                   name[i][0])
    for item in pyalps.flatten(data):
        item.props['L'] = int(item.props['L'])
    graph = pyalps.collectXY(data, x='T', y=name[i][0], foreach=['L'])
    graph.sort(key=lambda item: item.props['L'])
    f1 = open(name[i][1] + '.plt', 'w')
    f1.write(pyalps.plot.makeGnuplotPlot(graph))
    f1.close()
    f2 = open(name[i][1] + '.dat', 'w')
    for j in graph:
        L = j.props['L']
        for k in range(0, len(j.x)):
            f2.write(str(L) + ' ' + str(j.x[k]) + ' '
                     + str(j.y[k].mean) + ' ' + str(j.y[k].error) + '\n')
    f2.close()
    print 'finished writing ' + name[i][1] + '.plt and ' + name[i][1] + '.dat'
        'J': 1,
        'THERMALIZATION': 1000,
        'SWEEPS': 100000,
        'UPDATE': "cluster",
        'MODEL': "Ising",
        'L': l
    })

# write the input file and run the simulation
input_file = pyalps.writeInputFiles('parm1b', parms)
pyalps.runApplication('spinmc', input_file, Tmin=5)

# load the binning analysis for the absolute value of the magnetization
binning = pyalps.loadBinningAnalysis(pyalps.getResultFiles(prefix='parm1b'),
                                     '|Magnetization|')
binning = pyalps.flatten(binning)

# make one plot with all data
for dataset in binning:
    dataset.props['label'] = 'L=' + str(dataset.props['L'])

plt.figure()
plt.title('Binning analysis for cluster updates')
plt.xlabel('binning level')
plt.ylabel('Error of |Magnetization|')
pyalps.plot.plot(binning)
plt.legend()
plt.show()

# make individual plots for each system size
for dataset in binning:
})

prefix = 'ed04a'
input_file = pyalps.writeInputFiles(prefix, parms)
# res = pyalps.runApplication('sparsediag', input_file, MPI=2, mpirun='mpirun')
res = pyalps.runApplication('sparsediag', input_file)
data = pyalps.loadEigenstateMeasurements(pyalps.getResultFiles(prefix=prefix))

# To perform CFT assignments, we need to calculate the ground state
# and the first excited state for each L.
# The output of the above load operation will be a hierarchical list sorted
# by L, so we can just iterate through it.
E0 = {}
E1 = {}
for Lsets in data:
    L = pyalps.flatten(Lsets)[0].props['L']
    # make a big list of all energy values
    allE = []
    for q in pyalps.flatten(Lsets):
        allE += list(q.y)
    allE = np.sort(allE)
    E0[L] = allE[0]
    E1[L] = allE[1]

# Subtract E0 and divide by the gap, then multiply by 1/8, which we know
# to be the smallest non-vanishing scaling dimension of the Ising CFT.
for q in pyalps.flatten(data):
    L = q.props['L']
    q.y = (q.y - E0[L]) / (E1[L] - E0[L]) * (1. / 8.)

spectrum = pyalps.collectXY(data, 'TOTAL_MOMENTUM', 'Energy', foreach=['L'])
import sys, os
import numpy as np
import matplotlib.pyplot as plt
import pyalps
from pyalps.plot import plot

files = pyalps.getResultFiles(dirname='data')
data = pyalps.loadMeasurements(files, ['|m|', 'm^2', 'Connected Susceptibility',
                                       'Binder Cumulant U2'])
for d in pyalps.flatten(data):
    d.props['M/L'] = d.props['M'] / d.props['L']

m = pyalps.collectXY(data, 'Jx', '|m|', foreach=['L', 'M'])
chi = pyalps.collectXY(data, 'Jx', 'Connected Susceptibility', foreach=['L', 'M'])
binder = pyalps.collectXY(data, 'Jx', 'Binder Cumulant U2', foreach=['L', 'M'])

for d in pyalps.flatten(m):
    d.x = np.exp(2. * d.props['Jy']) * d.x

plt.figure()
plot(m)
plt.xlabel('$J/\\Gamma$')
plt.ylabel('magnetization')
plt.legend(loc='best', frameon=False)

for d in pyalps.flatten(chi):
    d.x = np.exp(2. * d.props['Jy']) * d.x

plt.figure()
plot(chi)
# FITNESS FOR A PARTICULAR PURPOSE, TITLE AND NON-INFRINGEMENT. IN NO EVENT
# SHALL THE COPYRIGHT HOLDERS OR ANYONE DISTRIBUTING THE SOFTWARE BE LIABLE
# FOR ANY DAMAGES OR OTHER LIABILITY, WHETHER IN CONTRACT, TORT OR OTHERWISE,
# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
#
# ****************************************************************************

import pyalps
import pyalps.plot as alpsplot
import matplotlib.pyplot as pyplot

data = pyalps.loadMeasurements(pyalps.getResultFiles(prefix='parm9a'), [
    'Specific Heat',
    'Magnetization Density^2',
    'Binder Ratio of Magnetization'
])
for item in pyalps.flatten(data):
    item.props['L'] = int(item.props['L'])

magnetization2 = pyalps.collectXY(data, x='T', y='Magnetization Density^2',
                                  foreach=['L'])
magnetization2.sort(key=lambda item: item.props['L'])

specificheat = pyalps.collectXY(data, x='T', y='Specific Heat', foreach=['L'])
specificheat.sort(key=lambda item: item.props['L'])

binderratio = pyalps.collectXY(data, x='T', y='Binder Ratio of Magnetization',
                               foreach=['L'])
        'THERMALIZATION': 500,
        'U': u,
        'J': j,
        't0': 0.5,
        't1': 1
    })

# For more precise calculations we suggest increasing SWEEPS.

# write the input files and run the simulation
for p in parms:
    input_file = pyalps.writeParameterFile('parm_u_' + str(p['U']) + '_j_' + str(p['J']), p)
    res = pyalps.runDMFT(input_file)

listobs = ['0', '2']  # flavor 0 is SYMMETRIZED with 1, flavor 2 is SYMMETRIZED with 3

data = pyalps.loadMeasurements(pyalps.getResultFiles(pattern='parm_u_*h5'),
                               respath='/simulation/results/G_tau',
                               what=listobs, verbose=True)
for d in pyalps.flatten(data):
    d.x = d.x * d.props["BETA"] / float(d.props["N"])
    d.y = -d.y
    d.props['label'] = (r'$U=$' + str(d.props['U']) + '; flavor='
                        + str(d.props['observable'][len(d.props['observable']) - 1]))

plt.figure()
plt.yscale('log')
plt.xlabel(r'$\tau$')
plt.ylabel(r'$G_{flavor}(\tau)$')
plt.title('DMFT-05: Orbitally Selective Mott Transition on the Bethe lattice')
pyalps.plot.plot(data)
plt.legend()
plt.show()
import numpy as np
import pyalps
import pyalps.plot
import matplotlib.pyplot as plt

# prepare the input parameters
parms = [{
    'LATTICE': "ladder",
    'MODEL': "spin",
    'CONSERVED_QUANTUMNUMBERS': 'Sz',
    'local_S': 0.5,
    'J0': 1,
    'J1': 1,
    'L': 6
}]

# write the input file and run the simulation
input_file = pyalps.writeInputFiles('ed06b', parms)
res = pyalps.runApplication('fulldiag', input_file)

# run the evaluation and load all the plots
data = pyalps.evaluateFulldiagVersusT(pyalps.getResultFiles(prefix='ed06b'),
                                      DELTA_T=0.05, T_MIN=0.05, T_MAX=5.0)

# make one plot per quantity
for s in pyalps.flatten(data):
    plt.figure()
    plt.title("Antiferromagnetic Heisenberg ladder")
    pyalps.plot.plot(s)
plt.show()
listobs = ['0']  # we look at a single flavor (=0)
res_files = pyalps.getResultFiles(pattern='parm_*.h5')  # we look for result files

##################################################################################
## Display all iterations of the Green's function in imaginary time representation
##################################################################################

## load all iterations of G_{flavor=0}(tau)
data = pyalps.loadDMFTIterations(res_files, observable="G_tau",
                                 measurements=listobs, verbose=False)

## create a figure for each BETA
grouped = pyalps.groupSets(pyalps.flatten(data), ['BETA'])
for sim in grouped:
    common_props = pyalps.dict_intersect([d.props for d in sim])

    ## rescale x-axis and set label
    for d in sim:
        d.x = d.x * d.props['BETA'] / float(d.props['N'])
        d.props['label'] = 'it' + d.props['iteration']

    ## plot all iterations for this BETA
    plt.figure()
    plt.xlabel(r'$\tau$')
    plt.ylabel(r'$G_{flavor=0}(\tau)$')
    plt.title('Simulation at ' + r'$\beta = {beta}$'.format(beta=common_props['BETA']))
    pyalps.plot.plot(sim)
    p['update_each'] = 1
    p['COMPLEX'] = 1
    parms.append(p)

## write input files and run application
input_file = pyalps.writeInputFiles(basename + '.dynamic', parms)
res = pyalps.runApplication('mps_evolve', input_file)

## simulation results
data = pyalps.loadIterationMeasurements(pyalps.getResultFiles(prefix=basename + '.dynamic'),
                                        what=['Overlap'])

LE = pyalps.collectXY(data, x='Time', y='Overlap', foreach=['tau'])
for d in pyalps.flatten(LE):
    d.x = (d.x + 1.) * d.props['dt']  # convert time index to real time
    d.y = abs(d.y) ** 2  # Loschmidt echo: the modulus squared of the overlap
    d.props['label'] = r'$\tau={0}$'.format(d.props['tau'])

plt.figure()
pyalps.plot.plot(LE)
plt.xlabel('Time $t$')
plt.ylabel(r'Loschmidt Echo $|\langle \psi(0)|\psi(t) \rangle|^2$')
plt.title('Loschmidt Echo vs. Time')
plt.legend(loc='lower right')

## Read V[Time] from props
Ufig = pyalps.collectXY(data, x='Time', y='V', foreach=['tau'])
for d in pyalps.flatten(Ufig):
        'CUTOFF': c
    })

# write the input file and run the simulation
input_file = pyalps.writeInputFiles('mc06d', parms)
pyalps.runApplication('qwl', input_file)

# run the evaluation and load all the plots
results = pyalps.evaluateQWL(pyalps.getResultFiles(prefix='mc06d'),
                             DELTA_T=0.05, T_MIN=0.5, T_MAX=1.5)

# extract just the staggered structure factor S(Q) and rescale it by L^{-2+eta}
data = []
for s in pyalps.flatten(results):
    if s.props['ylabel'] == 'Staggered Structure Factor per Site':
        print 'yes'
        d = copy.deepcopy(s)  # make a deep copy so as not to change the original
        l = s.props['L']
        d.props['label'] = 'L=' + str(l)
        d.y = d.y * pow(float(l), -1.97)
        data.append(d)

# make plot
plt.figure()
plt.title("Scaling plot for cubic lattice Heisenberg antiferromagnet")
pyalps.plot.plot(data)
plt.legend()
plt.xlabel('Temperature $T/J$')
plt.ylabel(r'$S(\pi,\pi,\pi) L^{-2+\eta}$')
'MODEL' : "spin", 'local_S' : 0.5, 'J' : 1, 'NUMBER_EIGENVALUES' : 2, 'CONSERVED_QUANTUMNUMBER' : 'Sz', 'Sz_total' : Szt, 'J1' : J1, 'L' : L }) input_file = pyalps.writeInputFiles(prefix,parms) res = pyalps.runApplication('sparsediag', input_file) data = pyalps.loadEigenstateMeasurements(pyalps.getResultFiles(prefix=prefix)) # join all momenta grouped = pyalps.groupSets(pyalps.flatten(data), ['J1', 'L', 'Sz_total']) nd = [] for group in grouped: ally = [] allx = [] for q in group: ally += list(q.y) allx += list(q.x) r = pyalps.DataSet() sel = np.argsort(ally) r.y = np.array(ally)[sel] r.x = np.array(allx)[sel] r.props = pyalps.dict_intersect([q.props for q in group]) nd.append( r ) data = nd
input_file = pyalps.writeInputFiles('parm_spin_one', parms)
res = pyalps.runApplication('mps_optim', input_file, writexml=True)

# load all measurements for all states
data = pyalps.loadEigenstateMeasurements(
    pyalps.getResultFiles(prefix='parm_spin_one'))

# print properties of the eigenvector
for s in data[0]:
    print(s.props['observable'], ' : ', s.y[0])

# load and plot iteration history
iterations = pyalps.loadIterationMeasurements(
    pyalps.getResultFiles(prefix='parm_spin_one'),
    what=['Energy', 'TruncatedWeight'])

energy_iteration = pyalps.collectXY(pyalps.flatten(iterations),
                                    'iteration', 'Energy')
for d in energy_iteration:
    d.x = range(0, len(d.y))

truncation_iteration = pyalps.collectXY(pyalps.flatten(iterations),
                                        'iteration', 'TruncatedWeight')
for d in truncation_iteration:
    d.x = range(0, len(d.y))

plt.figure()
pyalps.plot.plot(energy_iteration)
plt.title('Iteration history of ground state energy (S=1)')
plt.ylabel('$E_0$')
plt.xlabel('iteration')

plt.figure()
    p['ALWAYS_MEASURE'] = 'Local Magnetization'
    p['chkp_each'] = nsteps
    p['measure_each'] = 5
    p['COMPLEX'] = 1
    parms.append(p)

## write input files and run application
input_file = pyalps.writeInputFiles(basename, parms)
res = pyalps.runApplication('mps_evolve', input_file)

## simulation results
data = pyalps.loadIterationMeasurements(pyalps.getResultFiles(prefix=basename),
                                        what=['Local Magnetization'])

for q in pyalps.flatten(data):
    L = q.props['L']
    # Compute the integrated flow of magnetization through the center,
    #   \Delta M = \sum_{n > L/2}^{L} ( <S_n^z(t)> + 1/2 ).
    # Start from \Delta M = L/4 ...
    loc = 0.5 * (L / 2)
    # ... and subtract <S_n^z(t)> for n = L/2 to L:
    q.y = np.array([loc - sum(q.y[0][L / 2:L])])

# Plot the error in the magnetization one site to the right of the chain center
Mag = pyalps.collectXY(data, x='Time', y='Local Magnetization', foreach=['Jz'])
for d in Mag:
    d.x = (d.x + 1) * d.props['DT']

plt.figure()
pyalps.plot.plot(Mag)
plt.xlabel('Time $t$')
for i in range(L):
    parmsi['U' + str(i) + '[Time]'] = ','.join(
        [mathematica(UW(W)) for W in quench(W_i, W_f, 2 * tau, dt)])
parmslist.append(parmsi)

input_file = pyalps.writeInputFiles(basename + '.dynamic', parmslist)
res = pyalps.runApplication('mps_evolve', input_file, writexml=True)
end = datetime.datetime.now()

## simulation results
data = pyalps.loadIterationMeasurements(pyalps.getResultFiles(prefix=basename + '.dynamic'),
                                        what=['Overlap'])

p = []
F = pyalps.collectXY(data, x='Time', y='Overlap', foreach=['tau'])
for d in pyalps.flatten(F):
    p.append([(d.x[-1] + 1) * d.props['dt'], 1 - abs(d.y[-1]) ** 2])
    # d.x = (d.x + 1.) * d.props['dt']  # convert time index to real time
    # d.y = abs(d.y)**2  # Loschmidt Echo defined as the modulus squared of the overlap
    # d.props['label'] = r'$\tau={0}$'.format(d.props['tau'])
print p
# print F

# plt.figure()
# pyalps.plot.plot(F)
# plt.xlabel('Time $t$')
# plt.ylabel('Loschmidt Echo $|< \psi(0)|\psi(t) > |^2$')
# plt.title('Loschmidt Echo vs. Time')
# plt.legend(loc='lower right')
    try:
        if 'simulation' in ar.list_children('/'):
            iteration_path = '/simulation/iteration'
        else:
            iteration_path = '/spectrum/iteration'
        sweeps = ar.list_children(iteration_path)
        sweeps = [int(s) for s in sweeps]
        max_sweep = max(sweeps)
        truncated_weight = ar[iteration_path + '/' + str(max_sweep)
                              + '/results/TruncatedWeight/mean/value']
        ss.props['TruncatedWeight'] = max(truncated_weight)
        return ss.props['TruncatedWeight']
    except Exception as e:
        print 'Warning:', 'no TruncatedWeight found in', ss.props['filename']
        print e


def loadEigenstateMeasurements(*args, **kwargs):
    try:
        import pyalps
    except ImportError as e:
        print 'ERROR: To extract new observables from the raw data you need the ALPS.Python library.'
        raise e
    data = pyalps.loadEigenstateMeasurements(*args, **kwargs)
    for d in pyalps.flatten(data):
        if isinstance(d, pyalps.DataSet):
            d.props['TruncatedWeight'] = load_truncated_weight_for_dset(d)
            d.props['EnergyVariance'] = load_variance_for_dset(d)
    return data
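# Usage sketch (hedged): this wrapper behaves like pyalps.loadEigenstateMeasurements
# but additionally attaches 'TruncatedWeight' and 'EnergyVariance' to each dataset;
# 'sim.task1.out.h5' is a placeholder result file.
import pyalps

data = loadEigenstateMeasurements(['sim.task1.out.h5'], ['Energy'])
for d in pyalps.flatten(data):
    print d.props['TruncatedWeight'], d.props['EnergyVariance']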